pax_global_header00006660000000000000000000000064141566646510014530gustar00rootroot0000000000000052 comment=88428965dc4a38c915920337ce653289be839db8 bx-python-0.8.13/000077500000000000000000000000001415666465100135515ustar00rootroot00000000000000bx-python-0.8.13/.github/000077500000000000000000000000001415666465100151115ustar00rootroot00000000000000bx-python-0.8.13/.github/workflows/000077500000000000000000000000001415666465100171465ustar00rootroot00000000000000bx-python-0.8.13/.github/workflows/deploy.yaml000066400000000000000000000054501415666465100213320ustar00rootroot00000000000000name: Deploy on: [push, pull_request] concurrency: group: deploy-${{ github.ref }} cancel-in-progress: true jobs: build_wheels: runs-on: ${{ matrix.os }} strategy: fail-fast: false matrix: os: [ubuntu-latest, macos-latest] arch: [auto] include: - os: ubuntu-latest arch: aarch64 steps: - uses: actions/checkout@v2 - uses: actions/setup-python@v2 - name: Set up QEMU to build non-native architectures if: ${{ matrix.arch == 'aarch64' }} uses: docker/setup-qemu-action@v1 - name: Install required Python packages run: | python -m pip install --upgrade pip setuptools wheel python -m pip install 'cibuildwheel>=2.2.0' twine - name: Build wheels run: python -m cibuildwheel --output-dir dist env: CIBW_ARCHS: ${{matrix.arch}} # Skip building musllinux wheels for aarch64, each one currently takes # more than 2 hours to build. # Skip also building the PyPy 3.7 wheel for macOS, because numpy # doesn't have a wheel on PyPI and it fails to install. CIBW_SKIP: '*-musllinux_aarch64 pp37-macosx_x86_64' - name: Check packages run: twine check dist/* - uses: actions/upload-artifact@v2 with: name: packages path: dist/ build_sdist: runs-on: ubuntu-latest steps: - uses: actions/checkout@v2 - uses: actions/setup-python@v2 - name: Install required Python packages run: | python -m pip install --upgrade pip setuptools wheel python -m pip install build twine - name: Build sdist run: | python -m build --sdist python -m venv test_venv . test_venv/bin/activate python -m pip install dist/*.tar.gz # Test with the same command specified for cibuildwheel in pyproject.toml python -c 'import bx, bx.align, bx.align.sitemask, bx.align.tools, bx.arrays, bx.bbi, bx.cookbook, bx.intervals, bx.intervals.operations, bx.intseq, bx.misc, bx.motif, bx.motif.io, bx.motif.logo, bx.phylo, bx.pwm, bx.seq, bx.tabular, bx_extras' - name: Check packages run: twine check dist/* - uses: actions/upload-artifact@v2 with: name: packages path: dist/ upload_pypi: if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/') && github.repository_owner == 'bxlab' needs: [build_wheels, build_sdist] runs-on: ubuntu-latest steps: - uses: actions/download-artifact@v2 with: name: packages path: dist - name: Publish to PyPI uses: pypa/gh-action-pypi-publish@release/v1 with: user: __token__ password: ${{ secrets.PYPI_API_TOKEN }} bx-python-0.8.13/.github/workflows/test.yaml000066400000000000000000000017351415666465100210170ustar00rootroot00000000000000name: Lint and test on: [push, pull_request] concurrency: group: test-${{ github.ref }} cancel-in-progress: true jobs: lint: runs-on: ubuntu-latest strategy: matrix: python-version: ['3.7', '3.10'] steps: - uses: actions/checkout@v2 - uses: actions/setup-python@v2 with: python-version: ${{ matrix.python-version }} - name: Install flake8 run: pip install flake8 flake8-import-order - name: Lint run: flake8 . 
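  # A minimal local equivalent of the lint job above, as a sketch (assumes a
  # working Python 3 environment; these mirror the commands the job runs in CI):
  #
  #   pip install flake8 flake8-import-order
  #   flake8 .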
test: runs-on: ubuntu-latest strategy: fail-fast: false matrix: python-version: ['3.7', '3.8', '3.9', '3.10'] steps: - uses: actions/checkout@v2 - uses: actions/setup-python@v2 with: python-version: ${{ matrix.python-version }} - name: Install liblzo2-dev run: sudo apt-get update && sudo apt-get -y install liblzo2-dev - name: Install tox run: pip install tox - name: Test run: tox bx-python-0.8.13/.gitignore000066400000000000000000000005011415666465100155350ustar00rootroot00000000000000# Build directory build # Python bytecode *.pyc # Object files *.so *.pyd # Source files generated by Cython *.c *.h # egg-info for inplace builds bx_python.egg-info # IDE project files *.kpf # windows shortcuts *.lnk # nose egg nose*.egg # .eggs directory .eggs # Virtualenv .venv # Built sdist directory dist bx-python-0.8.13/LICENSE000066400000000000000000000021571415666465100145630ustar00rootroot00000000000000Copyright (c) 2005-2015 The Pennsylvania State University Copyright (c) 2013-2020 The Johns Hopkins University Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. bx-python-0.8.13/MANIFEST.in000066400000000000000000000002241415666465100153050ustar00rootroot00000000000000include LICENSE recursive-include src *.h recursive-include src *.c recursive-include lib *.h recursive-include lib *.c recursive-include lib *.pyx bx-python-0.8.13/README.md000066400000000000000000000033341415666465100150330ustar00rootroot00000000000000[![Build Status](https://travis-ci.org/bxlab/bx-python.svg?branch=master)](https://travis-ci.org/bxlab/bx-python) [![Read the Docs](https://img.shields.io/readthedocs/bx-python.svg)](https://bx-python.readthedocs.io/) # bx-python The bx-python project is a Python library and associated set of scripts for rapid implementation of genome scale analyses. 
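As a quick taste of the library, here is a minimal sketch that builds an interval tree with the `bx.intervals.intersection` module and queries it for overlaps (the coordinates and annotation values below are invented for illustration):

```python
from bx.intervals.intersection import Intersecter, Interval

# Build a tree of annotated intervals.
tree = Intersecter()
tree.add_interval(Interval(10, 20, value={"name": "featureA"}))
tree.add_interval(Interval(15, 30, value={"name": "featureB"}))

# Find every interval overlapping [25, 40) (here just featureB).
for overlap in tree.find(25, 40):
    print(overlap.start, overlap.end, overlap.value)
```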
The library contains a variety of useful modules, but the particular strengths are: * Classes for reading and working with genome-scale multiple local alignments (in MAF, AXT, and LAV formats) * Generic data structure for indexing on disk files that contain blocks of data associated with intervals on various sequences (used, for example, to provide random access to individual alignments in huge files; optimized for use over network filesystems) * Data structures for working with intervals on sequences * "Binned bitsets" which act just like chromosome sized bit arrays, but lazily allocate regions and allow large blocks of all set or all unset bits to be stored compactly * "Intersecter" for performing fast intersection tests that preserve both query and target intervals and associated annotation ## Requirements Building currently requires liblzo2 (e.g. `sudo apt-get install liblzo2-dev` on Debian/Ubuntu). ## Installing The package can be installed with pip: ```pip install bx-python``` It is available in [bioconda](https://anaconda.org/bioconda/bx-python) (recommended): ```conda install -c conda-forge -c bioconda bx-python``` It is available in [Debian](https://tracker.debian.org/pkg/python-bx) and [Ubuntu](https://packages.ubuntu.com/python3-bx): ```sudo apt install python3-bx``` Or it can be built from a checkout of the repository: ```python setup.py install``` bx-python-0.8.13/doc/000077500000000000000000000000001415666465100143165ustar00rootroot00000000000000bx-python-0.8.13/doc/Makefile000066400000000000000000000051631415666465100157630ustar00rootroot00000000000000# Makefile for Sphinx documentation # # You can set these variables from the command line. SPHINXOPTS = SPHINXBUILD = sphinx-build PAPER = # Internal variables. PAPEROPT_a4 = -D latex_paper_size=a4 PAPEROPT_letter = -D latex_paper_size=letter ALLSPHINXOPTS = -d build/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source .PHONY: help clean html web pickle htmlhelp latex changes linkcheck help: @echo "Please use \`make <target>' where <target> is one of" @echo " html to make standalone HTML files" @echo " apidoc to run epydoc" @echo " pickle to make pickle files" @echo " json to make JSON files" @echo " htmlhelp to make HTML files and a HTML help project" @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" @echo " changes to make an overview over all changed/added/deprecated items" @echo " linkcheck to check all external links for integrity" clean: -rm -rf docbuild/* html: mkdir -p build/html build/doctrees $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) docbuild/html @echo @echo "Build finished. The HTML pages are in docbuild/html." apidoc: mkdir -p build/html/apidoc epydoc-2.6 --docformat restructuredtext ../lib/bx -o docbuild/html/apidoc @echo @echo "Epydoc finished. The pages are in docbuild/html/apidoc." pickle: mkdir -p build/pickle build/doctrees $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) docbuild/pickle @echo @echo "Build finished; now you can process the pickle files." web: pickle json: mkdir -p build/json build/doctrees $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) docbuild/json @echo @echo "Build finished; now you can process the JSON files." htmlhelp: mkdir -p build/htmlhelp build/doctrees $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) docbuild/htmlhelp @echo @echo "Build finished; now you can run HTML Help Workshop with the" \ ".hhp project file in build/htmlhelp." latex: mkdir -p build/latex build/doctrees $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) docbuild/latex @echo @echo "Build finished; the LaTeX files are in build/latex."
@echo "Run \`make all-pdf' or \`make all-ps' in that directory to" \ "run these through (pdf)latex." changes: mkdir -p build/changes build/doctrees $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) docbuild/changes @echo @echo "The overview file is in build/changes." linkcheck: mkdir -p build/linkcheck build/doctrees $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) docbuild/linkcheck @echo @echo "Link check complete; look for any errors in the above output " \ "or in build/linkcheck/output.txt." bx-python-0.8.13/doc/requirements.txt000066400000000000000000000001201415666465100175730ustar00rootroot00000000000000https://bitbucket.org/james_taylor/python-lzo-static/get/63987d89fd1b.zip numpy bx-python-0.8.13/doc/source/000077500000000000000000000000001415666465100156165ustar00rootroot00000000000000bx-python-0.8.13/doc/source/conf.py000066400000000000000000000141241415666465100171170ustar00rootroot00000000000000# # BxPython documentation build configuration file, created by # sphinx-quickstart on Fri May 08 10:18:22 2009. # # This file is execfile()d with the current directory set to its containing dir. # # The contents of this file are pickled, so don't put values in the namespace # that aren't pickleable (module imports are okay, they're removed automatically). # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. # If your extensions are in another directory, add it here. If the directory # is relative to the documentation root, use os.path.abspath to make it # absolute, like shown here. import bx # General configuration # --------------------- # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.intersphinx'] # Add any paths that contain templates here, relative to this directory. templates_path = ['templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8' # The master toctree document. master_doc = 'index' # General information about the project. project = 'bx-python' copyright = '2017, James Taylor' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = bx.__version__ # The full version, including alpha/beta/rc tags. release = version # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of documents that shouldn't be included in the build. #unused_docs = [] # List of directories, relative to source directory, that shouldn't be searched # for source files. exclude_trees = [] # The reST default role (used for this markup: `text`) to use for all documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). 
#add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # Options for HTML output # ----------------------- # The style sheet to use for HTML and HTML Help pages. A file of that name # must exist either in Sphinx' static/ path, or in one of the custom paths # given in html_static_path. html_style = 'base.css' # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['static'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. html_index = 'index.html' html_sidebars = {'index': 'indexsidebar.html'} # Additional templates that should be rendered to pages, maps page names to # template names. ##html_additional_pages = { ## 'index': 'index.html', ##} # If false, no module index is generated. #html_use_modindex = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, the reST sources are included in the HTML build as _sources/. #html_copy_source = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = '' # Output file base name for HTML help builder. htmlhelp_basename = 'bx-doc' # Options for LaTeX output # ------------------------ # The paper size ('letter' or 'a4'). #latex_paper_size = 'letter' # The font size ('10pt', '11pt' or '12pt'). #latex_font_size = '10pt' # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, document class [howto/manual]). latex_documents = [( 'index', 'bx-python.tex', 'bx-python Documentation', 'James Taylor', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # Additional stuff for the LaTeX preamble. #latex_preamble = '' # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_use_modindex = True # Example configuration for intersphinx: refer to the Python standard library. 
#intersphinx_mapping = {'http://docs.python.org/dev': None} bx-python-0.8.13/doc/source/contents.rst000066400000000000000000000003751415666465100202100ustar00rootroot00000000000000 bx-python documentation contents ================================ Browse the Python API `class documentation `_ Contents: .. toctree:: :maxdepth: 2 modules/index.rst * :ref:`genindex` * :ref:`modindex` * :ref:`search` bx-python-0.8.13/doc/source/index.rst000066400000000000000000000024551415666465100174650ustar00rootroot00000000000000About bx-python =============== The bx-python project is a Python library and associated set of scripts for rapid implementation of genome scale analyses. The library contains a variety of useful modules, but the particular strengths are: * Classes for reading and working with genome-scale multiple local alignments (in MAF, AXT, and LAV formats) * Generic data structure for indexing on disk files that contain blocks of data associated with intervals on various sequences (used, for example, to provide random access to individual alignments in huge files; optimized for use over network filesystems) * Data structures for working with intervals on sequences * "Binned bitsets" which act just like chromosome sized bit arrays, but lazily allocate regions and allow large blocks of all set or all unset bits to be stored compactly * "Intersecter" for performing fast intersection tests that preserve both query and target intervals and associated annotation These tools have been used in a variety of published research, and are a fundamental part of the ongoing Galaxy and ESPERR projects. Contents ======== .. toctree:: :maxdepth: 5 Application Documentation Indices and tables ================== * :ref:`genindex` * :ref:`modindex` * :ref:`search` bx-python-0.8.13/doc/source/lib/000077500000000000000000000000001415666465100163645ustar00rootroot00000000000000bx-python-0.8.13/doc/source/lib/bx.align.axt.rst000066400000000000000000000001771415666465100214200ustar00rootroot00000000000000bx.align.axt module =================== .. automodule:: bx.align.axt :members: :undoc-members: :show-inheritance: bx-python-0.8.13/doc/source/lib/bx.align.core.rst000066400000000000000000000002021415666465100215410ustar00rootroot00000000000000bx.align.core module ==================== .. automodule:: bx.align.core :members: :undoc-members: :show-inheritance: bx-python-0.8.13/doc/source/lib/bx.align.epo.rst000066400000000000000000000001771415666465100214070ustar00rootroot00000000000000bx.align.epo module =================== .. automodule:: bx.align.epo :members: :undoc-members: :show-inheritance: bx-python-0.8.13/doc/source/lib/bx.align.epo_tests.rst000066400000000000000000000002211415666465100226210ustar00rootroot00000000000000bx.align.epo_tests module ========================= .. automodule:: bx.align.epo_tests :members: :undoc-members: :show-inheritance: bx-python-0.8.13/doc/source/lib/bx.align.lav.rst000066400000000000000000000001771415666465100214060ustar00rootroot00000000000000bx.align.lav module =================== .. automodule:: bx.align.lav :members: :undoc-members: :show-inheritance: bx-python-0.8.13/doc/source/lib/bx.align.lav_tests.rst000066400000000000000000000002211415666465100226220ustar00rootroot00000000000000bx.align.lav_tests module ========================= ..
automodule:: bx.align.lav_tests :members: :undoc-members: :show-inheritance: bx-python-0.8.13/doc/source/lib/bx.align.maf.rst000066400000000000000000000001771415666465100213670ustar00rootroot00000000000000bx.align.maf module =================== .. automodule:: bx.align.maf :members: :undoc-members: :show-inheritance: bx-python-0.8.13/doc/source/lib/bx.align.maf_tests.rst000066400000000000000000000002211415666465100225770ustar00rootroot00000000000000bx.align.maf_tests module ========================= .. automodule:: bx.align.maf_tests :members: :undoc-members: :show-inheritance: bx-python-0.8.13/doc/source/lib/bx.align.rst000066400000000000000000000007121415666465100206200ustar00rootroot00000000000000bx.align package ================ Subpackages ----------- .. toctree:: bx.align.sitemask bx.align.tools Submodules ---------- .. toctree:: bx.align.axt bx.align.core bx.align.epo bx.align.epo_tests bx.align.lav bx.align.lav_tests bx.align.maf bx.align.maf_tests bx.align.score bx.align.score_tests Module contents --------------- .. automodule:: bx.align :members: :undoc-members: :show-inheritance: bx-python-0.8.13/doc/source/lib/bx.align.score.rst000066400000000000000000000002051415666465100217270ustar00rootroot00000000000000bx.align.score module ===================== .. automodule:: bx.align.score :members: :undoc-members: :show-inheritance: bx-python-0.8.13/doc/source/lib/bx.align.score_tests.rst000066400000000000000000000002271415666465100231550ustar00rootroot00000000000000bx.align.score_tests module =========================== .. automodule:: bx.align.score_tests :members: :undoc-members: :show-inheritance: bx-python-0.8.13/doc/source/lib/bx.align.sitemask.core.rst000066400000000000000000000002351415666465100233660ustar00rootroot00000000000000bx.align.sitemask.core module ============================= .. automodule:: bx.align.sitemask.core :members: :undoc-members: :show-inheritance: bx-python-0.8.13/doc/source/lib/bx.align.sitemask.cpg.rst000066400000000000000000000002321415666465100232040ustar00rootroot00000000000000bx.align.sitemask.cpg module ============================ .. automodule:: bx.align.sitemask.cpg :members: :undoc-members: :show-inheritance: bx-python-0.8.13/doc/source/lib/bx.align.sitemask.quality.rst000066400000000000000000000002461415666465100241300ustar00rootroot00000000000000bx.align.sitemask.quality module ================================ .. automodule:: bx.align.sitemask.quality :members: :undoc-members: :show-inheritance: bx-python-0.8.13/doc/source/lib/bx.align.sitemask.rst000066400000000000000000000005131415666465100224360ustar00rootroot00000000000000bx.align.sitemask package ========================= Submodules ---------- .. toctree:: bx.align.sitemask.core bx.align.sitemask.cpg bx.align.sitemask.quality bx.align.sitemask.sitemask_tests Module contents --------------- .. automodule:: bx.align.sitemask :members: :undoc-members: :show-inheritance: bx-python-0.8.13/doc/source/lib/bx.align.sitemask.sitemask_tests.rst000066400000000000000000000002731415666465100255020ustar00rootroot00000000000000bx.align.sitemask.sitemask_tests module ======================================= .. automodule:: bx.align.sitemask.sitemask_tests :members: :undoc-members: :show-inheritance: bx-python-0.8.13/doc/source/lib/bx.align.tools.chop.rst000066400000000000000000000002241415666465100227050ustar00rootroot00000000000000bx.align.tools.chop module ========================== .. 
automodule:: bx.align.tools.chop :members: :undoc-members: :show-inheritance: bx-python-0.8.13/doc/source/lib/bx.align.tools.fuse.rst000066400000000000000000000002241415666465100227160ustar00rootroot00000000000000bx.align.tools.fuse module ========================== .. automodule:: bx.align.tools.fuse :members: :undoc-members: :show-inheritance: bx-python-0.8.13/doc/source/lib/bx.align.tools.rst000066400000000000000000000004541415666465100217620ustar00rootroot00000000000000bx.align.tools package ====================== Submodules ---------- .. toctree:: bx.align.tools.chop bx.align.tools.fuse bx.align.tools.thread bx.align.tools.tile Module contents --------------- .. automodule:: bx.align.tools :members: :undoc-members: :show-inheritance: bx-python-0.8.13/doc/source/lib/bx.align.tools.thread.rst000066400000000000000000000002321415666465100232220ustar00rootroot00000000000000bx.align.tools.thread module ============================ .. automodule:: bx.align.tools.thread :members: :undoc-members: :show-inheritance: bx-python-0.8.13/doc/source/lib/bx.align.tools.tile.rst000066400000000000000000000002241415666465100227110ustar00rootroot00000000000000bx.align.tools.tile module ========================== .. automodule:: bx.align.tools.tile :members: :undoc-members: :show-inheritance: bx-python-0.8.13/doc/source/lib/bx.arrays.array_tree.rst000066400000000000000000000002271415666465100231640ustar00rootroot00000000000000bx.arrays.array_tree module =========================== .. automodule:: bx.arrays.array_tree :members: :undoc-members: :show-inheritance: bx-python-0.8.13/doc/source/lib/bx.arrays.array_tree_tests.rst000066400000000000000000000002511415666465100244030ustar00rootroot00000000000000bx.arrays.array_tree_tests module ================================= .. automodule:: bx.arrays.array_tree_tests :members: :undoc-members: :show-inheritance: bx-python-0.8.13/doc/source/lib/bx.arrays.bed.rst000066400000000000000000000002021415666465100215520ustar00rootroot00000000000000bx.arrays.bed module ==================== .. automodule:: bx.arrays.bed :members: :undoc-members: :show-inheritance: bx-python-0.8.13/doc/source/lib/bx.arrays.rst000066400000000000000000000004321415666465100210260ustar00rootroot00000000000000bx.arrays package ================= Submodules ---------- .. toctree:: bx.arrays.array_tree bx.arrays.array_tree_tests bx.arrays.bed bx.arrays.wiggle Module contents --------------- .. automodule:: bx.arrays :members: :undoc-members: :show-inheritance: bx-python-0.8.13/doc/source/lib/bx.arrays.wiggle.rst000066400000000000000000000002131415666465100223000ustar00rootroot00000000000000bx.arrays.wiggle module ======================= .. automodule:: bx.arrays.wiggle :members: :undoc-members: :show-inheritance: bx-python-0.8.13/doc/source/lib/bx.bbi.bbi_file.rst000066400000000000000000000002101415666465100220050ustar00rootroot00000000000000bx.bbi.bbi_file module ====================== .. automodule:: bx.bbi.bbi_file :members: :undoc-members: :show-inheritance: bx-python-0.8.13/doc/source/lib/bx.bbi.bigbed_file.rst000066400000000000000000000002211415666465100224670ustar00rootroot00000000000000bx.bbi.bigbed_file module ========================= .. automodule:: bx.bbi.bigbed_file :members: :undoc-members: :show-inheritance: bx-python-0.8.13/doc/source/lib/bx.bbi.bigwig_file.rst000066400000000000000000000002211415666465100225230ustar00rootroot00000000000000bx.bbi.bigwig_file module ========================= .. 
automodule:: bx.bbi.bigwig_file :members: :undoc-members: :show-inheritance: bx-python-0.8.13/doc/source/lib/bx.bbi.bigwig_tests.rst000066400000000000000000000002241415666465100227510ustar00rootroot00000000000000bx.bbi.bigwig_tests module ========================== .. automodule:: bx.bbi.bigwig_tests :members: :undoc-members: :show-inheritance: bx-python-0.8.13/doc/source/lib/bx.bbi.bpt_file.rst000066400000000000000000000002101415666465100220360ustar00rootroot00000000000000bx.bbi.bpt_file module ====================== .. automodule:: bx.bbi.bpt_file :members: :undoc-members: :show-inheritance: bx-python-0.8.13/doc/source/lib/bx.bbi.cirtree_file.rst000066400000000000000000000002241415666465100227130ustar00rootroot00000000000000bx.bbi.cirtree_file module ========================== .. automodule:: bx.bbi.cirtree_file :members: :undoc-members: :show-inheritance: bx-python-0.8.13/doc/source/lib/bx.bbi.rst000066400000000000000000000004661415666465100202700ustar00rootroot00000000000000bx.bbi package ============== Submodules ---------- .. toctree:: bx.bbi.bbi_file bx.bbi.bigbed_file bx.bbi.bigwig_file bx.bbi.bigwig_tests bx.bbi.bpt_file bx.bbi.cirtree_file Module contents --------------- .. automodule:: bx.bbi :members: :undoc-members: :show-inheritance: bx-python-0.8.13/doc/source/lib/bx.binned_array.rst000066400000000000000000000002101415666465100221540ustar00rootroot00000000000000bx.binned_array module ====================== .. automodule:: bx.binned_array :members: :undoc-members: :show-inheritance: bx-python-0.8.13/doc/source/lib/bx.binned_array_tests.rst000066400000000000000000000002321415666465100234020ustar00rootroot00000000000000bx.binned_array_tests module ============================ .. automodule:: bx.binned_array_tests :members: :undoc-members: :show-inheritance: bx-python-0.8.13/doc/source/lib/bx.bitset.rst000066400000000000000000000001661415666465100210230ustar00rootroot00000000000000bx.bitset module ================ .. automodule:: bx.bitset :members: :undoc-members: :show-inheritance: bx-python-0.8.13/doc/source/lib/bx.bitset_builders.rst000066400000000000000000000002211415666465100227040ustar00rootroot00000000000000bx.bitset_builders module ========================= .. automodule:: bx.bitset_builders :members: :undoc-members: :show-inheritance: bx-python-0.8.13/doc/source/lib/bx.bitset_tests.rst000066400000000000000000000002101415666465100222330ustar00rootroot00000000000000bx.bitset_tests module ====================== .. automodule:: bx.bitset_tests :members: :undoc-members: :show-inheritance: bx-python-0.8.13/doc/source/lib/bx.bitset_utils.rst000066400000000000000000000002101415666465100222310ustar00rootroot00000000000000bx.bitset_utils module ====================== .. automodule:: bx.bitset_utils :members: :undoc-members: :show-inheritance: bx-python-0.8.13/doc/source/lib/bx.cookbook.argparse.rst000066400000000000000000000002271415666465100231400ustar00rootroot00000000000000bx.cookbook.argparse module =========================== .. automodule:: bx.cookbook.argparse :members: :undoc-members: :show-inheritance: bx-python-0.8.13/doc/source/lib/bx.cookbook.attribute.rst000066400000000000000000000002321415666465100233330ustar00rootroot00000000000000bx.cookbook.attribute module ============================ .. 
automodule:: bx.cookbook.attribute :members: :undoc-members: :show-inheritance: bx-python-0.8.13/doc/source/lib/bx.cookbook.doc_optparse.rst000066400000000000000000000002431415666465100240140ustar00rootroot00000000000000bx.cookbook.doc_optparse module =============================== .. automodule:: bx.cookbook.doc_optparse :members: :undoc-members: :show-inheritance: bx-python-0.8.13/doc/source/lib/bx.cookbook.progress_bar.rst000066400000000000000000000002431415666465100240220ustar00rootroot00000000000000bx.cookbook.progress_bar module =============================== .. automodule:: bx.cookbook.progress_bar :members: :undoc-members: :show-inheritance: bx-python-0.8.13/doc/source/lib/bx.cookbook.rst000066400000000000000000000004561415666465100213410ustar00rootroot00000000000000bx.cookbook package =================== Submodules ---------- .. toctree:: bx.cookbook.argparse bx.cookbook.attribute bx.cookbook.doc_optparse bx.cookbook.progress_bar Module contents --------------- .. automodule:: bx.cookbook :members: :undoc-members: :show-inheritance: bx-python-0.8.13/doc/source/lib/bx.filter.rst000066400000000000000000000001661415666465100210160ustar00rootroot00000000000000bx.filter module ================ .. automodule:: bx.filter :members: :undoc-members: :show-inheritance: bx-python-0.8.13/doc/source/lib/bx.gene_reader.rst000066400000000000000000000002051415666465100217630ustar00rootroot00000000000000bx.gene_reader module ===================== .. automodule:: bx.gene_reader :members: :undoc-members: :show-inheritance: bx-python-0.8.13/doc/source/lib/bx.interval_index_file.rst000066400000000000000000000002351415666465100235400ustar00rootroot00000000000000bx.interval_index_file module ============================= .. automodule:: bx.interval_index_file :members: :undoc-members: :show-inheritance: bx-python-0.8.13/doc/source/lib/bx.interval_index_file_tests.rst000066400000000000000000000002571415666465100247660ustar00rootroot00000000000000bx.interval_index_file_tests module =================================== .. automodule:: bx.interval_index_file_tests :members: :undoc-members: :show-inheritance: bx-python-0.8.13/doc/source/lib/bx.intervals.cluster.rst000066400000000000000000000002271415666465100232160ustar00rootroot00000000000000bx.intervals.cluster module =========================== .. automodule:: bx.intervals.cluster :members: :undoc-members: :show-inheritance: bx-python-0.8.13/doc/source/lib/bx.intervals.cluster_tests.rst000066400000000000000000000002511415666465100244350ustar00rootroot00000000000000bx.intervals.cluster_tests module ================================= .. automodule:: bx.intervals.cluster_tests :members: :undoc-members: :show-inheritance: bx-python-0.8.13/doc/source/lib/bx.intervals.intersection.rst000066400000000000000000000002461415666465100242440ustar00rootroot00000000000000bx.intervals.intersection module ================================ .. automodule:: bx.intervals.intersection :members: :undoc-members: :show-inheritance: bx-python-0.8.13/doc/source/lib/bx.intervals.intersection_tests.rst000066400000000000000000000002701415666465100254630ustar00rootroot00000000000000bx.intervals.intersection_tests module ====================================== .. automodule:: bx.intervals.intersection_tests :members: :undoc-members: :show-inheritance: bx-python-0.8.13/doc/source/lib/bx.intervals.io.rst000066400000000000000000000002101415666465100221340ustar00rootroot00000000000000bx.intervals.io module ====================== .. 
automodule:: bx.intervals.io :members: :undoc-members: :show-inheritance: bx-python-0.8.13/doc/source/lib/bx.intervals.operations.base_coverage.rst000066400000000000000000000003121415666465100264770ustar00rootroot00000000000000bx.intervals.operations.base_coverage module ============================================ .. automodule:: bx.intervals.operations.base_coverage :members: :undoc-members: :show-inheritance: bx-python-0.8.13/doc/source/lib/bx.intervals.operations.complement.rst000066400000000000000000000003011415666465100260530ustar00rootroot00000000000000bx.intervals.operations.complement module ========================================= .. automodule:: bx.intervals.operations.complement :members: :undoc-members: :show-inheritance: bx-python-0.8.13/doc/source/lib/bx.intervals.operations.concat.rst000066400000000000000000000002651415666465100251700ustar00rootroot00000000000000bx.intervals.operations.concat module ===================================== .. automodule:: bx.intervals.operations.concat :members: :undoc-members: :show-inheritance: bx-python-0.8.13/doc/source/lib/bx.intervals.operations.coverage.rst000066400000000000000000000002731415666465100255130ustar00rootroot00000000000000bx.intervals.operations.coverage module ======================================= .. automodule:: bx.intervals.operations.coverage :members: :undoc-members: :show-inheritance: bx-python-0.8.13/doc/source/lib/bx.intervals.operations.find_clusters.rst000066400000000000000000000003121415666465100265560ustar00rootroot00000000000000bx.intervals.operations.find_clusters module ============================================ .. automodule:: bx.intervals.operations.find_clusters :members: :undoc-members: :show-inheritance: bx-python-0.8.13/doc/source/lib/bx.intervals.operations.intersect.rst000066400000000000000000000002761415666465100257230ustar00rootroot00000000000000bx.intervals.operations.intersect module ======================================== .. automodule:: bx.intervals.operations.intersect :members: :undoc-members: :show-inheritance: bx-python-0.8.13/doc/source/lib/bx.intervals.operations.join.rst000066400000000000000000000002571415666465100246610ustar00rootroot00000000000000bx.intervals.operations.join module =================================== .. automodule:: bx.intervals.operations.join :members: :undoc-members: :show-inheritance: bx-python-0.8.13/doc/source/lib/bx.intervals.operations.merge.rst000066400000000000000000000002621415666465100250150ustar00rootroot00000000000000bx.intervals.operations.merge module ==================================== .. automodule:: bx.intervals.operations.merge :members: :undoc-members: :show-inheritance: bx-python-0.8.13/doc/source/lib/bx.intervals.operations.quicksect.rst000066400000000000000000000002761415666465100257160ustar00rootroot00000000000000bx.intervals.operations.quicksect module ======================================== .. automodule:: bx.intervals.operations.quicksect :members: :undoc-members: :show-inheritance: bx-python-0.8.13/doc/source/lib/bx.intervals.operations.rst000066400000000000000000000011261415666465100237170ustar00rootroot00000000000000bx.intervals.operations package =============================== Submodules ---------- .. 
toctree:: bx.intervals.operations.base_coverage bx.intervals.operations.complement bx.intervals.operations.concat bx.intervals.operations.coverage bx.intervals.operations.find_clusters bx.intervals.operations.intersect bx.intervals.operations.join bx.intervals.operations.merge bx.intervals.operations.quicksect bx.intervals.operations.subtract Module contents --------------- .. automodule:: bx.intervals.operations :members: :undoc-members: :show-inheritance: bx-python-0.8.13/doc/source/lib/bx.intervals.operations.subtract.rst000066400000000000000000000002731415666465100255470ustar00rootroot00000000000000bx.intervals.operations.subtract module ======================================= .. automodule:: bx.intervals.operations.subtract :members: :undoc-members: :show-inheritance: bx-python-0.8.13/doc/source/lib/bx.intervals.random_intervals.rst000066400000000000000000000002621415666465100251030ustar00rootroot00000000000000bx.intervals.random_intervals module ==================================== .. automodule:: bx.intervals.random_intervals :members: :undoc-members: :show-inheritance: bx-python-0.8.13/doc/source/lib/bx.intervals.rst000066400000000000000000000006661415666465100215450ustar00rootroot00000000000000bx.intervals package ==================== Subpackages ----------- .. toctree:: bx.intervals.operations Submodules ---------- .. toctree:: bx.intervals.cluster bx.intervals.cluster_tests bx.intervals.intersection bx.intervals.intersection_tests bx.intervals.io bx.intervals.random_intervals Module contents --------------- .. automodule:: bx.intervals :members: :undoc-members: :show-inheritance: bx-python-0.8.13/doc/source/lib/bx.intseq.ngramcount.rst000066400000000000000000000002271415666465100232060ustar00rootroot00000000000000bx.intseq.ngramcount module =========================== .. automodule:: bx.intseq.ngramcount :members: :undoc-members: :show-inheritance: bx-python-0.8.13/doc/source/lib/bx.intseq.rst000066400000000000000000000003271415666465100210330ustar00rootroot00000000000000bx.intseq package ================= Submodules ---------- .. toctree:: bx.intseq.ngramcount Module contents --------------- .. automodule:: bx.intseq :members: :undoc-members: :show-inheritance: bx-python-0.8.13/doc/source/lib/bx.misc.bgzf.rst000066400000000000000000000001771415666465100214150ustar00rootroot00000000000000bx.misc.bgzf module =================== .. automodule:: bx.misc.bgzf :members: :undoc-members: :show-inheritance: bx-python-0.8.13/doc/source/lib/bx.misc.bgzf_tests.rst000066400000000000000000000002211415666465100226250ustar00rootroot00000000000000bx.misc.bgzf_tests module ========================= .. automodule:: bx.misc.bgzf_tests :members: :undoc-members: :show-inheritance: bx-python-0.8.13/doc/source/lib/bx.misc.binary_file.rst000066400000000000000000000002241415666465100227410ustar00rootroot00000000000000bx.misc.binary_file module ========================== .. automodule:: bx.misc.binary_file :members: :undoc-members: :show-inheritance: bx-python-0.8.13/doc/source/lib/bx.misc.cdb.rst000066400000000000000000000001741415666465100212120ustar00rootroot00000000000000bx.misc.cdb module ================== .. automodule:: bx.misc.cdb :members: :undoc-members: :show-inheritance: bx-python-0.8.13/doc/source/lib/bx.misc.cdb_tests.rst000066400000000000000000000002161415666465100224310ustar00rootroot00000000000000bx.misc.cdb_tests module ======================== .. 
automodule:: bx.misc.cdb_tests :members: :undoc-members: :show-inheritance: bx-python-0.8.13/doc/source/lib/bx.misc.filecache.rst000066400000000000000000000002161415666465100223620ustar00rootroot00000000000000bx.misc.filecache module ======================== .. automodule:: bx.misc.filecache :members: :undoc-members: :show-inheritance: bx-python-0.8.13/doc/source/lib/bx.misc.filecache_tests.rst000066400000000000000000000002401415666465100236010ustar00rootroot00000000000000bx.misc.filecache_tests module ============================== .. automodule:: bx.misc.filecache_tests :members: :undoc-members: :show-inheritance: bx-python-0.8.13/doc/source/lib/bx.misc.readlengths.rst000066400000000000000000000002241415666465100227560ustar00rootroot00000000000000bx.misc.readlengths module ========================== .. automodule:: bx.misc.readlengths :members: :undoc-members: :show-inheritance: bx-python-0.8.13/doc/source/lib/bx.misc.rst000066400000000000000000000006771415666465100204730ustar00rootroot00000000000000bx.misc package =============== Submodules ---------- .. toctree:: bx.misc.bgzf bx.misc.bgzf_tests bx.misc.binary_file bx.misc.cdb bx.misc.cdb_tests bx.misc.filecache bx.misc.filecache_tests bx.misc.readlengths bx.misc.seekbzip2 bx.misc.seekbzip2_tests bx.misc.seeklzop bx.misc.seeklzop_tests Module contents --------------- .. automodule:: bx.misc :members: :undoc-members: :show-inheritance: bx-python-0.8.13/doc/source/lib/bx.misc.seekbzip2.rst000066400000000000000000000002161415666465100223550ustar00rootroot00000000000000bx.misc.seekbzip2 module ======================== .. automodule:: bx.misc.seekbzip2 :members: :undoc-members: :show-inheritance: bx-python-0.8.13/doc/source/lib/bx.misc.seekbzip2_tests.rst000066400000000000000000000002401415666465100235740ustar00rootroot00000000000000bx.misc.seekbzip2_tests module ============================== .. automodule:: bx.misc.seekbzip2_tests :members: :undoc-members: :show-inheritance: bx-python-0.8.13/doc/source/lib/bx.misc.seeklzop.rst000066400000000000000000000002131415666465100223100ustar00rootroot00000000000000bx.misc.seeklzop module ======================= .. automodule:: bx.misc.seeklzop :members: :undoc-members: :show-inheritance: bx-python-0.8.13/doc/source/lib/bx.misc.seeklzop_tests.rst000066400000000000000000000002351415666465100235360ustar00rootroot00000000000000bx.misc.seeklzop_tests module ============================= .. automodule:: bx.misc.seeklzop_tests :members: :undoc-members: :show-inheritance: bx-python-0.8.13/doc/source/lib/bx.motif.io.rst000066400000000000000000000003731415666465100212550ustar00rootroot00000000000000bx.motif.io package =================== Submodules ---------- .. toctree:: bx.motif.io.transfac bx.motif.io.transfac_tests Module contents --------------- .. automodule:: bx.motif.io :members: :undoc-members: :show-inheritance: bx-python-0.8.13/doc/source/lib/bx.motif.io.transfac.rst000066400000000000000000000002271415666465100230530ustar00rootroot00000000000000bx.motif.io.transfac module =========================== .. automodule:: bx.motif.io.transfac :members: :undoc-members: :show-inheritance: bx-python-0.8.13/doc/source/lib/bx.motif.io.transfac_tests.rst000066400000000000000000000002511415666465100242720ustar00rootroot00000000000000bx.motif.io.transfac_tests module ================================= .. 
automodule:: bx.motif.io.transfac_tests :members: :undoc-members: :show-inheritance: bx-python-0.8.13/doc/source/lib/bx.motif.logo.rst000066400000000000000000000002451415666465100216040ustar00rootroot00000000000000bx.motif.logo package ===================== Module contents --------------- .. automodule:: bx.motif.logo :members: :undoc-members: :show-inheritance: bx-python-0.8.13/doc/source/lib/bx.motif.pwm.rst000066400000000000000000000001771415666465100214530ustar00rootroot00000000000000bx.motif.pwm module =================== .. automodule:: bx.motif.pwm :members: :undoc-members: :show-inheritance: bx-python-0.8.13/doc/source/lib/bx.motif.pwm_tests.rst000066400000000000000000000002211415666465100226630ustar00rootroot00000000000000bx.motif.pwm_tests module ========================= .. automodule:: bx.motif.pwm_tests :members: :undoc-members: :show-inheritance: bx-python-0.8.13/doc/source/lib/bx.motif.rst000066400000000000000000000004541415666465100206470ustar00rootroot00000000000000bx.motif package ================ Subpackages ----------- .. toctree:: bx.motif.io bx.motif.logo Submodules ---------- .. toctree:: bx.motif.pwm bx.motif.pwm_tests Module contents --------------- .. automodule:: bx.motif :members: :undoc-members: :show-inheritance: bx-python-0.8.13/doc/source/lib/bx.phylo.newick.rst000066400000000000000000000002101415666465100221310ustar00rootroot00000000000000bx.phylo.newick module ====================== .. automodule:: bx.phylo.newick :members: :undoc-members: :show-inheritance: bx-python-0.8.13/doc/source/lib/bx.phylo.newick_tests.rst000066400000000000000000000002321415666465100233570ustar00rootroot00000000000000bx.phylo.newick_tests module ============================ .. automodule:: bx.phylo.newick_tests :members: :undoc-members: :show-inheritance: bx-python-0.8.13/doc/source/lib/bx.phylo.phast.rst000066400000000000000000000002051415666465100217740ustar00rootroot00000000000000bx.phylo.phast module ===================== .. automodule:: bx.phylo.phast :members: :undoc-members: :show-inheritance: bx-python-0.8.13/doc/source/lib/bx.phylo.phast_tests.rst000066400000000000000000000002271415666465100232220ustar00rootroot00000000000000bx.phylo.phast_tests module =========================== .. automodule:: bx.phylo.phast_tests :members: :undoc-members: :show-inheritance: bx-python-0.8.13/doc/source/lib/bx.phylo.rst000066400000000000000000000004221415666465100206570ustar00rootroot00000000000000bx.phylo package ================ Submodules ---------- .. toctree:: bx.phylo.newick bx.phylo.newick_tests bx.phylo.phast bx.phylo.phast_tests Module contents --------------- .. automodule:: bx.phylo :members: :undoc-members: :show-inheritance: bx-python-0.8.13/doc/source/lib/bx.pwm.bed_score_aligned_pwm.rst000066400000000000000000000002571415666465100246270ustar00rootroot00000000000000bx.pwm.bed_score_aligned_pwm module =================================== .. automodule:: bx.pwm.bed_score_aligned_pwm :members: :undoc-members: :show-inheritance: bx-python-0.8.13/doc/source/lib/bx.pwm.bed_score_aligned_string.rst000066400000000000000000000002701415666465100253250ustar00rootroot00000000000000bx.pwm.bed_score_aligned_string module ====================================== .. automodule:: bx.pwm.bed_score_aligned_string :members: :undoc-members: :show-inheritance: bx-python-0.8.13/doc/source/lib/bx.pwm.maf_select_motifs.rst000066400000000000000000000002431415666465100240120ustar00rootroot00000000000000bx.pwm.maf_select_motifs module =============================== .. 
automodule:: bx.pwm.maf_select_motifs :members: :undoc-members: :show-inheritance: bx-python-0.8.13/doc/source/lib/bx.pwm.position_weight_matrix.rst000066400000000000000000000002621415666465100251270ustar00rootroot00000000000000bx.pwm.position_weight_matrix module ==================================== .. automodule:: bx.pwm.position_weight_matrix :members: :undoc-members: :show-inheritance: bx-python-0.8.13/doc/source/lib/bx.pwm.pwm_score_maf.rst000066400000000000000000000002271415666465100231520ustar00rootroot00000000000000bx.pwm.pwm_score_maf module =========================== .. automodule:: bx.pwm.pwm_score_maf :members: :undoc-members: :show-inheritance: bx-python-0.8.13/doc/source/lib/bx.pwm.pwm_score_motifs.rst000066400000000000000000000002401415666465100237030ustar00rootroot00000000000000bx.pwm.pwm_score_motifs module ============================== .. automodule:: bx.pwm.pwm_score_motifs :members: :undoc-members: :show-inheritance: bx-python-0.8.13/doc/source/lib/bx.pwm.pwm_score_positions.rst000066400000000000000000000002511415666465100244330ustar00rootroot00000000000000bx.pwm.pwm_score_positions module ================================= .. automodule:: bx.pwm.pwm_score_positions :members: :undoc-members: :show-inheritance: bx-python-0.8.13/doc/source/lib/bx.pwm.pwm_tests.rst000066400000000000000000000002131415666465100223510ustar00rootroot00000000000000bx.pwm.pwm_tests module ======================= .. automodule:: bx.pwm.pwm_tests :members: :undoc-members: :show-inheritance: bx-python-0.8.13/doc/source/lib/bx.pwm.rst000066400000000000000000000006331415666465100203330ustar00rootroot00000000000000bx.pwm package ============== Submodules ---------- .. toctree:: bx.pwm.bed_score_aligned_pwm bx.pwm.bed_score_aligned_string bx.pwm.maf_select_motifs bx.pwm.position_weight_matrix bx.pwm.pwm_score_maf bx.pwm.pwm_score_motifs bx.pwm.pwm_score_positions bx.pwm.pwm_tests Module contents --------------- .. automodule:: bx.pwm :members: :undoc-members: :show-inheritance: bx-python-0.8.13/doc/source/lib/bx.rst000066400000000000000000000012101415666465100175210ustar00rootroot00000000000000bx package ========== Subpackages ----------- .. toctree:: bx.align bx.arrays bx.bbi bx.cookbook bx.intervals bx.intseq bx.misc bx.motif bx.phylo bx.pwm bx.seq bx.tabular Submodules ---------- .. toctree:: bx.binned_array bx.binned_array_tests bx.bitset bx.bitset_builders bx.bitset_tests bx.bitset_utils bx.filter bx.gene_reader bx.interval_index_file bx.interval_index_file_tests bx.seqmapping bx.seqmapping_tests bx.wiggle bx.wiggle_tests Module contents --------------- .. automodule:: bx :members: :undoc-members: :show-inheritance: bx-python-0.8.13/doc/source/lib/bx.seq.core.rst000066400000000000000000000001741415666465100212470ustar00rootroot00000000000000bx.seq.core module ================== .. automodule:: bx.seq.core :members: :undoc-members: :show-inheritance: bx-python-0.8.13/doc/source/lib/bx.seq.fasta.rst000066400000000000000000000001771415666465100214200ustar00rootroot00000000000000bx.seq.fasta module =================== .. automodule:: bx.seq.fasta :members: :undoc-members: :show-inheritance: bx-python-0.8.13/doc/source/lib/bx.seq.fasta_tests.rst000066400000000000000000000002211415666465100226300ustar00rootroot00000000000000bx.seq.fasta_tests module ========================= .. 
automodule:: bx.seq.fasta_tests :members: :undoc-members: :show-inheritance: bx-python-0.8.13/doc/source/lib/bx.seq.nib.rst000066400000000000000000000001711415666465100210640ustar00rootroot00000000000000bx.seq.nib module ================= .. automodule:: bx.seq.nib :members: :undoc-members: :show-inheritance: bx-python-0.8.13/doc/source/lib/bx.seq.nib_tests.rst000066400000000000000000000002131415666465100223030ustar00rootroot00000000000000bx.seq.nib_tests module ======================= .. automodule:: bx.seq.nib_tests :members: :undoc-members: :show-inheritance: bx-python-0.8.13/doc/source/lib/bx.seq.qdna.rst000066400000000000000000000001741415666465100212420ustar00rootroot00000000000000bx.seq.qdna module ================== .. automodule:: bx.seq.qdna :members: :undoc-members: :show-inheritance: bx-python-0.8.13/doc/source/lib/bx.seq.qdna_tests.rst000066400000000000000000000002161415666465100224610ustar00rootroot00000000000000bx.seq.qdna_tests module ======================== .. automodule:: bx.seq.qdna_tests :members: :undoc-members: :show-inheritance: bx-python-0.8.13/doc/source/lib/bx.seq.rst000066400000000000000000000005731415666465100203230ustar00rootroot00000000000000bx.seq package ============== Submodules ---------- .. toctree:: bx.seq.core bx.seq.fasta bx.seq.fasta_tests bx.seq.nib bx.seq.nib_tests bx.seq.qdna bx.seq.qdna_tests bx.seq.seq bx.seq.seq_tests bx.seq.twobit bx.seq.twobit_tests Module contents --------------- .. automodule:: bx.seq :members: :undoc-members: :show-inheritance: bx-python-0.8.13/doc/source/lib/bx.seq.seq.rst000066400000000000000000000001711415666465100211040ustar00rootroot00000000000000bx.seq.seq module ================= .. automodule:: bx.seq.seq :members: :undoc-members: :show-inheritance: bx-python-0.8.13/doc/source/lib/bx.seq.seq_tests.rst000066400000000000000000000002131415666465100223230ustar00rootroot00000000000000bx.seq.seq_tests module ======================= .. automodule:: bx.seq.seq_tests :members: :undoc-members: :show-inheritance: bx-python-0.8.13/doc/source/lib/bx.seq.twobit.rst000066400000000000000000000002021415666465100216170ustar00rootroot00000000000000bx.seq.twobit module ==================== .. automodule:: bx.seq.twobit :members: :undoc-members: :show-inheritance: bx-python-0.8.13/doc/source/lib/bx.seq.twobit_tests.rst000066400000000000000000000002241415666465100230450ustar00rootroot00000000000000bx.seq.twobit_tests module ========================== .. automodule:: bx.seq.twobit_tests :members: :undoc-members: :show-inheritance: bx-python-0.8.13/doc/source/lib/bx.seqmapping.rst000066400000000000000000000002021415666465100216640ustar00rootroot00000000000000bx.seqmapping module ==================== .. automodule:: bx.seqmapping :members: :undoc-members: :show-inheritance: bx-python-0.8.13/doc/source/lib/bx.seqmapping_tests.rst000066400000000000000000000002241415666465100231120ustar00rootroot00000000000000bx.seqmapping_tests module ========================== .. automodule:: bx.seqmapping_tests :members: :undoc-members: :show-inheritance: bx-python-0.8.13/doc/source/lib/bx.tabular.io.rst000066400000000000000000000002021415666465100215600ustar00rootroot00000000000000bx.tabular.io module ==================== .. automodule:: bx.tabular.io :members: :undoc-members: :show-inheritance: bx-python-0.8.13/doc/source/lib/bx.tabular.rst000066400000000000000000000003231415666465100211560ustar00rootroot00000000000000bx.tabular package ================== Submodules ---------- .. toctree:: bx.tabular.io Module contents --------------- .. 
automodule:: bx.tabular :members: :undoc-members: :show-inheritance: bx-python-0.8.13/doc/source/lib/bx.wiggle.rst000066400000000000000000000001661415666465100210070ustar00rootroot00000000000000bx.wiggle module ================ .. automodule:: bx.wiggle :members: :undoc-members: :show-inheritance: bx-python-0.8.13/doc/source/lib/bx.wiggle_tests.rst000066400000000000000000000002101415666465100222170ustar00rootroot00000000000000bx.wiggle_tests module ====================== .. automodule:: bx.wiggle_tests :members: :undoc-members: :show-inheritance: bx-python-0.8.13/doc/source/lib/bx_extras.fpconst.rst000066400000000000000000000002161415666465100225670ustar00rootroot00000000000000bx_extras.fpconst module ======================== .. automodule:: bx_extras.fpconst :members: :undoc-members: :show-inheritance: bx-python-0.8.13/doc/source/lib/bx_extras.lrucache.rst000066400000000000000000000002211415666465100226750ustar00rootroot00000000000000bx_extras.lrucache module ========================= .. automodule:: bx_extras.lrucache :members: :undoc-members: :show-inheritance: bx-python-0.8.13/doc/source/lib/bx_extras.pstat.rst000066400000000000000000000002101415666465100222400ustar00rootroot00000000000000bx_extras.pstat module ====================== .. automodule:: bx_extras.pstat :members: :undoc-members: :show-inheritance: bx-python-0.8.13/doc/source/lib/bx_extras.pyparsing.rst000066400000000000000000000002241415666465100231260ustar00rootroot00000000000000bx_extras.pyparsing module ========================== .. automodule:: bx_extras.pyparsing :members: :undoc-members: :show-inheritance: bx-python-0.8.13/doc/source/lib/bx_extras.rst000066400000000000000000000004471415666465100211220ustar00rootroot00000000000000bx_extras package ================= Submodules ---------- .. toctree:: bx_extras.fpconst bx_extras.lrucache bx_extras.pstat bx_extras.pyparsing bx_extras.stats Module contents --------------- .. automodule:: bx_extras :members: :undoc-members: :show-inheritance: bx-python-0.8.13/doc/source/lib/bx_extras.stats.rst000066400000000000000000000002101415666465100222430ustar00rootroot00000000000000bx_extras.stats module ====================== .. automodule:: bx_extras.stats :members: :undoc-members: :show-inheritance: bx-python-0.8.13/doc/source/lib/modules.rst000066400000000000000000000001101415666465100205560ustar00rootroot00000000000000lib === .. toctree:: :maxdepth: 4 bx bx_extras psyco_full bx-python-0.8.13/doc/source/lib/psyco_full.rst000066400000000000000000000001711415666465100212740ustar00rootroot00000000000000psyco_full module ================= .. 
automodule:: psyco_full :members: :undoc-members: :show-inheritance: bx-python-0.8.13/doc/source/static/000077500000000000000000000000001415666465100171055ustar00rootroot00000000000000bx-python-0.8.13/doc/source/static/base.css000066400000000000000000000041431415666465100205330ustar00rootroot00000000000000@import url(tripoli.base.css); html { font-family: 'Verdana', sans-serif; color: #333333; } body { padding: 3em 3em; } h1.pageheader { font-variant: small-caps; margin-top: 0; border-top: solid 1px; padding-top: 2px; border-bottom: solid 1px; border-color: #CCCCCC; margin-bottom: 1em; } h1.pageheader a { color: inherit; text-decoration: inherit; border: none; } .content h1, .content h2, .content h3, .content h4, .content h5, .content h6 { font-family: 'Hoefler Text', 'Georgia', serif; font-weight: normal; color: #666666; /* border-bottom: solid #666666 1px; */ } .content h1.pagetitle { color: #c33; } #main { } .colpad { padding: 0 2em; } #main > .inner { min-width: 70em; max-width: 90em; margin: auto; height: 100%; } #left { background: white; margin-right: 36%; /* 31em; */ padding-right: 3%; height: 100%; } #right { float: right; width: 33%; /* 28em; */ padding-left: 3%; border-left: solid #CCCCCC 1px; } .sidebar { font-size: 1em; } .sidebar ul { margin-left: 0; } .sidebar ul li { list-style-type: none; margin-bottom: 0.6em; } .sidebar ul.pages { margin-left: 5px; margin-top: 0.6em; } .sidebar ul.pages li { background: url(hbullet.png) 0 0.4em no-repeat; padding-left: 25px; list-style-type: none; } .sidebar ul.pages li { } .sidebar h1 { clear: both; } .sidebar .publications .info { color: #666666; } .postinfo { color: #666666; font-size: 92%; margin-top: -1em; } .postreadlink { margin-top: -1em; } .sidebar .posts .info { color: #666666; } .comments_title { margin-top: 2em; } label { display: block; } #footer { clear: both; } a, a:link, a:visited { text-decoration: none; border-bottom: dotted #666666 1px; color: black; } a:hover { color: #CC3333; } li { list-style: square; } table.layout td { vertical-align: top; padding-left: 2em; padding-right: 2em; border-left: solid #999999 1px } hr { border: none; height: 1px; background: #999999; } bx-python-0.8.13/doc/source/static/tripoli.base.css000066400000000000000000000126551415666465100222230ustar00rootroot00000000000000/* * Tripoli is a generic CSS standard for HTML rendering. * Copyright (C) 2007 David Hellsing * * http://devkick.com/lab/tripoli/ * * You should have received a copy of the GNU General Public License * along with this program. If not, see . 
**/ /* _______________________________ RESET */ * { text-decoration:none; font-size:1em; outline:none; margin:0; padding:0; } code,kbd,samp,pre,tt,var,textarea,input,select,isindex,listing,xmp,plaintext { font:inherit; white-space:normal; } a,img,a img,iframe,form,abbr,acronym,object,applet,table,a abbr,a acronym { border-width:0; } dfn,i,cite,var,address,em { font-style:normal; } th,b,strong,h1,h2,h3,h4,h5,h6,dt { font-weight:normal; } caption,th,td { text-align:left; } html { background:white; color:black; line-height:1; font-family:arial, sans-serif; } /* \*/ html { font-family:sans-serif; } /* */ q { quotes:"\201C""\201D""\2018""\2019"; } ul,ol,dir,menu { list-style:none; } sub,sup { vertical-align:baseline; } a { color:inherit; } /* _______________________________ DISABLE DEPRECATED HTML */ font,basefont { color:inherit; font:inherit; font-size:100%; } center,*[align] { text-align:inherit; } s,strike,u { text-decoration:inherit; } img { border:none; margin:0; } ol { list-style-type:decimal; } body { background-color:transparent; } tr,th,td { width:auto; height:auto; background-color:transparent; vertical-align:inherit; border:none; } table[border],.content table[border] { border-collapse:separate; border-spacing:0; } nobr { white-space:normal; } marquee { overflow:visible; -moz-binding:none; } blink { text-decoration:none; } /* _______________________________ GENERAL */ html { font-size:125%; } body { font-size:50%; } a { text-decoration:underline; } strong,th,thead td,h1,h2,h3,h4,h5,h6,dt { font-weight:bold; } cite,em,dfn { font-style:italic; } code,kbd,samp,pre,tt,var,input[type='text'],input[type='password'],textarea { font-size:100%; font-family:mono-space,monospace; } pre { white-space:pre; } pre * { font-size:100%; white-space:pre; } del { text-decoration:line-through; } ins,dfn { border-bottom:1px solid black; } small,sup,sub { font-size:85%; } big { font-size:125%; line-height:80%; } abbr,acronym { text-transform:uppercase; font-size:85%; letter-spacing:.1em; } abbr[title],acronym[title],dfn[title] { cursor:help; border-bottom:1px dotted black; } sup { vertical-align:super; } sub { vertical-align:sub; } blockquote { padding-left:2.2em; } hr { display:none; /* We will re-reset it later for content */ } :lang(af),:lang(nl),:lang(pl) { quotes:'\201E' '\201D' '\201A' '\2019'; } :lang(bg),:lang(cs),:lang(de),:lang(is),:lang(lt),:lang(sk),:lang(sr),:lang(ro) { quotes:'\201E' '\201C' '\201A' '\2018'; } :lang(da),:lang(hr) { quotes:'\00BB' '\00AB' '\203A' '\2039'; } :lang(el),:lang(es),:lang(sq),:lang(tr) { quotes:'\00AB' '\00BB' '\2039' '\203A'; } :lang(en-GB) { quotes:'\2018' '\2019' '\201C' '\201D'; } :lang(fi),:lang(sv) { quotes:'\201D' '\201D' '\2019' '\2019'; } :lang(fr) { quotes:'\ab\2005' '\2005\bb' '\2039\2005' '\2005\203a'; } *[lang|='en'] q:before { content:'\201C'; } *[lang|='en'] q:after { content:'\201D'; } *[lang|='en'] q q:before { content:'\2018'; } *[lang|='en'] q q:after { content:'\2019'; } input,select,button { cursor:pointer; } input[type='text'],input[type='password'] { cursor:text; } input[type='hidden'] { display:none; } /* _______________________________ CONTENT */ .content { font-size:1.2em; line-height:1.6em; } .content h1 { font-size:1.6em; line-height:1; margin:1em 0 .5em; } .content h2 { font-size:1.5em; line-height:1; margin:1.07em 0 .535em; } .content h3 { font-size:1.4em; line-height:1; margin:1.14em 0 .57em; } .content h4 { font-size:1.3em; line-height:1; margin:1.23em 0 .615em; } .content h5 { font-size:1.2em; line-height:1; margin:1.33em 0 .67em; } 
.content h6 { font-size:1em; line-height:1; margin:1.6em 0 .8em; } .content hr { display:block; background:black; color:black; width:100%; height:1px; border:none; } .content ul { list-style:disc outside; } .content ol { list-style:decimal outside; } .content table { border-collapse:collapse; } .content hr,.content p,.content ul,.content ol,.content dl,.content pre, .content address,.content table,.content form { margin-bottom:1.6em; } .content p+p { margin-top:-.8em; } .content fieldset { margin:1.6em 0; padding:1.6em; } /* \*/ .content legend { padding-left:.8em; padding-right:.8em; } /* */ @media all and (min-width: 0px) /* for Opera 8 */ { .content legend { margin-bottom:1.6em; } .content fieldset { margin-top:0; } .content[class^='content'] fieldset { margin-top:1.6em; } } .content fieldset>*:first-child { margin-top:0; } .content textarea,.content input[type='text'] { padding:.1em .2em; } .content input { padding:.2em .1em; } .content select { padding:.2em .1em 0; } .content select[multiple] { margin-bottom:.8em; } .content option { padding:0 .4em .1em; } .content button { padding:.3em .5em; } .content input[type='radio'] { position:relative; bottom:-.2em; } .content dt { margin-top:.8em; margin-bottom:.4em; } .content ul,.content ol { margin-left:2.2em; } .content caption,.content form div { padding-bottom:.8em; } .content ul ul,content ol ul,.content ul ol,content ol ol { margin-bottom:0; } /* _______________________________ END */ bx-python-0.8.13/doc/source/templates/000077500000000000000000000000001415666465100176145ustar00rootroot00000000000000bx-python-0.8.13/doc/source/templates/index.html000066400000000000000000000032071415666465100216130ustar00rootroot00000000000000{% extends "layout.html" %} {% set title = 'bx python' %} {% block body %}

Welcome

The bx-python project is a Python library and associated set of scripts for rapid implementation of genome scale analyses. The library contains a variety of useful modules, but its particular strengths are:

  • Classes for reading and working with genome-scale multiple local alignments (in MAF, AXT, and LAV formats)
  • Generic data structure for indexing on-disk files that contain blocks of data associated with intervals on various sequences (used, for example, to provide random access to individual alignments in huge files; optimized for use over network filesystems)
  • Data structures for working with intervals on sequences
  • "Binned bitsets" which act just like chromosome-sized bit arrays, but lazily allocate regions and allow large blocks of all set or all unset bits to be stored compactly
  • "Intersecter" for performing fast intersection tests that preserve both query and target intervals and associated annotation (see the sketches below)
  • These tools have been used in a variety of published research, and are a fundamental part of the ongoing Galaxy and ESPERR projects.
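
For example, two minimal sketches of the bitset and intersection APIs (values are illustrative; they assume the bx package built from this tree):

    from bx.bitset import BinnedBitSet
    from bx.intervals.intersection import Intersecter, Interval

    # A binned bitset over a chromosome-sized range; only touched bins allocate
    bits = BinnedBitSet(size=247249719)
    bits.set_range(1000, 500)              # set 500 bits starting at position 1000
    print(bits.count_range(0, bits.size))  # -> 500

    # Fast overlap queries that keep the associated annotation
    inter = Intersecter()
    inter.add_interval(Interval(100, 200, value="exon1"))
    inter.add_interval(Interval(150, 300, value="exon2"))
    for hit in inter.find(180, 220):       # intervals overlapping [180, 220)
        print(hit.start, hit.end, hit.value)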

Documentation

{% endblock %} bx-python-0.8.13/doc/source/templates/indexsidebar.html000066400000000000000000000003001415666465100231340ustar00rootroot00000000000000

About bx-python

Current version: {{ version }}

Download

bx-python source
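
Releases are also published to PyPI, so the latest released version can typically be installed with: pip install bx-python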

bx-python-0.8.13/doc/source/templates/layout.html000066400000000000000000000030261415666465100220200ustar00rootroot00000000000000{% extends "!layout.html" %} {%- block document %}
{%- if builder != 'htmlhelp' %}
{%- endif %}
{% block body %} {% endblock %}
{%- if builder != 'htmlhelp' %}
{%- endif %}
{%- endblock %} {% block doctype %} {% endblock %} {% block rootrellink %}
  • bx-python home
  • Table of contents
  • {% endblock %} {# Sidebar and already handled #} {% block relbar1 %}{% endblock %} {% block relbar2 %}{% endblock %} {% block sidebar1 %}{% endblock %} {% block sidebar2 %}{% endblock %} {% block footer %}{% endblock %}bx-python-0.8.13/lib/000077500000000000000000000000001415666465100143175ustar00rootroot00000000000000bx-python-0.8.13/lib/bx/000077500000000000000000000000001415666465100147305ustar00rootroot00000000000000bx-python-0.8.13/lib/bx/__init__.py000066400000000000000000000000271415666465100170400ustar00rootroot00000000000000__version__ = '0.8.13' bx-python-0.8.13/lib/bx/_seqmapping.pyx000066400000000000000000000200651415666465100200000ustar00rootroot00000000000000""" Pyrex extension classes used by `seqmapping.py`. """ cdef extern from "stdlib.h": void* malloc( size_t ) void free( void* ) cdef extern from "Python.h": int PyObject_AsReadBuffer(object, const void **, Py_ssize_t *) except -1 int PyObject_AsWriteBuffer(object, void **, Py_ssize_t *) except -1 int PyBytes_AsStringAndSize(object, char **, Py_ssize_t *) except -1 from numpy import zeros from math import floor import random import sys cdef class CharToIntArrayMapping: """Mapping for converting strings to int arrays""" cdef int table[256] cdef int out_size cdef object reverse_table def __cinit__( self ): """Init empty mapping (all characters map to -1)""" cdef int i for i from 0 <= i < 256: self.table[i] = -1 self.out_size = 0 def __init__( self ): self.reverse_table = dict() def set_mapping( self, c, int symbol ): """Modify mapping so 'chars' map to 'symbol'""" char = ord( c ) self.table[ char ] = symbol if self.out_size <= symbol: self.out_size = symbol + 1 self.reverse_table[ symbol ] = chr( char ) def translate( self, string ): """Translate 'string' and return as int array""" cdef Py_ssize_t s_len, t_len cdef unsigned char * s_buf cdef int * t_buf # Get direct access to string PyBytes_AsStringAndSize( string, <char **> &s_buf, &s_len ) # Initialize empty array rval = zeros( s_len, 'i' ) PyObject_AsWriteBuffer( rval, <void **> &t_buf, &t_len ) # Translate for i from 0 <= i < s_len: t_buf[i] = self.table[ s_buf[ i ] ] # Done return rval def translate_list( self, strings ): """Translate a list of strings into an int array""" cdef Py_ssize_t text_len, i cdef Py_ssize_t s_len, t_len cdef int factor cdef unsigned char * s_buf cdef int * t_buf # No input, no output if len( strings ) < 1: return None # Length of result text_len = len( strings[0] ) # Init result array rval = zeros( text_len, 'i' ) PyObject_AsWriteBuffer( rval, <void **> &t_buf, &t_len ) # Loop over seqs and accumulate result values factor = 1 for string in strings: PyBytes_AsStringAndSize( string, <char **> &s_buf, &s_len ) for i from 0 <= i < text_len: if t_buf[i] >= 0: if self.table[ s_buf[i] ] == -1: t_buf[i] = -1 else: t_buf[i] = t_buf[i] + ( self.table[ s_buf[i] ] * factor ) factor = factor * self.out_size return rval def reverse_map( self, val, nseqs ): factor = self.out_size ** (nseqs-1) rval = [] while factor > 0: rval.append( self.reverse_table[ int( floor( val / factor ) ) ] ) val = val - ( floor(val/factor) * factor ) factor = floor( factor / self.out_size ) rval.reverse() return rval def get_out_size( self ): return self.out_size cdef class IntToIntMapping: cdef int* table cdef int in_size cdef int out_size def __cinit__( self, int in_size ): self.in_size = in_size self.table = <int *> malloc( in_size * sizeof( int ) ) if self.table == NULL: raise MemoryError( "Malloc Failed" ) for i from 0 <= i < in_size: self.table[i] = -1 self.out_size = 0 def __dealloc__( self ): # sys.stderr.write( "freeing
mapping_helper.IntToIntMapping\n" ); sys.stderr.flush() free( self.table ) def set_mapping( self, int index, int symbol ): assert ( -1 <= index < self.in_size ), "%d not between 0 and %s" % ( index, self.in_size ) self.table[index] = symbol if self.out_size <= symbol: self.out_size = symbol + 1 def translate( self, src ): """Translate `src` and return as int array""" cdef Py_ssize_t s_len, t_len cdef int *s_buf cdef int *t_buf # Get direct access to string PyObject_AsReadBuffer( src, <const void **> &s_buf, &s_len ) s_len = s_len / sizeof( int ) assert s_len == len( src ), "`src` argument must be a buffer of 32bit integers" # Initialize empty array rval = zeros( s_len, 'i' ) PyObject_AsWriteBuffer( rval, <void **> &t_buf, &t_len ) # Translate for i from 0 <= i < s_len: if s_buf[i] == -1: t_buf[i] = -1 elif s_buf[i] >= self.in_size: t_buf[i] = -1 else: t_buf[i] = self.table[ s_buf[ i ] ] # Done return rval def __getitem__( self, int x ): if x == -1: return -1 assert 0 <= x < self.in_size return self.table[ x ] def collapse( self, int a, int b ): cdef int i cdef IntToIntMapping copy copy = IntToIntMapping( self.in_size ) copy.out_size = self.out_size - 1 if a > b: a, b = b, a for i from 0 <= i < self.in_size: if self.table[i] == b: copy.table[i] = a elif self.table[i] == copy.out_size: copy.table[i] = b else: copy.table[i] = self.table[i] return copy def expand( self, int x ): """Grow the alphabet by making 'x' a separate symbol. If it already mapped to a single symbol, do nothing""" cdef int i, count, a, b cdef IntToIntMapping copy # Get the symbol x maps to a = self.table[x] # Symbols that map to -1 should not be touched if a < 0: return self # Count how many other input symbols map to a count = 0 for i from 0 <= i < self.in_size: if self.table[i] == a: count = count + 1 # Already a singleton if count < 2: return self # Otherwise, make a copy with the separated symbol copy = IntToIntMapping( self.in_size ) copy.out_size = self.out_size + 1 for i from 0 <= i < self.in_size: copy.table[i] = self.table[i] copy.table[x] = self.out_size return copy def expand_out( self, int a ): """Grow the alphabet breaking 'a' into two symbols randomly""" cdef int i, count, to_split, b cdef IntToIntMapping copy count = 0 for i from 0 <= i < self.in_size: if self.table[i] == a: count = count + 1 if count < 2: return self copy = IntToIntMapping( self.in_size ) copy.out_size = self.out_size + 1 b = self.out_size to_split = random.randrange( count ) count = 0 for i from 0 <= i < self.in_size: if self.table[i] == a: if count == to_split: copy.table[i] = b else: copy.table[i] = a count = count + 1 else: copy.table[i] = self.table[i] return copy def expand_random_split( self, int a ): """Grow the alphabet breaking 'a' into two symbols randomly""" cdef int i, count, b cdef IntToIntMapping copy count = 0 for i from 0 <= i < self.in_size: if self.table[i] == a: count = count + 1 if count < 2: return self copy = IntToIntMapping( self.in_size ) copy.out_size = self.out_size + 1 b = self.out_size to_split = random.sample( range( count ), count/2 ) count = 0 for i from 0 <= i < self.in_size: if self.table[i] == a: if count in to_split: copy.table[i] = b else: copy.table[i] = a count = count + 1 else: copy.table[i] = self.table[i] return copy def get_in_size( self ): return self.in_size def get_out_size( self ): return self.out_size def get_table( self ): rval = zeros( self.in_size, 'i' ) for i in range( self.in_size ): rval[i] = self.table[i] return rval
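# Example usage (a minimal sketch; assumes the compiled extension is
# importable as bx._seqmapping). translate() takes a bytes object and
# returns a numpy int array in which unmapped characters become -1:
#
#   from bx._seqmapping import CharToIntArrayMapping
#   m = CharToIntArrayMapping()
#   for i, c in enumerate("ACGT"):
#       m.set_mapping(c, i)
#   m.translate(b"ACGT-N")  # -> array([ 0,  1,  2,  3, -1, -1], dtype=int32)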
bx-python-0.8.13/lib/bx/align/000077500000000000000000000000001415666465100160225ustar00rootroot00000000000000bx-python-0.8.13/lib/bx/align/__init__.py000066400000000000000000000003401415666465100201300ustar00rootroot00000000000000""" Support for dealing with (genome scale) sequence alignments. See `core` for the abstract alignment classes and `maf`, `axt`, and `lav` for readers and writers in various formats. """ from bx.align.core import * # noqa bx-python-0.8.13/lib/bx/align/_core.pyx000066400000000000000000000004541415666465100176560ustar00rootroot00000000000000""" Pyrex extension to speed up some operations in `core.py`. """ def coord_to_col( int start, char * text, int pos ): cdef int col col = 0 while start < pos: # Note: ord( '-' ) = 45 if text[col] != 45: start = start + 1 col = col + 1 return colbx-python-0.8.13/lib/bx/align/_epo.pyx000066400000000000000000000132121415666465100175050ustar00rootroot00000000000000 import logging, gzip from collections import namedtuple import numpy cimport numpy log = logging.getLogger(__name__) cimport cython DTYPE = numpy.uint64 cdef inline int max2( int a, int b ): if b > a: return b return a cdef inline int min2( int a, int b ): if b < a: return b return a def rem_dash(p, q): """remove dash columns and shift match intervals to the left. both iterables are read on the same direction left-to-right. """ def myp(l): if l: return l.pop(0) def adv(queue, i, d): # shifted interval shi = i[0]-d, i[1]-d assert shi[0] >= 0 if queue and queue[-1][1] == shi[0]: # join to the preceeding one queue[-1] = (queue[-1][0], shi[1]) else: queue.append( shi ) return queue p_card = sum( map(lambda i: p[i][1] - p[i][0], range(len(p))) ) q_card = sum( map(lambda i: q[i][1] - q[i][0], range(len(q))) ) P, Q = [], [] dash = 0 # dash (on both cigars) count so far a, b = p.pop(0), q.pop(0) #while p or q: while a and b: assert dash <= min(a[0], b[0]) i = max(a[0], b[0]) - min(a[1], b[1]) if i >= 0: # no intersection if a[1] <= b[0]: if p: i = min(i, p[0][0] - a[1]) P = adv(P, a, dash) a = myp(p) else: if q: i = min(i, q[0][0] - b[1]) Q = adv(Q, b, dash) b = myp(q) dash += i else: # intersection if a[1] >= b[1]: Q = adv(Q, b, dash); b = myp(q) elif a[1] < b[1]: P = adv(P, a, dash); a = myp(p) #if not a or not b: # no more matchings # break assert (not p) or (not q), "one or both should be empty: p=%s, q=%s" % (str(p), str(q)) if a: P = adv(P, a, dash) if b: Q = adv(Q, b, dash) # remaining intervals (in q or p) r, R = p, P if q: r, R = q, Q # just extend the last inteval by the remaining bases R[-1] = (R[-1][0], R[-1][1] + sum( map(lambda i: i[1]-i[0], r) )) P_card = sum( map(lambda i: P[i][1] - P[i][0], range(len(P))) ) Q_card = sum( map(lambda i: Q[i][1] - Q[i][0], range(len(Q))) ) assert p_card == P_card, "%d != %d" % (p_card, P_card) assert q_card == Q_card, "%d != %d" % (q_card, Q_card) return P, Q def fastLoadChain(fname, hf): data = [] open_f = (fname.endswith(".gz") and gzip.open or open) with open_f(fname, "rt") as fd: while True: line = fd.readline() if line == "": break hd = hf(line) N = [] line = fd.readline().split() while len(line) == 3: N.append( (int(line[0]), int(line[1]), int(line[2])) ) line = fd.readline().split() if len(line) != 1: raise ValueError("last matching block expected (found %s)" % str(line)) N.append( (int(line[0]), 0, 0) ) s, t, q = zip( *N ) data.append( (hd, numpy.array(s, dtype=numpy.int), numpy.array(t, dtype=numpy.int), numpy.array(q, dtype=numpy.int)) ) assert hd.tEnd - hd.tStart == sum(s) + sum(t) assert hd.qEnd - hd.qStart == sum(s) + 
sum(q) fd.readline() # a blank line log.info("parsed %d elements from %s" % (len(data), fname)) return data @cython.wraparound(False) @cython.boundscheck(False) cpdef numpy.ndarray[numpy.uint64_t, ndim=2] bed_union( numpy.ndarray[numpy.uint64_t, ndim=2] elements ): """compute the union of these elements. simply walk the sorted elements and join the intersecting ones works on half-open intervals, i.e., [a, b), [b, c) ---> [a, c) @param elements: 2-dim numpy array of unsigned64 ints @return: 2-dim numpy array of unsigned64 ints""" assert numpy.shape(elements)[0] > 0 cdef Py_ssize_t cst, cen, i, j cdef numpy.ndarray[numpy.uint64_t, ndim=2] tmp_elems, final_elems elements.sort(axis=0) assert elements[0][0] <= elements[numpy.shape(elements)[0]-1][0] tmp_elems = numpy.zeros((numpy.shape(elements)[0], 2), dtype=DTYPE) cst = elements[0, 0] cen = elements[0, 1] j = 0 for i in range(1, numpy.shape(elements)[0]): if elements[i, 0] <= cen: # overlaps with the last one cen = max2(cen, elements[i, 1]) else: tmp_elems[j, 0] = cst tmp_elems[j, 1] = cen j += 1 cst = elements[i, 0] cen = elements[i, 1] tmp_elems[j, 0] = cst tmp_elems[j, 1] = cen j += 1 final_elems = numpy.empty((j, 2), dtype=DTYPE) for i in range(j): final_elems[i, 0] = tmp_elems[i, 0] final_elems[i, 1] = tmp_elems[i, 1] assert final_elems[0, 0] == elements[0, 0], "fe=%d, e=%d" % (final_elems[0,0], elements[0,0]) return final_elems #@cython.wraparound(False) #@cython.boundscheck(False) cpdef numpy.ndarray[numpy.int64_t, ndim=2] cummulative_intervals(numpy.ndarray[numpy.int64_t, ndim=1] S, numpy.ndarray[numpy.int64_t, ndim=1] D ): """compute cummulative intervals for this side of an aligmnent. S and D are one side of the alignment as described in the chain file format""" cdef int N = S.shape[0] cdef int i = 0, j = 0 assert N == D.shape[0] cdef numpy.ndarray[numpy.int64_t, ndim=2] cumm_i = numpy.empty((N, 2), dtype=numpy.int64) cumm_i[0,0] = 0 cumm_i[0,1] = S[0] for i in range(N-1): j = i + 1 cumm_i[j,0] = cumm_i[i, 1] + D[i] cumm_i[j,1] = cumm_i[j,0] + S[j] return cumm_i bx-python-0.8.13/lib/bx/align/axt.py000066400000000000000000000164541415666465100172020ustar00rootroot00000000000000""" Support for reading and writing the `AXT`_ format used for pairwise alignments. .. 
_AXT: http://genome.ucsc.edu/goldenPath/help/axt.html """ from bx import interval_index_file from bx.align import ( Alignment, Component, src_split ) # Tools for dealing with pairwise alignments in AXT format class MultiIndexed: """Similar to 'indexed' but wraps more than one axt_file""" def __init__(self, axt_filenames, keep_open=False): self.indexes = [Indexed(axt_file, axt_file + ".index") for axt_file in axt_filenames] def get(self, src, start, end): blocks = [] for index in self.indexes: blocks += index.get(src, start, end) return blocks class Indexed: """Indexed access to a axt using overlap queries, requires an index file""" def __init__(self, axt_filename, index_filename=None, keep_open=False, species1=None, species2=None, species_to_lengths=None, support_ids=False): if index_filename is None: index_filename = axt_filename + ".index" self.indexes = interval_index_file.Indexes(filename=index_filename) self.axt_filename = axt_filename # nota bene: (self.species1 = species1 or "species1") is incorrect if species1="" self.species1 = species1 if self.species1 is None: self.species1 = "species1" self.species2 = species2 if self.species2 is None: self.species2 = "species2" self.species_to_lengths = species_to_lengths self.support_ids = support_ids # for extra text at end of axt header lines if keep_open: self.f = open(axt_filename) else: self.f = None def get(self, src, start, end): intersections = self.indexes.find(src, start, end) return (self.get_axt_at_offset(val) for start, end, val in intersections) def get_axt_at_offset(self, offset): if self.f: self.f.seek(offset) return read_next_axt(self.f, self.species1, self.species2, self.species_to_lengths, self.support_ids) else: f = open(self.axt_filename) try: f.seek(offset) return read_next_axt(f, self.species1, self.species2, self.species_to_lengths, self.support_ids) finally: f.close() class Reader: """Iterate over all axt blocks in a file in order""" def __init__(self, file, species1=None, species2=None, species_to_lengths=None, support_ids=False): self.file = file # nota bene: (self.species1 = species1 or "species1") is incorrect if species1="" self.species1 = species1 if self.species1 is None: self.species1 = "species1" self.species2 = species2 if self.species2 is None: self.species2 = "species2" self.species_to_lengths = species_to_lengths self.support_ids = support_ids # for extra text at end of axt header lines self.attributes = {} def __next__(self): return read_next_axt(self.file, self.species1, self.species2, self.species_to_lengths, self.support_ids) def __iter__(self): return ReaderIter(self) def close(self): self.file.close() class ReaderIter: def __init__(self, reader): self.reader = reader def __iter__(self): return self def __next__(self): v = next(self.reader) if not v: raise StopIteration return v class Writer: def __init__(self, file, attributes=None): if attributes is None: attributes = {} self.file = file self.block = 0 self.src_split = True if "src_split" in attributes: self.src_split = attributes["src_split"] def write(self, alignment): if len(alignment.components) != 2: raise ValueError( "%d-component alignment is not compatible with axt" % len(alignment.components)) c1 = alignment.components[0] c2 = alignment.components[1] if c1.strand != "+": c1 = c1.reverse_complement() c2 = c2.reverse_complement() if self.src_split: spec1, chr1 = src_split(c1.src) spec2, chr2 = src_split(c2.src) else: chr1, chr2 = c1.src, c2.src self.file.write( "%d %s %d %d %s %d %d %s %s\n" % (self.block, chr1, c1.start+1, c1.start+c1.size, 
chr2, c2.start+1, c2.start+c2.size, c2.strand, alignment.score)) self.file.write("%s\n" % c1.text) self.file.write("%s\n" % c2.text) self.file.write("\n") self.block += 1 def close(self): self.file.close() # ---- Helper methods --------------------------------------------------------- # typical axt block: # 0 chr19 3001012 3001075 chr11 70568380 70568443 - 3500 [optional text] # TCAGCTCATAAATCACCTCCTGCCACAAGCCTGGCCTGGTCCCAGGAGAGTGTCCAGGCTCAGA # TCTGTTCATAAACCACCTGCCATGACAAGCCTGGCCTGTTCCCAAGACAATGTCCAGGCTCAGA # start and stop are origin-1, inclusive # first species is always on plus strand # when second species is on minus strand, start and stop are counted from sequence end def read_next_axt(file, species1, species2, species_to_lengths=None, support_ids=False): line = readline(file, skip_blank=True) if not line: return fields = line.split() if len(fields) < 9 or (not support_ids and len(fields) > 9): raise ValueError("bad axt-block header: %s" % line) attributes = {} if len(fields) > 9: attributes["id"] = "_".join(fields[9:]) seq1 = readline(file) if not line or line.isspace(): raise ValueError("incomplete axt-block; header: %s" % line) seq2 = readline(file) if not line or line.isspace(): raise ValueError("incomplete axt-block; header: %s" % line) # Build 2 component alignment alignment = Alignment(attributes=attributes, species_to_lengths=species_to_lengths) # Build component for species 1 component = Component() component.src = fields[1] if species1 != "": component.src = species1 + "." + component.src component.start = int(fields[2]) - 1 # (axt intervals are origin-1 end = int(fields[3]) # and inclusive on both ends) component.size = end - component.start component.strand = "+" component.text = seq1.strip() alignment.add_component(component) # Build component for species 2 component = Component() component.src = fields[4] if species2 != "": component.src = species2 + "." + component.src component.start = int(fields[5]) - 1 end = int(fields[6]) component.size = end - component.start component.strand = fields[7] component.text = seq2.strip() alignment.add_component(component) # add score try: alignment.score = int(fields[8]) except ValueError: try: alignment.score = float(fields[8]) except ValueError: alignment.score = fields[8] return alignment def readline(file, skip_blank=False): """Read a line from provided file, skipping any blank or comment lines""" while True: line = file.readline() if not line: return None if line[0] != '#' and not (skip_blank and line.isspace()): return line bx-python-0.8.13/lib/bx/align/core.py000066400000000000000000000434471415666465100173400ustar00rootroot00000000000000""" Classes that represent alignments between multiple sequences. 
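For example, a minimal sketch of the two central classes defined below
(sources, coordinates and sizes are illustrative):

    a = Alignment()
    a.add_component(Component(src="hg18.chr1", start=100, size=4, strand="+",
                              src_size=247249719, text="ACG-T"))
    a.add_component(Component(src="mm8.chr7", start=200, size=5, strand="+",
                              src_size=145134094, text="ACGGT"))
    print(a)  # renders a MAF-style "a" block with one "s" line per component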
""" import random import weakref from bx.misc.readlengths import read_lengths_file # DNA reverse complement table # DNA_COMP = " - " \ # " TVGH CD M KN YSA BWXR tvgh cd m kn ysa bwxr " \ # " " \ # " " DNA_COMP = str.maketrans("ACGTacgt", "TGCAtgca") class Alignment: def __init__(self, score=0, attributes=None, species_to_lengths=None): # species_to_lengths is needed only for file formats that don't provide # chromosome lengths; it maps each species name to one of these: # - the name of a file that contains a list of chromosome length pairs # - a dict mapping chromosome names to their length # - a single length value (useful when we just have one sequence and no chromosomes) # internally a file name is replaced by a dict, but only on an "as # needed" basis if attributes is None: attributes = {} self.score = score self.text_size = 0 self.attributes = attributes if species_to_lengths is None: self.species_to_lengths = {} else: self.species_to_lengths = species_to_lengths self.components = [] def add_component(self, component): component._alignment = weakref.ref(self) self.components.append(component) if component.text is not None: if self.text_size == 0: self.text_size = len(component.text) elif self.text_size != len(component.text): raise Exception("Components must have same text length") def get_score(self): return self.__score def set_score(self, score): if isinstance(score, str): try: score = int(score) except ValueError: try: score = float(score) except ValueError: pass self.__score = score score = property(fget=get_score, fset=set_score) def __str__(self): s = "a score=" + str(self.score) for key in self.attributes: s += f" {key}={self.attributes[key]}" s += "\n" # Components for c in self.components: s += str(c) s += "\n" return s def src_size(self, src): species, chrom = src_split(src) if species in self.species_to_lengths: chrom_to_length = self.species_to_lengths[species] elif chrom in self.species_to_lengths: chrom_to_length = self.species_to_lengths else: raise ValueError("no src_size (no length file for %s)" % species) if isinstance(chrom_to_length, int): # (if it's a single length) return chrom_to_length if isinstance(chrom_to_length, str): # (if it's a file name) chrom_to_length = read_lengths_file(chrom_to_length) self.species_to_lengths[species] = chrom_to_length if chrom not in chrom_to_length: raise ValueError(f"no src_size ({species} has no length for {chrom})") return chrom_to_length[chrom] def get_component_by_src(self, src): for c in self.components: if c.src == src: return c return None def get_components_by_src(self, src): for c in self.components: if c.src == src: yield c def get_component_by_src_start(self, src): for c in self.components: if c.src.startswith(src): return c return None def slice(self, start, end): new = Alignment(score=self.score, attributes=self.attributes) for component in self.components: new.components.append(component.slice(start, end)) new.text_size = end - start return new def reverse_complement(self): new = Alignment(score=self.score, attributes=self.attributes) for component in self.components: new.components.append(component.reverse_complement()) new.text_size = self.text_size return new def slice_by_component(self, component_index, start, end): """ Return a slice of the alignment, corresponding to an coordinate interval in a specific component. 
component_index is one of: an integer offset into the components list; a string indicating the src of the desired component; or a Component object. start and end are relative to the + strand, regardless of the component's strand. """ if isinstance(component_index, int): ref = self.components[component_index] elif isinstance(component_index, str): ref = self.get_component_by_src(component_index) elif isinstance(component_index, Component): ref = component_index else: raise ValueError("can't figure out what to do") start_col = ref.coord_to_col(start) end_col = ref.coord_to_col(end) if ref.strand == '-': (start_col, end_col) = (end_col, start_col) return self.slice(start_col, end_col) def column_iter(self): # FIXME: Empty components are not present # in column_iter. # Maybe it would be good to use - and = for i in range(self.text_size): yield [c.text[i] for c in self.components if not c.empty] def limit_to_species(self, species): new = Alignment(score=self.score, attributes=self.attributes) new.text_size = self.text_size for component in self.components: if component.src.split('.')[0] in species: new.add_component(component) return new def remove_all_gap_columns(self): """ Remove any columns containing only gaps from alignment components, text of components is modified IN PLACE. """ seqs = [] for c in self.components: if c.empty: seqs.append(None) continue try: seqs.append(list(c.text)) except TypeError: seqs.append(None) i = 0 text_size = self.text_size while i < text_size: all_gap = True for seq in seqs: if seq is None: continue if seq[i] != '-': all_gap = False if all_gap: for seq in seqs: if seq is None: continue del seq[i] text_size -= 1 else: i += 1 for i in range(len(self.components)): if seqs[i] is None: continue self.components[i].text = ''.join(seqs[i]) self.text_size = text_size def __eq__(self, other): if other is None or not isinstance(other, type(self)): return False if self.score != other.score: return False if self.attributes != other.attributes: return False if len(self.components) != len(other.components): return False for c1, c2 in zip(self.components, other.components): if c1 != c2: return False return True def __ne__(self, other): return not(self.__eq__(other)) def __deepcopy__(self, memo): from copy import deepcopy new = Alignment(score=self.score, attributes=deepcopy(self.attributes), species_to_lengths=deepcopy(self.species_to_lengths)) for component in self.components: new.add_component(deepcopy(component)) return new class Component: def __init__(self, src='', start=0, size=0, strand=None, src_size=None, text=''): self._alignment = None self.src = src self.start = start # Nota Bene: start,size,strand are as they self.size = size # .. appear in a MAF file-- origin-zero, end self.strand = strand # .. excluded, and minus strand counts from self._src_size = src_size # .. end of sequence self.text = text self.quality = None # Optional fields to keep track of synteny status (only makes sense # when the alignment is part of an ordered set) self.synteny_left = None self.synteny_right = None self.synteny_empty = None # If true, this component actually represents a non-aligning region, # and text is None.
self.empty = False # Index maps a coordinate (distance along + strand from + start) to alignment column self.index = None def __str__(self): if self.empty: rval = "e %s %d %d %s %d %s" % ( self.src, self.start, self.size, self.strand, self.src_size, self.synteny_empty) else: rval = "s %s %d %d %s %d %s" % ( self.src, self.start, self.size, self.strand, self.src_size, self.text) if self.synteny_left and self.synteny_right: rval += "\ni %s %s %d %s %d" % ( self.src, self.synteny_left[0], self.synteny_left[1], self.synteny_right[0], self.synteny_right[1]) return rval def get_end(self): return self.start + self.size end = property(fget=get_end) def get_src_size(self): if self._src_size is None: if self._alignment is None: raise Exception("component has no src_size") self._src_size = self._alignment().src_size(self.src) return self._src_size def set_src_size(self, src_size): self._src_size = src_size src_size = property(fget=get_src_size, fset=set_src_size) def get_forward_strand_start(self): if self.strand == '-': return self.src_size - self.end else: return self.start forward_strand_start = property(fget=get_forward_strand_start) def get_forward_strand_end(self): if self.strand == '-': return self.src_size - self.start else: return self.end forward_strand_end = property(fget=get_forward_strand_end) def reverse_complement(self): start = self.src_size - self.end if self.strand == "+": strand = "-" else: strand = "+" if self.empty: text = None else: comp = [ch for ch in self.text.translate(DNA_COMP)] comp.reverse() text = "".join(comp) new = Component(self.src, start, self.size, strand, self._src_size, text) if self.empty: new.empty = True new.synteny_empty = self.synteny_empty # Propagate supplementary info if self.synteny_left: new.synteny_right = self.synteny_left if self.synteny_right: new.synteny_left = self.synteny_right new._alignment = self._alignment return new def slice(self, start, end): new = Component(src=self.src, start=self.start, strand=self.strand, src_size=self._src_size) new._alignment = self._alignment if self.empty: new.empty = True new.size = self.size new.text = None new.synteny_empty = self.synteny_empty return new new.text = self.text[start:end] # for i in range( 0, start ): # if self.text[i] != '-': new.start += 1 # for c in new.text: # if c != '-': new.size += 1 new.start += start - self.text.count('-', 0, start) new.size = len(new.text) - new.text.count('-') # FIXME: This annotation probably means nothing after slicing if # one of the ends changes. In general the 'i' rows of a MAF only # make sense in context (relative to the previous and next alignments # in a stream, slicing breaks that). # LD: Indeed, I think it is wrong to keep them. Let's keep the info # only when the boundaries are kept. if self.synteny_left: if start == 0: new.synteny_left = self.synteny_left if self.synteny_right: if end == len(self.text): new.synteny_right = self.synteny_right return new def slice_by_coord(self, start, end): """ Return the slice of the component corresponding to a coordinate interval. start and end are relative to the + strand, regardless of the component's strand. """ start_col = self.coord_to_col(start) end_col = self.coord_to_col(end) if (self.strand == '-'): (start_col, end_col) = (end_col, start_col) return self.slice(start_col, end_col) def coord_to_col(self, pos): """ Return the alignment column index corresponding to coordinate pos. pos is relative to the + strand, regardless of the component's strand. """ if self.empty: raise ValueError("There is no column index. 
It is empty.") start, end = self.get_forward_strand_start(), self.get_forward_strand_end() if pos < start or pos > end: raise ValueError("Range error: %d not in %d-%d" % (pos, start, end)) if not self.index: self.index = list() if self.strand == '-': # nota bene: for - strand self.index[x] maps to one column # higher than is actually associated with the position; thus # when slice_by_component() and slice_by_coord() flip the ends, # the resulting slice is correct for x in range(len(self.text)-1, -1, -1): if not self.text[x] == '-': self.index.append(x + 1) self.index.append(0) else: for x in range(len(self.text)): if not self.text[x] == '-': self.index.append(x) self.index.append(len(self.text)) x = None try: x = self.index[pos - start] except IndexError: raise Exception("Error in index.") return x def __eq__(self, other): if other is None or not isinstance(other, type(self)): return False return (self.src == other.src and self.start == other.start and self.size == other.size and self.strand == other.strand and self._src_size == other._src_size and self.text == other.text and self.synteny_left == other.synteny_left and self.synteny_right == other.synteny_right and self.synteny_empty == other.synteny_empty and self.empty == other.empty) def __ne__(self, other): return not(self.__eq__(other)) def __deepcopy__(self, memo): new = Component(src=self.src, start=self.start, size=self.size, strand=self.strand, src_size=self._src_size, text=self.text) new._alignment = self._alignment new.quality = self.quality new.synteny_left = self.synteny_left new.synteny_right = self.synteny_right new.synteny_empty = self.synteny_empty new.empty = self.empty new.index = self.index return new def get_reader(format, infile, species_to_lengths=None): import bx.align.axt import bx.align.lav import bx.align.maf if format == "maf": return bx.align.maf.Reader(infile, species_to_lengths) elif format == "axt": return bx.align.axt.Reader(infile, species_to_lengths) elif format == "lav": return bx.align.lav.Reader(infile) else: raise ValueError("Unknown alignment format %s" % format) def get_writer(format, outfile, attributes=None): import bx.align.axt import bx.align.lav import bx.align.maf if attributes is None: attributes = {} if format == "maf": return bx.align.maf.Writer(outfile, attributes) elif format == "axt": return bx.align.axt.Writer(outfile, attributes) elif format == "lav": return bx.align.lav.Writer(outfile, attributes) else: raise ValueError("Unknown alignment format %s" % format) def get_indexed(format, filename, index_filename=None, keep_open=False, species_to_lengths=None): import bx.align.axt import bx.align.lav import bx.align.maf if format == "maf": return bx.align.maf.Indexed(filename, index_filename, keep_open, species_to_lengths) elif format == "axt": return bx.align.axt.Indexed(filename, index_filename, keep_open, species_to_lengths) elif format == "lav": raise Exception("LAV support for Indexed has not been implemented") else: raise ValueError("Unknown alignment format %s" % format) def shuffle_columns(a): """Randomize the columns of an alignment""" mask = list(range(a.text_size)) random.shuffle(mask) for c in a.components: if not c.empty: c.text = ''.join([c.text[i] for i in mask]) def src_split(src): # splits src into species,chrom dot = src.rfind(".") if dot == -1: return None, src else: return src[:dot], src[dot+1:] def src_merge(species, chrom, contig=None): # creates src (inverse of src_split) if species is None: src = chrom else: src = species + "." 
+ chrom if contig is not None: src += "[%s]" % contig return src # ---- Read C extension if available --------------------------------------- try: from ._core import coord_to_col except ImportError: def coord_to_col(start, text, pos): col = 0 while start < pos: if text[col] != '-': start += 1 col += 1 return col bx-python-0.8.13/lib/bx/align/epo.py000066400000000000000000000264031415666465100171640ustar00rootroot00000000000000"""Classes and utilities for multiple alignments from the EPO pipeline""" import logging import os import pickle as cPickle import re from collections import namedtuple from ._epo import ( # noqa: F401 bed_union, cummulative_intervals, fastLoadChain, rem_dash ) log = logging.getLogger(__name__) class Chain(namedtuple('Chain', 'score tName tSize tStrand tStart tEnd qName qSize qStrand qStart qEnd id')): """A Chain header as in http://genome.ucsc.edu/goldenPath/help/chain.html chain coordinates are with respect to the strand, so for example tStart on the + strand is the distance from the leftmost position; tStart on the - strand is the distance from the rightmost position.""" __slots__ = () def __str__(self): return "chain {score} {tName} {tSize} {tStrand} {tStart} {tEnd} {qName} {qSize} {qStrand} {qStart} {qEnd} {id}".format(**self._asdict()) @classmethod def _strfactory(cls, line): """factory class method for Chain :param line: header of a chain (in .chain format) """ assert isinstance(line, str), "this is a factory from string" line = line.rstrip().split()[1:] # the first component is the keyword "chain" tup = [t[0](t[1]) for t in zip([int, str, int, str, int, int, str, int, str, int, int, str], line)] return tuple.__new__(cls, tup) @classmethod def _make_from_epo(cls, trg_comp, qr_comp, trg_chrom_sizes, qr_chrom_sizes): """create a chain of collinear rings from the given components. The target of the chain will always be on the forward strand. This is done to avoid confusion when mapping psl files. So, if trg_comp.strand=-, qr_comp.strand=- (resp. +) the chain header will have tStrand=+, qStrand=+ (resp. -). No strand changes on the other cases.
:param trg_comp: target (i.e., the first) component :type trg_comp: L{EPOitem} :param qr_comp: query (i.e., the second) component :type qr_comp: L{EPOitem} :param trg_chrom_sizes: chromosome sizes of the target :type trg_chrom_sizes: dictionary of the type (chrom) --> size :param qr_chrom_sizes: chromosome sizes of the query :type qr_chrom_sizes: dictionary of the type (chrom) --> size :return: A L{Chain} instance""" # size, target, query arrays S, T, Q = [], [], [] # the target strand of the chain must be on the forward strand trg_intervals = trg_comp.intervals(reverse=trg_comp.strand == '-') qr_intervals = qr_comp.intervals(reverse=trg_comp.strand == '-') if len(trg_intervals) == 0 or len(qr_intervals) == 0: log.warning("deletion/insertion only intervals") return None A, B = rem_dash(trg_intervals, qr_intervals) # correct for when cigar starts/ends with dashes (in number of bases) tr_start_correction = max(B[0][0] - A[0][0], 0) tr_end_correction = max(A[-1][1] - B[-1][1], 0) qr_start_correction = max(A[0][0] - B[0][0], 0) qr_end_correction = max(B[-1][1] - A[-1][1], 0) a, b = A.pop(0), B.pop(0) # intervals are 0-based, half-open => lengths = coordinate difference while A or B: if a[1] < b[1]: T.append(0) Q.append(A[0][0] - a[1]) S.append(min(a[1], b[1]) - max(a[0], b[0])) a = A.pop(0) elif b[1] < a[1]: Q.append(0) T.append(B[0][0] - b[1]) S.append(min(a[1], b[1]) - max(a[0], b[0])) b = B.pop(0) elif A and B: assert 1 > 2, "there are dash columns" else: break S.append(min(a[1], b[1]) - max(a[0], b[0])) assert len(T) == len(Q) == len(S) - 1, "(S, T, Q) = (%d, %d, %d)" % tuple(map(len, (S, T, Q))) tSize = trg_chrom_sizes[trg_comp.chrom] qSize = qr_chrom_sizes[qr_comp.chrom] # UCSC coordinates are 0-based, half-open and e! coordinates are 1-based, closed # chain_start = epo_start - 1 and chain_end = epo_end if qr_comp.strand == '+': chain = Chain( 0, trg_comp.chrom, tSize, "+", (trg_comp.start - 1) + tr_start_correction, trg_comp.end - tr_end_correction, qr_comp.chrom, qSize, (qr_comp.strand == trg_comp.strand and '+' or '-'), (qr_comp.start - 1) + qr_start_correction, qr_comp.end - qr_end_correction, qr_comp.gabid) else: chain = Chain( 0, trg_comp.chrom, tSize, "+", (trg_comp.start - 1) + tr_start_correction, trg_comp.end - tr_end_correction, qr_comp.chrom, qSize, (qr_comp.strand == trg_comp.strand and '+' or '-'), (qr_comp.start - 1) + qr_end_correction, qr_comp.end - qr_start_correction, qr_comp.gabid) # strand correction.
in UCSC coordinates this is: size - coord if chain.qStrand == '-': chain = chain._replace( qEnd=chain.qSize - chain.qStart, qStart=chain.qSize - chain.qEnd) assert chain.tEnd - chain.tStart == sum(S) + sum(T), "[%s] %d != %d" % ( str(chain), chain.tEnd - chain.tStart, sum(S) + sum(T)) assert chain.qEnd - chain.qStart == sum(S) + sum(Q), "[%s] %d != %d" % ( str(chain), chain.qEnd - chain.qStart, sum(S) + sum(Q)) return chain, S, T, Q def slice(self, who): "return the slice entry (in a bed6 format), AS IS in the chain header" assert who in ('t', 'q'), "who should be 't' or 'q'" if who == 't': return (self.tName, self.tStart, self.tEnd, self.id, self.score, self.tStrand) else: return (self.qName, self.qStart, self.qEnd, self.id, self.score, self.qStrand) def bedInterval(self, who): "return a BED6 entry, thus DOES coordinate conversion for minus strands" if who == 't': st, en = self.tStart, self.tEnd if self.tStrand == '-': st, en = self.tSize-en, self.tSize-st return (self.tName, st, en, self.id, self.score, self.tStrand) else: st, en = self.qStart, self.qEnd if self.qStrand == '-': st, en = self.qSize-en, self.qSize-st assert en-st == self.qEnd - self.qStart return (self.qName, st, en, self.id, self.score, self.qStrand) @classmethod def _parse_file(cls, path, pickle=False): """parse a .chain file into a list of the type [(L{Chain}, arr, arr, arr) ...] :param fname: name of the file""" fname = path if fname.endswith(".gz"): fname = path[:-3] if fname.endswith('.pkl'): # you asked for the pickled file. I'll give it to you log.debug("loading pickled file %s ...", fname) with open(fname, "rb") as f: return cPickle.load(f) elif os.path.isfile("%s.pkl" % fname): # there is a cached version I can give to you log.info("loading pickled file %s.pkl ...", fname) if os.stat(path).st_mtime > os.stat("%s.pkl" % fname).st_mtime: log.critical("*** pickled file %s.pkl is not up to date ***", fname) try: with open("%s.pkl" % fname, "rb") as f: return cPickle.load(f) except Exception: log.warning("Loading pickled file %s.pkl failed", fname) data = fastLoadChain(path, cls._strfactory) if pickle and not os.path.isfile('%s.pkl' % fname): log.info("pickling to %s.pkl", fname) with open('%s.pkl' % fname, 'wb') as f: cPickle.dump(data, f) return data class EPOitem(namedtuple('Epo_item', 'species gabid chrom start end strand cigar')): "this format is how alignments are delivered from e!" __slots__ = () cigar_pattern = re.compile(r"(\d*)([MD])") def __repr__(self): return str(self) def __str__(self): c = self.cigar[:5] + "..." 
+ self.cigar[-5:] return "(%s %s %s %d %d %s %s)" % tuple(self[:6] + (c,)) @classmethod def _strfactory(cls, line): """factory method for an EPOitem :param line: a line of input""" cmp = line.rstrip().split() chrom = cmp[2] if not chrom.startswith("chr"): chrom = "chr%s" % chrom instance = tuple.__new__( cls, (cmp[0], cmp[1], chrom, int(cmp[3]), int(cmp[4]), {'1': '+', '-1': '-'}[cmp[5]], cmp[6])) span = instance.end - instance.start + 1 m_num = sum((t[1] == "M" and [t[0]] or [0])[0] for t in instance.cigar_iter(False)) if span != m_num: log.warning("[{gabid}] {species}.{chrom}:{start}-{end}.".format(**instance._asdict()) + "(span) %d != %d (matches)" % (span, m_num)) return None return instance @classmethod def _parse_epo(cls, fname): """Load an entire file in the EPO format into a dictionary of the type {gab_id => [Epoitem, ...]} :param fname: file name""" data = {} with open(fname) as fd: for el in (cls._strfactory(_) for _ in fd): if el: data.setdefault(el.gabid, []).append(el) log.info("parsed %d elements from %s", len(data), fname) return data def cigar_iter(self, reverse): """self.cigar => [(length, type) ... ] iterate the cigar :param reverse: whether to iterate in the reverse direction (right-to-left) :type reverse: boolean :return a list of pairs of the type [(length, M/D) ..] """ l = 0 P = self.cigar_pattern data = [] cigar = self.cigar parsed_cigar = re.findall(P, cigar) if reverse: parsed_cigar = parsed_cigar[::-1] for _l, t in parsed_cigar: # 1M is encoded as M l = (_l and int(_l) or 1) # int(_l) cannot be 0 data.append((l, t)) return data def intervals(self, reverse, thr=0): """return a list of (0-based half-open) intervals representing the match regions of the cigar for example 4MD4M2DM with reverse=False will produce [(0,4), (5,9), (11,12)] 4MD4M2DM with reverse=True will produce [(0,1), (3,7), (8,12)] (= 12 - previous interval) :param reverse: whether to iterate in the reverse direction (right-to-left) (this is passed as is to self.cigar_iter) :type reverse: boolean :param thr: shift all intervals by this much :type thr: integer :return: list of pairs""" d = [(thr, thr)] dl = 0 for tup in self.cigar_iter(reverse): if tup[1] == "D": dl = tup[0] else: s = d[-1][1] + dl d.append((s, s+tup[0])) assert d[0] == (thr, thr) # assert that nr. 
of Ms in the interval == sum of produced intervals assert sum(t[0] for t in self.cigar_iter(False) if t[1] == "M") == sum(t[1]-t[0] for t in d) d_sum = sum(t[1]-t[0] for t in d) assert self.end - self.start + 1 == d_sum, "[ (%d, %d) = %d ] != %d" % ( self.start, self.end, self.end-self.start+1, d_sum) return d[1:] # clip the (thr, thr) entry bx-python-0.8.13/lib/bx/align/epo_tests.py000066400000000000000000000212141415666465100204010ustar00rootroot00000000000000"tests for bx.align.epo" import pdb import random import unittest import numpy as np from bx.align._epo import ( bed_union, cummulative_intervals, ) from bx.align.epo import ( Chain, EPOitem ) class TestBed(unittest.TestCase): def setUp(self): self.N = random.randint(1, 1000) def test_ci(self): S, D = [], [] for i in range(self.N): S.append(random.randint(10, 50)) D.append(random.randint(10, 50)) D[-1] = 0 C = cummulative_intervals(np.array(S, dtype=np.int64), np.array(D, dtype=np.int64)) for i in range(self.N): assert C[i, 1] - C[i, 0] == S[i] for i in range(1, self.N): assert C[i, 0] - C[i-1, 1] == D[i-1], "[%d] %d != %d" % (i, C[i, 0] - C[i-1, 1], D[i-1]) def test_elem_u(self): # back to back, so should return a single interval EL = [] th = 0 for i in range(self.N): size = random.randint(1, 20) EL.append((th, th+size)) th += size U = bed_union(np.array(EL, dtype=np.uint64)) assert U[0, 0] == 0 and U[0, 1] == th # disjoint EL = [] th = 0 for i in range(self.N): size = random.randint(1, 20) EL.append((th, th+size)) th += (size + 1) U = bed_union(np.array(EL, dtype=np.uint64)) for i in range(U.shape[0]): assert (U[i, 0], U[i, 1]) == EL[i] # random with some empty elements EL = [] th = 0 for i in range(self.N): size = random.randint(1, 20) EL.append((th, th+size)) th += random.randint(1, size+size) # 50% of overlapping U = bed_union(np.array(EL, dtype=np.uint64)) assert U[0, 1] > U[0, 0] for i in range(1, U.shape[0]): assert U[i, 1] > U[i, 0] assert U[i, 0] > U[i-1, 1] cigar_pairs = [ ("GGACCTGGAGAGATCAG---------------------------GACTTCAACTGTGTG-------------TCTTAGACTGGG--------AGGGTGTTA", "AGGCCAGGAGAGATCAGGTAAGTCTTAATTTAATAAAGAGATAGGACCTGAACTGTGTCTAACAATAGGTAATATTAGACTGGGGGAGAGAGAAGACTTTC"), ("TTT--------------------------------------------------------------------------------------------------------------------T", "CTTGTACCAAGGACAGTACTGGCAGCCTAATTGCTAACACTTTGTGGTGGATTGGTCCACTCAATATTTGTTCCCACCTCTTTTCAGTCCAGTTCTATAAAGGACAGAAAGTTGAAAACT"), ("A-------------------------------------------------ACACTGGACACAGCACTAACACGATTACTTA", "ACATTTCCCACACTCCCTTGCAGCTAGGTTTCTAGATATAATTTAGATTCCA----------------------------A"), ("TTTGGTCCTCTGGA------CGAGCAGCCAGTGCT---------------------------------------------------------------------------AAAAAAAA", "T---CATTCTAGCAGGTGCTGCAGCAGCAGGTAGCCCTGGAGCCAACAGTTGTGGCTATGATTCTTGATCATCAGATTTGGCTCAAGTGATGTGTTCCTCTAGCATGCACTTGAGATA"), ("G-----------------------C----------------------------------------------------------------------------------------A", "GGCCTGCACTGCCAGTAATTTTAACAAATTTTTAGGCACTGAATTCCCTGTATTAAATCTGTTTTCCTTAGCGTAAACAGATCTCTGTTAAATGAAACTAAACCCTGACTGATA"), ("TATT----------------------------------T", "TCCTTCATTTTATTTCTCCCTTAAAATTTTTTTTATTACT"), ("TAAAAA--A------A------------------------------------------------------------TTTTTTTTTTT", "T---AATTATTTTGCAGCAGGTCCTTGATAACATATCATCTATAAATATTTCAGCAAGAATCTCTAAAAGGCAAGAACCTCCTTCTT"), ("AAACAA---------------------------------------TT---T", "AAACAATACCACTGCATCACTATCAAACCCAAAAAATAACAAAAATTGGGT"), 
("TCTTAAC---TGCTGAGCCATCCCTCCAGCTCCTGTTTTATTTTTATTATGAAGTAATAATA--ATAG--TAATAATAATGATG", "TACACTTAATTCTAAAACTTGTTATGAATCATCA----------TTGG--TTTTTTATTGTGAAGAACTAATATAATCAGA--G"), ("ATGATAATGGTATCCTAGCTCAACACCTG-GAGTTCACCCCAACAGTTAACTAA----GTTTGAGGAAGTGTTAACAAGCCTA---ACAAAGAGGACATGCCAATAGCTGACAGAGTCAC", "A-------CCTCTGCTAGCTCAACTCCTGAGAATCAATTATATAAGCTAGGTCAGTGGTTTTGAGAAAGTATTAGTAGACATTTCTCCAAAGAATACATAAAAATGGCC-A--CAAGTAT") ] def toCigar(species, id, s): I = [(0, 0)] L = [len(_) for _ in s.split("-")] NZ = [_ for _ in L if _] if L[0] > 0: I.append((0, L[0])) NZ = NZ[1:] L = L[1:] for i in range(len(NZ)): L.insert(0, 0) size = NZ[i] start = L.index(size) I.append((I[-1][1] + start, I[-1][1]+start+size)) L = L[start+1:] if len(L): I.append((I[-1][1] + len(L), I[-1][1] + len(L))) C = [] for i in range(1, len(I)): dl = I[i][0] - I[i-1][1] ml = I[i][1] - I[i][0] dc = "" if dl: dc = (dl > 1 and str(dl) or "") + "D" mc = "" if ml: mc = (ml > 1 and str(ml) or "") + "M" C.append(dc+mc) MSUM = sum(i[1]-i[0] for i in I) start = random.randint(50, 10000) return "%s\t%d\t1\t%d\t%d\t%d\t%s" % (species, id, start, start+MSUM-1, random.choice((-1, 1)), "".join(C)) class TestEpo(unittest.TestCase): def setUp(self): self.epo_records = [] for i, (t, q) in enumerate(cigar_pairs): gab_pair = (toCigar("homo_sapiens", i, t), toCigar("mus_musculus", i, q)) A = EPOitem._strfactory(gab_pair[0]) B = EPOitem._strfactory(gab_pair[1]) if A and B: self.epo_records.append((A, B)) def test_out(self): def ch(c, ci): th = 0 for l, t in ci: if t == 'M': assert c[th:th+l].find('-') == -1 else: assert c[th:th+l] == '-' * l th += l for (a, b) in self.epo_records: ca, cb = cigar_pairs[int(a.gabid)] ch(ca, a.cigar_iter(False)) ch(cb, b.cigar_iter(False)) def test_make_chain(self): def cch(cigar, s, e): return cigar[s:e].find('-') == -1 for p in self.epo_records: chain = Chain._make_from_epo(p[0], p[1], {"chr1": 500}, {"chr1": 800}) if not chain: continue ch, S, T, Q = chain i = int(ch.id) c1, c2 = cigar_pairs[i] if p[0].strand == '-': c1 = c1[::-1] c2 = c2[::-1] th = 0 for s, t, q in zip(S, T, Q): if not (cch(c1, th, th+s) and cch(c2, th, th+s)): pdb.set_trace() assert cch(c1, th, th+s) and cch(c2, th, th+s), f"{c1[th:th+s]} and {c2[th:th+s]}" if t > q: cch(c1, th+s, th+s+t) and c1[th+s:th+s+t] == '-'*t else: cch(c2, th+s, th+s+q) and c1[th+s:th+s+q] == '-'*q th = th + s + max(t, q) def test_rem_dash(self): # ****--****-------**** 4M2D4M7D4M # *******-------******* 7M7D7M # has 4 dash columns and should become # ****--****---**** 4M2D4M3D4M # *******---******* 7M3D7M for i in range(100): dash_cols = random.randint(0, 10) tStart = random.randint(0, 1000) qStart = random.randint(0, 1000) epo_pair = ( EPOitem._strfactory("homo_sapiens\t0\t1\t%d\t%d\t1\t%s" % (tStart, tStart+12-1, "4M2D4M%dD4M" % (dash_cols+3))), EPOitem._strfactory("mus_musculus\t0\t1\t%d\t%d\t1\t%s" % (qStart, qStart+14-1, "7M%dD7M" % (dash_cols+3)))) chain = Chain._make_from_epo(epo_pair[0], epo_pair[1], {"chr1": 500}, {"chr1": 800}) ti = epo_pair[0].intervals(False) qi = epo_pair[1].intervals(False) assert ti[2][0] - ti[1][1] - dash_cols == chain[2][1] assert qi[1][0] - qi[0][1] - dash_cols == chain[2][1] # ----***** # *-------* # has 3 dash cols and should become # * # * # with the qStart += 1 and tStart += 4 for i in range(100): dash_cols = random.randint(0, 10) tm = random.randint(6, 10) qm = random.randint(1, 5) tStart = random.randint(0, 1000) qStart = random.randint(0, 1000) epo_pair = ( EPOitem._strfactory("homo_sapiens\t0\t1\t%d\t%d\t1\t%s" % (tStart, 
tStart+tm-1, "%dD%dM" % (dash_cols+1, tm))), EPOitem._strfactory("mus_musculus\t0\t1\t%d\t%d\t1\t%s" % (qStart, qStart+qm+1-1, "M%dD%dM" % (dash_cols+tm-qm, qm)))) chain = Chain._make_from_epo(epo_pair[0], epo_pair[1], {"chr1": 500}, {"chr1": 800}) if chain[1][-1] != qm: pdb.set_trace() assert chain[1][-1] == qm # correct also for coordinate interpretation differences between UCSC and EPO assert (qStart + 1) - 1 == chain[0].qStart, "%d != %d" % (qStart + 1, chain[0].qStart) if __name__ == '__main__': unittest.main() bx-python-0.8.13/lib/bx/align/lav.py000066400000000000000000000524451415666465100171700ustar00rootroot00000000000000""" Support for reading and writing the LAV format produced by the `blastz`_ pairwise aligner. .. _blastz: http://www.bx.psu.edu/miller_lab/ """ import sys from io import StringIO import bx.seq from bx.align import ( Alignment, Component, src_merge, src_split ) class Reader: """Iterate over all lav blocks in a file in order""" def __init__(self, file, path_subs=None, fail_to_ns=False): self.file = file self.lineNumber = 0 self.path_subs = path_subs # list of (prefix,replacement) to allow if self.path_subs is None: # .. redirection of sequence file paths self.path_subs = [] # .. on different machines self.fail_to_ns = fail_to_ns # True => if sequences fail to open, create a fake file of all Ns self.d_stanza_text = None self.seq1_filename = None self.seq1_file = None self.seq1_header = None self.seq1_start = None self.seq1_end = None self.seq1_strand = None self.seq1_contig = None self.seq1_src = None self.seq1_gap = None self.seq2_filename = None self.seq2_file = None self.seq2_header = None self.seq2_start = None self.seq2_end = None self.seq2_strand = None self.seq2_contig = None self.seq2_src = None self.seq2_gap = None def __next__(self): while True: line = self.fetch_line(strip=None, requireLine=False) assert (line), "unexpected end of file (missing #:eof)" line = line.rstrip() if line == "": # (allow blank lines between stanzas) continue if line == "#:eof": line = self.file.readline().rstrip() assert (not line), "extra line after #:eof (line %d, \"%s\")" % (self.lineNumber, line) return None if line == "#:lav": continue if line.startswith("d {"): self.d_stanza_text = self.parse_unknown_stanza() continue if line.startswith("s {"): self.parse_s_stanza() continue if line.startswith("h {"): self.parse_h_stanza() continue if line.startswith("a {"): (score, pieces) = self.parse_a_stanza() break if line.endswith("{"): self.parse_unknown_stanza() continue raise ValueError("incomprehensible line (line %d, \"%s\")" % (self.lineNumber, line)) return self.build_alignment(score, pieces) def __iter__(self): return ReaderIter(self) def close(self): self.file.close() def open_seqs(self): if self.seq1_file is not None and self.seq2_file is not None: return if self.seq1_file is None: if self.seq1_strand == "+": revcomp = False else: revcomp = "-5'" if self.seq1_contig == 1: contig = None else: contig = self.seq1_contig try: f = open(self.seq1_filename, "rb") except Exception: if self.fail_to_ns: f = StringIO(">seq1\n" + ("n" * (self.seq1_end - self.seq1_start))) revcomp = False contig = 1 else: raise Exception("failed to open %s" % self.seq1_filename) self.seq1_file = bx.seq.seq_file(f, revcomp=revcomp, contig=contig) self.seq1_gap = self.seq1_file.gap try: name1 = self.header_to_src_name(self.seq1_header) except ValueError: try: name1 = self.path_to_src_name(self.seq1_filename) except ValueError: name1 = "seq1" (species1, chrom1) = src_split(name1) self.seq1_src = 
src_merge(species1, chrom1, contig) if contig is not None: chrom1 += "[%s]" % contig if self.seq2_file is None: if self.seq2_strand == "+": revcomp = False else: revcomp = "-5'" if self.seq2_contig == 1: contig = None else: contig = self.seq2_contig try: f = open(self.seq2_filename, "rb") except Exception: if self.fail_to_ns: f = StringIO(">seq2\n" + ("n" * (self.seq2_end - self.seq2_start))) revcomp = False contig = 1 else: raise Exception("failed to open %s" % self.seq2_filename) self.seq2_file = bx.seq.seq_file(f, revcomp=revcomp, contig=contig) self.seq2_gap = self.seq2_file.gap try: name2 = self.header_to_src_name(self.seq2_header) except ValueError: try: name2 = self.path_to_src_name(self.seq2_filename) except ValueError: name2 = "seq2" (species2, chrom2) = src_split(name2) self.seq2_src = src_merge(species2, chrom2, contig) if contig is not None: chrom2 += "[%s]" % contig length1 = self.seq1_file.length length2 = self.seq2_file.length assert (species1 != species2) or (chrom1 != chrom2) or (length1 == length2), \ "conflicting lengths for %s (%d and %d)" % (self.seq1_src, length1, length2) self.species_to_lengths = {} self.species_to_lengths[species1] = {} self.species_to_lengths[species2] = {} # (OK if it clobbers line above) self.species_to_lengths[species1][chrom1] = self.seq1_file.length self.species_to_lengths[species2][chrom2] = self.seq2_file.length def close_seqs(self): if self.seq1_file is not None: self.seq1_file.close() self.seq1_file = None if self.seq2_file is not None: self.seq2_file.close() self.seq2_file = None def parse_s_stanza(self): self.close_seqs() line = self.fetch_line(report=" in s-stanza") (self.seq1_filename, self.seq1_start, self.seq1_end, self.seq1_strand, self.seq1_contig) = self.parse_s_seq(line) line = self.fetch_line(report=" in s-stanza") (self.seq2_filename, self.seq2_start, self.seq2_end, self.seq2_strand, self.seq2_contig) = self.parse_s_seq(line) line = self.fetch_line(report=" in s-stanza") assert (line == "}"), "improper s-stanza terminator (line %d, \"%s\")" \ % (self.lineNumber, line) def parse_s_seq(self, line): fields = line.split() filename = fields[0].strip('"') start = int(fields[1]) - 1 end = int(fields[2]) contig = int(fields[4]) if fields[3] == "1": strand = "-" else: strand = "+" if filename.endswith("-"): assert (strand == "-"), "strand mismatch in \"%s\"" % line filename = filename[:-1] filename = do_path_subs(filename, self.path_subs) return (filename, start, end, strand, contig) def parse_h_stanza(self): line = self.fetch_line(strip='"', report=" in h-stanza") self.seq1_header = line self.seq1_header_prefix = "" if line.startswith(">"): self.seq1_header = line[1:].strip() self.seq1_header_prefix = ">" self.seq1_header = self.seq1_header.split(None, 1) if len(self.seq1_header) > 0: self.seq1_header = self.seq1_header[0] else: self.seq1_header = "seq1" line = self.fetch_line(strip='"', report=" in h-stanza") self.seq2_header = line self.seq2_header_prefix = "" if line.startswith(">"): self.seq2_header = line[1:].strip() self.seq2_header_prefix = ">" self.seq2_header = self.seq2_header.split(None, 1) if len(self.seq2_header) > 0: self.seq2_header = self.seq2_header[0] else: self.seq2_header = "seq2" line = self.fetch_line(report=" in h-stanza") assert (line == "}"), "improper h-stanza terminator (line %d, \"%s\")" \ % (self.lineNumber, line) def parse_a_stanza(self): """returns the pair (score,pieces) where pieces is a list of ungapped segments (start1,start2,length,pctId) with start1,start2 origin-0""" # 's' line -- score, 1 field
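        # For reference, an a-stanza has roughly the following shape (a
        # sketch, not taken from a real file): 's' carries the score,
        # 'b'/'e' give 1-based begin/end positions in the two sequences,
        # and each 'l' line is one ungapped piece
        # (start1, start2, end1, end2, percent identity):
        #
        #   a {
        #     s 10286
        #     b 107 54
        #     e 192 139
        #     l 107 54 192 139 81
        #   }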
line = self.fetch_line(report=" in a-stanza") fields = line.split() assert (fields[0] == "s"), "s line expected in a-stanza (line %d, \"%s\")" \ % (self.lineNumber, line) try: score = int(fields[1]) except ValueError: score = float(fields[1]) # 'b' line -- begin positions in seqs, 2 fields line = self.fetch_line(report=" in a-stanza") fields = line.split() assert (fields[0] == "b"), "b line expected in a-stanza (line %d, \"%s\")" \ % (self.lineNumber, line) # 'e' line -- end positions in seqs, 2 fields line = self.fetch_line(report=" in a-stanza") fields = line.split() assert (fields[0] == "e"), "e line expected in a-stanza (line %d, \"%s\")" \ % (self.lineNumber, line) # 'l' lines pieces = [] while (True): line = self.fetch_line(report=" in a-stanza") fields = line.split() if fields[0] != "l": break start1 = int(fields[1]) - 1 start2 = int(fields[2]) - 1 length = int(fields[3]) - start1 length2 = int(fields[4]) - start2 try: pctId = int(fields[5]) except ValueError: pctId = float(fields[5]) assert (length2 == length), "length mismatch in a-stanza" pieces.append((start1+self.seq1_start, start2+self.seq2_start, length, pctId)) assert (line == "}"), "improper a-stanza terminator (line %d, \"%s\")" \ % (self.lineNumber, line) return (score, pieces) def parse_unknown_stanza(self): lines = [] while (True): line = self.fetch_line() assert (line), "unexpected end of file (missing #:eof)" if line == "}": break lines.append(line) return " " + "\n ".join(lines) + "\n" def fetch_line(self, strip=True, requireLine=True, report=""): if strip is None: line = self.file.readline() elif strip is True: line = self.file.readline().strip() else: line = self.file.readline().strip().strip(strip) self.lineNumber += 1 if requireLine: assert (line), "unexpected blank line or end of file%s (line %d)" \ % (report, self.lineNumber) return line def d_stanza(self): if self.d_stanza_text is None: return "" return "d {\n%s}" % self.d_stanza_text def s_stanza(self): if self.seq1_filename is None: return "" if self.seq1_strand == "-": seq1_strand = "1" else: seq1_strand = "0" if self.seq2_strand == "-": seq2_strand = "1" else: seq2_strand = "0" s = " \"%s\" %d %d %s %d\n"\ % (self.seq1_filename, self.seq1_start+1, self.seq1_end, seq1_strand, self.seq1_contig) s += " \"%s\" %d %d %s %d\n"\ % (self.seq2_filename, self.seq2_start+1, self.seq2_end, seq2_strand, self.seq2_contig) return "s {\n%s}" % s def h_stanza(self): if self.seq1_header is None: return "" s = f" \"{self.seq1_header_prefix}{self.seq1_header}\"\n" s += f" \"{self.seq2_header_prefix}{self.seq2_header}\"\n" return "h {\n%s}" % s def build_alignment(self, score, pieces): """converts a score and pieces to an alignment""" # build text self.open_seqs() text1 = text2 = "" end1 = end2 = None for (start1, start2, length, _pctId) in pieces: if end1 is not None: if start1 == end1: # insertion in sequence 2 text1 += self.seq1_gap * (start2-end2) text2 += self.seq2_file.get(end2, start2-end2) else: # insertion in sequence 1 text1 += self.seq1_file.get(end1, start1-end1) text2 += self.seq2_gap * (start1-end1) text1 += self.seq1_file.get(start1, length) text2 += self.seq2_file.get(start2, length) end1 = start1 + length end2 = start2 + length # create alignment start1 = pieces[0][0] start2 = pieces[0][1] end1 = pieces[-1][0] + pieces[-1][2] end2 = pieces[-1][1] + pieces[-1][2] size1 = end1 - start1 size2 = end2 - start2 a = Alignment(score=score, species_to_lengths=self.species_to_lengths) # if (self.seq1_strand == "-"): start1 = self.seq1_file.length - end1
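        # Note: the commented-out line above (and its seq2 counterpart
        # below) would map a reverse-strand start back to forward-strand
        # coordinates; as written, starts are kept in the strand-relative
        # coordinates produced by parse_a_stanza.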
a.add_component(Component(self.seq1_src, start1, size1, self.seq1_strand, text=text1)) # if (self.seq2_strand == "-"): start2 = self.seq2_file.length - end2 a.add_component(Component(self.seq2_src, start2, size2, self.seq2_strand, text=text2)) return a def path_to_src_name(self, path_name): # converts, e.g. ".../hg18/seq/chr13.nib" to "hg18.chr13" if path_name is None or path_name == "": raise ValueError if path_name.endswith(".nib"): path_name = path_name[:-4] if path_name.endswith(".fa"): path_name = path_name[:-3] if path_name.endswith(".fasta"): path_name = path_name[:-6] slash = path_name.rfind("/") if slash == -1: return path_name name = path_name[slash+1:] path_name = path_name[:slash] if path_name.endswith("/seq"): path_name = path_name[:-4] slash = path_name.rfind("/") if slash != -1: path_name = path_name[slash+1:] return path_name + "." + name def header_to_src_name(self, header): # converts, e.g. "hg18.chr13:115404472-117281897" to "hg18.chr13" if header is None or header == "": raise ValueError colon = header.rfind(":") if colon != -1: header = header[:colon] if "/" in header: raise ValueError if header.count(".") == 0: return header header = header.split(".") if header[0] == "" or header[1] == "": raise ValueError return ".".join(header) class ReaderIter: def __init__(self, reader): self.reader = reader def __iter__(self): return self def __next__(self): v = next(self.reader) if not v: raise StopIteration return v class LavAsPiecesReader(Reader): """Iterate over all lav blocks in a file in order, returning alignments as score and pieces, as returned by Reader.parse_a_stanza""" def build_alignment(self, score, pieces): return (score, pieces) class Writer: # blockHash is a hash from (src1,strand1,src2,strand2) to a list of blocks; # the blocks are collected on each call to write(), but the actual writing # does not occur until close(). 
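    # A rough usage sketch (hypothetical file names; alignments must have
    # exactly two components, e.g. pairwise blocks from a MAF reader):
    #
    #   writer = Writer(open("out.lav", "w"), {"name_format_1": "%s/%s.nib"})
    #   for block in blocks:
    #       writer.write(block)  # blocks are grouped by (src, strand) pairs
    #   writer.close()           # stanzas are actually emitted here, sorted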
def __init__(self, file, attributes=None): if attributes is None: attributes = {} self.file = file self.fname1 = None self.fname2 = None self.block = 0 self.blockHash = {} # (see note above) if "name_format_1" in attributes: self.fname1 = attributes["name_format_1"] if "name_format_2" in attributes: self.fname2 = attributes["name_format_2"] if "d_stanza" in attributes: self.write_lav_marker() print("d {", file=self.file) print(attributes["d_stanza"], file=self.file) print("}", file=self.file) def write(self, alignment): if len(alignment.components) != 2: raise ValueError( "%d-component alignment is not compatible with lav" % len(alignment.components)) c1 = alignment.components[0] c2 = alignment.components[1] key = (c1.src, c1.strand, c2.src, c2.strand) if key not in self.blockHash: self.blockHash[key] = [] self.blockHash[key].append(alignment) self.block += 1 def close(self): keys = [key for key in self.blockHash] keys = sort_keys_by_chrom(keys) for key in keys: (src1, strand1, src2, strand2) = key alignment = self.blockHash[key][0] self.src1 = src1 self.strand1 = strand1 self.length1 = alignment.src_size(src1) self.src2 = src2 self.strand2 = strand2 self.length2 = alignment.src_size(src2) self.write_s_stanza() self.write_h_stanza() for alignment in self.blockHash[key]: self.write_a_stanza(alignment) self.write_trailer() if self.file != sys.stdout: self.file.close() def write_s_stanza(self): self.write_lav_marker() (strand1, flag1) = minus_or_nothing(self.strand1) (strand2, flag2) = minus_or_nothing(self.strand2) fname1 = build_filename(self.fname1, self.src1) fname2 = build_filename(self.fname2, self.src2) print("s {", file=self.file) print(" \"%s%s\" 1 %d %d 1" % (fname1, strand1, self.length1, flag1), file=self.file) print(" \"%s%s\" 1 %d %d 1" % (fname2, strand2, self.length2, flag2), file=self.file) print("}", file=self.file) def write_h_stanza(self): strand1 = rc_or_nothing(self.strand1) strand2 = rc_or_nothing(self.strand2) print("h {", file=self.file) print(f" \"> {self.src1}{strand1}\"", file=self.file) print(f" \"> {self.src2}{strand2}\"", file=self.file) print("}", file=self.file) def write_a_stanza(self, alignment): c1 = alignment.components[0] pos1 = c1.start text1 = c1.text.upper() c2 = alignment.components[1] pos2 = c2.start text2 = c2.text.upper() # collect ungapped pieces pieces = [] piece1 = None for ix in range(len(text1)): ch1 = text1[ix] ch2 = text2[ix] nonGap = (ch1 != "-") and (ch2 != "-") if nonGap: if piece1 is None: # new piece starts (piece1, piece2, idCount) = (pos1, pos2, 0) if ch1 == ch2: idCount += 1 elif piece1 is not None: # new gap starts size = pos1 - piece1 pctId = (200*idCount + size) / (2*size) pieces.append((piece1, piece2, size, pctId)) piece1 = None if ch1 != "-": pos1 += 1 if ch2 != "-": pos2 += 1 if piece1 is not None: size = pos1 - piece1 pctId = (200*idCount + size) / (2*size) pieces.append((piece1, piece2, size, pctId)) # write the block (start1, start2, size, pctId) = pieces[-1] # get end of final piece end1 = start1 + size end2 = start2 + size (start1, start2, size, pctId) = pieces[0] # get start of first piece score = int(round(alignment.score)) print("a {", file=self.file) print(" s %s" % score, file=self.file) print(" b %d %d" % (start1 + 1, start2 + 1), file=self.file) print(" e %d %d" % (end1, end2), file=self.file) for (start1, start2, size, pctId) in pieces: print(" l %d %d %d %d %d" % (start1 + 1, start2 + 1, start1 + size, start2 + size, pctId), file=self.file) print("}", file=self.file) def write_lav_marker(self): print("#:lav", 
file=self.file) def write_trailer(self): print("#:eof", file=self.file) def sort_keys_by_chrom(keys): decorated = sorted((chrom_key(src1), strand1, chrom_key(src2), strand2, (src1, strand1, src2, strand2)) for (src1, strand1, src2, strand2) in keys) return [key for (src1, strand1, src2, strand2, key) in decorated] def chrom_key(src): (species, chrom) = src_split(src) if chrom.startswith("chr"): chrom = chrom[3:] try: chrom = int(chrom) except ValueError: pass return chrom def build_filename(fmt, src): if fmt is None: return src num = fmt.count("%s") if num == 0: return fmt (species, chrom) = src_split(src) if num == 1: return fmt % chrom return fmt % (species, chrom) def minus_or_nothing(strand): if strand == "-": return ("-", 1) else: return ("", 0) def rc_or_nothing(strand): if strand == "-": return " (reverse complement)" else: return "" def do_path_subs(path, path_subs): for (prefix, replacement) in path_subs: if path.startswith(prefix): return replacement + path[len(prefix):] return path bx-python-0.8.13/lib/bx/align/lav_tests.py000066400000000000000000000041051415666465100204000ustar00rootroot00000000000000""" Tests for `bx.align.lav`. """ import unittest import bx.align.lav as lav test_lav = "test_data/lav_tests/apple_orange.lav" class lavTestCase(unittest.TestCase): def testReader(self): reader = lav.Reader(open(test_lav)) a = next(reader) assert a.score == 10286, "a.score is wrong: %s" % a.score assert len(a.components) == 2 check_component(a.components[0], "apple", 106, 252, "+", 411, "GTCCGGCCGGCTGAGAGCTACAATACACATGCACGCAGTTTGGCCACTCACATTAAGTATATGAGGAAGGGTTAGCATGAGTTGTACTATAAGGCAGCGGATAGCAGGTTGTGGAAAAATATCCTCCCGATTCAAATCCCCAGGTGCCTAAA----------------GTAGGGCCGGTAGTTGAATGCTTGCCTGTCAGACTGGATGACCAAGTTCAGTATCAACACAATATAGTGCCAGGAGCTAATTGTTCCCCAGCAGCGTGAC") check_component(a.components[1], "lav_tests.orange", 53, 252, "+", 361, "GTCCGGCCGGCTGTGTGCTACAATACACGTTCACGCAGTTTGGCCAATCACTTTAAGTATATACGAAATGGTTACCATGAGTTGTACTGTAAGGCAGCGGAAAGC---TTGTTAA--------CTCCTGGGCGACATT----GGGGCTGCAACATCGTTTATCCTCCTCTACAACCAATAGCTG-TTGCTTCTTGGTTCAAGTATATCCCATGGATTAGTATCAACACGATATAGTGTCAGGAGCTAATTGTTCCCCAGCAGCGTGAC") a = next(reader) assert a.score == 3586, "a.score is wrong: %s" % a.score assert len(a.components) == 2 check_component(a.components[0], "apple", 52, 72, "+", 411, "TGCATATCGACTATTACAGCCACGCGAGTTACATTCCTCTTTTTTTTTGCTGGCGTCCGGCCGGCTGAGAGC") check_component(a.components[1], "lav_tests.orange", 2, 72, "-", 361, "TGCATATCGACTAGTACAGCCTCTCGAGTTACCCCCCCCATTCCTCTTGCTGACGTCACGCTGCTGGGGAAC") a = next(reader) assert a is None reader.close() def check_component(c, src, start, size, strand, src_size, text): # ..print "\"%s\" == \"%s\"" % (c.src,src) assert c.src == src, f"c.src = {c.src} (expected {src})" assert c.start == start, f"c.start = {c.start} (expected {start})" assert c.size == size, f"c.size = {c.size} (expected {size})" assert c.strand == strand, f"c.strand = {c.strand} (expected {strand})" assert c.src_size == src_size, f"c.src_size = {c.src_size} (expected {src_size})" assert c.text == text, f"c.text = \"{c.text}\" (expected \"{text}\")" bx-python-0.8.13/lib/bx/align/maf.py000066400000000000000000000204111415666465100171350ustar00rootroot00000000000000""" Support for the `MAF`_ multiple sequence alignment format used by `multiz`_. .. _MAF: http://genome.ucsc.edu/FAQ/FAQformat.html#format5 .. 
_multiz: http://www.bx.psu.edu/miller_lab/ """ from io import ( StringIO, TextIOWrapper, ) from bx import interval_index_file from bx.align import ( Alignment, Component ) MAF_INVERSE_STATUS = 'V' MAF_INSERT_STATUS = 'I' MAF_CONTIG_STATUS = 'C' MAF_CONTIG_NESTED_STATUS = 'c' MAF_NEW_STATUS = 'N' MAF_NEW_NESTED_STATUS = 'n' MAF_MAYBE_NEW_STATUS = 'S' MAF_MAYBE_NEW_NESTED_STATUS = 's' MAF_MISSING_STATUS = 'M' class MAFIndexedAccess(interval_index_file.AbstractIndexedAccess): """ Indexed access to a MAF file. """ def read_at_current_offset(self, file, **kwargs): """ Read the MAF block at the current position in `file` and return an instance of `Alignment`. """ return read_next_maf(file, **kwargs) def open_data(self): data = super().open_data() return TextIOWrapper(data, encoding="ascii") class MAFMultiIndexedAccess(interval_index_file.AbstractMultiIndexedAccess): """ Indexed access to multiple MAF files. """ indexed_access_class = MAFIndexedAccess Indexed = MAFIndexedAccess """Deprecated: `MAFIndexedAccess` is also available under the name `Indexed`.""" MultiIndexed = MAFMultiIndexedAccess """Deprecated: `MAFMultiIndexedAccess` is also available under the name `MultiIndexed`.""" class Reader: """ Iterate over all maf blocks in a file in order """ def __init__(self, file, **kwargs): self.file = file self.maf_kwargs = kwargs # Read and verify maf header, store any attributes fields = self.file.readline().split() if fields[0] != '##maf': raise Exception("File does not have MAF header") self.attributes = parse_attributes(fields[1:]) def __next__(self): return read_next_maf(self.file, **self.maf_kwargs) def __iter__(self): return ReaderIter(self) def close(self): self.file.close() class ReaderIter: """ Adapts a `Reader` to the iterator protocol. """ def __init__(self, reader): self.reader = reader def __iter__(self): return self def __next__(self): v = next(self.reader) if not v: raise StopIteration return v class Writer: def __init__(self, file, attributes=None): if attributes is None: attributes = {} self.file = file # Write header, Webb's maf code wants version first, we accommodate if 'version' not in attributes: attributes['version'] = 1 self.file.write("##maf version=%s" % attributes['version']) for key in attributes: if key == 'version': continue self.file.write(f" {key}={attributes[key]}") self.file.write("\n") def write(self, alignment): self.file.write("a score=" + str(alignment.score)) for key in alignment.attributes: self.file.write(f" {key}={alignment.attributes[key]}") self.file.write("\n") # Components rows = [] for c in alignment.components: # "Empty component" generates an 'e' row if c.empty: rows.append(("e", c.src, str(c.start), str(c.size), c.strand, str(c.src_size), c.synteny_empty)) continue # Regular component rows.append(("s", c.src, str(c.start), str(c.size), c.strand, str(c.src_size), c.text)) # If component has quality, write a q row if c.quality is not None: rows.append(("q", c.src, "", "", "", "", c.quality)) # If component has synteny follow up with an 'i' row if c.synteny_left and c.synteny_right: rows.append(("i", c.src, "", "", "", "", " ".join(map(str, c.synteny_left + c.synteny_right)))) self.file.write(format_tabular(rows, "llrrrrl")) self.file.write("\n") def close(self): self.file.close() # ---- Helper methods ------------------------------------------------------- def from_string(string, **kwargs): return read_next_maf(StringIO(string), **kwargs) def read_next_maf(file, species_to_lengths=None, parse_e_rows=False): """ Read the next MAF block from `file`
and return as an `Alignment` instance. If `parse_e_rows` is true, empty components will be created when e rows are encountered. """ alignment = Alignment(species_to_lengths=species_to_lengths) # Attributes line line = readline(file, skip_blank=True) if not line: return None fields = line.split() if fields[0] != 'a': raise Exception("Expected 'a ...' line") alignment.attributes = parse_attributes(fields[1:]) if 'score' in alignment.attributes: alignment.score = alignment.attributes['score'] del alignment.attributes['score'] else: alignment.score = 0 # Sequence lines last_component = None while True: line = readline(file) # EOF or Blank line terminates alignment components if not line or line.isspace(): break if line.isspace(): break # Parse row fields = line.split() if fields[0] == 's': # An 's' row contains sequence for a component component = Component() component.src = fields[1] component.start = int(fields[2]) component.size = int(fields[3]) component.strand = fields[4] component.src_size = int(fields[5]) if len(fields) > 6: component.text = fields[6].strip() # Add to set alignment.add_component(component) last_component = component elif fields[0] == 'e': # An 'e' row, when no bases align for a given species this tells # us something about the synteny if parse_e_rows: component = Component() component.empty = True component.src = fields[1] component.start = int(fields[2]) component.size = int(fields[3]) component.strand = fields[4] component.src_size = int(fields[5]) component.text = None synteny = fields[6].strip() assert len(synteny) == 1, \ "Synteny status in 'e' rows should be denoted with a single character code" component.synteny_empty = synteny alignment.add_component(component) last_component = component elif fields[0] == 'i': # An 'i' row, indicates left and right synteny status for the # previous component, we hope ;) assert fields[1] == last_component.src, "'i' row does not follow matching 's' row" last_component.synteny_left = (fields[2], int(fields[3])) last_component.synteny_right = (fields[4], int(fields[5])) elif fields[0] == 'q': assert fields[1] == last_component.src, "'q' row does not follow matching 's' row" # TODO: Should convert this to an integer array? last_component.quality = fields[2] return alignment def readline(file, skip_blank=False): """Read a line from provided file, skipping any blank or comment lines""" while True: line = file.readline() if not line: return None if line[0] != '#' and not (skip_blank and line.isspace()): return line def parse_attributes(fields): """Parse list of key=value strings into a dict""" attributes = {} for field in fields: pair = field.split('=') attributes[pair[0]] = pair[1] return attributes def format_tabular(rows, align=None): if len(rows) == 0: return "" lengths = [len(col) for col in rows[0]] for row in rows[1:]: for i in range(0, len(row)): lengths[i] = max(lengths[i], len(row[i])) rval = "" for row in rows: for i in range(0, len(row)): if align and align[i] == "l": rval += row[i].ljust(lengths[i]) else: rval += row[i].rjust(lengths[i]) rval += " " rval += "\n" return rval bx-python-0.8.13/lib/bx/align/maf_tests.py000066400000000000000000000261111415666465100203620ustar00rootroot00000000000000""" Tests for `bx.align.maf`. 
""" from io import StringIO import bx.align as align import bx.align.maf as maf # A simple MAF from the rat paper days test_maf = """##maf version=1 scoring=humor.v4 # humor.v4 R=30 M=10 /cluster/data/hg15/bed/blastz.mm3/axtNet300/chr1.maf # /cluster/data/hg15/bed/blastz.rn3/axtNet300/chr1.maf a score=0.128 s human_hoxa 100 8 + 100257 ACA-TTACT s horse_hoxa 120 9 - 98892 ACAATTGCT s fugu_hoxa 88 7 + 90788 ACA--TGCT a score=0.071 s human_unc 9077 8 + 10998 ACAGTATT # Comment s horse_unc 4555 6 - 5099 ACA--ATT s fugu_unc 4000 4 + 4038 AC----TT """ # A more complicated MAF with synteny annotation and such test_maf_2 = """##maf version=1 scoring=autoMZ.v1 a score=3656.000000 s hg17.chr1 2005 34 + 245522847 TGTAACTTAATACCACAACCAGGCATAGGGG--AAA------------- s rheMac2.chr11 9625228 31 + 134511895 TGTAACCTCTTACTGCAACAAGGCACAGGGG------------------ i rheMac2.chr11 C 0 I 1678 s panTro1.chr1 2014 34 + 229575298 TGTAACTTAATACCACAACCAGGCATGGGGG--AAA------------- i panTro1.chr1 C 0 C 0 s bosTau2.chr5 64972365 47 + 76426644 TCCAGCCATGTGTTGTGATCAG--CCAGGGGCTAAAGCCATGGCGGTAG i bosTau2.chr5 C 0 I 1462 s canFam2.chr27 45129665 31 + 48908698 TTTGACTCTGTGCTCTTATCAGGCCCAAGGG------------------ i canFam2.chr27 C 0 I 1664 e danRer3.chr18 2360867 428 + 50308305 I e oryCun1.scaffold_139397 643 1271 - 4771 I e loxAfr1.scaffold_5603 58454 1915 + 68791 I e echTel1.scaffold_212365 4641 1430 + 9822 I e echTel1.scaffold_212365 4641 1430 + 9822 I e rn3.chr4 29161032 1524 - 187371129 I e mm7.chr6 28091695 3290 - 149646834 I """ # A MAF to test slicing upon test_maf_3 = """##maf version=1 scoring=none a score=0 s apple 34 64 + 110 AGGGA---GTTCGTCACT------GTCGTAAGGGTTCAGA--CTGTCTATGTATACACAAGTTGTGTTGCA--ACCG s orange 19 61 - 100 AGGGATGCGTT--TCACTGCTATCGTCGTA----TTCAGACTTCG-CTATCT------GAGTTGT---GCATTACCG """ complex_maf = align.Alignment() complex_maf.score = "7009" complex_maf.components.append(align.Component(src="human_hoxa", start=100, size=8, strand="+", src_size=100257, text="ACA-TTACT")) complex_maf.components.append(align.Component(src="horse_hoxa", start=120, size=9, strand="-", src_size=98892, text="ACAATTGCT")) complex_maf.components[-1].synteny_left = (maf.MAF_NEW_STATUS, 0) complex_maf.components[-1].synteny_right = (maf.MAF_CONTIG_STATUS, 0) complex_maf.components.append(align.Component(src="unknown_1", start=150, size=3, strand="-", src_size=98892, text="---ATT---")) complex_maf.components.append(align.Component(src="unknown_2", start=12, size=1000, strand="+", src_size=1200, text=None)) complex_maf.components[-1].empty = True complex_maf.components[-1].synteny_empty = maf.MAF_INSERT_STATUS complex_maf.text_size = 9 def test_reader(): reader = maf.Reader(StringIO(test_maf)) assert reader.attributes["version"] == "1" assert reader.attributes["scoring"] == "humor.v4" a = next(reader) assert a.score == 0.128 assert len(a.components) == 3 check_component(a.components[0], "human_hoxa", 100, 8, "+", 100257, "ACA-TTACT") check_component(a.components[1], "horse_hoxa", 120, 9, "-", 98892, "ACAATTGCT") check_component(a.components[2], "fugu_hoxa", 88, 7, "+", 90788, "ACA--TGCT") a = next(reader) assert a.score == 0.071 assert len(a.components) == 3 check_component(a.components[0], "human_unc", 9077, 8, "+", 10998, "ACAGTATT") check_component(a.components[1], "horse_unc", 4555, 6, "-", 5099, "ACA--ATT") check_component(a.components[2], "fugu_unc", 4000, 4, "+", 4038, "AC----TT") a = next(reader) assert a is None reader.close() def test_writer(): val = StringIO() writer = maf.Writer(val, {'scoring': 'foobar'}) a = 
align.Alignment() a.score = 7009 a.components.append(align.Component(src="human_hoxa", start=100, size=9, strand="+", src_size=1000257, text="ACA-TTACT")) a.components.append(align.Component(src="horse_hoxa", start=120, size=10, strand="-", src_size=98892, text="ACAATTGCT")) check_component(a.components[0], "human_hoxa", 100, 9, "+", 1000257, "ACA-TTACT") check_component(a.components[1], "horse_hoxa", 120, 10, "-", 98892, "ACAATTGCT") writer.write(a) assert val.getvalue() == """##maf version=1 scoring=foobar a score=7009 s human_hoxa 100 9 + 1000257 ACA-TTACT s horse_hoxa 120 10 - 98892 ACAATTGCT """ # noqa: W291 def test_slice(): b = complex_maf.slice_by_component(0, 101, 105) check_component(b.components[0], src="human_hoxa", start=101, size=4, strand="+", src_size=100257, text="CA-TT") check_component(b.components[1], src="horse_hoxa", start=121, size=5, strand="-", src_size=98892, text="CAATT") check_component(b.components[2], src="unknown_1", start=150, size=3, strand="-", src_size=98892, text="--ATT") check_component(b.components[3], src="unknown_2", start=12, size=1000, strand="+", src_size=1200, text=None) assert b.components[3].empty assert b.components[3].synteny_empty == maf.MAF_INSERT_STATUS # test slicing with + strand src reader = maf.Reader(StringIO(test_maf_3)) a = next(reader) b = a.slice_by_component(0, 40, 62) check_component(b.components[0], src="apple", start=40, size=22, strand="+", src_size=110, text="TTCGTCACT------GTCGTAAGGGTTC") check_component(b.components[1], src="orange", start=28, size=22, strand="-", src_size=100, text="TT--TCACTGCTATCGTCGTA----TTC") # test slicing with - strand src b = a.slice_by_component(1, 30, 68) check_component(b.components[0], src="apple", start=46, size=41, strand="+", src_size=110, text="ACT------GTCGTAAGGGTTCAGA--CTGTCTATGTATACACAAGTTG") check_component(b.components[1], src="orange", start=32, size=38, strand="-", src_size=100, text="ACTGCTATCGTCGTA----TTCAGACTTCG-CTATCT------GAGTTG") a = next(reader) assert a is None def test_reverse_complement(): b = complex_maf.reverse_complement() check_component(b.components[0], src="human_hoxa", start=100257-100-8, size=8, strand="-", src_size=100257, text="AGTAA-TGT") check_component(b.components[1], src="horse_hoxa", start=98892-120-9, size=9, strand="+", src_size=98892, text="AGCAATTGT") assert b.components[1].synteny_right == (maf.MAF_NEW_STATUS, 0) assert b.components[1].synteny_left == (maf.MAF_CONTIG_STATUS, 0) check_component(b.components[2], src="unknown_1", start=98892-150-3, size=3, strand="+", src_size=98892, text="---AAT---") check_component(b.components[3], src="unknown_2", start=1200-12-1000, size=1000, strand="-", src_size=1200, text=None) assert b.components[3].empty assert b.components[3].synteny_empty == maf.MAF_INSERT_STATUS def test_column_iter(): expected = [['A', 'A', '-'], ['C', 'C', '-'], ['A', 'A', '-'], ['-', 'A', 'A'], ['T', 'T', 'T'], ['T', 'T', 'T'], ['A', 'G', '-'], ['C', 'C', '-'], ['T', 'T', '-']] for i, c in enumerate(complex_maf.column_iter()): assert c == expected[i] def test_remove_all_gap_column(): complex_maf_gap = align.Alignment() complex_maf_gap.score = "7009" complex_maf_gap.components.append(align.Component(src="human_hoxa", start=100, size=8, strand="+", src_size=100257, text="-ACA--TTACT")) complex_maf_gap.components.append(align.Component(src="horse_hoxa", start=120, size=9, strand="-", src_size=98892, text="-ACA-ATTGCT")) complex_maf_gap.components[-1].synteny_left = (maf.MAF_NEW_STATUS, 0) complex_maf_gap.components[-1].synteny_right = 
(maf.MAF_CONTIG_STATUS, 0) complex_maf_gap.components.append(align.Component(src="unknown_1", start=150, size=3, strand="-", src_size=98892, text="-----ATT---")) complex_maf_gap.components.append(align.Component(src="unknown_2", start=12, size=1000, strand="+", src_size=1200, text=None)) complex_maf_gap.components[-1].empty = True complex_maf_gap.components[-1].synteny_empty = maf.MAF_INSERT_STATUS complex_maf_gap.text_size = 11 complex_maf_gap.remove_all_gap_columns() assert complex_maf_gap == complex_maf def test_read_with_synteny(): reader = maf.Reader(StringIO(test_maf_2), parse_e_rows=True) a = next(reader) check_component(a.components[0], "hg17.chr1", 2005, 34, "+", 245522847, "TGTAACTTAATACCACAACCAGGCATAGGGG--AAA-------------") check_component(a.components[1], "rheMac2.chr11", 9625228, 31, "+", 134511895, "TGTAACCTCTTACTGCAACAAGGCACAGGGG------------------") print(a.components[1].synteny_left) assert a.components[1].synteny_left == (maf.MAF_CONTIG_STATUS, 0) assert a.components[1].synteny_right == (maf.MAF_INSERT_STATUS, 1678) rat = a.get_component_by_src_start("rn3.") check_component(rat, "rn3.chr4", 29161032, 1524, "-", 187371129, None) assert rat.synteny_empty == maf.MAF_INSERT_STATUS def test_write_with_synteny(): reader = maf.Reader(StringIO(test_maf_2), parse_e_rows=True) a = next(reader) val = StringIO() writer = maf.Writer(val, {'scoring': 'foobar'}) writer.write(a) actual = val.getvalue() expected = """##maf version=1 scoring=foobar a score=3656.0 s hg17.chr1 2005 34 + 245522847 TGTAACTTAATACCACAACCAGGCATAGGGG--AAA------------- s rheMac2.chr11 9625228 31 + 134511895 TGTAACCTCTTACTGCAACAAGGCACAGGGG------------------ i rheMac2.chr11 C 0 I 1678 s panTro1.chr1 2014 34 + 229575298 TGTAACTTAATACCACAACCAGGCATGGGGG--AAA------------- i panTro1.chr1 C 0 C 0 s bosTau2.chr5 64972365 47 + 76426644 TCCAGCCATGTGTTGTGATCAG--CCAGGGGCTAAAGCCATGGCGGTAG i bosTau2.chr5 C 0 I 1462 s canFam2.chr27 45129665 31 + 48908698 TTTGACTCTGTGCTCTTATCAGGCCCAAGGG------------------ i canFam2.chr27 C 0 I 1664 e danRer3.chr18 2360867 428 + 50308305 I e oryCun1.scaffold_139397 643 1271 - 4771 I e loxAfr1.scaffold_5603 58454 1915 + 68791 I e echTel1.scaffold_212365 4641 1430 + 9822 I e echTel1.scaffold_212365 4641 1430 + 9822 I e rn3.chr4 29161032 1524 - 187371129 I e mm7.chr6 28091695 3290 - 149646834 I """ # noqa: W291 print(actual) print("---") print(expected) assert actual == expected def check_component(c, src, start, size, strand, src_size, text): assert c.src == src assert c.start == start assert c.size == size assert c.strand == strand assert c.src_size == src_size assert c.text == text bx-python-0.8.13/lib/bx/align/score.py000066400000000000000000000271561415666465100175220ustar00rootroot00000000000000""" Support for scoring alignments using arbitrary scoring matrices, arbitrary alphabets, and affine gap penalties. 
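
A rough usage sketch, scoring two aligned (gapped) texts with the
bundled HOXD70-style matrix ``hox70`` defined at the bottom of this
module::

    from bx.align.score import hox70
    score = hox70.score_texts("ACA-TTACT", "ACAATTGCT")

Gap penalties are positive numbers subtracted from the score (for
``hox70``, a gap open of 400 and a gap extend of 30).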
""" from numpy import ( float32, int32, ones, zeros ) class ScoringScheme: # note that gap_open and gap_extend are penalties, which means you should make them positive def __init__(self, gap_open, gap_extend, default=-100, alphabet1="ACGT", alphabet2=None, gap1="-", gap2=None, text1_range=128, text2_range=None, typecode=int32): if text2_range is None: text2_range = text1_range if alphabet2 is None: alphabet2 = alphabet1 if gap2 is None: gap2 = gap1 # (scheme with gap1=gap2=None is legit) if isinstance(alphabet1, str): alphabet1 = [ch for ch in alphabet1] if isinstance(alphabet2, str): alphabet2 = [ch for ch in alphabet2] self.table = ones((text1_range, text2_range), typecode) self.table *= default self.gap_open = gap_open self.gap_extend = gap_extend self.gap1 = gap1 self.gap2 = gap2 self.alphabet1 = alphabet1 self.alphabet2 = alphabet2 # private _set_score and _get_score allow subclasses to override them to # implement a different underlying table object def _set_score(self, a_b_pair, val): (a, b) = a_b_pair self.table[a, b] = val def _get_score(self, a_b_pair): (a, b) = a_b_pair return self.table[a, b] def set_score(self, a, b, val, foldcase1=False, foldcase2=False): self._set_score((a, b), val) if foldcase1: aCh = chr(a) if (aCh.isupper()): aa = ord(aCh.lower()) elif (aCh.islower()): aa = ord(aCh.upper()) else: foldcase1 = False if foldcase2: bCh = chr(b) if (bCh.isupper()): bb = ord(bCh.lower()) elif (bCh.islower()): bb = ord(bCh.upper()) else: foldcase2 = False if foldcase1 and foldcase2: self._set_score((aa, b), val) self._set_score((a, bb), val) self._set_score((aa, bb), val) elif foldcase1: self._set_score((aa, b), val) elif foldcase2: self._set_score((a, bb), val) def score_alignment(self, a): return score_alignment(self, a) def score_texts(self, text1, text2): return score_texts(self, text1, text2) def __str__(self): isDna1 = "".join(self.alphabet1) == "ACGT" isDna2 = "".join(self.alphabet2) == "ACGT" labelRows = not (isDna1 and isDna2) width = 3 for a in self.alphabet1: for b in self.alphabet2: score = self._get_score((ord(a), ord(b))) if (isinstance(score, float)): s = "%8.6f" % score else: s = "%s" % score if (len(s)+1 > width): width = len(s)+1 lines = [] line = [] if labelRows: if isDna1: line.append(" ") else: line.append(" ") for b in self.alphabet2: if isDna2: s = b else: s = "%02X" % ord(b) line.append("%*s" % (width, s)) lines.append(("".join(line))+"\n") for a in self.alphabet1: line = [] if labelRows: if isDna1: line.append(a) else: line.append("%02X" % ord(a)) for b in self.alphabet2: score = self._get_score((ord(a), ord(b))) if (isinstance(score, float)): s = "%8.6f" % score else: s = "%s" % score line.append("%*s" % (width, s)) lines.append(("".join(line))+"\n") return "".join(lines) def read_scoring_scheme(f, gap_open, gap_extend, gap1="-", gap2=None, **kwargs): """ Initialize scoring scheme from a file containint a blastz style text blob. f can be either a file or the name of a file. """ close_it = False if (isinstance(f, str)): f = open(f) close_it = True ss = build_scoring_scheme("".join([line for line in f]), gap_open, gap_extend, gap1=gap1, gap2=gap2, **kwargs) if (close_it): f.close() return ss def build_scoring_scheme(s, gap_open, gap_extend, gap1="-", gap2=None, **kwargs): """ Initialize scoring scheme from a blastz style text blob, first line specifies the bases for each row/col, subsequent lines contain the corresponding scores. Slaw extensions allow for unusual and/or asymmetric alphabets. Symbols can be two digit hex, and each row begins with symbol. 
Note that a row corresponds to a symbol in text1 and a column to a symbol in text2. examples: blastz slaw A C G T 01 02 A C G T 91 -114 -31 -123 01 200 -200 -50 100 -50 100 -114 100 -125 -31 02 -200 200 100 -50 100 -50 -31 -125 100 -114 -123 -31 -114 91 """ # perform initial parse to determine alphabets and locate scores bad_matrix = "invalid scoring matrix" s = s.rstrip("\n") lines = s.split("\n") rows = [] symbols2 = lines.pop(0).split() symbols1 = None rows_have_syms = False a_la_blastz = True for i, line in enumerate(lines): row_scores = line.split() if len(row_scores) == len(symbols2): # blastz-style row if symbols1 is None: if len(lines) != len(symbols2): raise bad_matrix symbols1 = symbols2 elif (rows_have_syms): raise bad_matrix elif len(row_scores) == len(symbols2) + 1: # row starts with symbol if symbols1 is None: symbols1 = [] rows_have_syms = True a_la_blastz = False elif not rows_have_syms: raise bad_matrix symbols1.append(row_scores.pop(0)) else: raise bad_matrix rows.append(row_scores) # convert alphabets from strings to characters try: alphabet1 = [sym_to_char(sym) for sym in symbols1] alphabet2 = [sym_to_char(sym) for sym in symbols2] except ValueError: raise bad_matrix if (alphabet1 != symbols1) or (alphabet2 != symbols2): a_la_blastz = False if a_la_blastz: alphabet1 = [ch.upper() for ch in alphabet1] alphabet2 = [ch.upper() for ch in alphabet2] # decide if rows and/or columns should reflect case if a_la_blastz: foldcase1 = foldcase2 = True else: foldcase1 = "".join(alphabet1) == "ACGT" foldcase2 = "".join(alphabet2) == "ACGT" # create appropriately sized matrix text1_range = text2_range = 128 if ord(max(alphabet1)) >= 128: text1_range = 256 if ord(max(alphabet2)) >= 128: text2_range = 256 typecode = int32 for i, row_scores in enumerate(rows): for j, score in enumerate(map(int_or_float, row_scores)): if isinstance(score, float): typecode = float32 if isinstance(gap_open, float): typecode = float32 if isinstance(gap_extend, float): typecode = float32 ss = ScoringScheme(gap_open, gap_extend, alphabet1=alphabet1, alphabet2=alphabet2, gap1=gap1, gap2=gap2, text1_range=text1_range, text2_range=text2_range, typecode=typecode, **kwargs) # fill matrix for i, row_scores in enumerate(rows): for j, score in enumerate(map(int_or_float, row_scores)): ss.set_score(ord(alphabet1[i]), ord(alphabet2[j]), score) if foldcase1 and foldcase2: ss.set_score(ord(alphabet1[i].lower()), ord(alphabet2[j].upper()), score) ss.set_score(ord(alphabet1[i].upper()), ord(alphabet2[j].lower()), score) ss.set_score(ord(alphabet1[i].lower()), ord(alphabet2[j].lower()), score) elif foldcase1: ss.set_score(ord(alphabet1[i].lower()), ord(alphabet2[j]), score) elif foldcase2: ss.set_score(ord(alphabet1[i]), ord(alphabet2[j].lower()), score) return ss def int_or_float(s): try: return int(s) except ValueError: return float(s) # convert possible two-char symbol to a single character def sym_to_char(sym): if len(sym) == 1: return sym elif len(sym) != 2: raise ValueError else: return chr(int(sym, base=16)) def score_alignment(scoring_scheme, a): score = 0 ncomps = len(a.components) for i in range(ncomps): for j in range(i+1, ncomps): score += score_texts(scoring_scheme, a.components[i].text, a.components[j].text) return score def score_texts(scoring_scheme, text1, text2): rval = 0 last_gap_a = last_gap_b = False for i in range(len(text1)): a = text1[i] b = text2[i] # Ignore gap/gap pair if a == scoring_scheme.gap1 and b == scoring_scheme.gap2: continue # Gap in first species elif a == scoring_scheme.gap1: rval -= 
scoring_scheme.gap_extend if not last_gap_a: rval -= scoring_scheme.gap_open last_gap_a = True last_gap_b = False # Gap in second species elif b == scoring_scheme.gap2: rval -= scoring_scheme.gap_extend if not last_gap_b: rval -= scoring_scheme.gap_open last_gap_a = False last_gap_b = True # Aligned base else: rval += scoring_scheme._get_score((ord(a), ord(b))) last_gap_a = last_gap_b = False return rval def accumulate_scores(scoring_scheme, text1, text2, skip_ref_gaps=False): """ Return cumulative scores for each position in alignment as a 1d array. If `skip_ref_gaps` is False positions in returned array correspond to each column in alignment, if True they correspond to each non-gap position (each base) in text1. """ if skip_ref_gaps: rval = zeros(len(text1) - text1.count(scoring_scheme.gap1)) else: rval = zeros(len(text1)) score = 0 pos = 0 last_gap_a = last_gap_b = False for i in range(len(text1)): a = text1[i] b = text2[i] # Ignore gap/gap pair if a == scoring_scheme.gap1 and b == scoring_scheme.gap2: continue # Gap in first species elif a == scoring_scheme.gap1: score -= scoring_scheme.gap_extend if not last_gap_a: score -= scoring_scheme.gap_open last_gap_a = True last_gap_b = False # Gap in second species elif b == scoring_scheme.gap2: score -= scoring_scheme.gap_extend if not last_gap_b: score -= scoring_scheme.gap_open last_gap_a = False last_gap_b = True # Aligned base else: score += scoring_scheme._get_score((ord(a), ord(b))) last_gap_a = last_gap_b = False if not(skip_ref_gaps) or a != scoring_scheme.gap1: rval[pos] = score pos += 1 return rval hox70 = build_scoring_scheme(""" A C G T 91 -114 -31 -123 -114 100 -125 -31 -31 -125 100 -114 -123 -31 -114 91 """, 400, 30) bx-python-0.8.13/lib/bx/align/score_tests.py000066400000000000000000000071661415666465100207430ustar00rootroot00000000000000""" Tests for `bx.align.score`. 
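Covers scoring of raw texts and of full alignment blocks (checked
against the scores recorded in the MAF test data), cumulative score
accumulation, and both non-symmetric and asymmetric scoring schemes.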
""" import unittest from io import StringIO from numpy import ( allclose, array, cumsum, ) import bx.align.maf import bx.align.score aligns = [("CCACTAGTTTTTAAATAATCTACTATCAAATAAAAGATTTGTTAATAATAAATTTTAAATCATTAACACTT", "CCATTTGGGTTCAAAAATTGATCTATCA----------TGGTGGATTATTATTTAGCCATTAAGGACAAAT", -111), ("CCACTAGTTTTTAAATAATCTAC-----AATAAAAGATTTGTTAATAAT---AAATTTTAAATCATTAA-----CACTT", "CCATTTGGGTTCAAAAATTGATCTATCA----------TGGTGGAT---TATTATTT-----AGCCATTAAGGACAAAT", -3626), ("CCACTAGTTTTTGATTC", "CCATTTGGGTTC-----", -299), ("CTTAGTTTTTGATCACC", "-----CTTGGGTTTACC", -299), ("gggaattgaacaatgagaacacatggacacaggaaggggaacatcacacacc----------ggggcctgttgtggggtggggggaag", "ggaactagaacaagggagacacatacaaacaacaacaacaacaacacagcccttcccttcaaagagcttatagtctgatggaggagag", 1690)] mafs = """##maf a score=2883.0 s hg17.chr1 6734 30 + 245522847 CTACCTCAGTGTGGAAGGTGGGCAGTTCTG s rheMac1.SCAFFOLD71394 9319 30 - 13789 CTACCTCAGTGTGGAAGGTGGGCAGTTCTG a score=8167.0 s hg17.chr1 41401 40 + 245522847 TGTGTGATTAATGCCTGAGACTGTGTGAAGTAAGAGATGG s panTro1.chr1 49673 40 + 229575298 TGCGTGATTAATGCCTGAGATTGTGTGAAGTAAAAGATGG s rheMac1.SCAFFOLD45837 26063 33 - 31516 TGTGTGATTAATGCCTGAGATTGTGTGAAGTAA------- """ nonsymm_scheme = bx.align.score.build_scoring_scheme(""" A C G T 91 0 -31 -123 -114 100 -125 -31 -31 -125 100 -114 -123 -31 -114 91 """, 400, 30) aligns_for_nonsymm_scheme = [("AAAACCCCGGGGTTTT", "ACGTACGTACGTACGT", -580)] asymm_scheme = bx.align.score.build_scoring_scheme(""" 01 02 A C G T 01 200 -200 -50 100 -50 100 02 -200 200 100 -50 100 -50 """, 0, 0, gap1='\x00') aligns_for_asymm_scheme = [("\x01\x01\x01\x01\x01\x01", "ACGT\x01\x02", 100)] class BasicTests(unittest.TestCase): def test_scoring_text(self): ss = bx.align.score.hox70 for t1, t2, score in aligns: self.assertEqual(bx.align.score.score_texts(ss, t1, t2), score) def test_align(self): ss = bx.align.score.hox70 for block in bx.align.maf.Reader(StringIO(mafs)): self.assertEqual(bx.align.score.score_alignment(ss, block), float(block.score)) def test_accumulate(self): ss = bx.align.score.hox70 self.assertTrue(allclose( bx.align.score.accumulate_scores(ss, "-----CTTT", "CTTAGTTTA"), cumsum(array([-430, -30, -30, -30, -30, -31, 91, 91, -123])) )) self.assertTrue(allclose( bx.align.score.accumulate_scores(ss, "-----CTTT", "CTTAGTTTA", skip_ref_gaps=True), cumsum(array([-581, 91, 91, -123])) )) def test_nonsymm_scoring(self): ss = nonsymm_scheme for t1, t2, score in aligns_for_nonsymm_scheme: self.assertEqual(bx.align.score.score_texts(ss, t1, t2), score) def test_asymm_scoring(self): ss = asymm_scheme for t1, t2, score in aligns_for_asymm_scheme: self.assertEqual(bx.align.score.score_texts(ss, t1, t2), score) bx-python-0.8.13/lib/bx/align/sitemask/000077500000000000000000000000001415666465100176425ustar00rootroot00000000000000bx-python-0.8.13/lib/bx/align/sitemask/__init__.py000066400000000000000000000003041415666465100217500ustar00rootroot00000000000000""" Tools for masking out specific sites in aligments by various criteria, for example masking CpG sites or sites with low sequence quality. """ from bx.align.sitemask.core import * # noqa: F40 bx-python-0.8.13/lib/bx/align/sitemask/_cpg.pyx000066400000000000000000000056701415666465100213240ustar00rootroot00000000000000""" Pyrex/C extension for quickly finding potential CpG sites in pairs of sequences. 
""" from cpython.version cimport PY_MAJOR_VERSION cdef extern from "find_cpg.h": int next_cpg( char * sp1, char * sp2, int start) int next_cpg_restricted( char * sp1, char *sp2, int start) int next_non_cpg( char * sp1, char * sp2, int start) def find_cpg( sp1, sp2, start ): cdef char* a cdef char* b cdef int pos if PY_MAJOR_VERSION >= 3: bytes_sp1, bytes_sp2 = sp1.encode(), sp2.encode() else: bytes_sp1, bytes_sp2 = sp1, sp2 a = bytes_sp1 b = bytes_sp2 pos = start if pos > len(sp1): return -1 return next_cpg( a, b, pos ) def find_cpg_restricted( sp1, sp2, start ): cdef char* a cdef char* b cdef int pos if PY_MAJOR_VERSION >= 3: bytes_sp1, bytes_sp2 = sp1.encode(), sp2.encode() else: bytes_sp1, bytes_sp2 = sp1, sp2 a = bytes_sp1 b = bytes_sp2 pos = start if pos > len(sp1): return -1 return next_cpg_restricted( a, b, pos ) def find_non_cpg( sp1, sp2, start ): cdef char* a cdef char* b cdef int pos if PY_MAJOR_VERSION >= 3: bytes_sp1, bytes_sp2 = sp1.encode(), sp2.encode() else: bytes_sp1, bytes_sp2 = sp1, sp2 a = bytes_sp1 b = bytes_sp2 pos = start if pos > len(sp1): return -1 return next_non_cpg( a, b, pos ) def list_cpg( sp1, sp2 ): cdef char * a cdef char * b cdef int start if PY_MAJOR_VERSION >= 3: bytes_sp1, bytes_sp2 = sp1.encode(), sp2.encode() else: bytes_sp1, bytes_sp2 = sp1, sp2 a = bytes_sp1 b = bytes_sp2 start = 0 cpglist = list() while start > -1 and start < len(sp1): start = next_cpg( a, b, start ) if start == -1: break cpglist.append(start) start = start + 1 return cpglist def list_cpg_restricted( sp1, sp2 ): cdef char * a cdef char * b cdef int start if PY_MAJOR_VERSION >= 3: bytes_sp1, bytes_sp2 = sp1.encode(), sp2.encode() else: bytes_sp1, bytes_sp2 = sp1, sp2 a = bytes_sp1 b = bytes_sp2 start = 0 cpglist = list() while start > -1 and start < len(sp1): start = next_cpg_restricted( a, b, start ) if start == -1: break cpglist.append(start) start = start + 1 return cpglist def list_non_cpg( sp1, sp2 ): cdef char * a cdef char * b cdef int start if PY_MAJOR_VERSION >= 3: bytes_sp1, bytes_sp2 = sp1.encode(), sp2.encode() else: bytes_sp1, bytes_sp2 = sp1, sp2 a = bytes_sp1 b = bytes_sp2 start = 0 cpglist = list() while start > -1 and start < len(sp1): start = next_non_cpg( a, b, start ) if start == -1: break cpglist.append(start) start = start + 1 return cpglist def remove_gaps( sp, cpglist ): for item in cpglist: if sp[item] == '-': cpglist.remove(item) return cpglist bx-python-0.8.13/lib/bx/align/sitemask/core.py000066400000000000000000000021561415666465100211500ustar00rootroot00000000000000""" Base classes for site maskers. """ from bx.filter import ( Filter, Pipeline, ) class Masker(Filter): def __init__(self, **kwargs): self.masked = 0 self.total = 0 Exception("Abstract class") class MaskPipeline(Pipeline): """ MaskPipeline implements a Pipeline through which alignments can be pushed and masked. Pipelines can be aggregated. 
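
    Calling the pipeline with an alignment block pushes the block through
    each registered masker in turn; every masker keeps its own ``masked``
    count, and the pipeline's ``masked`` property reports the total
    across all of them.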
""" def get_masked(self): masked = 0 for masker in self.pipeline: try: masked += masker.masked except AttributeError: pass return masked masked = property(fget=get_masked) def __call__(self, block): if not block: return # push alignment block through all filters self.total += len(block.components[0].text) for masker in self.filters: if not block: return try: masker.__call__ except AttributeError: raise Exception("Masker in pipeline does not implement \"filter( self, block )\".") masker(block) bx-python-0.8.13/lib/bx/align/sitemask/cpg.py000066400000000000000000000053471415666465100207760ustar00rootroot00000000000000""" Support for masking potential CpG sites in *pairwise* alignments. """ from bx.align.sitemask import Masker from ._cpg import ( list_cpg, list_cpg_restricted, list_non_cpg, ) # Restricted. Only mask out sites that are defitely CpG class Restricted(Masker): def __init__(self, mask='?'): self.mask = mask self.masked = 0 self.total = 0 def __call__(self, block): if not block: return block if len(block.components) < 2: return cpglist = list_cpg_restricted( block.components[0].text.upper(), block.components[1].text.upper()) # now we have a fast list of CpG columns, iterate/mask self.masked += len(cpglist) self.total += len(block.components[0].text) for component in block.components: component.text = mask_columns(cpglist, component.text, self.mask) return block # Inclusive. Mask out all sites that are not non-CpG sites. class Inclusive(Masker): def __init__(self, mask='?'): self.mask = mask self.masked = 0 self.total = 0 def __call__(self, block): if not block: return block if len(block.components) < 2: return cpglist = list_cpg( block.components[0].text.upper(), block.components[1].text.upper()) self.masked += len(cpglist) self.total += len(block.components[0].text) for component in block.components: component.text = mask_columns(cpglist, component.text, self.mask) return block # Mak nonCpG sites class nonCpG(Masker): def __init__(self, mask='?'): self.mask = mask self.masked = 0 self.total = 0 def __call__(self, block): if not block: return block if len(block.components) < 2: return noncpglist = list_non_cpg( block.components[0].text.upper(), block.components[1].text.upper()) # now we have a fast list of non-CpG columns, iterate/mask self.masked += len(noncpglist) self.total += len(block.components[0].text) for component in block.components: component.text = mask_columns(noncpglist, component.text, self.mask) return block def mask_columns(masklist, text, mask): templist = list() for position in masklist: if text[position] != "-": templist.append(position) templist.append(len(text)) # Add the end of the text # cut string newtext = list() c = 0 for position in templist: newtext.append(text[c:position]) c = position + 1 # Gaps have len = 1 joinedtext = mask.join(newtext) return joinedtext bx-python-0.8.13/lib/bx/align/sitemask/find_cpg.c000066400000000000000000000032361415666465100215630ustar00rootroot00000000000000#include /* Author: Ian N Schenck Version: 7/21/2006 Most of this was ripped out of James Taylor's never-released code, and plugged in here for use in Python. Slight modifications were made where I saw fit. It looks as if CpG's are typically not next to gaps. 
*/ static inline int is_cpg( char * sp1, char * sp2, int pos) { if ( pos < 1 ) return 0; if ( sp1[pos + 1] == '\0' ) return 0; if ( sp1[pos - 1] != 'C' && sp2[pos - 1] != 'C' && sp1[pos + 1] == 'G' && sp2[pos + 1] == 'G' && (sp1[pos] == 'C' || sp2[pos] == 'C') ) return 1; if ( sp1[pos + 1] != 'G' && sp2[pos + 1] != 'G' && sp1[pos - 1] == 'C' && sp2[pos - 1] == 'C' && (sp1[pos] == 'G' || sp2[pos] == 'G') ) return 1; return 0; } static inline int is_non_cpg( char * sp1, char * sp2, int pos) { // first one can't assuredly be cpg if ( pos < 1 ) return 1; if ( sp1[pos + 1] == '\0' ) return 0; return ( sp1[pos - 1] != 'C' && sp2[pos - 1] != 'C' && sp1[pos + 1] != 'G' && sp2[pos + 1] != 'G' ); } static inline int is_cpg_restricted( char * sp1, char * sp2, int pos ) { return !is_non_cpg( sp1, sp2, pos ); } int next( char * sp1, char * sp2, int start, int (*func)(char*,char*,int)) { while( sp1[start+1] != '\0') { if( func(sp1, sp2, start) ) return start; start++; } // nothing found return -1; } int next_cpg( char * sp1, char * sp2, int start) { return next( sp1, sp2, start, &is_cpg); } int next_cpg_restricted( char * sp1, char *sp2, int start) { return next( sp1, sp2, start, &is_cpg_restricted ); } int next_non_cpg( char * sp1, char * sp2, int start) { return next( sp1, sp2, start, &is_non_cpg); } bx-python-0.8.13/lib/bx/align/sitemask/find_cpg.h000066400000000000000000000004411415666465100215630ustar00rootroot00000000000000#ifndef __find_cpg__ #define __find_cpg__ int next( char * sp1, char * sp2, int start, int (*func)(char*,char*,int)); int next_cpg( char * sp1, char * sp2, int start); int next_cpg_restricted( char * sp1, char *sp2, int start); int next_non_cpg( char * sp1, char * sp2, int start); #endif bx-python-0.8.13/lib/bx/align/sitemask/quality.py000066400000000000000000000130121415666465100217010ustar00rootroot00000000000000""" Support for masking out sites in alignments based on sequence quality. Both simple masking of regions below some threshold and masking using the neighborhood quality standard (NQS) are supported. Uses sequence quality values stored in a `bx.binned_array.FileBinnedArray`. """ from bx.align.sitemask import Masker from bx.binned_array import FileBinnedArray # This class implements simple rules for masking quality, if base < # minqual, mask class Simple(Masker): # keys should be: # qualspecies: dictionary of species as key, lengths # dict by chromosome or chromosome list as value # qualfiles: prefix for quality file for each species in qualspecies # mask: mask character (default is '?') # minqual: minimum quality # cache: optional, but sets the number of megabytes allowed in cache per quality masked species def __init__(self, qualfiles=None, qualspecies=None, minqual=None, mask="?", cache=100): if not qualfiles: raise Exception("No quality files.") if not qualspecies: raise Exception("No species dictionary.") if not minqual: raise Exception("No minimum quality specified.") self.mask = "?" self.minqual = minqual self.mask = mask self.total = 0 self.masked = 0 self.qualfiles = qualfiles self.qualspecies = qualspecies self.cache = cache * 2 # typical bin size is 512K # load quality files into FileBinnedArray self.qualities = {} for species, qualfile in self.qualfiles.items(): specdict = {} for chrom in self.qualspecies[species]: specdict[chrom] = FileBinnedArray( open(qualfile + "." 
+ chrom + ".bqv", "rb"), cache=self.cache/len(qualfiles)) self.qualities[species] = specdict def __call__(self, block): if not block: return for qualspec in self.qualities: comp = block.get_component_by_src_start(qualspec) if not comp: continue chrom = comp.src.split(".")[1] start, end = comp.get_forward_strand_start(), comp.get_forward_strand_end() # get quality slice, for + strand qual = self.qualities[qualspec][chrom][start:end] x = 0 while start+x < end: self.total += 1 # got the column in the alignment for this particular base if qual[x] < self.minqual: col = comp.coord_to_col(start+x) self.masked += 1 for component in block.components: if component.text[col] != "-": component.text = component.text[0:col] + \ self.mask + \ component.text[col+1:len(component.text)] # iterate through quality x += 1 return block class NQS(Masker): # keys should be: # qualspecies: dictionary of species as key, lengths # dict by chromosome or chromosome list as value # qualfiles: prefix for quality file for each species in qualspecies # mask: mask character (default is '?') # minqual: minimum quality # neighborqual: neighborhood minimum quality (bases within 5 bps are masked) # cache: optional, but sets the number of megabytes allowed in cache per quality masked species def __init__(self, qualfiles=None, qualspecies=None, minqual=None, mask="?", cache=100): if not qualfiles: raise Exception("No quality files.") if not qualspecies: raise Exception("No species dictionary.") if not minqual: raise Exception("No minimum quality specified.") self.mask = "?" self.minqual = minqual self.mask = mask self.total = 0 self.masked = 0 self.qualfiles = qualfiles self.qualspecies = qualspecies self.cache = cache * 2 # typical bin size is 512K # load quality files into FileBinnedArray self.qualities = {} for species, qualfile in self.qualfiles.items(): specdict = {} for chrom in self.qualspecies[species]: specdict[chrom] = FileBinnedArray( open(qualfile + "." + chrom + ".bqv", "rb"), cache=self.cache/len(qualfiles)) self.qualities[species] = specdict def __call__(self, block): if not block: return for qualspec in self.qualities: comp = block.get_component_by_src_start(qualspec) chrom = comp.src.split(".")[1] start, end = comp.get_forward_strand_start(), comp.get_forward_strand_end() # get quality slice, for + strand qual = self.qualities[qualspec][chrom][start:end] x = 0 while start+x < end: self.total += 1 # got the column in the alignment for this particular base if qual[x] < self.minqual: col = comp.coord_to_col(start+x) self.masked += 1 for component in block.components: if component.text[col] != "-": component.text = component.text[0:col] + \ self.mask + \ component.text[col+1:len(component.text)] # iterate through quality x += 1 return block bx-python-0.8.13/lib/bx/align/sitemask/sitemask_tests.py000066400000000000000000000050021415666465100232530ustar00rootroot00000000000000""" Tests for `bx.align.maf.sitemask`. """ import tempfile from io import StringIO import bx.align.maf from . 
import cpg test_maf_cpg = """##maf version=1 scoring=none a score=0 s apple 34 64 + 110 AGGGA---GTTCGTCACT------GTCGTAAGGGTTCAGA--CTGTCTATGTATACACAAGTTGTGTTGCA--ACCG s orange 19 61 - 100 AGGGATGCGTT--TCACTGCTATCGTCGTA----TTCAGACTTCG-CTATCT------GAGTTGT---GCATTACCG """ cpg_inclusive_result = [ "##maf,version=1", "a,score=0", "s,apple,34,64,+,110,AGGGA---GTTCGTCACT------GT##TAAGGGTTCAGA--CTGTCTATGTATACACAAGTTGTGTTGCA--ACCG", "s,orange,19,61,-,100,AGGGATG#GTT--TCACTGCTAT#GT##TA----TTCAGACTTCG-CTATCT------GAGTTGT---GCATTACCG" ] cpg_restricted_result = [ "##maf,version=1", "a,score=0", "s,apple,34,64,+,110,A##GA---#TT##TC#C#------#T##TA###GTTC#GA--C##TC#A#G#ATAC####GT#G#GT#GC#--AC#G", "s,orange,19,61,-,100,A##GA#G##TT--TC#C#GC#AT##T##TA----TTC#GAC#T##-C#A#C#------##GT#G#---GC#TTAC#G" ] noncpg_result = [ "##maf,version=1", "a,score=0", "s,apple,34,64,+,110,#GG##---G##CG##A#T------G#CG##AGG####A##--#TG##T#T#T####ACAA##T#T##T##A--##CG", "s,orange,19,61,-,100,#GG##T#CG##--##A#T##T##CG#CG##----###A###T#CG-#T#T#T------GA##T#T---##A####CG" ] def test_cpg_inclusive(): reader = bx.align.maf.Reader(StringIO(test_maf_cpg)) out = tempfile.NamedTemporaryFile('w') writer = bx.align.maf.Writer(out) cpgfilter = cpg.Inclusive(mask='#') cpgfilter.run(reader, writer.write) out.seek(0) j = 0 for line in open(out.name): line = line.strip() if not(line): continue assert cpg_inclusive_result[j] == ",".join(line.split()) j += 1 def test_cpg_restricted(): reader = bx.align.maf.Reader(StringIO(test_maf_cpg)) out = tempfile.NamedTemporaryFile('w') writer = bx.align.maf.Writer(out) cpgfilter = cpg.Restricted(mask='#') cpgfilter.run(reader, writer.write) out.seek(0) j = 0 for line in open(out.name): line = line.strip() if not(line): continue assert cpg_restricted_result[j] == ",".join(line.split()) j += 1 def test_non_cpg(): reader = bx.align.maf.Reader(StringIO(test_maf_cpg)) out = tempfile.NamedTemporaryFile('w') writer = bx.align.maf.Writer(out) cpgfilter = cpg.nonCpG(mask='#') cpgfilter.run(reader, writer.write) out.seek(0) j = 0 for line in open(out.name): line = line.strip() if not(line): continue assert noncpg_result[j] == ",".join(line.split()) j += 1 bx-python-0.8.13/lib/bx/align/tools/000077500000000000000000000000001415666465100171625ustar00rootroot00000000000000bx-python-0.8.13/lib/bx/align/tools/__init__.py000066400000000000000000000003201415666465100212660ustar00rootroot00000000000000""" Various utilities for working with `bx.align.Alignment` objects. """ from .chop import * # noqa: F40 from .fuse import * # noqa: F40 from .thread import * # noqa: F40 from .tile import * # noqa: F40 bx-python-0.8.13/lib/bx/align/tools/chop.py000066400000000000000000000020301415666465100204600ustar00rootroot00000000000000""" Support for chopping a list of alignment blocks to only the portion that intersects a particular interval. """ def chop_list(blocks, src, start, end): """ For each alignment block in the sequence `blocks`, chop out the portion of the block that overlaps the interval [`start`,`end`) in the component/species named `src`. 
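    For a reference component on the '-' strand the interval is first flipped
    into reverse-strand coordinates before slicing: with `src_size` 100, the
    forward-strand interval [10,30) corresponds to [70,90) on the reverse
    strand (i.e. [src_size - end, src_size - start)).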
""" new_blocks = [] for block in blocks: ref = block.get_component_by_src(src) # If the reference component is on the '-' strand we should complement the interval if ref.strand == '-': slice_start = max(ref.src_size - end, ref.start) slice_end = max(ref.src_size - start, ref.end) else: slice_start = max(start, ref.start) slice_end = min(end, ref.end) sliced = block.slice_by_component(ref, slice_start, slice_end) good = True for c in sliced.components: if c.size < 1: good = False if good: new_blocks.append(sliced) return new_blocks bx-python-0.8.13/lib/bx/align/tools/fuse.py000066400000000000000000000060321415666465100204770ustar00rootroot00000000000000""" Tools for fusing contiguous alignment blocks together. """ from copy import deepcopy def fuse_list(mafs): """ Try to fuse a list of blocks by progressively fusing each adjacent pair. """ last = None for m in mafs: if last is None: last = m else: fused = fuse(last, m) if fused: last = fused else: yield last last = m if last: yield last def fuse(m1, m2): """ Attempt to fuse two blocks. If they can be fused returns a new block, otherwise returns None. Example: >>> import bx.align.maf >>> block1 = bx.align.maf.from_string( ''' ... a score=0.0 ... s hg18.chr10 52686 44 + 135374737 GTGCTAACTTACTGCTCCACAGAAAACATCAATTCTGCTCATGC ... i hg18.chr10 N 0 C 0 ... s panTro1.chrUn_random 208115356 44 - 240967748 GTGCTAACTGACTGCTCCAGAGAAAACATCAATTCTGTTCATGT ... ''' ) >>> block2 = bx.align.maf.from_string( ''' ... a score=0.0 ... s hg18.chr10 52730 69 + 135374737 GCAGGTACAATTCATCAAGAAAGGAATTACAACTTCAGAAATGTGTTCAAAATATATCCATACTTTGAC ... i hg18.chr10 C 0 I 12 ... s panTro1.chrUn_random 208115400 69 - 240967748 GCAGCTACTATTCATCAAGAAAGGGATTACAACTTCAGAAATGTGTTCAAAGTGTATCCATACTTTGAT ... ''' ) >>> fused = fuse( block1, block2 ) >>> print(fused) a score=0.0 s hg18.chr10 52686 113 + 135374737 GTGCTAACTTACTGCTCCACAGAAAACATCAATTCTGCTCATGCGCAGGTACAATTCATCAAGAAAGGAATTACAACTTCAGAAATGTGTTCAAAATATATCCATACTTTGAC i hg18.chr10 N 0 I 12 s panTro1.chrUn_random 208115356 113 - 240967748 GTGCTAACTGACTGCTCCAGAGAAAACATCAATTCTGTTCATGTGCAGCTACTATTCATCAAGAAAGGGATTACAACTTCAGAAATGTGTTCAAAGTGTATCCATACTTTGAT """ # Check if the blocks are adjacent and easily fusable # return none if not. if len(m1.components) != len(m2.components): return None for c1, c2 in zip(m1.components, m2.components): if c1.src != c2.src: return None if c1.strand != c2.strand: return None if c1.end != c2.start: return None if c1.empty or c2.empty: return None # Try to fuse: n = deepcopy(m1) for c1, c2 in zip(n.components, m2.components): c1.text += c2.text c1.size += c2.size # Propagate the synteny right c1.synteny_right = c2.synteny_right n.text_size = len(n.components[0].text) return n class FusingAlignmentWriter: """ Wrapper for an alignment Writer which attempts to fuse adjacent blocks """ def __init__(self, maf_writer): self.maf_writer = maf_writer self.last = None def write(self, m): if not self.last: self.last = m else: fused = fuse(self.last, m) if fused: self.last = fused else: self.maf_writer.write(self.last) self.last = m def close(self): if self.last: self.maf_writer.write(self.last) self.maf_writer.close() bx-python-0.8.13/lib/bx/align/tools/thread.py000066400000000000000000000062031415666465100210040ustar00rootroot00000000000000""" Tools for "threading" out specific species from alignments (removing other species and fixing alignment text). 
""" from copy import deepcopy def thread(mafs, species): """ Restrict an list of alignments to a given list of species by: 1) Removing components for any other species 2) Remove any columns containing all gaps Example: >>> import bx.align.maf >>> block1 = bx.align.maf.from_string( ''' ... a score=4964.0 ... s hg18.chr10 52686 44 + 135374737 GTGCTAACTTACTGCTCCACAGAAAACATCAATTCTGCTCATGC ... s rheMac2.chr20 58163346 43 - 88221753 ATATTATCTTAACATTAAAGA-AGAACAGTAATTCTGGTCATAA ... s panTro1.chrUn_random 208115356 44 - 240967748 GTGCTAACTGACTGCTCCAGAGAAAACATCAATTCTGTTCATGT ... s oryCun1.scaffold_175207 85970 22 + 212797 ----------------------AAAATATTAGTTATCACCATAT ... s bosTau2.chr23 23894492 43 + 41602928 AAACTACCTTAATGTCACAGG-AAACAATGTATgctgctgctgc ... ''' ) >>> block2 = bx.align.maf.from_string( ''' ... a score=9151.0 ... s hg18.chr10 52730 69 + 135374737 GCAGGTACAATTCATCAAGAAAG-GAATTACAACTTCAGAAATGTGTTCAAAATATATCCATACTT-TGAC ... s oryCun1.scaffold_175207 85992 71 + 212797 TCTAGTGCTCTCCAATAATATAATAGATTATAACTTCATATAATTATGTGAAATATAAGATTATTTATCAG ... s panTro1.chrUn_random 208115400 69 - 240967748 GCAGCTACTATTCATCAAGAAAG-GGATTACAACTTCAGAAATGTGTTCAAAGTGTATCCATACTT-TGAT ... s rheMac2.chr20 58163389 69 - 88221753 ACACATATTATTTCTTAACATGGAGGATTATATCTT-AAACATGTGTGCaaaatataaatatatat-tcaa ... ''' ) >>> mafs = [ block1, block2 ] >>> threaded = [ t for t in thread( mafs, [ "hg18", "panTro1" ] ) ] >>> len( threaded ) 2 >>> print(threaded[0]) a score=0.0 s hg18.chr10 52686 44 + 135374737 GTGCTAACTTACTGCTCCACAGAAAACATCAATTCTGCTCATGC s panTro1.chrUn_random 208115356 44 - 240967748 GTGCTAACTGACTGCTCCAGAGAAAACATCAATTCTGTTCATGT >>> print(threaded[1]) a score=0.0 s hg18.chr10 52730 69 + 135374737 GCAGGTACAATTCATCAAGAAAGGAATTACAACTTCAGAAATGTGTTCAAAATATATCCATACTTTGAC s panTro1.chrUn_random 208115400 69 - 240967748 GCAGCTACTATTCATCAAGAAAGGGATTACAACTTCAGAAATGTGTTCAAAGTGTATCCATACTTTGAT """ for m in mafs: new_maf = deepcopy(m) new_components = get_components_for_species(new_maf, species) if new_components: new_maf.components = new_components new_maf.score = 0.0 new_maf.text_size = len(new_components[0].text) new_maf.remove_all_gap_columns() yield new_maf def get_components_for_species(alignment, species): """Return the component for each species in the list `species` or None""" # If the number of components in the alignment is less that the requested number # of species we can immediately fail if len(alignment.components) < len(species): return None # Otherwise, build an index of components by species, then lookup index = {c.src.split('.')[0]: c for c in alignment.components} try: return [index[s] for s in species] except Exception: return None bx-python-0.8.13/lib/bx/align/tools/tile.py000066400000000000000000000054061415666465100204760ustar00rootroot00000000000000""" Tools for tiling / projecting alignments onto an interval of a sequence. """ import bx.seq.nib def tile_interval(sources, index, ref_src, start, end, seq_db=None): """ Tile maf blocks onto an interval. The resulting block will span the interval exactly and contain the column from the highest scoring alignment at each position. 
    `sources`: list of sequence source names to include in final block
    `index`: an instance that can return maf blocks overlapping intervals
    `ref_src`: source name of the interval (i.e., hg17.chr7)
    `start`: start of interval
    `end`: end of interval
    `seq_db`: a mapping for source names in the reference species to nib files
    """
    # The first entry in `sources` must be from the same species as `ref_src`
    assert sources[0].split('.')[0] == ref_src.split('.')[0], \
        "{} != {}".format(sources[0].split('.')[0], ref_src.split('.')[0])
    base_len = end - start
    blocks = index.get(ref_src, start, end)
    # From low to high score
    blocks.sort(key=lambda t: t.score)
    mask = [-1] * base_len
    for i, block in enumerate(blocks):
        ref = block.get_component_by_src_start(ref_src)
        assert ref.strand == "+"
        slice_start = max(start, ref.start)
        slice_end = min(end, ref.end)
        for j in range(slice_start, slice_end):
            mask[j-start] = i
    tiled = []
    for i in range(len(sources)):
        tiled.append([])
    # `block_index` is the index of the highest scoring block covering the
    # chunk, or -1 if no alignment covers it
    for ss, ee, block_index in intervals_from_mask(mask):
        # Interval with no covering alignments
        if block_index < 0:
            # Get sequence if available, otherwise just use 'N'
            if seq_db:
                tiled[0].append(bx.seq.nib.NibFile(open(seq_db[ref_src])).get(start+ss, ee-ss))
            else:
                tiled[0].append("N" * (ee-ss))
            # Gaps in all other species
            for row in tiled[1:]:
                row.append("-" * (ee - ss))
        else:
            slice_start = start + ss
            slice_end = start + ee
            block = blocks[block_index]
            ref = block.get_component_by_src_start(ref_src)
            sliced = block.slice_by_component(ref, slice_start, slice_end)
            sliced = sliced.limit_to_species(sources)
            sliced.remove_all_gap_columns()
            for i, src in enumerate(sources):
                comp = sliced.get_component_by_src_start(src)
                if comp:
                    tiled[i].append(comp.text)
                else:
                    tiled[i].append("-" * sliced.text_size)
    return ["".join(t) for t in tiled]


def intervals_from_mask(mask):
    start = 0
    last = mask[0]
    for i in range(1, len(mask)):
        if mask[i] != last:
            yield start, i, last
            start = i
            last = mask[i]
    yield start, len(mask), last
bx-python-0.8.13/lib/bx/arrays/__init__.py
"""
Classes for working with arrays of data.
"""
bx-python-0.8.13/lib/bx/arrays/array_tree.pyx
from __future__ import division

__all__ = [ 'ArrayTree', 'FileArrayTreeDict', 'array_tree_dict_from_reader' ]

import numpy
from numpy import *
cimport numpy
cimport bx.arrays.wiggle

from bx.misc.binary_file import BinaryFileWriter, BinaryFileReader
from bx.misc.cdb import FileCDBDict

"""
Classes for storing binary data on disk in a tree structure that allows for
efficient sparse storage (when the data occurs in contiguous blocks), fast
access to a specific block of data, and fast access to summaries at different
resolutions.

On disk format
--------------

Blocks are stored contiguously on disk in level-order. Contents should always
be network byte order (big endian); however, this implementation will
byte-swap when reading if necessary.
File contents: - magic: uint32 - version: unit32 - array size: uint32 - block size: uint32 - array type: 4 chars (numpy typecode, currently only simple types represented by one char are supported) - Internal nodes in level order - Summary - count of valid values in each subtree : sizeof( dtype ) * block_size - frequencies: sizeof ( int32 ) * block_size - min of valid values in each subtree : sizeof( dtype ) * block_size - max of valid values in each subtree : sizeof( dtype ) * block_size - sum of valid values in each subtree : sizeof( dtype ) * block_size - sum of squares of valid values in each subtree : sizeof( dtype ) * block_size - File offsets of each child node: uint64 * block_size - Leaf nodes - data points: sizeof( dtype ) * block_size - Version 1 reads version 0 and version 1 """ ## Enhancement ideas: ## ## - Write markers of the number of blocks skipped between blocks. This would ## allow fast striding across data or summaries (use the indexes to get to ## the start of a block, then just read straight through). Would this help? ## ## - Compression for blocks? MAGIC = 0x310ec7dc VERSION = 1 NUM_SUMMARY_ARRAYS = 6 def array_tree_dict_from_reader( reader, sizes, default_size=2147483647, block_size=1000, no_leaves=False ): # Create empty array trees rval = {} ## for key, size in sizes.iteritems(): ## rval[ key ] = ArrayTree( size, 1000 ) # Fill last_chrom = None last_array_tree = None for chrom, start, end, _, val in reader: if chrom != last_chrom: if chrom not in rval: rval[chrom] = ArrayTree( sizes.get( chrom, default_size ), block_size, no_leaves=no_leaves ) last_array_tree = rval[chrom] last_array_tree.set_range( start, end, val ) return rval cdef class FileArrayTreeDict: """ Access to a file containing multiple array trees indexed by a string key. """ cdef object io cdef object cdb_dict def __init__( self, file ): self.io = io = BinaryFileReader( file, MAGIC ) assert (0 <= io.read_uint32() <= 1) # Check for version 0 or 1 self.cdb_dict = FileCDBDict( file, is_little_endian=io.is_little_endian ) def __getitem__( self, key ): offset = self.cdb_dict[key] offset = self.io.unpack( "L", offset.encode() )[0] self.io.seek( offset ) return FileArrayTree( self.io.file, self.io.is_little_endian ) @classmethod def dict_to_file( Class, dict, file, is_little_endian=True, no_leaves=False ): """ Writes a dictionary of array trees to a file that can then be read efficiently using this class. 
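        A minimal usage sketch (the file name here is hypothetical; assumes
        `tree` is an ArrayTree whose summaries have already been built with
        `tree.root.build_summary()`):

            with open( "trees.bin", "wb" ) as f:
                FileArrayTreeDict.dict_to_file( { "chr1": tree }, f )
            with open( "trees.bin", "rb" ) as f:
                chr1_tree = FileArrayTreeDict( f )[ "chr1" ]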
""" io = BinaryFileWriter( file, is_little_endian=is_little_endian ) # Write magic number and version io.write_uint32( MAGIC ) io.write_uint32( VERSION ) # Write cdb index with fake values just to fill space cdb_dict = {} for key in dict.iterkeys(): cdb_dict[ key ] = io.pack( "L", 0 ) cdb_offset = io.tell() FileCDBDict.to_file( cdb_dict, file, is_little_endian=is_little_endian ) # Write each tree and save offset for key, value in dict.iteritems(): offset = io.tell() cdb_dict[ key ] = io.pack( "L", offset ) value.to_file( file, is_little_endian=is_little_endian, no_leaves=no_leaves ) # Go back and write the index again io.seek( cdb_offset ) FileCDBDict.to_file( cdb_dict, file, is_little_endian=is_little_endian ) cdef class FileArrayTree: """ Wrapper for ArrayTree stored in file that reads as little as possible """ cdef public int max cdef public int block_size cdef public object dtype cdef public int levels cdef public int offset cdef public int root_offset cdef object io def __init__( self, file, is_little_endian=True ): self.io = BinaryFileReader( file, is_little_endian=is_little_endian ) self.offset = self.io.tell() # Read basic info about the tree self.max = self.io.read_uint32() self.block_size = self.io.read_uint32() # Read dtype and canonicalize dt = self.io.read( 1 ) self.dtype = numpy.dtype( dt ) self.io.skip( 3 ) # How many levels are needed to cover the entire range? self.levels = 0 while ( self.block_size ) ** ( self.levels + 1 ) < self.max: self.levels += 1 # Not yet dealing with the case where the root is a Leaf assert self.levels > 0, "max < block_size not yet handled" # Save offset of root self.root_offset = self.io.tell() def __getitem__( self, index ): min = self.r_seek_to_node( index, 0, self.root_offset, self.levels, 0 ) if min < 0: return nan self.io.skip( self.dtype.itemsize * ( index - min ) ) return self.io.read_raw_array( self.dtype, 1 )[0] def get_summary( self, index, level ): if level <= 0 or level > self.levels: raise ValueError, "level must be <= self.levels" if self.r_seek_to_node( index, 0, self.root_offset, self.levels, level ) < 0: return None # Read summary arrays s = Summary() s.counts = self.io.read_raw_array( self.dtype, self.block_size ) s.frequencies = self.io.read_raw_array( self.dtype, self.block_size ) s.sums = self.io.read_raw_array( self.dtype, self.block_size ) s.mins = self.io.read_raw_array( self.dtype, self.block_size) s.maxs = self.io.read_raw_array( self.dtype, self.block_size ) s.sumsquares = self.io.read_raw_array( self.dtype, self.block_size ) return s def get_leaf( self, index ): if self.r_seek_to_node( index, 0, self.root_offset, self.levels, 0 ) < 0: return [] return self.io.read_raw_array( self.dtype, self.block_size ) cdef int r_seek_to_node( self, int index, int min, long long offset, int level, int desired_level ): """ Seek to the start of the node at `desired_level` that contains `index`. Returns the minimum value represented in that node. 
""" cdef int child_size, bin_index, child_min self.io.seek( offset ) if level > desired_level: child_size = self.block_size ** level bin_index = ( index - min ) // ( child_size ) child_min = min + ( bin_index * child_size ) # Skip summary arrays -- # arrays * itemsize * block_size self.io.skip( NUM_SUMMARY_ARRAYS * self.dtype.itemsize * self.block_size ) # Skip to offset of correct child -- offsets are 8 bytes self.io.skip( 8 * bin_index ) # Read offset of child child_offset = self.io.read_uint64() # print "co: %s\tbi: %s\tcm: %s\n" % (child_offset, bin_index, child_min) if child_offset == 0: return -1 return self.r_seek_to_node( index, child_min, child_offset, level - 1, desired_level ) else: # The file pointer is at the start of the desired node, do nothing return min cdef class Summary: """ Summary for a non-leaf level of the tree, contains arrays of the min, max, valid count, sum, and sum-of-squares for each child. """ cdef public object counts cdef public object frequencies cdef public object mins cdef public object maxs cdef public object sums cdef public object sumsquares cdef class ArrayTreeNode cdef class ArrayTreeLeaf cdef class ArrayTree: """ Stores a sparse array of data as a tree. An array of `self.max` values is stored in a tree in which each leaf contains `self.block_size` values and each internal node contains `self.block_size` children. Entirely empty subtrees are not stored. Thus, the storage is efficient for data that is block sparse -- having contiguous chunks of `self.block_size` or larger data. Currently it is not efficient if the data is strided (e.g. one or two data points in every interval of length `self.block_size`). Internal nodes store `Summary` instances for their subtrees. """ cdef public int max cdef public int block_size cdef public object dtype cdef public int levels cdef public int no_leaves cdef public ArrayTreeNode root def __init__( self, int max, int block_size, dtype=float32, no_leaves=False ): """ Create a new array tree of size `max` """ self.max = max self.block_size = block_size self.no_leaves = no_leaves # Force the dtype argument to its canonical dtype object self.dtype = numpy.dtype( dtype ) # How many levels are needed to cover the entire range? 
self.levels = 0 while ( self.block_size ) ** ( self.levels + 1 ) < self.max: self.levels += 1 # Not yet dealing with the case where the root is a Leaf assert self.levels > 0, "max < block_size not yet handled" # Create the root node` self.root = ArrayTreeNode( self, 0, max, block_size, self.levels ) def __setitem__( self, int index, value ): self.root.set( index, value ) def set_range( self, int start, int end, value ): for i from start <= i < end: self.root.set( i, value ) def __getitem__( self, int index ): return self.root.get( index ) def to_file( self, f, is_little_endian=True, no_leaves=False ): io = BinaryFileWriter( f, is_little_endian=is_little_endian ) ## io.write_uint32( VERSION ) io.write_uint32( self.max ) io.write_uint32( self.block_size ) io.write( self.dtype.char ) io.write( "\0\0\0" ) # Data pass, level order if no_leaves: bottom_level = 0 else: bottom_level = -1 for level in range( self.levels, bottom_level, -1 ): self.root.to_file_data_pass( io, level ) # Offset pass to fix up indexes self.root.to_file_offset_pass( io ) @classmethod def from_file( Class, f, is_little_endian=True ): io = BinaryFileReader( f, is_little_endian=is_little_endian ) ## assert io.read_uint32() == VERSION max = io.read_uint32() block_size = io.read_uint32() dt = io.read( 1 ) io.read( 3 ) tree = Class( max, block_size, dt ) tree.root.from_file( io ) return tree @classmethod def from_sequence( Class, s, block_size=1000 ): """ Build an ArrayTree from a sequence like object (must have at least length and getitem). """ tree = Class( len( s ), block_size ) for i in range( len( s ) ): tree[i] = s[i] return tree cdef class ArrayTreeNode: """ Internal node of an ArrayTree. Contains summary data and pointers to subtrees. """ cdef ArrayTree tree cdef int min cdef int max cdef int block_size cdef int level cdef int child_size cdef object children cdef public Summary summary cdef public long start_offset def __init__( self, ArrayTree tree, int min, int max, int block_size, int level ): self.tree = tree self.min = min self.max = max self.block_size = block_size self.level = level # Each of my children represents block_size ** level values self.child_size = self.block_size ** self.level self.children = [None] * self.block_size self.summary = None self.start_offset = 0 cdef inline init_bin( self, int index ): cdef int min = self.min + ( index * self.child_size ) cdef int max = min + self.child_size if self.level == 1: self.children[ index ] = ArrayTreeLeaf( self.tree, min, max ) else: self.children[ index ] = ArrayTreeNode( self.tree, min, max, self.block_size, self.level - 1 ) def set( self, int index, value ): cdef int bin_index = ( index - self.min ) // ( self.child_size ) if self.children[ bin_index ] is None: self.init_bin( bin_index ) self.children[ bin_index ].set( index, value ) def get( self, int index ): cdef int bin_index = ( index - self.min ) // ( self.child_size ) if self.children[ bin_index ] is None: return nan else: return self.children[ bin_index ].get( index ) cpdef build_summary( self ): """ Build summary of children. 
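        For leaf children the count, min, max, sum and sum-of-squares are
        computed directly from the stored values (NaN entries are treated as
        missing via the nan-aware numpy reductions) along with an insertion
        frequency; for internal children the child's own summary arrays are
        aggregated. It should therefore be called once on the root after all
        values have been set.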
""" counts = empty( self.tree.block_size, self.tree.dtype ) frequencies = empty( self.tree.block_size, self.tree.dtype ) mins = empty( self.tree.block_size, self.tree.dtype ) maxs = empty( self.tree.block_size, self.tree.dtype ) sums = empty( self.tree.block_size, self.tree.dtype ) sumsquares = empty( self.tree.block_size, self.tree.dtype ) for i in range( len( self.children ) ): if self.children[i]: if self.level == 1: v = self.children[i].values counts[i] = sum( ~isnan( v ) ) frequencies[i] = self.children[i].frequency mins[i] = nanmin( v ) maxs[i] = nanmax( v ) sums[i] = nansum( v ) sumsquares[i] = nansum( v ** 2 ) else: c = self.children[i] c.build_summary() counts[i] = sum( c.summary.counts ) frequencies[i] = sum( c.summary.frequencies ) mins[i] = nanmin( c.summary.mins ) maxs[i] = nanmax( c.summary.maxs ) sums[i] = nansum( c.summary.sums ) sumsquares[i] = nansum( c.summary.sumsquares ) else: counts[i] = 0 frequencies[i] = 0 mins[i] = nan maxs[i] = nan sums[i] = nan sumsquares[i] = nan s = Summary() s.counts = counts s.frequencies = frequencies s.mins = mins s.maxs = maxs s.sums = sums s.sumsquares = sumsquares self.summary = s def to_file_data_pass( self, io, level ): """ First pass of writing to file, writes data and saves position of block. """ assert self.summary, "Writing without summaries is currently not supported" # If we are at the current level being written, write a block if self.level == level: # Save file offset where this block starts self.start_offset = io.tell() # Write out summary data io.write_raw_array( self.summary.counts ) io.write_raw_array( self.summary.frequencies ) io.write_raw_array( self.summary.sums ) io.write_raw_array( self.summary.mins ) io.write_raw_array( self.summary.maxs ) io.write_raw_array( self.summary.sumsquares ) # Skip enough room for child offsets (block_size children * 64bits) io.skip( self.tree.block_size * 8 ) # Must be writing a lower level, so recurse else: # Write all non-empty children for i in range( len( self.children ) ): if self.children[i] is not None: self.children[i].to_file_data_pass( io, level ) def to_file_offset_pass( self, io ): """ Second pass of writing to file, seek to appropriate position and write offsets of children. """ # Seek to location of child offfsets (skip over # summary arrays) skip_amount = NUM_SUMMARY_ARRAYS * self.tree.dtype.itemsize * self.block_size io.seek( self.start_offset + skip_amount ) # Write the file offset of each child into the index for child in self.children: if child is None: io.write_uint64( 0 ) else: io.write_uint64( child.start_offset ) # Recursively write offsets in child nodes for child in self.children: if child is not None: child.to_file_offset_pass( io ) def from_file( self, io ): """ Load entire summary and all children into memory. 
""" dtype = self.tree.dtype block_size = self.tree.block_size # Read summary arrays s = Summary() s.counts = io.read_raw_array( dtype, block_size ) s.frequencies = io.read_raw_array( int32, block_size ) s.sums = io.read_raw_array( dtype, block_size ) s.mins = io.read_raw_array( dtype, block_size) s.maxs = io.read_raw_array( dtype, block_size ) s.sumsquares = io.read_raw_array( dtype, block_size ) self.summary = s # Read offset of all children child_offsets = [ io.read_uint64() for i in range( block_size ) ] for i in range( block_size ): if child_offsets[i] > 0: self.init_bin( i ) io.seek( child_offsets[i] ) self.children[i].from_file( io ) def get_from_file( self, io, index ): cdef int bin_index = ( index - self.min ) //( self.child_size ) if self.children[ bin_index ] is None: return nan else: return self.children[ bin_index ].get( index ) cdef class ArrayTreeLeaf: """ Leaf node of an ArrayTree, contains data values. """ cdef ArrayTree tree cdef int min cdef int max cdef public int frequency cdef public numpy.ndarray values cdef public long start_offset def __init__( self, ArrayTree tree, int min, int max ): self.tree = tree self.min = min self.max = max self.frequency = 0 self.values = empty( max - min, self.tree.dtype ) self.values[:] = nan self.start_offset = 0 def set( self, index, value ): self.frequency += 1 self.values[ index - self.min ] = value def get( self, index ): return self.values[ index - self.min ] def to_file_data_pass( self, io, level ): assert level == 0 self.start_offset = io.tell() io.write_raw_array( self.values ) def to_file_offset_pass( self, io ): pass def from_file( self, io ): self.values = io.read_raw_array( self.tree.dtype, self.tree.block_size ) bx-python-0.8.13/lib/bx/arrays/array_tree_tests.py000066400000000000000000000072201415666465100221630ustar00rootroot00000000000000import os import sys import tempfile import unittest try: sys.path.insert(0, os.path.dirname(os.path.dirname(__file__))) except Exception: sys.path.insert(0, os.path.dirname(os.path.abspath("."))) from bx.arrays.array_tree import ArrayTree, FileArrayTreeDict class TestArrayTree(unittest.TestCase): def setUp(self): tree = ArrayTree(10000, 10) # max value of 10000, each block has 10 numbers for i in range(5000): tree[i] = i # Insert extra copies to test frequency for i in range(3000): tree[i] = i tree.set_range(5000, 9001, 100) tree.root.build_summary() d = {'test': tree} f = tempfile.TemporaryFile() FileArrayTreeDict.dict_to_file(d, f) f.seek(0) self.filearraytreedict = FileArrayTreeDict(f) self.filearraytree = self.filearraytreedict['test'] def test_get_summary(self): f = self.filearraytree lvl1 = f.get_summary(0, 1) self.assertEqual([float(_) for _ in lvl1.sums/lvl1.counts], [4.5, 14.5, 24.5, 34.5, 44.5, 54.5, 64.5, 74.5, 84.5, 94.5]) lvl2 = f.get_summary(0, 2) self.assertEqual([float(_) for _ in lvl2.sums/lvl2.counts], [49.5, 149.5, 249.5, 349.5, 449.5, 549.5, 649.5, 749.5, 849.5, 949.5]) lvl3 = f.get_summary(0, 3) self.assertEqual([float(_) for _ in lvl3.sums/lvl3.counts], [499.5, 1499.5, 2499.5, 3499.5, 4499.5, 100.0, 100.0, 100.0, 100.0, 100.0]) lvl2_2 = f.get_summary(3000, 2) self.assertEqual([float(_) for _ in lvl2_2.sums/lvl2_2.counts], [3049.5, 3149.5, 3249.5, 3349.5, 3449.5, 3549.5, 3649.5, 3749.5, 3849.5, 3949.5]) def test_get_leaf(self): f = self.filearraytree from_start = [int(i) for i in f.get_leaf(0)] from_middle = [int(i) for i in f.get_leaf(5)] self.assertEqual(from_start, from_middle) self.assertEqual(from_start, list(range(10))) from_start = [int(i) for i in 
f.get_leaf(4999)] self.assertEqual(from_start, list(range(4990, 5000))) from_start = [int(i) for i in f.get_leaf(9600)] self.assertEqual(from_start, []) def test_big(self): tree = ArrayTree(2147483647, 1000) # What we use for tracks for i in range(5000): tree[i] = i # Insert extra copies to test frequency for i in range(3000): tree[i] = i tree.set_range(5000, 9001, 100) tree.set_range(14000000, 15000000, 200) tree.root.build_summary() d = {'test': tree} f = tempfile.TemporaryFile() FileArrayTreeDict.dict_to_file(d, f) f.seek(0) at = FileArrayTreeDict(f)['test'] lvl1 = at.get_summary(14000000, 1) avgs = [float(_) for _ in lvl1.sums/lvl1.counts] self.assertEqual(len(avgs), 1000) self.assertEqual(avgs, [200 for i in range(0, 1000)]) def test_get_frequencies(self): f = self.filearraytree self.assertEqual([float(_) for _ in f.get_summary(0, 1).frequencies], ([20] * 10)) self.assertEqual([float(_) for _ in f.get_summary(4000, 1).frequencies], ([10] * 10)) self.assertEqual([float(_) for _ in f.get_summary(0, 2).frequencies], ([200] * 10)) self.assertEqual([int(_) for _ in f.get_summary(0, 3).frequencies], [2000, 2000, 2000, 1000, 1000, 1000, 1000, 1000, 1000, 1]) def test_wrong_dictkey(self): self.assertRaises(KeyError, self.filearraytreedict.__getitem__, "non-existing") def test_higher_level_than_tree(self): f = self.filearraytree self.assertEqual(3, f.levels) self.assertRaises(ValueError, f.get_summary, 0, 4) if __name__ == '__main__': unittest.main() bx-python-0.8.13/lib/bx/arrays/bed.pyx000066400000000000000000000025401415666465100175260ustar00rootroot00000000000000""" Iterator for the BED format ( http://genome.ucsc.edu/FAQ/FAQformat.html#format1 ) Returns chrom, chromStart, chromEnd, name, score """ cdef class BedReader: cdef object f def __init__( self, f ): self.f = f def __iter__( self ): return self def __next__( self ): while True: line = self.f.readline() if not line: raise StopIteration() if line.isspace(): continue if line[0] == "#": continue if line[0].isalpha(): if line.startswith( "track" ) or line.startswith( "browser" ): continue feature = line.strip().split() chrom = feature[0] chrom_start = int(feature[1]) chrom_end = int(feature[2]) if len(feature) > 3: name = feature[3] else: name = None if len(feature) > 4: score = int(feature[4]) else: score = None return chrom, chrom_start, chrom_end, name, score else: raise "Unexpected input line: %s" % line.strip() bx-python-0.8.13/lib/bx/arrays/wiggle.pxd000066400000000000000000000003701415666465100202240ustar00rootroot00000000000000cdef enum linemode: MODE_BED MODE_FIXED MODE_VARIABLE cdef class WiggleReader: cdef object file cdef object current_chrom cdef long current_pos cdef long current_step cdef long current_span cdef linemode mode bx-python-0.8.13/lib/bx/arrays/wiggle.pyx000066400000000000000000000071721415666465100202600ustar00rootroot00000000000000""" Support for scores in the `wiggle`_ file format used by the UCSC Genome Browser. The positions in the wiggle format are 1-relative, however, the positions returned match the BED/interval format which is zero-based, half-open. .. _wiggle: http://genome.ucsc.edu/goldenPath/help/wiggle.html """ def parse_header( line ): return dict( [ field.split( '=' ) for field in line.split()[1:] ] ) cdef class WiggleReader: """ Iterator yielding chrom, start, end, strand, value. Values are zero-based, half-open. Regions which lack a score are ignored. 
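    A minimal usage sketch (the file name here is hypothetical):

        from bx.arrays.wiggle import WiggleReader
        for chrom, start, end, strand, val in WiggleReader( open( "scores.wig" ) ):
            print( chrom, start, end, strand, val )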
""" #cdef object file #cdef object current_chrom #cdef long current_pos #cdef long current_step #cdef long current_span #cdef linemode mode def __init__( self, file ): self.file = file self.current_chrom = None self.current_pos = -1 self.current_step = -1 self.current_span = -1 self.mode = MODE_BED def __iter__( self ): return self def __next__( self ): while True: line = self.file.readline() if not line: raise StopIteration() if line.isspace(): continue if line[0] == "#": continue if line[0].isalpha(): if line.startswith( "track" ) or line.startswith( "browser" ): continue elif line.startswith( "variableStep" ): header = parse_header( line ) self.current_chrom = header['chrom'] self.current_pos = -1 self.current_step = -1 if 'span' in header: self.current_span = int( header['span'] ) else: self.current_span = 1 self.mode = MODE_VARIABLE continue elif line.startswith( "fixedStep" ): header = parse_header( line ) self.current_chrom = header['chrom'] self.current_pos = int( header['start'] ) - 1 self.current_step = int( header['step'] ) if 'span' in header: self.current_span = int( header['span'] ) else: self.current_span = 1 self.mode = MODE_FIXED continue elif self.mode == MODE_BED: fields = line.split() if len( fields ) > 3: if len( fields ) > 5: return fields[0], int( fields[1] ), int( fields[2] ), fields[5], float( fields[3] ) else: return fields[0], int( fields[1] ), int( fields[2] ), "+", float( fields[3] ) elif self.mode == MODE_VARIABLE: fields = line.split() try: pos = int( fields[0] ) - 1 val = float( fields[1] ) except ValueError: continue return self.current_chrom, pos, pos + self.current_span, "+", val elif self.mode == MODE_FIXED: fields = line.split() try: val = float( fields[0] ) except ValueError: continue return self.current_chrom, self.current_pos, self.current_pos + self.current_span, "+", val # FIXME: unreachable! need to test this and fix! self.current_pos += self.current_step else: raise "Unexpected input line: %s" % line.strip() bx-python-0.8.13/lib/bx/bbi/000077500000000000000000000000001415666465100154645ustar00rootroot00000000000000bx-python-0.8.13/lib/bx/bbi/__init__.py000066400000000000000000000001231415666465100175710ustar00rootroot00000000000000""" Support for the UCSC "Big Binary Indexed" file formats (bigWig and bigBed) """ bx-python-0.8.13/lib/bx/bbi/bbi_file.pxd000066400000000000000000000061761415666465100177460ustar00rootroot00000000000000from bpt_file cimport BPTFile from cirtree_file cimport CIRTreeFile from types cimport * import numpy cimport numpy cdef class SummaryBlock: """ A block of summary data from disk """ cdef public bits32 chrom_id cdef public bits32 start cdef public bits32 end cdef public bits32 valid_count cdef public double min_val cdef public double max_val cdef public double sum_data cdef public double sum_squares cdef class SummarizedData: """ The result of using SummaryBlocks read from the file to produce a aggregation over a particular range and resolution """ cdef public bits32 start cdef public bits32 end cdef public int size cdef public numpy.ndarray valid_count cdef public numpy.ndarray min_val cdef public numpy.ndarray max_val cdef public numpy.ndarray sum_data cdef public numpy.ndarray sum_squares cdef accumulate_interval_value( self, bits32 s, bits32 e, float val ) cdef class BBIFile cdef class BlockHandler: """ Callback for `BBIFile.visit_blocks_in_region` """ cdef handle_block( self, bytes block_data, BBIFile bbi_file ) cdef class BBIFile: """ A "big binary indexed" file. 
Stores blocks of raw data and numeric summaries of that data at different levels of aggregation ("zoom levels"). Generic enough to accommodate both wiggle and bed data. """ # Probably a PyFileObject, or any seekable file-like cdef object file # A BinaryFileReader created from file cdef object reader # The magic number or type signature (whether the file is bigWig or bigBed or...) cdef public bits32 magic # Is the file byteswapped relative to our native byte order? cdef boolean is_byteswapped # The index to the chromosomes, an embedded BPT file cdef BPTFile chrom_bpt # Version number cdef public bits16 version # Number of zoom levels cdef public bits16 zoom_levels # Offset to chromosome index cdef bits64 chrom_tree_offset # Offset to unzoomed data cdef bits64 unzoomed_data_offset # Offset to unzoomed index cdef bits64 unzoomed_index_offset # If bed, number of columns cdef bits16 field_count cdef bits16 defined_field_count # Offset to an embedded string containing "AutoSQL" format data that defines the columns cdef bits64 as_offset # Offset to total summary information (if any) cdef bits64 total_summary_offset # Size of uncompression buffer, 0 if no compression cdef bits32 uncompress_buf_size # Zoom levels list cdef public object level_list cdef visit_blocks_in_region( self, bits32 chrom_id, bits32 start, bits32 end, BlockHandler handler ) cdef _get_chrom_id_and_size( self, char * chrom ) cdef _best_zoom_level( self, int desired_reduction ) cpdef summarize( self, object chrom, bits32 start, bits32 end, int summary_size ) cpdef summarize_from_full( self, char * chrom, bits32 start, bits32 end, int summary_size ) cpdef query( self, object chrom, bits32 start, bits32 end, int summary_size ) cdef _summarize_from_full( self, bits32 chrom_id, bits32 start, bits32 end, int summary_size ) bx-python-0.8.13/lib/bx/bbi/bbi_file.pyx000066400000000000000000000407111415666465100177640ustar00rootroot00000000000000# cython: profile=False """ Core implementation for reading UCSC "big binary indexed" files. There isn't really any specification for the format beyond the code, so this mirrors Jim Kent's 'bbiRead.c' mostly. 
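In outline: a fixed header (holding offsets to everything else) is followed by
per-zoom-level headers; chromosome names are resolved to ids and sizes through
an embedded B+ tree (bpt_file), and genomic ranges are resolved to data blocks
through embedded R-tree indexes (cirtree_file). Data blocks may be
zlib-compressed.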
""" from cpython.version cimport PY_MAJOR_VERSION import sys cimport cython from collections import deque from bpt_file cimport BPTFile from cirtree_file cimport CIRTreeFile from types cimport * from libc cimport limits import numpy cimport numpy from bx.misc.binary_file import BinaryFileReader from io import BytesIO import zlib, math cdef extern from "Python.h": char * PyBytes_AsString( object ) # Signatures for bbi related file types cdef public int big_wig_sig = 0x888FFC26 cdef public int big_bed_sig = 0x8789F2EB # Some record sizes for parsing DEF summary_on_disk_size = 32 @cython.profile(False) cdef inline int range_intersection( int start1, int end1, int start2, int end2 ): return min( end1, end2 ) - max( start1, start2 ) @cython.profile(False) cdef inline int imax(int a, int b): return a if a >= b else b @cython.profile(False) cdef inline int imin(int a, int b): return a if a <= b else b cdef enum summary_type: summary_type_mean = 0 summary_type_max = 1 summary_type_min = 2 summary_type_coverage = 3 summary_type_sd = 4 cdef class SummaryBlock: """ A block of summary data from disk """ pass cdef class SummarizedData: """ The result of using SummaryBlocks read from the file to produce a aggregation over a particular range and resolution """ def __init__( self, bits32 start, bits32 end, int size ): self.start = start self.end = end self.size = size self.valid_count = numpy.zeros( self.size, dtype=numpy.float64 ) self.min_val = numpy.zeros( self.size, dtype=numpy.float64 ) self.max_val = numpy.zeros( self.size, dtype=numpy.float64 ) self.sum_data = numpy.zeros( self.size, dtype=numpy.float64 ) self.sum_squares = numpy.zeros( self.size, dtype=numpy.float64 ) cdef accumulate_interval_value( self, bits32 s, bits32 e, float val ): cdef int base_start, base_end, base_step, overlap, j, interval_size cdef double overlap_factor, interval_weight # We locally cdef the arrays so all indexing will be at C speeds cdef numpy.ndarray[numpy.float64_t, ndim=1] valid_count = self.valid_count cdef numpy.ndarray[numpy.float64_t, ndim=1] min_val = self.min_val cdef numpy.ndarray[numpy.float64_t, ndim=1] max_val = self.max_val cdef numpy.ndarray[numpy.float64_t, ndim=1] sum_data = self.sum_data cdef numpy.ndarray[numpy.float64_t, ndim=1] sum_squares = self.sum_squares # Trim interval down to region of interest if s < self.start: s = self.start if e > self.end: e = self.end if s >= e: return base_step = ( self.end - self.start ) / self.size for j from 0 <= j < self.size: base_start = self.start + ( base_step * j ) base_end = base_start + base_step overlap = range_intersection( base_start, base_end, s, e ) if overlap > 0: interval_size = e - s overlap_factor = overlap / interval_size interval_weight = interval_size * overlap_factor valid_count[j] += interval_weight sum_data[j] += val * interval_weight sum_squares[j] += val * val * interval_weight if max_val[j] < val: max_val[j] = val if min_val[j] > val: min_val[j] = val cdef class BlockHandler: """ Callback for `BBIFile.visit_blocks_in_region` """ cdef handle_block( self, bytes block_data, BBIFile bbi_file ): pass cdef class BBIFile: """ A "big binary indexed" file. Stores blocks of raw data and numeric summaries of that data at different levels of aggregation ("zoom levels"). Generic enough to accommodate both wiggle and bed data. 
""" def __init__( self, file=None, expected_sig=None, type_name=None ): if file is not None: self.open( file, expected_sig, type_name ) def open( self, file, expected_sig, type_name ): """ Initialize from an existing bbi file, signature (magic) must be passed in since this is generic. """ assert expected_sig is not None self.file = file # Open the file in a BinaryFileReader, handles magic and byteswapping self.reader = reader = BinaryFileReader( file, expected_sig ) self.magic = expected_sig self.is_byteswapped = self.reader.byteswap_needed # Read header stuff self.version = reader.read_uint16() self.zoom_levels = reader.read_uint16() self.chrom_tree_offset = reader.read_uint64() self.unzoomed_data_offset = reader.read_uint64() self.unzoomed_index_offset = reader.read_uint64() self.field_count = reader.read_uint16() self.defined_field_count = reader.read_uint16() self.as_offset = reader.read_uint64() self.total_summary_offset = reader.read_uint64() self.uncompress_buf_size = reader.read_uint32() # Skip reserved reader.seek( 64 ) # Read zoom headers self.level_list = [] for i from 0 <= i < self.zoom_levels: level = ZoomLevel() level.bbi_file = self level.reduction_level = reader.read_uint32() level.reserved = reader.read_uint32() level.data_offset = reader.read_uint64() level.index_offset = reader.read_uint64() self.level_list.append( level ) # Initialize and attach embedded BPTFile containing chromosome names and ids reader.seek( self.chrom_tree_offset ) self.chrom_bpt = BPTFile( file=self.file ) cdef visit_blocks_in_region( self, bits32 chrom_id, bits32 start, bits32 end, BlockHandler handler ): """ Visit each block from the full data that overlaps a specific region """ cdef CIRTreeFile ctf reader = self.reader reader.seek( self.unzoomed_index_offset ) ctf = CIRTreeFile( reader.file ) block_list = ctf.find_overlapping_blocks( chrom_id, start, end ) for offset, size in block_list: # Seek to and read all data for the block reader.seek( offset ) block_data = reader.read( size ) # Might need to uncompress if self.uncompress_buf_size > 0: block_data = zlib.decompress( block_data ) handler.handle_block( block_data, self ) cpdef summarize( self, object chrom, bits32 start, bits32 end, int summary_size ): """ Gets `summary_size` data points over the regions `chrom`:`start`-`end`. 
""" cdef char * cchrom if PY_MAJOR_VERSION >= 3: bytes_chrom = chrom.encode() else: bytes_chrom = chrom cchrom = PyBytes_AsString(bytes_chrom) if start >= end: return None chrom_id, chrom_size = self._get_chrom_id_and_size( cchrom ) if chrom_id is None: return None # Return value will be a structured array (rather than an array # of summary element structures # Find appropriate zoom level cdef bits32 base_size = end - start cdef int full_reduction = base_size / summary_size cdef int zoom = full_reduction / 2 if zoom < 0: zoom = 0 cdef ZoomLevel zoom_level = self._best_zoom_level( zoom ) if zoom_level is not None: return zoom_level._summarize( chrom_id, start, end, summary_size ) else: return self._summarize_from_full( chrom_id, start, end, summary_size ) cpdef summarize_from_full( self, char * chrom, bits32 start, bits32 end, int summary_size ): """ Gets `summary_size` data points over the regions `chrom`:`start`-`end`, always using the raw data points """ if start >= end: return None chrom_id, chrom_size = self._get_chrom_id_and_size( chrom ) if chrom_id is None: return None # Return value will be a structured array (rather than an array # of summary element structures return self._summarize_from_full( chrom_id, start, end, summary_size ) cpdef query( self, object chrom, bits32 start, bits32 end, int summary_size ): """ Provides a different view of summary for region, a list of dictionaries with keys: mean, max, min, coverage, std_dev """ if end > 2147483647 or start < 0: raise ValueError results = self.summarize(chrom, start, end, summary_size) if not results: return None rval = [] for i in range(summary_size): sum_data = results.sum_data[i] valid_count = results.valid_count[i] mean = sum_data / valid_count coverage = summary_size / (end - start) * valid_count # print results.sum_squares[i], sum_data, valid_count variance = results.sum_squares[i] - sum_data * sum_data / valid_count if valid_count > 1: variance /= valid_count - 1 std_dev = math.sqrt(max(variance, 0)) rval.append( { "mean": mean, "max": results.max_val[i], "min": results.min_val[i], \ "coverage": coverage, "std_dev": std_dev } ) return rval cdef _get_chrom_id_and_size( self, char * chrom ): """ Lookup id and size from the chromosome named `chrom` """ bytes = self.chrom_bpt.find( chrom ) if bytes is not None: # The value is two 32 bit uints, use the BPT's reader for checking byteswapping assert len( bytes ) == 8 chrom_id, chrom_size = self.chrom_bpt.reader.unpack( "II", bytes ) return chrom_id, chrom_size else: return None, None cdef _summarize_from_full( self, bits32 chrom_id, bits32 start, bits32 end, int summary_size ): """ Create summary from full data. This is data specific so must be overridden. 
""" pass cdef _best_zoom_level( self, int desired_reduction ): if desired_reduction <= 1: return None cdef ZoomLevel level, closest_level cdef int diff, closest_diff = limits.INT_MAX closest_level = None for level in self.level_list: diff = desired_reduction - level.reduction_level if diff >= 0 and diff < closest_diff: closest_diff = diff closest_level = level return closest_level cdef class ZoomLevel: cdef BBIFile bbi_file cdef public bits32 reduction_level cdef bits32 reserved cdef public bits64 data_offset cdef public bits64 index_offset cdef int item_count def _summary_blocks_in_region( self, bits32 chrom_id, bits32 start, bits32 end ): """ Return a list of all SummaryBlocks that overlap the region `chrom_id`:`start`-`end` """ cdef CIRTreeFile ctf cdef SummaryBlock summary rval = deque() reader = self.bbi_file.reader reader.seek( self.index_offset ) ctf = CIRTreeFile( reader.file ) block_list = ctf.find_overlapping_blocks( chrom_id, start, end ) for offset, size in block_list: # Seek to and read all data for the block reader.seek( offset ) block_data = reader.read( size ) # Might need to uncompress if self.bbi_file.uncompress_buf_size > 0: ## block_data = zlib.decompress( block_data, buf_size = self.bbi_file.uncompress_buf_size ) block_data = zlib.decompress( block_data ) block_size = len( block_data ) # The block should be a bunch of summaries. assert block_size % summary_on_disk_size == 0 item_count = block_size / summary_on_disk_size # Create another reader just for the block, shouldn't be too expensive block_reader = BinaryFileReader( BytesIO( block_data ), is_little_endian=reader.is_little_endian ) for i from 0 <= i < item_count: ## NOTE: Look carefully at bbiRead again to be sure the endian ## conversion here is all correct. It looks like it is ## just pushing raw data into memory and not swapping sum_chrom_id = block_reader.read_uint32() # A block can contain summaries from more that one chrom_id if sum_chrom_id != chrom_id: block_reader.skip(7*4) continue summary = SummaryBlock() summary.chrom_id = sum_chrom_id summary.start = block_reader.read_uint32() summary.end = block_reader.read_uint32() summary.valid_count = block_reader.read_uint32() summary.min_val = block_reader.read_float() summary.max_val = block_reader.read_float() summary.sum_data = block_reader.read_float() summary.sum_squares = block_reader.read_float() rval.append( summary ) return rval cdef _get_summary_slice( self, bits32 base_start, bits32 base_end, summaries ): cdef float valid_count = 0.0 cdef float sum_data = 0.0 cdef float sum_squares = 0.0 cdef float min_val = numpy.nan cdef float max_val = numpy.nan cdef float overlap_factor cdef int overlap if summaries: min_val = summaries[0].min_val max_val = summaries[0].max_val for summary in summaries: if summary.start >= base_end: break overlap = range_intersection( base_start, base_end, summary.start, summary.end ) if overlap > 0: overlap_factor = overlap / (summary.end - summary.start) valid_count += summary.valid_count * overlap_factor sum_data += summary.sum_data * overlap_factor sum_squares += summary.sum_squares * overlap_factor if max_val < summary.max_val: max_val = summary.max_val if min_val > summary.min_val: min_val = summary.min_val return valid_count, sum_data, sum_squares, min_val, max_val cdef _summarize( self, bits32 chrom_id, bits32 start, bits32 end, int summary_size ): """ Summarize directly from file. 
Looking at Jim's code, it appears that - bbiSummariesInRegion returns all summaries that span start-end in sorted order - bbiSummarySlice is then used to aggregate over the subset of those summaries that overlap a single summary element """ cdef bits32 base_start, base_end, base_step # We locally cdef the arrays so all indexing will be at C speeds cdef numpy.ndarray[numpy.float64_t] valid_count cdef numpy.ndarray[numpy.float64_t] min_val cdef numpy.ndarray[numpy.float64_t] max_val cdef numpy.ndarray[numpy.float64_t] sum_data cdef numpy.ndarray[numpy.float64_t] sum_squares # What we will load into rval = SummarizedData( start, end, summary_size ) valid_count = rval.valid_count min_val = rval.min_val max_val = rval.max_val sum_data = rval.sum_data sum_squares = rval.sum_squares # First, load up summaries reader = self.bbi_file.reader reader.seek( self.index_offset ) summaries = self._summary_blocks_in_region(chrom_id, start, end) base_step = (end - start) / summary_size base_start = start base_end = start for i in range(summary_size): base_end += base_step while summaries and summaries[0].end <= base_start: summaries.popleft() valid_count[i], sum_data[i], sum_squares[i], min_val[i], max_val[i] = self._get_summary_slice(base_start, base_end, summaries) base_start = base_end return rval bx-python-0.8.13/lib/bx/bbi/bigbed_file.pyx000066400000000000000000000106311415666465100204420ustar00rootroot00000000000000""" BigBed file. """ from bbi_file cimport * from cirtree_file cimport CIRTreeFile import numpy cimport numpy from types cimport * from bx.intervals.io import GenomicInterval from bx.misc.binary_file import BinaryFileReader from io import BytesIO import zlib DEF big_bed_sig = 0x8789F2EB cdef inline int range_intersection( int start1, int end1, int start2, int end2 ): return min( end1, end2 ) - max( start1, start2 ) cdef class BigBedBlockHandler( BlockHandler ): """ BlockHandler that parses the block into a series of BED records """ cdef bits32 chrom_id cdef bits32 start cdef bits32 end def __init__( self, bits32 chrom_id, bits32 start, bits32 end ): BlockHandler.__init__( self ) self.chrom_id = chrom_id self.start = start self.end = end cdef handle_block( self, bytes block_data, BBIFile bbi_file ): cdef object bytes_io cdef int length cdef bits32 chrom_id, s, e cdef str rest # Now we parse the block, which should just be a bunch of BED records bytes_io = BytesIO( block_data ) block_reader = BinaryFileReader( bytes_io, is_little_endian=bbi_file.reader.is_little_endian ) length = len( block_data ) while bytes_io.tell() < length: chrom_id = block_reader.read_uint32() s = block_reader.read_uint32() e = block_reader.read_uint32() rest = block_reader.read_c_string() if chrom_id != self.chrom_id: continue if s < self.end and e > self.start: self.handle_interval_value( s, e, rest ) cdef handle_interval_value( self, bits32 s, bits32 e, str rest ): pass cdef class SummarizingBlockHandler( BigBedBlockHandler ): """ Accumulates intervals into a SummarizedData """ cdef SummarizedData sd def __init__( self, bits32 chrom_id, bits32 start, bits32 end, int summary_size ): BigBedBlockHandler.__init__( self, chrom_id, start, end ) # What we will load into self.sd = SummarizedData( start, end, summary_size ) for i in range(summary_size): self.sd.min_val[i] = +numpy.inf for i in range(summary_size): self.sd.max_val[i] = -numpy.inf cdef handle_interval_value( self, bits32 s, bits32 e, str rest ): # FIXME: Does this really obvious thing actually do what we want? # No... 
sum_data will end up being the coverage, but min/max/etc are wrong self.sd.accumulate_interval_value( s, e, 1 ) cdef class IntervalAccumulatingBlockHandler( BigBedBlockHandler ): cdef list intervals """ Accumulates intervals into a list of intervals with values """ def __init__( self, bits32 chrom_id, bits32 start, bits32 end ): BigBedBlockHandler.__init__( self, chrom_id, start, end ) self.intervals = [] cdef handle_interval_value( self, bits32 s, bits32 e, str rest ): self.intervals.append( ( s, e, rest ) ) cdef class BigBedFile( BBIFile ): """ A "big binary indexed" file whose raw data is in BED format. """ def __init__( self, file=None ): BBIFile.__init__( self, file, big_bed_sig, "bigbed" ) cdef _summarize_from_full( self, bits32 chrom_id, bits32 start, bits32 end, int summary_size ): """ Create summary from full data. """ v = SummarizingBlockHandler( chrom_id, start, end, summary_size ) self.visit_blocks_in_region( chrom_id, start, end, v ) # Round valid count, in place for i from 0 <= i < summary_size: v.sd.valid_count[i] = round( v.sd.valid_count[i] ) return v.sd cpdef get( self, char * chrom, bits32 start, bits32 end ): """ Gets all data points over the regions `chrom`:`start`-`end`. """ if start >= end: return None chrom_id, chrom_size = self._get_chrom_id_and_size( chrom ) if chrom_id is None: return None v = IntervalAccumulatingBlockHandler( chrom_id, start, end ) self.visit_blocks_in_region( chrom_id, start, end, v ) rval = [] # FIXME: Not sure the best way to return, will user GenomicInterval for # now. for ( s, e, rest ) in v.intervals: fields = [ chrom, str( s ), str( e ) ] + rest.split( "\t" ) rval.append( GenomicInterval( None, fields, 0, 1, 2, 5, "+" ) ) return rval bx-python-0.8.13/lib/bx/bbi/bigwig_file.pyx000066400000000000000000000173531415666465100205060ustar00rootroot00000000000000""" BigWig file. """ from collections import deque from bbi_file cimport * from cirtree_file cimport CIRTreeFile import numpy cimport numpy from types cimport * from bx.misc.binary_file import BinaryFileReader from io import BytesIO import zlib DEF big_wig_sig = 0x888FFC26 DEF bwg_bed_graph = 1 DEF bwg_variable_step = 2 DEF bwg_fixed_step = 3 cdef inline int range_intersection( int start1, int end1, int start2, int end2 ): return min( end1, end2 ) - max( start1, start2 ) cdef class BigWigBlockHandler( BlockHandler ): """ BlockHandler that parses the block into a series of wiggle records, and calls `handle_interval_value` for each. 
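    Each block begins with a fixed header (chrom id, start, end, item step,
    item span, section type, item count); the three section types are
    bedGraph (explicit start and end per item), variableStep (explicit start,
    span taken from the header) and fixedStep (start and end both derived
    from the header).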
""" cdef bits32 start cdef bits32 end def __init__( self, bits32 start, bits32 end ): BlockHandler.__init__( self ) self.start = start self.end = end cdef handle_block( self, bytes block_data, BBIFile bbi_file ): cdef bits32 b_chrom_id, b_start, b_end, b_valid_count cdef bits32 b_item_step, b_item_span cdef bits16 b_item_count cdef UBYTE b_type cdef int s, e cdef float val # Now we parse the block, first the header block_reader = BinaryFileReader( BytesIO( block_data ), is_little_endian=bbi_file.reader.is_little_endian ) b_chrom_id = block_reader.read_uint32() b_start = block_reader.read_uint32() b_end = block_reader.read_uint32() b_item_step = block_reader.read_uint32() b_item_span = block_reader.read_uint32() b_type = block_reader.read_uint8() block_reader.skip(1) b_item_count = block_reader.read_uint16() for i from 0 <= i < b_item_count: # Depending on the type, s and e are either read or # generate using header, val is always read if b_type == bwg_bed_graph: s = block_reader.read_uint32() e = block_reader.read_uint32() val = block_reader.read_float() elif b_type == bwg_variable_step: s = block_reader.read_uint32() e = s + b_item_span val = block_reader.read_float() elif b_type == bwg_fixed_step: s = b_start + ( i * b_item_span ) e = s + b_item_span val = block_reader.read_float() else: # FIXME: raise exception??? # s, e, val are uninitialized/not updated at this point! pass if s < self.start: s = self.start if e > self.end: e = self.end if s >= e: continue self.handle_interval_value( s, e, val ) cdef handle_interval_value( self, bits32 s, bits32 e, float val ): pass cdef class SummarizingBlockHandler( BigWigBlockHandler ): """ Accumulates intervals into a SummarizedData """ cdef SummarizedData sd def __init__( self, bits32 start, bits32 end, int summary_size ): BigWigBlockHandler.__init__( self, start, end ) # What we will load into self.sd = SummarizedData( start, end, summary_size ) for i in range(summary_size): self.sd.min_val[i] = +numpy.inf for i in range(summary_size): self.sd.max_val[i] = -numpy.inf cdef handle_interval_value( self, bits32 s, bits32 e, float val ): self.sd.accumulate_interval_value( s, e, val ) cdef class IntervalAccumulatingBlockHandler( BigWigBlockHandler ): cdef list intervals """ Accumulates intervals into a list of intervals with values """ def __init__( self, bits32 start, bits32 end ): BigWigBlockHandler.__init__( self, start, end ) self.intervals = [] cdef handle_interval_value( self, bits32 s, bits32 e, float val ): self.intervals.append( ( s, e, val ) ) cdef class ArrayAccumulatingBlockHandler( BigWigBlockHandler ): """ Accumulates intervals into a list of intervals with values """ cdef numpy.ndarray array def __init__( self, bits32 start, bits32 end ): BigWigBlockHandler.__init__( self, start, end ) self.array = numpy.zeros( end - start, dtype=numpy.float32 ) self.array[...] 
= numpy.nan cdef handle_interval_value( self, bits32 s, bits32 e, float val ): cdef numpy.ndarray[ numpy.float32_t, ndim=1 ] array = self.array cdef int i # Slicing is not optimized by Cython for i from s - self.start <= i < e - self.start: array[ i ] = val cdef class BigWigHeaderBlockHandler( BigWigBlockHandler ): "Reads and returns headers" cdef list headers def __init__( self, bits32 start, bits32 end ): BigWigBlockHandler.__init__( self, start, end ) self.headers = [] cdef handle_block( self, bytes block_data, BBIFile bbi_file ): cdef bits32 b_chrom_id, b_start, b_end, b_valid_count cdef bits32 b_item_step, b_item_span cdef bits16 b_item_count cdef UBYTE b_type cdef int s, e cdef float val # parse the block header block_reader = BinaryFileReader( BytesIO( block_data ), is_little_endian=bbi_file.reader.is_little_endian ) b_chrom_id = block_reader.read_uint32() b_start = block_reader.read_uint32() b_end = block_reader.read_uint32() b_item_step = block_reader.read_uint32() b_item_span = block_reader.read_uint32() b_type = block_reader.read_uint8() block_reader.skip(1) b_item_count = block_reader.read_uint16() self.handle_header( b_start, b_end, b_item_step, b_item_span, b_type, b_item_count ) cdef handle_header( self, bits32 start, bits32 end, bits32 step, bits32 span, bits8 type, bits16 itemCount ): self.headers.append( ( start, end, step, span, type, itemCount ) ) cdef class BigWigFile( BBIFile ): """ A "big binary indexed" file whose raw data is in wiggle format. """ def __init__( self, file=None ): BBIFile.__init__( self, file, big_wig_sig, "bigwig" ) cdef _summarize_from_full( self, bits32 chrom_id, bits32 start, bits32 end, int summary_size ): """ Create summary from full data. """ v = SummarizingBlockHandler( start, end, summary_size ) self.visit_blocks_in_region( chrom_id, start, end, v ) # Round valid count, in place for i from 0 <= i < summary_size: v.sd.valid_count[i] = round( v.sd.valid_count[i] ) return v.sd cpdef get( self, char * chrom, bits32 start, bits32 end ): """ Gets all data points over the regions `chrom`:`start`-`end`. """ if start >= end: return None chrom_id, chrom_size = self._get_chrom_id_and_size( chrom ) if chrom_id is None: return None v = IntervalAccumulatingBlockHandler( start, end ) self.visit_blocks_in_region( chrom_id, start, end, v ) return v.intervals cpdef get_as_array( self, char * chrom, bits32 start, bits32 end ): """ Gets all data points over the regions `chrom`:`start`-`end`. 
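        Returns a numpy float32 array of length `end` - `start`; positions
        that no record covers are left as NaN.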
""" if start >= end: return None chrom_id, chrom_size = self._get_chrom_id_and_size( chrom ) if chrom_id is None: return None v = ArrayAccumulatingBlockHandler( start, end ) self.visit_blocks_in_region( chrom_id, start, end, v ) return v.array cpdef get_headers( self, char * chrom, bits32 start, bits32 end ): if start >= end: return None chrom_id, chrom_size = self._get_chrom_id_and_size( chrom ) if chrom_id is None: return None v = BigWigHeaderBlockHandler( start, end ) self.visit_blocks_in_region( chrom_id, start, end, v ) return v.headers bx-python-0.8.13/lib/bx/bbi/bigwig_tests.py000066400000000000000000000061351415666465100205350ustar00rootroot00000000000000import os import sys import numpy import pytest try: sys.path.insert(0, os.path.dirname(os.path.dirname(__file__))) except Exception: sys.path.insert(0, os.path.dirname(os.path.abspath("."))) from bx.bbi.bigwig_file import BigWigFile def allclose(a, b, tol=0.00001): """ Like numpy.allclose but treat Nan == Nan """ d = numpy.absolute(a - b) return numpy.all(numpy.isnan(d) | (d < tol)) class TestBigWig: @pytest.fixture(autouse=True) def setUp(self): f = open("test_data/bbi_tests/test.bw", 'rb') self.bw = BigWigFile(file=f) def test_get_summary(self): data = self.bw.query("chr1", 10000, 20000, 10) means = [x['mean'] for x in data] assert numpy.allclose([float(_) for _ in means], [-0.17557571594973645, -0.054009292602539061, -0.056892242431640622, -0.03650328826904297, 0.036112907409667966, 0.0064466032981872557, 0.036949024200439454, 0.076638259887695306, 0.043518108367919923, 0.01554749584197998]) # Summarize variant sd = self.bw.summarize("chr1", 10000, 20000, 10) assert numpy.allclose(sd.sum_data / sd.valid_count, [-0.17557571594973645, -0.054009292602539061, -0.056892242431640622, -0.03650328826904297, 0.036112907409667966, 0.0064466032981872557, 0.036949024200439454, 0.076638259887695306, 0.043518108367919923, 0.01554749584197998]) # Test min and max for this entire summary region data = self.bw.query("chr1", 10000, 20000, 1) maxs = [x['max'] for x in data] mins = [x['min'] for x in data] assert [float(_) for _ in maxs] == [0.289000004529953] assert [float(_) for _ in mins] == [-3.9100000858306885] def test_get_leaf(self): data = self.bw.query("chr1", 11000, 11005, 5) means = [x['mean'] for x in data] assert numpy.allclose([float(_) for _ in means], [0.050842501223087311, -2.4589500427246094, 0.050842501223087311, 0.050842501223087311, 0.050842501223087311]) # Test min and max for this entire leaf region data = self.bw.query("chr1", 11000, 11005, 1) maxs = [x['max'] for x in data] mins = [x['min'] for x in data] assert [float(_) for _ in maxs] == [0.050842501223087311] assert [float(_) for _ in mins] == [-2.4589500427246094] def test_wrong_nochrom(self): data = self.bw.query("chr2", 0, 10000, 10) assert data is None @pytest.mark.parametrize("line", open("test_data/bbi_tests/test.expectation").readlines()) def test_summary_from_file(self, line): fields = line.split() chrom = fields[0] start = int(fields[1]) end = int(fields[2]) n = int(fields[3]) t = fields[4] values = [float(v.replace('n/a', 'NaN')) for v in fields[5:]] sd = self.bw.summarize(chrom, start, end, n) if t == 'mean': assert allclose(sd.sum_data / sd.valid_count, values) elif t == 'min': assert allclose(sd.min_val, values) elif t == 'max': assert allclose(sd.max_val, values) # elif t == 'std': # assert numpy.allclose( sd.max_val, values ) bx-python-0.8.13/lib/bx/bbi/bpt_file.pxd000066400000000000000000000005711415666465100177700ustar00rootroot00000000000000from 
bx.misc.binary_file import BinaryFileReader from types cimport * cdef class BPTFile: """ On disk B+ tree compatible with Jim Kent's bPlusTree.c """ cdef object file cdef object reader cdef boolean is_byteswapped cdef bits32 block_size cdef bits32 key_size cdef bits32 value_size cdef bits64 item_count cdef bits64 root_offset bx-python-0.8.13/lib/bx/bbi/bpt_file.pyx000066400000000000000000000050121415666465100200100ustar00rootroot00000000000000from bx.misc.binary_file import BinaryFileReader DEF bpt_sig = 0x78CA8C91 # bptFileHeaderSize = 32 # bptBlockHeaderSize = 4 cdef class BPTFile: """ On disk B+ tree compatible with Jim Kent's bPlusTree.c """ def __init__(self, file=None): if file is not None: self.attach(file) def attach(self, file): """ Attach to an open file """ self.file = file self.reader = reader = BinaryFileReader(file, bpt_sig) self.is_byteswapped = self.reader.byteswap_needed # Read header stuff self.block_size = reader.read_uint32() self.key_size = reader.read_uint32() self.value_size = reader.read_uint32() self.item_count = reader.read_uint64() reader.skip(8) self.root_offset = reader.tell() def r_find(self, bits64 block_start, key): """ Recursively seek the value matching key under the subtree starting at file offset `block_start` """ cdef UBYTE is_leaf cdef bits16 child_count cdef bits64 offset self.reader.seek(block_start) # Block header is_leaf = self.reader.read_uint8() self.reader.read_uint8() child_count = self.reader.read_uint16() if is_leaf: for i from 0 <= i < child_count: node_key = self.reader.read(self.key_size) node_value = self.reader.read(self.value_size) if node_key == key: return node_value return None else: # Read and discard first key, store offset self.reader.read(self.key_size) offset = self.reader.read_uint64() # Loop until correct subtree is found for i from 0 <= i < child_count - 1: node_key = self.reader.read(self.key_size) if node_key > key: break offset = self.reader.read_uint64() return self.r_find(offset, key) def find(self, key): """ Find the value matching `key` (a bytestring). 
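        Keys longer than the tree's `key_size` can never match and yield
        None immediately; shorter keys are right-padded with NUL bytes
        before the recursive descent from the root.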
Returns the matching value as a bytestring if found, or None """ # Key is greater than key_size, must not be a match if len(key) > self.key_size: return None # Key is less than key_size, right pad with 0 bytes if len(key) < self.key_size: key += b'\0' * (self.key_size - len(key)) # Call the recursive finder return self.r_find(self.root_offset, key) bx-python-0.8.13/lib/bx/bbi/cirtree_file.pxd000066400000000000000000000005651415666465100206430ustar00rootroot00000000000000from types cimport * cdef class CIRTreeFile: cdef object file cdef object reader cdef boolean is_byteswapped cdef bits64 root_offset cdef bits32 block_size cdef bits64 item_count cdef bits32 start_chrom_ix cdef bits32 start_base cdef bits32 end_chrom_ix cdef bits32 end_base cdef bits64 file_size cdef bits32 items_per_slot bx-python-0.8.13/lib/bx/bbi/cirtree_file.pyx000066400000000000000000000106171415666465100206670ustar00rootroot00000000000000from bx.misc.binary_file import BinaryFileReader DEF cir_tree_sig = 0x2468ACE0 cdef int ovcmp( bits32 a_hi, bits32 a_lo, bits32 b_hi, bits32 b_lo ): if a_hi < b_hi: return 1 elif a_hi > b_hi: return -1 else: if a_lo < b_lo: return 1 elif a_lo > b_lo: return -1 else: return 0 cdef overlaps( qchrom, qstart, qend, rstartchrom, rstartbase, rendchrom, rendbase ): return ( ovcmp( qchrom, qstart, rendchrom, rendbase ) > 0 ) and \ ( ovcmp( qchrom, qend, rstartchrom, rstartbase ) < 0 ) cdef class CIRTreeFile: def __init__( self, file=None ): if file is not None: self.attach( file ) def attach( self, file ): """ Attach to an open file """ self.file = file self.reader = reader = BinaryFileReader( file, cir_tree_sig ) self.is_byteswapped = self.reader.byteswap_needed # Header self.block_size = reader.read_uint32() self.item_count = reader.read_uint64() self.start_chrom_ix = reader.read_uint32() self.start_base = reader.read_uint32() self.end_chrom_ix = reader.read_uint32() self.end_base = reader.read_uint32() self.file_size = reader.read_uint64() self.items_per_slot = reader.read_uint32() # Skip reserved reader.read_uint32() # Save root self.root_offset = reader.tell() def r_find_overlapping( self, int level, bits64 index_file_offset, bits32 chrom_ix, bits32 start, bits32 end, object rval, object reader ): cdef UBYTE is_leaf cdef bits16 child_count reader.seek( index_file_offset ) # Block header is_leaf = reader.read_uint8() assert is_leaf == 0 or is_leaf == 1 reader.read_uint8() child_count = reader.read_uint16() # Read block if is_leaf: self.r_find_overlapping_leaf( level, chrom_ix, start, end, rval, child_count, reader ) else: self.r_find_overlapping_parent( level, chrom_ix, start, end, rval, child_count, reader ) def r_find_overlapping_leaf( self, int level, bits32 chrom_ix, bits32 start, bits32 end, object rval, bits16 child_count, object reader ): cdef bits32 start_chrom_ix, start_base, end_chrom_ix, end_base cdef bits64 offset cdef bits64 size for i from 0 <= i < child_count: start_chrom_ix = reader.read_uint32() start_base = reader.read_uint32() end_chrom_ix = reader.read_uint32() end_base = reader.read_uint32() offset = reader.read_uint64() size = reader.read_uint64() if overlaps( chrom_ix, start, end, start_chrom_ix, start_base, end_chrom_ix, end_base ): rval.append( ( offset, size ) ) def r_find_overlapping_parent( self, int level, bits32 chrom_ix, bits32 start, bits32 end, object rval, bits16 child_count, object reader ): # Read and cache offsets for all children to avoid excessive seeking ## cdef bits32 start_chrom_ix[child_count], start_base[child_count], end_chrom_ix[child_count], 
end_base[child_count]
        ## cdef bits64 offset[child_count]
        start_chrom_ix = []; start_base = []; end_chrom_ix = []; end_base = []
        offset = []
        for i from 0 <= i < child_count:
            ## start_chrom_ix[i] = reader.read_bits32()
            ## start_base[i] = reader.read_bits32()
            ## end_chrom_ix[i] = reader.read_bits32()
            ## end_base[i] = reader.read_bits32()
            ## offset[i] = reader.read_bits64()
            start_chrom_ix.append( reader.read_uint32() )
            start_base.append( reader.read_uint32() )
            end_chrom_ix.append( reader.read_uint32() )
            end_base.append( reader.read_uint32() )
            offset.append( reader.read_uint64() )
        # Now recurse
        for i from 0 <= i < child_count:
            if overlaps( chrom_ix, start, end, start_chrom_ix[i], start_base[i], end_chrom_ix[i], end_base[i] ):
                self.r_find_overlapping( level + 1, offset[i], chrom_ix, start, end, rval, reader )

    def find_overlapping_blocks( self, bits32 chrom_ix, bits32 start, bits32 end ):
        rval = []
        self.r_find_overlapping( 0, self.root_offset, chrom_ix, start, end, rval, self.reader )
        return rval
bx-python-0.8.13/lib/bx/bbi/types.pxd000066400000000000000000000004401415666465100173430ustar00rootroot00000000000000ctypedef unsigned char UBYTE
ctypedef signed char BYTE
ctypedef unsigned short UWORD
ctypedef short WORD
ctypedef unsigned long long bits64
ctypedef unsigned bits32
ctypedef unsigned short bits16
ctypedef unsigned char bits8
ctypedef int signed32
ctypedef bint boolean
bx-python-0.8.13/lib/bx/binned_array.py000066400000000000000000000301241415666465100177370ustar00rootroot00000000000000"""
Numeric arrays stored as individually compressed blocks on disk, allowing
pseudo-random access.

`BinnedArray` is used to build such an array in memory and save it to disk.
`BinnedArrayWriter` can instead be used when creating the array sequentially
(does not require keeping all data in memory). `FileBinnedArray` provides
read-only access to an on disk binned array.
"""

import math
import sys
from struct import (
    calcsize,
    pack,
    unpack,
)

from numpy import (
    array,
    concatenate,
    frombuffer,
    NaN,
    resize,
    zeros
)

from bx_extras.lrucache import LRUCache

platform_is_little_endian = (sys.byteorder == 'little')

MAGIC = 0x4AB04612

# Version incremented from version 0 to version 1 by Ian Schenck, June
# 23, 2006. Version 1 supports different typecodes, and in doing so
# breaks the original header format. The new FileBinnedArray is
# backwards compatible with version 0.

# Version 1 -> 2 by James Taylor, allow specifying different compression
# types.
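# On-disk layout, as written by `to_file` / `BinnedArrayWriter` below
# (version 2): magic, version, max_size, bin_size, and nbins as big-endian
# uint32s; a one-byte typecode; a four-byte compression name; the default
# value; an index of (offset, size) pairs, one per bin; then the compressed
# bins themselves.
#
# Minimal round-trip sketch (hypothetical file name):
#
#     ba = BinnedArray()
#     ba[100] = 1.5
#     with open("scores.ba", "wb") as out:
#         ba.to_file(out)
#     assert FileBinnedArray(open("scores.ba", "rb"))[100] == 1.5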
VERSION = 2 # Compression types comp_types = { 'none': (lambda x: x, lambda x: x) } try: import zlib comp_types['zlib'] = (zlib.compress, zlib.decompress) except Exception: pass try: import lzo comp_types['lzo'] = (lzo.compress, lzo.decompress) except Exception: pass MAX = 512*1024*1024 def bytesify(s): if isinstance(s, bytes): return s else: return s.encode() class BinnedArray: def __init__(self, bin_size=512*1024, default=NaN, max_size=MAX, typecode="f"): self.max_size = max_size self.bin_size = bin_size self.nbins = int(math.ceil(max_size / self.bin_size)) self.bins = [None] * self.nbins self.default = default self.typecode = typecode def get_bin_offset(self, index): return index // self.bin_size, index % self.bin_size def init_bin(self, index): # self.bins[index] = zeros( self.bin_size ) * self.default self.bins[index] = zeros(self.bin_size, self.typecode) self.bins[index][:] = self.default def get(self, key): bin, offset = self.get_bin_offset(key) if self.bins[bin] is None: return self.default else: return self.bins[bin][offset] def set(self, key, value): bin, offset = self.get_bin_offset(key) if self.bins[bin] is None: self.init_bin(bin) self.bins[bin][offset] = value def get_range(self, start, end): size = end - start assert size >= 0 rval = [] while size > 0: bin, offset = self.get_bin_offset(start) delta = self.bin_size - offset if self.bins[bin] is None: if delta < size: rval.append(resize(array(self.default, self.typecode), (delta,))) size -= delta start += delta else: rval.append(resize(array(self.default, "f"), (size,))) size = 0 else: if delta < size: rval.append(self.bins[bin][offset:offset+delta]) size -= delta start += delta else: rval.append(self.bins[bin][offset:offset+size]) size = 0 return concatenate(rval) def __getitem__(self, key): if isinstance(key, slice): start, stop, stride = key.indices(self.max_size) assert stride == 1, "Slices with strides are not supported" return self.get_range(start, stop) else: return self.get(key) def __setitem__(self, key, value): return self.set(key, value) def to_file(self, f, comp_type='zlib'): # Get compress method compress, _ = comp_types[comp_type] # Write header write_packed(f, ">5I", MAGIC, VERSION, self.max_size, self.bin_size, self.nbins) # save type code f.write(pack('c', bytesify(self.typecode))) # save compression type f.write(bytesify(comp_type[0:4].ljust(4))) # write default value a = array(self.default, self.typecode) # Struct module can't deal with NaN and endian conversion, we'll hack # around that by byteswapping the array if platform_is_little_endian: a = a.byteswap() f.write(a.tobytes()) # Save current position (start of bin offsets) index_start_pos = f.tell() # Skip forward to save space for index f.seek(calcsize(">2I") * self.nbins, 1) bin_pos_and_size = [] # Write each bin for bin in self.bins: if bin is None: bin_pos_and_size.append((0, 0)) else: assert bin.dtype.char == self.typecode if platform_is_little_endian: s = bin.byteswap().tobytes() else: s = bin.tobytes() compressed = compress(s) bin_pos_and_size.append((f.tell(), len(compressed))) f.write(compressed) # Go back and fill in table f.seek(index_start_pos) for pos, size in bin_pos_and_size: write_packed(f, ">2I", pos, size) class FileBinnedArray: def __init__(self, f, cache=32): # If cache=None, then everything is allowed to stay in memory, # this is the default behavior. 
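        # In this version `cache` defaults to 32; `self.bins` below is an
        # LRUCache keyed by bin index, so at most roughly that many
        # decompressed bins stay resident at once.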
self.f = f M, V, max_size, bin_size, nbins = read_packed(f, ">5I") assert M == MAGIC # assert version less than max supported assert V <= VERSION, "File is version %d but I don't know about anything beyond %d" % (V, VERSION) self.max_size = max_size self.bin_size = bin_size self.nbins = nbins self.bins = LRUCache(size=cache) # Read typecode if V >= 1: self.typecode = (unpack('c', f.read(1))[0]).decode() else: self.typecode = 'f' # Read compression type if V >= 2: self.comp_type = f.read(4).strip().decode() else: self.comp_type = 'zlib' self.decompress = comp_types[self.comp_type][1] # Read default value s = f.read(calcsize(self.typecode)) a = frombuffer(s, self.typecode) if platform_is_little_endian: a = a.byteswap() self.default = a[0] # Read bin sizes and offsets self.bin_pos = [] self.bin_sizes = [] for _ in range(nbins): pos, size = read_packed(f, ">2I") self.bin_pos.append(pos) self.bin_sizes.append(size) def get_bin_offset(self, index): return int(index // self.bin_size), int(index % self.bin_size) def load_bin(self, index): assert self.bin_pos[index] != 0 self.f.seek(self.bin_pos[index]) raw = self.f.read(self.bin_sizes[index]) a = frombuffer(self.decompress(raw), self.typecode) if platform_is_little_endian: a = a.byteswap() assert len(a) == self.bin_size self.bins[index] = a def get(self, key): bin, offset = self.get_bin_offset(key) if bin in self.bins: return self.bins[bin][offset] elif self.bin_pos[bin]: self.load_bin(bin) return self.bins[bin][offset] else: return self.default def get_range(self, start, end): size = end - start assert size >= 0 rval = [] while size > 0: bin, offset = self.get_bin_offset(start) delta = self.bin_size - offset if bin not in self.bins and self.bin_pos[bin] != 0: self.load_bin(bin) if self.bins[bin] is None: if delta < size: rval.append(resize(array(self.default, self.typecode), (delta,))) size -= delta start += delta else: rval.append(resize(array(self.default, self.typecode), (size,))) size = 0 else: if delta < size: rval.append(self.bins[bin][offset:offset+delta]) size -= delta start += delta else: rval.append(self.bins[bin][offset:offset+size]) size = 0 return concatenate(rval) def __getitem__(self, key): if isinstance(key, slice): start, stop, stride = key.indices(self.max_size) assert stride == 1, "Slices with strides are not supported" return self.get_range(start, stop) else: return self.get(key) class BinnedArrayWriter: def __init__(self, f, bin_size=512*1024, default=NaN, max_size=MAX, typecode="f", comp_type='zlib'): # All parameters in the constructor are immutable after creation self.f = f self.max_size = max_size self.bin_size = bin_size self.nbins = int(math.ceil(max_size / self.bin_size)) self.default = default self.typecode = typecode self.bin = 0 self.bin_pos = 0 self.bin_index = [] self.buffer = resize(array(self.default, self.typecode), (self.bin_size,)) self.buffer_contains_values = False self.comp_type = comp_type self.compress = comp_types[comp_type][0] self.write_header() # Put the fp at the start of the data (we go back and fill in the index at the end) self.f.seek(self.data_offset) def write_header(self): self.f.seek(0) # Write header write_packed(self.f, ">5I", MAGIC, VERSION, self.max_size, self.bin_size, self.nbins) # save type code self.f.write(pack('c', bytesify(self.typecode))) # write default value a = array(self.default, self.typecode) # write comp type self.f.write(bytesify(self.comp_type[0:4].ljust(4))) # write default # Struct module can't deal with NaN and endian conversion, we'll hack # around that by 
byteswapping the array if platform_is_little_endian: a = a.byteswap() self.f.write(a.tobytes()) # Save current position (start of bin offsets) self.index_pos = self.f.tell() self.data_offset = self.index_pos + (self.nbins * calcsize(">2I")) def write_index(self): self.f.seek(self.index_pos) for pos, size in self.bin_index: write_packed(self.f, ">2I", pos, size) def skip(self): self.bin_pos += 1 if self.bin_pos == self.bin_size: self.flush() self.bin_pos = 0 self.bin += 1 assert self.bin <= self.nbins self.buffer = resize(array(self.default, self.typecode), (self.bin_size,)) self.buffer_contains_values = False def write(self, data): self.buffer[self.bin_pos] = data self.buffer_contains_values = True self.bin_pos += 1 if self.bin_pos == self.bin_size: self.flush() self.bin_pos = 0 self.bin += 1 assert self.bin <= self.nbins self.buffer = resize(array(self.default, self.typecode), (self.bin_size,)) self.buffer_contains_values = False def flush(self): # Flush buffer to file if self.buffer_contains_values: pos = self.f.tell() if platform_is_little_endian: s = self.buffer.byteswap().tobytes() else: s = self.buffer.tobytes() compressed = self.compress(s) size = len(compressed) assert len(self.bin_index) == self.bin self.bin_index.append((pos, size)) self.f.write(compressed) else: assert len(self.bin_index) == self.bin self.bin_index.append((0, 0)) def finish(self): self.flush() self.nbins = self.bin + 1 self.write_header() self.write_index() def write_packed(f, pattern, *vals): f.write(pack(pattern, *vals)) def read_packed(f, pattern): rval = unpack(pattern, f.read(calcsize(pattern))) if len(rval) == 1: return rval[0] return rval bx-python-0.8.13/lib/bx/binned_array_tests.py000066400000000000000000000065111415666465100211640ustar00rootroot00000000000000""" Tests for `bx.binned_array`. 
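These tests round-trip a mixed random/constant source array through an
in-memory `BinnedArray`, through zlib- and lzo-compressed files, and
through `BinnedArrayWriter`, comparing single elements and random slices
against the source.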
""" from numpy import ( allclose, concatenate, NaN, zeros ) from numpy.random import random_sample as random from .binned_array import ( BinnedArray, BinnedArrayWriter, FileBinnedArray, ) # Bigger values take longer, but excercise more bins CHUNK_SIZE_RANDOM = 945 CHUNK_SIZE_ZEROS = 897 # CHUNK_SIZE_RANDOM=9456 # CHUNK_SIZE_ZEROS=8972 source = target = None def setup(): global source global target source = [] for _ in range(13): if random() < 0.5: source = concatenate((source, random(CHUNK_SIZE_RANDOM))) else: source = concatenate((source, zeros(CHUNK_SIZE_ZEROS, 'f'))) source = source.astype('f') # Set on target target = BinnedArray(128, NaN, len(source)) for i in range(len(source)): # if not isNaN( source[i] ): target[i] = source[i] return source, target def test_simple(): # Verify for i in range(len(source)): assert source[i] == target[i], "No match, index: %d, source: %f, target: %f, len( source ): %d" % (i, source[i], target[i], len(source)) # Verify with slices for _ in range(10): a = int(random() * len(source)) b = int(random() * len(source)) if b < a: a, b = b, a assert allclose(source[a:b], target[a:b]), "No match, index: %d:%d, source: %s, target: %s" % \ (a, b, ",".join(map(str, source[a:a+10])), ",".join(map(str, target[a:a+10]))) def test_file(): # With a file (zlib) target.to_file(open("/tmp/foo", "wb")) target2 = FileBinnedArray(open("/tmp/foo", 'rb')) for i in range(len(source)): assert source[i] == target2[i], "No match, index: %d, source: %d, target: %d" % (i, source[i], target2[i]) # Verify with slices target2 = FileBinnedArray(open("/tmp/foo", 'rb')) for _ in range(10): a = int(random() * len(source)) b = int(random() * len(source)) if b < a: a, b = b, a assert allclose(source[a:b], target[a:b]), "No match, index: %d:%d, source: %s, target: %s" % \ (a, b, ",".join(map(str, source[a:a+10])), ",".join(map(str, target2[a:a+10]))) def test_file_lzo(): # With a file (lzo) target.to_file(open("/tmp/foo3", "wb"), comp_type="lzo") target3 = FileBinnedArray(open("/tmp/foo3", 'rb')) # Verify for i in range(len(source)): assert source[i] == target3[i], "No match, index: %d, source: %d, target: %d" % (i, source[i], target3[i]) # Verify with slices target3 = FileBinnedArray(open("/tmp/foo3", 'rb')) for _ in range(10): a = int(random() * len(source)) b = int(random() * len(source)) if b < a: a, b = b, a assert allclose(source[a:b], target3[a:b]), "No match, index: %d:%d, source: %s, target: %s" % \ (a, b, ",".join(map(str, source[a:a+10])), ",".join(map(str, target3[a:a+10]))) def test_binned_array_writer(): # Test with ba writer o = open("/tmp/foo4", "wb") w = BinnedArrayWriter(o, 128, comp_type='lzo') for val in source: w.write(val) w.finish() o.close() # Verify target4 = FileBinnedArray(open("/tmp/foo4", 'rb')) for i in range(len(source)): assert allclose(source[i], target4[i]), "No match, index: %d, source: %d, target: %d" % (i, source[i], target4[i]) bx-python-0.8.13/lib/bx/bitset.pyx000066400000000000000000000220361415666465100167670ustar00rootroot00000000000000""" Compact mutable sequences of bits (vectors of 0s and 1s) supporting various boolean operations, and a "binned" variation which stores long runs of identical bits compactly. Because the binned implementation avoids a lot of memory allocation and access when working with either small subregions of the total interval or setting / testing spans larger than the bin size, it can be much faster. 
""" import sys cdef extern from "common.h": ctypedef int boolean cdef extern from "bits.h": ctypedef unsigned char Bits # Allocate bits. Bits * bitAlloc( int bitCount ) # Clone bits. Bits * bitClone(Bits* orig, int bitCount ) # Free bits. void bitFree(Bits **pB) # Set a single bit. void bitSetOne(Bits *b, int bitIx) # Clear a single bit. void bitClearOne(Bits *b, int bitIx) # Set a range of bits. void bitSetRange(Bits *b, int startIx, int bitCount) # Read a single bit. int bitReadOne(Bits *b, int bitIx) # Count number of bits set in range. int bitCountRange(Bits *b, int startIx, int bitCount) # Find the index of the the next set bit. int bitFindSet(Bits *b, int startIx, int bitCount) # Find the index of the the next clear bit. int bitFindClear(Bits *b, int startIx, int bitCount) # Clear many bits. void bitClear(Bits *b, int bitCount) # And two bitmaps. Put result in a. void bitAnd(Bits *a, Bits *b, int bitCount) # Or two bitmaps. Put result in a. void bitOr(Bits *a, Bits *b, int bitCount) # Xor two bitmaps. Put result in a. void bitXor(Bits *a, Bits *b, int bitCount) # Flip all bits in a. void bitNot(Bits *a, int bitCount) ## # Print part or all of bit map as a string of 0s and 1s. Mostly useful for ## void bitPrint(Bits *a, int startIx, int bitCount, FILE* out) cdef extern from "binBits.h": struct BinBits: int size int bin_size int nbins Bits ** bins BinBits* binBitsAlloc( int size, int granularity ) void binBitsFree( BinBits * bb ) int binBitsReadOne( BinBits * bb, int pos ) void binBitsSetOne( BinBits * bb, int pos ) void binBitsClearOne( BinBits * bb, int pos ) void binBitsSetRange( BinBits *bb, int start, int size ) int binBitsCountRange( BinBits *bb, int start, int size ) int binBitsFindSet( BinBits *bb, int start ) int binBitsFindClear( BinBits *bb, int start ) void binBitsAnd( BinBits *bb1, BinBits *bb2 ) void binBitsOr( BinBits *bb1, BinBits *bb2 ) void binBitsNot( BinBits *bb ) ## ---- Forward declerations ------------------------------------------------ cdef class BitSet cdef class BinnedBitSet ## ---- BitSet bounds checking ---------------------------------------------- cdef inline b_check_index( BitSet b, index ): if index < 0: raise IndexError( "BitSet index (%d) must be non-negative." % index ) if index >= b.bitCount: raise IndexError( "%d is larger than the size of this BitSet (%d)." % ( index, b.bitCount ) ) cdef inline b_check_range( BitSet b, start, end ): b_check_index( b, start ) if end < start: raise IndexError( "Range end (%d) must be greater than range start(%d)." % ( end, start ) ) if end > b.bitCount: raise IndexError( "End %d is larger than the size of this BitSet (%d)." % ( end, b.bitCount ) ) cdef inline b_check_range_count( BitSet b, start, count ): b_check_index( b, start ) if count < 0: raise IndexError( "Count (%d) must be non-negative." % count ) if start + count > b.bitCount: raise IndexError( "End %d is larger than the size of this BitSet (%d)." % ( start + count, b.bitCount ) ) cdef inline b_check_same_size( BitSet b, BitSet other ): if b.bitCount != other.bitCount: raise ValueError( "BitSets must have the same size" ) ## ---- BitSet -------------------------------------------------------------- # Maximum value of a signed 32 bit integer ( 2**31 - 1 ) cdef int MAX_INT = 2147483647 cdef class BitSet: cdef Bits * bits cdef int bitCount def __cinit__( self, bitCount ): if bitCount > MAX_INT: raise ValueError( "%d is larger than the maximum BitSet size of %d." 
% ( bitCount, MAX_INT ) ) self.bitCount = bitCount self.bits = bitAlloc( bitCount ) def __dealloc__( self ): if self.bits: bitFree( & self.bits ) property size: def __get__( self ): return self.bitCount def set( self, index ): b_check_index( self, index ) bitSetOne( self.bits, index ) def clear( self, index ): b_check_index( self, index ) bitClearOne( self.bits, index ) def clone( self ): other = BitSet( self.bitCount ) other.ior( self ) return other def set_range( self, start, count ): b_check_range_count( self, start, count ) bitSetRange( self.bits, start, count ) def get( self, index ): b_check_index( self, index ) return bitReadOne( self.bits, index ); def count_range( self, start=0, count=None ): if count == None: count = self.bitCount - start b_check_range_count( self, start, count ) return bitCountRange( self.bits, start, count ) def next_set( self, start, end=None ): if end == None: end = self.bitCount b_check_range( self, start, end ) return bitFindSet( self.bits, start, end ) def next_clear( self, start, end=None ): if end == None: end = self.bitCount b_check_range( self, start, end ) return bitFindClear( self.bits, start, end ) def iand( self, BitSet other ): b_check_same_size( self, other ) bitAnd( self.bits, other.bits, self.bitCount ) def ior( self, BitSet other ): b_check_same_size( self, other ) bitOr( self.bits, other.bits, self.bitCount ) def ixor( self, BitSet other ): b_check_same_size( self, other ) bitXor( self.bits, other.bits, self.bitCount ) def invert( self ): bitNot( self.bits, self.bitCount) def __getitem__( self, index ): return self.get( index ) def __iand__( self, other ): self.iand( other ) return self def __ior__( self, other ): self.ior( other ) return self def __invert__( self ): self.invert() return self ## ---- BinnedBitSet bounds checking ---------------------------------------- cdef inline bb_check_index( BinnedBitSet bb, index ): if index < 0: raise IndexError( "BitSet index (%d) must be non-negative." % index ) if index >= bb.bb.size: raise IndexError( "%d is larger than the size of this BitSet (%d)." % ( index, bb.bb.size ) ) cdef inline bb_check_start( BinnedBitSet bb, start ): bb_check_index( bb, start ) cdef inline bb_check_range_count( BinnedBitSet bb, start, count ): bb_check_index( bb, start ) if count < 0: raise IndexError( "Count (%d) must be non-negative." % count ) if start + count > bb.bb.size: raise IndexError( "End (%d) is larger than the size of this BinnedBitSet (%d)." % ( start + count, bb.bb.size ) ) cdef inline bb_check_same_size( BinnedBitSet bb, BinnedBitSet other ): if bb.bb.size != other.bb.size: raise ValueError( "BitSets must have the same size" ) ## ---- BinnedBitSet -------------------------------------------------------- MAX=512*1024*1024 cdef class BinnedBitSet: cdef BinBits * bb def __cinit__( self, size=MAX, granularity=1024 ): if size > MAX_INT: raise ValueError( "%d is larger than the maximum BinnedBitSet size of %d." 
% ( size, MAX_INT ) )
        self.bb = binBitsAlloc( size, granularity )

    def __dealloc__( self ):
        if self.bb:
            binBitsFree( self.bb )

    def __getitem__( self, index ):
        bb_check_index( self, index )
        return binBitsReadOne( self.bb, index )

    def set( self, index ):
        bb_check_index( self, index )
        binBitsSetOne( self.bb, index )

    def clear( self, index ):
        bb_check_index( self, index )
        binBitsClearOne( self.bb, index )

    def set_range( self, int start, count ):
        bb_check_range_count( self, start, count )
        binBitsSetRange( self.bb, start, count )

    def count_range( self, start, count ):
        bb_check_range_count( self, start, count )
        return binBitsCountRange( self.bb, start, count )

    def next_set( self, start ):
        bb_check_start( self, start )
        return binBitsFindSet( self.bb, start )

    def next_clear( self, start ):
        bb_check_start( self, start )
        return binBitsFindClear( self.bb, start )

    property size:
        def __get__( self ):
            return self.bb.size

    property bin_size:
        def __get__( self ):
            return self.bb.bin_size

    def iand( self, BinnedBitSet other ):
        bb_check_same_size( self, other )
        binBitsAnd( self.bb, other.bb )

    def ior( self, BinnedBitSet other ):
        bb_check_same_size( self, other )
        binBitsOr( self.bb, other.bb )

    def invert( self ):
        binBitsNot( self.bb )
bx-python-0.8.13/lib/bx/bitset_builders.py000066400000000000000000000133431415666465100204710ustar00rootroot00000000000000"""
Support for creating dictionaries of `BitSet`s / `BinnedBitSet`s from text
files containing sets of "covered" intervals in sequences (e.g. `BED`_ files).

.. _BED: http://genome.ucsc.edu/FAQ/FAQformat.html#format1
"""

import re
from warnings import warn

from bx.bitset import (
    BinnedBitSet,
    MAX
)


def binned_bitsets_from_file(f, chrom_col=0, start_col=1, end_col=2, strand_col=5, upstream_pad=0, downstream_pad=0, lens={}):
    """
    Read a file into a dictionary of bitsets. Arguments:

    - 'f' should be a file like object (or any iterable containing strings)
    - 'chrom_col', 'start_col', and 'end_col' must exist in each line.
    - 'strand_col' is optional, any line without it will be assumed to be '+'
    - if 'lens' is provided bitset sizes will be looked up from it, otherwise
      chromosomes will be assumed to be the maximum size
    """
    last_chrom = None
    last_bitset = None
    bitsets = dict()
    for line in f:
        if line.startswith("#") or line.isspace():
            continue
        fields = line.split()
        chrom = fields[chrom_col]
        if chrom != last_chrom:
            if chrom not in bitsets:
                if chrom in lens:
                    size = lens[chrom]
                else:
                    size = MAX
                bitsets[chrom] = BinnedBitSet(size)
            last_chrom = chrom
            last_bitset = bitsets[chrom]
        start, end = int(fields[start_col]), int(fields[end_col])
        if upstream_pad:
            start = max(0, start - upstream_pad)
        if downstream_pad:
            end = min(size, end + downstream_pad)
        if start > end:
            warn("Interval start after end!")
        last_bitset.set_range(start, end-start)
    return bitsets


def binned_bitsets_from_bed_file(f, chrom_col=0, start_col=1, end_col=2, strand_col=5, upstream_pad=0, downstream_pad=0, lens={}):
    """
    Read a file into a dictionary of bitsets. Arguments:

    - 'f' should be a file like object (or any iterable containing strings)
    - 'chrom_col', 'start_col', and 'end_col' must exist in each line.
- 'strand_col' is optional, any line without it will be assumed to be '+' - if 'lens' is provided bitset sizes will be looked up from it, otherwise chromosomes will be assumed to be the maximum size """ last_chrom = None last_bitset = None bitsets = dict() offset = 0 for line in f: if line.startswith("#") or line.isspace(): continue # Ignore browser lines completely if line.startswith("browser"): continue # Need to check track lines due to the offset if line.startswith("track"): m = re.search(r"offset=(\d+)", line) if m and m.group(1): offset = int(m.group(1)) continue fields = line.split() chrom = fields[chrom_col] if chrom != last_chrom: if chrom not in bitsets: if chrom in lens: size = lens[chrom] else: size = MAX bitsets[chrom] = BinnedBitSet(size) last_chrom = chrom last_bitset = bitsets[chrom] start, end = int(fields[start_col]) + offset, int(fields[end_col]) + offset if upstream_pad: start = max(0, start - upstream_pad) if downstream_pad: end = min(size, end + downstream_pad) if start > end: warn("Interval start after end!") last_bitset.set_range(start, end-start) return bitsets def binned_bitsets_proximity(f, chrom_col=0, start_col=1, end_col=2, strand_col=5, upstream=0, downstream=0): """Read a file into a dictionary of bitsets""" last_chrom = None last_bitset = None bitsets = dict() for line in f: if line.startswith("#"): continue # print "input=%s" % ( line ), fields = line.split() strand = "+" if len(fields) >= strand_col + 1: if fields[strand_col] == "-": strand = "-" chrom = fields[chrom_col] if chrom != last_chrom: if chrom not in bitsets: bitsets[chrom] = BinnedBitSet(MAX) last_chrom = chrom last_bitset = bitsets[chrom] start, end = int(fields[start_col]), int(fields[end_col]) if strand == "+": if upstream: start = max(0, start - upstream) if downstream: end = min(MAX, end + downstream) if strand == "-": if upstream: end = min(MAX, end + upstream) if downstream: start = max(0, start - downstream) # print "set: start=%d\tend=%d" % ( start, end ) if end-start > 0: last_bitset.set_range(start, end-start) return bitsets def binned_bitsets_from_list(list=[]): """Read a list into a dictionary of bitsets""" last_chrom = None last_bitset = None bitsets = dict() for l in list: chrom = l[0] if chrom != last_chrom: if chrom not in bitsets: bitsets[chrom] = BinnedBitSet(MAX) last_chrom = chrom last_bitset = bitsets[chrom] start, end = int(l[1]), int(l[2]) last_bitset.set_range(start, end - start) return bitsets def binned_bitsets_by_chrom(f, chrom, chrom_col=0, start_col=1, end_col=2): """Read a file by chrom name into a bitset""" bitset = BinnedBitSet(MAX) for line in f: if line.startswith("#"): continue fields = line.split() if fields[chrom_col] == chrom: start, end = int(fields[start_col]), int(fields[end_col]) bitset.set_range(start, end-start) return bitset bx-python-0.8.13/lib/bx/bitset_tests.py000066400000000000000000000067361415666465100200320ustar00rootroot00000000000000""" Tests for `bx.bitset`. 
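The same behavioral checks run against both `BitSet` and `BinnedBitSet`
through the shared `AbstractTests` mix-in below.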
""" import unittest import bx.bitset class AbstractTests: def assert_bits(self, bits, list): assert bits.size == len(list), "Bitset size and verification list size do not match" for i in range(bits.size): self.assertEqual(bits[i], list[i]) def test_overflow_create(self): self.assertRaises(ValueError, self.new_bits, 4000000000) def test_overflow_access(self): bits = self.new_bits(100) self.assertRaises(IndexError, bits.set, -5) self.assertRaises(IndexError, bits.set, 110) def test_access(self): # Create and assert empty bits = self.new_bits(100) l = [0] * 100 self.assert_bits(bits, l) # Set some positions for pos in (11, 14, 70, 16): bits.set(pos) l[pos] = 1 # Clear some positions for pos in (14, 80, 16): bits.clear(pos) l[pos] = 0 self.assert_bits(bits, l) def test_range_access(self): # Create and assert empty bits = self.new_bits(100) l = [0] * 100 self.assert_bits(bits, l) # Set some positions for b, e in ((11, 14), (20, 75), (90, 99)): bits.set_range(b, e-b) for pos in range(b, e): l[pos] = 1 self.assert_bits(bits, l) def test_count(self): # Create and assert empty bits = self.new_bits(100) # Set some positions for b, e in ((11, 14), (20, 75), (90, 100)): bits.set_range(b, e-b) self.assertEqual(bits.count_range(0, 0), 0) self.assertEqual(bits.count_range(0, 20), 3) self.assertEqual(bits.count_range(25, 25), 25) self.assertEqual(bits.count_range(80, 20), 10) self.assertEqual(bits.count_range(0, 100), 68) def test_find(self): # Create and assert empty bits = self.new_bits(100) # Set some positions for b, e in ((11, 14), (20, 75), (90, 100)): bits.set_range(b, e-b) # Next set self.assertEqual(bits.next_set(0), 11) self.assertEqual(bits.next_set(13), 13) self.assertEqual(bits.next_set(15), 20) # Next clear self.assertEqual(bits.next_clear(0), 0) self.assertEqual(bits.next_clear(11), 14) self.assertEqual(bits.next_clear(20), 75) self.assertEqual(bits.next_clear(92), 100) def test_and(self): bits1 = self.new_bits(100) bits2 = self.new_bits(100) bits1.set_range(20, 40) bits2.set_range(50, 25) bits1.iand(bits2) l = [0]*100 for i in range(50, 60): l[i] = 1 self.assert_bits(bits1, l) def test_or(self): bits1 = self.new_bits(100) bits2 = self.new_bits(100) bits1.set_range(20, 40) bits2.set_range(50, 25) bits1.ior(bits2) l = [0]*100 for i in range(20, 75): l[i] = 1 self.assert_bits(bits1, l) def test_not(self): bits = self.new_bits(100) bits.set_range(20, 40) bits.invert() l = [1]*100 for i in range(20, 60): l[i] = 0 self.assert_bits(bits, l) class BitSetTests(AbstractTests, unittest.TestCase): def new_bits(self, size): return bx.bitset.BitSet(size) class BinnedBitSetTests(AbstractTests, unittest.TestCase): def new_bits(self, size): granularity = size % 11 return bx.bitset.BinnedBitSet(size, granularity) bx-python-0.8.13/lib/bx/bitset_utils.py000066400000000000000000000035371415666465100200240ustar00rootroot00000000000000""" Utility functions for working with `Bitset`s and treating lists of (start,end) as `Bitset`s. 
""" from bx.bitset import ( BinnedBitSet, MAX, ) def bitset_intersect(ex1, ex2): bits1 = list2bits(ex1) bits2 = list2bits(ex2) bits1.iand(bits2) return bits2list(bits1) def bitset_subtract(ex1, ex2): bits1 = list2bits(ex1) bits2 = list2bits(ex2) bits2.invert() bits1.iand(bits2) return bits2list(bits1) def list2bits(ex): bits = BinnedBitSet(MAX) for start, end in ex: bits.set_range(start, end - start) return bits def bits2list(bits): ex = [] end = 0 while True: start = bits.next_set(end) if start == bits.size: break end = bits.next_clear(start) ex.append((start, end)) return ex def bitset_complement(exons): bits = BinnedBitSet(MAX) introns = [] for start, end in exons: bits.set_range(start, end - start) bits.invert() # only complement within the range of the list ex_start = min(a[0] for a in exons) ex_end = max(a[1] for a in exons) end = ex_start len = ex_end while True: start = bits.next_set(end) if start == bits.size: break end = bits.next_clear(start) if end > len: end = len if start != end: introns.append((start, end)) if end == len: break return introns def bitset_interval_intersect(bits, istart, iend): rval = [] end = istart len = iend while True: start = bits.next_set(end) if start >= len: break end = bits.next_clear(start) if start != end: rval.append((start, end)) if end >= len: break return rval def bitset_union(exons): bits = list2bits(exons) return bits2list(bits) bx-python-0.8.13/lib/bx/cookbook/000077500000000000000000000000001415666465100165365ustar00rootroot00000000000000bx-python-0.8.13/lib/bx/cookbook/__init__.py000066400000000000000000000055051415666465100206540ustar00rootroot00000000000000""" Various useful utilities, mostly taken from the ASPN Python cookbook. """ import types seq_types = type(()), type([]) def flatten(*args): for arg in args: if type(arg) in seq_types: for elem in arg: yield from flatten(elem) else: yield arg def cross_lists(*sets): """Return the cross product of the arguments""" wheels = [iter(_) for _ in sets] digits = [next(it) for it in wheels] while True: yield digits[:] for i in range(len(digits)-1, -1, -1): try: digits[i] = next(wheels[i]) break except StopIteration: wheels[i] = iter(sets[i]) digits[i] = next(wheels[i]) else: break # Cached / memoized methods def cachedmethod(function): return types.MethodType(Memoize(function), None) class Memoize: def __init__(self, function): self._cache = {} self._callable = function def __call__(self, *args, **kwds): cache = self._cache key = self._getKey(*args, **kwds) try: return cache[key] except KeyError: cachedValue = cache[key] = self._callable(*args, **kwds) return cachedValue def _getKey(self, *args, **kwds): return kwds and (args, ImmutableDict(kwds)) or args class memoized: """Decorator that caches a function's return value each time it is called. If called later with the same arguments, the cached value is returned, and not re-evaluated. """ def __init__(self, func): self.func = func self.cache = {} def __call__(self, *args): try: return self.cache[args] except KeyError: self.cache[args] = value = self.func(*args) return value except TypeError: # uncachable -- for instance, passing a list as an argument. # Better to not cache than to blow up entirely. 
return self.func(*args) def __repr__(self): """Return the function's docstring.""" return self.func.__doc__ class ImmutableDict(dict): '''A hashable dict.''' def __init__(self, *args, **kwds): dict.__init__(self, *args, **kwds) def __setitem__(self, key, value): raise NotImplementedError("dict is immutable") def __delitem__(self, key): raise NotImplementedError("dict is immutable") def clear(self): raise NotImplementedError("dict is immutable") def setdefault(self, k, default=None): raise NotImplementedError("dict is immutable") def popitem(self): raise NotImplementedError("dict is immutable") def update(self, other): raise NotImplementedError("dict is immutable") def __hash__(self): return hash(tuple(self.items())) bx-python-0.8.13/lib/bx/cookbook/argparse.py000066400000000000000000002522441415666465100207250ustar00rootroot00000000000000# Author: Steven J. Bethard . """Command-line parsing library This module is an optparse-inspired command-line parsing library that: - handles both optional and positional arguments - produces highly informative usage messages - supports parsers that dispatch to sub-parsers The following is a simple usage example that sums integers from the command-line and writes the result to a file:: parser = argparse.ArgumentParser( description='sum the integers at the command line') parser.add_argument( 'integers', metavar='int', nargs='+', type=int, help='an integer to be summed') parser.add_argument( '--log', default=sys.stdout, type=argparse.FileType('w'), help='the file where the sum should be written') args = parser.parse_args() args.log.write('%s' % sum(args.integers)) args.log.close() The module contains the following public classes: - ArgumentParser -- The main entry point for command-line parsing. As the example above shows, the add_argument() method is used to populate the parser with actions for optional and positional arguments. Then the parse_args() method is invoked to convert the args at the command-line into an object with attributes. - ArgumentError -- The exception raised by ArgumentParser objects when there are errors with the parser's actions. Errors raised while parsing the command-line are caught by ArgumentParser and emitted as command-line messages. - FileType -- A factory for defining types of files to be created. As the example above shows, instances of FileType are typically passed as the type= argument of add_argument() calls. - Action -- The base class for parser actions. Typically actions are selected by passing strings like 'store_true' or 'append_const' to the action= argument of add_argument(). However, for greater customization of ArgumentParser actions, subclasses of Action may be defined and passed as the action= argument. - HelpFormatter, RawDescriptionHelpFormatter, RawTextHelpFormatter, ArgumentDefaultsHelpFormatter -- Formatter classes which may be passed as the formatter_class= argument to the ArgumentParser constructor. HelpFormatter is the default, RawDescriptionHelpFormatter and RawTextHelpFormatter tell the parser not to change the formatting for help text, and ArgumentDefaultsHelpFormatter adds information about argument defaults to the help. All other classes in this module are considered implementation details. (Also note that HelpFormatter and RawDescriptionHelpFormatter are only considered public as object names -- the API of the formatter objects is still considered an implementation detail.) 
""" __version__ = '1.2.1' __all__ = [ 'ArgumentParser', 'ArgumentError', 'ArgumentTypeError', 'FileType', 'HelpFormatter', 'ArgumentDefaultsHelpFormatter', 'RawDescriptionHelpFormatter', 'RawTextHelpFormatter', 'Namespace', 'Action', 'ONE_OR_MORE', 'OPTIONAL', 'PARSER', 'REMAINDER', 'SUPPRESS', 'ZERO_OR_MORE', ] import copy as _copy import os as _os import re as _re import sys as _sys import textwrap as _textwrap from gettext import gettext as _ try: set except NameError: # for python < 2.4 compatibility (sets module is there since 2.3): from sets import Set as set try: basestring except NameError: basestring = str try: sorted except NameError: # for python < 2.4 compatibility: def sorted(iterable, reverse=False): result = sorted(iterable) if reverse: result.reverse() return result def _callable(obj): return hasattr(obj, '__call__') or hasattr(obj, '__bases__') SUPPRESS = '==SUPPRESS==' OPTIONAL = '?' ZERO_OR_MORE = '*' ONE_OR_MORE = '+' PARSER = 'A...' REMAINDER = '...' _UNRECOGNIZED_ARGS_ATTR = '_unrecognized_args' # ============================= # Utility functions and classes # ============================= class _AttributeHolder: """Abstract base class that provides __repr__. The __repr__ method returns a string in the format:: ClassName(attr=name, attr=name, ...) The attributes are determined either by a class-level attribute, '_kwarg_names', or by inspecting the instance __dict__. """ def __repr__(self): type_name = type(self).__name__ arg_strings = [] for arg in self._get_args(): arg_strings.append(repr(arg)) for name, value in self._get_kwargs(): arg_strings.append(f'{name}={value!r}') return '{}({})'.format(type_name, ', '.join(arg_strings)) def _get_kwargs(self): return sorted(self.__dict__.items()) def _get_args(self): return [] def _ensure_value(namespace, name, value): if getattr(namespace, name, None) is None: setattr(namespace, name, value) return getattr(namespace, name) # =============== # Formatting Help # =============== class HelpFormatter: """Formatter for generating usage messages and argument help strings. Only the name of this class is considered a public API. All the methods provided by the class are considered an implementation detail. """ def __init__(self, prog, indent_increment=2, max_help_position=24, width=None): # default setting for width if width is None: try: width = int(_os.environ['COLUMNS']) except (KeyError, ValueError): width = 80 width -= 2 self._prog = prog self._indent_increment = indent_increment self._max_help_position = max_help_position self._width = width self._current_indent = 0 self._level = 0 self._action_max_length = 0 self._root_section = self._Section(self, None) self._current_section = self._root_section self._whitespace_matcher = _re.compile(r'\s+') self._long_break_matcher = _re.compile(r'\n\n\n+') # =============================== # Section and indentation methods # =============================== def _indent(self): self._current_indent += self._indent_increment self._level += 1 def _dedent(self): self._current_indent -= self._indent_increment assert self._current_indent >= 0, 'Indent decreased below 0.' 
self._level -= 1 class _Section: def __init__(self, formatter, parent, heading=None): self.formatter = formatter self.parent = parent self.heading = heading self.items = [] def format_help(self): # format the indented section if self.parent is not None: self.formatter._indent() join = self.formatter._join_parts for func, args in self.items: func(*args) item_help = join([func(*args) for func, args in self.items]) if self.parent is not None: self.formatter._dedent() # return nothing if the section was empty if not item_help: return '' # add the heading if the section was non-empty if self.heading is not SUPPRESS and self.heading is not None: current_indent = self.formatter._current_indent heading = '%*s%s:\n' % (current_indent, '', self.heading) else: heading = '' # join the section-initial newline, the heading and the help return join(['\n', heading, item_help, '\n']) def _add_item(self, func, args): self._current_section.items.append((func, args)) # ======================== # Message building methods # ======================== def start_section(self, heading): self._indent() section = self._Section(self, self._current_section, heading) self._add_item(section.format_help, []) self._current_section = section def end_section(self): self._current_section = self._current_section.parent self._dedent() def add_text(self, text): if text is not SUPPRESS and text is not None: self._add_item(self._format_text, [text]) def add_usage(self, usage, actions, groups, prefix=None): if usage is not SUPPRESS: args = usage, actions, groups, prefix self._add_item(self._format_usage, args) def add_argument(self, action): if action.help is not SUPPRESS: # find all invocations get_invocation = self._format_action_invocation invocations = [get_invocation(action)] for subaction in self._iter_indented_subactions(action): invocations.append(get_invocation(subaction)) # update the maximum item length invocation_length = max(len(s) for s in invocations) action_length = invocation_length + self._current_indent self._action_max_length = max(self._action_max_length, action_length) # add the item to the list self._add_item(self._format_action, [action]) def add_arguments(self, actions): for action in actions: self.add_argument(action) # ======================= # Help-formatting methods # ======================= def format_help(self): help = self._root_section.format_help() if help: help = self._long_break_matcher.sub('\n\n', help) help = help.strip('\n') + '\n' return help def _join_parts(self, part_strings): return ''.join([part for part in part_strings if part and part is not SUPPRESS]) def _format_usage(self, usage, actions, groups, prefix): if prefix is None: prefix = _('usage: ') # if usage is specified, use that if usage is not None: usage = usage % dict(prog=self._prog) # if no optionals or positionals are available, usage is just prog elif usage is None and not actions: usage = '%(prog)s' % dict(prog=self._prog) # if optionals and positionals are available, calculate usage elif usage is None: prog = '%(prog)s' % dict(prog=self._prog) # split optionals from positionals optionals = [] positionals = [] for action in actions: if action.option_strings: optionals.append(action) else: positionals.append(action) # build full usage string format = self._format_actions_usage action_usage = format(optionals + positionals, groups) usage = ' '.join([s for s in [prog, action_usage] if s]) # wrap the usage parts if it's too long text_width = self._width - self._current_indent if len(prefix) + len(usage) > text_width: # break usage 
into wrappable parts part_regexp = r'\(.*?\)+|\[.*?\]+|\S+' opt_usage = format(optionals, groups) pos_usage = format(positionals, groups) opt_parts = _re.findall(part_regexp, opt_usage) pos_parts = _re.findall(part_regexp, pos_usage) assert ' '.join(opt_parts) == opt_usage assert ' '.join(pos_parts) == pos_usage # helper for wrapping lines def get_lines(parts, indent, prefix=None): lines = [] line = [] if prefix is not None: line_len = len(prefix) - 1 else: line_len = len(indent) - 1 for part in parts: if line_len + 1 + len(part) > text_width: lines.append(indent + ' '.join(line)) line = [] line_len = len(indent) - 1 line.append(part) line_len += len(part) + 1 if line: lines.append(indent + ' '.join(line)) if prefix is not None: lines[0] = lines[0][len(indent):] return lines # if prog is short, follow it with optionals or positionals if len(prefix) + len(prog) <= 0.75 * text_width: indent = ' ' * (len(prefix) + len(prog) + 1) if opt_parts: lines = get_lines([prog] + opt_parts, indent, prefix) lines.extend(get_lines(pos_parts, indent)) elif pos_parts: lines = get_lines([prog] + pos_parts, indent, prefix) else: lines = [prog] # if prog is long, put it on its own line else: indent = ' ' * len(prefix) parts = opt_parts + pos_parts lines = get_lines(parts, indent) if len(lines) > 1: lines = [] lines.extend(get_lines(opt_parts, indent)) lines.extend(get_lines(pos_parts, indent)) lines = [prog] + lines # join lines into usage usage = '\n'.join(lines) # prefix with 'usage:' return f'{prefix}{usage}\n\n' def _format_actions_usage(self, actions, groups): # find group indices and identify actions in groups group_actions = set() inserts = {} for group in groups: try: start = actions.index(group._group_actions[0]) except ValueError: continue else: end = start + len(group._group_actions) if actions[start:end] == group._group_actions: for action in group._group_actions: group_actions.add(action) if not group.required: if start in inserts: inserts[start] += ' [' else: inserts[start] = '[' inserts[end] = ']' else: if start in inserts: inserts[start] += ' (' else: inserts[start] = '(' inserts[end] = ')' for i in range(start + 1, end): inserts[i] = '|' # collect all actions format strings parts = [] for i, action in enumerate(actions): # suppressed arguments are marked with None # remove | separators for suppressed arguments if action.help is SUPPRESS: parts.append(None) if inserts.get(i) == '|': inserts.pop(i) elif inserts.get(i + 1) == '|': inserts.pop(i + 1) # produce all arg strings elif not action.option_strings: part = self._format_args(action, action.dest) # if it's in a group, strip the outer [] if action in group_actions: if part[0] == '[' and part[-1] == ']': part = part[1:-1] # add the action string to the list parts.append(part) # produce the first way to invoke the option in brackets else: option_string = action.option_strings[0] # if the Optional doesn't take a value, format is: # -s or --long if action.nargs == 0: part = '%s' % option_string # if the Optional takes a value, format is: # -s ARGS or --long ARGS else: default = action.dest.upper() args_string = self._format_args(action, default) part = f'{option_string} {args_string}' # make it look optional if it's not required or in a group if not action.required and action not in group_actions: part = '[%s]' % part # add the action string to the list parts.append(part) # insert things at the necessary indices for i in sorted(inserts, reverse=True): parts[i:i] = [inserts[i]] # join all the action items with spaces text = ' '.join([item for item 
in parts if item is not None]) # clean up separators for mutually exclusive groups open = r'[\[(]' close = r'[\])]' text = _re.sub(r'(%s) ' % open, r'\1', text) text = _re.sub(r' (%s)' % close, r'\1', text) text = _re.sub(fr'{open} *{close}', r'', text) text = _re.sub(r'\(([^|]*)\)', r'\1', text) text = text.strip() # return the text return text def _format_text(self, text): if '%(prog)' in text: text = text % dict(prog=self._prog) text_width = self._width - self._current_indent indent = ' ' * self._current_indent return self._fill_text(text, text_width, indent) + '\n\n' def _format_action(self, action): # determine the required width and the entry label help_position = min(self._action_max_length + 2, self._max_help_position) help_width = self._width - help_position action_width = help_position - self._current_indent - 2 action_header = self._format_action_invocation(action) # no help; start on same line and add a final newline if not action.help: tup = self._current_indent, '', action_header action_header = '%*s%s\n' % tup # short action name; start on the same line and pad two spaces elif len(action_header) <= action_width: tup = self._current_indent, '', action_width, action_header action_header = '%*s%-*s ' % tup indent_first = 0 # long action name; start on the next line else: tup = self._current_indent, '', action_header action_header = '%*s%s\n' % tup indent_first = help_position # collect the pieces of the action help parts = [action_header] # if there was help for the action, add lines of help text if action.help: help_text = self._expand_help(action) help_lines = self._split_lines(help_text, help_width) parts.append('%*s%s\n' % (indent_first, '', help_lines[0])) for line in help_lines[1:]: parts.append('%*s%s\n' % (help_position, '', line)) # or add a newline if the description doesn't end with one elif not action_header.endswith('\n'): parts.append('\n') # if there are any sub-actions, add their help as well for subaction in self._iter_indented_subactions(action): parts.append(self._format_action(subaction)) # return a single string return self._join_parts(parts) def _format_action_invocation(self, action): if not action.option_strings: metavar, = self._metavar_formatter(action, action.dest)(1) return metavar else: parts = [] # if the Optional doesn't take a value, format is: # -s, --long if action.nargs == 0: parts.extend(action.option_strings) # if the Optional takes a value, format is: # -s ARGS, --long ARGS else: default = action.dest.upper() args_string = self._format_args(action, default) for option_string in action.option_strings: parts.append(f'{option_string} {args_string}') return ', '.join(parts) def _metavar_formatter(self, action, default_metavar): if action.metavar is not None: result = action.metavar elif action.choices is not None: choice_strs = [str(choice) for choice in action.choices] result = '{%s}' % ','.join(choice_strs) else: result = default_metavar def format(tuple_size): if isinstance(result, tuple): return result else: return (result, ) * tuple_size return format def _format_args(self, action, default_metavar): get_metavar = self._metavar_formatter(action, default_metavar) if action.nargs is None: result = '%s' % get_metavar(1) elif action.nargs == OPTIONAL: result = '[%s]' % get_metavar(1) elif action.nargs == ZERO_OR_MORE: result = '[%s [%s ...]]' % get_metavar(2) elif action.nargs == ONE_OR_MORE: result = '%s [%s ...]' % get_metavar(2) elif action.nargs == REMAINDER: result = '...' elif action.nargs == PARSER: result = '%s ...'
% get_metavar(1) else: formats = ['%s' for _ in range(action.nargs)] result = ' '.join(formats) % get_metavar(action.nargs) return result def _expand_help(self, action): params = dict(vars(action), prog=self._prog) for name in list(params): if params[name] is SUPPRESS: del params[name] for name in list(params): if hasattr(params[name], '__name__'): params[name] = params[name].__name__ if params.get('choices') is not None: choices_str = ', '.join([str(c) for c in params['choices']]) params['choices'] = choices_str return self._get_help_string(action) % params def _iter_indented_subactions(self, action): try: get_subactions = action._get_subactions except AttributeError: pass else: self._indent() yield from get_subactions() self._dedent() def _split_lines(self, text, width): text = self._whitespace_matcher.sub(' ', text).strip() return _textwrap.wrap(text, width) def _fill_text(self, text, width, indent): text = self._whitespace_matcher.sub(' ', text).strip() return _textwrap.fill(text, width, initial_indent=indent, subsequent_indent=indent) def _get_help_string(self, action): return action.help class RawDescriptionHelpFormatter(HelpFormatter): """Help message formatter which retains any formatting in descriptions. Only the name of this class is considered a public API. All the methods provided by the class are considered an implementation detail. """ def _fill_text(self, text, width, indent): return ''.join([indent + line for line in text.splitlines(True)]) class RawTextHelpFormatter(RawDescriptionHelpFormatter): """Help message formatter which retains formatting of all help text. Only the name of this class is considered a public API. All the methods provided by the class are considered an implementation detail. """ def _split_lines(self, text, width): return text.splitlines() class ArgumentDefaultsHelpFormatter(HelpFormatter): """Help message formatter which adds default values to argument help. Only the name of this class is considered a public API. All the methods provided by the class are considered an implementation detail. """ def _get_help_string(self, action): help = action.help if '%(default)' not in action.help: if action.default is not SUPPRESS: defaulting_nargs = [OPTIONAL, ZERO_OR_MORE] if action.option_strings or action.nargs in defaulting_nargs: help += ' (default: %(default)s)' return help # ===================== # Options and Arguments # ===================== def _get_action_name(argument): if argument is None: return None elif argument.option_strings: return '/'.join(argument.option_strings) elif argument.metavar not in (None, SUPPRESS): return argument.metavar elif argument.dest not in (None, SUPPRESS): return argument.dest else: return None class ArgumentError(Exception): """An error from creating or using an argument (optional or positional). The string value of this exception is the message, augmented with information about the argument that caused it. """ def __init__(self, argument, message): self.argument_name = _get_action_name(argument) self.message = message def __str__(self): if self.argument_name is None: format = '%(message)s' else: format = 'argument %(argument_name)s: %(message)s' return format % dict(message=self.message, argument_name=self.argument_name) class ArgumentTypeError(Exception): """An error from trying to convert a command line string to a type.""" # ============== # Action classes # ============== class Action(_AttributeHolder): """Information about how to convert command line strings to Python objects. 
Action objects are used by an ArgumentParser to represent the information needed to parse a single argument from one or more strings from the command line. The keyword arguments to the Action constructor are also all attributes of Action instances. Keyword Arguments: - option_strings -- A list of command-line option strings which should be associated with this action. - dest -- The name of the attribute to hold the created object(s) - nargs -- The number of command-line arguments that should be consumed. By default, one argument will be consumed and a single value will be produced. Other values include: - N (an integer) consumes N arguments (and produces a list) - '?' consumes zero or one arguments - '*' consumes zero or more arguments (and produces a list) - '+' consumes one or more arguments (and produces a list) Note that the difference between the default and nargs=1 is that with the default, a single value will be produced, while with nargs=1, a list containing a single value will be produced. - const -- The value to be produced if the option is specified and the option uses an action that takes no values. - default -- The value to be produced if the option is not specified. - type -- The type which the command-line arguments should be converted to, should be one of 'string', 'int', 'float', 'complex' or a callable object that accepts a single string argument. If None, 'string' is assumed. - choices -- A container of values that should be allowed. If not None, after a command-line argument has been converted to the appropriate type, an exception will be raised if it is not a member of this collection. - required -- True if the action must always be specified at the command line. This is only meaningful for optional command-line arguments. - help -- The help string describing the argument. - metavar -- The name to be used for the option's argument with the help string. If None, the 'dest' value will be used as the name. 
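A minimal sketch of the usual extension pattern, subclassing Action and
overriding __call__ (illustrative only: the class, option name, and
parser variable below are hypothetical, not part of this module):

    class StoreUpperAction(Action):
        # store the supplied value upper-cased instead of as given
        def __call__(self, parser, namespace, values, option_string=None):
            setattr(namespace, self.dest, values.upper())

    parser = ArgumentParser()
    parser.add_argument('--name', action=StoreUpperAction)
    args = parser.parse_args(['--name', 'bx'])  # args.name == 'BX'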
""" def __init__(self, option_strings, dest, nargs=None, const=None, default=None, type=None, choices=None, required=False, help=None, metavar=None): self.option_strings = option_strings self.dest = dest self.nargs = nargs self.const = const self.default = default self.type = type self.choices = choices self.required = required self.help = help self.metavar = metavar def _get_kwargs(self): names = [ 'option_strings', 'dest', 'nargs', 'const', 'default', 'type', 'choices', 'help', 'metavar', ] return [(name, getattr(self, name)) for name in names] def __call__(self, parser, namespace, values, option_string=None): raise NotImplementedError(_('.__call__() not defined')) class _StoreAction(Action): def __init__(self, option_strings, dest, nargs=None, const=None, default=None, type=None, choices=None, required=False, help=None, metavar=None): if nargs == 0: raise ValueError('nargs for store actions must be > 0; if you ' 'have nothing to store, actions such as store ' 'true or store const may be more appropriate') if const is not None and nargs != OPTIONAL: raise ValueError('nargs must be %r to supply const' % OPTIONAL) super().__init__( option_strings=option_strings, dest=dest, nargs=nargs, const=const, default=default, type=type, choices=choices, required=required, help=help, metavar=metavar) def __call__(self, parser, namespace, values, option_string=None): setattr(namespace, self.dest, values) class _StoreConstAction(Action): def __init__(self, option_strings, dest, const, default=None, required=False, help=None, metavar=None): super().__init__( option_strings=option_strings, dest=dest, nargs=0, const=const, default=default, required=required, help=help) def __call__(self, parser, namespace, values, option_string=None): setattr(namespace, self.dest, self.const) class _StoreTrueAction(_StoreConstAction): def __init__(self, option_strings, dest, default=False, required=False, help=None): super().__init__( option_strings=option_strings, dest=dest, const=True, default=default, required=required, help=help) class _StoreFalseAction(_StoreConstAction): def __init__(self, option_strings, dest, default=True, required=False, help=None): super().__init__( option_strings=option_strings, dest=dest, const=False, default=default, required=required, help=help) class _AppendAction(Action): def __init__(self, option_strings, dest, nargs=None, const=None, default=None, type=None, choices=None, required=False, help=None, metavar=None): if nargs == 0: raise ValueError('nargs for append actions must be > 0; if arg ' 'strings are not supplying the value to append, ' 'the append const action may be more appropriate') if const is not None and nargs != OPTIONAL: raise ValueError('nargs must be %r to supply const' % OPTIONAL) super().__init__( option_strings=option_strings, dest=dest, nargs=nargs, const=const, default=default, type=type, choices=choices, required=required, help=help, metavar=metavar) def __call__(self, parser, namespace, values, option_string=None): items = _copy.copy(_ensure_value(namespace, self.dest, [])) items.append(values) setattr(namespace, self.dest, items) class _AppendConstAction(Action): def __init__(self, option_strings, dest, const, default=None, required=False, help=None, metavar=None): super().__init__( option_strings=option_strings, dest=dest, nargs=0, const=const, default=default, required=required, help=help, metavar=metavar) def __call__(self, parser, namespace, values, option_string=None): items = _copy.copy(_ensure_value(namespace, self.dest, [])) items.append(self.const) 
setattr(namespace, self.dest, items) class _CountAction(Action): def __init__(self, option_strings, dest, default=None, required=False, help=None): super().__init__( option_strings=option_strings, dest=dest, nargs=0, default=default, required=required, help=help) def __call__(self, parser, namespace, values, option_string=None): new_count = _ensure_value(namespace, self.dest, 0) + 1 setattr(namespace, self.dest, new_count) class _HelpAction(Action): def __init__(self, option_strings, dest=SUPPRESS, default=SUPPRESS, help=None): super().__init__( option_strings=option_strings, dest=dest, default=default, nargs=0, help=help) def __call__(self, parser, namespace, values, option_string=None): parser.print_help() parser.exit() class _VersionAction(Action): def __init__(self, option_strings, version=None, dest=SUPPRESS, default=SUPPRESS, help="show program's version number and exit"): super().__init__( option_strings=option_strings, dest=dest, default=default, nargs=0, help=help) self.version = version def __call__(self, parser, namespace, values, option_string=None): version = self.version if version is None: version = parser.version formatter = parser._get_formatter() formatter.add_text(version) parser.exit(message=formatter.format_help()) class _SubParsersAction(Action): class _ChoicesPseudoAction(Action): def __init__(self, name, help): sup = super(_SubParsersAction._ChoicesPseudoAction, self) sup.__init__(option_strings=[], dest=name, help=help) def __init__(self, option_strings, prog, parser_class, dest=SUPPRESS, help=None, metavar=None): self._prog_prefix = prog self._parser_class = parser_class self._name_parser_map = {} self._choices_actions = [] super().__init__( option_strings=option_strings, dest=dest, nargs=PARSER, choices=self._name_parser_map, help=help, metavar=metavar) def add_parser(self, name, **kwargs): # set prog from the existing prefix if kwargs.get('prog') is None: kwargs['prog'] = f'{self._prog_prefix} {name}' # create a pseudo-action to hold the choice help if 'help' in kwargs: help = kwargs.pop('help') choice_action = self._ChoicesPseudoAction(name, help) self._choices_actions.append(choice_action) # create the parser and add it to the map parser = self._parser_class(**kwargs) self._name_parser_map[name] = parser return parser def _get_subactions(self): return self._choices_actions def __call__(self, parser, namespace, values, option_string=None): parser_name = values[0] arg_strings = values[1:] # set the parser name if requested if self.dest is not SUPPRESS: setattr(namespace, self.dest, parser_name) # select the parser try: parser = self._name_parser_map[parser_name] except KeyError: tup = parser_name, ', '.join(self._name_parser_map) msg = _('unknown parser %r (choices: %s)' % tup) raise ArgumentError(self, msg) # parse all the remaining options into the namespace # store any unrecognized options on the object, so that the top # level parser can decide what to do with them namespace, arg_strings = parser.parse_known_args(arg_strings, namespace) if arg_strings: vars(namespace).setdefault(_UNRECOGNIZED_ARGS_ATTR, []) getattr(namespace, _UNRECOGNIZED_ARGS_ATTR).extend(arg_strings) # ============== # Type classes # ============== class FileType: """Factory for creating file object types Instances of FileType are typically passed as type= arguments to the ArgumentParser add_argument() method. Keyword Arguments: - mode -- A string indicating how the file is to be opened. Accepts the same values as the builtin open() function. - bufsize -- The file's desired buffer size. 
Accepts the same values as the builtin open() function. """ def __init__(self, mode='r', bufsize=None): self._mode = mode self._bufsize = bufsize def __call__(self, string): # the special argument "-" means sys.std{in,out} if string == '-': if 'r' in self._mode: return _sys.stdin elif 'w' in self._mode: return _sys.stdout else: msg = _('argument "-" with mode %r' % self._mode) raise ValueError(msg) # all other arguments are used as file names if self._bufsize: return open(string, self._mode, self._bufsize) else: return open(string, self._mode) def __repr__(self): args = [self._mode, self._bufsize] args_str = ', '.join([repr(arg) for arg in args if arg is not None]) return f'{type(self).__name__}({args_str})' # =========================== # Optional and Positional Parsing # =========================== class Namespace(_AttributeHolder): """Simple object for storing attributes. Implements equality by attribute names and values, and provides a simple string representation. """ def __init__(self, **kwargs): for name in kwargs: setattr(self, name, kwargs[name]) __hash__ = None def __eq__(self, other): return vars(self) == vars(other) def __ne__(self, other): return not (self == other) def __contains__(self, key): return key in self.__dict__ class _ActionsContainer: def __init__(self, description, prefix_chars, argument_default, conflict_handler): super().__init__() self.description = description self.argument_default = argument_default self.prefix_chars = prefix_chars self.conflict_handler = conflict_handler # set up registries self._registries = {} # register actions self.register('action', None, _StoreAction) self.register('action', 'store', _StoreAction) self.register('action', 'store_const', _StoreConstAction) self.register('action', 'store_true', _StoreTrueAction) self.register('action', 'store_false', _StoreFalseAction) self.register('action', 'append', _AppendAction) self.register('action', 'append_const', _AppendConstAction) self.register('action', 'count', _CountAction) self.register('action', 'help', _HelpAction) self.register('action', 'version', _VersionAction) self.register('action', 'parsers', _SubParsersAction) # raise an exception if the conflict handler is invalid self._get_handler() # action storage self._actions = [] self._option_string_actions = {} # groups self._action_groups = [] self._mutually_exclusive_groups = [] # defaults storage self._defaults = {} # determines whether an "option" looks like a negative number self._negative_number_matcher = _re.compile(r'^-\d+$|^-\d*\.\d+$') # whether or not there are any optionals that look like negative # numbers -- uses a list so it can be shared and edited self._has_negative_number_optionals = [] # ==================== # Registration methods # ==================== def register(self, registry_name, value, object): registry = self._registries.setdefault(registry_name, {}) registry[value] = object def _registry_get(self, registry_name, value, default=None): return self._registries[registry_name].get(value, default) # ================================== # Namespace default accessor methods # ================================== def set_defaults(self, **kwargs): self._defaults.update(kwargs) # if these defaults match any existing arguments, replace # the previous default on the object with the new one for action in self._actions: if action.dest in kwargs: action.default = kwargs[action.dest] def get_default(self, dest): for action in self._actions: if action.dest == dest and action.default is not None: return action.default return 
self._defaults.get(dest, None) # ======================= # Adding argument actions # ======================= def add_argument(self, *args, **kwargs): """ add_argument(dest, ..., name=value, ...) add_argument(option_string, option_string, ..., name=value, ...) """ # if no positional args are supplied or only one is supplied and # it doesn't look like an option string, parse a positional # argument chars = self.prefix_chars if not args or len(args) == 1 and args[0][0] not in chars: if args and 'dest' in kwargs: raise ValueError('dest supplied twice for positional argument') kwargs = self._get_positional_kwargs(*args, **kwargs) # otherwise, we're adding an optional argument else: kwargs = self._get_optional_kwargs(*args, **kwargs) # if no default was supplied, use the parser-level default if 'default' not in kwargs: dest = kwargs['dest'] if dest in self._defaults: kwargs['default'] = self._defaults[dest] elif self.argument_default is not None: kwargs['default'] = self.argument_default # create the action object, and add it to the parser action_class = self._pop_action_class(kwargs) if not _callable(action_class): raise ValueError('unknown action "%s"' % action_class) action = action_class(**kwargs) # raise an error if the action type is not callable type_func = self._registry_get('type', action.type, action.type) if not _callable(type_func): raise ValueError('%r is not callable' % type_func) return self._add_action(action) def add_argument_group(self, *args, **kwargs): group = _ArgumentGroup(self, *args, **kwargs) self._action_groups.append(group) return group def add_mutually_exclusive_group(self, **kwargs): group = _MutuallyExclusiveGroup(self, **kwargs) self._mutually_exclusive_groups.append(group) return group def _add_action(self, action): # resolve any conflicts self._check_conflict(action) # add to actions list self._actions.append(action) action.container = self # index the action by any option strings it has for option_string in action.option_strings: self._option_string_actions[option_string] = action # set the flag if any option strings look like negative numbers for option_string in action.option_strings: if self._negative_number_matcher.match(option_string): if not self._has_negative_number_optionals: self._has_negative_number_optionals.append(True) # return the created action return action def _remove_action(self, action): self._actions.remove(action) def _add_container_actions(self, container): # collect groups by titles title_group_map = {} for group in self._action_groups: if group.title in title_group_map: msg = _('cannot merge actions - two groups are named %r') raise ValueError(msg % (group.title)) title_group_map[group.title] = group # map each action to its group group_map = {} for group in container._action_groups: # if a group with the title exists, use that, otherwise # create a new group matching the container's group if group.title not in title_group_map: title_group_map[group.title] = self.add_argument_group( title=group.title, description=group.description, conflict_handler=group.conflict_handler) # map the actions to their new group for action in group._group_actions: group_map[action] = title_group_map[group.title] # add container's mutually exclusive groups # NOTE: if add_mutually_exclusive_group ever gains title= and # description= then this code will need to be expanded as above for group in container._mutually_exclusive_groups: mutex_group = self.add_mutually_exclusive_group( required=group.required) # map the actions to their new mutex group for action in 
group._group_actions: group_map[action] = mutex_group # add all actions to this container or their group for action in container._actions: group_map.get(action, self)._add_action(action) def _get_positional_kwargs(self, dest, **kwargs): # make sure required is not specified if 'required' in kwargs: msg = _("'required' is an invalid argument for positionals") raise TypeError(msg) # mark positional arguments as required if at least one is # always required if kwargs.get('nargs') not in [OPTIONAL, ZERO_OR_MORE]: kwargs['required'] = True if kwargs.get('nargs') == ZERO_OR_MORE and 'default' not in kwargs: kwargs['required'] = True # return the keyword arguments with no option strings return dict(kwargs, dest=dest, option_strings=[]) def _get_optional_kwargs(self, *args, **kwargs): # determine short and long option strings option_strings = [] long_option_strings = [] for option_string in args: # error on strings that don't start with an appropriate prefix if not option_string[0] in self.prefix_chars: msg = _('invalid option string %r: ' 'must start with a character %r') tup = option_string, self.prefix_chars raise ValueError(msg % tup) # strings starting with two prefix characters are long options option_strings.append(option_string) if option_string[0] in self.prefix_chars: if len(option_string) > 1: if option_string[1] in self.prefix_chars: long_option_strings.append(option_string) # infer destination, '--foo-bar' -> 'foo_bar' and '-x' -> 'x' dest = kwargs.pop('dest', None) if dest is None: if long_option_strings: dest_option_string = long_option_strings[0] else: dest_option_string = option_strings[0] dest = dest_option_string.lstrip(self.prefix_chars) if not dest: msg = _('dest= is required for options like %r') raise ValueError(msg % option_string) dest = dest.replace('-', '_') # return the updated keyword arguments return dict(kwargs, dest=dest, option_strings=option_strings) def _pop_action_class(self, kwargs, default=None): action = kwargs.pop('action', default) return self._registry_get('action', action, action) def _get_handler(self): # determine function from conflict handler string handler_func_name = '_handle_conflict_%s' % self.conflict_handler try: return getattr(self, handler_func_name) except AttributeError: msg = _('invalid conflict_resolution value: %r') raise ValueError(msg % self.conflict_handler) def _check_conflict(self, action): # find all options that conflict with this option confl_optionals = [] for option_string in action.option_strings: if option_string in self._option_string_actions: confl_optional = self._option_string_actions[option_string] confl_optionals.append((option_string, confl_optional)) # resolve any conflicts if confl_optionals: conflict_handler = self._get_handler() conflict_handler(action, confl_optionals) def _handle_conflict_error(self, action, conflicting_actions): message = _('conflicting option string(s): %s') conflict_string = ', '.join([option_string for option_string, _2 in conflicting_actions]) raise ArgumentError(action, message % conflict_string) def _handle_conflict_resolve(self, action, conflicting_actions): # remove all conflicting options for option_string, action in conflicting_actions: # remove the conflicting option action.option_strings.remove(option_string) self._option_string_actions.pop(option_string, None) # if the option now has no option string, remove it from the # container holding it if not action.option_strings: action.container._remove_action(action) class _ArgumentGroup(_ActionsContainer): def __init__(self, container, 
title=None, description=None, **kwargs): # add any missing keyword arguments by checking the container update = kwargs.setdefault update('conflict_handler', container.conflict_handler) update('prefix_chars', container.prefix_chars) update('argument_default', container.argument_default) super_init = super().__init__ super_init(description=description, **kwargs) # group attributes self.title = title self._group_actions = [] # share most attributes with the container self._registries = container._registries self._actions = container._actions self._option_string_actions = container._option_string_actions self._defaults = container._defaults self._has_negative_number_optionals = \ container._has_negative_number_optionals def _add_action(self, action): action = super()._add_action(action) self._group_actions.append(action) return action def _remove_action(self, action): super()._remove_action(action) self._group_actions.remove(action) class _MutuallyExclusiveGroup(_ArgumentGroup): def __init__(self, container, required=False): super().__init__(container) self.required = required self._container = container def _add_action(self, action): if action.required: msg = _('mutually exclusive arguments must be optional') raise ValueError(msg) action = self._container._add_action(action) self._group_actions.append(action) return action def _remove_action(self, action): self._container._remove_action(action) self._group_actions.remove(action) class ArgumentParser(_AttributeHolder, _ActionsContainer): """Object for parsing command line strings into Python objects. Keyword Arguments: - prog -- The name of the program (default: sys.argv[0]) - usage -- A usage message (default: auto-generated from arguments) - description -- A description of what the program does - epilog -- Text following the argument descriptions - parents -- Parsers whose arguments should be copied into this one - formatter_class -- HelpFormatter class for printing help messages - prefix_chars -- Characters that prefix optional arguments - fromfile_prefix_chars -- Characters that prefix files containing additional arguments - argument_default -- The default value for all arguments - conflict_handler -- String indicating how to handle conflicts - add_help -- Add a -h/-help option """ def __init__(self, prog=None, usage=None, description=None, epilog=None, version=None, parents=[], formatter_class=HelpFormatter, prefix_chars='-', fromfile_prefix_chars=None, argument_default=None, conflict_handler='error', add_help=True): if version is not None: import warnings warnings.warn( """The "version" argument to ArgumentParser is deprecated. 
""" """Please use """ """"add_argument(..., action='version', version="N", ...)" """ """instead""", DeprecationWarning) superinit = super().__init__ superinit(description=description, prefix_chars=prefix_chars, argument_default=argument_default, conflict_handler=conflict_handler) # default setting for prog if prog is None: prog = _os.path.basename(_sys.argv[0]) self.prog = prog self.usage = usage self.epilog = epilog self.version = version self.formatter_class = formatter_class self.fromfile_prefix_chars = fromfile_prefix_chars self.add_help = add_help add_group = self.add_argument_group self._positionals = add_group(_('positional arguments')) self._optionals = add_group(_('optional arguments')) self._subparsers = None # register types def identity(string): return string self.register('type', None, identity) # add help and version arguments if necessary # (using explicit default to override global argument_default) if '-' in prefix_chars: default_prefix = '-' else: default_prefix = prefix_chars[0] if self.add_help: self.add_argument( default_prefix+'h', default_prefix*2+'help', action='help', default=SUPPRESS, help=_('show this help message and exit')) if self.version: self.add_argument( default_prefix+'v', default_prefix*2+'version', action='version', default=SUPPRESS, version=self.version, help=_("show program's version number and exit")) # add parent arguments and defaults for parent in parents: self._add_container_actions(parent) try: defaults = parent._defaults except AttributeError: pass else: self._defaults.update(defaults) # ======================= # Pretty __repr__ methods # ======================= def _get_kwargs(self): names = [ 'prog', 'usage', 'description', 'version', 'formatter_class', 'conflict_handler', 'add_help', ] return [(name, getattr(self, name)) for name in names] # ================================== # Optional/Positional adding methods # ================================== def add_subparsers(self, **kwargs): if self._subparsers is not None: self.error(_('cannot have multiple subparser arguments')) # add the parser class to the arguments if it's not present kwargs.setdefault('parser_class', type(self)) if 'title' in kwargs or 'description' in kwargs: title = _(kwargs.pop('title', 'subcommands')) description = _(kwargs.pop('description', None)) self._subparsers = self.add_argument_group(title, description) else: self._subparsers = self._positionals # prog defaults to the usage message of this parser, skipping # optional arguments and with no "usage:" prefix if kwargs.get('prog') is None: formatter = self._get_formatter() positionals = self._get_positional_actions() groups = self._mutually_exclusive_groups formatter.add_usage(self.usage, positionals, groups, '') kwargs['prog'] = formatter.format_help().strip() # create the parsers action and add it to the positionals list parsers_class = self._pop_action_class(kwargs, 'parsers') action = parsers_class(option_strings=[], **kwargs) self._subparsers._add_action(action) # return the created parsers action return action def _add_action(self, action): if action.option_strings: self._optionals._add_action(action) else: self._positionals._add_action(action) return action def _get_optional_actions(self): return [action for action in self._actions if action.option_strings] def _get_positional_actions(self): return [action for action in self._actions if not action.option_strings] # ===================================== # Command line argument parsing methods # ===================================== def parse_args(self, args=None, 
namespace=None): args, argv = self.parse_known_args(args, namespace) if argv: msg = _('unrecognized arguments: %s') self.error(msg % ' '.join(argv)) return args def parse_known_args(self, args=None, namespace=None): # args default to the system args if args is None: args = _sys.argv[1:] # default Namespace built from parser defaults if namespace is None: namespace = Namespace() # add any action defaults that aren't present for action in self._actions: if action.dest is not SUPPRESS: if not hasattr(namespace, action.dest): if action.default is not SUPPRESS: default = action.default if isinstance(action.default, basestring): default = self._get_value(action, default) setattr(namespace, action.dest, default) # add any parser defaults that aren't present for dest in self._defaults: if not hasattr(namespace, dest): setattr(namespace, dest, self._defaults[dest]) # parse the arguments and exit if there are any errors try: namespace, args = self._parse_known_args(args, namespace) if hasattr(namespace, _UNRECOGNIZED_ARGS_ATTR): args.extend(getattr(namespace, _UNRECOGNIZED_ARGS_ATTR)) delattr(namespace, _UNRECOGNIZED_ARGS_ATTR) return namespace, args except ArgumentError: err = _sys.exc_info()[1] self.error(str(err)) def _parse_known_args(self, arg_strings, namespace): # replace arg strings that are file references if self.fromfile_prefix_chars is not None: arg_strings = self._read_args_from_files(arg_strings) # map all mutually exclusive arguments to the other arguments # they can't occur with action_conflicts = {} for mutex_group in self._mutually_exclusive_groups: group_actions = mutex_group._group_actions for i, mutex_action in enumerate(mutex_group._group_actions): conflicts = action_conflicts.setdefault(mutex_action, []) conflicts.extend(group_actions[:i]) conflicts.extend(group_actions[i + 1:]) # find all option indices, and determine the arg_string_pattern # which has an 'O' if there is an option at an index, # an 'A' if there is an argument, or a '-' if there is a '--' option_string_indices = {} arg_string_pattern_parts = [] arg_strings_iter = iter(arg_strings) for i, arg_string in enumerate(arg_strings_iter): # all args after -- are non-options if arg_string == '--': arg_string_pattern_parts.append('-') for arg_string in arg_strings_iter: arg_string_pattern_parts.append('A') # otherwise, add the arg to the arg strings # and note the index if it was an option else: option_tuple = self._parse_optional(arg_string) if option_tuple is None: pattern = 'A' else: option_string_indices[i] = option_tuple pattern = 'O' arg_string_pattern_parts.append(pattern) # join the pieces together to form the pattern arg_strings_pattern = ''.join(arg_string_pattern_parts) # converts arg strings to the appropriate and then takes the action seen_actions = set() seen_non_default_actions = set() def take_action(action, argument_strings, option_string=None): seen_actions.add(action) argument_values = self._get_values(action, argument_strings) # error if this argument is not allowed with other previously # seen arguments, assuming that actions that use the default # value don't really count as "present" if argument_values is not action.default: seen_non_default_actions.add(action) for conflict_action in action_conflicts.get(action, []): if conflict_action in seen_non_default_actions: msg = _('not allowed with argument %s') action_name = _get_action_name(conflict_action) raise ArgumentError(action, msg % action_name) # take the action if we didn't receive a SUPPRESS value # (e.g. 
from a default) if argument_values is not SUPPRESS: action(self, namespace, argument_values, option_string) # function to convert arg_strings into an optional action def consume_optional(start_index): # get the optional identified at this index option_tuple = option_string_indices[start_index] action, option_string, explicit_arg = option_tuple # identify additional optionals in the same arg string # (e.g. -xyz is the same as -x -y -z if no args are required) match_argument = self._match_argument action_tuples = [] while True: # if we found no optional action, skip it if action is None: extras.append(arg_strings[start_index]) return start_index + 1 # if there is an explicit argument, try to match the # optional's string arguments to only this if explicit_arg is not None: arg_count = match_argument(action, 'A') # if the action is a single-dash option and takes no # arguments, try to parse more single-dash options out # of the tail of the option string chars = self.prefix_chars if arg_count == 0 and option_string[1] not in chars: action_tuples.append((action, [], option_string)) char = option_string[0] option_string = char + explicit_arg[0] new_explicit_arg = explicit_arg[1:] or None optionals_map = self._option_string_actions if option_string in optionals_map: action = optionals_map[option_string] explicit_arg = new_explicit_arg else: msg = _('ignored explicit argument %r') raise ArgumentError(action, msg % explicit_arg) # if the action expect exactly one argument, we've # successfully matched the option; exit the loop elif arg_count == 1: stop = start_index + 1 args = [explicit_arg] action_tuples.append((action, args, option_string)) break # error if a double-dash option did not use the # explicit argument else: msg = _('ignored explicit argument %r') raise ArgumentError(action, msg % explicit_arg) # if there is no explicit argument, try to match the # optional's string arguments with the following strings # if successful, exit the loop else: start = start_index + 1 selected_patterns = arg_strings_pattern[start:] arg_count = match_argument(action, selected_patterns) stop = start + arg_count args = arg_strings[start:stop] action_tuples.append((action, args, option_string)) break # add the Optional to the list and return the index at which # the Optional's string args stopped assert action_tuples for action, args, option_string in action_tuples: take_action(action, args, option_string) return stop # the list of Positionals left to be parsed; this is modified # by consume_positionals() positionals = self._get_positional_actions() # function to convert arg_strings into positional actions def consume_positionals(start_index): # match as many Positionals as possible match_partial = self._match_arguments_partial selected_pattern = arg_strings_pattern[start_index:] arg_counts = match_partial(positionals, selected_pattern) # slice off the appropriate arg strings for each Positional # and add the Positional and its args to the list for action, arg_count in zip(positionals, arg_counts): args = arg_strings[start_index: start_index + arg_count] start_index += arg_count take_action(action, args) # slice off the Positionals that we just parsed and return the # index at which the Positionals' string args stopped positionals[:] = positionals[len(arg_counts):] return start_index # consume Positionals and Optionals alternately, until we have # passed the last option string extras = [] start_index = 0 if option_string_indices: max_option_string_index = max(option_string_indices) else: max_option_string_index = 
-1 while start_index <= max_option_string_index: # consume any Positionals preceding the next option next_option_string_index = min( index for index in option_string_indices if index >= start_index) if start_index != next_option_string_index: positionals_end_index = consume_positionals(start_index) # only try to parse the next optional if we didn't consume # the option string during the positionals parsing if positionals_end_index > start_index: start_index = positionals_end_index continue else: start_index = positionals_end_index # if we consumed all the positionals we could and we're not # at the index of an option string, there were extra arguments if start_index not in option_string_indices: strings = arg_strings[start_index:next_option_string_index] extras.extend(strings) start_index = next_option_string_index # consume the next optional and any arguments for it start_index = consume_optional(start_index) # consume any positionals following the last Optional stop_index = consume_positionals(start_index) # if we didn't consume all the argument strings, there were extras extras.extend(arg_strings[stop_index:]) # if we didn't use all the Positional objects, there were too few # arg strings supplied. if positionals: self.error(_('too few arguments')) # make sure all required actions were present for action in self._actions: if action.required: if action not in seen_actions: name = _get_action_name(action) self.error(_('argument %s is required') % name) # make sure all required groups had one option present for group in self._mutually_exclusive_groups: if group.required: for action in group._group_actions: if action in seen_non_default_actions: break # if no actions were used, report the error else: names = [_get_action_name(action) for action in group._group_actions if action.help is not SUPPRESS] msg = _('one of the arguments %s is required') self.error(msg % ' '.join(names)) # return the updated namespace and the extra arguments return namespace, extras def _read_args_from_files(self, arg_strings): # expand arguments referencing files new_arg_strings = [] for arg_string in arg_strings: # for regular arguments, just add them back into the list if arg_string[0] not in self.fromfile_prefix_chars: new_arg_strings.append(arg_string) # replace arguments referencing files with the file content else: try: args_file = open(arg_string[1:]) try: arg_strings = [] for arg_line in args_file.read().splitlines(): for arg in self.convert_arg_line_to_args(arg_line): arg_strings.append(arg) arg_strings = self._read_args_from_files(arg_strings) new_arg_strings.extend(arg_strings) finally: args_file.close() except OSError: err = _sys.exc_info()[1] self.error(str(err)) # return the modified argument list return new_arg_strings def convert_arg_line_to_args(self, arg_line): return [arg_line] def _match_argument(self, action, arg_strings_pattern): # match the pattern for this action to the arg strings nargs_pattern = self._get_nargs_pattern(action) match = _re.match(nargs_pattern, arg_strings_pattern) # raise an exception if we weren't able to find a match if match is None: nargs_errors = { None: _('expected one argument'), OPTIONAL: _('expected at most one argument'), ONE_OR_MORE: _('expected at least one argument'), } default = _('expected %s argument(s)') % action.nargs msg = nargs_errors.get(action.nargs, default) raise ArgumentError(action, msg) # return the number of arguments matched return len(match.group(1)) def _match_arguments_partial(self, actions, arg_strings_pattern): # progressively shorten the 
actions list by slicing off the # final actions until we find a match result = [] for i in range(len(actions), 0, -1): actions_slice = actions[:i] pattern = ''.join([self._get_nargs_pattern(action) for action in actions_slice]) match = _re.match(pattern, arg_strings_pattern) if match is not None: result.extend([len(string) for string in match.groups()]) break # return the list of arg string counts return result def _parse_optional(self, arg_string): # if it's an empty string, it was meant to be a positional if not arg_string: return None # if it doesn't start with a prefix, it was meant to be positional if not arg_string[0] in self.prefix_chars: return None # if the option string is present in the parser, return the action if arg_string in self._option_string_actions: action = self._option_string_actions[arg_string] return action, arg_string, None # if it's just a single character, it was meant to be positional if len(arg_string) == 1: return None # if the option string before the "=" is present, return the action if '=' in arg_string: option_string, explicit_arg = arg_string.split('=', 1) if option_string in self._option_string_actions: action = self._option_string_actions[option_string] return action, option_string, explicit_arg # search through all possible prefixes of the option string # and all actions in the parser for possible interpretations option_tuples = self._get_option_tuples(arg_string) # if multiple actions match, the option string was ambiguous if len(option_tuples) > 1: options = ', '.join([_1 for _0, _1, _2 in option_tuples]) tup = arg_string, options self.error(_('ambiguous option: %s could match %s') % tup) # if exactly one action matched, this segmentation is good, # so return the parsed action elif len(option_tuples) == 1: option_tuple, = option_tuples return option_tuple # if it was not found as an option, but it looks like a negative # number, it was meant to be positional # unless there are negative-number-like options if self._negative_number_matcher.match(arg_string): if not self._has_negative_number_optionals: return None # if it contains a space, it was meant to be a positional if ' ' in arg_string: return None # it was meant to be an optional but there is no such option # in this parser (though it might be a valid option in a subparser) return None, arg_string, None def _get_option_tuples(self, option_string): result = [] # option strings starting with two prefix characters are only # split at the '=' chars = self.prefix_chars if option_string[0] in chars and option_string[1] in chars: if '=' in option_string: option_prefix, explicit_arg = option_string.split('=', 1) else: option_prefix = option_string explicit_arg = None for option_string in self._option_string_actions: if option_string.startswith(option_prefix): action = self._option_string_actions[option_string] tup = action, option_string, explicit_arg result.append(tup) # single character options can be concatenated with their arguments # but multiple character options always have to have their argument # separate elif option_string[0] in chars and option_string[1] not in chars: option_prefix = option_string explicit_arg = None short_option_prefix = option_string[:2] short_explicit_arg = option_string[2:] for option_string in self._option_string_actions: if option_string == short_option_prefix: action = self._option_string_actions[option_string] tup = action, option_string, short_explicit_arg result.append(tup) elif option_string.startswith(option_prefix): action = 
self._option_string_actions[option_string] tup = action, option_string, explicit_arg result.append(tup) # shouldn't ever get here else: self.error(_('unexpected option string: %s') % option_string) # return the collected option tuples return result def _get_nargs_pattern(self, action): # in all examples below, we have to allow for '--' args # which are represented as '-' in the pattern nargs = action.nargs # the default (None) is assumed to be a single argument if nargs is None: nargs_pattern = '(-*A-*)' # allow zero or one arguments elif nargs == OPTIONAL: nargs_pattern = '(-*A?-*)' # allow zero or more arguments elif nargs == ZERO_OR_MORE: nargs_pattern = '(-*[A-]*)' # allow one or more arguments elif nargs == ONE_OR_MORE: nargs_pattern = '(-*A[A-]*)' # allow any number of options or arguments elif nargs == REMAINDER: nargs_pattern = '([-AO]*)' # allow one argument followed by any number of options or arguments elif nargs == PARSER: nargs_pattern = '(-*A[-AO]*)' # all others should be integers else: nargs_pattern = '(-*%s-*)' % '-*'.join('A' * nargs) # if this is an optional action, -- is not allowed if action.option_strings: nargs_pattern = nargs_pattern.replace('-*', '') nargs_pattern = nargs_pattern.replace('-', '') # return the pattern return nargs_pattern # ======================== # Value conversion methods # ======================== def _get_values(self, action, arg_strings): # for everything but PARSER args, strip out '--' if action.nargs not in [PARSER, REMAINDER]: arg_strings = [s for s in arg_strings if s != '--'] # optional argument produces a default when not present if not arg_strings and action.nargs == OPTIONAL: if action.option_strings: value = action.const else: value = action.default if isinstance(value, basestring): value = self._get_value(action, value) self._check_value(action, value) # when nargs='*' on a positional, if there were no command-line # args, use the default if it is anything other than None elif (not arg_strings and action.nargs == ZERO_OR_MORE and not action.option_strings): if action.default is not None: value = action.default else: value = arg_strings self._check_value(action, value) # single argument or optional argument produces a single value elif len(arg_strings) == 1 and action.nargs in [None, OPTIONAL]: arg_string, = arg_strings value = self._get_value(action, arg_string) self._check_value(action, value) # REMAINDER arguments convert all values, checking none elif action.nargs == REMAINDER: value = [self._get_value(action, v) for v in arg_strings] # PARSER arguments convert all values, but check only the first elif action.nargs == PARSER: value = [self._get_value(action, v) for v in arg_strings] self._check_value(action, value[0]) # all other types of nargs produce a list else: value = [self._get_value(action, v) for v in arg_strings] for v in value: self._check_value(action, v) # return the converted value return value def _get_value(self, action, arg_string): type_func = self._registry_get('type', action.type, action.type) if not _callable(type_func): msg = _('%r is not callable') raise ArgumentError(action, msg % type_func) # convert the value to the appropriate type try: result = type_func(arg_string) # ArgumentTypeErrors indicate errors except ArgumentTypeError: name = getattr(action.type, '__name__', repr(action.type)) msg = str(_sys.exc_info()[1]) raise ArgumentError(action, msg) # TypeErrors or ValueErrors also indicate errors except (TypeError, ValueError): name = getattr(action.type, '__name__', repr(action.type)) msg = _('invalid %s 
value: %r') raise ArgumentError(action, msg % (name, arg_string)) # return the converted value return result def _check_value(self, action, value): # converted value must be one of the choices (if specified) if action.choices is not None and value not in action.choices: tup = value, ', '.join(map(repr, action.choices)) msg = _('invalid choice: %r (choose from %s)') % tup raise ArgumentError(action, msg) # ======================= # Help-formatting methods # ======================= def format_usage(self): formatter = self._get_formatter() formatter.add_usage(self.usage, self._actions, self._mutually_exclusive_groups) return formatter.format_help() def format_help(self): formatter = self._get_formatter() # usage formatter.add_usage(self.usage, self._actions, self._mutually_exclusive_groups) # description formatter.add_text(self.description) # positionals, optionals and user-defined groups for action_group in self._action_groups: formatter.start_section(action_group.title) formatter.add_text(action_group.description) formatter.add_arguments(action_group._group_actions) formatter.end_section() # epilog formatter.add_text(self.epilog) # determine help from format above return formatter.format_help() def format_version(self): import warnings warnings.warn( 'The format_version method is deprecated -- the "version" ' 'argument to ArgumentParser is no longer supported.', DeprecationWarning) formatter = self._get_formatter() formatter.add_text(self.version) return formatter.format_help() def _get_formatter(self): return self.formatter_class(prog=self.prog) # ===================== # Help-printing methods # ===================== def print_usage(self, file=None): if file is None: file = _sys.stdout self._print_message(self.format_usage(), file) def print_help(self, file=None): if file is None: file = _sys.stdout self._print_message(self.format_help(), file) def print_version(self, file=None): import warnings warnings.warn( 'The print_version method is deprecated -- the "version" ' 'argument to ArgumentParser is no longer supported.', DeprecationWarning) self._print_message(self.format_version(), file) def _print_message(self, message, file=None): if message: if file is None: file = _sys.stderr file.write(message) # =============== # Exiting methods # =============== def exit(self, status=0, message=None): if message: self._print_message(message, _sys.stderr) _sys.exit(status) def error(self, message): """error(message: string) Prints a usage message incorporating the message to stderr and exits. If you override this in a subclass, it should not return -- it should either exit or raise an exception. """ self.print_usage(_sys.stderr) self.exit(2, _('%s: error: %s\n') % (self.prog, message)) bx-python-0.8.13/lib/bx/cookbook/attribute.py000066400000000000000000000076361415666465100211270ustar00rootroot00000000000000""" Provides functions for creating simple properties. If, inside a class definition, you write: attribute(foo=1, bar=2) simple properties named 'foo' and 'bar' are created for this class. Also, private instance variables '__foo' and '__bar' will be added to instances of this class. 
USAGE: # assumes attribute.py is on path from attribute import * class MyClass(object): readable(foo=1, bar=2) # or, attribute('r', foo=1, bar=2) writable(fro=3, boz=4) # or, attribute('w', fro=3, boz=4) attribute(baz=5) This is equivalent to the following: class MyClass(object): def __init__(self): self.__foo = 1 self.__bar = 2 self.__fro = 3 self.__boz = 4 self.__baz = 5 def get_foo(self): return self.__foo def get_bar(self): return self.__bar def set_fro(self, value): self.__fro = value def set_boz(self, value): self.__boz = value def get_baz(self): return self.__baz def set_baz(self, value): self.__baz = value def del_baz(self): del self.__baz foo = property(fget=get_foo, doc="foo") bar = property(fget=get_bar, doc="bar") fro = property(fset=set_fro, doc="fro") boz = property(fset=set_boz, doc="boz") baz = property(fget=get_baz, fset=set_baz, fdel=del_baz, doc="baz") """ __all__ = ['attribute', 'readable', 'writable'] __version__ = '3.0' __author__ = 'Sean Ross' __credits__ = ['Guido van Rossum', 'Garth Kidd'] __created__ = '10/21/02' import sys def mangle(classname, attrname): """mangles name according to python name-mangling conventions for private variables""" return f"_{classname}__{attrname}" def class_space(classlevel=3): "returns the calling class' name and dictionary" frame = sys._getframe(classlevel) classname = frame.f_code.co_name classdict = frame.f_locals return classname, classdict # convenience function def readable(**kwds): "returns one read-only property for each (key,value) pair in kwds" return _attribute(permission='r', **kwds) # convenience function def writable(**kwds): "returns one write-only property for each (key,value) pair in kwds" return _attribute(permission='w', **kwds) # needed because of the way class_space is resolved in _attribute def attribute(permission='rwd', **kwds): """returns one property for each (key,value) pair in kwds; each property provides the specified level of access(permission): 'r': readable, 'w':writable, 'd':deletable """ return _attribute(permission, **kwds) # based on code by Guido van Rossum, comp.lang.python 2001-07-31 def _attribute(permission='rwd', **kwds): """returns one property for each (key,value) pair in kwds; each property provides the specified level of access(permission): 'r': readable, 'w':writable, 'd':deletable """ classname, classdict = class_space() def _property(attrname, default): propname, attrname = attrname, mangle(classname, attrname) fget, fset, fdel, doc = None, None, None, propname if 'r' in permission: def fget(self): value = default try: value = getattr(self, attrname) except AttributeError: setattr(self, attrname, default) return value if 'w' in permission: def fset(self, value): setattr(self, attrname, value) if 'd' in permission: def fdel(self): try: delattr(self, attrname) except AttributeError: pass # calling fget can restore this attribute, so remove property delattr(self.__class__, propname) return property(fget=fget, fset=fset, fdel=fdel, doc=doc) for attrname, default in kwds.items(): classdict[attrname] = _property(attrname, default) bx-python-0.8.13/lib/bx/cookbook/doc_optparse.py000066400000000000000000000054301415666465100215740ustar00rootroot00000000000000""" :Author: M. Simionato :Date: April 2004 :Title: A much simplified interface to optparse. You should use optionparse in your scripts as follows.
First, write a module level docstring containing something like this (this is just an example):: '''usage: %prog files [options] -d, --delete: delete all files -e, --erase = ERASE: erase the given file''' Then write a main program of this kind (a sketch of a script to delete files):: if __name__=='__main__': import optionparse option,args=optionparse.parse(__doc__) if not args and not option: optionparse.exit() elif option.delete: print("Delete all files") elif option.erase: print("Delete the given file") Notice that ``optionparse`` parses the docstring by looking at the characters ",", ":", "=", "\\n", so be careful in using them. If the docstring is not correctly formatted you will get a SyntaxError or worse, the script will not work as expected. """ import optparse import re import sys import traceback USAGE = re.compile(r'(?s)\s*usage: (.*?)(\n[ \t]*\n|$)') def nonzero(self): # will become the truth-value method of optparse.Values "True if options were given" for v in self.__dict__.values(): if v is not None: return True return False optparse.Values.__bool__ = nonzero # dynamically fix optparse.Values (Python 3) optparse.Values.__nonzero__ = nonzero # same fix under the Python 2 name class ParsingError(Exception): pass optionstring = "" def exception(msg=""): print("Exception while parsing command line:", file=sys.stderr) print(traceback.format_exc(), file=sys.stderr) exit(msg) def exit(msg=""): raise SystemExit(msg or optionstring.replace("%prog", sys.argv[0])) def parse(docstring, arglist=None): global optionstring optionstring = docstring match = USAGE.search(optionstring) if not match: raise ParsingError("Cannot find the option string") optlines = match.group(1).splitlines() try: p = optparse.OptionParser(optlines[0], conflict_handler="resolve") for line in optlines[1:]: opt, help = line.split(':')[:2] # Make both short and long optional (but at least one) # Old: short,long=opt.split(',')[:2] opt_strings = [] action = "store_true" for k in opt.split(', '): k = k.strip() if k.startswith("--") and "=" in k: action = "store" k = k.split("=")[0] opt_strings.append(k) p.add_option(*opt_strings, **dict(action=action, help=help.strip())) except (IndexError, ValueError): raise ParsingError("Cannot parse the option string correctly") return p.parse_args(arglist) def help_callback(option, opt, value, parser, help): print(help, file=sys.stderr) sys.exit(1) bx-python-0.8.13/lib/bx/cookbook/progress_bar.py000066400000000000000000000050761415666465100216100ustar00rootroot00000000000000""" An ASCII text progress bar. See __main__ for command line use (using \r to move the cursor back to the start of the current line is the key; on terminals that do not support this functionality the progress bar will not work as well).
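The carriage-return trick itself is independent of this class; a minimal sketch of the redraw loop (names here are illustrative only):

    import sys
    import time

    for pct in range(101):
        sys.stdout.write("\r%3d%%" % pct)  # "\r" returns to column 0 without a newline
        sys.stdout.flush()                 # force the partial line onto the terminal
        time.sleep(0.01)
    sys.stdout.write("\n")                 # finish with a real newline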
http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/168639 """ import sys class ProgressBar: def __init__(self, minValue=0, maxValue=10, totalWidth=72): self.progBar = "[]" # This holds the progress bar string self.min = minValue self.max = maxValue self.span = maxValue - minValue self.width = totalWidth self.amount = 0 # When amount == max, we are 100% done self.update(0) # Build progress bar string def update(self, newAmount=0): if newAmount < self.min: newAmount = self.min if newAmount > self.max: newAmount = self.max self.amount = newAmount # Figure out the new percent done, round to an integer diffFromMin = float(self.amount - self.min) percentDone = (diffFromMin / float(self.span)) * 100.0 percentDone = round(percentDone) percentDone = int(percentDone) # Figure out how many hash bars the percentage should be allFull = self.width - 2 numHashes = (percentDone / 100.0) * allFull numHashes = int(round(numHashes)) # build a progress bar with hashes and spaces if allFull == numHashes: self.progBar = "[" + '='*(numHashes) + "]" else: self.progBar = "[" + '='*(numHashes-1) + '>' + ' '*(allFull-numHashes) + "]" # figure out where to put the percentage, roughly centered # (integer division so the result can be used as a slice index) percentPlace = (len(self.progBar) // 2) - len(str(percentDone)) percentString = str(percentDone) + "%" # slice the percentage into the bar self.progBar = self.progBar[0:percentPlace] + percentString + self.progBar[percentPlace+len(percentString):] def update_and_print(self, newAmount=0, f=sys.stdout): self.update(newAmount) print("\r", self, end=' ', file=f) f.flush() def __str__(self): return str(self.progBar) def iterprogress(sized_iterable): """ Iterate something printing progress bar to stderr """ pb = ProgressBar(0, len(sized_iterable)) for i, value in enumerate(sized_iterable): yield value pb.update_and_print(i, sys.stderr) if __name__ == "__main__": bar = ProgressBar(0, 1000, 80) for i in range(1000): bar.update(i) print("\r", bar, end=' ') sys.stdout.flush() print() bx-python-0.8.13/lib/bx/filter.py000066400000000000000000000040451415666465100165720ustar00rootroot00000000000000""" Classes for implementing `Pipeline`s composed of `Filter`s (intended to be subclassed).
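A typical use is to subclass ``Filter``, override ``__call__`` (and ``__init__``, which otherwise raises), then compose instances in a ``Pipeline``. A minimal sketch (the ``Upper`` filter is hypothetical):

    class Upper(Filter):
        def __init__(self, **kwargs):
            pass  # override the "AbstractClass" check in Filter.__init__

        def __call__(self, block):
            return block.upper()  # returning a falsy value drops the block

    p = Pipeline()
    p.append(Upper())
    p.run(iter(["foo", "bar"]), print)  # prints FOO then BAR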
""" class Filter: def __init__(self, **kwargs): raise Exception("AbstractClass") def run(self, reader, writer): for block in reader: block = self(block) if block: writer(block) def step(self, reader, writer): block = next(reader) if not block: raise StopIteration block = self(block) if block: writer(block) def __call__(self, block): raise Exception("AbstractMethod") class Pipeline(Filter): def __init__(self, **kwargs): self.pipeline = list() def __call__(self, block): for function in self.pipeline: if not block: return block try: f = function.__call__ except AttributeError: raise TypeError("'" + function.__class__.__name__ + "' is not callable.") block = f(block) return block def append(self, function): try: function.__call__ except AttributeError: raise TypeError("'" + function.__class__.__name__ + "' is not callable.") return self.pipeline.append(function) def remove(self, function): return self.pipeline.remove(function) def extend(self, pipeline): for item in pipeline: self.append(item) # Container interface def __len__(self): return len(self.pipeline) def __getitem__(self, key): return self.pipeline[key] def __setitem__(self, key, value): try: value.__call__ except AttributeError: raise TypeError("'" + value.__class__.__name__ + "' is not callable.") return self.pipeline.__setitem__(key, value) def __delitem__(self, key): return self.pipeline.__delitem__(key) def __iter__(self): return self.pipeline.__iter__() def __contains__(self, item): return self.pipeline.__contains__(item) bx-python-0.8.13/lib/bx/gene_reader.py000066400000000000000000000243211415666465100175440ustar00rootroot00000000000000""" Readers extracting gene (exon and intron) information from bed / gtf / gff formats. - GeneReader: yields exons - CDSReader: yields cds_exons - FeatureReader: yields cds_exons, introns, exons For gff/gtf, the start_codon stop_codon line types are merged with CDSs. 
""" import sys from bx.bitset_utils import ( bitset_complement, bitset_intersect, bitset_subtract, bitset_union, ) def GeneReader(fh, format='gff'): """ yield chrom, strand, gene_exons, name """ known_formats = ('gff', 'gtf', 'bed') if format not in known_formats: print('{} format not in {}'.format(format, ",".join(known_formats)), file=sys.stderr) raise Exception('?') if format == 'bed': for line in fh: f = line.strip().split() chrom = f[0] chrom_start = int(f[1]) name = f[4] strand = f[5] int(f[6]) # cdsStart int(f[7]) # cdsEnd int(f[9]) # blockCount blockSizes = [int(i) for i in f[10].strip(',').split(',')] blockStarts = [chrom_start + int(i) for i in f[11].strip(',').split(',')] # grab cdsStart - cdsEnd gene_exons = [] for base, offset in zip(blockStarts, blockSizes): exon_start = base exon_end = base+offset gene_exons.append((exon_start, exon_end)) yield chrom, strand, gene_exons, name genelist = {} grouplist = [] if format == 'gff' or format == 'gtf': for line in fh: if line.startswith('#'): continue fields = line.strip().split('\t') if len(fields) < 9: continue # fields chrom = fields[0] ex_st = int(fields[3]) - 1 # make zero-centered ex_end = int(fields[4]) # + 1 # make exclusive strand = fields[6] if format == 'gtf': group = fields[8].split(';')[0] else: group = fields[8] if group not in grouplist: grouplist.append(group) if group not in genelist: genelist[group] = (chrom, strand, []) exons_i = 2 genelist[group][exons_i].append((ex_st, ex_end)) # for gene in genelist.values(): for gene in grouplist: chrom, strand, gene_exons = genelist[gene] gene_exons = bitset_union(gene_exons) yield chrom, strand, gene_exons, gene def CDSReader(fh, format='gff'): """ yield chrom, strand, cds_exons, name """ known_formats = ('gff', 'gtf', 'bed') if format not in known_formats: print('{} format not in {}'.format(format, ",".join(known_formats)), file=sys.stderr) raise Exception('?') if format == 'bed': for line in fh: f = line.strip().split() chrom = f[0] chrom_start = int(f[1]) name = f[4] strand = f[5] cdsStart = int(f[6]) cdsEnd = int(f[7]) int(f[9]) # blockCount blockSizes = [int(i) for i in f[10].strip(',').split(',')] blockStarts = [chrom_start + int(i) for i in f[11].strip(',').split(',')] # grab cdsStart - cdsEnd cds_exons = [] for base, offset in zip(blockStarts, blockSizes): if (base + offset) < cdsStart: continue if base > cdsEnd: continue exon_start = max(base, cdsStart) exon_end = min(base+offset, cdsEnd) cds_exons.append((exon_start, exon_end)) yield chrom, strand, cds_exons, name genelist = {} grouplist = [] if format == 'gff' or format == 'gtf': for line in fh: if line.startswith('#'): continue fields = line.strip().split('\t') if len(fields) < 9: continue if fields[2] not in ('CDS', 'stop_codon', 'start_codon'): continue # fields chrom = fields[0] ex_st = int(fields[3]) - 1 # make zero-centered ex_end = int(fields[4]) # + 1 # make exclusive strand = fields[6] if format == 'gtf': group = fields[8].split(';')[0] else: group = fields[8] if group not in grouplist: grouplist.append(group) if group not in genelist: genelist[group] = (chrom, strand, []) genelist[group][2].append((ex_st, ex_end)) # for gene in genelist.values(): for gene in grouplist: chrom, strand, cds_exons = genelist[gene] seqlen = sum(a[1]-a[0] for a in cds_exons) overhang = seqlen % 3 if overhang > 0: if strand == '+': cds_exons[-1] = (cds_exons[-1][0], cds_exons[-1][1] - overhang) else: cds_exons[0] = (cds_exons[0][0] + overhang, cds_exons[0][1]) cds_exons = bitset_union(cds_exons) yield chrom, strand, cds_exons, 
gene def FeatureReader(fh, format='gff', alt_introns_subtract="exons", gtf_parse=None): """ yield chrom, strand, cds_exons, introns, exons, name gtf_parse Example: # parse gene_id from transcript_id "AC073130.2-001"; gene_id "TES"; gene_name = lambda s: s.split(';')[1].split()[1].strip('"') for chrom, strand, cds_exons, introns, exons, name in FeatureReader( sys.stdin, format='gtf', gtf_parse=gene_name ) """ known_formats = ('gff', 'gtf', 'bed') if format not in known_formats: print('{} format not in {}'.format(format, ",".join(known_formats)), file=sys.stderr) raise Exception('?') if format == 'bed': for line in fh: f = line.strip().split() chrom = f[0] chrom_start = int(f[1]) name = f[4] strand = f[5] cdsStart = int(f[6]) cdsEnd = int(f[7]) int(f[9]) # blockCount blockSizes = [int(i) for i in f[10].strip(',').split(',')] blockStarts = [chrom_start + int(i) for i in f[11].strip(',').split(',')] # grab cdsStart - cdsEnd cds_exons = [] exons = [] for base, offset in zip(blockStarts, blockSizes): if (base + offset) < cdsStart: continue if base > cdsEnd: continue # exons exon_start = base exon_end = base+offset exons.append((exon_start, exon_end)) # cds exons exon_start = max(base, cdsStart) exon_end = min(base+offset, cdsEnd) cds_exons.append((exon_start, exon_end)) cds_exons = bitset_union(cds_exons) exons = bitset_union(exons) introns = bitset_complement(exons) yield chrom, strand, cds_exons, introns, exons, name genelist = {} grouplist = [] if format == 'gff' or format == 'gtf': for line in fh: if line.startswith('#'): continue fields = line.strip().split('\t') if len(fields) < 9: continue # fields chrom = fields[0] ex_st = int(fields[3]) - 1 # make zero-centered ex_end = int(fields[4]) # + 1 # make exclusive strand = fields[6] if format == 'gtf': if not gtf_parse: group = fields[8].split(';')[0] else: group = gtf_parse(fields[8]) else: group = fields[8] # Results are listed in the same order as encountered if group not in grouplist: grouplist.append(group) if group not in genelist: # chrom, strand, cds_exons, introns, exons, cds_start, cds_end genelist[group] = [chrom, strand, [], [], [], None, None] if fields[2] == 'exon': genelist[group][4].append((ex_st, ex_end)) elif fields[2] in ('CDS', 'stop_codon', 'start_codon'): genelist[group][2].append((ex_st, ex_end)) if fields[2] == 'start_codon': if strand == '+': genelist[group][5] = ex_st else: genelist[group][5] = ex_end if fields[2] == 'stop_codon': if strand == '+': genelist[group][5] = ex_end else: genelist[group][5] = ex_st elif fields[2] == 'intron': genelist[group][3].append((ex_st, ex_end)) for gene in grouplist: chrom, strand, cds_exons, introns, exons, cds_start, cds_end = genelist[gene] cds_exons = bitset_union(cds_exons) exons = bitset_union(exons) # assure that cds exons were within the cds range if cds_start is not None and cds_end is not None: if strand == '+': cds_exons = bitset_intersect(cds_exons, [(cds_start, cds_end)]) else: cds_exons = bitset_intersect(cds_exons, [(cds_end, cds_start)]) # assure that introns are non-overlapping with themselves or exons if alt_introns_subtract: if alt_introns_subtract == 'exons': introns = bitset_subtract(introns, exons) if alt_introns_subtract == 'cds_exons': introns = bitset_subtract(introns, cds_exons) else: introns = bitset_union(introns) # assure CDS is a multiple of 3, trim from last exon if necessary seqlen = sum(a[1]-a[0] for a in cds_exons) overhang = seqlen % 3 if overhang > 0: if strand == '+': cds_exons[-1] = (cds_exons[-1][0], cds_exons[-1][1] - overhang) else: 
cds_exons[0] = (cds_exons[0][0] + overhang, cds_exons[0][1]) yield chrom, strand, cds_exons, introns, exons, gene bx-python-0.8.13/lib/bx/interval_index_file.py000066400000000000000000000455001415666465100213200ustar00rootroot00000000000000""" Classes for index files that map genomic intervals to values. :Authors: James Taylor (james@bx.psu.edu), Bob Harris (rsharris@bx.psu.edu) An interval index file maps genomic intervals to values. This implementation writes version 2 file format, and reads versions 0, 1 and 2. Index File Format ----------------- All fields are in big-endian format (most significant byte first). All intervals are origin-zero, inclusive start, exclusive end. The file begins with an index file header, then is immediately followed by an index table. The index table points to index headers, and index headers point to bins. Index headers and bins are referenced via pointers (file offsets), and can be placed more or less anywhere in the file. File header ~~~~~~~~~~~ ============ =========== ================================================= offset 0x00: 2C FF 80 0A magic number offset 0x04: 00 00 00 02 version (00 00 00 00 and 00 00 00 01 are also supported) offset 0x08: 00 00 00 2A (N) number of index sets offset 0x0C: ... index table ============ =========== ================================================= Index table ~~~~~~~~~~~ The index table is a list of N index headers, packed sequentially and sorted by name. The first begins at offset 0x0C. Each header describes one set of intervals. ============ =========== ================================================= offset: xx xx xx xx (L) length of index src name offset+4: ... index src name (e.g. canFam1.chr1) offset+4+L: xx xx xx xx offset (in this file) to index data offset+8+L: xx xx xx xx (B) number of bytes in each value; for version 0, this field is absent, and B is assumed to be 4 ============ =========== ================================================= Index data ~~~~~~~~~~ The index data (for one index table) consists of the overall range of intervals followed by an array of pointers to bins. The length of the array is 1+binForRange(maxEnd-1,maxEnd), where maxEnd is the maximum interval end. ============ =========== ================================================= offset: xx xx xx xx minimum interval start offset+4: xx xx xx xx maximum interval end offset+8: xx xx xx xx offset (in this file) to bin 0 offset+12: xx xx xx xx number of intervals in bin 0 offset+16: xx xx xx xx offset (in this file) to bin 1 offset+20: xx xx xx xx number of intervals in bin 1 ... ... ... ============ =========== ================================================= Bin ~~~ A bin is an array of (start,end,val), sorted by increasing start (with end and val as tiebreakers). Note that bins may be empty (the number of intervals indicated in the index data is zero). Note that B is determined from the appropriate entry in the index table. ============ =========== ================================================= offset: xx xx xx xx start for interval 1 offset+4: xx xx xx xx end for interval 1 offset+8: ... (B bytes) value for interval 1 offset+8+B: xx xx xx xx start for interval 2 offset+12+B: xx xx xx xx end for interval 2 offset+16+B: ... (B bytes) value for interval 2 ... ... ...
============ =========== ================================================= """ import os.path import sys from bisect import ( insort, insort_right ) from struct import ( calcsize, pack, unpack ) from warnings import warn from bx.misc import filecache try: from bx.misc import seekbzip2 except ImportError: seekbzip2 = None try: from bx.misc import seeklzop except ImportError: seeklzop = None __all__ = ['Indexes', 'Index'] MAGIC = 0x2cff800a VERSION = 2 # These three constants determine the structure of the default binning strategy BIN_LEVELS = 6 # Number of levels of bins to build BIN_FIRST_SHIFT = 17 # Number of bits for the bottom level bin BIN_NEXT_SHIFT = 3 # Number of bits for each higher level bin # Build offset and max size arrays for each bin level BIN_OFFSETS = [1, 0] BIN_OFFSETS_MAX = [(1 << BIN_FIRST_SHIFT << BIN_NEXT_SHIFT), (1 << BIN_FIRST_SHIFT)] for i in range(BIN_LEVELS - 2): BIN_OFFSETS.insert(0, (2 ** (3 * (i + 1))) + BIN_OFFSETS[0]) BIN_OFFSETS_MAX.insert(0, (BIN_OFFSETS_MAX[0] << BIN_NEXT_SHIFT)) # The maximum size for the top bin is actually bigger than the signed integers # we use to store positions in the file, so we'll change it to prevent confusion BIN_OFFSETS_MAX[0] = sys.maxsize # Constants for the minimum and maximum size of the overall interval MIN = 0 OLD_MAX = 512 * 1024 * 1024 # Maximum size supported by versions < 2 DEFAULT_MAX = 512 * 1024 * 1024 # Default max size to use when none is passed MAX = 2 ** 31 # Absolute max size (limited by file format) def offsets_for_max_size(max_size): """ Return the subset of offsets needed to contain intervals over (0,max_size) """ for i, max in enumerate(reversed(BIN_OFFSETS_MAX)): if max_size < max: break else: raise Exception("%d is larger than the maximum possible size (%d)" % (max_size, BIN_OFFSETS_MAX[0])) return BIN_OFFSETS[(len(BIN_OFFSETS) - i - 1):] def bin_for_range(start, end, offsets=None): """Find the smallest bin that can contain interval (start,end)""" if offsets is None: offsets = BIN_OFFSETS start_bin, end_bin = start, max(start, end - 1) start_bin >>= BIN_FIRST_SHIFT end_bin >>= BIN_FIRST_SHIFT for offset in offsets: if start_bin == end_bin: return offset + start_bin else: start_bin >>= BIN_NEXT_SHIFT end_bin >>= BIN_NEXT_SHIFT raise Exception("Interval (%d,%d) out of range" % (start, end)) class AbstractMultiIndexedAccess: """ Allows accessing multiple indexes / files as if they were one """ indexed_access_class = None def __init__(self, filenames, index_filenames=None, keep_open=False, use_cache=False, **kwargs): # TODO: Handle index_filenames argument self.indexes = [ self.new_indexed_access(fname, keep_open=keep_open, use_cache=use_cache, **kwargs) for fname in filenames] def new_indexed_access(self, data_filename, index_filename=None, keep_open=False, **kwargs): return self.indexed_access_class(data_filename, index_filename, keep_open, **kwargs) def get(self, src, start, end): return [block for block in self.get_as_iterator(src, start, end)] def get_as_iterator(self, src, start, end): for block, _index, _offset in self.get_as_iterator_with_index_and_offset(src, start, end): yield block def get_as_iterator_with_index_and_offset(self, src, start, end): for index in self.indexes: yield from index.get_as_iterator_with_index_and_offset(src, start, end) def close(self): for index in self.indexes: index.close() class AbstractIndexedAccess: """Indexed access to data using overlap queries, requires an index file""" def __init__(self, data_filename, index_filename=None, keep_open=False, use_cache=False, **kwargs):
self.data_kwargs = kwargs self.data_filename = data_filename if data_filename.endswith(".bz2"): if seekbzip2 is None: raise Exception("Trying to open .bz2 file but no seekbzip2 module found") table_filename = data_filename + "t" self.table_filename = table_filename if not os.path.exists(table_filename): raise Exception("Cannot find bz2t file for: " + data_filename) self.file_type = "bz2t" # Strip .bz2 from the filename before adding ".index" data_filename_root = data_filename[:-4] elif data_filename.endswith(".lzo"): if seeklzop is None: raise Exception("Trying to open .lzo file but no seeklzop module found") table_filename = data_filename + "t" self.table_filename = table_filename if not os.path.exists(table_filename): raise Exception("Cannot find lzot file for: " + data_filename) self.file_type = "lzot" # Strip .lzo from the filename before adding ".index" data_filename_root = data_filename[:-4] else: self.file_type = "plain" data_filename_root = data_filename # Open index if index_filename is None: index_filename = data_filename_root + ".index" self.indexes = Indexes(filename=index_filename) # Use a file cache? self.use_cache = use_cache # Open now? if keep_open: self.f = self.open_data() else: self.f = None def close(self): if self.f: self.f.close() self.f = None def open_data(self): if self.file_type == "plain": return open(self.data_filename, 'rb') elif self.file_type == "bz2t": f = seekbzip2.SeekableBzip2File(self.data_filename, self.table_filename) if self.use_cache: return filecache.FileCache(f, f.size) else: return f elif self.file_type == "lzot": if self.use_cache: block_cache_size = 20 else: block_cache_size = 0 f = seeklzop.SeekableLzopFile(self.data_filename, self.table_filename, block_cache_size=block_cache_size) return f def get(self, src, start, end): return [val for val in self.get_as_iterator(src, start, end)] def get_as_iterator(self, src, start, end): for val, _index, _offset in self.get_as_iterator_with_index_and_offset(src, start, end): yield val def get_as_iterator_with_index_and_offset(self, src, start, end): for _val_start, _val_end, val in self.indexes.find(src, start, end): yield self.get_at_offset(val), self, val def get_at_offset(self, offset): if self.f: self.f.seek(offset) return self.read_at_current_offset(self.f, **self.data_kwargs) else: f = self.open_data() try: f.seek(offset) return self.read_at_current_offset(f, **self.data_kwargs) finally: f.close() def read_at_current_offset(self, file, **kwargs): raise TypeError("Abstract Method") class Indexes: """A set of indexes, each identified by a unique name""" def __init__(self, filename=None): self.indexes = dict() if filename is not None: self.open(filename) def add(self, name, start, end, val, max=DEFAULT_MAX): if name not in self.indexes: self.indexes[name] = Index(max=max) self.indexes[name].add(start, end, val) def get(self, name): if self.indexes[name] is None: offset, value_size = self.offsets[name] self.indexes[name] = Index(filename=self.filename, offset=offset, value_size=value_size, version=self.version) return self.indexes[name] def find(self, name, start, end): if name in self.indexes: return self.get(name).find(start, end) else: return [] def open(self, filename): self.filename = filename self.offsets = dict() # (will map key to (offset,value_size)) with open(filename, 'rb') as f: magic, version, length = read_packed(f, ">3I") if magic != MAGIC: raise Exception("File does not have expected header") if version > VERSION: warn("File claims version %d, I don't know anything about versions beyond %d. Attempting to continue" % (version, VERSION)) self.version = version for _ in range(length): key_len = read_packed(f, ">I") key = f.read(key_len).decode() offset = read_packed(f, ">I") if version == 0: value_size = 4 else: value_size = read_packed(f, ">I") assert value_size % 4 == 0, "unsupported value size: %s" % value_size self.indexes[key] = None self.offsets[key] = (offset, value_size) def write(self, f): keys = sorted(self.indexes.keys()) # First determine the size of the header base = calcsize(">3I") for key in keys: key = str(key) base += calcsize(">I") base += len(key) base += calcsize(">2I") # Now actually write the header write_packed(f, ">3I", MAGIC, VERSION, len(self.indexes)) # And write the index table for key in keys: key = str(key) # Write the string prefixed by its length (pascal!) write_packed(f, ">I", len(key)) f.write(key.encode()) # Write offset write_packed(f, ">I", base) base += self.indexes[key].bytes_required() # Write value size write_packed(f, ">I", self.indexes[key].value_size) # And finally write each index in order for key in keys: self.indexes[key].write(f) class Index: def __init__(self, min=MIN, max=DEFAULT_MAX, filename=None, offset=0, value_size=None, version=None): self._value_size = value_size self.max_val = 1 # (1, rather than 0, to force value_size > 0) if filename is None: self.new(min, max) else: self.open(filename, offset, version) def get_value_size(self): if self._value_size is not None: return self._value_size else: return round_up_to_4(bytes_of(self.max_val)) value_size = property(fget=get_value_size) def new(self, min, max): """Create an empty index for intervals in the range min, max""" # Ensure the range will fit given the shifting strategy assert MIN <= min <= max <= MAX self.min = min self.max = max # Determine offsets to use self.offsets = offsets_for_max_size(max) # Determine the largest bin we will actually use self.bin_count = bin_for_range(max - 1, max, offsets=self.offsets) + 1 # Create empty bins self.bins = [[] for i in range(self.bin_count)] def open(self, filename, offset, version): self.filename = filename self.offset = offset # Open the file and seek to where we expect our header f = open(filename, 'rb') f.seek(offset) # Read min/max min, max = read_packed(f, ">2I") self.new(min, max) # Decide how many levels of bins based on 'max' if version < 2: # Prior to version 2 all files used the bins for 512MB self.offsets = offsets_for_max_size(OLD_MAX - 1) else: self.offsets = offsets_for_max_size(max) # Read bin indexes self.bin_offsets = [] self.bin_sizes = [] for _ in range(self.bin_count): o, s = read_packed(f, ">2I") self.bin_offsets.append(o) self.bin_sizes.append(s) # Initialize bins to None, indicating that they need to be loaded self.bins = [None for _ in range(self.bin_count)] def add(self, start, end, val): """Add the interval (start,end) with associated value val to the index""" insort(self.bins[bin_for_range(start, end, offsets=self.offsets)], (start, end, val)) assert val >= 0 self.max_val = max(self.max_val, val) def find(self, start, end): rval = [] start_bin = (max(start, self.min)) >> BIN_FIRST_SHIFT end_bin = (min(end, self.max) - 1) >> BIN_FIRST_SHIFT for offset in self.offsets: for i in range(start_bin + offset, end_bin + offset + 1): if self.bins[i] is None: self.load_bin(i) # Iterate over bin and insert any overlapping elements into return value for el_start, el_end, val in self.bins[i]: if el_start < end and el_end > start: insort_right(rval, (el_start, el_end, val)) start_bin >>= BIN_NEXT_SHIFT end_bin >>=
BIN_NEXT_SHIFT return rval def iterate(self): for i in range(self.bin_count): if self.bins[i] is None: self.load_bin(i) yield from self.bins[i] def load_bin(self, index): bin = [] if self.bin_sizes[index] == 0: self.bins[index] = bin return f = open(self.filename, 'rb') f.seek(self.bin_offsets[index]) # One big read for happy NFS item_size = self.value_size + calcsize(">2I") buffer = f.read(self.bin_sizes[index] * item_size) for i in range(self.bin_sizes[index]): start, end = unpack(">2I", buffer[i*item_size:i*item_size+8]) val = unpack_uints(buffer[i*item_size+8:(i+1)*item_size]) bin.append((start, end, val)) self.bins[index] = bin f.close() def write(self, f): value_size = self.value_size item_size = value_size + calcsize(">2I") # Write min/max write_packed(f, ">2I", self.min, self.max) # Write table of bin sizes and offsets base = f.tell() + self.bin_count * calcsize(">2I") for bin in self.bins: write_packed(f, ">2I", base, len(bin)) base += len(bin) * item_size # Write contents of each bin for bin in self.bins: for start, end, val in bin: write_packed(f, ">2I", start, end) write_packed_uints(f, val, value_size) def bytes_required(self): item_size = self.value_size + calcsize(">2I") rval = calcsize(">2I") rval += self.bin_count * calcsize(">2I") for bin in self.bins: rval += len(bin) * item_size return rval def write_packed(f, pattern, *vals): f.write(pack(pattern, *vals)) def read_packed(f, pattern): rval = unpack(pattern, f.read(calcsize(pattern))) if len(rval) == 1: return rval[0] return rval def write_packed_uints(f, v, num_bytes): if num_bytes < 4: write_packed(f, ">I", v) else: parts = [] while num_bytes > 0: parts.append(v & 0xFFFFFFFF) v >>= 32 num_bytes -= 4 parts.reverse() # (write most-significant chunk first) write_packed(f, ">%dI" % len(parts), *parts) def unpack_uints(parts): chunks = len(parts)/4 vals = unpack(">%dI" % chunks, parts) val = vals[0] for v in vals[1:]: val = (val << 32) + v return val def bytes_of(v): assert v > 0 b = 0 while v > 0: v >>= 8 b += 1 return b def round_up_to_4(v): if v % 4 == 0: return v else: return v + 4 - (v % 4) bx-python-0.8.13/lib/bx/interval_index_file_tests.py000066400000000000000000000030251415666465100225360ustar00rootroot00000000000000import random from tempfile import mktemp from . 
import interval_index_file from .interval_index_file import Indexes def test_offsets(): assert interval_index_file.offsets_for_max_size(512*1024*1024 - 1) == [512 + 64 + 8 + 1, 64 + 8 + 1, 8 + 1, 1, 0] def test_interval_index_file(): ix = Indexes() chrs = [] for i in range(5): intervals = [] name = "seq%d" % i max = random.randint(0, interval_index_file.MAX) # print name, "size", max for i in range(500): start = random.randint(0, max) end = random.randint(0, max) if end < start: end, start = start, end ix.add(name, start, end, i, max=interval_index_file.MAX) intervals.append((start, end, i)) chrs.append(intervals) fname = mktemp() f = open(fname, "wb") ix.write(f) f.close() del ix ix = Indexes(fname) for i in range(5): intervals = chrs[i] name = "seq%d" % i for i in range(100): start = random.randint(0, max) end = random.randint(0, max) if end < start: end, start = start, end query_intervals = set() for (s, e, i) in intervals: if e > start and s < end: query_intervals.add((s, e, i)) result = ix.find(name, start, end) for inter in result: assert inter in query_intervals def test_zero(): ix = Indexes() ix.add("t.idx", 0, 0, 1, 123) bx-python-0.8.13/lib/bx/intervals/000077500000000000000000000000001415666465100167375ustar00rootroot00000000000000bx-python-0.8.13/lib/bx/intervals/__init__.py000066400000000000000000000003311415666465100210450ustar00rootroot00000000000000""" Tools and data structures for working with genomic intervals (or sets of regions on a line in general) efficiently. """ # For compatibility with existing stuff from bx.intervals.intersection import * # noqa: F40 bx-python-0.8.13/lib/bx/intervals/cluster.pyx000066400000000000000000000072231415666465100211660ustar00rootroot00000000000000""" Kanwei Li, 2009 Inspired by previous ClusterTree Provides a ClusterTree data structure that supports efficient finding of clusters of intervals that are at most a certain distance apart. This clustering algorithm uses a binary tree structure. Nodes correspond to non-overlapping intervals, where overlapping means that the distance between two intervals is less than or equal to the max separation. The tree self-balances using rotations based on the binomial sequence. Merges among nodes are performed whenever a node is changed/added that will cause other nodes to form a new cluster.
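A quick sketch of the interface (mirroring the example in ``getregions`` below):

    tree = ClusterTree(0, 0)  # max distance 0, no minimum interval count
    for start, end, id in [(6, 7, 1), (1, 2, 3), (9, 10, 2), (3, 4, 0), (3, 8, 4)]:
        tree.insert(start, end, id)
    print(tree.getregions())  # -> [(1, 2, [3]), (3, 8, [0, 1, 4]), (9, 10, [2])]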
C source code is in src/cluster.c """ cdef extern from "cluster.h": cdef struct struct_interval: int start int end int id struct_interval * next ctypedef struct_interval interval cdef struct struct_clusternode: int start int end struct_interval *interval_head struct_interval *interval_tail ctypedef struct_clusternode clusternode cdef struct struct_clustertree: int max_dist int min_intervals struct_clusternode *root ctypedef struct_clustertree clustertree cdef struct struct_treeitr: struct_treeitr *next struct_clusternode *node ctypedef struct_treeitr treeitr clusternode* clusternode_insert(clustertree *tree, clusternode *node, int start, int end, int id) clustertree* create_clustertree(int max_dist, int min_intervals) treeitr* clusteritr(clustertree *tree) void freeclusteritr(treeitr *itr) void free_tree(clustertree *tree) cdef class ClusterTree: cdef clustertree *tree cdef int mincols cdef int minregions def __cinit__(self, mincols, minregions): self.tree = create_clustertree(mincols, minregions) self.mincols = mincols self.minregions = minregions def __dealloc__(self): free_tree(self.tree) def insert(self, s, e, id): ''' Insert an interval with start, end, id as parameters''' if s > e: raise ValueError("Interval start must be before end") self.tree.root = clusternode_insert(self.tree, self.tree.root, s, e, id) def getregions(self): ''' Returns a list of clusters in ascending order of starting position. Each cluster is a tuple of (start, end, [sorted ids of intervals in cluster]) tree = ClusterTree(0, 0) Insert (6, 7, 1), (1, 2, 3), (9, 10, 2), (3, 4, 0), (3, 8, 4) tree.getregions() returns [(1, 2, [3]), (3, 8, [0, 1, 4]), (9, 10, [2])] ''' cdef treeitr *itr cdef interval *ival regions = [] itr = clusteritr(self.tree) while (itr): ids = [] ival = itr.node.interval_head while (ival): ids.append(ival.id) ival = ival.next regions.append( (itr.node.start, itr.node.end, sorted(ids)) ) itr = itr.next freeclusteritr(itr) return regions def getlines(self): ''' Similar to getregions except it just returns a list of ids of intervals. The above example would return [3, 0, 1, 4, 2] ''' cdef treeitr *itr cdef interval *ival lines = [] itr = clusteritr(self.tree) while (itr): ids = [] ival = itr.node.interval_head while (ival): ids.append(ival.id) ival = ival.next lines.extend(sorted(ids)) itr = itr.next freeclusteritr(itr) return lines bx-python-0.8.13/lib/bx/intervals/cluster_tests.py000066400000000000000000000071451415666465100222170ustar00rootroot00000000000000import os import sys import unittest try: sys.path.insert(0, os.path.dirname(os.path.dirname(__file__))) except Exception: sys.path.insert(0, os.path.dirname(os.path.abspath("."))) # from bx.intervals.cluster import ClusterTree from .cluster import ClusterTree class TestCluster(unittest.TestCase): def setUp(self): self.tree = ClusterTree(0, 0) def insertpairs(self, pairs): for i, (s, e) in enumerate(pairs): self.tree.insert(s, e, i) def test_merge_case(self): pairs = [(3, 4), (6, 7), (9, 10), (1, 2), (3, 8)] self.insertpairs(pairs) self.assertEqual([(1, 2, [3]), (3, 8, [0, 1, 4]), (9, 10, [2])], self.tree.getregions()) def test_trivial(self): pairs = [(1, 4), (4, 5)] self.insertpairs(pairs) self.assertEqual([(1, 5, [0, 1])], self.tree.getregions()) def test_easymerge(self): pairs = [(1, 2), (4, 5), (2, 4)] self.insertpairs(pairs) self.assertEqual([(1, 5, [0, 1, 2])], self.tree.getregions()) def test_hardmerge(self): pairs = [(1, 2), (8, 9), (3, 4), (5, 6), (7, 8), (1, 10)] self.insertpairs(pairs) self.assertEqual([(1, 10, [0, 1, 2, 3, 4, 5])], self.tree.getregions()) def test_duplicates(self): pairs = [(1, 1), (1, 2), (3, 4), (3, 4), (1, 4)] self.insertpairs(pairs) self.assertEqual([(1, 4, [0, 1, 2, 3, 4])], self.tree.getregions()) def test_startbeforeend(self): self.assertRaises(ValueError, self.tree.insert, 4, 2, 0) def test_large_sorted(self): upto = 100000 pairs = [(2*i + 1, 2*i + 2) for i in range(upto)] self.insertpairs(pairs) self.tree.insert(0, upto*3, upto) self.assertEqual([(0, upto*3, [x for x in range(upto+1)])], self.tree.getregions()) def test_minregions(self): self.tree = ClusterTree(0, 2) pairs = [(3, 4), (6, 7), (9, 10), (1, 2), (3, 8)] self.insertpairs(pairs) self.assertEqual([(3, 8, [0, 1, 4])], self.tree.getregions()) def test_distance(self): self.tree = ClusterTree(1, 0) pairs = [(3, 4), (6, 7), (9, 10), (1, 2), (3, 8)] self.insertpairs(pairs) self.assertEqual([(1, 10, [0, 1, 2, 3, 4])], self.tree.getregions()) def test_merge_left_right(self): pairs = [(6, 7, 1), (1, 2, 3), (9, 10, 2), (3, 4, 0), (3, 8, 4)] for s, e, i in pairs: self.tree.insert(s, e, i) self.assertEqual([(1, 2, [3]), (3, 8, [0, 1, 4]), (9, 10, [2])], self.tree.getregions()) def test_larger(self): pairs = [(1, 2), (3, 4), (5, 6), (7, 8), (9, 10), (11, 12), (13, 14), (15, 16), (17, 18), (19, 20), (1, 3), (4, 10), (10, 15), (15, 20), (21, 22)] self.insertpairs(pairs) self.assertEqual([(1, 20, [x for x in range(14)]), (21, 22, [14])], self.tree.getregions()) def test_another(self): pairs = [(3, 4, 1), (13, 14, 6), (21, 22, 14), (5, 6, 2), (4, 10, 11), (1, 2, 0), (11, 12, 5), (1, 3, 10), (7, 8, 3), (15, 16, 7), (15, 20, 13), (19, 20, 9), (10, 15, 12), (17, 18, 8), (9, 10, 4)] # pairs = [(3, 4, 1), (13, 14, 6), (21, 22, 14), (5, 6, 2), (4, 10, 11), (1, 2, 0), (11, 12, 5), (1, 3, 10), (7, 8, 3), (15, 16, 7), (15, 20, 13), (19, 20, 9), (10, 15, 12), (9, 10, 4)] for s, e, i in pairs: self.tree.insert(s, e, i) self.assertEqual([(1, 20, [x for x in range(14)]), (21, 22, [14])], self.tree.getregions()) def test_none(self): pairs = [] self.insertpairs(pairs) self.assertEqual([], self.tree.getregions()) if __name__ == '__main__': unittest.main() bx-python-0.8.13/lib/bx/intervals/intersection.pyx000066400000000000000000000414371415666465100222140ustar00rootroot00000000000000""" Data structure for performing intersect queries on a set of intervals which preserves all information about the intervals (unlike bitset projection methods). :Authors: James Taylor (james@jamestaylor.org), Ian Schenk (ian.schenck@gmail.com), Brent Pedersen (bpederse@gmail.com) """ # Historical note: # This module originally contained an implementation based on sorted endpoints # and a binary search, using an idea from Scott Schwartz and Piotr Berman. # Later an interval tree implementation was contributed by Ian for Galaxy's # join tool (see `bx.intervals.operations.quicksect.py`). This was then # converted to Cython by Brent, who also added support for # upstream/downstream/neighbor queries. This was modified by James to # handle half-open intervals strictly, to maintain sort order, and to # implement the same interface as the original Intersecter.
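Because intervals are treated as half-open (inclusive start, exclusive end), an interval that ends exactly where a query begins does not overlap it; a small illustrative sketch:

    # assuming the IntervalTree class defined below
    tree = IntervalTree()
    tree.insert(0, 10, "a")
    tree.find(10, 20)  # -> []   : [0, 10) does not overlap [10, 20)
    tree.find(9, 10)   # -> ['a']: the two ranges share position 9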
#cython: cdivision=True import operator cdef extern from "stdlib.h": int ceil(float f) float log(float f) int RAND_MAX int rand() int strlen(char *) int iabs(int) cdef inline int imax2(int a, int b): if b > a: return b return a cdef inline int imax3(int a, int b, int c): if b > a: if c > b: return c return b if a > c: return a return c cdef inline int imin3(int a, int b, int c): if b < a: if c < b: return c return b if a < c: return a return c cdef inline int imin2(int a, int b): if b < a: return b return a cdef float nlog = -1.0 / log(0.5) cdef class IntervalNode: """ A single node of an `IntervalTree`. NOTE: Unless you really know what you are doing, you probably should use `IntervalTree` rather than using this directly. """ cdef float priority cdef public object interval cdef public int start, end cdef int minend, maxend, minstart cdef IntervalNode cleft, cright, croot property left_node: def __get__(self): return self.cleft if self.cleft is not EmptyNode else None property right_node: def __get__(self): return self.cright if self.cright is not EmptyNode else None property root_node: def __get__(self): return self.croot if self.croot is not EmptyNode else None def __repr__(self): return "IntervalNode(%i, %i)" % (self.start, self.end) def __cinit__(IntervalNode self, int start, int end, object interval): # Python lacks the binomial distribution, so we convert a # uniform into a binomial because it naturally scales with # tree size. Also, python's uniform is perfect since the # upper limit is not inclusive, which gives us undefined here. self.priority = ceil(nlog * log(-1.0/(1.0 * rand()/RAND_MAX - 1))) self.start = start self.end = end self.interval = interval self.maxend = end self.minstart = start self.minend = end self.cleft = EmptyNode self.cright = EmptyNode self.croot = EmptyNode cpdef IntervalNode insert(IntervalNode self, int start, int end, object interval): """ Insert a new IntervalNode into the tree of which this node is currently the root. The return value is the new root of the tree (which may or may not be this node!) """ cdef IntervalNode croot = self # If starts are the same, decide which to add interval to based on # end, thus maintaining sortedness relative to start/end cdef int decision_endpoint = start if start == self.start: decision_endpoint = end if decision_endpoint > self.start: # insert to cright tree if self.cright is not EmptyNode: self.cright = self.cright.insert( start, end, interval ) else: self.cright = IntervalNode( start, end, interval ) # rebalance tree if self.priority < self.cright.priority: croot = self.rotate_left() else: # insert to cleft tree if self.cleft is not EmptyNode: self.cleft = self.cleft.insert( start, end, interval) else: self.cleft = IntervalNode( start, end, interval) # rebalance tree if self.priority < self.cleft.priority: croot = self.rotate_right() croot.set_ends() self.cleft.croot = croot self.cright.croot = croot return croot cdef IntervalNode rotate_right(IntervalNode self): cdef IntervalNode croot = self.cleft self.cleft = self.cleft.cright croot.cright = self self.set_ends() return croot cdef IntervalNode rotate_left(IntervalNode self): cdef IntervalNode croot = self.cright self.cright = self.cright.cleft croot.cleft = self self.set_ends() return croot cdef inline void set_ends(IntervalNode self): if self.cright is not EmptyNode and self.cleft is not EmptyNode: self.maxend = imax3(self.end, self.cright.maxend, self.cleft.maxend) self.minend = imin3(self.end, self.cright.minend, self.cleft.minend) self.minstart = imin3(self.start, self.cright.minstart, self.cleft.minstart) elif self.cright is not EmptyNode: self.maxend = imax2(self.end, self.cright.maxend) self.minend = imin2(self.end, self.cright.minend) self.minstart = imin2(self.start, self.cright.minstart) elif self.cleft is not EmptyNode: self.maxend = imax2(self.end, self.cleft.maxend) self.minend = imin2(self.end, self.cleft.minend) self.minstart = imin2(self.start, self.cleft.minstart) def intersect( self, int start, int end, sort=True ): """ given a start and an end, return a list of features falling within that range """ cdef list results = [] self._intersect( start, end, results ) return results find = intersect cdef void _intersect( IntervalNode self, int start, int end, list results): # Left subtree if self.cleft is not EmptyNode and self.cleft.maxend > start: self.cleft._intersect( start, end, results ) # This interval if ( self.end > start ) and ( self.start < end ): results.append( self.interval ) # Right subtree if self.cright is not EmptyNode and self.start < end: self.cright._intersect( start, end, results ) cdef void _seek_left(IntervalNode self, int position, list results, int n, int max_dist): # we know we can bail in these 2 cases. if self.maxend + max_dist < position: return if self.minstart > position: return # the ordering of these 3 blocks makes it so the results are # ordered nearest to farthest from the query position if self.cright is not EmptyNode: self.cright._seek_left(position, results, n, max_dist) if -1 < position - self.end < max_dist: results.append(self.interval) # TODO: can these conditionals be more stringent? if self.cleft is not EmptyNode: self.cleft._seek_left(position, results, n, max_dist) cdef void _seek_right(IntervalNode self, int position, list results, int n, int max_dist): # we know we can bail in these 2 cases.
if self.maxend < position: return if self.minstart - max_dist > position: return #print "SEEK_RIGHT:",self, self.cleft, self.maxend, self.minstart, position # the ordering of these 3 blocks makes it so the results are # ordered nearest to farthest from the query position if self.cleft is not EmptyNode: self.cleft._seek_right(position, results, n, max_dist) if -1 < self.start - position < max_dist: results.append(self.interval) if self.cright is not EmptyNode: self.cright._seek_right(position, results, n, max_dist) cpdef left(self, position, int n=1, int max_dist=2500): """ find up to n features that lie strictly to the left of `position` f: an Interval object (or anything with an `end` attribute) n: the number of features to return max_dist: the maximum distance to look before giving up. """ cdef list results = [] # use start - 1 because .left() assumes strictly left-of self._seek_left( position - 1, results, n, max_dist ) if len(results) == n: return results r = results r.sort(key=operator.attrgetter('end'), reverse=True) return r[:n] cpdef right(self, position, int n=1, int max_dist=2500): """ find up to n features that lie strictly to the right of `position` f: an Interval object (or anything with a `start` attribute) n: the number of features to return max_dist: the maximum distance to look before giving up. """ cdef list results = [] # use end + 1 because .right() assumes strictly right-of self._seek_right(position + 1, results, n, max_dist) if len(results) == n: return results r = results r.sort(key=operator.attrgetter('start')) return r[:n] def traverse(self, func): self._traverse(func) cdef void _traverse(IntervalNode self, object func): if self.cleft is not EmptyNode: self.cleft._traverse(func) func(self) if self.cright is not EmptyNode: self.cright._traverse(func) cdef IntervalNode EmptyNode = IntervalNode( 0, 0, Interval(0, 0)) ## ---- Wrappers that retain the old interface ------------------------------- cdef class Interval: """ Basic feature, with required integer start and end properties. Also accepts optional strand as +1 or -1 (used for up/downstream queries), a name, and any arbitrary data is sent in on the info keyword argument >>> from bx.intervals.intersection import Interval >>> from collections import OrderedDict >>> f1 = Interval(23, 36) >>> f2 = Interval(34, 48, value=OrderedDict([('chr', 12), ('anno', 'transposon')])) >>> f2 Interval(34, 48, value=OrderedDict([('chr', 12), ('anno', 'transposon')])) """ cdef public int start, end cdef public object value, chrom, strand def __init__(self, int start, int end, object value=None, object chrom=None, object strand=None ): assert start <= end, "start must be less than or equal to end" self.start = start self.end = end self.value = value self.chrom = chrom self.strand = strand def __repr__(self): fstr = "Interval(%d, %d" % (self.start, self.end) if not self.value is None: fstr += ", value=" + str(self.value) fstr += ")" return fstr def __richcmp__(self, other, op): if op == 0: # < return self.start < other.start or self.end < other.end elif op == 1: # <= return self == other or self < other elif op == 2: # == return self.start == other.start and self.end == other.end elif op == 3: # != return self.start != other.start or self.end != other.end elif op == 4: # > return self.start > other.start or self.end > other.end elif op == 5: # >= return self == other or self > other cdef class IntervalTree: """ Data structure for performing window intersect queries on a set of possibly overlapping 1d intervals. Usage ===== Create an empty IntervalTree >>> from bx.intervals.intersection import Interval, IntervalTree >>> intersecter = IntervalTree() An interval is a start and end position and a value (possibly None). You can add any object as an interval: >>> intersecter.insert( 0, 10, "food" ) >>> intersecter.insert( 3, 7, dict(foo='bar') ) >>> intersecter.find( 2, 5 ) ['food', {'foo': 'bar'}] If the object has start and end attributes (like the Interval class) there are some shortcuts: >>> intersecter = IntervalTree() >>> intersecter.insert_interval( Interval( 0, 10 ) ) >>> intersecter.insert_interval( Interval( 3, 7 ) ) >>> intersecter.insert_interval( Interval( 3, 40 ) ) >>> intersecter.insert_interval( Interval( 13, 50 ) ) >>> intersecter.find( 30, 50 ) [Interval(3, 40), Interval(13, 50)] >>> intersecter.find( 100, 200 ) [] Before/after for intervals >>> intersecter.before_interval( Interval( 10, 20 ) ) [Interval(3, 7)] >>> intersecter.before_interval( Interval( 5, 20 ) ) [] Upstream/downstream >>> intersecter.upstream_of_interval(Interval(11, 12)) [Interval(0, 10)] >>> intersecter.upstream_of_interval(Interval(11, 12, strand="-")) [Interval(13, 50)] >>> intersecter.upstream_of_interval(Interval(1, 2, strand="-"), num_intervals=3) [Interval(3, 7), Interval(3, 40), Interval(13, 50)] """ cdef IntervalNode root def __cinit__( self ): self.root = None # ---- Position based interfaces ----------------------------------------- def insert( self, int start, int end, object value=None ): """ Insert the interval [start,end) associated with value `value`. """ if self.root is None: self.root = IntervalNode( start, end, value ) else: self.root = self.root.insert( start, end, value ) add = insert def find( self, start, end ): """ Return a sorted list of all intervals overlapping [start,end).
""" if self.root is None: return [] return self.root.find( start, end ) def before( self, position, num_intervals=1, max_dist=2500 ): """ Find `num_intervals` intervals that lie before `position` and are no further than `max_dist` positions away """ if self.root is None: return [] return self.root.left( position, num_intervals, max_dist ) def after( self, position, num_intervals=1, max_dist=2500 ): """ Find `num_intervals` intervals that lie after `position` and are no further than `max_dist` positions away """ if self.root is None: return [] return self.root.right( position, num_intervals, max_dist ) # ---- Interval-like object based interfaces ----------------------------- def insert_interval( self, interval ): """ Insert an "interval" like object (one with at least start and end attributes) """ self.insert( interval.start, interval.end, interval ) add_interval = insert_interval def before_interval( self, interval, num_intervals=1, max_dist=2500 ): """ Find `num_intervals` intervals that lie completely before `interval` and are no further than `max_dist` positions away """ if self.root is None: return [] return self.root.left( interval.start, num_intervals, max_dist ) def after_interval( self, interval, num_intervals=1, max_dist=2500 ): """ Find `num_intervals` intervals that lie completely after `interval` and are no further than `max_dist` positions away """ if self.root is None: return [] return self.root.right( interval.end, num_intervals, max_dist ) def upstream_of_interval( self, interval, num_intervals=1, max_dist=2500 ): """ Find `num_intervals` intervals that lie completely upstream of `interval` and are no further than `max_dist` positions away """ if self.root is None: return [] if interval.strand == -1 or interval.strand == "-": return self.root.right( interval.end, num_intervals, max_dist ) else: return self.root.left( interval.start, num_intervals, max_dist ) def downstream_of_interval( self, interval, num_intervals=1, max_dist=2500 ): """ Find `num_intervals` intervals that lie completely downstream of `interval` and are no further than `max_dist` positions away """ if self.root is None: return [] if interval.strand == -1 or interval.strand == "-": return self.root.left( interval.start, num_intervals, max_dist ) else: return self.root.right( interval.end, num_intervals, max_dist ) def traverse(self, fn): """ call fn for each element in the tree """ if self.root is None: return None return self.root.traverse(fn) # For backward compatibility Intersecter = IntervalTree bx-python-0.8.13/lib/bx/intervals/intersection_tests.py000066400000000000000000000143711415666465100232470ustar00rootroot00000000000000import os import sys import unittest try: sys.path.insert(0, os.path.dirname(os.path.dirname(__file__))) except Exception: sys.path.insert(0, os.path.dirname(os.path.abspath("."))) from bx.intervals.intersection import Interval from bx.intervals.intersection import IntervalNode from bx.intervals.intersection import IntervalTree class NeighborTestCase(unittest.TestCase): def setUp(self): iv = IntervalNode(50, 59, Interval(50, 59)) for i in range(0, 110, 10): if i == 50: continue f = Interval(i, i + 9) iv = iv.insert(f.start, f.end, f) self.intervals = iv def test_left(self): iv = self.intervals self.assertEqual(str(iv.left(60, n=2)), str([Interval(50, 59), Interval(40, 49)])) for i in range(10, 100, 10): r = iv.left(i, max_dist=10, n=1) self.assertEqual(r[0].end, i - 1) def test_toomany(self): iv = self.intervals self.assertEqual(len(iv.left(60, n=200)), 6) def test_right(self): 
iv = self.intervals self.assertEqual(str(iv.left(60, n=2)), str([Interval(50, 59), Interval(40, 49)])) def get_right_start(b10): r = iv.right(b10+1, n=1) assert len(r) == 1 return r[0].start for i in range(10, 100, 10): self.assertEqual(get_right_start(i), i + 10) for i in range(0, 100, 10): r = iv.right(i-1, max_dist=10, n=1) print(r) self.assertEqual(r[0].start, i) class UpDownStreamTestCase(unittest.TestCase): def setUp(self): iv = IntervalTree() iv.add_interval(Interval(50, 59)) for i in range(0, 110, 10): if i == 50: continue f = Interval(i, i + 9) iv.add_interval(f) self.intervals = iv def test_upstream(self): iv = self.intervals upstreams = iv.upstream_of_interval(Interval(59, 60), num_intervals=200) for u in upstreams: self.assertTrue(u.end < 59) upstreams = iv.upstream_of_interval(Interval(60, 70, strand=-1), num_intervals=200) for u in upstreams: self.assertTrue(u.start > 70) upstreams = iv.upstream_of_interval(Interval(58, 58, strand=-1), num_intervals=200) for u in upstreams: self.assertTrue(u.start > 59) def test_downstream(self): iv = self.intervals downstreams = iv.downstream_of_interval(Interval(59, 60), num_intervals=200) for d in downstreams: self.assertTrue(d.start > 60) downstreams = iv.downstream_of_interval(Interval(59, 60, strand=-1), num_intervals=200) for d in downstreams: self.assertTrue(d.start < 59) def test_n(self): iv = self.intervals for i in range(0, 90, 10): r = iv.after(i, max_dist=20, num_intervals=2) self.assertEqual(r[0].start, i + 10) self.assertEqual(r[1].start, i + 20) r = iv.after_interval(Interval(i, i), max_dist=20, num_intervals=2) self.assertEqual(r[0].start, i + 10) self.assertEqual(r[1].start, i + 20) class LotsaTestCase(unittest.TestCase): """ put lotsa data in the tree and make sure it works""" def setUp(self): iv = IntervalNode(1, 2, Interval(1, 2)) self.max = 1000000 for i in range(0, self.max, 10): f = Interval(i, i) iv = iv.insert(f.start, f.end, f) for i in range(600): iv = iv.insert(0, 1, Interval(0, 1)) self.intervals = iv def test_count(self): iv = self.intervals r = iv.right(1, n=33) self.assertEqual(len(r), 33) l = iv.left(1, n=33) self.assertEqual(len(l), 1) u = iv.right(1, n=9999) self.assertEqual(len(u), 250) # now increase max_dist u = iv.right(1, n=9999, max_dist=99999) self.assertEqual(len(u), 9999) def test_max_dist(self): iv = self.intervals r = iv.right(1, max_dist=0, n=10) self.assertEqual(len(r), 0) for n, d in enumerate(range(10, 1000, 10)): r = iv.right(1, max_dist=d, n=10000) self.assertEqual(len(r), n + 1) def test_find(self): iv = self.intervals path = sys.path[:] sys.path = sys.path[2:] random = __import__("random") sys.path = path for t in range(25): start = random.randint(0, self.max - 10000) end = start + random.randint(100, 10000) results = iv.find(start, end) for feat in results: self.assertTrue( (feat.end >= start and feat.end <= end) or (feat.start <= end and feat.start >= start)) class IntervalTreeTest(unittest.TestCase): def setUp(self): iv = IntervalTree() n = 0 for i in range(1, 1000, 80): iv.insert(i, i + 10, dict(value=i*i)) # add is synonym for insert. iv.add(i + 20, i + 30, dict(astr=str(i*i))) # or insert/add an interval object with start, end attrs. 
iv.insert_interval(Interval(i + 40, i + 50, value=dict(astr=str(i*i)))) iv.add_interval(Interval(i + 60, i + 70, value=dict(astr=str(i*i)))) n += 4 self.intervals = self.iv = iv self.nintervals = n def test_find(self): r = self.iv.find(100, 200) self.assertEqual(len(r), 5) def test_traverse(self): a = [] fn = a.append self.iv.traverse(fn) self.assertEqual(len(a), self.nintervals) def test_empty(self): iv = IntervalTree() self.assertEqual([], iv.find(100, 300)) self.assertEqual([], iv.after(100)) self.assertEqual([], iv.before(100)) self.assertEqual([], iv.after_interval(100)) self.assertEqual([], iv.before_interval(100)) self.assertEqual([], iv.upstream_of_interval(100)) self.assertEqual([], iv.downstream_of_interval(100)) self.assertEqual(None, iv.traverse(lambda x: x.append(1))) def test_public_interval(self): def fn(ival): return self.assertTrue(ival.interval) self.iv.traverse(fn) if __name__ == "__main__": unittest.main() bx-python-0.8.13/lib/bx/intervals/io.py000066400000000000000000000247361415666465100177340ustar00rootroot00000000000000""" Support for reading and writing genomic intervals from delimited text files. """ from bx.bitset import ( BinnedBitSet, MAX ) from bx.tabular.io import ( ParseError, TableReader, TableRow, ) class MissingFieldError(ParseError): pass class FieldFormatError(ParseError): def __init__(self, *args, **kwargs): ParseError.__init__(self, *args, **kwargs) self.expected = kwargs.get("expected", None) def __str__(self): if self.expected: return ParseError.__str__(self) + ", " + self.expected + " expected" else: return ParseError.__str__(self) class StrandFormatError(ParseError): pass class GenomicInterval(TableRow): """ A genomic interval stored in a set of fields (a row of a table) """ def __init__(self, reader, fields, chrom_col, start_col, end_col, strand_col, default_strand, fix_strand=False): TableRow.__init__(self, reader, fields) self.chrom_col = chrom_col self.start_col = start_col self.end_col = end_col self.strand_col = strand_col self.nfields = nfields = len(fields) # Parse chrom/source column if chrom_col >= nfields: raise MissingFieldError("No field for chrom_col (%d)" % chrom_col) self.chrom = fields[chrom_col].strip() # Parse start column and ensure it is an integer if start_col >= nfields: raise MissingFieldError("No field for start_col (%d)" % start_col) try: self.start = int(fields[start_col]) except ValueError as e: raise FieldFormatError("Could not parse start_col: " + str(e), expected="integer") # Parse end column and ensure it is an integer if end_col >= nfields: raise MissingFieldError("No field for end_col (%d)" % end_col) try: self.end = int(fields[end_col]) except ValueError as e: raise FieldFormatError("Could not parse end_col: " + str(e), expected="integer") # Ensure start <= end if self.end < self.start: raise ParseError("Start is greater than End. 
Interval length is < 1.") # Parse strand and ensure it is valid if strand_col >= nfields or strand_col < 0: # This should probable be immutable since the fields are # not updated when it is set self.strand = default_strand else: strand = fields[strand_col] if strand == ".": strand = default_strand elif strand not in ("+", "-"): if fix_strand: strand = "+" else: raise StrandFormatError("Strand must be either '+' or '-'") self.strand = strand def __setattr__(self, name, value): if name == "chrom": self.fields[self.chrom_col] = str(value) elif name == "start": self.fields[self.start_col] = str(value) elif name == "end": self.fields[self.end_col] = str(value) elif name == "strand": if self.strand_col < self.nfields and self.strand_col >= 0: self.fields[self.strand_col] = str(value) object.__setattr__(self, name, value) def __str__(self): return "\t".join(self.fields) def copy(self): return GenomicInterval(self.reader, list(self.fields), self.chrom_col, self.start_col, self.end_col, self.strand_col, self.strand) class GenomicIntervalReader(TableReader): """ Reader for iterating a set of intervals in a tab separated file. Can also parse header and comment lines if requested. >>> from bx.tabular.io import Comment, Header >>> r = GenomicIntervalReader( [ "#chrom\\tname\\tstart\\tend\\textra", ... "chr1\\tfoo\\t1\\t100\\txxx", ... "chr2\\tbar\\t20\\t300\\txxx", ... "#I am a comment", ... "chr2\\tbar\\t20\\t300\\txxx" ], start_col=2, end_col=3 ) >>> header = next(r) >>> elements = list(r) >>> elements.insert(0, header) >>> assert isinstance(elements[0], Header) >>> str(elements[0]) '#chrom\\tname\\tstart\\tend\\textra' >>> assert isinstance(elements[1], GenomicInterval) >>> print(elements[1].start, elements[1].end) 1 100 >>> str(elements[1]) 'chr1\\tfoo\\t1\\t100\\txxx' >>> elements[1].start = 30 >>> print(elements[1].start, elements[1].end) 30 100 >>> str(elements[1]) 'chr1\\tfoo\\t30\\t100\\txxx' >>> assert isinstance(elements[2], GenomicInterval) >>> assert isinstance(elements[3], Comment) >>> assert isinstance(elements[4], GenomicInterval) """ def __init__(self, input, chrom_col=0, start_col=1, end_col=2, strand_col=5, default_strand="+", return_header=True, return_comments=True, force_header=None, fix_strand=False, comment_lines_startswith=None, allow_spaces=False): if comment_lines_startswith is None: comment_lines_startswith = ["#", "track "] TableReader.__init__(self, input, return_header, return_comments, force_header, comment_lines_startswith) self.chrom_col = chrom_col self.start_col = start_col self.end_col = end_col self.strand_col = strand_col self.default_strand = default_strand self.fix_strand = fix_strand self.allow_spaces = allow_spaces def parse_row(self, line): # Try multiple separators. First tab, our expected splitter, than # just whitespace in the case of problematic files with space instead of # tab separation seps = ["\t"] if self.allow_spaces: seps.append(None) for i, sep in enumerate(seps): try: return GenomicInterval( self, line.split(sep), self.chrom_col, self.start_col, self.end_col, self.strand_col, self.default_strand, fix_strand=self.fix_strand) except Exception as e: # Catch and store the initial error if i == 0: err = e # Ran out of separators and still have errors, raise our problem raise err def binned_bitsets(self, upstream_pad=0, downstream_pad=0, lens=None): # The incoming lens dictionary is a dictionary of chromosome lengths # which are used to initialize the bitsets. 
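# One BinnedBitSet is created per chromosome the first time it appears in
# the stream; chromosomes missing from lens fall back to the MAX bitset
# size, and each interval's [start, end) range is set in its chromosome's bitset.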
if lens is None: lens = {} last_chrom = None last_bitset = None bitsets = dict() for interval in self: if isinstance(interval, GenomicInterval): chrom = interval[self.chrom_col] if chrom != last_chrom: if chrom not in bitsets: size = lens.get(chrom, MAX) try: bbs = BinnedBitSet(size) except ValueError as e: # We will only reach here when constructing this bitset from the lens dict # since the value of MAX is always safe. raise Exception(f"Invalid chrom length {str(size)} in 'lens' dictionary. {str(e)}") bitsets[chrom] = bbs last_chrom = chrom last_bitset = bitsets[chrom] start = max(int(interval[self.start_col]), 0) end = min(int(interval[self.end_col]), last_bitset.size) last_bitset.set_range(start, end-start) return bitsets class NiceReaderWrapper(GenomicIntervalReader): """ >>> from bx.tabular.io import Header >>> r = NiceReaderWrapper(["#chrom\\tname\\tstart\\tend\\textra", ... "chr1\\tfoo\\t1\\t100\\txxx", ... "chr2\\tbar\\t20\\t300\\txxx", ... "#I am a comment", ... "chr2\\tbar\\t20\\t300\\txxx" ], start_col=2, end_col=3 ) >>> assert isinstance(next(r), Header) >>> assert r.current_line == '#chrom\\tname\\tstart\\tend\\textra', r.current_line >>> assert len([_ for _ in r]) == 4 """ def __init__(self, reader, **kwargs): GenomicIntervalReader.__init__(self, reader, **kwargs) self.outstream = kwargs.get("outstream", None) self.print_delegate = kwargs.get("print_delegate", None) self.input_wrapper = iter(self.input) self.input_iter = self.iterwrapper() self.skipped = 0 self.skipped_lines = [] def __iter__(self): return self def __next__(self): while True: try: nextitem = super().__next__() return nextitem except ParseError as e: if self.outstream: if self.print_delegate and callable(self.print_delegate): self.print_delegate(self.outstream, e, self) self.skipped += 1 # no reason to stuff an entire bad file into memory if self.skipped < 10: self.skipped_lines.append((self.linenum, self.current_line, str(e))) def iterwrapper(self): # Generator which keeps track of the current line as an object attribute. for self.current_line in self.input_wrapper: yield self.current_line class BitsetSafeReaderWrapper(NiceReaderWrapper): def __init__(self, reader, lens=None): # This class handles any ValueError, IndexError and OverflowError exceptions that may be thrown when # the bitsets are being created by skipping the problem lines. # The incoming lens dictionary is a dictionary of chromosome lengths # which are used to initialize the bitsets. # It is assumed that the reader is an interval reader, i.e. it has chr_col, start_col, end_col and strand_col attributes. if lens is None: lens = {} NiceReaderWrapper.__init__(self, reader.input, chrom_col=reader.chrom_col, start_col=reader.start_col, end_col=reader.end_col, strand_col=reader.strand_col) self.lens = lens def __next__(self): while True: rval = super().__next__() if isinstance(rval, GenomicInterval) and rval.end > self.lens.get(rval.chrom, MAX): self.skipped += 1 # no reason to stuff an entire bad file into memory if self.skipped < 10: self.skipped_lines.append((self.linenum, self.current_line, "Error in BitsetSafeReaderWrapper")) else: return rval bx-python-0.8.13/lib/bx/intervals/operations/000077500000000000000000000000001415666465100211225ustar00rootroot00000000000000bx-python-0.8.13/lib/bx/intervals/operations/__init__.py000066400000000000000000000015711415666465100232370ustar00rootroot00000000000000""" High level operations on genomic intervals. Most accept and produce iterables of `bx.io.inervals.io.GenomicInterval` objects. 
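(That is, the GenomicInterval class defined in `bx.intervals.io`.)

A common pattern, sketched here rather than taken from any single
operation, is to pack a reader's intervals into per-chromosome bitsets
and then walk runs of set bits back out as intervals::

    bitsets = reader.binned_bitsets()
    for chrom, bits in bitsets.items():
        for start, end in bits_set_in_range(bits, 0, MAX_END):
            ...  # each (start, end) is a maximal covered run on chrom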
""" BED_DEFAULT_COLS = 0, 1, 2, 5 MAX_END = 512*1024*1024 def bits_set_in_range(bits, range_start, range_end): """ Yield start,end tuples for each span of set bits in [range_start,range_end) """ end = range_start while True: start = bits.next_set(end) end = min(bits.next_clear(start), range_end) if start >= end: break yield start, end def bits_clear_in_range(bits, range_start, range_end): """ Yield start,end tuples for each span of clear bits in [range_start,range_end) """ end = range_start while True: start = bits.next_clear(end) if start >= range_end: break end = min(bits.next_set(start), range_end) yield start, end bx-python-0.8.13/lib/bx/intervals/operations/base_coverage.py000066400000000000000000000016011415666465100242570ustar00rootroot00000000000000""" Determine the number of bases covered by a set of intervals. """ from bx.intervals.io import BitsetSafeReaderWrapper from bx.intervals.operations import MAX_END def base_coverage(reader): # Handle any ValueError, IndexError and OverflowError exceptions that may be thrown when # the bitsets are being created by skipping the problem lines base_reader = BitsetSafeReaderWrapper(reader, lens={}) bitsets = base_reader.binned_bitsets() coverage = 0 for chrom in bitsets: try: coverage += bitsets[chrom].count_range(0, MAX_END) except IndexError as e: base_reader.skipped += 1 # no reason to stuff an entire bad file into memmory if base_reader.skipped < 10: base_reader.skipped_lines.append((base_reader.linenum, base_reader.current_line, str(e))) continue return coverage bx-python-0.8.13/lib/bx/intervals/operations/complement.py000066400000000000000000000043211415666465100236370ustar00rootroot00000000000000""" Complement a set of intervals. """ from bx.bitset import MAX from bx.intervals.io import ( BitsetSafeReaderWrapper, GenomicInterval ) from bx.intervals.operations import bits_set_in_range def complement(reader, lens): # Handle any ValueError, IndexError and OverflowError exceptions that may be thrown when # the bitsets are being created by skipping the problem lines complement_reader = BitsetSafeReaderWrapper(reader, lens=lens) bitsets = complement_reader.binned_bitsets(upstream_pad=0, downstream_pad=0, lens=lens) # NOT them all for key, value in bitsets.items(): value.invert() # Read remaining intervals and subtract for chrom in bitsets: bitset = bitsets[chrom] out_intervals = bits_set_in_range(bitset, 0, lens.get(chrom, MAX)) try: # Write the intervals for start, end in out_intervals: fields = ["." 
for x in range(max(complement_reader.chrom_col, complement_reader.start_col, complement_reader.end_col)+1)] # default the column to a + if it exists if complement_reader.strand_col < len(fields) and complement_reader.strand_col >= 0: fields[complement_reader.strand_col] = "+" fields[complement_reader.chrom_col] = chrom fields[complement_reader.start_col] = start fields[complement_reader.end_col] = end new_interval = GenomicInterval(complement_reader, fields, complement_reader.chrom_col, complement_reader.start_col, complement_reader.end_col, complement_reader.strand_col, "+") yield new_interval except IndexError as e: complement_reader.skipped += 1 # no reason to stuff an entire bad file into memmory if complement_reader.skipped < 10: complement_reader.skipped_lines.append((complement_reader.linenum, complement_reader.current_line, str(e))) continue # def main(): # # test it all out # f1 = fileinput.FileInput("dataset_7.dat") # g1 = GenomicIntervalReader(f1) # for interval in complement(g1,{"chr":16000000}): # print "\t".join(interval) # # if __name__ == "__main__": # main() bx-python-0.8.13/lib/bx/intervals/operations/concat.py000066400000000000000000000050521415666465100227450ustar00rootroot00000000000000""" Concatenate sets of intervals. Preserves format of the first input -- it is possible to concat two files that have different column orders. Of course, the meta-data of the second will be lost (and filled with a "."). If all of the files (GenomicInteralReaders) are the same format, sameformat=True will preserve all columns of the first input, cuts extra columns on subsequent input, and pads missing columns. If sameformat=False then extra columns are filled with ".". """ from bx.intervals.io import GenomicInterval from bx.tabular.io import ( Comment, Header, ) def concat(readers, comments=True, header=True, sameformat=True): # Save columns from the first input chrom_col = readers[0].chrom_col start_col = readers[0].start_col end_col = readers[0].end_col strand_col = readers[0].strand_col nfields = None firstdataset = True output = False for intervals in readers: for interval in intervals: if isinstance(interval, GenomicInterval): if not nfields: nfields = interval.nfields out_interval = interval.copy() if sameformat or firstdataset: # everything except the first input has to be # trimmed or padded to match the first input if len(out_interval.fields) > nfields: out_interval.fields = out_interval.fields[0:nfields] while len(out_interval.fields) < nfields: out_interval.fields.append(".") output = True yield out_interval else: chrom = out_interval.chrom start = out_interval.start end = out_interval.end strand = out_interval.strand out_interval.fields = ["." for col in range(nfields)] out_interval.fields[chrom_col] = chrom out_interval.fields[start_col] = str(start) out_interval.fields[end_col] = str(end) # Strand is optional, might not exist in output if strand_col < len(out_interval.fields): out_interval.fields[strand_col] = strand yield out_interval elif isinstance(interval, Header) and header: yield interval elif isinstance(interval, Comment) and comments: yield interval if output and firstdataset: firstdataset = False bx-python-0.8.13/lib/bx/intervals/operations/coverage.py000066400000000000000000000057021415666465100232730ustar00rootroot00000000000000""" Determine amount of each interval in one set covered by the intervals of another set. Adds two columns to the first input, giving number of bases covered and percent coverage on the second input. 
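For example, a primary interval spanning 100 bases of which 40 are
covered by the second input gains the appended columns `40` and `0.4`.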
""" from bx.intervals.io import ( BitsetSafeReaderWrapper, GenomicInterval, ) from bx.tabular.io import ( Comment, Header ) def coverage(readers, comments=True): # The incoming lens dictionary is a dictionary of chromosome lengths which are used to initialize the bitsets. primary = readers[0] intersect = readers[1:] # Handle any ValueError, IndexError and OverflowError exceptions that may be thrown when # the bitsets are being created by skipping the problem lines intersect[0] = BitsetSafeReaderWrapper(intersect[0], lens={}) bitsets = intersect[0].binned_bitsets() intersect = intersect[1:] for andset in intersect: bitset2 = andset.binned_bitsets() for chrom in bitsets: if chrom not in bitset2: continue bitsets[chrom].ior(bitset2[chrom]) intersect = intersect[1:] # Read remaining intervals and give coverage for interval in primary: if isinstance(interval, Header): yield interval if isinstance(interval, Comment) and comments: yield interval elif isinstance(interval, GenomicInterval): chrom = interval.chrom start = int(interval.start) end = int(interval.end) if start > end: try: # This will only work if primary is a NiceReaderWrapper primary.skipped += 1 # no reason to stuff an entire bad file into memmory if primary.skipped < 10: primary.skipped_lines.append((primary.linenum, primary.current_line, "Interval start after end!")) except Exception: pass continue if chrom not in bitsets: bases_covered = 0 percent = 0.0 else: try: bases_covered = bitsets[chrom].count_range(start, end-start) except IndexError as e: try: # This will only work if primary is a NiceReaderWrapper primary.skipped += 1 # no reason to stuff an entire bad file into memmory if primary.skipped < 10: primary.skipped_lines.append((primary.linenum, primary.current_line, str(e))) except Exception: pass continue if (end - start) == 0: percent = 0 else: percent = float(bases_covered) / float(end - start) interval.fields.append(str(bases_covered)) interval.fields.append(str(percent)) yield interval bx-python-0.8.13/lib/bx/intervals/operations/find_clusters.py000066400000000000000000000121061415666465100243400ustar00rootroot00000000000000""" Find clusters of intervals within a set of intervals. A cluster is a group (of size minregions) of intervals within a specific distance (of mincols) of each other. Returns Cluster objects, which have a chrom, start, end, and lines (a list of linenumbers from the original file). The original can then be ran through with the linenumbers to extract clustered regions without disturbing original order, or the clusters may themselves be written as intervals. """ import math import random from bx.intervals.cluster import ClusterTree from bx.intervals.io import GenomicInterval def find_clusters(reader, mincols=1, minregions=2): extra = dict() chroms = dict() linenum = -1 for interval in reader: linenum += 1 if not isinstance(interval, GenomicInterval): extra[linenum] = interval else: if interval.chrom not in chroms: chroms[interval.chrom] = ClusterTree(mincols, minregions) try: chroms[interval.chrom].insert(interval.start, interval.end, linenum) except OverflowError as e: try: # This will work only if reader is a NiceReaderWrapper reader.skipped += 1 if reader.skipped < 10: reader.skipped_lines.append((reader.linenum, reader.current_line, str(e))) except Exception: pass continue return chroms, extra # DEPRECATED: Use the ClusterTree in bx.intervals.cluster for this. # It does the same thing, but is a C implementation. 
class ClusterNode: def __init__(self, start, end, linenum, mincols, minregions): # Python lacks the binomial distribution, so we convert a # uniform into a binomial because it naturally scales with # tree size. Also, python's uniform is perfect since the # upper limit is not inclusive, which gives us undefined here. self.priority = math.ceil((-1.0 / math.log(.5)) * math.log(-1.0 / (random.uniform(0, 1) - 1))) self.start = start self.end = end self.left = None self.right = None self.lines = [linenum] self.mincols = mincols self.minregions = minregions def insert(self, start, end, linenum): if start - self.mincols > self.end: # insert to right tree if self.right: self.right = self.right.insert(start, end, linenum) else: self.right = ClusterNode(start, end, linenum, self.mincols, self.minregions) # rebalance tree if self.priority < self.right.priority: return self.rotateleft() elif end + self.mincols < self.start: # insert to left tree if self.left: self.left = self.left.insert(start, end, linenum) else: self.left = ClusterNode(start, end, linenum, self.mincols, self.minregions) # rebalance tree if self.priority < self.left.priority: return self.rotateright() else: # insert here self.start = min(self.start, start) self.end = max(self.end, end) self.lines.append(linenum) # recursive call to push nodes up if self.left: self.left = self.left.push_up(self) if self.right: self.right = self.right.push_up(self) return self def rotateright(self): root = self.left self.left = self.left.right root.right = self return root def rotateleft(self): root = self.right self.right = self.right.left root.left = self return root def push_up(self, topnode): # Note: this function does not affect heap property # Distance method removed for inline, faster? distance = max(self.start, topnode.start) - min(self.end, topnode.end) if distance <= self.mincols: topnode.start = min(self.start, topnode.start) topnode.end = max(self.end, topnode.end) for linenum in self.lines: topnode.lines.append(linenum) if self.right: return self.right.push_up(topnode) if self.left: return self.left.push_up(topnode) return None if self.end < topnode.start and self.right: self.right = self.right.push_up(topnode) if self.start > topnode.end and self.left: self.left = self.left.push_up(topnode) return self def getintervals(self): if self.left: yield from self.left.getintervals(self.minregions) if len(self.lines) >= self.minregions: yield self.start, self.end if self.right: yield from self.right.getintervals(self.minregions) def getlines(self): if self.left: yield from self.left.getlines() if len(self.lines) >= self.minregions: yield from self.lines if self.right: yield from self.right.getlines() bx-python-0.8.13/lib/bx/intervals/operations/intersect.py000066400000000000000000000066551415666465100235100ustar00rootroot00000000000000""" Compute the intersection of two sets of genomic intervals, either base-by-base or at the interval level. The returned GenomicIntervalReader will be in the order of the first set of intervals passed in, with the corresponding additional fields. """ from bx.intervals.io import ( BitsetSafeReaderWrapper, GenomicInterval ) from bx.intervals.operations import bits_set_in_range from bx.tabular.io import ( Comment, Header, ) def intersect(readers, mincols=1, upstream_pad=0, downstream_pad=0, pieces=True, lens={}, comments=True): # The incoming lens dictionary is a dictionary of chromosome lengths which are used to initialize the bitsets. 
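# e.g. lens={"chr1": 248956422} caps the chr1 bitset at the true
# chromosome length; chromosomes absent from lens fall back to the
# default maximum bitset size.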
# Read all but first into bitsets and intersect to one primary = readers[0] intersect = readers[1:] # Handle any ValueError, IndexError and OverflowError exceptions that may be thrown when # the bitsets are being created by skipping the problem lines intersect[0] = BitsetSafeReaderWrapper(intersect[0], lens=lens) bitsets = intersect[0].binned_bitsets(upstream_pad=upstream_pad, downstream_pad=downstream_pad, lens=lens) intersect = intersect[1:] for andset in intersect: bitset2 = andset.binned_bitsets(upstream_pad=upstream_pad, downstream_pad=downstream_pad, lens=lens) for chrom in bitsets: if chrom not in bitset2: continue bitsets[chrom].iand(bitset2[chrom]) intersect = intersect[1:] # Read remaining intervals and intersect for interval in primary: if isinstance(interval, Header): yield interval if isinstance(interval, Comment) and comments: yield interval elif isinstance(interval, GenomicInterval): chrom = interval.chrom start = int(interval.start) end = int(interval.end) if chrom not in bitsets: continue if start > end: try: # This will only work if primary is a NiceReaderWrapper primary.skipped += 1 # no reason to stuff an entire bad file into memmory if primary.skipped < 10: primary.skipped_lines.append((primary.linenum, primary.current_line, "Interval start after end!")) except Exception: pass continue out_intervals = [] # Intersect or Overlap try: if bitsets[chrom].count_range(start, end-start) >= mincols: if pieces: out_intervals = bits_set_in_range(bitsets[chrom], start, end) else: out_intervals = [(start, end)] # Write the intervals for start, end in out_intervals: new_interval = interval.copy() new_interval.start = start new_interval.end = end yield new_interval except IndexError as e: try: # This will only work if primary is a NiceReaderWrapper primary.skipped += 1 # no reason to stuff an entire bad file into memmory if primary.skipped < 10: primary.skipped_lines.append((primary.linenum, primary.current_line, str(e))) except Exception: pass continue bx-python-0.8.13/lib/bx/intervals/operations/join.py000066400000000000000000000111611415666465100224330ustar00rootroot00000000000000""" Join two sets of intervals using their overlap as the key. The intervals MUST be sorted by chrom(lexicographically), start(arithmetically) and end(arithmetically). This works by simply walking through the inputs in O(n) time. 
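A usage sketch (the file names and BED-style column layout are
assumptions, not part of this module; GenomicIntervalReader comes from
`bx.intervals.io`)::

    left = GenomicIntervalReader(open("a.bed"))
    right = GenomicIntervalReader(open("b.bed"))
    for fields in join(left, right, mincols=1):
        print("\t".join(map(str, fields)))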
""" import math from bx.intervals.io import GenomicInterval from .quicksect import IntervalTree def join(leftSet, rightSet, mincols=1, leftfill=True, rightfill=True): # Read rightSet into memory: rightlen = 0 leftlen = 0 rightTree = IntervalTree() for item in rightSet: if isinstance(item, GenomicInterval): rightTree.insert(item, rightSet.linenum, item.fields) if rightlen == 0: rightlen = item.nfields for interval in leftSet: if leftlen == 0 and isinstance(interval, GenomicInterval): leftlen = interval.nfields if not isinstance(interval, GenomicInterval): yield interval else: result = [] rightTree.intersect(interval, lambda node: result.append(node)) overlap_not_met = 0 for item in result: if item.start in range(interval.start, interval.end+1) and item.end not in range(interval.start, interval.end+1): overlap = interval.end-item.start elif item.end in range(interval.start, interval.end+1) and item.start not in range(interval.start, interval.end+1): overlap = item.end-interval.start elif item.start in range(interval.start, interval.end+1) and item.end in range(interval.start, interval.end+1): overlap = item.end-item.start else: # the intersecting item's start and end are outside the interval range overlap = interval.end-interval.start if overlap < mincols: overlap_not_met += 1 continue outfields = list(interval) outfields.extend(item.other) setattr(item, "visited", True) yield outfields if (len(result) == 0 or overlap_not_met == len(result)) and rightfill: outfields = list(interval) for x in range(rightlen): outfields.append(".") yield outfields if leftfill: def report_unvisited(node, results): if not hasattr(node, "visited"): results.append(node) results = [] rightTree.traverse(lambda x: report_unvisited(x, results)) for item in results: outfields = list() for x in range(leftlen): outfields.append(".") outfields.extend(item.other) yield outfields def interval_cmp(a, b): interval1 = a[0] interval2 = b[0] if not (isinstance(interval1, GenomicInterval) and isinstance(interval2, GenomicInterval)): return 0 # Both are intervals if interval1.chrom == interval2.chrom: center1 = interval1.start + ((interval1.end - interval1.start) / 2) center2 = interval2.start + ((interval2.end - interval2.start) / 2) return center1 - center2 else: if interval1.chrom > interval2.chrom: return 1 else: return -1 return 0 def findintersect(interval, sortedlist, mincols): # find range of intervals that intersect via a binary search # find lower bound x = len(sortedlist) / 2 n = int(math.pow(2, math.ceil(math.log(len(sortedlist), 2)))) not_found = True not_done = True while not_found and not_done: n = n / 2 if n == 0: n = 1 not_done = False if x >= len(sortedlist): x -= n elif x < 0: x += n else: if findoverlap(sortedlist[x][0], interval) >= mincols: not_found = False else: comp = interval_cmp(sortedlist[x], [interval, 0]) if comp > 0: x -= n else: x += n print("\t".join(sortedlist[x][0].fields)) print("not_found = " + str(not_found)) if not_found: return 0, -1 lowerbound = x upperbound = x while (lowerbound > -1) and (findoverlap(sortedlist[lowerbound-1][0], interval) >= mincols): lowerbound -= 1 while (upperbound+1 < len(sortedlist)) and (findoverlap(sortedlist[upperbound+1][0], interval) >= mincols): upperbound += 1 return lowerbound, upperbound def findoverlap(a, b): # overlapping if a.chrom == b.chrom: return min(a.end, b.end) - max(a.start, b.start) else: return 0 bx-python-0.8.13/lib/bx/intervals/operations/merge.py000066400000000000000000000026661415666465100226050ustar00rootroot00000000000000""" Merge 
overlapping regions in two sets of genomic intervals. """ from bx.intervals.io import BitsetSafeReaderWrapper from bx.intervals.operations import ( bits_set_in_range, MAX_END ) # sorting could make this a less memory intensive operation(?) def merge(interval, mincols=1): # Handle any ValueError, IndexError and OverflowError exceptions that may be thrown when # the bitsets are being created by skipping the problem lines interval = BitsetSafeReaderWrapper(interval, lens={}) bitsets = interval.binned_bitsets() if interval.header: yield interval.header for chrom in bitsets: bitset = bitsets[chrom] output = ["."] * (max(interval.chrom_col, interval.start_col, interval.end_col) + 1) output[interval.chrom_col] = chrom try: for start, end in bits_set_in_range(bitset, 0, MAX_END): output[interval.start_col] = str(start) output[interval.end_col] = str(end) yield output except IndexError as e: try: # This will work only if interval is a NiceReaderWrapper interval.skipped += 1 # no reason to stuff an entire bad file into memory if interval.skipped < 10: interval.skipped_lines.append((interval.linenum, interval.current_line, str(e))) except Exception: pass continue bx-python-0.8.13/lib/bx/intervals/operations/quicksect.py000066400000000000000000000135061415666465100234740ustar00rootroot00000000000000""" Intersects ... faster. Supports the GenomicInterval datatype and multiple chromosomes. """ import math import random try: from time import process_time except ImportError: # For compatibility with Python < 3.3 from time import clock as process_time class IntervalTree: def __init__(self): self.chroms = {} def insert(self, interval, linenum=0, other=None): chrom = interval.chrom start = interval.start end = interval.end if interval.chrom in self.chroms: self.chroms[chrom] = self.chroms[chrom].insert(start, end, linenum, other) else: self.chroms[chrom] = IntervalNode(start, end, linenum, other) def intersect(self, interval, report_func): chrom = interval.chrom start = interval.start end = interval.end if chrom in self.chroms: self.chroms[chrom].intersect(start, end, report_func) def traverse(self, func): for item in self.chroms.values(): item.traverse(func) class IntervalNode: def __init__(self, start, end, linenum=0, other=None): # Draw a random treap priority: the ceiling of an exponential # variate gives a geometric distribution, which naturally scales # with tree size. Also, python's uniform works here since the # upper limit is not inclusive, so the logarithm below stays defined.
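# Concretely: ceil( (-1/ln 0.5) * ln( -1/(u - 1) ) ) == ceil( -log2(1 - u) ),
# so P(priority == k) == 2**-k for k >= 1 (a Geometric(1/2) variate).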
self.priority = math.ceil((-1.0 / math.log(.5)) * math.log(-1.0 / (random.uniform(0, 1) - 1))) self.start = start self.end = end self.maxend = self.end self.minend = self.end self.left = None self.right = None self.linenum = linenum self.other = other def insert(self, start, end, linenum=0, other=None): root = self if start > self.start: # insert to right tree if self.right: self.right = self.right.insert(start, end, linenum, other) else: self.right = IntervalNode(start, end, linenum, other) # rebalance tree if self.priority < self.right.priority: root = self.rotateleft() else: # insert to left tree if self.left: self.left = self.left.insert(start, end, linenum, other) else: self.left = IntervalNode(start, end, linenum, other) # rebalance tree if self.priority < self.left.priority: root = self.rotateright() if root.right and root.left: root.maxend = max(root.end, root.right.maxend, root.left.maxend) root.minend = min(root.end, root.right.minend, root.left.minend) elif root.right: root.maxend = max(root.end, root.right.maxend) root.minend = min(root.end, root.right.minend) elif root.left: root.maxend = max(root.end, root.left.maxend) root.minend = min(root.end, root.left.minend) return root def rotateright(self): root = self.left self.left = self.left.right root.right = self if self.right and self.left: self.maxend = max(self.end, self.right.maxend, self.left.maxend) self.minend = min(self.end, self.right.minend, self.left.minend) elif self.right: self.maxend = max(self.end, self.right.maxend) self.minend = min(self.end, self.right.minend) elif self.left: self.maxend = max(self.end, self.left.maxend) self.minend = min(self.end, self.left.minend) return root def rotateleft(self): root = self.right self.right = self.right.left root.left = self if self.right and self.left: self.maxend = max(self.end, self.right.maxend, self.left.maxend) self.minend = min(self.end, self.right.minend, self.left.minend) elif self.right: self.maxend = max(self.end, self.right.maxend) self.minend = min(self.end, self.right.minend) elif self.left: self.maxend = max(self.end, self.left.maxend) self.minend = min(self.end, self.left.minend) return root def intersect(self, start, end, report_func): if start < self.end and end > self.start: report_func(self) if self.left and start < self.left.maxend: self.left.intersect(start, end, report_func) if self.right and end > self.start: self.right.intersect(start, end, report_func) def traverse(self, func): if self.left: self.left.traverse(func) func(self) if self.right: self.right.traverse(func) def main(): test = None intlist = [] for _ in range(20000): start = random.randint(0, 1000000) end = start + random.randint(1, 1000) if test: test = test.insert(start, end) else: test = IntervalNode(start, end) intlist.append((start, end)) starttime = process_time() for x in range(5000): start = random.randint(0, 10000000) end = start + random.randint(1, 1000) result = [] test.intersect(start, end, lambda x: result.append(x.linenum)) print("%f for tree method" % (process_time() - starttime)) starttime = process_time() for _ in range(5000): start = random.randint(0, 10000000) end = start + random.randint(1, 1000) bad_sect(intlist, start, end) print("%f for linear (bad) method" % (process_time() - starttime)) def test_func(node): print("[%d, %d), %d" % (node.start, node.end, node.maxend)) def bad_sect(lst, int_start, int_end): intersection = [] for start, end in lst: if int_start < end and int_end > start: intersection.append((start, end)) return intersection if __name__ == "__main__": 
main() bx-python-0.8.13/lib/bx/intervals/operations/subtract.py000066400000000000000000000064221415666465100233270ustar00rootroot00000000000000#!/usr/bin/env python """ Subtract one set of genomic intervals from another (base-by-base or whole intervals). The returned GenomicIntervals will be in the order of the first set of intervals passed in, with the corresponding meta-data. """ from warnings import warn from bx.intervals.io import ( BitsetSafeReaderWrapper, GenomicInterval ) from bx.intervals.operations import bits_clear_in_range from bx.tabular.io import ( Comment, Header, ) def subtract(readers, mincols=1, upstream_pad=0, downstream_pad=0, pieces=True, lens={}, comments=True): # The incoming lens dictionary is a dictionary of chromosome lengths which are used to initialize the bitsets. # Read all but first into bitsets and union to one (if confused, read DeMorgan's...) primary = readers[0] union = readers[1:] # Handle any ValueError, IndexError and OverflowError exceptions that may be thrown when # the bitsets are being created by skipping the problem lines union[0] = BitsetSafeReaderWrapper(union[0], lens=lens) bitsets = union[0].binned_bitsets(upstream_pad=upstream_pad, downstream_pad=downstream_pad, lens=lens) union = union[1:] for andset in union: bitset2 = andset.binned_bitsets(upstream_pad=upstream_pad, downstream_pad=downstream_pad, lens=lens) for chrom in bitset2: if chrom not in bitsets: bitsets[chrom] = bitset2[chrom] else: bitsets[chrom].ior(bitset2[chrom]) # Read remaining intervals and subtract for interval in primary: if isinstance(interval, Header): yield interval if isinstance(interval, Comment) and comments: yield interval elif isinstance(interval, GenomicInterval): chrom = interval.chrom if chrom not in bitsets: yield interval else: start = int(interval.start) end = int(interval.end) if start > end: warn("Interval start after end!") out_intervals = [] # Find the intervals that meet the criteria (for the three sensible # permutations of reverse and pieces) try: if bitsets[chrom].count_range(start, end-start) >= mincols: if pieces: out_intervals = bits_clear_in_range(bitsets[chrom], start, end) else: out_intervals = [(start, end)] # Write the intervals for start, end in out_intervals: new_interval = interval.copy() new_interval.start = start new_interval.end = end yield new_interval except IndexError as e: try: # This will work only if primary is a NiceReaderWrapper primary.skipped += 1 # no reason to stuff an entire bad file into memmory if primary.skipped < 10: primary.skipped_lines.append((primary.linenum, primary.current_line, str(e))) except Exception: pass continue bx-python-0.8.13/lib/bx/intervals/random_intervals.py000066400000000000000000000222601415666465100226620ustar00rootroot00000000000000""" Classes for generating random sets of intervals over larger regions. 
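For instance, throw_random_intervals([100, 50], [(0, 1000), (5000, 6000)])
returns two non-overlapping (start, end) tuples, each placed uniformly
at random within one of the two regions.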
""" import bisect from bx.bitset import BitSet random = __import__('random') class MaxtriesException(Exception): pass def throw_random_list(lengths, mask, allow_overlap=False): rval = [] throw_random_gap_list(lengths, mask, lambda s, e: rval.append((s, e)), allow_overlap) assert sum(b - a for a, b in rval) == sum(lengths) return rval def throw_random_bits(lengths, mask, allow_overlap=False): rval = BitSet(mask.size) throw_random_gap_list(lengths, mask, lambda s, e: rval.set_range(s, e - s), allow_overlap) if not allow_overlap: assert rval.count_range(0, rval.size) == sum(lengths) return rval def throw_random_gap_list(lengths, mask, save_interval_func, allow_overlap=False): """ Generates a set of non-overlapping random intervals from a length distribution. `lengths`: list containing the length of each interval to be generated. We expect this to be sorted by decreasing length to minimize the chance of failure (MaxtriesException) and for some performance gains when allow_overlap==True and there are duplicate lengths `mask`: a BitSet in which set bits represent regions not to place intervals. The size of the region is also determined from the mask. """ # Use mask to find the gaps; gaps is a list of (length,start,end) lengths = [length for length in lengths if length > 0] min_length = min(lengths) gaps = [] start = end = 0 while True: start = mask.next_clear(end) if start == mask.size: break end = mask.next_set(start) if end-start >= min_length: gaps.append((end-start, start, None)) # Sort (long regions first) gaps.sort() gaps.reverse() # Throw throw_random_private(lengths, gaps, save_interval_func, allow_overlap, three_args=False) def throw_random_intervals(lengths, regions, save_interval_func=None, allow_overlap=False): """ Generates a set of non-overlapping random intervals from a length distribution. `lengths`: list containing the length of each interval to be generated. We expect this to be sorted by decreasing length to minimize the chance of failure (MaxtriesException) and for some performance gains when allow_overlap==True and there are duplicate lengths. `regions`: A list of regions in which intervals can be placed. Elements are tuples or lists of the form (start, end, ...), where ... indicates any number of items (including zero). `save_interval_func`: A function accepting three arguments which will be passed the (start,stop,region) for each generated interval, where region is an entry in the regions list. If this is None, the generated intervals will be returned as a list of elements copied from the region with start and end modified. """ # Copy regions regions = sorted((x[1]-x[0], x[0], x) for x in regions) # Sort (long regions first) regions.reverse() # Throw if (save_interval_func is not None): throw_random_private(lengths, regions, save_interval_func, allow_overlap) return else: intervals = [] def save_interval_func(s, e, rgn): return intervals.append(overwrite_start_end(s, e, rgn)) throw_random_private(lengths, regions, save_interval_func, allow_overlap) return intervals def overwrite_start_end(s, e, rgn): rgn = list(rgn) rgn[0] = s rgn[1] = e return tuple(rgn) def throw_random_private(lengths, regions, save_interval_func, allow_overlap=False, three_args=True): """ (Internal function; we expect calls only through the interface functions above) `lengths`: A list containing the length of each interval to be generated. `regions`: A list of regions in which intervals can be placed, sorted by decreasing length. 
Elements are triples of the form (length, start, extra). This list CAN BE MODIFIED by this function. `save_interval_func`: A function accepting three arguments which will be passed the (start,stop,extra) for each generated interval. """ # Implementation: # We keep a list of the regions, sorted from largest to smallest. We then # place each length by the following steps: # (1) construct a candidate counts array (cc array) # (2) choose a candidate at random # (3) find the region containing that candidate # (4) map the candidate to a position in that region # (5) split the region if not allowing overlaps # (6) report the placed segment # # The cc array is only constructed if there's a change (different length # to place, or the region list has changed). It contains, for each # region, the total number of candidate positions in regions # *preceding* it in the region list: # cc[i] = sum over k in 0..(i-1) of (length[k] - L + 1) # where L is the length being thrown. # At the same time, we determine the total number of candidates (the total # number of places the current length can be placed) and the index range # of regions into which the length will fit. # # example: # for L = 20 # i = 0 1 2 3 4 5 6 7 8 9 # length[i] = 96 66 56 50 48 40 29 17 11 8 # cc[i] = 0 77 124 161 192 221 242 X X X # candidates = 252 # lo_rgn = 0 # hi_rgn = 6 # # The candidate is chosen in (0..candidates-1). The candidate counts # array allows us to do a binary search to locate the region that holds that # candidate. Continuing the example above, we choose a random candidate # s in (0..251). If s happens to be in (124..160), it will be mapped to # region 2 at start position s-124. # # During the binary search, if we are looking at region 3, if s < cc[3] # then the desired region is region 2 or lower. Otherwise it is region 3 or # higher. min_length = min(lengths) prev_length = None # (force initial cc array construction) cc = [0] * (len(regions) + len(lengths) - 1) num_thrown = 0 for length in lengths: # construct cc array (only needed if length has changed or region list has # changed) if length != prev_length: prev_length = length assert len(cc) >= len(regions) candidates = 0 hi_rgn = 0 for region in regions: rgn_len = region[0] if rgn_len < length: break cc[hi_rgn] = candidates candidates += rgn_len - length + 1 hi_rgn += 1 if candidates == 0: raise MaxtriesException( "No region can fit an interval of length %d (we threw %d of %d)" % (length, num_thrown, len(lengths))) hi_rgn -= 1 # Select a candidate s = random.randrange(candidates) # .. # ..for ix in range( len( regions ) ): # .. region = regions[ix] # .. if ix <= hi_rgn: print "%2s: %5s %5s %5s" % ( ix, region[1], region[0], cc[ix] ) # ..
else: print "%2s: %5s %5s %5s" % ( ix, region[1], region[0], "X" ) # ..print "s = %s (of %s candidates)" % ( s, candidates ) # Locate region containing that candidate, by binary search lo = 0 hi = hi_rgn while hi > lo: mid = (lo + hi + 1) / 2 # (we round up to prevent infinite loop) if s < cc[mid]: hi = mid-1 # (s < num candidates from 0..mid-1) else: lo = mid # (s >= num candidates from 0..mid-1) s -= cc[lo] # If we are not allowing overlaps we will remove the placed interval # from the region list if allow_overlap: rgn_length, rgn_start, rgn_extra = regions[lo] else: # Remove the chosen region and split rgn_length, rgn_start, rgn_extra = regions.pop(lo) rgn_end = rgn_start + rgn_length assert s >= 0 assert rgn_start + s + length <= rgn_end, "Expected: %d + %d + %d == %d <= %d" % (rgn_start, s, length, rgn_start + s + length, rgn_end) regions.reverse() if s >= min_length: bisect.insort(regions, (s, rgn_start, rgn_extra)) if s + length <= rgn_length - min_length: bisect.insort(regions, (rgn_length - (s + length), rgn_start + s + length, rgn_extra)) regions.reverse() prev_length = None # (force cc array construction) # Save the new interval if (three_args): save_interval_func(rgn_start + s, rgn_start + s + length, rgn_extra) else: save_interval_func(rgn_start + s, rgn_start + s + length) num_thrown += 1 bx-python-0.8.13/lib/bx/intseq/000077500000000000000000000000001415666465100162335ustar00rootroot00000000000000bx-python-0.8.13/lib/bx/intseq/__init__.py000066400000000000000000000001141415666465100203400ustar00rootroot00000000000000""" Tools for working with strings over interger alphabets efficiently. """ bx-python-0.8.13/lib/bx/intseq/ngramcount.pyx000066400000000000000000000073471415666465100211650ustar00rootroot00000000000000""" Tools for counting words (n-grams) in integer sequences. """ import numpy cdef extern from "Python.h": ctypedef int Py_intptr_t # cdef extern from "numpy/npy_3kcompat.h": # NOTE: including npy_3kcompat.h did not compile, # so use the explicitly extracted function from here: cdef extern from "npy_capsule_as_void_ptr.h": void * NpyCapsule_AsVoidPtr(object) except NULL # for PyArrayInterface: CONTIGUOUS=0x01 FORTRAN=0x02 ALIGNED=0x100 NOTSWAPPED=0x200 WRITEABLE=0x400 ctypedef struct PyArrayInterface: int two # contains the integer 2 as a sanity check int nd # number of dimensions char typekind # kind in array --- character code of typestr int itemsize # size of each element int flags # flags indicating how the data should be interpreted Py_intptr_t *shape # A length-nd array of shape information Py_intptr_t *strides # A length-nd array of stride information void *data # A pointer to the first element of the array def count_ngrams( object ints, int n, int radix ): """ Count the number of occurrences of each possible length `n` word in `ints` (which contains values from 0 to `radix`). Returns an array of length `radix` ** `n` containing the counts. 
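For example, with `n` = 2 and `radix` = 4 the returned array has 16
entries, and the count for the word (a, b) lands at index a + b * `radix`.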
""" cdef PyArrayInterface * ints_desc cdef PyArrayInterface * rval_desc # Get array interface for input string and validate ints_desc_obj = ints.__array_struct__ ints_desc = NpyCapsule_AsVoidPtr( ints_desc_obj ) assert ints_desc.two == 2, "Array interface sanity check failed, got %d" % ints_desc.two assert ints_desc.nd == 1, "Input array must be 1d" assert ints_desc.typekind == 'i'[0], "Input array must contain integers" assert ints_desc.itemsize == 4, "Input array must contain 32bit integers" assert ints_desc.flags & CONTIGUOUS > 0, "Input array must be contiguous" assert ints_desc.flags & ALIGNED > 0, "Input array must be aligned" assert ints_desc.flags & NOTSWAPPED > 0, "Input array must not be byteswapped" # Create numpy array for return value, get array interface and validate rval = numpy.zeros( ( ( radix ) ** n ), dtype=numpy.int32 ) assert ints_desc.two == 2, "Array interface sanity check failed, got %d" % ints_desc.two rval_desc_obj = rval.__array_struct__ rval_desc = NpyCapsule_AsVoidPtr( rval_desc_obj ) assert rval_desc.two == 2, "Array interface sanity check failed" assert rval_desc.nd == 1, "Input array must be 1d" assert rval_desc.typekind == 'i'[0], "Input array must contain integers" assert rval_desc.itemsize == 4, "Input array must contain 32bit integers" assert rval_desc.flags & CONTIGUOUS > 0, "Input array must be contiguous" assert rval_desc.flags & ALIGNED > 0, "Input array must be aligned" assert rval_desc.flags & NOTSWAPPED > 0, "Input array must not be byteswapped" # Do it _count_ngrams( ints_desc.data, ints_desc.shape[0], rval_desc.data, n, radix ) return rval cdef _count_ngrams( int* ints, int n_ints, int* rval, int n, int radix ): cdef int i, j, index, factor, letter # Loop over each word in the string for i from 0 <= i < ( n_ints - n ): # Walk back to build index into count array index = 0 factor = 1 for j from 0 <= j < n: letter = ints[ i + j ] if letter < 0 or letter >= radix: # This word is bad, break out and do not increment counts print "breaking, letter", letter break index = index + letter * factor factor = factor * radix else: print index rval[ index ] = rval[ index ] + 1 bx-python-0.8.13/lib/bx/misc/000077500000000000000000000000001415666465100156635ustar00rootroot00000000000000bx-python-0.8.13/lib/bx/misc/__init__.py000066400000000000000000000004511415666465100177740ustar00rootroot00000000000000""" Various utilities. """ import bz2 import gzip def open_compressed(filename, mode='r'): if filename.endswith(".bz2"): return bz2.BZ2File(filename, mode) elif filename.endswith(".gz"): return gzip.GzipFile(filename, mode) else: return open(filename, mode) bx-python-0.8.13/lib/bx/misc/_seekbzip2.pyx000066400000000000000000000160341415666465100204660ustar00rootroot00000000000000""" Pyrex/C extension supporting `bx.misc.seekbzip2` (wrapping the low level functions in `micro-bunzip.c`). 
""" cdef extern from "Python.h": char * PyBytes_AsString( object ) object PyBytes_FromStringAndSize( char *, Py_ssize_t ) cdef extern from "micro-bunzip.h": ctypedef struct bunzip_data: int in_fd int inbufBitCount int inbufPos int inbufCount int writeCount unsigned int writeCRC int writeCurrent int writeCopies unsigned int * dbuf unsigned int get_bits(bunzip_data *bd, char bits_wanted) int get_next_block( bunzip_data *bd ) int read_bunzip(bunzip_data *bd, char *outbuf, int len) int start_bunzip(bunzip_data **bdp, int in_fd, char *inbuf, int len) int read_bunzip_to_char(bunzip_data *bd, char *outbuf, int len, int* gotcount_out, char stopchar ) cdef extern from "unistd.h": # Not really ctypedef unsigned long long off_t off_t lseek( int fildes, off_t offset, int whence ) cdef extern from "stdlib.h": void free( void *ptr ) import sys import os cdef class SeekBzip2: cdef bunzip_data * bd cdef int file_fd cdef int at_eof def __init__( self, filename ): self.at_eof = 0 self.file_fd = os.open( filename, os.O_RDONLY ) # Initialize bunzip_data from the file start_bunzip( &( self.bd ), self.file_fd, NULL, 0 ) def close( self ): free( self.bd.dbuf ) free( self.bd ) os.close( self.file_fd ) def seek( self, unsigned long long position ): """ Seek the bunzip_data to a specific chunk (position must correspond to that start of a compressed data block). """ cdef off_t n_byte cdef int n_bit # Break position into bit and byte offsets ## sys.stderr.write( "arg pos: %d\n" % position ) n_byte = position / 8; n_bit = position % 8; ## sys.stderr.write( "byte pos: %d\n" % n_byte ) ## sys.stderr.write( "bit pos: %d\n" % n_bit ) ## sys.stderr.flush() # Seek the underlying file descriptor if ( lseek( self.file_fd, n_byte, 0 ) != n_byte ): raise Exception( "lseek of underlying file failed" ) # Init the buffer at the right bit position self.bd.inbufBitCount = self.bd.inbufPos = self.bd.inbufCount = 0 get_bits( self.bd, n_bit ) # This ensures that the next read call will return 0, causing the # buffer to be re-initialized self.bd.writeCount = -1 # Reset EOF tracking self.at_eof = 0 def readline( self, int amount ): cdef object rval cdef char * p_rval cdef int gotcount cdef int totalcount cdef int status cdef int spaceleft cdef int desired gotcount = 0 totalcount = 0 # If already at EOF return None if self.at_eof: return None chunks = [] # We have great difficulty resizing buffers, so we'll just create # one 8k string at a time rval = PyBytes_FromStringAndSize( NULL, 8192 ) p_rval = PyBytes_AsString( rval ) spaceleft = 8192 while amount != 0: if amount > 0 and amount < spaceleft: desired = amount else: desired = spaceleft ## sys.stderr.write( "readline, amount: %d\n" % amount ) ## sys.stderr.write( "buffer: %r" % rval[:100] ) ## sys.stderr.write( "\n" ) ## sys.stderr.flush() # ord( "\n" ) = 10 status = read_bunzip_to_char( self.bd, p_rval, desired, &gotcount, 10 ); ## sys.stderr.write( "readline, desired: %d, gotcount: %d\n" % ( desired, gotcount ) ); ## sys.stderr.write( "buffer: %r" % rval[:100] ) ## sys.stderr.write( "\n" ) ## sys.stderr.flush() if status == -9: ## sys.stderr.write( "readline, STOP_CHAR\n" ); sys.stderr.flush() # Reached the stop character (RETVAL_STOPCHAR == -9), so # we can stop chunks.append( rval[:8192-spaceleft+gotcount] ) break elif status == -10: ## sys.stderr.write( "readline, BUFFER_FULL\n" ); sys.stderr.flush() # Filled the buffer (RETVAL_BUFFER_FULL == -10), so create # new buffer and keep going chunks.append( rval ) amount = amount - gotcount if amount == 0: # Got the desired amount break 
rval = PyBytes_FromStringAndSize( NULL, 8192 ) p_rval = PyBytes_AsString( rval ) spaceleft = 8192 elif status == -8: ## sys.stderr.write( "readline, END_OF_BLOCK\n" ); sys.stderr.flush() # No more data in the decomp buffer (RETVAL_END_OF_BLOCK == -10) if gotcount and p_rval[ gotcount - 1 ] == 10: chunks.append( rval[:8192-spaceleft+gotcount] ) break # Update buffer info p_rval = p_rval + gotcount spaceleft = spaceleft - gotcount amount = amount - gotcount # Get the next block status = get_next_block( self.bd ) if status == -1: # Block is end of stream block (RETVAL_LAST_BLOCK == -1) self.at_eof = 1 chunks.append( rval[:gotcount] ) break self.bd.writeCRC = 0xffffffff self.bd.writeCopies = 0 else: # Some other status raise Exception( "read_bunzip error %d" % status ) # Return whatever we read return "".join( chunks ) def read( self, int amount ): cdef object rval cdef char * p_rval cdef int gotcount cdef int totalcount cdef int status totalcount = 0 # If already at EOF return None if self.at_eof: return None # Create a new python bytes string large enough to hold the result rval = PyBytes_FromStringAndSize( NULL, amount ) p_rval = PyBytes_AsString( rval ) # Read into it ## sys.stderr.write( "read called, bd.current: %x\n" % self.bd.writeCurrent ); sys.stderr.flush() while amount > 0: gotcount = read_bunzip( self.bd, p_rval, amount ); if gotcount < 0: raise Exception( "read_bunzip error %d" % gotcount ) elif gotcount == 0: status = get_next_block( self.bd ) if status == -1: self.at_eof = 1 break self.bd.writeCRC = 0xffffffff self.bd.writeCopies = 0 else: totalcount = totalcount + gotcount amount = amount - gotcount p_rval = p_rval + gotcount # Return whatever we read return rval[:totalcount] bx-python-0.8.13/lib/bx/misc/bgzf.pyx000066400000000000000000000026661415666465100173670ustar00rootroot00000000000000""" Seekable access to BGZ files based on samtools code. Does not yet implement complete file-like interface. 
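A usage sketch (the path is hypothetical)::

    f = BGZFFile( "reads.bam" )
    voffset = f.tell()    # opaque BGZF virtual offset
    data = f.read( 1024 )
    f.seek( voffset )     # return to the saved position
    f.close()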
""" from cpython.version cimport PY_MAJOR_VERSION ctypedef unsigned long long int64_t cdef extern from "Python.h": char * PyBytes_AsString( object ) object PyBytes_FromStringAndSize( char *, Py_ssize_t ) cdef extern from "bgzf.h": ctypedef struct BGZF BGZF * bgzf_open( const char * path, const char * mode ) int bgzf_close( BGZF * fp ) int bgzf_read( BGZF * fp, void * data, int length ) int64_t bgzf_tell( BGZF * fp ) int64_t bgzf_seek( BGZF * fp, int64_t pos, int where ) cdef class BGZFFile( object ): cdef BGZF * bgzf def __init__( self, path, mode="r" ): if PY_MAJOR_VERSION >= 3: bytes_path, bytes_mode = path.encode(), mode.encode() else: bytes_path, bytes_mode = path, mode self.bgzf = bgzf_open( bytes_path, bytes_mode ) if not self.bgzf: raise IOError( "Could not open file" ) def close( self ): if self.bgzf: bgzf_close( self.bgzf ) def read( self, int length ): cdef object rval rval = PyBytes_FromStringAndSize( NULL, length ) bgzf_read( self.bgzf, PyBytes_AsString( rval ), length ) return rval def tell( self ): return bgzf_tell( self.bgzf ) def seek( self, int64_t pos, int where=0 ): return bgzf_seek( self.bgzf, pos, where ) bx-python-0.8.13/lib/bx/misc/bgzf_tests.py000066400000000000000000000003121415666465100204030ustar00rootroot00000000000000import bx.misc.bgzf def test_bgzf(): f = bx.misc.bgzf.BGZFFile("test_data/bgzf_tests/test.txt.gz", "r") assert f.read(10) == b"begin 644 " f.seek(0) assert f.read(10) == b"begin 644 " bx-python-0.8.13/lib/bx/misc/binary_file.py000066400000000000000000000116251415666465100205250ustar00rootroot00000000000000""" Wrappers for doing binary IO on file-like objects """ import struct import sys import numpy def bytesify(s): if isinstance(s, bytes): return s else: return s.encode() # Standard size: # short is 8 bits # int and long are 32 bits # long long is 64 bits class BadMagicNumber(IOError): pass class BinaryFileReader: """ Wrapper for doing binary reads on any file like object. Currently this is not heavily optimized (it uses the `struct` module to unpack) """ def __init__(self, file, magic=None, is_little_endian=False): self.is_little_endian = is_little_endian self.file = file if magic is not None: # Attempt to read magic number and chuck endianess bytes = file.read(4) if struct.unpack(">I", bytes)[0] == magic: pass elif struct.unpack("I", bytes)[0], struct.unpack("> 8) for i in range(subtable_size): offset = subtable_offset + ((start + i) % subtable_size) * 8 self.io.seek(offset) h = self.io.read_uint32() p = self.io.read_uint32() # Hit an empty bin, no match for key if p == 0: raise KeyError # Hash matches, need to check full key if h == hash: self.io.seek(p) klen = self.io.read_uint32() vlen = self.io.read_uint32() k = self.io.read(klen).decode() if k == key: v = self.io.read(vlen).decode() return v else: # Visited every slot and no match (should never happen since # there are empty slots by contruction) raise KeyError def __iter__(self): raise NotImplementedError() def __len__(self): raise NotImplementedError() @classmethod def to_file(Class, dict, file, is_little_endian=True): """ For constructing a CDB structure in a file. Able to calculate size on disk and write to a file """ io = BinaryFileWriter(file, is_little_endian=is_little_endian) start_offset = io.tell() # Header is of fixed length io.seek(start_offset + (8 * 256)) # For each item, key and value length (written as length prefixed # strings). We also calculate the subtables on this pass. 
# NOTE: This requires the key and value be byte strings, support for # dealing with encoding specific value types should be # added to this wrapper subtables = [[] for i in range(256)] for key, value in dict.items(): pair_offset = io.tell() io.write_uint32(len(key)) io.write_uint32(len(value)) io.write(key) io.write(value) hash = cdbhash(key) subtables[hash % 256].append((hash, pair_offset)) # Save the offset where the subtables will start subtable_offset = io.tell() # Write subtables for subtable in subtables: if len(subtable) > 0: # Construct hashtable to be twice the size of the number # of items in the subtable, and built it in memory ncells = len(subtable) * 2 cells = [(0, 0) for i in range(ncells)] for hash, pair_offset in subtable: index = (hash >> 8) % ncells while cells[index][1] != 0: index = (index + 1) % ncells # Guaranteed to find a non-empty cell cells[index] = (hash, pair_offset) # Write subtable for hash, pair_offset in cells: io.write_uint32(hash) io.write_uint32(pair_offset) # Go back and write the header end_offset = io.tell() io.seek(start_offset) index = subtable_offset for subtable in subtables: io.write_uint32(index) io.write_uint32(len(subtable * 2)) # For each cell in the subtable, a hash and a pointer to a value index += (len(subtable) * 2) * 8 # Leave fp at end of cdb io.seek(end_offset) bx-python-0.8.13/lib/bx/misc/cdb_tests.py000066400000000000000000000012711415666465100202100ustar00rootroot00000000000000from tempfile import NamedTemporaryFile from bx.misc.cdb import FileCDBDict def test(): d = {} for i in range(10000): d['foo' + str(i)] = 'bar' + str(i) # Open temporary file and get name file = NamedTemporaryFile() file_name = file.name # Write cdb to file FileCDBDict.to_file(d, file) file.flush() # Open on disk file2 = open(file_name, 'rb') cdb = FileCDBDict(file2) for key, value in d.items(): assert cdb[key] == value try: cdb['notin'] assert False, "KeyError was not raised" except KeyError: pass # Close everything (deletes the temporary file) file2.close() file.close() bx-python-0.8.13/lib/bx/misc/filecache.py000066400000000000000000000070031415666465100201400ustar00rootroot00000000000000from io import BytesIO from bx_extras.lrucache import LRUCache DEFAULT_CACHE_SIZE = 10 DEFAULT_BLOCK_SIZE = 1024*1024*2 class FileCache: """ Wrapper for a file that cache blocks of data in memory. **NOTE:** this is currently an incomplete file-like object, it only supports seek, tell, and readline (plus iteration). Reading bytes is currently not implemented. """ def __init__(self, file, size, cache_size=DEFAULT_CACHE_SIZE, block_size=DEFAULT_BLOCK_SIZE): """ Create a new `FileCache` wrapping the file-like object `file` that has total size `size` and caching blocks of size `block_size`. 
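For example, a 10 MB file with the default 2 MB `block_size` spans
`nblocks == 6` (`size // block_size + 1`); at most `cache_size`
(default 10) blocks are held in the LRU cache at once.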
""" self.file = file self.size = size self.cache_size = cache_size self.block_size = block_size # Setup the cache self.nblocks = (self.size // self.block_size) + 1 self.cache = LRUCache(self.cache_size) # Position in file self.dirty = True self.at_eof = False self.file_pos = 0 self.current_block_index = -1 self.current_block = None def fix_dirty(self): chunk, offset = self.get_block_and_offset(self.file_pos) if self.current_block_index != chunk: self.current_block = BytesIO(self.load_block(chunk)) self.current_block.read(offset) self.current_block_index = chunk else: self.current_block.seek(offset) self.dirty = False def get_block_and_offset(self, index): return int(index // self.block_size), int(index % self.block_size) def load_block(self, index): if index in self.cache: return self.cache[index] else: real_offset = index * self.block_size self.file.seek(real_offset) block = self.file.read(self.block_size) self.cache[index] = block return block def seek(self, offset, whence=0): """ Move the file pointer to a particular offset. """ # Determine absolute target position if whence == 0: target_pos = offset elif whence == 1: target_pos = self.file_pos + offset elif whence == 2: target_pos = self.size - offset else: raise Exception("Invalid `whence` argument: %r", whence) # Check if this is a noop if target_pos == self.file_pos: return # Verify it is valid assert 0 <= target_pos < self.size, "Attempt to seek outside file" # Move the position self.file_pos = target_pos # Mark as dirty, the next time a read is done we need to actually # move the position in the bzip2 file self.dirty = True def readline(self): if self.dirty: self.fix_dirty() if self.at_eof: return b"" rval = [] while True: line = self.current_block.readline() rval.append(line) if len(line) > 0 and line[-1] == b'\n': break elif self.current_block_index == self.nblocks - 1: self.at_eof = True break else: self.current_block_index += 1 self.current_block = BytesIO(self.load_block(self.current_block_index)) return b"".join(rval) def __next__(self): line = self.readline() if line == b"": raise StopIteration def __iter__(self): return self def close(self): self.file.close() bx-python-0.8.13/lib/bx/misc/filecache_tests.py000066400000000000000000000010401415666465100213550ustar00rootroot00000000000000""" T="/Users/james/cache/hg18/align/multiz28way/chr10.maf" def test(): s = os.stat( T ).st_size real_f = open( T ) f = filecache.FileCache( real_f, s ) for i in range( 1000 ): f.readline() def test_random_seeking(): s = os.stat( T ).st_size raw = open( T ) f = filecache.FileCache( open( T ), s ) for i in range( 10000 ): seek_to = random.randrange( s ) f.seek( seek_to ) raw.seek( seek_to ) l1 = f.readline() l2 = raw.readline() assert l1 == l2 """ bx-python-0.8.13/lib/bx/misc/readlengths.py000066400000000000000000000016311415666465100205360ustar00rootroot00000000000000""" Read sequence lengths from a file. Each line is of the form where is typically a chromsome name (e.g. chr12) and length is the number of bases the sequence. """ def read_lengths_file(name): """ Returns a hash from sequence name to length. """ chrom_to_length = {} f = open(name) for line in f: line = line.strip() if line == '' or line[0] == '#': continue try: fields = line.split() if len(fields) != 2: raise chrom = fields[0] length = int(fields[1]) except Exception: raise ValueError("bad length file line: %s" % line) if chrom in chrom_to_length and length != chrom_to_length[chrom]: raise ValueError("%s has more than one length!" 
% chrom) chrom_to_length[chrom] = length f.close() return chrom_to_length bx-python-0.8.13/lib/bx/misc/seekbzip2.py000066400000000000000000000112371415666465100201370ustar00rootroot00000000000000""" Semi-random access to bz2 compressed data. """ import bisect from ._seekbzip2 import SeekBzip2 class SeekableBzip2File: """ Filelike object supporting read-only semi-random access to bz2 compressed files for which an offset table (bz2t) has been generated by `bzip-table`. """ def __init__(self, filename, table_filename, **kwargs): self.filename = filename self.table_filename = table_filename self.init_table() self.init_bz2() self.pos = 0 self.dirty = True self.closed = False def init_bz2(self): self.seek_bz2 = SeekBzip2(self.filename) def init_table(self): # Position in plaintext file self.table_positions = [] # Position of corresponding block in bz2 file (bits) self.table_bz2positions = [] pos = 0 for line in open(self.table_filename): fields = line.split() # Position of the compressed block in the bz2 file bz2_pos = int(fields[0]) # print >> sys.stderr, fields[0], bz2_pos # Length of the block when uncompressed length = int(fields[1]) self.table_positions.append(pos) self.table_bz2positions.append(bz2_pos) old_pos = pos pos = pos + length assert pos > old_pos self.size = pos def close(self): self.seek_bz2.close() self.closed = True def fix_dirty(self): # Our virtual position in the uncompressed data is out of sync # FIXME: If we're moving to a later position that is still in # the same block, we could just read and throw out bytes in the # compressed stream, less wasteful then backtracking chunk, offset = self.get_chunk_and_offset(self.pos) # Get the seek position for that chunk and seek to it bz2_seek_pos = self.table_bz2positions[chunk] # print >>sys.stderr, "bz2 seek pos:", bz2_seek_pos self.seek_bz2.seek(bz2_seek_pos) # Consume bytes to move to the correct position assert len(self.seek_bz2.read(offset)) == offset # Update state self.dirty = False def read(self, sizehint=-1): if sizehint < 0: chunks = [] while True: val = self._read(1024*1024) if val: chunks.append(val) else: break return b"".join(chunks) else: return self._read(sizehint) def _read(self, size): if self.dirty: self.fix_dirty() val = self.seek_bz2.read(size) if val is None: # EOF self.pos = self.size val = b"" else: self.pos = self.pos + len(val) return val def readline(self, size=-1): if self.dirty: self.fix_dirty() val = self.seek_bz2.readline(size) if val is None: # EOF self.pos = self.size val = b"" else: self.pos = self.pos + len(val) return val def tell(self): return self.pos def get_chunk_and_offset(self, position): # Find the chunk that position is in using a binary search chunk = bisect.bisect(self.table_positions, position) - 1 offset = position - self.table_positions[chunk] return chunk, offset def seek(self, offset, whence=0): # Determine absolute target position if whence == 0: target_pos = offset elif whence == 1: target_pos = self.pos + offset elif whence == 2: target_pos = self.size - offset else: raise Exception("Invalid `whence` argument: %r", whence) # Check if this is a noop if target_pos == self.pos: return # Verify it is valid assert 0 <= target_pos < self.size, "Attempt to seek outside file" # Move the position self.pos = target_pos # Mark as dirty, the next time a read is done we need to actually # move the position in the bzip2 file self.dirty = True # ---- File like methods ------------------------------------------------ def __next__(self): ln = self.readline() if ln == b"": raise StopIteration() 
return ln def __iter__(self): return self def flush(self): pass def readable(self): return True def readlines(self, sizehint=-1): return [ln for ln in self] def seekable(self): return True def xreadlines(self): return iter(self) def writable(self): return False bx-python-0.8.13/lib/bx/misc/seekbzip2_tests.py000066400000000000000000000044431415666465100213620ustar00rootroot00000000000000""" Tests for `bx.misc.seekbzip2`. """ import bz2 import os import random from codecs import encode from . import seekbzip2 F = None T = None # F="/Users/james/work/seek-bzip2/test_random.dat.bz2" # T="/Users/james/cache/hg18/align/multiz28way/chr10.maf.bz2" # F=/depot/data1/cache/human/hg18/align/multiz28way/chr1.maf.bz2 if F and os.path.exists(F): def test_linear_reading(): raw_data = bz2.BZ2File(F).read() f = seekbzip2.SeekableBzip2File(F, F + "t") chunk = 1221 pos = 0 for i in range((len(raw_data) // chunk) + 1): a = raw_data[pos:pos+chunk] b = f.read(chunk) assert a == b pos += chunk assert f.tell() == min(pos, len(raw_data)) f.close() def test_random_seeking(): raw_data = bz2.BZ2File(F).read() f = seekbzip2.SeekableBzip2File(F, F + "t") for i in range(10): seek_to = random.randrange(len(raw_data) - 100) chunk = random.randrange(10, 20) f.seek(seek_to) a = f.read(chunk) b = raw_data[seek_to: seek_to + chunk] assert a == b, "'%s' != '%s' on %dth attempt" % (encode(a, "hex"), encode(b, "hex"), i) assert f.tell() == min(seek_to + chunk, len(raw_data)) f.close() if T and os.path.exists(T): def test_text_reading(): raw_file = bz2.BZ2File(T) f = seekbzip2.SeekableBzip2File(T, T + "t") pos = 0 for i, (line, raw_line) in enumerate(zip(f, raw_file)): assert line == raw_line, "%d: %r != %r" % (i, line.rstrip(b"\n"), raw_line) pos += len(line) ftell = f.tell() assert ftell == pos, "%d != %d" % (ftell, pos) f.close() def test_text_reading_2(): raw_data = bz2.BZ2File(T).read() f = seekbzip2.SeekableBzip2File(T, T + "t") raw_lines = raw_data.split(b"\n") pos = 0 i = 0 while True: line = f.readline() if line == b"": break assert line.rstrip(b"\r\n") == raw_lines[i], "{!r} != {!r}".format(line.rstrip(b"\r\n"), raw_lines[i]) pos += len(line) ftell = f.tell() assert ftell == pos, "%d != %d" % (ftell, pos) i += 1 f.close() bx-python-0.8.13/lib/bx/misc/seeklzop.py000066400000000000000000000136551415666465100201030ustar00rootroot00000000000000""" Semi-random access to bz2 compressed data. """ import struct from io import BytesIO try: import lzo except Exception: pass from bx_extras import lrucache class SeekableLzopFile: """ Filelike object supporting read-only semi-random access to bz2 compressed files for which an offset table (bz2t) has been generated by `bzip-table`. 
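
    The blocks here are LZO-compressed (not bz2): ``load_block`` inflates
    them with the python-lzo module, using the block size and
    (offset, compressed size, size) entries read from the table file by
    ``init_table``.

    A minimal usage sketch, mirroring the commented-out tests in
    ``seeklzop_tests.py`` (file names are hypothetical)::

        f = SeekableLzopFile("chr10.maf.lzo", "chr10.maf.lzot",
                             block_cache_size=20)
        f.seek(12345)
        line = f.readline()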
""" def __init__(self, filename, table_filename, block_cache_size=0, **kwargs): self.filename = filename self.table_filename = table_filename self.init_table() self.file = open(self.filename, "rb") self.dirty = True self.closed = False self.at_eof = False self.file_pos = 0 self.current_block_index = -1 self.current_block = None if block_cache_size > 0: self.cache = lrucache.LRUCache(block_cache_size) else: self.cache = None def init_table(self): self.block_size = None self.block_info = [] # Position of corresponding block in compressed file (in bytes) for line in open(self.table_filename): fields = line.split() if fields[0] == "s": self.block_size = int(fields[1]) if fields[0] == "o": offset = int(fields[1]) compressed_size = int(fields[2]) size = int(fields[3]) self.block_info.append((offset, compressed_size, size)) self.nblocks = len(self.block_info) def close(self): self.file.close() self.closed = True def load_block(self, index): if self.cache is not None and index in self.cache: return self.cache[index] else: offset, csize, size = self.block_info[index] # Get the block of compressed data self.file.seek(offset) data = self.file.read(csize) # Need to prepend a header for python-lzo module (silly) data = b''.join((b'\xf0', struct.pack("!I", size), data)) value = lzo.decompress(data) if self.cache is not None: self.cache[index] = value return value def fix_dirty(self): chunk, offset = self.get_block_and_offset(self.file_pos) if self.current_block_index != chunk: self.current_block = BytesIO(self.load_block(chunk)) self.current_block.read(offset) self.current_block_index = chunk else: self.current_block.seek(offset) self.dirty = False def get_block_and_offset(self, index): return int(index // self.block_size), int(index % self.block_size) def seek(self, offset, whence=0): """ Move the file pointer to a particular offset. 
""" # Determine absolute target position if whence == 0: target_pos = offset elif whence == 1: target_pos = self.file_pos + offset elif whence == 2: raise Exception("seek from end not supported") else: raise Exception("Invalid `whence` argument: %r", whence) # Check if this is a noop if target_pos == self.file_pos: return # Verify it is valid # Move the position self.file_pos = target_pos # Mark as dirty, the next time a read is done we need to actually # move the position in the bzip2 file self.dirty = True def tell(self): return self.file_pos def read(self, sizehint=-1): if sizehint < 0: chunks = [] while True: val = self._read(1024*1024) if val: chunks.append(val) else: break return b"".join(chunks) else: return self._read(sizehint) def _read(self, size): if self.dirty: self.fix_dirty() val = b'' while size: part = self.current_block.read(size) size -= len(part) if part: val += part elif self.current_block_index == self.nblocks - 1: self.at_eof = True break else: self.current_block_index += 1 self.current_block = BytesIO(self.load_block(self.current_block_index)) self.file_pos += len(val) return val def readline(self): if self.dirty: self.fix_dirty() if self.at_eof: return b"" rval = [] while True: line = self.current_block.readline() self.file_pos += len(line) rval.append(line) if len(line) > 0 and line[-1] == b'\n': break elif self.current_block_index == self.nblocks - 1: self.at_eof = True break else: self.current_block_index += 1 self.current_block = BytesIO(self.load_block(self.current_block_index)) return b"".join(rval) def __next__(self): line = self.readline() if line == b"": raise StopIteration def __iter__(self): return self def flush(self): pass def readable(self): return True def seekable(self): return True def writable(self): return False # --- Factor out --- MAGIC = b"\x89\x4c\x5a\x4f\x00\x0d\x0a\x1a\x0a" F_ADLER32_D = 0x00000001 F_ADLER32_C = 0x00000002 F_H_EXTRA_FIELD = 0x00000040 F_H_GMTDIFF = 0x00000080 F_CRC32_D = 0x00000100 F_CRC32_C = 0x00000200 F_MULTIPART = 0x00000400 F_H_FILTER = 0x00000800 F_H_CRC32 = 0x00001000 assert struct.calcsize("!H") == 2 assert struct.calcsize("!I") == 4 class UnpackWrapper: def __init__(self, file): self.file = file def read(self, amt): return self.file.read(amt) def get(self, fmt): t = struct.unpack(fmt, self.file.read(struct.calcsize(fmt))) return t[0] bx-python-0.8.13/lib/bx/misc/seeklzop_tests.py000066400000000000000000000012351415666465100213140ustar00rootroot00000000000000""" T="/Users/james/cache/hg18/align/multiz28way/chr10.maf" C="/Users/james/cache/hg18/align/multiz28way/chr10.maf.lzo" def test(): f = seeklzop.SeekableLzopFile( C, C + "t", block_cache_size=20 ) for line in f: pass def test_random_seeking(): s = os.stat( T ).st_size raw = open( T ) f = seeklzop.SeekableLzopFile( C, C + "t", block_cache_size=20 ) for i in range( 1000 ): seek_to = random.randrange( s ) f.seek( seek_to ) raw.seek( seek_to ) l1 = f.readline() l2 = raw.readline() assert l1 == l2, "%r != %r" % ( l1, l2 ) assert raw.tell() == f.tell(), "tells not equal" """ bx-python-0.8.13/lib/bx/motif/000077500000000000000000000000001415666465100160465ustar00rootroot00000000000000bx-python-0.8.13/lib/bx/motif/__init__.py000066400000000000000000000000001415666465100201450ustar00rootroot00000000000000bx-python-0.8.13/lib/bx/motif/_pwm.pyx000066400000000000000000000077101415666465100175570ustar00rootroot00000000000000""" Extensions used by the `pwm` module. 
""" from cpython.version cimport PY_MAJOR_VERSION cdef extern from "Python.h": int PyBytes_AsStringAndSize(object obj, char **buffer, Py_ssize_t* length) except -1 cdef extern from "numpy/arrayobject.h": ctypedef int intp ctypedef extern class numpy.ndarray [object PyArrayObject]: cdef char *data cdef int nd cdef intp *dimensions cdef intp *strides cdef int flags # These might be other types in the actual header depending on platform ctypedef int npy_int16 ctypedef float npy_float32 def score_string( ndarray matrix, ndarray char_to_index, object string, ndarray rval ): """ Score each position in string `string` using the scoring matrix `matrix`. Characters in the string are mapped to columns in the matrix by `char_to_index` and the score for each position is stored in `rval`. matrix *must* be a 2d array of type float32 char_to_index *must* be a 1d array of type int16 rval *must* be a 1d array of type float32 and the same length as string """ cdef char *buffer cdef Py_ssize_t len cdef float score cdef int i, j cdef int matrix_width = matrix.dimensions[0] cdef npy_int16 char_index # Get input string as character pointer if PY_MAJOR_VERSION >= 3: bytes_string = string.encode() else: bytes_string = string PyBytes_AsStringAndSize(bytes_string, &buffer, &len ) # Loop over each position in the string cdef int stop = len - matrix.dimensions[0] + 1 for i from 0 <= i < stop: score = 0.0 for j from 0 <= j < matrix_width: char_index = ( ( char_to_index.data + buffer[i+j] * char_to_index.strides[0] ) )[0] if char_index < 0: break score += ( ( matrix.data + j * matrix.strides[0] + char_index * matrix.strides[1] ) )[0] else: ( ( rval.data + i * rval.strides[0] ) )[0] = score def score_string_with_gaps( ndarray matrix, ndarray char_to_index, object string, ndarray rval ): """ Score each position in string `string` using the scoring matrix `matrix`. Characters in the string are mapped to columns in the matrix by `char_to_index` and the score for each position is stored in `rval`. 
matrix *must* be a 2d array of type float32 char_to_index *must* be a 1d array of type int16 rval *must* be a 1d array of type float32 and the same length as string """ cdef char *buffer cdef Py_ssize_t len cdef float score cdef int i, j, string_pos cdef int matrix_width = matrix.dimensions[0] cdef npy_int16 char_index # Get input string as character pointer if PY_MAJOR_VERSION >= 3: bytes_string = string.encode() else: bytes_string = string PyBytes_AsStringAndSize(bytes_string, &buffer, &len ) # Loop over each position in the string cdef int stop = len - matrix.dimensions[0] + 1 for i from 0 <= i < stop: if buffer[i] == '-': # Never start scoring at a gap continue score = 0.0 string_pos = i for j from 0 <= j < matrix_width: # Advance to the next non-gap character while buffer[string_pos] == '-' and string_pos < len: string_pos += 1 # Ran out of non-gap characters, no more scoring is possible if string_pos == len: return # Find character for position and score char_index = ( ( char_to_index.data + buffer[string_pos] * char_to_index.strides[0] ) )[0] if char_index < 0: break score += ( ( matrix.data + j * matrix.strides[0] + char_index * matrix.strides[1] ) )[0] # Matched a character, move forward string_pos += 1 else: ( ( rval.data + i * rval.strides[0] ) )[0] = score bx-python-0.8.13/lib/bx/motif/io/000077500000000000000000000000001415666465100164555ustar00rootroot00000000000000bx-python-0.8.13/lib/bx/motif/io/__init__.py000066400000000000000000000000001415666465100205540ustar00rootroot00000000000000bx-python-0.8.13/lib/bx/motif/io/transfac.py000066400000000000000000000176431415666465100206430ustar00rootroot00000000000000""" Classes for reading and writing motif data. """ from bx.motif.pwm import FrequencyMatrix class TransfacMotif: def __init__(self): self.accession = None self.id = None self.dates = None self.name = None self.description = None self.binding_factors = None self.basis = None self.comment = None self.matrix = None self.attributes = None self.sites = None transfac_actions = { "AC": ("store_single", "accession"), "ID": ("store_single", "id"), "DT": ("store_single_list", "dates"), "NA": ("store_single", "name"), "DE": ("store_block", "description"), "BF": ("store_single_list", "binding_factors"), "BA": ("store_block", "basis"), "CC": ("store_block", "comment"), "P0": ("store_matrix", "matrix"), # For CREAD format files "TY": ("store_single", "type"), "AT": ("store_single_key_value", "attributes"), "BS": ("store_single_list", "sites") } class TransfacReader: """ Reads motifs in TRANSFAC format. """ parse_actions = transfac_actions def __init__(self, input): self.input = iter(input) self.input_exhausted = False def as_dict(self, key="id"): """ Return a dictionary containing all remaining motifs, using `key` as the dictionary key. """ rval = {} for motif in self: rval[getattr(motif, key)] = motif return rval def __iter__(self): return self def __next__(self): rval = self.next_motif() while rval is None: rval = self.next_motif() return rval def next_motif(self): if self.input_exhausted: raise StopIteration # Accumulate lines until either the end of record indicator "//" is # encounted or the input is exhausted. lines = [] while True: try: line = next(self.input) except StopIteration: self.input_exhausted = True break if line.startswith("//"): break if not line.isspace(): lines.append(line) if lines: return self.parse_record(lines) def parse_record(self, lines): """ Parse a TRANSFAC record out of `lines` and return a motif. 
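
        Each line is split into a two-character prefix (``AC``, ``ID``,
        ``P0``, ...) and the remainder; prefixes are dispatched through
        ``parse_actions`` to populate the motif's fields, and lines with
        unrecognized prefixes are ignored.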
""" # Break lines up temp_lines = [] for line in lines: fields = line.rstrip("\r\n").split(None, 1) if len(fields) == 1: fields.append("") temp_lines.append(fields) lines = temp_lines # Fill in motif from lines motif = TransfacMotif() current_line = 0 while True: # Done parsing if no more lines to consume if current_line >= len(lines): break # Remove prefix and first separator from line prefix, rest = lines[current_line] # No action for this prefix, just ignore the line if prefix not in self.parse_actions: current_line += 1 continue # Get action for line action = self.parse_actions[prefix] # Store a single line value if action[0] == "store_single": key = action[1] setattr(motif, key, rest) current_line += 1 # Add a single line value to a list if action[0] == "store_single_list": key = action[1] if not getattr(motif, key): setattr(motif, key, []) getattr(motif, key).append(rest) current_line += 1 # Add a single line value to a dictionary if action[0] == "store_single_key_value": key = action[1] k, v = rest.strip().split('=', 1) if not getattr(motif, key): setattr(motif, key, {}) getattr(motif, key)[k] = v current_line += 1 # Store a block of text if action[0] == "store_block": key = action[1] value = [] while current_line < len(lines) and lines[current_line][0] == prefix: value.append(lines[current_line][1]) current_line += 1 setattr(motif, key, str.join("\n", value)) # Store a matrix if action[0] == "store_matrix": # First line is alphabet alphabet = rest.split() alphabet_size = len(alphabet) rows = [] pattern = "" current_line += 1 # Next lines are the rows of the matrix (we allow 0 rows) while current_line < len(lines): prefix, rest = lines[current_line] # Prefix should be a two digit 0 padded row number if not prefix.isdigit(): break # The first `alphabet_size` fields are the row values values = rest.split() rows.append([float(_) for _ in values[:alphabet_size]]) # TRANSFAC includes an extra column with the IUPAC code if len(values) > alphabet_size: pattern += values[alphabet_size] current_line += 1 # Only store the pattern if it is the correct length (meaning # that every row had an extra field) if len(pattern) != len(rows): pattern = None matrix = FrequencyMatrix.from_rows(alphabet, rows) setattr(motif, action[1], matrix) # Only return a motif if we saw at least ID or AC or NA if motif.id or motif.accession or motif.name: return motif class TransfacWriter: """ Writes motifs in TRANSFAC format. 
""" actions = transfac_actions def __init__(self, output): self.output = output def write(self, motif): output = self.output for prefix, actions in self.actions.items(): action = actions[0] if action == "store_single": key = actions[1] if getattr(motif, key) is not None: print(prefix, " ", getattr(motif, key), file=output) print("XX", file=output) elif action == "store_single_list": key = actions[1] if getattr(motif, key) is not None: value = getattr(motif, key) for v in value: print(prefix, " ", v, file=output) print("XX", file=output) elif action == "store_single_key_value": key = actions[1] if getattr(motif, key) is not None: value = getattr(motif, key) for k, v in value.items(): print(prefix, " ", f"{k}={v}", file=output) print("XX", file=output) elif action == "store_block": key = actions[1] if getattr(motif, key) is not None: value = getattr(motif, key) for line in value.split("\n"): print(prefix, " ", line, file=output) print("XX", file=output) elif action == "store_matrix": key = actions[1] if getattr(motif, key) is not None: matrix = getattr(motif, key) print(prefix, " ", " ".join(s.rjust(6) for s in matrix.alphabet), file=output) for i in range(matrix.width): print("%02d" % (i + 1), " ", " ".join(str(matrix.values[i, matrix.char_to_index[ord(s)]]).rjust(6) for s in matrix.alphabet), file=output) print("XX", file=output) print("//") bx-python-0.8.13/lib/bx/motif/io/transfac_tests.py000066400000000000000000000050001415666465100220450ustar00rootroot00000000000000from io import StringIO from numpy import allclose from . import transfac sample = """ VV TRANSFAC MATRIX TABLE, Rel.3.2 26-06-1997 XX // AC a XX ID V$MYOD_01 XX DT 19.10.92 (created); ewi. DT 16.10.95 (updated); ewi. XX NA MyoD XX DE myoblast determination gene product XX BF T00526; MyoD; Species: mouse, Mus musculus. XX P0 A C G T 01 100 200 200 0 S 02 200 100 200 0 R 03 300 0 100 100 A 04 0 500 0 0 C 05 500 0 0 0 A 06 0 0 400 100 G 07 0 100 400 0 G 08 0 0 0 500 T 09 0 0 500 0 G 10 0 100 200 200 K 11 0 200 0 300 Y 12 100 0 300 100 G XX BA 5 functional elements in 3 genes XX // AC M00002 XX ID V$E47_01 XX DT 19.10.92 (created); ewi. DT 16.10.95 (updated); ewi. XX NA E47 XX DE E47 XX BF T00207; E47; Species: human, Homo sapiens. XX P0 A C G T 00 400 400 300 0 N 02 200 500 400 0 S 03 300 200 400 200 N 04 200 0 900 0 G 05 0 1100 0 0 C 06 1100 0 0 0 A 07 0 0 1100 0 G 08 100 200 800 0 G 09 0 0 0 1100 T 10 0 0 1100 0 G 11 0 0 400 700 K 12 100 400 300 300 N 13 100 600 200 200 C 14 100 400 400 200 N 15 100 400 200 300 N XX BA 11 selected strong binding sites for E47, E47-MyoD, E12+MyoD BA and (weak) for E12 XX CC Group I in [903]; 5 sites selected in vitro for binding to E12N CC (=N-terminally truncated E12); matrix corrected according to CC the published sequences XX RN [1] RA Sun X.-H., Baltimore D. RT An inhibitory domain of E12 transcription factor prevents RT DNA binding in E12 homodimers but not in E12 heterodimers RL Cell 64:459-470 (1991). 
XX """ # noqa: W291 def test_reader(): input = StringIO(sample) motifs = list(transfac.TransfacReader(input)) assert len(motifs) == 2 # Single value parse assert motifs[1].accession == "M00002" # Value list parse assert motifs[1].dates == ['19.10.92 (created); ewi.', '16.10.95 (updated); ewi.'] # Matrix parse assert motifs[1].matrix.sorted_alphabet == ['A', 'C', 'G', 'T'] assert allclose(motifs[1].matrix.values[0], [400, 400, 300, 0]) bx-python-0.8.13/lib/bx/motif/logo/000077500000000000000000000000001415666465100170065ustar00rootroot00000000000000bx-python-0.8.13/lib/bx/motif/logo/__init__.py000066400000000000000000000052631415666465100211250ustar00rootroot00000000000000import os.path from io import StringIO from string import Template from numpy import ( ceil, log2, transpose, where ) PAD = 2 # Colors from rgb.txt, DNA_DEFAULT_COLORS = { 'A': "0.00 1.00 0.00", # green 'C': "0.00 0.00 1.00", # blue 'G': "1.00 0.65 0.00", # orange red 'T': "1.00 0.00 0.00" # red } # Template is adapted from Jim Kent's lib/dnaMotif.pss to support aritrary # alphabets. TEMPLATE = "template.ps" def freqs_to_heights(matrix): """ Calculate logo height using the method of: Schneider TD, Stephens RM. "Sequence logos: a new way to display consensus sequences." Nucleic Acids Res. 1990 Oct 25;18(20):6097-100. """ # Columns are sequence positions, rows are symbol counts/frequencies f = matrix.values.transpose() n, m = f.shape # Ensure normalized f = f / sum(f, axis=0) # Shannon entropy (the where replaces 0 with 1 so that '0 log 0 == 0') H = - sum(f * log2(where(f, f, 1)), axis=0) # Height return transpose(f * (log2(n) - H)) def eps_logo(matrix, base_width, height, colors=DNA_DEFAULT_COLORS): """ Return an EPS document containing a sequence logo for matrix where each bases is shown as a column of `base_width` points and the total logo height is `height` points. If `colors` is provided it is a mapping from characters to rgb color strings. """ alphabet = matrix.sorted_alphabet rval = StringIO() # Read header ans substitute in width / height template_path = os.path.join(os.path.dirname(__file__), 'template.ps') with open(template_path) as fh: template_str = fh.read() header = Template(template_str) rval.write(header.substitute( bounding_box_width=ceil(base_width * matrix.width) + PAD, bounding_box_height=ceil(height) + PAD)) # Determine heights heights = freqs_to_heights(matrix) height_scale = height / log2(len(alphabet)) # Draw each "row" of the matrix for i, row in enumerate(heights): x = (i * base_width) y = 0 for j, base_height in enumerate(row): char = alphabet[j] page_height = height_scale * base_height # print matrix.alphabet[j], base_height, height_scale, page_height if page_height > 1: # Draw letter rval.write("%s setrgbcolor\n" % colors.get(char, '0 0 0')) rval.write("%3.2f " % x) rval.write("%3.2f " % y) rval.write("%3.2f " % (x + base_width)) rval.write("%3.2f " % (y + page_height)) rval.write("(%s) textInBox\n" % char) y += page_height rval.write("showpage") return rval.getvalue() bx-python-0.8.13/lib/bx/motif/logo/template.ps000066400000000000000000000022211415666465100211620ustar00rootroot00000000000000%!PS-Adobe-3.1 EPSF-3.0 %%BoundingBox: 0 0 ${bounding_box_width} ${bounding_box_height} /logoFont { /Helvetica-Bold findfont 10 scalefont setfont } def /textBounds { % Figure out bounding box of string in current font. 
Usage: % call: text letterSize % sets: tbX1 tbY1 tbX2 tbY2 tbW tbH % The bounds are relative to the current position gsave newpath 0 0 moveto true charpath flattenpath pathbbox grestore /tbY2 exch def /tbX2 exch def /tbY1 exch def /tbX1 exch def /tbW tbX2 tbX1 sub def /tbH tbY2 tbY1 sub def } def /textInBox { % Draw text so that it fits inside of box. Usage: % x1 y1 x2 y2 text textInBox % Copy parameters from variables to stack and save context /tibText exch def /tibY2 exch def /tibX2 exch def /tibY1 exch def /tibX1 exch def gsave % move to x1/y1 adjusted for text offset tibText textBounds tibX1 tbX1 sub tibY1 tbY1 sub moveto % Set scaling /tibW tibX2 tibX1 sub def /tibH tibY2 tibY1 sub def tibW tbW div tibH tbH div scale % draw and return tibText show grestore } def /aColor { 0 0.7 0 setrgbcolor } def /cColor { 0 0.5 0.7 setrgbcolor } def /gColor { 0.8 0.5 0 setrgbcolor } def /tColor { 0.9 0 0 setrgbcolor } def logoFont bx-python-0.8.13/lib/bx/motif/pwm.py000066400000000000000000000124301415666465100172230ustar00rootroot00000000000000""" Classes for working with position specific matrices. """ from copy import copy import numpy from numpy import ( float32, int16, log2, maximum, nan, newaxis, ones, zeros ) from . import _pwm class BaseMatrix: """ Base class for position specific matrices. """ def __init__(self, alphabet=None, sorted_alphabet=None, char_to_index=None, values=None): self.alphabet = alphabet self.sorted_alphabet = sorted_alphabet self.char_to_index = char_to_index self.values = values @classmethod def from_rows(Class, alphabet, rows): """ Create a new matrix for a sequence over alphabet `alphabet` taking values from `rows` which is a list whose length is the width of the matrix, and whose elements are lists of values associated with each character (in the order those characters appear in alphabet). """ # Sorted alphabet sorted_alphabet = sorted(alphabet) # Character to index mapping (initialized to -1) char_to_index = zeros((256), int16) - 1 for i, ch in enumerate(sorted_alphabet): char_to_index[ord(ch)] = i # Array values = zeros((len(rows), len(alphabet)), float32) for i, row in enumerate(rows): assert len(row) == len(alphabet) for ch, val in zip(alphabet, row): values[i, char_to_index[ord(ch)]] = val # Matrix matrix = Class() matrix.alphabet = alphabet matrix.sorted_alphabet = sorted_alphabet matrix.char_to_index = char_to_index matrix.values = values return matrix @classmethod def create_from_other(Class, other, values=None): """ Create a new Matrix with attributes taken from `other` but with the values taken from `values` if provided """ m = Class() m.alphabet = other.alphabet m.sorted_alphabet = other.sorted_alphabet m.char_to_index = other.char_to_index if values is not None: m.values = values else: m.values = other.values return m @property def width(self): """ Return the width (size along the sequence axis) of this matrix. """ return self.values.shape[0] def reverse_complement(self): """ Create the reverse complement of this matrix. The result probably only makese sense if the alphabet is that of DNA ('A','C','G','T'). """ rval = copy(self) # Conveniently enough, reversing rows and columns is exactly what we # want, since this results in A swapping with T and C swapping with G. rval.values = self.values[::-1, ::-1].copy() return rval class FrequencyMatrix(BaseMatrix): """ A position specific count/frequency matrix. 
""" DEFAULT_CORRECTION = 0.0000000001 """ Default value to use for correcting when dealing with counts of zero, chosen to produce scoring matrices that are the same as produced by CREAD. """ def to_logodds_scoring_matrix(self, background=None, correction=DEFAULT_CORRECTION): """ Create a standard logodds scoring matrix. """ alphabet_size = len(self.alphabet) if background is None: background = ones(alphabet_size, float32) / alphabet_size # Row totals as a one column array totals = numpy.sum(self.values, 1)[:, newaxis] values = log2(maximum(self.values, correction)) - log2(totals) - log2(maximum(background, correction)) return ScoringMatrix.create_from_other(self, values.astype(float32)) def to_stormo_scoring_matrix(self, background=None): """ Create a scoring matrix from this count matrix using the method from: Hertz, G.Z. and G.D. Stormo (1999). Identifying DNA and protein patterns with statistically significant alignments of multiple sequences. Bioinformatics 15(7): 563-577. """ alphabet_size = len(self.alphabet) if background is None: background = ones(alphabet_size, float32) / alphabet_size # Row totals as a one column array totals = numpy.sum(self.values, 1)[:, newaxis] values = log2(self.values + background) - log2(totals + 1) - log2(background) return ScoringMatrix.create_from_other(self, values.astype(float32)) class ScoringMatrix(BaseMatrix): """ A position specific matrix containing values that are suitable for scoring a sequence. """ def score_string(self, string): """ Score each valid position in `string` using this scoring matrix. Positions which were not scored are set to nan. """ rval = zeros(len(string), float32) rval[:] = nan _pwm.score_string(self.values, self.char_to_index, string, rval) return rval def score_string_with_gaps(self, string): """ Score each valid position in `string` using this scoring matrix. Positions which were not scored are set to nan. Gap characters are ignored (matrices score across them). """ rval = zeros(len(string), float32) rval[:] = nan _pwm.score_string_with_gaps(self.values, self.char_to_index, string, rval) return rval bx-python-0.8.13/lib/bx/motif/pwm_tests.py000066400000000000000000000063661415666465100204600ustar00rootroot00000000000000from numpy import ( allclose, isnan ) from . 
import pwm def test_create(): m = pwm.FrequencyMatrix.from_rows(['A', 'C', 'G', 'T'], get_ctcf_rows()) # Alphabet sort assert m.sorted_alphabet == ['A', 'C', 'G', 'T'] # Character to index mapping assert m.char_to_index[ord('A')] == 0 assert m.char_to_index[ord('C')] == 1 assert m.char_to_index[ord('G')] == 2 assert m.char_to_index[ord('T')] == 3 assert m.char_to_index[ord('Q')] == -1 # Values assert allclose(m.values[0], [2620, 2052, 3013, 2314]) assert allclose(m.values[19], [3144, 3231, 3056, 567]) def test_scoring(): m = pwm.FrequencyMatrix.from_rows(['A', 'C', 'G', 'T'], get_ctcf_rows()) # Stormo method sm = m.to_stormo_scoring_matrix() # Forward matches assert allclose(sm.score_string("AATCACCACCTCCTGGCAGG")[0], -156.8261261) assert allclose(sm.score_string("TGCCTGCCTCTGTAGGCTCC")[0], -128.8106842) assert allclose(sm.score_string("GTTGCCAGTTGGGGGAAGCA")[0], 4.65049839) assert allclose(sm.score_string("GCAGACACCAGGTGGTTCAG")[0], 1.60168743) # Reverse matches rc = sm.reverse_complement() assert allclose(rc.score_string("AATCACCACCTCCTGGCAGG")[0], 0.014178276062) assert allclose(rc.score_string("TGCCTGCCTCTGTAGGCTCC")[0], 0.723828315735) assert allclose(rc.score_string("GTTGCCAGTTGGGGGAAGCA")[0], -126.99407196) assert allclose(rc.score_string("GCAGACACCAGGTGGTTCAG")[0], -86.9560623169) # Nothing valid assert isnan(sm.score_string_with_gaps("XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX")).all() # Too short assert isnan(sm.score_string("TTTT")).all() def test_scoring_with_gaps(): m = pwm.FrequencyMatrix.from_rows(['A', 'C', 'G', 'T'], get_ctcf_rows()) # Stormo method sm = m.to_stormo_scoring_matrix() # Forward matches assert allclose(sm.score_string_with_gaps("GTTGCCAGT----TGGGGGAAGCATTT---AA")[0], 4.65049839) assert allclose(sm.score_string_with_gaps("GCAGA--CACCAGGTGG--TTCAG---")[0], 1.60168743) assert allclose(sm.score_string_with_gaps("----GTTGCCAGTTGGGGGAAGCA")[4], 4.65049839) assert allclose(sm.score_string_with_gaps("TTT--GTT--GCCA--GTTGGGG-G-A-A-G-C-A-")[5], 4.65049839) assert isnan(sm.score_string_with_gaps("TTT--GTT--GCCA--GTTGGGG-G-A-A-G-C-A-")[4]) # Nothing valid assert isnan(sm.score_string_with_gaps("XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX")).all() assert isnan(sm.score_string_with_gaps("------------------------------------")).all() # Too short assert isnan(sm.score_string_with_gaps("TTTT")).all() assert isnan(sm.score_string_with_gaps("TTTT----")).all() def get_ctcf_rows(): """ The CTCF primary site motif """ return [ [2620, 2052, 3013, 2314], [0, 3580, 1746, 4672], [2008, 1790, 4497, 1703], [3362, 0, 6637, 0], [0, 10000, 0, 0], [0, 10000, 0, 0], [7467, 0, 1310, 1222], [786, 4890, 4323, 0], [1179, 6288, 829, 1703], [10000, 0, 0, 0], [0, 0, 10000, 0], [4847, 0, 5152, 0], [0, 0, 6200, 3799], [0, 0, 10000, 0], [0, 0, 10000, 0], [1572, 7467, 0, 960], [3842, 0, 5545, 611], [0, 5895, 4104, 0], [1615, 4192, 1397, 2794], [3144, 3231, 3056, 567] ] bx-python-0.8.13/lib/bx/phylo/000077500000000000000000000000001415666465100160635ustar00rootroot00000000000000bx-python-0.8.13/lib/bx/phylo/__init__.py000066400000000000000000000000521415666465100201710ustar00rootroot00000000000000""" Phylogenetic file format support. """ bx-python-0.8.13/lib/bx/phylo/newick.py000066400000000000000000000071571415666465100177270ustar00rootroot00000000000000""" Support for parsing phylogenetic tree's in newick format. TODO: Tree/Edge should be a generic data structure, not newick specific. 
""" from functools import total_ordering from bx_extras.pyparsing import ( alphas, CaselessLiteral, Combine, delimitedList, Forward, nums, Optional, QuotedString, Suppress, Word ) __all__ = ["Tree", "Edge", "NewickParser", "newick_parser"] def indent(s): return "\n".join(" " + line for line in s.split("\n")) def print_(p, s): print(p, type(s), s) return s @total_ordering class Tree: def __init__(self, label, edges=None): self.label = label self.edges = edges def pretty(self): if self.edges: return "Tree( '{}',\n{}\n)".format(self.label, indent("\n".join(repr(edge) for edge in self.edges))) else: return "Tree( '%s' )" % self.label def __lt__(self, other): return self.__dict__ < other.__dict__ def __eq__(self, other): return self.__dict__ == other.__dict__ def __repr__(self): return f"Tree( {repr(self.label)}, {repr(self.edges)} )" @total_ordering class Edge: def __init__(self, length, tip): self.length = length self.tip = tip def pretty(self): return f"Edge( {repr(self.length)}, \n{indent(repr(self.tip))}\n)" def __lt__(self, other): return self.__dict__ < other.__dict__ def __eq__(self, other): return self.__dict__ == other.__dict__ def __repr__(self): return f"Edge( {repr(self.length)}, {repr(self.tip)} )" def create_parser(): """ Create a 'pyparsing' parser for newick format trees roughly based on the grammar here: http://evolution.genetics.washington.edu/phylip/newick_doc.html Problems: - Is a single leaf a valid tree? - Branch length on root? Doesn't make sense to me, and forces the root to be an edge. """ # Basic tokens real = Combine( Word("+-" + nums, nums) + Optional("." + Optional(Word(nums))) + Optional(CaselessLiteral("E") + Word("+-" + nums, nums))) lpar = Suppress("(") rpar = Suppress(")") colon = Suppress(":") semi = Suppress(";") # Labels are either unquoted or single quoted, if unquoted underscores will be replaced with spaces quoted_label = QuotedString("'", None, "''").setParseAction(lambda s, l, t: t[0]) simple_label = Word(alphas + nums + "_.").setParseAction(lambda s, l, t: t[0].replace("_", " ")) label = quoted_label | simple_label # Branch length is a real number (note though that exponents are not in the spec!) branch_length = real.setParseAction(lambda s, l, t: float(t[0])) # Need to forward declare this due to circularity node_list = Forward() # A node might have an list of edges (for a subtree), a label, and/or a branch length node = (Optional(node_list, None) + Optional(label, "") + Optional(colon + branch_length, None)) \ .setParseAction(lambda s, l, t: Edge(t[2], Tree(t[1] or None, t[0]))) node_list << (lpar + delimitedList(node) + rpar) \ .setParseAction(lambda s, l, t: [t.asList()]) # The root cannot have a branch length tree = (node_list + Optional(label, "") + semi)\ .setParseAction(lambda s, l, t: Tree(t[1] or None, t[0])) # Return the outermost element return tree class NewickParser: """ Class wrapping a parser for building Trees from newick format strings """ def __init__(self): self.parser = create_parser() def parse_string(self, s): return self.parser.parseString(s)[0] newick_parser = NewickParser() bx-python-0.8.13/lib/bx/phylo/newick_tests.py000066400000000000000000000067371415666465100211540ustar00rootroot00000000000000""" Tests for `bx.phylo.newick`. 
""" import pytest from bx.phylo.newick import ( Edge, newick_parser, Tree ) trees = [r"(B:6.0,(A:5.0,C:3.0,'Foo ''bar':4.0)Q_X:5.0,D:11.0)label;", "((raccoon:19.19959,bear:6.80041):0.84600,((sea_lion:11.99700, seal:12.00300):7.52973,(( monkey:100.85930,cat:47.14069):20.59201, weasel:18.87953):2.09460):3.87382,dog:25.46154);", "(Bovine:0.69395,(Gibbon:0.36079,(Orang:0.33636,(Gorilla:0.17147,(Chimp:0.19268, Human:0.11927):0.08386):0.06124):0.15057):0.54939,Mouse:1.21460);", "(Bovine:0.69395,(Hylobates:0.36079,(Pongo:0.33636,(G._Gorilla:0.17147, (P._paniscus:0.19268,H._sapiens:0.11927):0.08386):0.06124):0.15057):0.54939, Rodent:1.21460);", "(B,(A,C,E),D);", "(,(,,),);", "(A,(B,C),D);", "((A,D),(C,B));"] results = [(Tree('label', [Edge(6.0, Tree('B', None)), Edge(5.0, Tree('Q X', [Edge(5.0, Tree('A', None)), Edge(3.0, Tree('C', None)), Edge(4.0, Tree("Foo 'bar", None))])), Edge(11.0, Tree('D', None))])), (Tree(None, [Edge(0.84599999999999997, Tree(None, [Edge(19.199590000000001, Tree('raccoon', None)), Edge(6.8004100000000003, Tree('bear', None))])), Edge(3.8738199999999998, Tree(None, [Edge(7.5297299999999998, Tree(None, [Edge(11.997, Tree('sea lion', None)), Edge(12.003, Tree('seal', None))])), Edge(2.0945999999999998, Tree(None, [Edge(20.592009999999998, Tree(None, [Edge(100.8593, Tree('monkey', None)), Edge(47.140689999999999, Tree('cat', None))])), Edge(18.879529999999999, Tree('weasel', None))]))])), Edge(25.461539999999999, Tree('dog', None))])), (Tree(None, [Edge(0.69394999999999996, Tree('Bovine', None)), Edge(0.54939000000000004, Tree(None, [Edge(0.36079, Tree('Gibbon', None)), Edge(0.15057000000000001, Tree(None, [Edge(0.33635999999999999, Tree('Orang', None)), Edge(0.061240000000000003, Tree(None, [Edge(0.17147000000000001, Tree('Gorilla', None)), Edge(0.083860000000000004, Tree(None, [Edge(0.19267999999999999, Tree('Chimp', None)), Edge(0.11927, Tree('Human', None))]))]))]))])), Edge(1.2145999999999999, Tree('Mouse', None))])), (Tree(None, [Edge(0.69394999999999996, Tree('Bovine', None)), Edge(0.54939000000000004, Tree(None, [Edge(0.36079, Tree('Hylobates', None)), Edge(0.15057000000000001, Tree(None, [Edge(0.33635999999999999, Tree('Pongo', None)), Edge(0.061240000000000003, Tree(None, [Edge(0.17147000000000001, Tree('G. Gorilla', None)), Edge(0.083860000000000004, Tree(None, [Edge(0.19267999999999999, Tree('P. paniscus', None)), Edge(0.11927, Tree('H. 
sapiens', None))]))]))]))])), Edge(1.2145999999999999, Tree('Rodent', None))])), (Tree(None, [Edge(None, Tree('B', None)), Edge(None, Tree(None, [Edge(None, Tree('A', None)), Edge(None, Tree('C', None)), Edge(None, Tree('E', None))])), Edge(None, Tree('D', None))])), (Tree(None, [Edge(None, Tree(None, None)), Edge(None, Tree(None, [Edge(None, Tree(None, None)), Edge(None, Tree(None, None)), Edge(None, Tree(None, None))])), Edge(None, Tree(None, None))])), (Tree(None, [Edge(None, Tree('A', None)), Edge(None, Tree(None, [Edge(None, Tree('B', None)), Edge(None, Tree('C', None))])), Edge(None, Tree('D', None))])), (Tree(None, [Edge(None, Tree(None, [Edge(None, Tree('A', None)), Edge(None, Tree('D', None))])), Edge(None, Tree(None, [Edge(None, Tree('C', None)), Edge(None, Tree('B', None))]))])), ] @pytest.mark.parametrize("tree,result", zip(trees, results)) def test_newick_tree(tree, result): assert newick_parser.parse_string(tree) == result bx-python-0.8.13/lib/bx/phylo/phast.py000066400000000000000000000024231415666465100175550ustar00rootroot00000000000000""" Rudimentary support for PHAST's tree model file format (a simple format for storing trees and rate matrices). """ from numpy import zeros class TreeModel: def __init__(self): self.alphabet = None self.radix = 0 self.order = 0 self.subst_mod = None self.background = None self.tree = None self.matrix = None @staticmethod def from_file(f): input = iter(f) tm = TreeModel() for line in input: if line.startswith("ALPHABET:"): tm.alphabet = tuple(line.split()[1:]) tm.radix = len(tm.alphabet) if line.startswith("ORDER:"): tm.order = int(line.split()[1]) if line.startswith("SUBST_MOD:"): tm.subst_mod = line[11:].rstrip() if line.startswith("BACKGROUND:"): tm.background = tuple(map(float, line.split()[1:])) if line.startswith("TREE:"): tm.tree = line[6:].strip() if line.startswith("RATE_MAT:"): matrix = zeros((tm.radix, tm.radix), float) for i in range(len(tm.alphabet)): matrix[i] = [float(_) for _ in next(input).split()] tm.matrix = matrix return tm bx-python-0.8.13/lib/bx/phylo/phast_tests.py000066400000000000000000000035661415666465100210100ustar00rootroot00000000000000""" Tests for `bx.phylo.phast`. 
""" from io import StringIO from numpy import ( allclose, array ) from bx.phylo.phast import TreeModel test_data = """ALPHABET: A C G T - ORDER: 0 SUBST_MOD: HKY85+Gap TRAINING_LNL: -178667772.836697 BACKGROUND: 0.227006 0.169993 0.169307 0.227262 0.206432 RATE_MAT: -0.971735 0.122443 0.465361 0.163692 0.220238 0.163508 -1.130351 0.121949 0.624656 0.220238 0.623952 0.122443 -1.130326 0.163692 0.220238 0.163508 0.467247 0.121949 -0.972942 0.220238 0.242187 0.181362 0.180630 0.242461 -0.846640 TREE: ((((((hg16:0.007738,panTro1:0.008356):0.027141,(baboon:0.009853,rheMac1:0.010187):0.035049):0.103138,galago:0.174770):0.019102,((rn3:0.092633,mm6:0.089667):0.273942,rabbit:0.230839):0.021927):0.023762,(canFam1:0.204637,(elephant:0.123777,tenrec:0.278910):0.085977):0.009439):0.306466,monDom1:0.401151)mammals; """ # noqa: W291 def test_parser(): tm = TreeModel.from_file(StringIO(test_data)) assert tm.alphabet == ('A', 'C', 'G', 'T', '-') assert tm.order == 0 assert tm.subst_mod == "HKY85+Gap" assert allclose(tm.background, [0.227006, 0.169993, 0.169307, 0.227262, 0.206432]) assert allclose(tm.matrix, array( [[-0.971735, 0.122443, 0.465361, 0.163692, 0.220238], [0.163508, -1.130351, 0.121949, 0.624656, 0.220238], [0.623952, 0.122443, -1.130326, 0.163692, 0.220238], [0.163508, 0.467247, 0.121949, -0.972942, 0.220238], [0.242187, 0.181362, 0.180630, 0.242461, -0.846640]])) assert tm.tree == "((((((hg16:0.007738,panTro1:0.008356):0.027141,(baboon:0.009853,rheMac1:0.010187):0.035049):0.103138,galago:0.174770):0.019102,((rn3:0.092633,mm6:0.089667):0.273942,rabbit:0.230839):0.021927):0.023762,(canFam1:0.204637,(elephant:0.123777,tenrec:0.278910):0.085977):0.009439):0.306466,monDom1:0.401151)mammals;" bx-python-0.8.13/lib/bx/pwm/000077500000000000000000000000001415666465100155335ustar00rootroot00000000000000bx-python-0.8.13/lib/bx/pwm/__init__.py000066400000000000000000000003651415666465100176500ustar00rootroot00000000000000""" Data structures and tools for working with Position Weight Matrices (PWMs). TODO: Some of the packages in this directory are actually command line programs that provide no library functions and should be moved to the scripts directory. """ bx-python-0.8.13/lib/bx/pwm/_position_weight_matrix.pyx000066400000000000000000000003021415666465100232260ustar00rootroot00000000000000cdef extern from "pwm_utils.h": int pattern_match( char* string, char* pattern, int n) def c_match_consensus( sequence, pattern, size ): return pattern_match( sequence, pattern, size ) bx-python-0.8.13/lib/bx/pwm/bed_score_aligned_pwm.py000077500000000000000000000054031415666465100224050ustar00rootroot00000000000000#!/usr/bin/env python2.4 """ Returns all positions of a maf with any pwm score > threshold The positions are projected onto human coordinates """ import sys from bx import intervals from bx.align import maf as align_maf from bx.pwm.pwm_score_maf import MafBlockScorer from . import position_weight_matrix as pwmx def isnan(x): return not x == x def main(): if len(sys.argv) < 5: print("%s bedfile inmaf spec1,spec2,... 
motif_file " % sys.argv[0], file=sys.stderr) sys.exit(0) # read in intervals regions = {} for line in open(sys.argv[1]): if line.startswith('#'): continue fields = line.strip().split() chrom, start, end = fields[0], int(fields[1]), int(fields[2]) try: name = fields[3] except IndexError: name = None if chrom not in regions: regions[chrom] = intervals.Intersecter() regions[chrom].add(start, end, name) pwm = {} for wm in pwmx.Reader(open(sys.argv[4])): pwm[wm.id] = wm print(wm.id, len(wm), file=sys.stderr) inmaf = open(sys.argv[2]) threshold = 0.5 species = [] for sp in sys.argv[3].split(','): species.append(sp) for maf in align_maf.Reader(inmaf): mafchrom = maf.components[0].src.split('.')[1] mafstart = maf.components[0].start mafend = maf.components[0].end reftext = maf.components[0].text # maf block scores for each matrix for scoremax, width, headers in MafBlockScorer(pwm, species, maf): blocklength = width mafsrc, mafstart, mafend = headers[0] mafchrom = mafsrc.split('.')[1] # lists of scores for each position in scoremax for mx_name, mx in scoremax.items(): for offset in range(blocklength): # scan all species with threshold for i in range(len(species)): if mx[i][offset] > threshold: refstart = mafstart + offset - reftext.count('-', 0, offset) refend = refstart + len(pwm[mx_name]) data = " ".join(["%.2f" % mx[x][offset] for x in range(len(species))]) # quote the motif r = regions[mafchrom].find(refstart, refend) if mafchrom in regions and len(r) > 0: region_label = r[0].value else: continue v_name = mx_name.replace(' ', '_') print(mafchrom, refstart, refend, region_label, v_name, data) break if __name__ == '__main__': main() bx-python-0.8.13/lib/bx/pwm/bed_score_aligned_string.py000077500000000000000000000056071415666465100231160ustar00rootroot00000000000000#!/usr/bin/env python2.4 """ Returns all positions of a maf with any pwm score > threshold The positions are projected onto human coordinates """ import sys from bx import intervals from bx.align import maf as align_maf from bx.pwm.pwm_score_maf import MafMotifScorer def isnan(x): return not x == x def main(): if len(sys.argv) < 5: print("%s bedfile inmaf spec1,spec2,... 
string [string2,...]" % sys.argv[0], file=sys.stderr) sys.exit(0) # read in intervals regions = {} for line in open(sys.argv[1]): if line.startswith('#'): continue fields = line.strip().split() chrom, start, end = fields[0], int(fields[1]), int(fields[2]) try: name = fields[3] except IndexError: name = None if chrom not in regions: regions[chrom] = intervals.Intersecter() regions[chrom].add(start, end, name) motif_strings = sys.argv[4:] if not isinstance(motif_strings, list): motif_strings = [motif_strings] inmaf = open(sys.argv[2]) threshold = 0.5 species = [] for sp in sys.argv[3].split(','): species.append(sp) for maf in align_maf.Reader(inmaf): mafchrom = maf.components[0].src.split('.')[1] mafstart = maf.components[0].start mafend = maf.components[0].end reftext = maf.components[0].text r = regions[mafchrom].find(mafstart, mafend) if mafchrom not in regions or len(r) == 0: continue # maf block scores for each matrix for scoremax, width, headers in MafMotifScorer(species, maf, motif_strings): blocklength = width mafsrc, mafstart, mafend = headers[0] mafchrom = mafsrc.split('.')[1] # lists of scores for each position in scoremax for mx_name, mx in scoremax.items(): for offset in range(blocklength): # scan all species with threshold for i in range(len(species)): if mx[i][offset] > threshold: refstart = mafstart + offset - reftext.count('-', 0, offset) refend = refstart + len(mx_name) data = " ".join(["%.2f" % mx[x][offset] for x in range(len(species))]) # quote the motif r = regions[mafchrom].find(refstart, refend) if mafchrom in regions and len(r) > 0: region_label = r[0].value else: # region_label = 0 continue v_name = mx_name.replace(' ', '_') print(mafchrom, refstart, refend, region_label, v_name, data) break if __name__ == '__main__': main() bx-python-0.8.13/lib/bx/pwm/maf_select_motifs.py000077500000000000000000000041271415666465100215770ustar00rootroot00000000000000#!/usr/bin/env python2.4 """ Returns all positions of a maf with any pwm score > threshold The positions are projected onto human coordinates """ import sys import bx.pwm.position_weight_matrix as pwmx from bx.align import maf as align_maf from bx.pwm.pwm_score_maf import MafMotifSelect def isnan(x): return not x == x def main(): if len(sys.argv) < 5: print("%s transfac|basic pwmfile inmaf threshold [motif]" % sys.argv[0], file=sys.stderr) sys.exit(2) r = pwmx.Reader(open(sys.argv[2]), format=sys.argv[1]) pwm = next(iter(r)) inmaf = open(sys.argv[3]) threshold = float(sys.argv[4]) if len(sys.argv) > 5: motif = sys.argv[5] else: motif = None for maf in align_maf.Reader(inmaf): for mafmotif, pwm_score, motif_score in MafMotifSelect(maf, pwm, motif, threshold): print(mafmotif, pwm_score, motif_score) print('zzzzzzzzzzzzzzzzzzzzzzzzzzzzz') def mafwrite(alignment, kvec=None, jvec=None, file=sys.stdout): file.write("a score=" + str(alignment.score)) for key in alignment.attributes: file.write(f" {key}={alignment.attributes[key]}") file.write("\n") rows = [] if not kvec: kvec = [0 for c in alignment.components] if not jvec: jvec = [0 for c in alignment.components] for c, x, y in zip(alignment.components, kvec, jvec): rows.append(("s", c.src, str(c.start), str(c.size), c.strand, str(c.src_size), c.text, "%.2f" % x, str(y))) file.write(format_tabular(rows, "llrrrrrrr")) def format_tabular(rows, align=None): if len(rows) == 0: return "" lengths = [len(col) for col in rows[0]] for row in rows[1:]: for i in range(0, len(row)): lengths[i] = max(lengths[i], len(row[i])) rval = "" for row in rows: for i in range(0, len(row)): if align 
and align[i] == "l": rval += row[i].ljust(lengths[i]) else: rval += row[i].rjust(lengths[i]) rval += " " rval += "\n" return rval if __name__ == '__main__': main() bx-python-0.8.13/lib/bx/pwm/position_weight_matrix.py000077500000000000000000000737061415666465100227240ustar00rootroot00000000000000#!/usr/bin/env python import math import sys from numpy import float32, putmask, shape, zeros # This is the average of all species in the alignment outside of exons # > mean(r) # A T C G # 0.2863776 0.2878264 0.2129560 0.2128400 # > sd(r) # A T C G # 0.01316192 0.01371148 0.01293836 0.01386655 ENCODE_NONCODING_BACKGROUND = {'A': 0.2863776, 'T': 0.2878264, 'G': 0.2128400, 'C': 0.2129560} class Align: def __init__(self, seqrows, headers=None): self.rows = seqrows self.nrows = len(seqrows) ncol = None for rownum, row in enumerate(self.rows): try: if ncol is None: ncol = len(row) elif ncol != len(row): raise ValueError("Align: __init__:alignment block:row %d does not have %d columns, it has %d" % (rownum, ncol, len(row))) except Exception: print(row) raise Exception('') self.ncols = ncol self.dims = (self.nrows, self.ncols) self.headers = headers def __str__(self): return "\n".join(self.rows) class AlignScoreMatrix: def __init__(self, align): nan = float('nan') matrix = zeros((align.nrows, align.ncols), float32) # set to nans for ir in range(len(matrix)): for ic in range(len(matrix[ir])): matrix[ir][ic] = nan self.matrix = matrix def __len__(self): return shape(self.matrix)[1] def __str__(self): print(self.matrix) def score_align_motif(align, motif, gapmask=None, byPosition=True): chr, chr_start, chr_stop = align.headers[0] # a blank score matrix nrows, ncols = align.dims ascoremax = AlignScoreMatrix(align) scoremax = ascoremax.matrix minSeqLen = len(motif) for ir in range(nrows): pass # row is missing data if isnan(align.rows[ir][0]): continue for start in range(ncols): if align.rows[ir][start] == '-': continue elif align.rows[ir][start] == 'n': continue elif align.rows[ir][start] == 'N': continue # get enough sequence for the weight matrix subseq = "" end = 0 ic = start while len(subseq) < minSeqLen: if ic >= len(align.rows[ir]): break char = align.rows[ir][ic].upper() ic += 1 if char == '-' or char == 'N': continue else: subseq += char if len(subseq) == minSeqLen: end = ic+1 for_score = int(match_consensus(subseq, motif)) revseq = reverse_complement(subseq) rev_score = int(match_consensus(revseq, motif)) score = max(for_score, rev_score) # dbg # if ir == 0: print >>sys.stderr, int(chr_start) + start - align.rows[ir].count('-',0,start), subseq, score # replace the alignment positions with the result if byPosition: scoremax[ir][start] = score else: # replace positions matching the width of the pwm for i in range(start, end): if isnan(scoremax[ir][i]): scoremax[ir][i] = score elif score > scoremax[ir][i]: scoremax[ir][i] = score # break # mask gap characters if gapmask is None: gapmask = score_align_gaps(align) putmask(scoremax, gapmask, float('nan')) return scoremax # ----------- # # WeightMatrix-- # A position weight matrix (PWM) representation of a motif. # # ---------- # construction arguments: # id: id (name) of the motif # rows: the matrix; each row is a hash from symbol to weight, with # .. the weight in string form # alphabet: symbols allowed # background: hash from symbol to background probability of that symbol; if # .. not specified, ENCODE_NONCODING_BACKGROUND is used # internal fields: # rows: the matrix; each row is a hash from symbol to log-odds score # .. 
of that symbol for that row of the weight matrix # counts: the matrix; count[row][sym] is the weight, as an integer # probs: the matrix; probs[row][sym] is the weight, as an probability # ---------- class PositionWeightMatrix: complementMap = str.maketrans("ACGTacgt", "TGCAtgca") # IUPAC-IUB symbols = { 'A': frozenset(['A']), 'C': frozenset(['C']), 'G': frozenset(['G']), 'T': frozenset(['T']), 'R': frozenset(['A', 'G']), 'Y': frozenset(['C', 'T']), 'M': frozenset(['A', 'C']), 'K': frozenset(['G', 'T']), 'S': frozenset(['G', 'C']), 'W': frozenset(['A', 'T']), 'H': frozenset(['A', 'C', 'T']), 'B': frozenset(['G', 'T', 'C']), 'V': frozenset(['G', 'C', 'A']), 'D': frozenset(['G', 'T', 'A'])} def __init__(self, id, rows, alphabet, background=None, score_correction=True): self.id = id self.alphabet = alphabet nsymbols = len(self.alphabet) for i in range(len(self.alphabet)): self.alphabet[i] = self.alphabet[i].upper() if background is not None: self.background = background else: self.background = {} sorted_alphabet = [] sorted_alphabet[:] = self.alphabet[:] sorted_alphabet.sort() if ['A', 'C', 'G', 'T'] == sorted_alphabet: self.background = ENCODE_NONCODING_BACKGROUND else: for x in self.alphabet: self.background[x] = float(1)/len(self.alphabet) if score_correction: self.score_correction = self.corrected_probability_score else: self.score_correction = self.simple_probability # partition counts from consensus symbol # in order to properly handle scaling in the presense of non-integers, # we prescan the matrix to figure out the largest scale factor, then go # back through and scale 'em all (some rows may be integer counts, # others may be probabilities) self.consensus = [] scale = 1 for i in range(len(rows)): # try: fields, consensus = rows[i][:nsymbols], rows[i][-1] for x, count in enumerate(fields): try: (w, s) = self.parse_weight(count) except ValueError: raise ValueError("pwm row {} has bad weight {}".format(" ".join(fields), w)) # replace row counts with (values,scale) rows[i][x] = (w, s) scale = max(s, scale) # except: # print >>sys.stderr,rows # raise ValueError # raise ValueError, "pwm row %s has wrong field count" % " ".join(fields) self.consensus.append(consensus) hashRows = [] self.matrix_base_counts = {} # for pseudocounts self.counts = [] # for scaled counts self.probs = [] # for probabilities # scale counts to integers for i in range(len(rows)): hashRows.append(dict()) for x, sym in enumerate(alphabet): (w, s) = rows[i][x] hashRows[i][sym] = w * scale/s assert hashRows[i][sym] >= 0 if sym not in self.matrix_base_counts: self.matrix_base_counts[sym] = 0 self.matrix_base_counts[sym] += hashRows[i][sym] self.counts.append(hashRows[i].copy()) self.probs.append(hashRows[i].copy()) totalWeight = float(sum(self.probs[i].values())) for sym in self.probs[i]: self.probs[i][sym] /= totalWeight self.sites = sum(hashRows[0].values()) # scan pwm to pre-compute logs of probabilities and min and max log-odds # scores (over the whole PWM) for scaling; note that the same min and max # applies for scaling long-odds scores for quantum comparisions self.information_content = [] minSum = 0 maxSum = 0 for i in range(len(hashRows)): self.information_content.append(self.information_content_calculation(i, hashRows)) newHashRow = {} for base in self.alphabet: newHashRow[base] = self.pwm_score(base, i, hashRows) hashRows[i] = newHashRow minSum += min(hashRows[i].values()) maxSum += max(hashRows[i].values()) self.minSum = minSum self.maxSum = maxSum self.rows = hashRows # Reference 1: Wasserman and Sandelin: Nat 
Rev Genet. 2004 Apr;5(4):276-87. # Reference 2: Gertz et al.: Genome Res. 2005 Aug;15(8):1145-52. def information_content_calculation(self, i, counts): # Reference 1) return 2 + sum(self.information_base_content(base, i, counts) for base in self.alphabet) # Reference 2) # return sum( [ self.information_base_content(base,i,counts) for base in self.alphabet ] ) def information_base_content(self, base, i, counts): # Reference 1) # return self.score_correction(counts,base,i) * math.log ( self.score_correction(counts,base,i), 2) # Reference 2) return self.score_correction(counts, base, i) * self.pwm_score(base, i, counts) def __call__(self, seq): return self.score_seq(seq) def __add__(self, other): assert self.alphabet == other.alphabet r, (p, q) = self.max_correlation(other) if p == q == 0: width = max(len(self), len(other)) elif p > 0: width = max(len(other)+p, len(self)) elif q > 0: width = max(len(self)+q, len(other)) sumx = zeros((width, len(self.alphabet)), dtype='int') selfx = self.to_count_matrix() otherx = other.to_count_matrix() if p == q == 0: sumx[:len(self)] += selfx sumx[:len(other)] += otherx elif p > 0: sumx[p:p+len(other)] += otherx sumx[:len(self)] += selfx else: sumx[:len(other)] += otherx sumx[q:q+len(self)] += selfx newRows = [] for x in sumx: y = list(x) y.append(consensus_symbol(y)) y = [str(yi) for yi in y] newRows.append(y) return PositionWeightMatrix(self.id+other.id, newRows, self.alphabet, self.background) def __old_add__(self, other, maxp=None): assert self.alphabet == other.alphabet bigN = max(len(self), len(other)) smallN = min(len(self), len(other)) if not maxp: prsq = self.correlation(other) maxp = prsq.index(max(prsq)) leftpad = ' ' * maxp rightsize = bigN - smallN rightpad = ' ' * rightsize leftStrings = [] rightStrings = [] if len(self) > len(other): larger = self smaller = other leftStrings = self.consensus rightStrings = list(leftpad) + other.consensus + list(rightpad) else: smaller = self larger = other leftStrings = list(leftpad) + self.consensus + list(rightpad) rightStrings = other.consensus sumx = zeros([bigN, len(self.alphabet)]) sumx += larger.to_count_matrix() sumx[maxp:maxp+smallN] += smaller.to_count_matrix() newRows = [] for i, x in enumerate(sumx): y = list(x) y.append(leftStrings[i] + rightStrings[i]) y = [str(yi) for yi in y] newRows.append(y) # return PositionWeightMatrix(self.id+other.id,newRows[maxp:maxp+smallN],self.alphabet,self.background) return PositionWeightMatrix(self.id+other.id, newRows, self.alphabet, self.background) def to_matrix(self): m = zeros([len(self), len(self.alphabet)]) for i in range(len(self)): for j, a in enumerate(self.alphabet): m[i][j] = self[i][a] return m def to_count_matrix(self): m = zeros([len(self), len(self.alphabet)], dtype='int') for i in range(len(self)): for j, a in enumerate(self.alphabet): m[i][j] = self.counts[i][a] return m def max_correlation(self, otherwmx): rsq, ixtuple = self.slide_correlation(otherwmx) max_rsq = max(rsq) maxp, maxq = ixtuple[rsq.index(max_rsq)] return max_rsq, (maxp, maxq) def slide_correlation(self, other): assert self.alphabet == other.alphabet selfx = self.to_count_matrix() otherx = other.to_count_matrix() rsq = [] ixtuple = [] # self staggered over other, scan self backwards until flush for q in range(len(other)-1, -1, -1): r = 0 n = 0 for p in range(len(self)): if q+p < len(other): r += rsquared(list(selfx[p]), list(otherx[q+p])) n += 1 else: n += 1 rsq.append(r/n) ixtuple.append((0, q)) # other staggered below self , scan other forward for p in range(1, len(self)): r = 0 
n = 0 for q in range(len(other)): if p+q < len(self): r += rsquared(list(selfx[p+q]), list(otherx[q])) n += 1 else: n += 1 rsq.append(r/n) ixtuple.append((p, 0)) return rsq, ixtuple def correlation(self, otherwmx): assert self.alphabet == otherwmx.alphabet if len(self) > len(otherwmx): larger = self.to_count_matrix() smaller = otherwmx.to_count_matrix() else: smaller = self.to_count_matrix() larger = otherwmx.to_count_matrix() bigN = len(larger) smallN = len(smaller) position_rsq = [] # slide small over large, for ave rsq for p in range(bigN): if p+smallN <= bigN: r = 0 for q in range(smallN): r += rsquared(list(smaller[q]), list(larger[p+q])) position_rsq.append(r / smallN) return position_rsq def score_align(self, align, gapmask=None, byPosition=True): # a blank score matrix nrows, ncols = align.dims ascoremax = AlignScoreMatrix(align) scoremax = ascoremax.matrix minSeqLen = len(self) for ir in range(nrows): # row is missing data if isnan(align.rows[ir][0]): continue for start in range(ncols): if align.rows[ir][start] == '-': continue elif align.rows[ir][start] == 'n': continue elif align.rows[ir][start] == 'N': continue # get enough sequence for the weight matrix subseq = "" end = 0 for ic in range(start, ncols): char = align.rows[ir][ic] if char == '-' or char == 'N': continue else: subseq += char if len(subseq) == minSeqLen: end = ic+1 # forward scores = self.score_seq(subseq) raw, forward_score = scores[0] # reverse scores = self.score_reverse_seq(subseq) raw, reverse_score = scores[0] score = max(forward_score, reverse_score) # replace the alignment positions with the result if byPosition: scoremax[ir][start] = score else: # replace positions matching the width of the pwm for i in range(start, end): if isnan(scoremax[ir][i]): scoremax[ir][i] = score elif score > scoremax[ir][i]: scoremax[ir][i] = score # mask gap characters if gapmask is None: gapmask = score_align_gaps(align) putmask(scoremax, gapmask, float('nan')) return scoremax # seq can be a string, a list of characters, or a quantum sequence (a list # of hashes from symbols to probability) def score_seq(self, seq): if isinstance(seq[0], dict): return self.score_quantum_seq(seq) scores = [] for start in range(len(seq)): if start + len(self) > len(seq): break subseq = seq[start:start+len(self)] raw = 0 try: for i, nt in enumerate(subseq): raw += self.rows[i][nt.upper()] scaled = self.scaled(raw) except KeyError: raw, scaled = float('nan'), float('nan') scores.append((raw, scaled)) return scores def score_quantum_seq(self, seq): scores = [] for start in range(len(seq)): if start + len(self) > len(seq): break subseq = seq[start:start+len(self)] raw = 0 try: for i, nt in enumerate(subseq): numer = sum(subseq[i][nt] * self.probs[i][nt] for nt in subseq[i]) denom = sum(subseq[i][nt] * self.background[nt] for nt in subseq[i]) raw += math.log(numer/denom, 2) scaled = self.scaled(raw) except KeyError: raw, scaled = float('nan'), float('nan') except OverflowError: raw, scaled = float('nan'), float('nan') except ValueError: raw, scaled = float('nan'), float('nan') scores.append((raw, scaled)) return scores def score_reverse_seq(self, seq): revSeq = reverse_complement(seq) scores = self.score_seq(revSeq) scores.reverse() return scores def scaled(self, val): return (val - self.minSum) / (self.maxSum - self.minSum) def pseudocount(self, base=None): def f(count): return math.sqrt(count + 1) if base in self.alphabet: return f(self.matrix_base_counts[base]) elif base is None: return f(self.sites) else: return float("nan") def 
simple_probability(self, freq, base, i): # p(base,i) = f(base,i) # ---------------------- # sum(f(base,{A,C,G,T})) return float(freq[i][base]) / sum(freq[i][nt] for nt in self.alphabet) def corrected_probability_score(self, freq, base, i): # p(base,i) = f(base,i) + s(base) # -------------------- # N + sum(s(A,C,T,G)) f = float(freq[i][base]) s = self.pseudocount(base) N = self.sites # print >>sys.stderr, "f:%.3f + s:%.3f = %.3f" % (f,s,f + s) # print >>sys.stderr, "-------------------------" # print >>sys.stderr, "N:%d + %d = %d" % (N,self.pseudocount(), N + self.pseudocount()) # print >>sys.stderr, "\t\t %.3f\n" % ((f + s) / (N + self.pseudocount())) assert (f + s) > 0 return (f + s) / (N + self.pseudocount()) def pwm_score(self, base, i, freq, background=None): if background is None: background = self.background p = self.score_correction(freq, base, i) # print >>sys.stderr, p # print >>sys.stderr, "k %d %c" % (i,base),freq[i][base] b = background[base] try: return math.log(p/b, 2) except OverflowError: # print >>sys.stderr,"base=%c, math.log(%.3f / %.3f)" % (base,p,b) # print >>sys.stderr,self.id return float('nan') except ValueError: # print >>sys.stderr,"base=%c, math.log(%.3f / %.3f)" % (base,p,b) # print >>sys.stderr,self.id return float('nan') def parse_weight(self, weightString): fields = weightString.split(".") if len(fields) > 2: raise ValueError w = int(fields[0]) s = 1 if len(fields) == 2: for _ in range(0, len(fields[1])): s *= 10 w = s*w + int(fields[1]) return (w, s) # w = the weight # s = the scale used (a power of 10) def __str__(self): lines = [self.id] headers = ["%s" % nt for nt in self.alphabet] lines.append("P0\t" + "\t".join(headers)) for ix in range(0, len(self.rows)): weights = ["%d" % self.counts[ix][nt] for nt in self.alphabet] # lines.append(("%02d\t" % ix) + "\t".join(weights) + "\t" + self.consensus[ix]) lines.append(("%02d\t" % ix) + "\t".join(weights) + "\t" + str(sum(self.counts[ix].values())) + "\t" + self.consensus[ix]) return "\n".join(lines) def __getitem__(self, key): return self.rows[key] def __setitem__(self, key, value): self.rows[key] = value def __len__(self): return len(self.rows) def score_align_gaps(align): # a blank score matrix nrows, ncols = align.dims scoremax = AlignScoreMatrix(align).matrix for ir in range(nrows): # row is missing data if isnan(align.rows[ir][0]): continue # scan for gaps for pos in range(ncols): if align.rows[ir][pos] == '-': scoremax[ir][pos] = 1 else: scoremax[ir][pos] = 0 return scoremax # ----------- # # WeightMatrix Reader-- # Read position weight matrices (PWM) from a file. 
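#
# A minimal usage sketch (the file name "motifs.transfac" and the dict
# comprehension are illustrative, not from this module):
#
#     import bx.pwm.position_weight_matrix as pwmx
#     with open("motifs.transfac") as f:
#         pwms = {wm.id: wm for wm in pwmx.Reader(f, format="transfac")}
#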
# # ----------- class Reader: """Iterate over all interesting weight matrices in a file""" def __init__(self, file, tfIds=None, name=None, format='basic', background=None, score_correction=True): self.tfIds = tfIds self.file = file self.name = name self.lineNumber = 0 self.format = format self.background = background self.score_correction = score_correction def close(self): self.file.close() def where(self): if self.name is None: return "line %d" % self.lineNumber else: return "line %d in %s" % (self.lineNumber, self.name) def __iter__(self): if self.format == 'basic': return self.read_as_basic() elif self.format == 'transfac': return self.read_as_transfac() else: raise ValueError("unknown weight matrix file format: '%s'" % self.format) def read_as_basic(self): tfId = None pwmRows = None alphabet = ['A', 'C', 'G', 'T'] while (True): line = self.file.readline() if not line: break line = line.strip() self.lineNumber += 1 if line.startswith(">"): if pwmRows is not None: yield PositionWeightMatrix(tfId, pwmRows, alphabet, background=self.background) # try: # yield PositionWeightMatrix(tfId,pwmRows,alphabet) # except: # print >>sys.stderr, "Failed to read", tfId tfId = line.strip()[1:] pwmRows = [] elif line[0].isdigit(): tokens = line.strip().split() tokens.append(consensus_symbol(line)) # print >>sys.stderr,[ "%.2f" % (float(v)/sum(vals)) for v in vals], tokens[-1] pwmRows.append(tokens) if pwmRows is not None: # we've finished collecting a desired matrix yield PositionWeightMatrix(tfId, pwmRows, alphabet, background=self.background, score_correction=self.score_correction) def read_as_transfac(self): self.tfToPwm = {} tfId = None pwmRows = None while True: line = self.file.readline() if not line: break line = line.strip() self.lineNumber += 1 # handle an ID line if line.startswith("ID"): if pwmRows is not None: # we've finished collecting a desired matrix try: # FIXME: alphabet is undefined here! yield PositionWeightMatrix(tfId, pwmRows, alphabet, background=self.background, score_correction=self.score_correction) # noqa: F821 except Exception: print("Failed to read", tfId, file=sys.stderr) tfId = None pwmRows = None tokens = line.split(None, 2) if len(tokens) != 2: raise ValueError("bad line, need two fields (%s)" % self.where()) tfId = tokens[1] if self.tfIds is not None and (tfId not in self.tfIds): continue # ignore it, this isn't a desired matrix if tfId in self.tfToPwm: raise ValueError(f"transcription factor {tfId} appears twice ({self.where()})") pwmRows = [] # start collecting a desired matrix continue # if we're not collecting, skip this line if pwmRows is None: continue if len(line) < 1: continue # name, if present, added to ID if line.startswith('NA'): words = line.strip().split() tfId = tfId + "\t" + " ".join(words[1:]) # handle a P0 line if line.startswith("P0"): alphabet = line.split()[1:] if len(alphabet) < 2: raise ValueError("bad line, need more dna (%s)" % self.where()) continue # handle a 01,02,etc. 
line if line[0].isdigit(): tokens = line.split() try: index = int(tokens[0]) if index != len(pwmRows)+1: raise ValueError except Exception: raise ValueError("bad line, bad index (%s)" % self.where()) pwmRows.append(tokens[1:]) continue # skip low quality entries if line.startswith("CC TRANSFAC Sites of quality"): print(line.strip(), tfId, file=sys.stderr) pwmRows = None continue if pwmRows is not None: # we've finished collecting a desired matrix yield PositionWeightMatrix(tfId, pwmRows, alphabet, background=self.background, score_correction=self.score_correction) # clean up self.tfToPwm = None def isnan(x): # return ieeespecial.isnan(x) if x == x: return False return True def reverse_complement(nukes): return nukes[::-1].translate(PositionWeightMatrix.complementMap) def rsquared(x, y): try: return sum_of_squares(x, y)**2 / (sum_of_squares(x) * sum_of_squares(y)) except ZeroDivisionError: # return float('nan') return 0 def sum_of_squares(x, y=None): if not y: y = x xmean = float(sum(x)) / len(x) ymean = float(sum(y)) / len(y) assert len(x) == len(y) return sum(float(xi)*float(yi) for xi, yi in zip(x, y)) - len(x)*xmean*ymean def consensus_symbol(pattern): if isinstance(pattern, str): try: pattern = [int(x) for x in pattern.split()] except ValueError as e: print(pattern, file=sys.stderr) raise ValueError(e) # IUPAC-IUB nomenclature for wobblers wobblers = { 'R': frozenset(['A', 'G']), 'Y': frozenset(['C', 'T']), 'M': frozenset(['A', 'C']), 'K': frozenset(['G', 'T']), 'S': frozenset(['G', 'C']), 'W': frozenset(['A', 'T']), 'H': frozenset(['A', 'C', 'T']), 'B': frozenset(['G', 'T', 'C']), 'V': frozenset(['G', 'C', 'A']), 'D': frozenset(['G', 'T', 'A'])} symbols = ['A', 'C', 'G', 'T'] if isinstance(pattern, dict): pattern = [pattern[u] for u in symbols] total = sum(pattern) f = [(space/1e5)+(float(x)/total) for space, x in enumerate(pattern)] copy = [] copy[:] = f[:] copy.sort() # http://www.genomatix.de/online_help/help_matinspector/matrix_help.html -- # url says consensus must be greater than 50%, and at least twice the freq # of the second-most frequent. A double-degenerate symbol can be used # if the top two account for 75% or more of the nt, if each is less than 50% # Otherwise, N is used in the consensus. 
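# Worked example (hypothetical counts, columns in A,C,G,T order): for
# [12, 1, 1, 2] the top frequency is A at 0.75, which exceeds 0.5 and is
# at least twice the runner-up (T at 0.125), so the consensus is 'A'.
# For [7, 1, 7, 1] no symbol exceeds 0.5, but A and G together cover
# 0.875 >= 0.75, so the double-degenerate symbol 'R' ({A,G}) is returned.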
tops = copy[-2:] if tops[1] > 0.5 and tops[1] >= 2 * tops[0]: return symbols[f.index(tops[1])] elif tops[0] < 0.5 and sum(tops) >= 0.75: degen = frozenset(symbols[f.index(v)] for v in tops) for degenSymbol, wobbles in wobblers.items(): # print >>sys.stderr,wobbles if degen == wobbles: return degenSymbol else: return 'N' print(pattern, file=sys.stderr) raise Exception('?') # import C extensions try: from ._position_weight_matrix import c_match_consensus def match_consensus(sequence, pattern): return c_match_consensus(sequence, pattern, len(sequence)) # print >>sys.stderr, "C match_consensus used" except ImportError: # print >>sys.stderr, "python match_consensus used" def match_consensus(sequence, pattern, size): for s, p in zip(sequence, pattern): if p == 'N': continue if s not in PositionWeightMatrix.symbols[p]: return False return True bx-python-0.8.13/lib/bx/pwm/pwm_score_maf.py000077500000000000000000000171241415666465100207360ustar00rootroot00000000000000#!/usr/bin/python2.4 import sys import bx.pwm.position_weight_matrix as pwmx from bx.align import maf as align_maf def isnan(x): # return ieeespecial.isnan(x) if x == x: return False return True NaN = float('nan') def main(): pwm_file = sys.argv[1] splist = sys.argv[2] if len(sys.argv) == 4: inmaf = open(sys.argv[3]) else: inmaf = sys.stdin # read alignment species species = [] for sp in splist.split(','): species.append(sp) # read weight matrices pwm = {} for wm in pwmx.Reader(open(pwm_file), format='basic'): pwm[wm.id] = wm fbunch = {} for scoremax, index, headers in MafScorer(pwm, species, inmaf): for k, matrix in scoremax.items(): fname = k + '.mx' if fname not in fbunch: fbunch[fname] = open(fname, 'w') print("Writing", fname, file=sys.stderr) for i in range(len(matrix)): for j in range(len(matrix[i])): print("%.2f" % matrix[i][j], end=' ', file=fbunch[fname]) print(file=fbunch[fname]) for file in fbunch.values(): file.close() def MafScorer(pwm, species, inmaf): index = 0 scoremax, width = None, None for maf in align_maf.Reader(inmaf): # try: if True: val = MafBlockScorer(pwm, species, maf) for scoremax, width, headers in val: yield scoremax, index, headers try: pass except Exception: print("Failed on:", file=sys.stderr) syserr = align_maf.Writer(sys.stderr) syserr.write(maf) if width: print(width, file=sys.stderr) if scoremax: print(len(scoremax), file=sys.stderr) syserr.close() sys.exit(1) index += width yield scoremax, index, headers def MafMotifSelect(mafblock, pwm, motif=None, threshold=0): if motif is not None and len(motif) != len(pwm): raise Exception("pwm and motif must be the same length") # generic alignment alignlist = [c.text for c in mafblock.components] align = pwmx.Align(alignlist) nrows, ncols = align.dims # required sequence length minSeqLen = len(motif) # record the text sizes from the alignment rows for start in range(ncols - minSeqLen): if align.rows[0][start] == '-': continue subseq = "" pwm_score_vec = [] motif_score_vec = [] max_cols = 0 for ir in range(nrows): expanded = align.rows[ir].count('-', start, minSeqLen) subtext = align.rows[ir][start: minSeqLen+expanded] max_cols = max(len(subtext), max_cols) subseq = subtext.replace('-', '') revseq = pwmx.reverse_complement(subseq) # pwm score nill, f_score = pwm.score_seq(subseq)[0] r_score, nill = pwm.score_seq(revseq)[0] pwm_score_vec.append(max(f_score, r_score)) # consensus score if motif is not None: for_score = int(pwmx.match_consensus(subseq, motif)) rev_score = int(pwmx.match_consensus(revseq, motif)) motif_score_vec.append(max(for_score, rev_score)) # 
check threshold try: assert not isnan(max(pwm_score_vec)) assert not isnan(max(motif_score_vec)) except AssertionError: print(pwm_score_vec, motif_score_vec, file=sys.stderr) print(len(subseq), len(pwm), file=sys.stderr) if max(pwm_score_vec) < threshold: continue if max(motif_score_vec) < threshold: continue # chop block col_start = start col_end = max_cols + 1 motifmaf = mafblock.slice(col_start, col_end) yield motifmaf, pwm_score_vec, motif_score_vec """ for ir in range(nrows): # scan alignment row for motif subsequences for start in range(ncols): if align.rows[ir][start] == '-': continue elif align.rows[ir][start] == 'n': continue elif align.rows[ir][start] == 'N': continue # gather enough subseq for motif for ic in range(start,ncols): char = align.rows[ir][ic].upper() if char == '-' or char == 'N': continue else: subseq += char if len(subseq) == minSeqLen: revseq = pwmx.reverse_complement( subseq ) align_match_lens.append( ic ) # pwm score nill,f_score = pwm.score_seq( subseq )[0] r_score, nill = pwm.score_seq( revseq )[0] pwm_score_vec.append( max(f_score, r_score) ) # consensus score if motif is not None: for_score = int( pwmx.match_consensus(subseq,motif) ) rev_score = int( pwmx.match_consensus(revseq,motif) ) motif_score_vec.append( max(for_score, rev_score) ) #check threshold try: assert not isnan(max(pwm_score_vec) ) assert not isnan(max(motif_score_vec) ) except: print >>sys.stderr, pwm_score_vec, motif_score_vec print >>sys.stderr, len(subseq), len(pwm) if max(pwm_score_vec) < threshold: continue if max(motif_score_vec) < threshold: continue # chop block col_start = start col_end = max( align_match_lens ) + 1 motifmaf = mafblock.slice( col_start, col_end ) print subseq,revseq,ic print align_match_lens yield motifmaf, pwm_score_vec, motif_score_vec """ def MafBlockScorer(pwm, species, maf): width = len(maf.components[0].text) headers = [(c.src, c.start, c.end) for c in maf.components] # expand block rows to full mafBlockSpecies = [specName.src.split('.')[0] for specName in maf.components] alignlist = [] for sp in species: try: i = mafBlockSpecies.index(sp) alignlist.append(maf.components[i].text) except ValueError: alignlist.append([NaN for n in range(width)]) alignrows = pwmx.Align(alignlist) scoremax = {} # record gap positions filter = pwmx.score_align_gaps(alignrows) # score pwm models for model in pwm.keys(): scoremax[model] = pwm[model].score_align(alignrows, filter) yield scoremax, width, headers def MafMotifScorer(species, maf, motifs): width = len(maf.components[0].text) headers = [(c.src, c.start, c.end) for c in maf.components] # expand block rows to full mafBlockSpecies = [specName.src.split('.')[0] for specName in maf.components] alignlist = [] for sp in species: try: i = mafBlockSpecies.index(sp) alignlist.append(maf.components[i].text) except ValueError: alignlist.append([NaN for n in range(width)]) alignrows = pwmx.Align(alignlist, headers) # record gap positions filter = pwmx.score_align_gaps(alignrows) # score motif if isinstance(motifs, list): scoremax = {} for string in motifs: scoremax[string] = pwmx.score_align_motif(alignrows, string, filter) else: scoremax = pwmx.score_align_motif(alignrows, motifs, filter) yield scoremax, width, headers if __name__ == '__main__': main() bx-python-0.8.13/lib/bx/pwm/pwm_score_motifs.py000077500000000000000000000033501415666465100214700ustar00rootroot00000000000000#!/usr/bin/env python2.4 """ Returns all positions of a maf with any pwm score > threshold The positions are projected onto human coordinates """ import sys from 
bx.align import maf as align_maf from bx.pwm.pwm_score_maf import MafMotifScorer def isnan(x): return not x == x def main(): if len(sys.argv) < 4: print("%s motif inmaf spec1,spec2,... " % sys.argv[0], file=sys.stderr) sys.exit(0) targmotif = sys.argv[1] inmaf = open(sys.argv[2]) threshold = 0 species = [] for sp in sys.argv[3].split(','): species.append(sp) for maf in align_maf.Reader(inmaf): mafchrom = maf.components[0].src.split('.')[1] mafstart = maf.components[0].start mafend = maf.components[0].end reftext = maf.components[0].text # maf block scores for each matrix for scoremax, width, headers in MafMotifScorer(species, maf, targmotif): blocklength = width mafsrc, mafstart, mafend = headers[0] mafchrom = mafsrc.split('.')[1] # lists of scores for each position in scoremax mx = scoremax for offset in range(blocklength): # scan all species with threshold for i in range(len(species)): if mx[i][offset] > threshold: refstart = mafstart + offset - reftext.count('-', 0, offset) refend = refstart + len(targmotif) data = " ".join(["%.2f" % mx[x][offset] for x in range(len(species))]) # quote the motif print(mafchrom, refstart, refend, "'"+targmotif+"'", data) break if __name__ == '__main__': main() bx-python-0.8.13/lib/bx/pwm/pwm_score_positions.py000077500000000000000000000037451415666465100222260ustar00rootroot00000000000000#!/usr/bin/env python2.4 """ Returns all positions of a maf with any pwm score > threshold The positions are projected onto human coordinates """ import sys import bx.pwm.position_weight_matrix as pwmx from bx.align import maf as align_maf from bx.pwm.pwm_score_maf import MafBlockScorer def isnan(x): return not x == x def main(): if len(sys.argv) < 6: print("%s transfac|basic pwmfile inmaf threshold spec1,spec2,... " % sys.argv[0], file=sys.stderr) sys.exit(0) pwm = {} format = sys.argv[1] for wm in pwmx.Reader(open(sys.argv[2]), format=format): pwm[wm.id] = wm inmaf = open(sys.argv[3]) threshold = float(sys.argv[4]) species = [] for sp in sys.argv[5].split(','): species.append(sp) for maf in align_maf.Reader(inmaf): mafchrom = maf.components[0].src.split('.')[1] mafstart = maf.components[0].start mafend = maf.components[0].end reftext = maf.components[0].text # maf block scores for each matrix for scoremax, width, headers in MafBlockScorer(pwm, species, maf): blocklength = width mafsrc, mafstart, mafend = headers[0] mafchrom = mafsrc.split('.')[1] # lists of scores for each position in scoremax for id, mx in scoremax.items(): for offset in range(blocklength): # scan all species with threshold for i in range(len(species)): if mx[i][offset] > threshold: refstart = mafstart + offset - reftext.count('-', 0, offset) refend = refstart + len(pwm[id]) data = " ".join(["%.2f" % mx[x][offset] for x in range(len(species))]) # underscore spaces in the name print(mafchrom, refstart, refend, id.replace(' ', '_'), data) break if __name__ == '__main__': main() bx-python-0.8.13/lib/bx/pwm/pwm_tests.py000066400000000000000000000055151415666465100201400ustar00rootroot00000000000000import unittest from io import StringIO import bx.pwm.position_weight_matrix as pwm basicPwm = \ """>MA0101 c-REL REL 0 5 8 4 0 1 15 1 1 0 15 1 5 1 9 2 6 5 3 3 5 1 1 10 1 0 0 16 2 0 0 15 0 15 0 2 1 16 0 0 """ transfacPwm = \ """ID TATA XX P0 A C G T 01 33 73 78 16 S 02 10 24 11 155 T 03 176 3 2 19 A 04 2 7 3 188 T 05 178 2 3 17 A 06 133 2 2 63 A 07 183 3 10 4 A 08 112 2 24 62 W 09 78 26 80 16 R 10 29 72 75 24 N 11 42 74 68 16 N 12 42 65 66 27 N 13 41 60 67 32 N 14 35 54 72 39 N 15 40 51 73 36 N XX """ background 
= {'A': .28, 'C': .21, 'G': .24, 'T': .27} dSeq = "ACCGAGTTAGCGTAAA" dScoresExpected = "-15.3697 0.4240 -16.5309 0.4027" qSeq = [{'A': 0.27, 'C': 0.34, 'G': 0.07, 'T': 0.32}, {'A': 0.24, 'C': 0.32, 'G': 0.09, 'T': 0.35}, {'A': 0.80, 'C': 0.11, 'G': 0.03, 'T': 0.06}, {'A': 0.07, 'C': 0.22, 'G': 0.37, 'T': 0.34}, {'A': 0.07, 'C': 0.44, 'G': 0.03, 'T': 0.46}, {'A': 0.43, 'C': 0.04, 'G': 0.18, 'T': 0.35}, {'A': 0.84, 'C': 0.14, 'G': 0.01, 'T': 0.01}, {'A': 0.31, 'C': 0.52, 'G': 0.13, 'T': 0.04}, {'A': 0.22, 'C': 0.22, 'G': 0.45, 'T': 0.11}, {'A': 0.36, 'C': 0.15, 'G': 0.42, 'T': 0.07}, {'A': 0.11, 'C': 0.78, 'G': 0.07, 'T': 0.04}, {'A': 0.07, 'C': 0.16, 'G': 0.64, 'T': 0.13}, {'A': 0.34, 'C': 0.59, 'G': 0.03, 'T': 0.04}, {'A': 0.32, 'C': 0.15, 'G': 0.07, 'T': 0.46}, {'A': 0.07, 'C': 0.03, 'G': 0.59, 'T': 0.31}] qScoresExpected = "4.1106 0.7810" class PWMTestCase (unittest.TestCase): def testReader(self): # test basic format: i.e. for jaspar wms = [wm for wm in pwm.Reader(StringIO(basicPwm), format="basic", background=background, score_correction=False)] assert len(wms) == 1 # test transfac format wms = [wm for wm in pwm.Reader(StringIO(transfacPwm), format="transfac", background=background, score_correction=False)] assert len(wms) == 1 wm = wms[0] dScores = wm.score_seq(dSeq) assert len(dScores) == 2 assert f"{dScores[0][0]:.4f} {dScores[0][1]:.4f} {dScores[1][0]:.4f} {dScores[1][1]:.4f}" == dScoresExpected qdSeq = [] for (ix, nt) in enumerate(dSeq): qdSeq.append(dict()) qdSeq[ix][nt] = 1.0 qScores = wm.score_seq(qdSeq) assert len(qScores) == 2 assert f"{qScores[0][0]:.4f} {qScores[0][1]:.4f} {qScores[1][0]:.4f} {qScores[1][1]:.4f}" == dScoresExpected qScores = wm.score_seq(qSeq) assert len(qScores) == 1 assert f"{qScores[0][0]:.4f} {qScores[0][1]:.4f}" == qScoresExpected bx-python-0.8.13/lib/bx/seq/000077500000000000000000000000001415666465100155205ustar00rootroot00000000000000bx-python-0.8.13/lib/bx/seq/__init__.py000066400000000000000000000003031415666465100176250ustar00rootroot00000000000000""" Classes for dealing with biological sequences. See `core` for the abstract sequence classes and `nib` and `qdna` for specifics of various formats. """ from bx.seq.core import * # noqa: F40 bx-python-0.8.13/lib/bx/seq/_nib.pyx000066400000000000000000000041711415666465100171740ustar00rootroot00000000000000from cpython.version cimport PY_MAJOR_VERSION cdef extern from "Python.h": char * PyBytes_AsString( object ) object PyBytes_FromStringAndSize( char *, Py_ssize_t ) import struct, sys cdef char * NIB_I2C_TABLE cdef char * NIB_I2C_TABLE_FIRST cdef char * NIB_I2C_TABLE_SECOND #NIB_I2C_TABLE = "TCAGNXXXtcagnxxx" NIB_I2C_TABLE_FIRST = "TTTTTTTTTTTTTTTTCCCCCCCCCCCCCCCCAAAAAAAAAAAAAAAAGGGGGGGGGGGGGGGGNNNNNNNNNNNNNNNNXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXttttttttttttttttccccccccccccccccaaaaaaaaaaaaaaaaggggggggggggggggnnnnnnnnnnnnnnnnxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" NIB_I2C_TABLE_SECOND = "TCAGNXXXtcagnxxxTCAGNXXXtcagnxxxTCAGNXXXtcagnxxxTCAGNXXXtcagnxxxTCAGNXXXtcagnxxxTCAGNXXXtcagnxxxTCAGNXXXtcagnxxxTCAGNXXXtcagnxxxTCAGNXXXtcagnxxxTCAGNXXXtcagnxxxTCAGNXXXtcagnxxxTCAGNXXXtcagnxxxTCAGNXXXtcagnxxxTCAGNXXXtcagnxxxTCAGNXXXtcagnxxxTCAGNXXXtcagnxxx" def translate_raw_data( data, int start, int length ): """ Data is a block read from the file that needs to be unpacked, dealing with end conditions based on start/length. 
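Worked example (codes per the NIB_I2C tables below): a packed byte 0x21
carries code 2 ('A') in its high nybble and code 1 ('C') in its low
nybble, so from an even start it expands to "AC"; an odd start consumes
only the low nybble of the first byte.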
""" cdef int i, j cdef char * p_rval cdef unsigned char * p_data if length == 0 : return "" # Allocate string to write into rval = PyBytes_FromStringAndSize( NULL, length ) # Get char pointer access to strings p_rval = PyBytes_AsString( rval ) p_data = PyBytes_AsString( data ) i = 0 # Odd start if start & 1: #p_rval[i] = NIB_I2C_TABLE[ p_data[0] & 0xF ] p_rval[i] = NIB_I2C_TABLE_SECOND[ p_data[0] ] p_data = p_data + 1 i = 1 # Two output values for each input value for j from 0 <= j < (length-i)/2: #p_rval[i] = NIB_I2C_TABLE[ ( p_data[0] >> 4 ) & 0xF ]; #p_rval[i+1] = NIB_I2C_TABLE[ ( p_data[0] >> 0 ) & 0xF ]; p_rval[i] = NIB_I2C_TABLE_FIRST [ p_data[0] ] p_rval[i+1] = NIB_I2C_TABLE_SECOND[ p_data[0] ] i = i + 2 p_data = p_data + 1 # Odd end if i < length: p_rval[i] = NIB_I2C_TABLE_FIRST[ p_data[0] ] if PY_MAJOR_VERSION >= 3: return rval.decode() else: return rval bx-python-0.8.13/lib/bx/seq/_twobit.pyx000066400000000000000000000104541415666465100177350ustar00rootroot00000000000000from cpython.version cimport PY_MAJOR_VERSION cdef extern from "Python.h": char * PyBytes_AsString( object ) object PyBytes_FromStringAndSize( char *, Py_ssize_t ) cdef extern from "ctype.h": int tolower( int ) cdef extern from "string.h": void * memset( void *, int, size_t ) import struct, sys from bisect import bisect cdef char* valToNt valToNt = "TCAG" def read( file, seq, int fragStart, int fragEnd, bint do_mask ): """ Stolen directly from Jim Kent's twoBit.c """ cdef int packedStart, packedEnd, packByteCount cdef int pOff, pStart, pEnd cdef int midStart, remainder, partCount cdef int i, j, s, e cdef char * packed cdef char * dna cdef char * dna_orig cdef char partial packedStart = (fragStart>>2); packedEnd = ((fragEnd+3)>>2); packByteCount = packedEnd - packedStart; # Empty string in which to write unpacked DNA dna_py = PyBytes_FromStringAndSize(NULL, fragEnd - fragStart) dna = PyBytes_AsString( dna_py ) seek_bytes = seq.sequence_offset+packedStart # Read it file.seek( seek_bytes ) packed_py = file.read( packByteCount ) packed = PyBytes_AsString( packed_py ) # Handle case where everything is in one packed byte if packByteCount == 1: pOff = (packedStart<<2) pStart = fragStart - pOff pEnd = fragEnd - pOff partial = packed[0] assert pEnd <= 4 assert pStart >= 0 for i from pStart <= i < pEnd: dna[0] = valToNt[(partial >> (6-i-i)) & 3] dna = dna + 1 else: # Handle partial first packed byte. midStart = fragStart; remainder = ( fragStart&3 ) if remainder > 0: partial = packed[0]; packed = packed + 1 partCount = 4 - remainder; for i from partCount - 1 >= i >= 0: dna[i] = valToNt[ partial & 3 ] partial = partial >> 2 midStart = midStart + partCount dna = dna + partCount # Handle middle bytes. 
remainder = fragEnd&3 midEnd = fragEnd - remainder i = midStart while i < midEnd: partial = packed[0] packed = packed + 1; dna[3] = valToNt[partial&3]; partial = partial >> 2 dna[2] = valToNt[partial&3]; partial = partial >> 2 dna[1] = valToNt[partial&3]; partial = partial >> 2 dna[0] = valToNt[partial&3]; dna = dna + 4; # Increment i = i + 4 ## sys.stderr.write( "!!!< " + dna_py + " >!!!\n" ); sys.stderr.flush() # End if remainder > 0: partial = packed[0]; partial = partial >> (8-remainder-remainder) for i from remainder - 1 >= i >= 0: dna[i] = valToNt[partial&3] partial = partial >> 2 # Restore DNA pointer dna = PyBytes_AsString( dna_py ) # N's n_block_count = len( seq.n_block_starts ) if n_block_count > 0: start_ix = bisect( seq.n_block_starts, fragStart ) - 1 if start_ix < 0: start_ix = 0 for i from start_ix <= i < n_block_count: s = seq.n_block_starts[i]; e = s + seq.n_block_sizes[i]; if (s >= fragEnd): break if (s < fragStart): s = fragStart if (e > fragEnd): e = fragEnd if (s < e): memset( dna + s - fragStart, c'N', e - s) # Mask if do_mask: m_block_count = len( seq.masked_block_starts ) if m_block_count > 0: start_ix = bisect( seq.masked_block_starts, fragStart ) - 1 if start_ix < 0: start_ix = 0 for i from start_ix <= i < m_block_count: s = seq.masked_block_starts[i]; e = s + seq.masked_block_sizes[i]; if (s >= fragEnd): break if (s < fragStart): s = fragStart if (e > fragEnd): e = fragEnd if (s < e): for j from s <= j < e: dna[j-fragStart] = tolower( dna[j-fragStart] ) if PY_MAJOR_VERSION >= 3: return dna_py.decode() else: return dna_py bx-python-0.8.13/lib/bx/seq/core.py000066400000000000000000000051251415666465100170250ustar00rootroot00000000000000""" Support for "biological sequence" files. :Author: Bob Harris (rsharris@bx.psu.edu) See seq.py for more information """ import struct from . 
import fasta, nib, qdna # DNA reverse complement table DNA_COMP = " - " \ " TVGH CD M KN YSA BWXR tvgh cd m kn ysa bwxr " \ " " \ " " def reverse_complement(text): return text.translate(DNA_COMP)[::-1] def seq_file(file, format=None, revcomp=False, name="", gap=None, contig=None): if (format is None): format = infer_format(file) if (contig is not None) and (format not in ["fasta", None]): raise ValueError("Contigs are not supported for format %s" % format) if (format == "fasta"): return fasta.FastaFile(file, revcomp=revcomp, name=name, gap=gap, contig=contig) elif (format == "nib"): return nib.NibFile(file, revcomp=revcomp, name=name, gap=gap) elif (format == "qdna"): return qdna.QdnaFile(file, revcomp=revcomp, name=name, gap=gap) else: if (format is None): format = "" else: format = " " + format raise ValueError(f"Unknown sequence format{format} in {file.name}") def seq_reader(file, format=None, revcomp=False, name="", gap=None): if (format is None): format = infer_format(file) if (format == "fasta"): return fasta.FastaReader(file, revcomp=revcomp, name=name, gap=gap) elif (format == "nib"): return nib.NibReader(file, revcomp=revcomp, name=name, gap=gap) elif (format == "qdna"): return qdna.QdnaReader(file, revcomp=revcomp, name=name, gap=gap) else: raise ValueError("Unknown sequence format %s" % format) def seq_writer(outfile, format=None, name=""): if (format == "fasta"): return fasta.FastaWriter(outfile) elif (format == "nib"): return nib.NibWriter(outfile) elif (format == "qdna"): return qdna.QdnaWriter(outfile) else: raise ValueError("Unknown sequence format %s" % format) def infer_format(file): format = None magic = struct.unpack(">L", file.read(4))[0] if (magic == nib.NIB_MAGIC_NUMBER) or (magic == nib.NIB_MAGIC_NUMBER_SWAP): format = "nib" elif (magic == qdna.qdnaMagic) or (magic == qdna.qdnaMagicSwap): format = "qdna" else: file.seek(0) if (file.read(1) == b">"): format = "fasta" file.seek(0) return format bx-python-0.8.13/lib/bx/seq/fasta.py000066400000000000000000000071301415666465100171710ustar00rootroot00000000000000""" Classes to support FASTA files. :Author: Bob Harris (rsharris@bx.psu.edu) A FASTA file contains multiple sequences. Each sequence is usually DNA. A typical FASTA file:: >mule TAATACCCCGGATATATGTCCTCACATAGTTCGAGGTCGAGAAAAATGAC TTCCCACCAAGTGGACTCAGCTCGAGTAAACGCCAACGATACGTCCATTA GGTGTGTGCCgcaactagtcggacccgttgtgacggaaacaggtccccgc caagtcacacgggcatgtcatggacTCTCGATCGTTCATCGCCTTCTTGG GTACCGCAGCCGCAATTAAGCCGTGTCTTCTTCCCCCTTCAAACGGGAAT CGTGTCGACTTCTTAGGAGCAGNNNNNNNNNNCTAACTCCAGAG >donkey TAATACCCCGGATATATGTCTTAACATAGTTCCAGGTCGAGAAGAATGAC TTGCCACCAAGTGGACTCAGATTCAGTCAACGCGAACGATAAGTCCATTA GGTGTGTACCgcaactagtgggacccgttgtgacggaaacaggtcaccgc caagtcacacgtgcatgtcatgtacTCTCGATCGTTTATCGCCTTCTTGG GTACCGCAGCCGAAATTAAGCCGTGTCTTCTTCCCACTTCAAACGGGAAT CGTGTCGACTTTACAGGAACAGNNNNNNNNNNATAACGCCAGAG ... 
more sequences Typical use: for seq in bx.seq.fasta.FastaReader(sys.stdin): print seq.name print seq.get(0,seq.length) """ from bx.seq.seq import SeqFile, SeqReader class FastaFile(SeqFile): def __init__(self, file, revcomp=False, name="", gap=None, lookahead=None, contig=None): SeqFile.__init__(self, file, revcomp, name, gap) self.lookahead = lookahead if (contig is None): contig = 1 assert (contig >= 1), "contig %d is not legal" % contig # nota bene: certainly not the most efficient or elegant implementation currContig = 1 while (True): if (self.lookahead is not None): (line, self.lookahead) = (self.lookahead, None) else: line = self.file.readline() if not isinstance(line, str): line = line.decode() if (line == ""): break if not line: break if (line.startswith(">")): if (self.text is not None): if (currContig == contig): self.lookahead = line # (next sequence header) break currContig += 1 self.name = self.extract_name(line[1:]) self.text = [] continue line = line.split() # (remove whitespace) if (self.text is None): self.text = line # (allows headerless fasta) else: self.text.extend(line) assert (currContig == contig), \ "contig %d is not legal (file contains only %d)" % (contig, currContig) if (self.text is not None): self.text = "".join(self.text) self.length = len(self.text) class FastaReader(SeqReader): def __init__(self, file, revcomp=False, name="", gap=None): SeqReader.__init__(self, file, revcomp, name, gap) self.lookahead = None def __next__(self): seq = FastaFile(self.file, self.revcomp, self.name, self.gap, self.lookahead) if (seq.text is None): return self.lookahead = seq.lookahead self.seqs_read += 1 return seq class FastaWriter: def __init__(self, file, columns=50): self.file = file self.columns = columns def write(self, seq): print(">%s" % seq.name, file=self.file) text = seq.text if (self.columns is not None) and (self.columns > 0): text = "\n".join([text[ix:ix+self.columns] for ix in range(0, len(text), self.columns)]) print(text, file=self.file) def close(self): assert (self.file is not None) self.file.close() self.file = None bx-python-0.8.13/lib/bx/seq/fasta_tests.py000066400000000000000000000020031415666465100204050ustar00rootroot00000000000000""" Tests for `bx.seq.fasta`. """ import unittest from . import fasta test_fa = "test_data/seq_tests/test.fa" # Same sequence data as stored in test.fa valid_seq = "TGGAGGCATTTGTGATTCAATAGATGCAGAAAGAAACCTTCCTAGAGCTG" \ + "GCGTTCTCTAACTAAAAGTGGAAAGTTCTGAGGAATGAGGACTGTTATAA" \ + "ATCCCACCCCACACCGCACCTTCTCCAGGGAAGTTTCATGGCCGTGAAGA" \ + "GGACAGAAAGTGAGAACCAAGATggaactgaataaacaagcttcacactg" \ + "ttagtttccccatatgcttaccttcccacagatgccaaccttggaggcct" \ + "aagaggcctagaatattatcctttgtctgatcatttctctacaaatttat" \ + "tgttctttgttaagatgctacataagcccaaattctaaccacccctttga" \ + "gttacccatcatcaagtttctcccatgtg" valid_seq_len = len(valid_seq) class FASTATestCase(unittest.TestCase): def test_get(self): fastafile = fasta.FastaFile(open(test_fa, "rb")) check_get(fastafile, 0, valid_seq_len) check_get(fastafile, 0, 40) check_get(fastafile, valid_seq_len - 40, 40) def check_get(fastafile, start, len): assert fastafile.get(start, len) == valid_seq[start:start+len] bx-python-0.8.13/lib/bx/seq/nib.py000066400000000000000000000055231415666465100166470ustar00rootroot00000000000000""" Classes to support nib files. :Author: James Taylor (james@bx.psu.edu), Bob Harris (rsharris@bx.psu.edu) A nib sequence is a sequence of DNA, using the 10 character alphabet A,C,G,T,N (upper and lower case). The file is packed as 4 bits per character. 
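Since each character occupies one nybble, a sequence of N bases needs
ceil(N/2) data bytes in addition to the 8 header bytes; a 101-base
sequence, for example, occupies 8 + 51 = 59 bytes.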
nib file format --------------- Fields can be in big- or little-endian format; they must match the endianess of the magic number. ============ =========== ====================================================== offset 0x00: 6B E9 3D 3A big endian magic number (3A 3D E9 6B => little endian) offset 0x04: xx xx xx xx length of data sequence (counted in characters) offset 0x08: ... data sequence; most significant nybble in each byte is first in sequence ============ =========== ====================================================== """ import math import struct from bx.seq.seq import SeqFile, SeqReader from . import _nib NIB_MAGIC_NUMBER = 0x6BE93D3A NIB_MAGIC_NUMBER_SWAP = 0x3A3DE96B NIB_MAGIC_SIZE = 4 NIB_LENGTH_SIZE = 4 class NibFile(SeqFile): def __init__(self, file, revcomp=False, name="", gap=None): SeqFile.__init__(self, file, revcomp, name, gap) self.byte_order = ">" magic = struct.unpack(">L", file.read(NIB_MAGIC_SIZE))[0] if (magic != NIB_MAGIC_NUMBER): if magic == NIB_MAGIC_NUMBER_SWAP: self.byte_order = "<" else: raise Exception("Not a NIB file") self.magic = magic self.length = struct.unpack("%sL" % self.byte_order, file.read(NIB_LENGTH_SIZE))[0] def raw_fetch(self, start, length): # Check parameters assert start >= 0, "Start must be greater than 0" assert length >= 0, "Length must be greater than 0" assert start + length <= self.length, "Interval beyond end of sequence" # Read block of bytes containing sequence block_start = int(math.floor(start / 2)) block_end = int(math.floor((start + length - 1) / 2)) block_len = block_end + 1 - block_start self.file.seek(NIB_MAGIC_SIZE + NIB_LENGTH_SIZE + block_start) raw = self.file.read(block_len) # Unpack compressed block into a character string and return return _nib.translate_raw_data(raw, start, length) class NibReader(SeqReader): def __init__(self, file, revcomp=False, name="", gap=None): SeqReader.__init__(self, file, revcomp, name, gap) def __next__(self): if (self.seqs_read != 0): return # nib files have just one sequence seq = NibFile(self.file, self.revcomp, self.name, self.gap) self.seqs_read += 1 return seq class NibWriter: def __init__(self, file): self.file = file def write(self, seq): assert (False), "NibWriter.write() is not implemented yet" def close(self): self.file.close() bx-python-0.8.13/lib/bx/seq/nib_tests.py000066400000000000000000000026751415666465100200760ustar00rootroot00000000000000""" Tests for `bx.seq.nib`. """ import unittest from . 
import nib test_nib = "test_data/seq_tests/test.nib" # Same sequence data as stored in test.nib valid_seq = "TGGAGGCATTTGTGATTCAATAGATGCAGAAAGAAACCTTCCTAGAGCTG" \ + "GCGTTCTCTAACTAAAAGTGGAAAGTTCTGAGGAATGAGGACTGTTATAA" \ + "ATCCCACCCCACACCGCACCTTCTCCAGGGAAGTTTCATGGCCGTGAAGA" \ + "GGACAGAAAGTGAGAACCAAGATggaactgaataaacaagcttcacactg" \ + "ttagtttccccatatgcttaccttcccacagatgccaaccttggaggcct" \ + "aagaggcctagaatattatcctttgtctgatcatttctctacaaatttat" \ + "tgttctttgttaagatgctacataagcccaaattctaaccacccctttga" \ + "gttacccatcatcaagtttctcccatgtg" valid_seq_len = len(valid_seq) class NIBTestCase(unittest.TestCase): def test_get(self): nibfile = nib.NibFile(open(test_nib, 'rb')) # Try all combinations of even / odd boundaries check_get(nibfile, 0, 10) check_get(nibfile, 1, 10) check_get(nibfile, 0, 11) check_get(nibfile, 1, 11) # Test near end of file also check_get(nibfile, valid_seq_len - 10, 10) check_get(nibfile, valid_seq_len - 11, 11) # Test really short gets check_get(nibfile, 0, 0) check_get(nibfile, 1, 0) check_get(nibfile, 0, 1) check_get(nibfile, 1, 1) # Test negative length self.assertRaises(AssertionError, nibfile.get, 20, -1) def check_get(nibfile, start, len): assert nibfile.get(start, len) == valid_seq[start:start+len] bx-python-0.8.13/lib/bx/seq/qdna.py000066400000000000000000000242561415666465100170260ustar00rootroot00000000000000""" Classes to support "quantum-DNA" files. :Author: Bob Harris (rsharris@bx.psu.edu) A quantum DNA sequence is a sequence of bytes, each representing a probability distribution (vector) over A, C, G, T. The QdnaFile class encapsulates the sequence of bytes, while the mapping from byte value to probability vector is encapsulated by the QdnaCodebook class. qdna file format ~~~~~~~~~~~~~~~~ Fields can be in big- or little-endian format; they must match the endianess of the magic number. ============ =========== ====================================================== offset 0x00: C4 B4 71 97 big endian magic number (97 71 B4 C4 => little endian) offset 0x04: 00 00 02 00 version 2.0 (fourth byte is sub version) offset 0x08: 00 00 00 14 header length (in bytes, including this field) offset 0x0C: xx xx xx xx S, offset (from file start) to data sequence offset 0x10: xx xx xx xx N, offset to name, 0 indicates no name offset 0x14: xx xx xx xx length of data sequence (counted in 'items') offset 0x18: xx xx xx xx (for version >= 2.0) P, offset to named .. properties, 0 indicates no properties offset N: ... name (zero-terminated string) offset S: ... data sequence offset P: ... named properties (see below) ============ =========== ====================================================== The named properties section consists of a list of pairs of zero-terminated strings. The list itself is terminated by an empty string (i.e. another zero). In each pair, the first is the name of the property and the second is its value. Some names are recognized and handled in some specific manner (see list below this paragraph). Any unrecognized name is simply added as an instance variable with that name, as long as it is not already an instance variable (in which case it is an error). Recognized properties (at present only one): - codebook: A string in qdna code file format (see QdnaCodebook class for details). 
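Typical use (a sketch; "seq.qdna" is a hypothetical file name):

    from bx.seq.qdna import QdnaFile
    qdnafile = QdnaFile(open("seq.qdna", "rb"))
    codes = qdnafile.raw_fetch(0, 10)      # raw byte codes, as characters
    vectors = qdnafile.get_quantum(0, 10)  # probability vectors (requires a codebook)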
""" import struct from io import StringIO from bx.seq.seq import SeqFile, SeqReader qdnaMagic = 0xC4B47197 # big endian magic number for qdna files qdnaMagicSwap = 0x9771B4C4 class QdnaFile(SeqFile): def __init__(self, file, revcomp=False, name="", gap=None, codebook=None): SeqFile.__init__(self, file, revcomp, name, gap) if gap is None: self.gap = chr(0) assert not revcomp, "reverse complement is not supported for qdna files" self.codebook = codebook self.byte_order = ">" magic = struct.unpack(">L", file.read(4))[0] if magic != qdnaMagic: if magic == qdnaMagicSwap: self.byte_order = "<" else: raise ValueError("not a quantum-dna file (magic=%08X)" % magic) self.magic = magic # process header self.version = struct.unpack("%sL" % self.byte_order, self.file.read(4))[0] if self.version not in [0x100, 0x200]: raise ValueError("unsupported quantum-dna (version=%08X)" % self.version) self.headerLength = struct.unpack("%sL" % self.byte_order, self.file.read(4))[0] if self.headerLength < 0x10: raise ValueError("unsupported quantum-dna (header len=%08X)" % self.headerLength) if self.version == 0x100 and self.headerLength != 0x10: raise ValueError("unsupported quantum-dna (version 1.0 header len=%08X)" % self.headerLength) self.seqOffset = struct.unpack("%sL" % self.byte_order, self.file.read(4))[0] self.nameOffset = struct.unpack("%sL" % self.byte_order, self.file.read(4))[0] self.length = struct.unpack("%sL" % self.byte_order, self.file.read(4))[0] self.propOffset = 0 if self.headerLength >= 0x14: self.propOffset = struct.unpack("%sL" % self.byte_order, self.file.read(4))[0] self.name = "" if self.nameOffset != 0: self.file.seek(self.nameOffset) self.name = self.read_string() if self.propOffset != 0: self.file.seek(self.propOffset) while (True): name = self.read_string() if len(name) == 0: break value = self.read_string() self.set_property(name, value) def set_property(self, name, value): if name == "codebook": self.codebook = QdnaCodebook(StringIO(value)) else: raise Exception("named properties as instance variables are not implemented yet") # $$$ do this by adding a properties dict and __getitem__/__setitem__ # $$$ also need to write properties in QdnaWriter.write() def read_string(self): s = b"" while (True): ch = self.file.read(1) if ch == b"\0": break s += ch if not isinstance(s, str): return s.decode() return s def raw_fetch(self, start, length): self.file.seek(self.seqOffset + start) return self.file.read(length).decode() def get_quantum(self, start, length): assert self.codebook is not None, "qdna sequence %s has no code book" % self.name return [self.codebook[codeNum] for codeNum in self.raw_fetch(start, length)] class QdnaReader(SeqReader): def __init__(self, file, revcomp=False, name="", gap=None, codebook=None): SeqReader.__init__(self, file, revcomp, name, gap) self.codebook = codebook def __next__(self): if self.seqs_read != 0: return # qdna files have just one sequence seq = QdnaFile(self.file, self.revcomp, self.name, self.gap, self.codebook) self.seqs_read += 1 return seq """ A QdnaCodebook maps code numbers to the corresponding probability vector. The latter is a hash from symbols (usually "A", "C", "G", or "T") to the corresponsing probability. Note that code numbers are of type string. qdna code file format: The file is ascii text and looks something like what's shown below. Lines beginning with # are comments, and columns are assumed to represent A, C, G and T (in that order). Anything other than five columns is an error. 
Note that code number zero is usually reserved for gaps in quantum sequences, and thus usually won't appear in a code file. Note that code numbers are two-digit hexadecimal (to match the textual displays of quantum sequences). 01 0.111002 0.072588 0.127196 0.689214 02 0.081057 0.023799 0.098657 0.796487 03 0.000260 0.003823 0.000336 0.995581 ... more lines, usually a total of 255 ... FF 0.465900 0.008602 0.482301 0.043197 """ class QdnaCodebook: def __init__(self, file): (self.alphabet, self.codeToProbs) = self.read_codebook(file) def __str__(self): codeSet = sorted(codeNum for codeNum in self.codeToProbs) return "\n".join([self.vector_text(codeNum) for codeNum in codeSet]) def vector_text(self, codeNum): if codeNum in self.codeToProbs: vec = self.codeToProbs[codeNum] else: vec = {} for sym in self.alphabet: if sym not in vec: vec[sym] = 0.0 return ("%02X\t" % ord(codeNum)) \ + "\t".join(["%.6f" % vec[sym] for sym in self.alphabet]) def __getitem__(self, codeNum): return self.codeToProbs[codeNum] def __setitem__(self, codeNum, value): self.codeToProbs[codeNum] = value # value should be hash from symbol to probability def read_codebook(self, codeF): alphabet = "ACGT" codeToProbs = {} for (lineNum, line) in enumerate(codeF): lineNum += 1 line = line.rstrip() stripped = line.strip() if stripped == "" or stripped.startswith("#"): continue fields = line.split(None) if len(fields) != 5: raise ValueError("wrong vector size (line %d)" % lineNum) try: codeNum = int(fields[0], 16) except ValueError: raise ValueError("bad character code %s (line %d)" % (fields[0], lineNum)) if not 0 <= codeNum <= 255: raise ValueError("character code %s is outside the valid range (line %d)" % (fields[0], lineNum)) if chr(codeNum) in codeToProbs: raise ValueError("character code %s appears more than once (line %d)" % (fields[0], lineNum)) try: vec = {} for ix in range(1, 5): p = float(fields[ix]) if p < 0 or p > 1: raise ValueError vec[alphabet[ix-1]] = p except Exception: raise ValueError("%s is a bad probability value (line %d)" % (fields[ix], lineNum)) codeToProbs[chr(codeNum)] = vec return (alphabet, codeToProbs) class QdnaWriter: def __init__(self, file): self.file = file def write(self, seq): text = seq.text if text is None: text = "" version = 0x200 headerLen = 0x014 offset = headerLen + 8 nameOffset = 0 if seq.name is not None and seq.name != "": nameOffset = 0x01C offset += len(seq.name) + 1 name = seq.name + chr(0) dataOffset = offset offset += len(text) assert seq.codebook is None, "QdnaWriter.write() does not support codebooks yet" propOffset = 0 self.file.write(struct.pack("%sL" % seq.byte_order, qdnaMagic)) self.file.write(struct.pack("%sL" % seq.byte_order, version)) self.file.write(struct.pack("%sL" % seq.byte_order, headerLen)) self.file.write(struct.pack("%sL" % seq.byte_order, dataOffset)) self.file.write(struct.pack("%sL" % seq.byte_order, nameOffset)) self.file.write(struct.pack("%sL" % seq.byte_order, len(text))) self.file.write(struct.pack("%sL" % seq.byte_order, propOffset)) if nameOffset != 0: self.file.write(name) self.file.write(text) def close(self): self.file.close() bx-python-0.8.13/lib/bx/seq/qdna_tests.py000066400000000000000000000020031415666465100202320ustar00rootroot00000000000000""" Tests for `bx.seq.qdna`. """ import unittest from . 
import qdna test_qdna = "test_data/seq_tests/test.qdna" # Same sequence data as stored in test.qdna valid_seq = "C7wMwHQrMKqEtSREuUv5nsLinpTS8l7jXpbI7IipvCbHnhOdgx" \ + "5tzRgzYl4j85d:xSlvKPEKEIvZkfiX1YPkBi1Ibhfn9fTZd8gG" \ + "Wy284hJnwf93W4eHOjeRk7LuVYmH{UTYkYM:b4J4MruMq1ihhv" \ + "1Yl5W[xXEmi8[JuuLRgooBpy23PllMuUiIiKVIK5vzhjPPYp5Y" \ + "1eqPxo[e5I24KeCdTV94MZWNybUb:McC:1n4Jczk8JqnR4q1gY" \ + "HjLS4Bes3s5YvvWdKzS4VrFZy2erhd7YoWRoS[UK8JtSp1{Z1o" \ + "5:TpvN8mrmWrghiNw{S6nT8DSfF{1ff6kNGpI:FsZE2RgipTVO" \ + "mJN6vPm8MUgNYd7MDBEu37YOPzPjO1dr" valid_seq_len = len(valid_seq) class QDNATestCase(unittest.TestCase): def test_get(self): qdnafile = qdna.QdnaFile(open(test_qdna, "rb")) check_get(qdnafile, 0, valid_seq_len) check_get(qdnafile, 0, 40) check_get(qdnafile, valid_seq_len - 40, 40) def check_get(qdnafile, start, len): assert qdnafile.get(start, len) == valid_seq[start:start+len] bx-python-0.8.13/lib/bx/seq/seq.py000066400000000000000000000113711415666465100166650ustar00rootroot00000000000000""" Classes to support "biological sequence" files. :Author: Bob Harris (rsharris@bx.psu.edu) """ # DNA reverse complement table DNA_COMP = " - " \ " TVGH CD M KN YSA BWXR tvgh cd m kn ysa bwxr " \ " " \ " " class SeqFile: """ A biological sequence is a sequence of bytes or characters. Usually these represent DNA (A,C,G,T), proteins, or some variation of those. class attributes: file: file object containing the sequence revcomp: whether gets from this sequence should be reverse-complemented False => no reverse complement True => (same as "-5'") "maf" => (same as "-5'") "+5'" => minus strand is from plus strand's 5' end (same as "-3'") "+3'" => minus strand is from plus strand's 3' end (same as "-5'") "-5'" => minus strand is from its 5' end (as per MAF file format) "-3'" => minus strand is from its 3' end (as per genome browser, but with origin-zero) name: usually a species and/or chromosome name (e.g. "mule.chr5"); if the file contains a name, that overrides this one gap: gap character that aligners should use for gaps in this sequence """ def __init__(self, file=None, revcomp=False, name="", gap=None): self.file = file if revcomp: self.revcomp = "-5'" elif revcomp == "+3'": self.revcomp = "-5'" elif revcomp == "+5'": self.revcomp = "-3'" elif revcomp == "maf": self.revcomp = "-5'" else: self.revcomp = revcomp self.name = name if gap is None: self.gap = "-" else: self.gap = gap self.text = None # (subclasses fill in text and self.length = 0 # length or they most override get()) def close(self): assert (self.file is not None) self.file.close() self.file = None def extract_name(self, line): try: return line.split()[0] except Exception: return "" def set_text(self, text): self.text = text self.length = len(text) def __str__(self): text = "" if self.name is not None: text += self.name + " " text += self.get(0, self.length) return text def get(self, start, length): """ Fetch subsequence starting at position `start` with length `length`. This method is picky about parameters, the requested interval must have non-negative length and fit entirely inside the NIB sequence, the returned string will contain exactly 'length' characters, or an AssertionError will be generated. 
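For example, on a 100-character sequence get(95,5) returns the last
five characters, while get(95,6) fails the final assertion because the
requested interval extends past the end of the sequence.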
""" # Check parameters assert length >= 0, "Length must be non-negative (got %d)" % length assert start >= 0, "Start must be greater than 0 (got %d)" % start assert start + length <= self.length, \ f"Interval beyond end of sequence ({start}..{start + length} > {self.length})" # Fetch sequence and reverse complement if necesary if not self.revcomp: return self.raw_fetch(start, length) if self.revcomp == "-3'": return self.reverse_complement(self.raw_fetch(start, length)) assert self.revcomp == "-5'", "unrecognized reverse complement scheme" start = self.length - (start+length) return self.reverse_complement(self.raw_fetch(start, length)) def raw_fetch(self, start, length): return self.text[start:start+length] def reverse_complement(self, text): comp = [ch for ch in text.translate(DNA_COMP)] comp.reverse() return "".join(comp) class SeqReader: """Iterate over all sequences in a file in order""" def __init__(self, file, revcomp=False, name="", gap=None): self.file = file self.revcomp = revcomp self.name = name self.gap = gap self.seqs_read = 0 def close(self): self.file.close() def __iter__(self): return SeqReaderIter(self) def __next__(self): # subclasses should override this method and return the return # .. next sequence (of type SeqFile or a subclass) read from self.file class SeqReaderIter: def __init__(self, reader): self.reader = reader def __iter__(self): return self def __next__(self): v = next(self.reader) if not v: raise StopIteration return v bx-python-0.8.13/lib/bx/seq/seq_tests.py000066400000000000000000000035551415666465100201140ustar00rootroot00000000000000""" Tests for `bx.seq.seq`. """ import unittest import bx.seq from bx.seq import fasta_tests, nib_tests, qdna_tests test_fa = "test_data/seq_tests/test.fa" test2_fa = "test_data/seq_tests/test2.fa" test_nib = "test_data/seq_tests/test.nib" test_qdna = "test_data/seq_tests/test.qdna" valid_fasta = fasta_tests.valid_seq valid_nib = nib_tests.valid_seq valid_qdna = qdna_tests.valid_seq # Same sequences as stored in test2.fa valid2_fa = [("apple", "GGCGCTGCGATAAGGTTGCGACAACACGGACCTTCTTTTGCCTACCTCTGTTCTTGGCACG"), ("orange", "CGTGCCGAGAACAGAAAATACGCCGGGCGGTGCAGTAGTATCTTGGTATCCGATATGCAGG"), ("grapefruit", "CCTGCATATCGACTAGTACACCCTCCCGAGGTACCCCACCCATCCCTCTTTTCTCGGCGCG")] class SEQTestCase (unittest.TestCase): def test_get_fasta(self): fastafile = bx.seq.seq_file(open(test_fa, "rb")) check_get(fastafile, valid_fasta, 3, 40) def test_get_nib(self): nibfile = bx.seq.seq_file(open(test_nib, "rb")) check_get(nibfile, valid_nib, 3, 40) def test_get_qdna(self): qdnafile = bx.seq.seq_file(open(test_qdna, "rb")) check_get(qdnafile, valid_qdna, 3, 40) def test_get_reader(self): reader = bx.seq.seq_reader(open(test2_fa, "rb")) for (ix, seq) in enumerate(reader): assert (ix < len(valid2_fa)), "FastaReader returns too many sequences" text = "%s" % seq fields = text.split() assert (len(fields) == 2), "SeqReader.__str__ returns incorrect sequence string \"%s\" (%d)" % text assert (fields[0] == valid2_fa[ix][0]), f"FastaReader returned the wrong name ({fields[0]},{valid2_fa[ix][0]})" assert (fields[1] == valid2_fa[ix][1]), f"FastaReader returned the wrong text ({fields[1]},{valid2_fa[ix][1]})" def check_get(seqfile, valid_seq, start, len): assert seqfile.get(start, len) == valid_seq[start:start+len] bx-python-0.8.13/lib/bx/seq/twobit.py000066400000000000000000000077521415666465100174150ustar00rootroot00000000000000""" Access to files containing sequence data in 'twobit' format. 
""" from collections.abc import Mapping from struct import calcsize, unpack from . import _twobit TWOBIT_MAGIC_NUMBER = 0x1A412743 TWOBIT_MAGIC_NUMBER_SWAP = 0x4327411A TWOBIT_MAGIC_SIZE = 4 TWOBIT_VERSION = 0 class TwoBitSequence: def __init__(self, tbf, header_offset=None): self.tbf = tbf self.header_offset = header_offset self.sequence_offset = None self.size = None self.n_blocks = None self.masked_blocks = None self.loaded = False def __getitem__(self, slice): start, stop, stride = slice.indices(self.size) assert stride == 1, "Striding in slices not supported" if stop - start < 1: return "" return _twobit.read(self.tbf.file, self, start, stop, self.tbf.do_mask) def __len__(self): return self.size def get(self, start, end): # Trim start / stop if start < 0: start = 0 if end > self.size: end = self.size out_size = end - start if out_size < 1: raise Exception(f"end before start ({start},{end})") # Find position of packed portion dna = _twobit.read(self.tbf.file, self, start, end, self.tbf.do_mask) # Return return dna class TwoBitFile(Mapping): def __init__(self, file, do_mask=True): self.do_mask = do_mask # Read magic and determine byte order self.byte_order = ">" strng = file.read(TWOBIT_MAGIC_SIZE) magic = unpack(">L", strng)[0] if magic != TWOBIT_MAGIC_NUMBER: if magic == TWOBIT_MAGIC_NUMBER_SWAP: self.byte_order = "<" else: raise Exception("Not a NIB file") self.magic = magic self.file = file # Read version self.version = self.read("L") if self.version != TWOBIT_VERSION: raise Exception("File is version '%d' but I only know about '%d'" % (self.version, TWOBIT_VERSION)) # Number of sequences in file self.seq_count = self.read("L") # Header contains some reserved space self.reserved = self.read("L") # Read index of sequence names to offsets index = dict() for _ in range(self.seq_count): name = self.read_p_string() offset = self.read("L") index[name] = TwoBitSequence(self, offset) self.index = index def __getitem__(self, name): seq = self.index[name] if not seq.loaded: self.load_sequence(name) return seq def __iter__(self): return iter(self.index.keys()) def __len__(self): return len(self.index) def load_sequence(self, name): seq = self.index[name] # Seek to start of sequence block self.file.seek(seq.header_offset) # Size of sequence seq.size = self.read("L") # Read N and masked block regions seq.n_block_starts, seq.n_block_sizes = self.read_block_coords() seq.masked_block_starts, seq.masked_block_sizes = self.read_block_coords() # Reserved self.read("L") # Save start of actualt sequence seq.sequence_offset = self.file.tell() # Mark as loaded seq.loaded = True def read_block_coords(self): block_count = self.read("L") if block_count == 0: return [], [] starts = self.read(str(block_count) + "L", untuple=False) sizes = self.read(str(block_count) + "L", untuple=False) return list(starts), list(sizes) def read(self, pattern, untuple=True): rval = unpack(self.byte_order + pattern, self.file.read(calcsize(self.byte_order + pattern))) if untuple and len(rval) == 1: return rval[0] return rval def read_p_string(self): """ Read a length-prefixed string """ length = self.read("B") return self.file.read(length) bx-python-0.8.13/lib/bx/seq/twobit_tests.py000066400000000000000000000031121415666465100206210ustar00rootroot00000000000000import random import pytest from . 

def quick_fasta_iter(f):
    current_header = None
    current_sequence = []
    for line in f:
        if line.startswith("#"):
            continue
        if line.startswith(">"):
            if current_sequence:
                yield current_header, "".join(current_sequence)
                current_sequence = []
            current_header = line.strip()[1:]
        else:
            current_sequence.append("".join(line.split()))
    if current_sequence:
        yield current_header, "".join(current_sequence)
        current_sequence = []


@pytest.mark.parametrize("filename", ["test", "testN", "testMask"])
def test_random_subseq_matches(filename):
    test_fa = f"test_data/seq_tests/{filename}.fa"
    test_twobit = f"test_data/seq_tests/{filename}.2bit"
    # Load Fasta data
    expected = {}
    with open(test_fa) as f:
        for h, s in quick_fasta_iter(f):
            expected[h] = s
    # Open 2bit
    with open(test_twobit, 'rb') as f:
        t = twobit.TwoBitFile(f)
        for k, s in expected.items():
            k = k.encode()
            assert k in t.index
            # assert t.index[k].size == len(s)
            length = len(s)
            for _ in range(100):
                start = random.randint(0, length-2)
                end = random.randint(start+1, length)
                assert t[k].get(start, end) == s[start:end]
                assert t[k][start:end] == s[start:end], \
                    "seq: %s, start: %d, end: %d\nExpected:\n%s\nActual:\n%s\n" % \
                    (k, start, end, s[start:end], t[k].get(start, end))
bx-python-0.8.13/lib/bx/seqmapping.py000066400000000000000000000054501415666465100174520ustar00rootroot00000000000000"""
Classes for char-to-int mapping and int-to-int mapping.

:Author: James Taylor (james@bx.psu.edu)

The char-to-int mapping can be used to translate a list of strings over some
alphabet to a single int array (for example, to encode a multiple sequence
alignment).

The int-to-int mapping is particularly useful for creating partitions, and
provides methods to merge/split symbols in the output mapping.

The two forms of mapping can be combined, for example to encode a multiple
sequence alignment in a reduced alphabet defined by a partition of alignment
columns.  Many of the helper methods provided are for solving such alignment
oriented problems.

This code was originally written for the `ESPERR`_ project which includes
software for searching for alignment encodings that work well for specific
classification problems using various Markov chain classifiers over the
reduced encodings.

Most of the core implementation is in the pyrex/C extension "_seqmapping.pyx"
for performance reasons (specifically to avoid the excessive bounds checking
that would make a sequence/array lookup heavy problem like this slow in pure
python).

.. _ESPERR: http://www.bx.psu.edu/projects/esperr/
"""

from ._seqmapping import (
    CharToIntArrayMapping,
    IntToIntMapping,
)

# Char->Int mapping for DNA characters with missing data

DNA = CharToIntArrayMapping()
DNA.set_mapping("a", 0)
DNA.set_mapping("A", 0)
DNA.set_mapping("c", 1)
DNA.set_mapping("C", 1)
DNA.set_mapping("g", 2)
DNA.set_mapping("G", 2)
DNA.set_mapping("t", 3)
DNA.set_mapping("T", 3)
DNA.set_mapping("-", 4)
DNA.set_mapping("*", 5)

# Creating mappings


def alignment_mapping_from_file(f, char_mapping=DNA):
    """
    Create a mapping from a file of alignment columns.
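
    Each non-blank line of the file holds one alignment column and its
    integer symbol, for example (a sketch matching the eight-species test
    data in seqmapping_tests)::

        TTTTTTTT 0
        CCCCCCCC 4
        AAAAAAAA 1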
""" columns, symbols = [], [] for line in f: column, symbol = line.split() columns.append(column) symbols.append(int(symbol)) align_count = len(columns[0]) mapping = IntToIntMapping(char_mapping.get_out_size() ** align_count) for column, symbol in zip(columns, symbols): index = char_mapping.translate_list(list(column))[0] mapping.set_mapping(index, symbol) return align_count, mapping def second_mapping_from_file(f, first_mapping, char_mapping=DNA): columns, symbols = [], [] for line in f: column, symbol = line.split() columns.append(column) symbols.append(int(symbol)) mapping = IntToIntMapping(first_mapping.get_out_size()) for column, symbol in zip(columns, symbols): index = char_mapping.translate_list(list(column))[0] if first_mapping[index] >= 0: mapping.set_mapping(first_mapping[index], symbol) return mapping def identity_mapping(size): mapping = IntToIntMapping(size) for i in range(size): mapping.set_mapping(i, i) return mapping bx-python-0.8.13/lib/bx/seqmapping_tests.py000066400000000000000000000054741415666465100207020ustar00rootroot00000000000000""" Tests for `bx.seqmapping`. """ import unittest from io import StringIO from numpy import ( allclose, array, ) import bx.seqmapping class CharMappingTests(unittest.TestCase): __test__ = False def test_DNA(self): assert(allclose( bx.seqmapping.DNA.translate("ACGTacgt-?X"), [0, 1, 2, 3, 0, 1, 2, 3, 4, -1, -1])) def test_DNA_list(self): assert(allclose( bx.seqmapping.DNA.translate_list(["ACGTA", "TGCAX"]), [0 + 3*6, 1 + 2*6, 2 + 1*6, 3 + 0*6, -1])) def test_other(self): m = bx.seqmapping.CharToIntArrayMapping() m.set_mapping("A", 0) m.set_mapping("B", 7) assert(allclose(m.translate("ABCCBA"), [0, 7, -1, -1, 7, 0])) class IntMappingTests(unittest.TestCase): __test__ = False def test_simple(self): m = bx.seqmapping.IntToIntMapping(4) m.set_mapping(0, 0) m.set_mapping(2, 0) m.set_mapping(1, 1) m.set_mapping(3, 1) assert(allclose(m.translate(array([0, 1, 2, 3, 4], 'i')), array([0, 1, 0, 1, -1]))) eight_species_mapping = """TTTTTTTT 0 CCCCCCCC 4 AAAAAAAA 1 GGGGGGGG 5 AAAAA*AA 2 TTTTT*TT 0 GGGGG*GG 5 CCCCC*CC 4 GGGG*GGG 5 TTTT*TTT 2 GGGAAGGG 5 AAAA*AAA 2 TTTTT*T* 2 CCCCC*C* 4 CCCTTCCC 4 CCCC*CCC 4 TTTT**TT 2 AAAA**AA 2 AAAAA*A* 2 GGGGG*G* 5 AAAAAGAA 2 TTTTTCTT 2 GGGAA*GG 5 TTTT**T* 2 TTTCCTTT 0 AAAAAAA* 1 CCCTT*CC 3 TTTTTTT* 2 CC*CC*CC 3 AAAGGAAA 2 ------G- 2 """ rows = ["AAATTGT-----ATGTCCATCCTTTAAAGGTCATTCCTTTAATGGTCTTTTCTGGACACCACTAGGGGTCAGAAGTAGTTCATCAAAC-----------------TTTCTTCCCTCCC-TACTTCAGTG", "AAATTGT-----ATGTCCATCCTTTAAAGGTCATTCCTTTAATGGTCTTTTCTGGACACCACTAGGGGTCAGAAGTAGTTCATCAAAC-----------------TTTCTTCCCTCCC-TACTTCAGTG", "AAATTTT-----ATGTCTATCCTTTAAAGGTCATTCCTCTAATAGTCTTTTCTGGACACCACTAGGGGTCAGAAGTAGTTCATTAAAC-----------------TTTCTTCCCTCCC-TACCTCAGTG", "AAACTGT-----ATCACCACCTTTTTAAGGTCATTTCTCTAATGATCCTGTT-GCATACCAGTAGGGGGCAGAAGTGTTCCGCTGATTTCCGCCCTCCTCCCCACCCCCCCACCCCCC-TTATTCAAAG", "*********************************************************************************************************************************", "-TATTAT-----ATGGCCATGTTCAAAAGGTTGTTTCTCTAATGATTCCTTC-TGATACCAGTAGGGGTCAGAAGTGGTCCATTGATT---------------------CTTTTCCTC-TGATTC-AAG", "AAATTGA--AAGATCTCACTCTTTGCCAGGTAGTCCATCTAAGGGTCACATATGGATACCAGCAGGGCCT-GAAGAAGCCCATTGAAT------------------------TTTCCC-ATCTTCAAGG", "AAATTCATGATAGTGTCACTCTTAAATAGATGATTC--------TTCACAT---GATGCCAGCAGGGGGC-AGAGCAGGCTGTGAAAT------------------------TTTCCCTTTCTTCAAAG"] class AlignmentMappingTests(unittest.TestCase): __test__ = False def test_largescale(self): f = StringIO(eight_species_mapping) n, m = 
bx.seqmapping.alignment_mapping_from_file(f) t = bx.seqmapping.DNA.translate_list(rows) m.translate(t) bx-python-0.8.13/lib/bx/tabular/000077500000000000000000000000001415666465100163625ustar00rootroot00000000000000bx-python-0.8.13/lib/bx/tabular/__init__.py000066400000000000000000000000671415666465100204760ustar00rootroot00000000000000""" Support for working with delimited data files. """ bx-python-0.8.13/lib/bx/tabular/io.py000066400000000000000000000106061415666465100173460ustar00rootroot00000000000000""" Reading and writing delimited data files (with headers and comments). """ from itertools import count FIRST_LINE_IS_HEADER = object() class ParseError(Exception): def __init__(self, *args, **kwargs): Exception.__init__(self, *args) self.linenum = kwargs.get("linenum", None) def __str__(self): if self.linenum: return Exception.__str__(self) + " on line " + str(self.linenum) else: return Exception.__str__(self) class TableRow: """ A row of a table """ def __init__(self, reader, fields): self.reader = reader self.fields = fields def __getitem__(self, key): if isinstance(key, int): return self.fields[key] elif isinstance(key, str): if self.reader.header: return self.fields[self.reader.header.field_to_column[key]] else: raise TypeError("column names only supported for files with headers") else: raise TypeError("field indices must be integers or strings") @property def fieldnames(self): return self.reader.header.fields def __str__(self): return "\t".join(self.fields) class Header: """ Header of a table -- contains column names and a mapping from them to column indexes """ def __init__(self, fields): self.set_fields(fields) def set_fields(self, fields): self.fields = fields self.field_to_column = dict(zip(fields, count())) def __getitem__(self, key): if isinstance(key, int): return self.fields[key] elif isinstance(key, str): if key in self.field_to_column: return key else: raise TypeError("field indices must be integers or strings") def __str__(self): return "#" + "\t".join(self.fields) class Comment: def __init__(self, line): self.line = line def __str__(self): if self.line.startswith("#"): return self.line return "#" + self.line class TableReader: """ Reader for iterating tabular data """ def __init__(self, input, return_header=True, return_comments=True, force_header=None, comment_lines_startswith=["#"]): self.input = input self.return_comments = return_comments self.return_header = return_header self.input_iter = iter(input) self.linenum = 0 self.header = force_header self.comment_lines_startswith = comment_lines_startswith def __iter__(self): return self def __next__(self): line = next(self.input_iter) self.linenum += 1 line = line.rstrip("\r\n") # Catch blank lines (throw a warning?) # This will end up adding a '#' at the beginning of blank lines if line == '': if self.return_comments: return Comment(line) else: return next(self) # Force header? if self.header is FIRST_LINE_IS_HEADER and self.linenum == 1: self.header = self.parse_header(line) if self.return_header: return self.header else: return next(self) # Is it a comment line? 
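            # (A sketch of the dispatch below: a line matching any prefix in
            # comment_lines_startswith becomes the Header if it is line 1 and
            # no header has been seen yet; otherwise it is returned as a
            # Comment when return_comments is set, or silently skipped.)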
for comment_line_start in self.comment_lines_startswith: if line.startswith(comment_line_start): # If a comment and the first line we assume it is a header if self.header is None and self.linenum == 1: self.header = self.parse_header(line) if self.return_header: return self.header else: return next(self) else: if self.return_comments: return self.parse_comment(line) else: return next(self) # Not a comment, must be an interval try: return self.parse_row(line) except ParseError as e: e.linenum = self.linenum raise e def parse_header(self, line): if line.startswith("#"): fields = line[1:].split("\t") else: fields = line.split("\t") return Header(fields) def parse_comment(self, line): return Comment(line) def parse_row(self, line): return TableRow(self, line.split("\t")) bx-python-0.8.13/lib/bx/wiggle.py000066400000000000000000000053431415666465100165650ustar00rootroot00000000000000""" Support for scores in the `wiggle`_ file format used by the UCSC Genome Browser. The positions in the wiggle format are 1-relative, however, the positions returned match the BED/interval format which is zero-based, half-open. .. _wiggle: http://genome.ucsc.edu/goldenPath/help/wiggle.html """ def parse_header(line): return dict([field.split('=') for field in line.split()[1:]]) def IntervalReader(f): """ Iterator yielding chrom, start, end, strand, value. Values are zero-based, half-open. Regions which lack a score are ignored. """ current_chrom = None current_pos = None current_step = None # always for wiggle data strand = '+' mode = "bed" for line in f: if line.isspace() or line.startswith("track") or line.startswith("#") or line.startswith("browser"): continue elif line.startswith("variableStep"): header = parse_header(line) current_chrom = header['chrom'] current_pos = None current_step = None if 'span' in header: current_span = int(header['span']) else: current_span = 1 mode = "variableStep" elif line.startswith("fixedStep"): header = parse_header(line) current_chrom = header['chrom'] current_pos = int(header['start']) - 1 current_step = int(header['step']) if 'span' in header: current_span = int(header['span']) else: current_span = 1 mode = "fixedStep" elif mode == "bed": fields = line.split() if len(fields) > 3: if len(fields) > 5: yield fields[0], int(fields[1]), int(fields[2]), fields[5], float(fields[3]) else: yield fields[0], int(fields[1]), int(fields[2]), strand, float(fields[3]) elif mode == "variableStep": fields = line.split() pos = int(fields[0]) - 1 yield current_chrom, pos, pos + current_span, strand, float(fields[1]) elif mode == "fixedStep": yield current_chrom, current_pos, current_pos + current_span, strand, float(line.split()[0]) current_pos += current_step else: raise ValueError("Unexpected input line: %s" % line.strip()) class Reader: """ Iterator yielding chrom, position, value. Values are zero-based. Regions which lack a score are ignored. """ def __init__(self, f): self.file = f def __iter__(self): for chrom, start, end, strand, val in IntervalReader(self.file): for pos in range(start, end): yield chrom, pos, val bx-python-0.8.13/lib/bx/wiggle_tests.py000066400000000000000000000063571415666465100200150ustar00rootroot00000000000000""" Tests for `bx.wiggle`. 
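
For reference, a fixedStep block such as::

    fixedStep chrom=chr19 start=59307401 step=300 span=3
    1000

is yielded by bx.wiggle.IntervalReader as the zero-based, half-open interval
('chr19', 59307400, 59307403, '+', 1000.0), matching interval_reader_result
below.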
""" import unittest from io import StringIO from bx import wiggle # A modified version of UCSC's example wiggle, taken from http://genome.ucsc.edu/goldenPath/help/wiggleExample.txt test_wig = """browser position chr19:59302001-59311000 browser hide all browser pack refGene encodeRegions browser full altGraph # 5 base wide bar graph, autoScale is on by default == graphing # limits will dynamically change to always show full range of data # in viewing window, priority = 20 positions this as the second graph # Note, zero-relative, half-open coordinate system in use for bed format track type=wiggle_0 name="Bed Format" description="BED format" visibility=full color=200,100,0 altColor=0,100,200 priority=20 chr19 59302000 59302005 -1.0 chr19 59302300 59302305 -0.75 # 4 base wide bar graph at arbitrarily spaced positions, # threshold line drawn at y=11.76 # autoScale off viewing range set to [0:25] # priority = 10 positions this as the first graph # Note, one-relative coordinate system in use for this format track type=wiggle_0 name="variableStep" description="variableStep format" visibility=full autoScale=off viewLimits=0.0:25.0 color=255,200,0 yLineMark=11.76 yLineOnOff=on priority=10 variableStep chrom=chr19 span=4 59304701 10.0 59304901 12.5 # 3 base wide points graph at every 300 bases, 50 pixel high graph # autoScale off and viewing range set to [0:1000] # priority = 30 positions this as the third graph # Note, one-relative coordinate system in use for this format track type=wiggle_0 name="fixedStep" description="fixed step" visibility=full autoScale=off viewLimits=0:1000 color=0,200,100 maxHeightPixels=100:50:20 graphType=points priority=30 fixedStep chrom=chr19 start=59307401 step=300 span=3 1000 900 800 """ interval_reader_result = [ "chr19,59302000,59302005,+,-1.0", "chr19,59302300,59302305,+,-0.75", "chr19,59304700,59304704,+,10.0", "chr19,59304900,59304904,+,12.5", "chr19,59307400,59307403,+,1000.0", "chr19,59307700,59307703,+,900.0", "chr19,59308000,59308003,+,800.0" ] position_reader_result = [ "chr19,59302000,-1.0", "chr19,59302001,-1.0", "chr19,59302002,-1.0", "chr19,59302003,-1.0", "chr19,59302004,-1.0", "chr19,59302300,-0.75", "chr19,59302301,-0.75", "chr19,59302302,-0.75", "chr19,59302303,-0.75", "chr19,59302304,-0.75", "chr19,59304700,10.0", "chr19,59304701,10.0", "chr19,59304702,10.0", "chr19,59304703,10.0", "chr19,59304900,12.5", "chr19,59304901,12.5", "chr19,59304902,12.5", "chr19,59304903,12.5", "chr19,59307400,1000.0", "chr19,59307401,1000.0", "chr19,59307402,1000.0", "chr19,59307700,900.0", "chr19,59307701,900.0", "chr19,59307702,900.0", "chr19,59308000,800.0", "chr19,59308001,800.0", "chr19,59308002,800.0" ] class TestWiggleReader(unittest.TestCase): def test_reader(self): # Test position reader assert position_reader_result == [",".join(map(str, value)) for value in wiggle.Reader(StringIO(test_wig))] def test_interval_reader(self): # Test interval reader reader assert interval_reader_result == [",".join(map(str, value)) for value in wiggle.IntervalReader(StringIO(test_wig))] if __name__ == '__main__': unittest.main() bx-python-0.8.13/lib/bx_extras/000077500000000000000000000000001415666465100163165ustar00rootroot00000000000000bx-python-0.8.13/lib/bx_extras/__init__.py000066400000000000000000000000001415666465100204150ustar00rootroot00000000000000bx-python-0.8.13/lib/bx_extras/fpconst.py000066400000000000000000000115031415666465100203440ustar00rootroot00000000000000"""Utilities for handling IEEE 754 floating point special values This python module implements constants 
and functions for working with IEEE754 double-precision special values.  It
provides constants for Not-a-Number (NaN), Positive Infinity (PosInf), and
Negative Infinity (NegInf), as well as functions to test for these values.

The code is implemented in pure python by taking advantage of the 'struct'
standard module.  Care has been taken to generate proper results on both
big-endian and little-endian machines.  Some efficiency could be gained by
translating the core routines into C.

See for reference material on the IEEE 754 floating point standard.

Further information on this package is available at .

Author:    Gregory R. Warnes
Date:      2003-04-08
Copyright: (c) 2003, Pfizer, Inc.
"""

__version__ = "0.7.0"

import operator
import struct
from functools import reduce

ident = "$Id: fpconst.py,v 1.12 2004/05/22 04:38:17 warnes Exp $"

# check endianness
_big_endian = struct.pack('i', 1)[:1] != b'\x01'

# and define appropriate constants
if _big_endian:
    NaN = struct.unpack('d', b'\x7F\xF8\x00\x00\x00\x00\x00\x00')[0]
    PosInf = struct.unpack('d', b'\x7F\xF0\x00\x00\x00\x00\x00\x00')[0]
    NegInf = -PosInf
else:
    NaN = struct.unpack('d', b'\x00\x00\x00\x00\x00\x00\xf8\xff')[0]
    PosInf = struct.unpack('d', b'\x00\x00\x00\x00\x00\x00\xf0\x7f')[0]
    NegInf = -PosInf


def _double_as_bytes(dval):
    "Use struct.pack/unpack to decode a double precision float into eight bytes"
    tmp = list(struct.unpack('8B', struct.pack('d', dval)))
    if not _big_endian:
        tmp.reverse()
    return tmp

##
# Functions to extract components of the IEEE 754 floating point format
##


def _sign(dval):
    "Extract the sign bit from a double-precision floating point value"
    bb = _double_as_bytes(dval)
    return bb[0] >> 7 & 0x01


def _exponent(dval):
    """Extract the exponent bits from a double-precision floating point value.

    Note that for normalized values, the exponent bits have an offset of
    1023.  As a consequence, the actual exponent is obtained by subtracting
    1023 from the value returned by this function.
    """
    bb = _double_as_bytes(dval)
    return (bb[0] << 4 | bb[1] >> 4) & 0x7ff


def _mantissa(dval):
    """Extract the mantissa bits from a double-precision floating point
    value."""
    # Note: the top nibble must be masked before shifting, and all seven
    # low-order bytes contribute to the 52-bit mantissa.
    bb = _double_as_bytes(dval)
    mantissa = (bb[1] & 0x0f) << 48
    mantissa += bb[2] << 40
    mantissa += bb[3] << 32
    mantissa += bb[4] << 24
    mantissa += bb[5] << 16
    mantissa += bb[6] << 8
    mantissa += bb[7]
    return mantissa


def _zero_mantissa(dval):
    """Determine whether the mantissa bits of the given double are all
    zero."""
    bb = _double_as_bytes(dval)
    return ((bb[1] & 0x0f) | reduce(operator.or_, bb[2:])) == 0

##
# Functions to test for IEEE 754 special values
##


def isNaN(value):
    "Determine if the argument is a IEEE 754 NaN (Not a Number) value."
    return (_exponent(value) == 0x7ff and not _zero_mantissa(value))


def isInf(value):
    """Determine if the argument is an infinite IEEE 754 value (positive
    or negative infinity)"""
    return (_exponent(value) == 0x7ff and _zero_mantissa(value))


def isFinite(value):
    """Determine if the argument is a finite IEEE 754 value (i.e., is
    not NaN, positive, or negative infinity)"""
    return (_exponent(value) != 0x7ff)


def isPosInf(value):
    "Determine if the argument is a IEEE 754 positive infinity value"
    return (_sign(value) == 0 and _exponent(value) == 0x7ff and
            _zero_mantissa(value))


def isNegInf(value):
    "Determine if the argument is a IEEE 754 negative infinity value"
    return (_sign(value) == 1 and _exponent(value) == 0x7ff and
            _zero_mantissa(value))

##
# Functions to test public functions.
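
# A small, hand-checkable demonstration of the bit-field helpers above
# (a sketch; the function below is illustrative, not part of the original
# module):


def _demo_bit_fields():
    # 1.0 is stored as 0x3FF0000000000000: sign 0, biased exponent 1023,
    # mantissa 0
    assert _sign(1.0) == 0
    assert _exponent(1.0) == 1023
    assert _mantissa(1.0) == 0
    # 1.5 is 1.1b x 2**0, so only the top mantissa bit (2**51) is set
    assert _mantissa(1.5) == 2 ** 51
    # -2.5 has the sign bit set
    assert _sign(-2.5) == 1
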
## def test_isNaN(): assert(not isNaN(PosInf)) assert(not isNaN(NegInf)) assert(isNaN(NaN)) assert(not isNaN(1.0)) assert(not isNaN(-1.0)) def test_isInf(): assert(isInf(PosInf)) assert(isInf(NegInf)) assert(not isInf(NaN)) assert(not isInf(1.0)) assert(not isInf(-1.0)) def test_isFinite(): assert(not isFinite(PosInf)) assert(not isFinite(NegInf)) assert(not isFinite(NaN)) assert(isFinite(1.0)) assert(isFinite(-1.0)) def test_isPosInf(): assert(isPosInf(PosInf)) assert(not isPosInf(NegInf)) assert(not isPosInf(NaN)) assert(not isPosInf(1.0)) assert(not isPosInf(-1.0)) def test_isNegInf(): assert(not isNegInf(PosInf)) assert(isNegInf(NegInf)) assert(not isNegInf(NaN)) assert(not isNegInf(1.0)) assert(not isNegInf(-1.0)) # overall test def test(): test_isNaN() test_isInf() test_isFinite() test_isPosInf() test_isNegInf() if __name__ == "__main__": test() bx-python-0.8.13/lib/bx_extras/fpconst_tests.py000066400000000000000000000000771415666465100215720ustar00rootroot00000000000000import bx_extras.fpconst as fp def test_all(): fp.test() bx-python-0.8.13/lib/bx_extras/lrucache.py000066400000000000000000000160451415666465100204640ustar00rootroot00000000000000# lrucache.py -- a simple LRU (Least-Recently-Used) cache class # Copyright 2004 Evan Prodromou # Licensed under the Academic Free License 2.1 # arch-tag: LRU cache main module """a simple LRU (Least-Recently-Used) cache module This module provides very simple LRU (Least-Recently-Used) cache functionality. An *in-memory cache* is useful for storing the results of an 'expensive' process (one that takes a lot of time or resources) for later re-use. Typical examples are accessing data from the filesystem, a database, or a network location. If you know you'll need to re-read the data again, it can help to keep it in a cache. You *can* use a Python dictionary as a cache for some purposes. However, if the results you're caching are large, or you have a lot of possible results, this can be impractical memory-wise. An *LRU cache*, on the other hand, only keeps _some_ of the results in memory, which keeps you from overusing resources. The cache is bounded by a maximum size; if you try to add more values to the cache, it will automatically discard the values that you haven't read or written to in the longest time. In other words, the least-recently-used items are discarded. [1]_ .. [1]: 'Discarded' here means 'removed from the cache'. """ # TODO: Remove this file in favor of functools.lru_cache # when the minimum Python version is high enough import time from heapq import ( heapify, heappop, heappush, ) __version__ = "0.2" __all__ = ['CacheKeyError', 'LRUCache', 'DEFAULT_SIZE'] __docformat__ = 'reStructuredText en' DEFAULT_SIZE = 16 """Default size of a new LRUCache object, if no 'size' argument is given.""" class CacheKeyError(KeyError): """Error raised when cache requests fail When a cache record is accessed which no longer exists (or never did), this error is raised. To avoid it, you may want to check for the existence of a cache record before reading or deleting it.""" class LRUCache: """Least-Recently-Used (LRU) cache. Instances of this class provide a least-recently-used (LRU) cache. They emulate a Python mapping type. You can use an LRU cache more or less like a Python dictionary, with the exception that objects you put into the cache may be discarded before you take them out. Some example usage:: cache = LRUCache(32) # new cache cache['foo'] = get_file_contents('foo') # or whatever if 'foo' in cache: # if it's still in cache... 
            # use cached version
            contents = cache['foo']
        else:
            # recalculate
            contents = get_file_contents('foo')
            # store in cache for next time
            cache['foo'] = contents

        print(cache.size)  # Maximum size
        print(len(cache))  # 0 <= len(cache) <= cache.size
        cache.size = 10    # Auto-shrink on size assignment
        for i in range(50):  # note: larger than cache size
            cache[i] = i
        if 0 not in cache:
            print('Zero was discarded.')
        if 42 in cache:
            del cache[42]  # Manual deletion
        for j in cache:  # iterate (in LRU order)
            print(j, cache[j])  # iterator produces keys, not values
    """

    class __Node:
        """Record of a cached value. Not for public consumption."""

        def __init__(self, key, obj, timestamp):
            object.__init__(self)
            self.key = key
            self.obj = obj
            self.atime = timestamp
            self.mtime = self.atime

        def __lt__(self, other):
            return self.atime < other.atime

        def __eq__(self, other):
            return self.atime == other.atime

        def __le__(self, other):
            return self.__lt__(other) or self.__eq__(other)

        def __gt__(self, other):
            return not (self.__lt__(other) or self.__eq__(other))

        def __ge__(self, other):
            return not self.__lt__(other)

        def __ne__(self, other):
            return not self.__eq__(other)

        def __repr__(self):
            return "<%s %s => %s (%s)>" % \
                   (self.__class__, self.key, self.obj,
                    time.asctime(time.localtime(self.atime)))

    def __init__(self, size=DEFAULT_SIZE):
        # Check arguments
        if size <= 0:
            raise ValueError(size)
        elif not isinstance(size, int):
            raise TypeError(size)
        object.__init__(self)
        self.__heap = []
        self.__dict = {}

        self.size = size
        """Maximum size of the cache.
        If more than 'size' elements are added to the cache,
        the least-recently-used ones will be discarded."""

    def __len__(self):
        return len(self.__heap)

    def __contains__(self, key):
        return key in self.__dict

    def __setitem__(self, key, obj):
        if key in self.__dict:
            node = self.__dict[key]
            node.obj = obj
            node.atime = time.time()
            node.mtime = node.atime
            heapify(self.__heap)
        else:
            # size may have been reset, so we loop
            while len(self.__heap) >= self.size:
                lru = heappop(self.__heap)
                del self.__dict[lru.key]
            node = self.__Node(key, obj, time.time())
            self.__dict[key] = node
            heappush(self.__heap, node)

    def __getitem__(self, key):
        if key not in self.__dict:
            raise CacheKeyError(key)
        else:
            node = self.__dict[key]
            node.atime = time.time()
            heapify(self.__heap)
            return node.obj

    def __delitem__(self, key):
        if key not in self.__dict:
            raise CacheKeyError(key)
        else:
            node = self.__dict[key]
            del self.__dict[key]
            self.__heap.remove(node)
            heapify(self.__heap)
            return node.obj

    def __iter__(self):
        copy = self.__heap[:]
        while len(copy) > 0:
            node = heappop(copy)
            yield node.key
        # Note: a bare "raise StopIteration" here would be converted to a
        # RuntimeError under PEP 479 (Python 3.7+); a generator ends simply
        # by returning.

    def __setattr__(self, name, value):
        object.__setattr__(self, name, value)
        # automagically shrink heap on resize
        if name == 'size':
            while len(self.__heap) > value:
                lru = heappop(self.__heap)
                del self.__dict[lru.key]

    def __repr__(self):
        return "<%s (%d elements)>" % (str(self.__class__), len(self.__heap))

    def mtime(self, key):
        """Return the last modification time for the cache record with key.
May be useful for cache instances where the stored values can get 'stale', such as caching file or network resource contents.""" if key not in self.__dict: raise CacheKeyError(key) else: node = self.__dict[key] return node.mtime if __name__ == "__main__": cache = LRUCache(25) print(cache) for i in range(50): cache[i] = str(i) print(cache) if 46 in cache: del cache[46] print(cache) cache.size = 10 print(cache) cache[46] = '46' print(cache) print(len(cache)) for c in cache: print(c) print(cache) print(cache.mtime(46)) for c in cache: print(c) bx-python-0.8.13/lib/bx_extras/pstat.py000066400000000000000000001127261415666465100200340ustar00rootroot00000000000000# Copyright (c) 1999-2000 Gary Strangman; All Rights Reserved. # # This software is distributable under the terms of the GNU # General Public License (GPL) v2, the text of which can be found at # http://www.gnu.org/copyleft/gpl.html. Installing, importing or otherwise # using this module constitutes acceptance of the terms of this License. # # Disclaimer # # This software is provided "as-is". There are no expressed or implied # warranties of any kind, including, but not limited to, the warranties # of merchantability and fittness for a given application. In no event # shall Gary Strangman be liable for any direct, indirect, incidental, # special, exemplary or consequential damages (including, but not limited # to, loss of use, data or profits, or business interruption) however # caused and on any theory of liability, whether in contract, strict # liability or tort (including negligence or otherwise) arising in any way # out of the use of this software, even if advised of the possibility of # such damage. # # Comments and/or additions are welcome (send e-mail to: # strang@nmr.mgh.harvard.edu). # """ pstat.py module ################################################# ####### Written by: Gary Strangman ########### ####### Last modified: Jun 29, 2001 ########### ################################################# This module provides some useful list and array manipulation routines modeled after those found in the |Stat package by Gary Perlman, plus a number of other useful list/file manipulation functions. The list-based functions include: abut (source,*args) simpleabut (source, addon) colex (listoflists,cnums) collapse (listoflists,keepcols,collapsecols,fcn1=None,fcn2=None,cfcn=None) dm (listoflists,criterion) flat (l) linexand (listoflists,columnlist,valuelist) linexor (listoflists,columnlist,valuelist) linedelimited (inlist,delimiter) lineincols (inlist,colsize) lineincustcols (inlist,colsizes) list2string (inlist) makelol(inlist) makestr(x) printcc (lst,extra=2) printincols (listoflists,colsize) pl (listoflists) printl(listoflists) replace (lst,oldval,newval) recode (inlist,listmap,cols='all') remap (listoflists,criterion) roundlist (inlist,num_digits_to_round_floats_to) sortby(listoflists,sortcols) unique (inlist) duplicates(inlist) writedelimited (listoflists, delimiter, file, writetype='w') Some of these functions have alternate versions which are defined only if Numeric (NumPy) can be imported. These functions are generally named as above, with an 'a' prefix. 
aabut (source, *args) acolex (a,indices,axis=1) acollapse (a,keepcols,collapsecols,sterr=0,ns=0) adm (a,criterion) alinexand (a,columnlist,valuelist) alinexor (a,columnlist,valuelist) areplace (a,oldval,newval) arecode (a,listmap,col='all') arowcompare (row1, row2) arowsame (row1, row2) asortrows(a,axis=0) aunique(inarray) aduplicates(inarray) Currently, the code is all but completely un-optimized. In many cases, the array versions of functions amount simply to aliases to built-in array functions/methods. Their inclusion here is for function name consistency. """ # CHANGE LOG: # ========== # 01-11-15 ... changed list2string() to accept a delimiter # 01-06-29 ... converted exec()'s to eval()'s to make compatible with Py2.1 # 01-05-31 ... added duplicates() and aduplicates() functions # 00-12-28 ... license made GPL, docstring and import requirements # 99-11-01 ... changed version to 0.3 # 99-08-30 ... removed get, getstrings, put, aget, aput (into io.py) # 03/27/99 ... added areplace function, made replace fcn recursive # 12/31/98 ... added writefc function for ouput to fixed column sizes # 12/07/98 ... fixed import problem (failed on collapse() fcn) # added __version__ variable (now 0.2) # 12/05/98 ... updated doc-strings # added features to collapse() function # added flat() function for lists # fixed a broken asortrows() # 11/16/98 ... fixed minor bug in aput for 1D arrays ## # 11/08/98 ... fixed aput to output large arrays correctly import copy import string import sys if sys.version_info[0] > 2: def cmp(x, y): """ Replacement for built-in function cmp that was removed in Python 3 """ return (x > y) - (x < y) __version__ = 0.4 # =========================== LIST FUNCTIONS ========================== ### # Here are the list functions, DEFINED FOR ALL SYSTEMS. # Array functions (for NumPy-enabled computers) appear below. ### def abut(source, *args): """ Like the |Stat abut command. It concatenates two lists side-by-side and returns the result. '2D' lists are also accomodated for either argument (source or addon). CAUTION: If one list is shorter, it will be repeated until it is as long as the longest list. If this behavior is not desired, use pstat.simpleabut(). Usage: abut(source, args) where args=any # of lists Returns: a list of lists as long as the LONGEST list past, source on the 'left', lists in attached consecutively on the 'right' """ if type(source) not in [list, tuple]: source = [source] for addon in args: if type(addon) not in [list, tuple]: addon = [addon] if len(addon) < len(source): # is source list longer? if len(source) % len(addon) == 0: # are they integer multiples? repeats = len(source)/len(addon) # repeat addon n times origadd = copy.deepcopy(addon) for i in range(repeats-1): addon = addon + origadd else: repeats = len(source)/len(addon)+1 # repeat addon x times, origadd = copy.deepcopy(addon) # x is NOT an integer for i in range(repeats-1): addon = addon + origadd addon = addon[0:len(source)] elif len(source) < len(addon): # is addon list longer? if len(addon) % len(source) == 0: # are they integer multiples? 
repeats = len(addon)/len(source) # repeat source n times origsour = copy.deepcopy(source) for i in range(repeats-1): source = source + origsour else: repeats = len(addon)/len(source)+1 # repeat source x times, origsour = copy.deepcopy(source) # x is NOT an integer for i in range(repeats-1): source = source + origsour source = source[0:len(addon)] source = simpleabut(source, addon) return source def simpleabut(source, addon): """ Concatenates two lists as columns and returns the result. '2D' lists are also accomodated for either argument (source or addon). This DOES NOT repeat either list to make the 2 lists of equal length. Beware of list pairs with different lengths ... the resulting list will be the length of the FIRST list passed. Usage: simpleabut(source,addon) where source, addon=list (or list-of-lists) Returns: a list of lists as long as source, with source on the 'left' and addon on the 'right' """ if type(source) not in [list, tuple]: source = [source] if type(addon) not in [list, tuple]: addon = [addon] minlen = min(len(source), len(addon)) source_copy = copy.deepcopy(source) # start abut process if type(source[0]) not in [list, tuple]: if type(addon[0]) not in [list, tuple]: for i in range(minlen): source_copy[i] = [source[i]] + [addon[i]] # source/addon = column else: for i in range(minlen): source_copy[i] = [source[i]] + addon[i] # addon=list-of-lists else: if type(addon[0]) not in [list, tuple]: for i in range(minlen): source_copy[i] = source[i] + [addon[i]] # source=list-of-lists else: for i in range(minlen): source_copy[i] = source[i] + addon[i] # source/addon = list-of-lists source = source_copy return source def colex(listoflists, cnums): """ Extracts from listoflists the columns specified in the list 'cnums' (cnums can be an integer, a sequence of integers, or a string-expression that corresponds to a slice operation on the variable x ... e.g., 'x[3:]' will colex columns 3 onward from the listoflists). Usage: colex (listoflists,cnums) Returns: a list-of-lists corresponding to the columns from listoflists specified by cnums, in the order the column numbers appear in cnums """ global index column = 0 if type(cnums) in [list, tuple]: # if multiple columns to get index = cnums[0] column = [x[index] for x in listoflists] for col in cnums[1:]: index = col column = abut(column, [x[index] for x in listoflists]) elif isinstance(cnums, str): # if an 'x[3:]' type expr. evalstring = 'map(lambda x: x'+cnums+', listoflists)' column = eval(evalstring) else: # else it's just 1 col to get index = cnums column = [x[index] for x in listoflists] return column def collapse(listoflists, keepcols, collapsecols, fcn1=None, fcn2=None, cfcn=None): """ Averages data in collapsecol, keeping all unique items in keepcols (using unique, which keeps unique LISTS of column numbers), retaining the unique sets of values in keepcols, the mean for each. Setting fcn1 and/or fcn2 to point to a function rather than None (e.g., stats.sterr, len) will append those results (e.g., the sterr, N) after each calculated mean. cfcn is the collapse function to apply (defaults to mean, defined here in the pstat module to avoid circular imports with stats.py, but harmonicmean or others could be passed). Usage: collapse (listoflists,keepcols,collapsecols,fcn1=None,fcn2=None,cfcn=None) Returns: a list of lists with all unique permutations of entries appearing in columns ("conditions") specified by keepcols, abutted with the result of cfcn (if cfcn=None, defaults to the mean) of each column specified by collapsecols. 
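
    A worked example (a sketch, using the default mean as cfcn)::

        data = [['a', 1], ['a', 3], ['b', 5]]
        collapse(data, [0], [1])   # -> [['a', 2.0], ['b', 5.0]]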
""" def collmean(inlist): s = 0 for item in inlist: s = s + item return s/float(len(inlist)) if type(keepcols) not in [list, tuple]: keepcols = [keepcols] if type(collapsecols) not in [list, tuple]: collapsecols = [collapsecols] if cfcn is None: cfcn = collmean if keepcols == []: means = [0]*len(collapsecols) for i in range(len(collapsecols)): avgcol = colex(listoflists, collapsecols[i]) means[i] = cfcn(avgcol) if fcn1: try: test = fcn1(avgcol) except Exception: test = 'N/A' means[i] = [means[i], test] if fcn2: try: test = fcn2(avgcol) except Exception: test = 'N/A' try: means[i] = means[i] + [len(avgcol)] except TypeError: means[i] = [means[i], len(avgcol)] return means else: values = colex(listoflists, keepcols) uniques = sorted(unique(values)) newlist = [] if type(keepcols) not in [list, tuple]: keepcols = [keepcols] for item in uniques: if type(item) not in [list, tuple]: item = [item] tmprows = linexand(listoflists, keepcols, item) for col in collapsecols: avgcol = colex(tmprows, col) item.append(cfcn(avgcol)) if fcn1 is not None: try: test = fcn1(avgcol) except Exception: test = 'N/A' item.append(test) if fcn2 is not None: try: test = fcn2(avgcol) except Exception: test = 'N/A' item.append(test) newlist.append(item) return newlist def dm(listoflists, criterion): """ Returns rows from the passed list of lists that meet the criteria in the passed criterion expression (a string as a function of x; e.g., 'x[3]>=9' will return all rows where the 4th column>=9 and "x[2]=='N'" will return rows with column 2 equal to the string 'N'). Usage: dm (listoflists, criterion) Returns: rows from listoflists that meet the specified criterion. """ function = 'filter(lambda x: '+criterion+',listoflists)' lines = eval(function) return lines def flat(l): """ Returns the flattened version of a '2D' list. List-correlate to the a.flat() method of NumPy arrays. Usage: flat(l) """ newl = [] for i in range(len(l)): for j in range(len(l[i])): newl.append(l[i][j]) return newl def linexand(listoflists, columnlist, valuelist): """ Returns the rows of a list of lists where col (from columnlist) = val (from valuelist) for EVERY pair of values (columnlist[i],valuelists[i]). len(columnlist) must equal len(valuelist). Usage: linexand (listoflists,columnlist,valuelist) Returns: the rows of listoflists where columnlist[i]=valuelist[i] for ALL i """ if type(columnlist) not in [list, tuple]: columnlist = [columnlist] if type(valuelist) not in [list, tuple]: valuelist = [valuelist] criterion = '' for i in range(len(columnlist)): if isinstance(valuelist[i], str): critval = '\'' + valuelist[i] + '\'' else: critval = str(valuelist[i]) criterion = criterion + ' x['+str(columnlist[i])+']=='+critval+' and' criterion = criterion[0:-3] # remove the "and" after the last crit function = 'filter(lambda x: '+criterion+',listoflists)' lines = eval(function) return lines def linexor(listoflists, columnlist, valuelist): """ Returns the rows of a list of lists where col (from columnlist) = val (from valuelist) for ANY pair of values (colunmlist[i],valuelist[i[). One value is required for each column in columnlist. If only one value exists for columnlist but multiple values appear in valuelist, the valuelist values are all assumed to pertain to the same column. 
Usage: linexor (listoflists,columnlist,valuelist) Returns: the rows of listoflists where columnlist[i]=valuelist[i] for ANY i """ if type(columnlist) not in [list, tuple]: columnlist = [columnlist] if type(valuelist) not in [list, tuple]: valuelist = [valuelist] criterion = '' if len(columnlist) == 1 and len(valuelist) > 1: columnlist = columnlist*len(valuelist) for i in range(len(columnlist)): # build an exec string if isinstance(valuelist[i], str): critval = '\'' + valuelist[i] + '\'' else: critval = str(valuelist[i]) criterion = criterion + ' x['+str(columnlist[i])+']=='+critval+' or' criterion = criterion[0:-2] # remove the "or" after the last crit function = 'filter(lambda x: '+criterion+',listoflists)' lines = eval(function) return lines def linedelimited(inlist, delimiter): """ Returns a string composed of elements in inlist, with each element separated by 'delimiter.' Used by function writedelimited. Use '\t' for tab-delimiting. Usage: linedelimited (inlist,delimiter) """ outstr = '' for item in inlist: if not isinstance(item, str): item = str(item) outstr = outstr + item + delimiter outstr = outstr[0:-1] return outstr def lineincols(inlist, colsize): """ Returns a string composed of elements in inlist, with each element right-aligned in columns of (fixed) colsize. Usage: lineincols (inlist,colsize) where colsize is an integer """ outstr = '' for item in inlist: if not isinstance(item, str): item = str(item) size = len(item) if size <= colsize: for i in range(colsize-size): outstr = outstr + ' ' outstr = outstr + item else: outstr = outstr + item[0:colsize+1] return outstr def lineincustcols(inlist, colsizes): """ Returns a string composed of elements in inlist, with each element right-aligned in a column of width specified by a sequence colsizes. The length of colsizes must be greater than or equal to the number of columns in inlist. Usage: lineincustcols (inlist,colsizes) Returns: formatted string created from inlist """ outstr = '' for i in range(len(inlist)): if not isinstance(inlist[i], str): item = str(inlist[i]) else: item = inlist[i] size = len(item) if size <= colsizes[i]: for j in range(colsizes[i]-size): outstr = outstr + ' ' outstr = outstr + item else: outstr = outstr + item[0:colsizes[i]+1] return outstr def list2string(inlist, delimit=' '): """ Converts a 1D list to a single long string for file output, using the string.join function. Usage: list2string (inlist,delimit=' ') Returns: the string created from inlist """ stringlist = [makestr(_) for _ in inlist] return string.join(stringlist, delimit) def makelol(inlist): """ Converts a 1D list to a 2D list (i.e., a list-of-lists). Useful when you want to use put() to write a 1D list one item per line in the file. Usage: makelol(inlist) Returns: if l = [1,2,'hi'] then returns [[1],[2],['hi']] etc. """ x = [] for item in inlist: x.append([item]) return x def makestr(x): if not isinstance(x, str): x = str(x) return x def printcc(lst, extra=2): """ Prints a list of lists in columns, customized by the max size of items within the columns (max size of items in col, plus 'extra' number of spaces). Use 'dashes' or '\\n' in the list-of-lists to print dashes or blank lines, respectively. 
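
    For example (a sketch)::

        printcc([['name', 'len'], 'dashes', ['chr19', 59307403]])

    prints the two data rows right-aligned in auto-sized columns, with a
    dashed rule where the 'dashes' placeholder appears.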
Usage: printcc (lst,extra=2) Returns: None """ if type(lst[0]) not in [list, tuple]: lst = [lst] rowstokill = [] list2print = copy.deepcopy(lst) for i in range(len(lst)): if lst[i] == ['\n'] or lst[i] == '\n' or lst[i] == 'dashes' or lst[i] == '' or lst[i] == ['']: rowstokill = rowstokill + [i] rowstokill.reverse() # delete blank rows from the end for row in rowstokill: del list2print[row] maxsize = [0]*len(list2print[0]) for col in range(len(list2print[0])): items = colex(list2print, col) items = [makestr(_) for _ in items] maxsize[col] = max(map(len, items)) + extra for row in lst: if row == ['\n'] or row == '\n' or row == '' or row == ['']: print() elif row == ['dashes'] or row == 'dashes': dashes = [0]*len(maxsize) for j in range(len(maxsize)): dashes[j] = '-'*(maxsize[j]-2) print(lineincustcols(dashes, maxsize)) else: print(lineincustcols(row, maxsize)) return None def printincols(listoflists, colsize): """ Prints a list of lists in columns of (fixed) colsize width, where colsize is an integer. Usage: printincols (listoflists,colsize) Returns: None """ for row in listoflists: print(lineincols(row, colsize)) return None def pl(listoflists): """ Prints a list of lists, 1 list (row) at a time. Usage: pl(listoflists) Returns: None """ for row in listoflists: if row[-1] == '\n': print(row, end=' ') else: print(row) return None def printl(listoflists): """Alias for pl.""" pl(listoflists) return def replace(inlst, oldval, newval): """ Replaces all occurrences of 'oldval' with 'newval', recursively. Usage: replace (inlst,oldval,newval) """ lst = inlst*1 for i in range(len(lst)): if type(lst[i]) not in [list, tuple]: if lst[i] == oldval: lst[i] = newval else: lst[i] = replace(lst[i], oldval, newval) return lst def recode(inlist, listmap, cols=None): """ Changes the values in a list to a new set of values (useful when you need to recode data from (e.g.) strings to numbers. cols defaults to None (meaning all columns are recoded). Usage: recode (inlist,listmap,cols=None) cols=recode cols, listmap=2D list Returns: inlist with the appropriate values replaced with new ones """ lst = copy.deepcopy(inlist) if cols is not None: if type(cols) not in [list, tuple]: cols = [cols] for col in cols: for row in range(len(lst)): try: idx = colex(listmap, 0).index(lst[row][col]) lst[row][col] = listmap[idx][1] except ValueError: pass else: for row in range(len(lst)): for col in range(len(lst)): try: idx = colex(listmap, 0).index(lst[row][col]) lst[row][col] = listmap[idx][1] except ValueError: pass return lst def remap(listoflists, criterion): """ Remaps values in a given column of a 2D list (listoflists). This requires a criterion as a function of 'x' so that the result of the following is returned ... map(lambda x: 'criterion',listoflists). Usage: remap(listoflists,criterion) criterion=string Returns: remapped version of listoflists """ function = 'map(lambda x: '+criterion+',listoflists)' lines = eval(function) return lines def roundlist(inlist, digits): """ Goes through each element in a 1D or 2D inlist, and applies the following function to all elements of float ... round(element,digits). Usage: roundlist(inlist,digits) Returns: list with rounded floats """ if type(inlist[0]) in [int, float]: inlist = [inlist] l = inlist*1 for i in range(len(l)): for j in range(len(l[i])): if isinstance(l[i][j], float): l[i][j] = round(l[i][j], digits) return l def sortby(listoflists, sortcols): """ Sorts a list of lists on the column(s) specified in the sequence sortcols. 
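
    For example (a sketch; under Python 3 the result is an iterator over
    rows)::

        sortby([['b', 2], ['a', 3], ['c', 1]], 1)
        # rows come back ordered as ['c', 1], ['b', 2], ['a', 3]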
Usage: sortby(listoflists,sortcols) Returns: sorted list, unchanged column ordering """ newlist = sorted(abut(colex(listoflists, sortcols), listoflists)) try: numcols = len(sortcols) except TypeError: numcols = 1 crit = '[' + str(numcols) + ':]' newlist = colex(newlist, crit) return newlist def unique(inlist): """ Returns all unique items in the passed list. If the a list-of-lists is passed, unique LISTS are found (i.e., items in the first dimension are compared). Usage: unique (inlist) Returns: the unique elements (or rows) in inlist """ uniques = [] for item in inlist: if item not in uniques: uniques.append(item) return uniques def duplicates(inlist): """ Returns duplicate items in the FIRST dimension of the passed list. Usage: duplicates (inlist) """ dups = [] for i in range(len(inlist)): if inlist[i] in inlist[i+1:]: dups.append(inlist[i]) return dups def nonrepeats(inlist): """ Returns items that are NOT duplicated in the first dim of the passed list. Usage: nonrepeats (inlist) """ nonrepeats = [] for i in range(len(inlist)): if inlist.count(inlist[i]) == 1: nonrepeats.append(inlist[i]) return nonrepeats # =================== PSTAT ARRAY FUNCTIONS ===================== # =================== PSTAT ARRAY FUNCTIONS ===================== # =================== PSTAT ARRAY FUNCTIONS ===================== # =================== PSTAT ARRAY FUNCTIONS ===================== # =================== PSTAT ARRAY FUNCTIONS ===================== # =================== PSTAT ARRAY FUNCTIONS ===================== # =================== PSTAT ARRAY FUNCTIONS ===================== # =================== PSTAT ARRAY FUNCTIONS ===================== # =================== PSTAT ARRAY FUNCTIONS ===================== # =================== PSTAT ARRAY FUNCTIONS ===================== # =================== PSTAT ARRAY FUNCTIONS ===================== # =================== PSTAT ARRAY FUNCTIONS ===================== # =================== PSTAT ARRAY FUNCTIONS ===================== # =================== PSTAT ARRAY FUNCTIONS ===================== # =================== PSTAT ARRAY FUNCTIONS ===================== # =================== PSTAT ARRAY FUNCTIONS ===================== try: # DEFINE THESE *ONLY* IF NUMERIC IS AVAILABLE import Numeric N = Numeric def aabut(source, *args): """ Like the |Stat abut command. It concatenates two arrays column-wise and returns the result. CAUTION: If one array is shorter, it will be repeated until it is as long as the other. Usage: aabut (source, args) where args=any # of arrays Returns: an array as long as the LONGEST array past, source appearing on the 'left', arrays in attached on the 'right'. """ if len(source.shape) == 1: width = 1 source = N.resize(source, [source.shape[0], width]) else: width = source.shape[1] for addon in args: if len(addon.shape) == 1: width = 1 addon = N.resize(addon, [source.shape[0], width]) else: width = source.shape[1] if len(addon) < len(source): addon = N.resize(addon, [source.shape[0], addon.shape[1]]) elif len(source) < len(addon): source = N.resize(source, [addon.shape[0], source.shape[1]]) source = N.concatenate((source, addon), 1) return source def acolex(a, indices, axis=1): """ Extracts specified indices (a list) from passed array, along passed axis (column extraction is default). BEWARE: A 1D array is presumed to be a column-array (and that the whole array will be returned as a column). 
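
    For example (a sketch, assuming the legacy Numeric module guarded on
    below is importable)::

        a = N.array([[1, 2, 3], [4, 5, 6]])
        acolex(a, [0, 2])   # -> the 2-column array [[1, 3], [4, 6]]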
Usage: acolex (a,indices,axis=1) Returns: the columns of a specified by indices """ if type(indices) not in [list, tuple, N.ArrayType]: indices = [indices] if len(N.shape(a)) == 1: cols = N.resize(a, [a.shape[0], 1]) else: cols = N.take(a, indices, axis) return cols def acollapse(a, keepcols, collapsecols, fcn1=None, fcn2=None, cfcn=None): """ Averages data in collapsecol, keeping all unique items in keepcols (using unique, which keeps unique LISTS of column numbers), retaining the unique sets of values in keepcols, the mean for each. If stderror or N of the mean are desired, set either or both parameters to 1. Usage: acollapse (a,keepcols,collapsecols,fcn1=None,fcn2=None,cfcn=None) Returns: unique 'conditions' specified by the contents of columns specified by keepcols, abutted with the mean(s) of column(s) specified by collapsecols """ def acollmean(inarray): return N.sum(N.ravel(inarray)) if cfcn is None: cfcn = acollmean if keepcols == []: avgcol = acolex(a, collapsecols) means = N.sum(avgcol)/float(len(avgcol)) if fcn1 is not None: try: test = fcn1(avgcol) except Exception: test = N.array(['N/A']*len(means)) means = aabut(means, test) if fcn2 is not None: try: test = fcn2(avgcol) except Exception: test = N.array(['N/A']*len(means)) means = aabut(means, test) return means else: if type(keepcols) not in [list, tuple, N.ArrayType]: keepcols = [keepcols] values = colex(a, keepcols) # so that "item" can be appended (below) uniques = sorted(unique(values)) # get a LIST, so .sort keeps rows intact newlist = [] for item in uniques: if type(item) not in [list, tuple, N.ArrayType]: item = [item] tmprows = alinexand(a, keepcols, item) for col in collapsecols: avgcol = acolex(tmprows, col) item.append(acollmean(avgcol)) if fcn1 is not None: try: test = fcn1(avgcol) except Exception: test = 'N/A' item.append(test) if fcn2 is not None: try: test = fcn2(avgcol) except Exception: test = 'N/A' item.append(test) newlist.append(item) try: new_a = N.array(newlist) except TypeError: new_a = N.array(newlist, 'O') return new_a def adm(a, criterion): """ Returns rows from the passed list of lists that meet the criteria in the passed criterion expression (a string as a function of x). Usage: adm (a,criterion) where criterion is like 'x[2]==37' """ function = 'filter(lambda x: '+criterion+',a)' lines = eval(function) try: lines = N.array(lines) except Exception: lines = N.array(lines, 'O') return lines def isstring(x): if isinstance(x, str): return 1 else: return 0 def alinexand(a, columnlist, valuelist): """ Returns the rows of an array where col (from columnlist) = val (from valuelist). One value is required for each column in columnlist. Usage: alinexand (a,columnlist,valuelist) Returns: the rows of a where columnlist[i]=valuelist[i] for ALL i """ if type(columnlist) not in [list, tuple, N.ArrayType]: columnlist = [columnlist] if type(valuelist) not in [list, tuple, N.ArrayType]: valuelist = [valuelist] criterion = '' for i in range(len(columnlist)): if isinstance(valuelist[i], str): critval = '\'' + valuelist[i] + '\'' else: critval = str(valuelist[i]) criterion = criterion + ' x['+str(columnlist[i])+']=='+critval+' and' criterion = criterion[0:-3] # remove the "and" after the last crit return adm(a, criterion) def alinexor(a, columnlist, valuelist): """ Returns the rows of an array where col (from columnlist) = val (from valuelist). One value is required for each column in columnlist. 
The exception is if either columnlist or valuelist has only 1 value, in which case that item will be expanded to match the length of the other list. Usage: alinexor (a,columnlist,valuelist) Returns: the rows of a where columnlist[i]=valuelist[i] for ANY i """ if type(columnlist) not in [list, tuple, N.ArrayType]: columnlist = [columnlist] if type(valuelist) not in [list, tuple, N.ArrayType]: valuelist = [valuelist] criterion = '' if len(columnlist) == 1 and len(valuelist) > 1: columnlist = columnlist*len(valuelist) elif len(valuelist) == 1 and len(columnlist) > 1: valuelist = valuelist*len(columnlist) for i in range(len(columnlist)): if isinstance(valuelist[i], str): critval = '\'' + valuelist[i] + '\'' else: critval = str(valuelist[i]) criterion = criterion + ' x['+str(columnlist[i])+']=='+critval+' or' criterion = criterion[0:-2] # remove the "or" after the last crit return adm(a, criterion) def areplace(a, oldval, newval): """ Replaces all occurrences of oldval with newval in array a. Usage: areplace(a,oldval,newval) """ newa = N.not_equal(a, oldval)*a return newa+N.equal(a, oldval)*newval def arecode(a, listmap, col='all'): """ Remaps the values in an array to a new set of values (useful when you need to recode data from (e.g.) strings to numbers, as most stats packages require). Can work on SINGLE columns, or 'all' columns at once. Usage: arecode (a,listmap,col='all') Returns: a version of array a where listmap[i][0] = (instead) listmap[i][1] """ ashape = a.shape if col == 'all': work = a.flat else: work = acolex(a, col) work = work.flat for pair in listmap: if isinstance(pair[1], str) or work.typecode() == 'O' or a.typecode() == 'O': work = N.array(work, 'O') a = N.array(a, 'O') for i in range(len(work)): if work[i] == pair[0]: work[i] = pair[1] if col == 'all': return N.reshape(work, ashape) else: return N.concatenate([a[:, 0:col], work[:, N.NewAxis], a[:, col+1:]], 1) else: # must be a non-Object type array and replacement work = N.where(N.equal(work, pair[0]), pair[1], work) return N.concatenate([a[:, 0:col], work[:, N.NewAxis], a[:, col+1:]], 1) def arowcompare(row1, row2): """ Compares two numeric rows from an array. Usage: arowcompare(row1,row2) Returns: an array of equal length containing 1s where the two rows had identical elements and 0 otherwise """ return N.equal(row1, row2) def arowsame(row1, row2): """ Compares two rows from an array, regardless of whether it is an array of numbers or of python objects (which requires the cmp function). Usage: arowsame(row1,row2) Returns: 1 if the two rows are identical, 0 otherwise. """ cmpval = N.alltrue(arowcompare(row1, row2)) return cmpval def asortrows(a, axis=0): """ Sorts an array "by rows". This differs from the Numeric.sort() function, which sorts elements WITHIN the given axis. Instead, this function keeps the elements along the given axis intact, but shifts them 'up or down' relative to one another. Usage: asortrows(a,axis=0) Returns: sorted version of a """ if axis != 0: a = N.swapaxes(a, axis, 0) l = sorted(a.tolist()) y = N.array(l) if axis != 0: y = N.swapaxes(y, axis, 0) return y def aunique(inarray): """ Returns unique items in the FIRST dimension of the passed array. Only works on arrays NOT including string items.
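Example (an illustrative sketch added for clarity, not part of the original docstring): aunique(N.array([[1, 2], [1, 2], [3, 4]])) returns array([[1, 2], [3, 4]]).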
Usage: aunique (inarray) """ uniques = N.array([inarray[0]]) if len(uniques.shape) == 1: # IF IT'S A 1D ARRAY for item in inarray[1:]: if N.add.reduce(N.equal(uniques, item).flat) == 0: try: uniques = N.concatenate([uniques, item[N.NewAxis, :]]) except TypeError: uniques = N.concatenate([uniques, N.array([item])]) else: # IT MUST BE A 2+D ARRAY if inarray.typecode() != 'O': # not an Object array for item in inarray[1:]: if not N.sum(N.alltrue(N.equal(uniques, item), 1)): try: uniques = N.concatenate([uniques, item[N.NewAxis, :]]) except TypeError: # the item to add isn't a list uniques = N.concatenate([uniques, N.array([item])]) else: pass # this item is already in the uniques array else: # must be an Object array, alltrue/equal functions don't work for item in inarray[1:]: newflag = 1 for unq in uniques: # elementwise sign comparison (replaces the Python 2 cmp builtin): 0=same, -1=<, 1=> test = N.sum(abs(N.array([(x > y) - (x < y) for x, y in zip(item, unq)]))) if test == 0: # if item identical to any 1 row in uniques newflag = 0 # then not a novel item to add break if newflag == 1: try: uniques = N.concatenate([uniques, item[N.NewAxis, :]]) except TypeError: # the item to add isn't a list uniques = N.concatenate([uniques, N.array([item])]) return uniques def aduplicates(inarray): """ Returns duplicate items in the FIRST dimension of the passed array. Only works on arrays NOT including string items. Usage: aduplicates (inarray) """ inarray = N.array(inarray) if len(inarray.shape) == 1: # IF IT'S A 1D ARRAY dups = [] inarray = inarray.tolist() for i in range(len(inarray)): if inarray[i] in inarray[i+1:]: dups.append(inarray[i]) dups = aunique(dups) else: # IT MUST BE A 2+D ARRAY dups = [] aslist = inarray.tolist() for i in range(len(aslist)): if aslist[i] in aslist[i+1:]: dups.append(aslist[i]) dups = unique(dups) dups = N.array(dups) return dups except ImportError: # IF NUMERIC ISN'T AVAILABLE, SKIP ALL arrayfuncs pass bx-python-0.8.13/lib/bx_extras/pyparsing.py000066400000000000000000004141741415666465100207170ustar00rootroot00000000000000# module pyparsing.py # # Copyright (c) 2003-2008 Paul T. McGuire # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. # IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY # CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, # TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE # SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. # __doc__ = """ pyparsing module - Classes and methods to define and execute parsing grammars The pyparsing module is an alternative approach to creating and executing simple grammars, vs. the traditional lex/yacc approach, or the use of regular expressions.
With pyparsing, you don't need to learn a new syntax for defining grammars or matching expressions - the parsing module provides a library of classes that you use to construct the grammar directly in Python. Here is a program to parse "Hello, World!" (or any greeting of the form "<salutation>, <addressee>!"):: from pyparsing import Word, alphas # define grammar of a greeting greet = Word( alphas ) + "," + Word( alphas ) + "!" hello = "Hello, World!" print(hello, "->", greet.parseString(hello)) The program outputs the following:: Hello, World! -> ['Hello', ',', 'World', '!'] The Python representation of the grammar is quite readable, owing to the self-explanatory class names, and the use of '+', '|' and '^' operators. The parsed results returned from parseString() can be accessed as a nested list, a dictionary, or an object with named attributes. The pyparsing module handles some of the problems that are typically vexing when writing text parsers: - extra or missing whitespace (the above program will also handle "Hello,World!", "Hello , World !", etc.) - quoted strings - embedded comments """ __version__ = "1.5.0" __versionTime__ = "28 May 2008 10:05" __author__ = "Paul McGuire <ptmcg@users.sourceforge.net>" import copy import re import string import sys import warnings import xml.sax.saxutils from weakref import ref as wkref import sre_constants __all__ = [ 'And', 'CaselessKeyword', 'CaselessLiteral', 'CharsNotIn', 'Combine', 'Dict', 'Each', 'Empty', 'FollowedBy', 'Forward', 'GoToColumn', 'Group', 'Keyword', 'LineEnd', 'LineStart', 'Literal', 'MatchFirst', 'NoMatch', 'NotAny', 'OneOrMore', 'OnlyOnce', 'Optional', 'Or', 'ParseBaseException', 'ParseElementEnhance', 'ParseException', 'ParseExpression', 'ParseFatalException', 'ParseResults', 'ParseSyntaxException', 'ParserElement', 'QuotedString', 'RecursiveGrammarException', 'Regex', 'SkipTo', 'StringEnd', 'StringStart', 'Suppress', 'Token', 'TokenConverter', 'Upcase', 'White', 'Word', 'WordEnd', 'WordStart', 'ZeroOrMore', 'alphanums', 'alphas', 'alphas8bit', 'anyCloseTag', 'anyOpenTag', 'cStyleComment', 'col', 'commaSeparatedList', 'commonHTMLEntity', 'countedArray', 'cppStyleComment', 'dblQuotedString', 'dblSlashComment', 'delimitedList', 'dictOf', 'downcaseTokens', 'empty', 'getTokensEndLoc', 'hexnums', 'htmlComment', 'javaStyleComment', 'keepOriginalText', 'line', 'lineEnd', 'lineStart', 'lineno', 'makeHTMLTags', 'makeXMLTags', 'matchOnlyAtCol', 'matchPreviousExpr', 'matchPreviousLiteral', 'nestedExpr', 'nullDebugAction', 'nums', 'oneOf', 'opAssoc', 'operatorPrecedence', 'printables', 'punc8bit', 'pythonStyleComment', 'quotedString', 'removeQuotes', 'replaceHTMLEntity', 'replaceWith', 'restOfLine', 'sglQuotedString', 'srange', 'stringEnd', 'stringStart', 'traceParseAction', 'unicodeString', 'upcaseTokens', 'withAttribute', 'indentedBlock', ] """ Detect if we are running version 3.X and make appropriate changes Robert A.
Clark """ _MAX_INT = sys.maxsize def _str2dict(strg): return {c: 0 for c in strg} # ~ return set( [c for c in strg] ) class _Constants: pass alphas = string.ascii_lowercase + string.ascii_uppercase nums = string.digits hexnums = nums + "ABCDEFabcdef" alphanums = alphas + nums _bslash = "\\" printables = "".join([c for c in string.printable if c not in string.whitespace]) class ParseBaseException(Exception): """base exception class for all parsing runtime exceptions""" __slots__ = ("loc", "msg", "pstr", "parserElement") # Performance tuning: we construct a *lot* of these, so keep this # constructor as small and fast as possible def __init__(self, pstr, loc=0, msg=None, elem=None): self.loc = loc if msg is None: self.msg = pstr self.pstr = "" else: self.msg = msg self.pstr = pstr self.parserElement = elem def __getattr__(self, aname): """supported attributes by name are: - lineno - returns the line number of the exception text - col - returns the column number of the exception text - line - returns the line containing the exception text """ if(aname == "lineno"): return lineno(self.loc, self.pstr) elif(aname in ("col", "column")): return col(self.loc, self.pstr) elif(aname == "line"): return line(self.loc, self.pstr) else: raise AttributeError(aname) def __str__(self): return "%s (at char %d), (line:%d, col:%d)" % (self.msg, self.loc, self.lineno, self.column) def __repr__(self): return str(self) def markInputline(self, markerString=">!<"): """Extracts the exception line from the input string, and marks the location of the exception with a special symbol. """ line_str = self.line line_column = self.column - 1 if markerString: line_str = "".join([line_str[:line_column], markerString, line_str[line_column:]]) return line_str.strip() class ParseException(ParseBaseException): """exception thrown when parse expressions don't match class; supported attributes by name are: - lineno - returns the line number of the exception text - col - returns the column number of the exception text - line - returns the line containing the exception text """ class ParseFatalException(ParseBaseException): """user-throwable exception thrown when inconsistent parse content is found; stops all parsing immediately""" class ParseSyntaxException(ParseFatalException): """just like ParseFatalException, but thrown internally when an ErrorStop indicates that parsing is to stop immediately because an unbacktrackable syntax error has been found""" def __init__(self, pe): super().__init__(pe.pstr, pe.loc, pe.msg, pe.parserElement) class RecursiveGrammarException(Exception): """exception thrown by validate() if the grammar could be improperly recursive""" def __init__(self, parseElementList): self.parseElementTrace = parseElementList def __str__(self): return "RecursiveGrammarException: %s" % self.parseElementTrace class _ParseResultsWithOffset: def __init__(self, p1, p2): self.tup = (p1, p2) def __getitem__(self, i): return self.tup[i] def __repr__(self): return repr(self.tup) class ParseResults: """Structured parse results, to provide multiple means of access to the parsed data: - as a list (len(results)) - by list index (results[0], results[1], etc.) - by attribute (results.<resultsName>)
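Example (an illustrative sketch using a hypothetical grammar):: date = Word(nums)("year") + "/" + Word(nums)("month") result = date.parseString("1999/12") # result.asList() -> ['1999', '/', '12']; result[0] -> '1999'; result.year -> '1999'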
""" __slots__ = ("__toklist", "__tokdict", "__doinit", "__name", "__parent", "__accumNames", "__weakref__") def __new__(cls, toklist, name=None, asList=True, modal=True): if isinstance(toklist, cls): return toklist retobj = object.__new__(cls) retobj.__doinit = True return retobj # Performance tuning: we construct a *lot* of these, so keep this # constructor as small and fast as possible def __init__(self, toklist, name=None, asList=True, modal=True): if self.__doinit: self.__doinit = False self.__name = None self.__parent = None self.__accumNames = {} if isinstance(toklist, list): self.__toklist = toklist[:] else: self.__toklist = [toklist] self.__tokdict = dict() # this line is related to debugging the asXML bug # ~ asList = False if name: if not modal: self.__accumNames[name] = 0 if isinstance(name, int): name = str(name) self.__name = name if toklist not in (None, '', []): if isinstance(toklist, str): toklist = [toklist] if asList: if isinstance(toklist, ParseResults): self[name] = _ParseResultsWithOffset(toklist.copy(), -1) else: self[name] = _ParseResultsWithOffset(ParseResults(toklist[0]), -1) self[name].__name = name else: try: self[name] = toklist[0] except (KeyError, TypeError): self[name] = toklist def __getitem__(self, i): if isinstance(i, (int, slice)): return self.__toklist[i] else: if i not in self.__accumNames: return self.__tokdict[i][-1][0] else: return ParseResults([v[0] for v in self.__tokdict[i]]) def __setitem__(self, k, v): if isinstance(v, _ParseResultsWithOffset): self.__tokdict[k] = self.__tokdict.get(k, list()) + [v] sub = v[0] elif isinstance(k, int): self.__toklist[k] = v sub = v else: self.__tokdict[k] = self.__tokdict.get(k, list()) + [_ParseResultsWithOffset(v, 0)] sub = v if isinstance(sub, ParseResults): sub.__parent = wkref(self) def __delitem__(self, i): if isinstance(i, (int, slice)): mylen = len(self.__toklist) del self.__toklist[i] # convert int to slice if isinstance(i, int): if i < 0: i += mylen i = slice(i, i+1) # get removed indices removed = list(range(*i.indices(mylen))) removed.reverse() # fixup indices in token dictionary for name in self.__tokdict: occurrences = self.__tokdict[name] for j in removed: for k, (value, position) in enumerate(occurrences): occurrences[k] = _ParseResultsWithOffset(value, position - (position > j)) else: del self.__tokdict[i] def __contains__(self, k): return k in self.__tokdict def __len__(self): return len(self.__toklist) def __bool__(self): return len(self.__toklist) > 0 __nonzero__ = __bool__ def __iter__(self): return iter(self.__toklist) def __reversed__(self): return iter(reversed(self.__toklist)) def keys(self): """Returns all named result keys.""" return self.__tokdict.keys() def pop(self, index=-1): """Removes and returns item at specified index (default=last). 
Will work with either numeric indices or dict-key indices.""" ret = self[index] del self[index] return ret def get(self, key, defaultValue=None): """Returns named result matching the given key, or if there is no such name, then returns the given defaultValue or None if no defaultValue is specified.""" if key in self: return self[key] else: return defaultValue def items(self): """Returns all named result keys and values as a list of tuples.""" return [(k, self[k]) for k in self.__tokdict] def values(self): """Returns all named result values.""" return [v[-1][0] for v in self.__tokdict.values()] def __getattr__(self, name): if name not in self.__slots__: if name in self.__tokdict: if name not in self.__accumNames: return self.__tokdict[name][-1][0] else: return ParseResults([v[0] for v in self.__tokdict[name]]) else: return "" return None def __add__(self, other): ret = self.copy() ret += other return ret def __iadd__(self, other): if other.__tokdict: offset = len(self.__toklist) addoffset = (lambda a: (a < 0 and offset) or (a+offset)) otheritems = other.__tokdict.items() otherdictitems = [(k, _ParseResultsWithOffset(v[0], addoffset(v[1]))) for (k, vlist) in otheritems for v in vlist] for k, v in otherdictitems: self[k] = v if isinstance(v[0], ParseResults): v[0].__parent = wkref(self) self.__toklist += other.__toklist self.__accumNames.update(other.__accumNames) del other return self def __repr__(self): return f"({repr(self.__toklist)}, {repr(self.__tokdict)})" def __str__(self): out = "[" sep = "" for i in self.__toklist: if isinstance(i, ParseResults): out += sep + str(i) else: out += sep + repr(i) sep = ", " out += "]" return out def _asStringList(self, sep=''): out = [] for item in self.__toklist: if out and sep: out.append(sep) if isinstance(item, ParseResults): out += item._asStringList() else: out.append(str(item)) return out def asList(self): """Returns the parse results as a nested list of matching tokens, all converted to strings.""" out = [] for res in self.__toklist: if isinstance(res, ParseResults): out.append(res.asList()) else: out.append(res) return out def asDict(self): """Returns the named parse results as dictionary.""" return dict(self.items()) def copy(self): """Returns a new copy of a ParseResults object.""" ret = ParseResults(self.__toklist) ret.__tokdict = self.__tokdict.copy() ret.__parent = self.__parent ret.__accumNames.update(self.__accumNames) ret.__name = self.__name return ret def asXML(self, doctag=None, namedItemsOnly=False, indent="", formatted=True): """Returns the parse results as XML.
Tags are created for tokens and lists that have defined results names.""" nl = "\n" out = [] namedItems = {v[1]: k for (k, vlist) in self.__tokdict.items() for v in vlist} nextLevelIndent = indent + " " # collapse out indents if formatting is not desired if not formatted: indent = "" nextLevelIndent = "" nl = "" selfTag = None if doctag is not None: selfTag = doctag else: if self.__name: selfTag = self.__name if not selfTag: if namedItemsOnly: return "" else: selfTag = "ITEM" out += [nl, indent, "<", selfTag, ">"] worklist = self.__toklist for i, res in enumerate(worklist): if isinstance(res, ParseResults): if i in namedItems: out += [res.asXML( namedItems[i], namedItemsOnly and doctag is None, nextLevelIndent, formatted)] else: out += [res.asXML( None, namedItemsOnly and doctag is None, nextLevelIndent, formatted)] else: # individual token, see if there is a name for it resTag = None if i in namedItems: resTag = namedItems[i] if not resTag: if namedItemsOnly: continue else: resTag = "ITEM" xmlBodyText = xml.sax.saxutils.escape(str(res)) out += [nl, nextLevelIndent, "<", resTag, ">", xmlBodyText, "</", resTag, ">"] out += [nl, indent, "</", selfTag, ">"] return "".join(out) def __lookup(self, sub): for k, vlist in self.__tokdict.items(): for v, _loc in vlist: if sub is v: return k return None def getName(self): """Returns the results name for this token expression.""" if self.__name: return self.__name elif self.__parent: par = self.__parent() if par: return par.__lookup(self) else: return None elif (len(self) == 1 and len(self.__tokdict) == 1 and list(self.__tokdict.values())[0][0][1] in (0, -1)): return list(self.__tokdict.keys())[0] else: return None def dump(self, indent='', depth=0): """Diagnostic method for listing out the contents of a ParseResults. Accepts an optional indent argument so that this string can be embedded in a nested display of other data.""" out = [] out.append(indent+str(self.asList())) keys = sorted(self.items()) for k, v in keys: if out: out.append('\n') out.append("{}{}- {}: ".format(indent, (' '*depth), k)) if isinstance(v, ParseResults): if v.keys(): out.append(v.dump(indent, depth+1)) else: out.append(str(v)) else: out.append(str(v)) return "".join(out) # add support for pickle protocol def __getstate__(self): return (self.__toklist, ( self.__tokdict.copy(), self.__parent is not None and self.__parent() or None, self.__accumNames, self.__name)) def __setstate__(self, state): self.__toklist = state[0] self.__tokdict, par, inAccumNames, self.__name = state[1] self.__accumNames = {} self.__accumNames.update(inAccumNames) if par is not None: self.__parent = wkref(par) else: self.__parent = None def col(loc, strg): """Returns current column within a string, counting newlines as line separators. The first column is number 1. Note: the default parsing behavior is to expand tabs in the input string before starting the parsing process. See L{I{ParserElement.parseString}} for more information on parsing strings containing <TAB>s, and suggested methods to maintain a consistent view of the parsed string, the parse location, and line and column positions within the parsed string. """ return (loc < len(strg) and strg[loc] == '\n') and 1 or loc - strg.rfind("\n", 0, loc) def lineno(loc, strg): """Returns current line number within a string, counting newlines as line separators. The first line is number 1. Note: the default parsing behavior is to expand tabs in the input string before starting the parsing process.
See L{I{ParserElement.parseString}} for more information on parsing strings containing <TAB>s, and suggested methods to maintain a consistent view of the parsed string, the parse location, and line and column positions within the parsed string. """ return strg.count("\n", 0, loc) + 1 def line(loc, strg): """Returns the line of text containing loc within a string, counting newlines as line separators. """ lastCR = strg.rfind("\n", 0, loc) nextCR = strg.find("\n", loc) if nextCR > 0: return strg[lastCR+1:nextCR] else: return strg[lastCR+1:] def _defaultStartDebugAction(instring, loc, expr): print("Match " + str(expr) + " at loc " + str(loc) + "(%d,%d)" % (lineno(loc, instring), col(loc, instring))) def _defaultSuccessDebugAction(instring, startloc, endloc, expr, toks): print("Matched " + str(expr) + " -> " + str(toks.asList())) def _defaultExceptionDebugAction(instring, loc, expr, exc): print("Exception raised:" + str(exc)) def nullDebugAction(*args): """'Do-nothing' debug action, to suppress debugging output during parsing.""" class ParserElement: """Abstract base level parser element class.""" DEFAULT_WHITE_CHARS = " \n\t\r" def setDefaultWhitespaceChars(chars): """Overrides the default whitespace chars """ ParserElement.DEFAULT_WHITE_CHARS = chars setDefaultWhitespaceChars = staticmethod(setDefaultWhitespaceChars) def __init__(self, savelist=False): self.parseAction = list() self.failAction = None # ~ self.name = "" # don't define self.name, let subclasses try/except upcall self.strRepr = None self.resultsName = None self.saveAsList = savelist self.skipWhitespace = True self.whiteChars = ParserElement.DEFAULT_WHITE_CHARS self.copyDefaultWhiteChars = True self.mayReturnEmpty = False # used when checking for left-recursion self.keepTabs = False self.ignoreExprs = list() self.debug = False self.streamlined = False self.mayIndexError = True # used to optimize exception handling for subclasses that don't advance parse index self.errmsg = "" self.modalResults = True # used to mark results names as modal (report only last) or cumulative (list all) self.debugActions = (None, None, None) # custom debug actions self.re = None self.callPreparse = True # used to avoid redundant calls to preParse self.callDuringTry = False def copy(self): """Make a copy of this ParserElement. Useful for defining different parse actions for the same parsing pattern, using copies of the original parse element.""" cpy = copy.copy(self) cpy.parseAction = self.parseAction[:] cpy.ignoreExprs = self.ignoreExprs[:] if self.copyDefaultWhiteChars: cpy.whiteChars = ParserElement.DEFAULT_WHITE_CHARS return cpy def setName(self, name): """Define name for this expression, for use in debugging.""" self.name = name self.errmsg = "Expected " + self.name if hasattr(self, "exception"): self.exception.msg = self.errmsg return self def setResultsName(self, name, listAllMatches=False): """Define name for referencing matching tokens as a nested attribute of the returned parse results. NOTE: this returns a *copy* of the original ParserElement object; this is so that the client can define a basic element, such as an integer, and reference it in multiple places with different names. """ newself = self.copy() newself.resultsName = name newself.modalResults = not listAllMatches return newself def setBreak(self, breakFlag=True): """Method to invoke the Python pdb debugger when this element is about to be parsed. Set breakFlag to True to enable, False to disable.
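Example (illustrative): Word(nums).setBreak() drops into pdb via pdb.set_trace() each time the expression is about to be matched.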
""" if breakFlag: _parseMethod = self._parse def breaker(instring, loc, doActions=True, callPreParse=True): import pdb pdb.set_trace() _parseMethod(instring, loc, doActions, callPreParse) breaker._originalParseMethod = _parseMethod self._parse = breaker else: if hasattr(self._parse, "_originalParseMethod"): self._parse = self._parse._originalParseMethod return self def _normalizeParseActionArgs(f): """Internal method used to decorate parse actions that take fewer than 3 arguments, so that all parse actions can be called as f(s,l,t).""" STAR_ARGS = 4 try: restore = None if isinstance(f, type): restore = f f = f.__init__ codeObj = f.code if codeObj.co_flags & STAR_ARGS: return f numargs = codeObj.co_argcount if hasattr(f, "__self__"): numargs -= 1 if restore: f = restore except AttributeError: try: call_im_func_code = f.__code__ # not a function, must be a callable object, get info from the # im_func binding of its bound __call__ method if call_im_func_code.co_flags & STAR_ARGS: return f numargs = call_im_func_code.co_argcount if hasattr(f.__call__, "__self__"): numargs -= 0 except AttributeError: call_func_code = f.__call__.__code__ # not a bound method, get info directly from __call__ method if call_func_code.co_flags & STAR_ARGS: return f numargs = call_func_code.co_argcount if hasattr(f.__call__, "__self__"): numargs -= 1 # ~ print ("adding function %s with %d args" % (f.func_name,numargs)) if numargs == 3: return f else: if numargs > 3: def tmp(s, l, t): return f(f.__call__.__self__, s, l, t) elif numargs == 2: def tmp(s, l, t): return f(l, t) elif numargs == 1: def tmp(s, l, t): return f(t) else: # ~ numargs == 0: def tmp(s, l, t): return f() try: tmp.__name__ = f.__name__ except (AttributeError, TypeError): # no need for special handling if attribute doesnt exist pass try: tmp.__doc__ = f.__doc__ except (AttributeError, TypeError): # no need for special handling if attribute doesnt exist pass try: tmp.__dict__.update(f.__dict__) except (AttributeError, TypeError): # no need for special handling if attribute doesnt exist pass return tmp _normalizeParseActionArgs = staticmethod(_normalizeParseActionArgs) def setParseAction(self, *fns, **kwargs): """Define action to perform when successfully matching parse element definition. Parse action fn is a callable method with 0-3 arguments, called as fn(s,loc,toks), fn(loc,toks), fn(toks), or just fn(), where: - s = the original string being parsed (see note below) - loc = the location of the matching substring - toks = a list of the matched tokens, packaged as a ParseResults object If the functions in fns modify the tokens, they can return them as the return value from fn, and the modified list of tokens will replace the original. Otherwise, fn does not need to return any value. Note: the default parsing behavior is to expand tabs in the input string before starting the parsing process. See L{I{parseString}} for more information on parsing strings containing s, and suggested methods to maintain a consistent view of the parsed string, the parse location, and line and column positions within the parsed string. """ self.parseAction = list(map(self._normalizeParseActionArgs, list(fns))) self.callDuringTry = ("callDuringTry" in kwargs and kwargs["callDuringTry"]) return self def addParseAction(self, *fns, **kwargs): """Add parse action to expression's list of parse actions. 
See L{I{setParseAction}}.""" self.parseAction += list(map(self._normalizeParseActionArgs, list(fns))) self.callDuringTry = self.callDuringTry or ("callDuringTry" in kwargs and kwargs["callDuringTry"]) return self def setFailAction(self, fn): """Define action to perform if parsing fails at this expression. Fail action fn is a callable function that takes the arguments fn(s,loc,expr,err) where: - s = string being parsed - loc = location where expression match was attempted and failed - expr = the parse expression that failed - err = the exception thrown The function returns no value. It may throw ParseFatalException if it is desired to stop parsing immediately.""" self.failAction = fn return self def _skipIgnorables(self, instring, loc): exprsFound = True while exprsFound: exprsFound = False for e in self.ignoreExprs: try: while True: loc, dummy = e._parse(instring, loc) exprsFound = True except ParseException: pass return loc def preParse(self, instring, loc): if self.ignoreExprs: loc = self._skipIgnorables(instring, loc) if self.skipWhitespace: wt = self.whiteChars instrlen = len(instring) while loc < instrlen and instring[loc] in wt: loc += 1 return loc def parseImpl(self, instring, loc, doActions=True): return loc, [] def postParse(self, instring, loc, tokenlist): return tokenlist # ~ @profile def _parseNoCache(self, instring, loc, doActions=True, callPreParse=True): debugging = (self.debug) # and doActions ) if debugging or self.failAction: # ~ print ("Match",self,"at loc",loc,"(%d,%d)" % ( lineno(loc,instring), col(loc,instring) )) if self.debugActions[0]: self.debugActions[0](instring, loc, self) if callPreParse and self.callPreparse: preloc = self.preParse(instring, loc) else: preloc = loc tokensStart = loc try: try: loc, tokens = self.parseImpl(instring, preloc, doActions) except IndexError: raise ParseException(instring, len(instring), self.errmsg, self) except ParseBaseException as err: # ~ print ("Exception raised:", err) if self.debugActions[2]: self.debugActions[2](instring, tokensStart, self, err) if self.failAction: self.failAction(instring, tokensStart, self, err) raise else: if callPreParse and self.callPreparse: preloc = self.preParse(instring, loc) else: preloc = loc tokensStart = loc if self.mayIndexError or loc >= len(instring): try: loc, tokens = self.parseImpl(instring, preloc, doActions) except IndexError: raise ParseException(instring, len(instring), self.errmsg, self) else: loc, tokens = self.parseImpl(instring, preloc, doActions) tokens = self.postParse(instring, loc, tokens) retTokens = ParseResults(tokens, self.resultsName, asList=self.saveAsList, modal=self.modalResults) if self.parseAction and (doActions or self.callDuringTry): if debugging: try: for fn in self.parseAction: tokens = fn(instring, tokensStart, retTokens) if tokens is not None: retTokens = ParseResults( tokens, self.resultsName, asList=self.saveAsList and isinstance(tokens, (ParseResults, list)), modal=self.modalResults) except ParseBaseException as err: # ~ print "Exception raised in user parse action:", err if self.debugActions[2]: self.debugActions[2](instring, tokensStart, self, err) raise else: for fn in self.parseAction: tokens = fn(instring, tokensStart, retTokens) if tokens is not None: retTokens = ParseResults( tokens, self.resultsName, asList=self.saveAsList and isinstance(tokens, (ParseResults, list)), modal=self.modalResults) if debugging: # ~ print ("Matched",self,"->",retTokens.asList()) if self.debugActions[1]: self.debugActions[1](instring, tokensStart, loc, self, retTokens) return
loc, retTokens def tryParse(self, instring, loc): try: return self._parse(instring, loc, doActions=False)[0] except ParseFatalException: raise ParseException(instring, loc, self.errmsg, self) # this method gets repeatedly called during backtracking with the same arguments - # we can cache these arguments and save ourselves the trouble of re-parsing the contained expression def _parseCache(self, instring, loc, doActions=True, callPreParse=True): lookup = (self, instring, loc, callPreParse, doActions) if lookup in ParserElement._exprArgCache: value = ParserElement._exprArgCache[lookup] if isinstance(value, Exception): raise value return value else: try: value = self._parseNoCache(instring, loc, doActions, callPreParse) ParserElement._exprArgCache[lookup] = (value[0], value[1].copy()) return value except ParseBaseException as pe: ParserElement._exprArgCache[lookup] = pe raise _parse = _parseNoCache # argument cache for optimizing repeated calls when backtracking through recursive expressions _exprArgCache = {} def resetCache(): ParserElement._exprArgCache.clear() resetCache = staticmethod(resetCache) _packratEnabled = False def enablePackrat(): """Enables "packrat" parsing, which adds memoizing to the parsing logic. Repeated parse attempts at the same string location (which happens often in many complex grammars) can immediately return a cached value, instead of re-executing parsing/validating code. Memoizing is done of both valid results and parsing exceptions. This speedup may break existing programs that use parse actions that have side-effects. For this reason, packrat parsing is disabled when you first import pyparsing. To activate the packrat feature, your program must call the class method ParserElement.enablePackrat(). If your program uses psyco to "compile as you go", you must call enablePackrat before calling psyco.full(). If you do not do this, Python will crash. For best results, call enablePackrat() immediately after importing pyparsing. """ if not ParserElement._packratEnabled: ParserElement._packratEnabled = True ParserElement._parse = ParserElement._parseCache enablePackrat = staticmethod(enablePackrat) def parseString(self, instring, parseAll=False): """Execute the parse expression with the given string. This is the main interface to the client code, once the complete expression has been built. If you want the grammar to require that the entire input string be successfully parsed, then set parseAll to True (equivalent to ending the grammar with StringEnd()). Note: parseString implicitly calls expandtabs() on the input string, in order to report proper column numbers in parse actions. If the input string contains tabs and the grammar uses parse actions that use the loc argument to index into the string being parsed, you can ensure you have a consistent view of the input string by: - calling parseWithTabs on your grammar before calling parseString (see L{I{parseWithTabs}}) - define your parse action using the full (s,loc,toks) signature, and reference the input string using the parse action's s argument - explicitly expand the tabs in your input string before calling parseString """ ParserElement.resetCache() if not self.streamlined: self.streamline() # ~ self.saveAsList = True for e in self.ignoreExprs: e.streamline() if not self.keepTabs: instring = instring.expandtabs() loc, tokens = self._parse(instring, 0) if parseAll: StringEnd()._parse(instring, loc) return tokens def scanString(self, instring, maxMatches=_MAX_INT): """Scan the input string for expression matches.
Each match will return the matching tokens, start location, and end location. May be called with optional maxMatches argument, to clip scanning after 'n' matches are found. Note that the start and end locations are reported relative to the string being parsed. See L{I{parseString}} for more information on parsing strings with embedded tabs.""" if not self.streamlined: self.streamline() for e in self.ignoreExprs: e.streamline() if not self.keepTabs: instring = str(instring).expandtabs() instrlen = len(instring) loc = 0 preparseFn = self.preParse parseFn = self._parse ParserElement.resetCache() matches = 0 while loc <= instrlen and matches < maxMatches: try: preloc = preparseFn(instring, loc) nextLoc, tokens = parseFn(instring, preloc, callPreParse=False) except ParseException: loc = preloc+1 else: matches += 1 yield tokens, preloc, nextLoc loc = nextLoc def transformString(self, instring): """Extension to scanString, to modify matching text with modified tokens that may be returned from a parse action. To use transformString, define a grammar and attach a parse action to it that modifies the returned token list. Invoking transformString() on a target string will then scan for matches, and replace the matched text patterns according to the logic in the parse action. transformString() returns the resulting transformed string.""" out = [] lastE = 0 # force preservation of <TAB>s, to minimize unwanted transformation of string, and to # keep string locs straight between transformString and scanString self.keepTabs = True for t, s, e in self.scanString(instring): out.append(instring[lastE:s]) if t: if isinstance(t, ParseResults): out += t.asList() elif isinstance(t, list): out += t else: out.append(t) lastE = e out.append(instring[lastE:]) return "".join(map(str, out)) def searchString(self, instring, maxMatches=_MAX_INT): """Another extension to scanString, simplifying the access to the tokens found to match the given parse expression. May be called with optional maxMatches argument, to clip searching after 'n' matches are found.
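Example (an illustrative sketch):: for tokens in Word(alphas).searchString("abc 123 def"): print(tokens) # prints ['abc'] then ['def']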
""" return ParseResults([t for t, s, e in self.scanString(instring, maxMatches)]) def __add__(self, other): """Implementation of + operator - returns And""" if isinstance(other, str): other = Literal(other) if not isinstance(other, ParserElement): warnings.warn( "Cannot combine element of type %s with ParserElement" % type(other), SyntaxWarning, stacklevel=2) return None return And([self, other]) def __radd__(self, other): """Implementation of + operator when left operand is not a ParserElement""" if isinstance(other, str): other = Literal(other) if not isinstance(other, ParserElement): warnings.warn( "Cannot combine element of type %s with ParserElement" % type(other), SyntaxWarning, stacklevel=2) return None return other + self def __sub__(self, other): """Implementation of - operator, returns And with error stop""" if isinstance(other, str): other = Literal(other) if not isinstance(other, ParserElement): warnings.warn( "Cannot combine element of type %s with ParserElement" % type(other), SyntaxWarning, stacklevel=2) return None return And([self, And._ErrorStop(), other]) def __rsub__(self, other): """Implementation of - operator when left operand is not a ParserElement""" if isinstance(other, str): other = Literal(other) if not isinstance(other, ParserElement): warnings.warn( "Cannot combine element of type %s with ParserElement" % type(other), SyntaxWarning, stacklevel=2) return None return other - self def __mul__(self, other): if isinstance(other, int): minElements, optElements = other, 0 elif isinstance(other, tuple): if len(other) == 0: other = (None, None) elif len(other) == 1: other = (other[0], None) if len(other) == 2: if other[0] is None: other = (0, other[1]) if isinstance(other[0], int) and other[1] is None: if other[0] == 0: return ZeroOrMore(self) if other[0] == 1: return OneOrMore(self) else: return self*other[0] + ZeroOrMore(self) elif isinstance(other[0], int) and isinstance(other[1], int): minElements, optElements = other optElements -= minElements else: raise TypeError("cannot multiply 'ParserElement' and ('%s','%s') objects", type(other[0]), type(other[1])) else: raise TypeError("can only multiply 'ParserElement' and int or (int,int) objects") else: raise TypeError("cannot multiply 'ParserElement' and '%s' objects", type(other)) if minElements < 0: raise ValueError("cannot multiply ParserElement by negative value") if optElements < 0: raise ValueError("second tuple value must be greater or equal to first tuple value") if minElements == optElements == 0: raise ValueError("cannot multiply ParserElement by 0 or (0,0)") if optElements: def makeOptionalList(n): if n > 1: return Optional(self + makeOptionalList(n-1)) else: return Optional(self) if minElements: if minElements == 1: ret = self + makeOptionalList(optElements) else: ret = And([self]*minElements) + makeOptionalList(optElements) else: ret = makeOptionalList(optElements) else: if minElements == 1: ret = self else: ret = And([self]*minElements) return ret def __rmul__(self, other): return self.__mul__(other) def __or__(self, other): """Implementation of | operator - returns MatchFirst""" if isinstance(other, str): other = Literal(other) if not isinstance(other, ParserElement): warnings.warn( "Cannot combine element of type %s with ParserElement" % type(other), SyntaxWarning, stacklevel=2) return None return MatchFirst([self, other]) def __ror__(self, other): """Implementation of | operator when left operand is not a ParserElement""" if isinstance(other, str): other = Literal(other) if not isinstance(other, 
ParserElement): warnings.warn( "Cannot combine element of type %s with ParserElement" % type(other), SyntaxWarning, stacklevel=2) return None return other | self def __xor__(self, other): """Implementation of ^ operator - returns Or""" if isinstance(other, str): other = Literal(other) if not isinstance(other, ParserElement): warnings.warn( "Cannot combine element of type %s with ParserElement" % type(other), SyntaxWarning, stacklevel=2) return None return Or([self, other]) def __rxor__(self, other): """Implementation of ^ operator when left operand is not a ParserElement""" if isinstance(other, str): other = Literal(other) if not isinstance(other, ParserElement): warnings.warn( "Cannot combine element of type %s with ParserElement" % type(other), SyntaxWarning, stacklevel=2) return None return other ^ self def __and__(self, other): """Implementation of & operator - returns Each""" if isinstance(other, str): other = Literal(other) if not isinstance(other, ParserElement): warnings.warn( "Cannot combine element of type %s with ParserElement" % type(other), SyntaxWarning, stacklevel=2) return None return Each([self, other]) def __rand__(self, other): """Implementation of & operator when left operand is not a ParserElement""" if isinstance(other, str): other = Literal(other) if not isinstance(other, ParserElement): warnings.warn( "Cannot combine element of type %s with ParserElement" % type(other), SyntaxWarning, stacklevel=2) return None return other & self def __invert__(self): """Implementation of ~ operator - returns NotAny""" return NotAny(self) def __call__(self, name): """Shortcut for setResultsName, with listAllMatches=default:: userdata = Word(alphas).setResultsName("name") + Word(nums+"-").setResultsName("socsecno") could be written as:: userdata = Word(alphas)("name") + Word(nums+"-")("socsecno") """ return self.setResultsName(name) def suppress(self): """Suppresses the output of this ParserElement; useful to keep punctuation from cluttering up returned output. """ return Suppress(self) def leaveWhitespace(self): """Disables the skipping of whitespace before matching the characters in the ParserElement's defined pattern. This is normally only used internally by the pyparsing module, but may be needed in some whitespace-sensitive grammars. """ self.skipWhitespace = False return self def setWhitespaceChars(self, chars): """Overrides the default whitespace chars """ self.skipWhitespace = True self.whiteChars = chars self.copyDefaultWhiteChars = False return self def parseWithTabs(self): """Overrides default behavior to expand <TAB>s to spaces before parsing the input string. Must be called before parseString when the input grammar contains elements that match <TAB> characters.""" self.keepTabs = True return self def ignore(self, other): """Define expression to be ignored (e.g., comments) while doing pattern matching; may be called repeatedly, to define multiple comment or other ignorable patterns. """ if isinstance(other, Suppress): if other not in self.ignoreExprs: self.ignoreExprs.append(other) else: self.ignoreExprs.append(Suppress(other)) return self def setDebugActions(self, startAction, successAction, exceptionAction): """Enable display of debugging messages while doing pattern matching.""" self.debugActions = (startAction or _defaultStartDebugAction, successAction or _defaultSuccessDebugAction, exceptionAction or _defaultExceptionDebugAction) self.debug = True return self def setDebug(self, flag=True): """Enable display of debugging messages while doing pattern matching.
Set flag to True to enable, False to disable.""" if flag: self.setDebugActions(_defaultStartDebugAction, _defaultSuccessDebugAction, _defaultExceptionDebugAction) else: self.debug = False return self def __str__(self): return self.name def __repr__(self): return str(self) def streamline(self): self.streamlined = True self.strRepr = None return self def checkRecursion(self, parseElementList): pass def validate(self, validateTrace=None): """Check defined expressions for valid structure, check for infinite recursive definitions.""" self.checkRecursion([]) def parseFile(self, file_or_filename): """Execute the parse expression on the given file or filename. If a filename is specified (instead of a file object), the entire file is opened, read, and closed before parsing. """ try: file_contents = file_or_filename.read() except AttributeError: f = open(file_or_filename, "rb") file_contents = f.read() f.close() return self.parseString(file_contents) def getException(self): return ParseException("", 0, self.errmsg, self) def __getattr__(self, aname): if aname == "myException": self.myException = ret = self.getException() return ret else: raise AttributeError("no such attribute " + aname) def __eq__(self, other): if isinstance(other, str): try: (self + StringEnd()).parseString(str(other)) return True except ParseBaseException: return False else: return super() == other def __hash__(self): return hash(id(self)) def __req__(self, other): return self == other class Token(ParserElement): """Abstract ParserElement subclass, for defining atomic matching patterns.""" def __init__(self): super().__init__(savelist=False) def setName(self, name): s = super().setName(name) self.errmsg = "Expected " + self.name return s class Empty(Token): """An empty token, will always match.""" def __init__(self): super().__init__() self.name = "Empty" self.mayReturnEmpty = True self.mayIndexError = False class NoMatch(Token): """A token that will never match.""" def __init__(self): super().__init__() self.name = "NoMatch" self.mayReturnEmpty = True self.mayIndexError = False self.errmsg = "Unmatchable token" def parseImpl(self, instring, loc, doActions=True): exc = self.myException exc.loc = loc exc.pstr = instring raise exc class Literal(Token): """Token to exactly match a specified string.""" def __init__(self, matchString): super().__init__() self.match = matchString self.matchLen = len(matchString) try: self.firstMatchChar = matchString[0] except IndexError: warnings.warn( "null string passed to Literal; use Empty() instead", SyntaxWarning, stacklevel=2) self.__class__ = Empty self.name = '"%s"' % str(self.match) self.errmsg = "Expected " + self.name self.mayReturnEmpty = False self.mayIndexError = False # Performance tuning: this routine gets called a *lot* # if this is a single character match string and the first character matches, # short-circuit as quickly as possible, and avoid calling startswith # ~ @profile def parseImpl(self, instring, loc, doActions=True): if (instring[loc] == self.firstMatchChar and (self.matchLen == 1 or instring.startswith(self.match, loc))): return loc+self.matchLen, self.match # ~ raise ParseException( instring, loc, self.errmsg ) exc = self.myException exc.loc = loc exc.pstr = instring raise exc _L = Literal class Keyword(Token): """Token to exactly match a specified string as a keyword, that is, it must be immediately followed by a non-keyword character. Compare with Literal:: Literal("if") will match the leading 'if' in 'ifAndOnlyIf'. 
Keyword("if") will not; it will only match the leading 'if in 'if x=1', or 'if(y==2)' Accepts two optional constructor arguments in addition to the keyword string: identChars is a string of characters that would be valid identifier characters, defaulting to all alphanumerics + "_" and "$"; caseless allows case-insensitive matching, default is False. """ DEFAULT_KEYWORD_CHARS = alphanums+"_$" def __init__(self, matchString, identChars=DEFAULT_KEYWORD_CHARS, caseless=False): super().__init__() self.match = matchString self.matchLen = len(matchString) try: self.firstMatchChar = matchString[0] except IndexError: warnings.warn( "null string passed to Keyword; use Empty() instead", SyntaxWarning, stacklevel=2) self.name = '"%s"' % self.match self.errmsg = "Expected " + self.name self.mayReturnEmpty = False self.mayIndexError = False self.caseless = caseless if caseless: self.caselessmatch = matchString.upper() identChars = identChars.upper() self.identChars = _str2dict(identChars) def parseImpl(self, instring, loc, doActions=True): if self.caseless: if ((instring[loc:loc+self.matchLen].upper() == self.caselessmatch) and (loc >= len(instring)-self.matchLen or instring[loc+self.matchLen].upper() not in self.identChars) and (loc == 0 or instring[loc-1].upper() not in self.identChars)): return loc+self.matchLen, self.match else: if (instring[loc] == self.firstMatchChar and (self.matchLen == 1 or instring.startswith(self.match, loc)) and (loc >= len(instring)-self.matchLen or instring[loc+self.matchLen] not in self.identChars) and (loc == 0 or instring[loc-1] not in self.identChars)): return loc+self.matchLen, self.match # ~ raise ParseException( instring, loc, self.errmsg ) exc = self.myException exc.loc = loc exc.pstr = instring raise exc def copy(self): c = super().copy() c.identChars = Keyword.DEFAULT_KEYWORD_CHARS return c def setDefaultKeywordChars(chars): """Overrides the default Keyword chars """ Keyword.DEFAULT_KEYWORD_CHARS = chars setDefaultKeywordChars = staticmethod(setDefaultKeywordChars) class CaselessLiteral(Literal): """Token to match a specified string, ignoring case of letters. Note: the matched results will always be in the case of the given match string, NOT the case of the input text. """ def __init__(self, matchString): super().__init__(matchString.upper()) # Preserve the defining literal. self.returnString = matchString self.name = "'%s'" % self.returnString self.errmsg = "Expected " + self.name def parseImpl(self, instring, loc, doActions=True): if instring[loc:loc+self.matchLen].upper() == self.match: return loc+self.matchLen, self.returnString # ~ raise ParseException( instring, loc, self.errmsg ) exc = self.myException exc.loc = loc exc.pstr = instring raise exc class CaselessKeyword(Keyword): def __init__(self, matchString, identChars=Keyword.DEFAULT_KEYWORD_CHARS): super().__init__(matchString, identChars, caseless=True) def parseImpl(self, instring, loc, doActions=True): if ((instring[loc:loc+self.matchLen].upper() == self.caselessmatch) and (loc >= len(instring)-self.matchLen or instring[loc+self.matchLen].upper() not in self.identChars)): return loc+self.matchLen, self.match # ~ raise ParseException( instring, loc, self.errmsg ) exc = self.myException exc.loc = loc exc.pstr = instring raise exc class Word(Token): """Token for matching words composed of allowed character sets. 
Defined with string containing all allowed initial characters, an optional string containing allowed body characters (if omitted, defaults to the initial character set), and an optional minimum, maximum, and/or exact length. The default value for min is 1 (a minimum value < 1 is not valid); the default values for max and exact are 0, meaning no maximum or exact length restriction. """ def __init__(self, initChars, bodyChars=None, min=1, max=0, exact=0, asKeyword=False): super().__init__() self.initCharsOrig = initChars self.initChars = _str2dict(initChars) if bodyChars: self.bodyCharsOrig = bodyChars self.bodyChars = _str2dict(bodyChars) else: self.bodyCharsOrig = initChars self.bodyChars = _str2dict(initChars) self.maxSpecified = max > 0 if min < 1: raise ValueError("cannot specify a minimum length < 1; use Optional(Word()) if zero-length word is permitted") self.minLen = min if max > 0: self.maxLen = max else: self.maxLen = _MAX_INT if exact > 0: self.maxLen = exact self.minLen = exact self.name = str(self) self.errmsg = "Expected " + self.name self.mayIndexError = False self.asKeyword = asKeyword if ' ' not in self.initCharsOrig+self.bodyCharsOrig and (min == 1 and max == 0 and exact == 0): if self.bodyCharsOrig == self.initCharsOrig: self.reString = "[%s]+" % _escapeRegexRangeChars(self.initCharsOrig) elif len(self.bodyCharsOrig) == 1: self.reString = "{}[{}]*".format( re.escape(self.initCharsOrig), _escapeRegexRangeChars(self.bodyCharsOrig)) else: self.reString = "[{}][{}]*".format( _escapeRegexRangeChars(self.initCharsOrig), _escapeRegexRangeChars(self.bodyCharsOrig)) if self.asKeyword: self.reString = r"\b"+self.reString+r"\b" try: self.re = re.compile(self.reString) except Exception: self.re = None def parseImpl(self, instring, loc, doActions=True): if self.re: result = self.re.match(instring, loc) if not result: exc = self.myException exc.loc = loc exc.pstr = instring raise exc loc = result.end() return loc, result.group() if not(instring[loc] in self.initChars): # ~ raise ParseException( instring, loc, self.errmsg ) exc = self.myException exc.loc = loc exc.pstr = instring raise exc start = loc loc += 1 instrlen = len(instring) bodychars = self.bodyChars maxloc = start + self.maxLen maxloc = min(maxloc, instrlen) while loc < maxloc and instring[loc] in bodychars: loc += 1 throwException = False if loc - start < self.minLen: throwException = True if self.maxSpecified and loc < instrlen and instring[loc] in bodychars: throwException = True if self.asKeyword: if (start > 0 and instring[start-1] in bodychars) or (loc < instrlen and instring[loc] in bodychars): throwException = True if throwException: # ~ raise ParseException( instring, loc, self.errmsg ) exc = self.myException exc.loc = loc exc.pstr = instring raise exc return loc, instring[start:loc] def __str__(self): try: return super().__str__() except Exception: pass if self.strRepr is None: def charsAsStr(s): if len(s) > 4: return s[:4]+"..." else: return s if self.initCharsOrig != self.bodyCharsOrig: self.strRepr = f"W:({charsAsStr(self.initCharsOrig)},{charsAsStr(self.bodyCharsOrig)})" else: self.strRepr = "W:(%s)" % charsAsStr(self.initCharsOrig) return self.strRepr class Regex(Token): """Token for matching strings that match a given regular expression. Defined with string specifying the regular expression in a form recognized by the inbuilt Python re module. """ def __init__(self, pattern, flags=0): """The parameters pattern and flags are passed to the re.compile() function as-is. 
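Example (an illustrative sketch): Regex(r"\d+\.\d*") matches a simple decimal such as '3.14'.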
See the Python re module for an explanation of the acceptable patterns and flags.""" super().__init__() if len(pattern) == 0: warnings.warn( "null string passed to Regex; use Empty() instead", SyntaxWarning, stacklevel=2) self.pattern = pattern self.flags = flags try: self.re = re.compile(self.pattern, self.flags) self.reString = self.pattern except sre_constants.error: warnings.warn( "invalid pattern (%s) passed to Regex" % pattern, SyntaxWarning, stacklevel=2) raise self.name = str(self) self.errmsg = "Expected " + self.name self.mayIndexError = False self.mayReturnEmpty = True def parseImpl(self, instring, loc, doActions=True): result = self.re.match(instring, loc) if not result: exc = self.myException exc.loc = loc exc.pstr = instring raise exc loc = result.end() d = result.groupdict() ret = ParseResults(result.group()) if d: for k in d: ret[k] = d[k] return loc, ret def __str__(self): try: return super().__str__() except Exception: pass if self.strRepr is None: self.strRepr = "Re:(%s)" % repr(self.pattern) return self.strRepr class QuotedString(Token): """Token for matching strings that are delimited by quoting characters. """ def __init__(self, quoteChar, escChar=None, escQuote=None, multiline=False, unquoteResults=True, endQuoteChar=None): """ Defined with the following parameters: - quoteChar - string of one or more characters defining the quote delimiting string - escChar - character to escape quotes, typically backslash (default=None) - escQuote - special quote sequence to escape an embedded quote string (such as SQL's "" to escape an embedded ") (default=None) - multiline - boolean indicating whether quotes can span multiple lines (default=False) - unquoteResults - boolean indicating whether the matched text should be unquoted (default=True) - endQuoteChar - string of one or more characters defining the end of the quote delimited string (default=None => same as quoteChar) """ super().__init__() # remove white space from quote chars - won't work anyway quoteChar = quoteChar.strip() if len(quoteChar) == 0: warnings.warn("quoteChar cannot be the empty string", SyntaxWarning, stacklevel=2) raise SyntaxError() if endQuoteChar is None: endQuoteChar = quoteChar else: endQuoteChar = endQuoteChar.strip() if len(endQuoteChar) == 0: warnings.warn("endQuoteChar cannot be the empty string", SyntaxWarning, stacklevel=2) raise SyntaxError() self.quoteChar = quoteChar self.quoteCharLen = len(quoteChar) self.firstQuoteChar = quoteChar[0] self.endQuoteChar = endQuoteChar self.endQuoteCharLen = len(endQuoteChar) self.escChar = escChar self.escQuote = escQuote self.unquoteResults = unquoteResults if multiline: self.flags = re.MULTILINE | re.DOTALL self.pattern = r'{}(?:[^{}{}]'.format( re.escape(self.quoteChar), _escapeRegexRangeChars(self.endQuoteChar[0]), (escChar is not None and _escapeRegexRangeChars(escChar) or '')) else: self.flags = 0 self.pattern = r'{}(?:[^{}\n\r{}]'.format( re.escape(self.quoteChar), _escapeRegexRangeChars(self.endQuoteChar[0]), (escChar is not None and _escapeRegexRangeChars(escChar) or '')) if len(self.endQuoteChar) > 1: self.pattern += ( '|(?:' + ')|(?:'.join(["{}[^{}]".format( re.escape(self.endQuoteChar[:i]), _escapeRegexRangeChars(self.endQuoteChar[i]) ) for i in range(len(self.endQuoteChar)-1, 0, -1)]) + ')') if escQuote: self.pattern += (r'|(?:%s)' % re.escape(escQuote)) if escChar: self.pattern += (r'|(?:%s.)' % re.escape(escChar)) self.escCharReplacePattern = re.escape(self.escChar)+"(.)" self.pattern += (r')*%s' % re.escape(self.endQuoteChar)) try: self.re =
re.compile(self.pattern, self.flags) self.reString = self.pattern except sre_constants.error: warnings.warn( "invalid pattern (%s) passed to Regex" % self.pattern, SyntaxWarning, stacklevel=2) raise self.name = str(self) self.errmsg = "Expected " + self.name self.mayIndexError = False self.mayReturnEmpty = True def parseImpl(self, instring, loc, doActions=True): result = instring[loc] == self.firstQuoteChar and self.re.match(instring, loc) or None if not result: exc = self.myException exc.loc = loc exc.pstr = instring raise exc loc = result.end() ret = result.group() if self.unquoteResults: # strip off quotes ret = ret[self.quoteCharLen:-self.endQuoteCharLen] if isinstance(ret, str): # replace escaped characters if self.escChar: ret = re.sub(self.escCharReplacePattern, r"\g<1>", ret) # replace escaped quotes if self.escQuote: ret = ret.replace(self.escQuote, self.endQuoteChar) return loc, ret def __str__(self): try: return super().__str__() except Exception: pass if self.strRepr is None: self.strRepr = f"quoted string, starting with {self.quoteChar} ending with {self.endQuoteChar}" return self.strRepr class CharsNotIn(Token): """Token for matching words composed of characters *not* in a given set. Defined with string containing all disallowed characters, and an optional minimum, maximum, and/or exact length. The default value for min is 1 (a minimum value < 1 is not valid); the default values for max and exact are 0, meaning no maximum or exact length restriction. """ def __init__(self, notChars, min=1, max=0, exact=0): super().__init__() self.skipWhitespace = False self.notChars = notChars if min < 1: raise ValueError("cannot specify a minimum length < 1; use Optional(CharsNotIn()) if zero-length char group is permitted") self.minLen = min if max > 0: self.maxLen = max else: self.maxLen = _MAX_INT if exact > 0: self.maxLen = exact self.minLen = exact self.name = str(self) self.errmsg = "Expected " + self.name self.mayReturnEmpty = (self.minLen == 0) self.mayIndexError = False def parseImpl(self, instring, loc, doActions=True): if instring[loc] in self.notChars: # ~ raise ParseException( instring, loc, self.errmsg ) exc = self.myException exc.loc = loc exc.pstr = instring raise exc start = loc loc += 1 notchars = self.notChars maxlen = min(start+self.maxLen, len(instring)) while loc < maxlen and (instring[loc] not in notchars): loc += 1 if loc - start < self.minLen: # ~ raise ParseException( instring, loc, self.errmsg ) exc = self.myException exc.loc = loc exc.pstr = instring raise exc return loc, instring[start:loc] def __str__(self): try: return super().__str__() except Exception: pass if self.strRepr is None: if len(self.notChars) > 4: self.strRepr = "!W:(%s...)" % self.notChars[:4] else: self.strRepr = "!W:(%s)" % self.notChars return self.strRepr class White(Token): """Special matching class for matching whitespace. Normally, whitespace is ignored by pyparsing grammars. This class is included when some whitespace structures are significant. Define with a string containing the whitespace characters to be matched; default is " \\t\\n". 
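For example, to make runs of spaces significant instead of skipped (an illustrative sketch)::

    spaces = White(" ")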
Also takes optional min, max, and exact arguments, as defined for the Word class.""" whiteStrs = { " ": "<SPC>", "\t": "<TAB>", "\n": "<LF>", "\r": "<CR>", "\f": "<FF>", } def __init__(self, ws=" \t\r\n", min=1, max=0, exact=0): super().__init__() self.matchWhite = ws self.setWhitespaceChars("".join([c for c in self.whiteChars if c not in self.matchWhite])) self.name = ("".join([White.whiteStrs[c] for c in self.matchWhite])) self.mayReturnEmpty = True self.errmsg = "Expected " + self.name self.minLen = min if max > 0: self.maxLen = max else: self.maxLen = _MAX_INT if exact > 0: self.maxLen = exact self.minLen = exact def parseImpl(self, instring, loc, doActions=True): if not(instring[loc] in self.matchWhite): exc = self.myException exc.loc = loc exc.pstr = instring raise exc start = loc loc += 1 maxloc = start + self.maxLen maxloc = min(maxloc, len(instring)) while loc < maxloc and instring[loc] in self.matchWhite: loc += 1 if loc - start < self.minLen: # ~ raise ParseException( instring, loc, self.errmsg ) exc = self.myException exc.loc = loc exc.pstr = instring raise exc return loc, instring[start:loc] class _PositionToken(Token): def __init__(self): super().__init__() self.name = self.__class__.__name__ self.mayReturnEmpty = True self.mayIndexError = False class GoToColumn(_PositionToken): """Token to advance to a specific column of input text; useful for tabular report scraping.""" def __init__(self, colno): super().__init__() self.col = colno def preParse(self, instring, loc): if col(loc, instring) != self.col: instrlen = len(instring) if self.ignoreExprs: loc = self._skipIgnorables(instring, loc) while loc < instrlen and instring[loc].isspace() and col(loc, instring) != self.col: loc += 1 return loc def parseImpl(self, instring, loc, doActions=True): thiscol = col(loc, instring) if thiscol > self.col: raise ParseException(instring, loc, "Text not in expected column", self) newloc = loc + self.col - thiscol ret = instring[loc: newloc] return newloc, ret class LineStart(_PositionToken): """Matches if current position is at the beginning of a line within the parse string""" def __init__(self): super().__init__() self.setWhitespaceChars(" \t") self.errmsg = "Expected start of line" def preParse(self, instring, loc): preloc = super().preParse(instring, loc) if instring[preloc] == "\n": loc += 1 return loc def parseImpl(self, instring, loc, doActions=True): if not(loc == 0 or (loc == self.preParse(instring, 0)) or (instring[loc-1] == "\n")): # col(loc, instring) != 1: exc = self.myException exc.loc = loc exc.pstr = instring raise exc return loc, [] class LineEnd(_PositionToken): """Matches if current position is at the end of a line within the parse string""" def __init__(self): super().__init__() self.setWhitespaceChars(" \t") self.errmsg = "Expected end of line" def parseImpl(self, instring, loc, doActions=True): if loc < len(instring): if instring[loc] == "\n": return loc+1, "\n" else: exc = self.myException exc.loc = loc exc.pstr = instring raise exc elif loc == len(instring): return loc+1, [] else: exc = self.myException exc.loc = loc exc.pstr = instring raise exc class StringStart(_PositionToken): """Matches if current position is at the beginning of the parse string""" def __init__(self): super().__init__() self.errmsg = "Expected start of text" def parseImpl(self, instring, loc, doActions=True): if loc != 0: # see if entire string up to here is just whitespace and ignoreables if loc != self.preParse(instring, 0): exc = self.myException exc.loc = loc exc.pstr = instring raise exc return loc, [] class
StringEnd(_PositionToken): """Matches if current position is at the end of the parse string""" def __init__(self): super().__init__() self.errmsg = "Expected end of text" def parseImpl(self, instring, loc, doActions=True): if loc < len(instring): exc = self.myException exc.loc = loc exc.pstr = instring raise exc elif loc == len(instring): return loc+1, [] elif loc > len(instring): return loc, [] else: exc = self.myException exc.loc = loc exc.pstr = instring raise exc class WordStart(_PositionToken): """Matches if the current position is at the beginning of a Word, and is not preceded by any character in a given set of wordChars (default=printables). To emulate the \b behavior of regular expressions, use WordStart(alphanums). WordStart will also match at the beginning of the string being parsed, or at the beginning of a line. """ def __init__(self, wordChars=printables): super().__init__() self.wordChars = _str2dict(wordChars) self.errmsg = "Not at the start of a word" def parseImpl(self, instring, loc, doActions=True): if loc != 0: if (instring[loc-1] in self.wordChars or instring[loc] not in self.wordChars): exc = self.myException exc.loc = loc exc.pstr = instring raise exc return loc, [] class WordEnd(_PositionToken): """Matches if the current position is at the end of a Word, and is not followed by any character in a given set of wordChars (default=printables). To emulate the \b behavior of regular expressions, use WordEnd(alphanums). WordEnd will also match at the end of the string being parsed, or at the end of a line. """ def __init__(self, wordChars=printables): super().__init__() self.wordChars = _str2dict(wordChars) self.skipWhitespace = False self.errmsg = "Not at the end of a word" def parseImpl(self, instring, loc, doActions=True): instrlen = len(instring) if instrlen > 0 and loc < instrlen: if (instring[loc] in self.wordChars or instring[loc-1] not in self.wordChars): # ~ raise ParseException( instring, loc, "Expected end of word" ) exc = self.myException exc.loc = loc exc.pstr = instring raise exc return loc, [] class ParseExpression(ParserElement): """Abstract subclass of ParserElement, for combining and post-processing parsed tokens.""" def __init__(self, exprs, savelist=False): super().__init__(savelist) if isinstance(exprs, list): self.exprs = exprs elif isinstance(exprs, str): self.exprs = [Literal(exprs)] else: self.exprs = [exprs] self.callPreparse = False def __getitem__(self, i): return self.exprs[i] def append(self, other): self.exprs.append(other) self.strRepr = None return self def leaveWhitespace(self): """Extends leaveWhitespace defined in base class, and also invokes leaveWhitespace on all contained expressions.""" self.skipWhitespace = False self.exprs = [e.copy() for e in self.exprs] for e in self.exprs: e.leaveWhitespace() return self def ignore(self, other): if isinstance(other, Suppress): if other not in self.ignoreExprs: super().ignore(other) for e in self.exprs: e.ignore(self.ignoreExprs[-1]) else: super().ignore(other) for e in self.exprs: e.ignore(self.ignoreExprs[-1]) return self def __str__(self): try: return super().__str__() except Exception: pass if self.strRepr is None: self.strRepr = f"{self.__class__.__name__}:({str(self.exprs)})" return self.strRepr def streamline(self): super().streamline() for e in self.exprs: e.streamline() # collapse nested And's of the form And( And( And( a,b), c), d) to And( a,b,c,d ) # but only if there are no parse actions or resultsNames on the nested And's # (likewise for Or's and MatchFirst's) if len(self.exprs) == 
2: other = self.exprs[0] if (isinstance(other, self.__class__) and not(other.parseAction) and other.resultsName is None and not other.debug): self.exprs = other.exprs[:] + [self.exprs[1]] self.strRepr = None self.mayReturnEmpty |= other.mayReturnEmpty self.mayIndexError |= other.mayIndexError other = self.exprs[-1] if (isinstance(other, self.__class__) and not(other.parseAction) and other.resultsName is None and not other.debug): self.exprs = self.exprs[:-1] + other.exprs[:] self.strRepr = None self.mayReturnEmpty |= other.mayReturnEmpty self.mayIndexError |= other.mayIndexError return self def setResultsName(self, name, listAllMatches=False): ret = super().setResultsName(name, listAllMatches) return ret def validate(self, validateTrace=None): if validateTrace is None: validateTrace = [] tmp = validateTrace[:]+[self] for e in self.exprs: e.validate(tmp) self.checkRecursion([]) class And(ParseExpression): """Requires all given ParseExpressions to be found in the given order. Expressions may be separated by whitespace. May be constructed using the '+' operator. """ class _ErrorStop(Empty): def __new__(cls, *args, **kwargs): return And._ErrorStop.instance _ErrorStop.instance = Empty() _ErrorStop.instance.leaveWhitespace() def __init__(self, exprs, savelist=True): super().__init__(exprs, savelist) self.mayReturnEmpty = True for e in self.exprs: if not e.mayReturnEmpty: self.mayReturnEmpty = False break self.setWhitespaceChars(exprs[0].whiteChars) self.skipWhitespace = exprs[0].skipWhitespace self.callPreparse = True def parseImpl(self, instring, loc, doActions=True): # pass False as last arg to _parse for first element, since we already # pre-parsed the string as part of our And pre-parsing loc, resultlist = self.exprs[0]._parse(instring, loc, doActions, callPreParse=False) errorStop = False for e in self.exprs[1:]: if e is And._ErrorStop.instance: errorStop = True continue if errorStop: try: loc, exprtokens = e._parse(instring, loc, doActions) except ParseBaseException as pe: raise ParseSyntaxException(pe) except IndexError: raise ParseSyntaxException(ParseException(instring, len(instring), self.errmsg, self)) else: loc, exprtokens = e._parse(instring, loc, doActions) if exprtokens or exprtokens.keys(): resultlist += exprtokens return loc, resultlist def __iadd__(self, other): if isinstance(other, str): other = Literal(other) return self.append(other) # And( [ self, other ] ) def checkRecursion(self, parseElementList): subRecCheckList = parseElementList[:] + [self] for e in self.exprs: e.checkRecursion(subRecCheckList) if not e.mayReturnEmpty: break def __str__(self): if hasattr(self, "name"): return self.name if self.strRepr is None: self.strRepr = "{" + " ".join([str(e) for e in self.exprs]) + "}" return self.strRepr class Or(ParseExpression): """Requires that at least one ParseExpression is found. If two expressions match, the expression that matches the longest string will be used. May be constructed using the '^' operator. 
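For example (an illustrative sketch; the names below are hypothetical)::

    integer = Word(nums)
    real = Combine(Word(nums) + "." + Word(nums))
    number = real ^ integer                # both alternatives are tried; the longest match wins
    print(number.parseString("3.1416"))    # -> ['3.1416'], not ['3']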
""" def __init__(self, exprs, savelist=False): super().__init__(exprs, savelist) self.mayReturnEmpty = False for e in self.exprs: if e.mayReturnEmpty: self.mayReturnEmpty = True break def parseImpl(self, instring, loc, doActions=True): maxExcLoc = -1 maxMatchLoc = -1 maxException = None for e in self.exprs: try: loc2 = e.tryParse(instring, loc) except ParseException as err: if err.loc > maxExcLoc: maxException = err maxExcLoc = err.loc except IndexError: if len(instring) > maxExcLoc: maxException = ParseException(instring, len(instring), e.errmsg, self) maxExcLoc = len(instring) else: if loc2 > maxMatchLoc: maxMatchLoc = loc2 maxMatchExp = e if maxMatchLoc < 0: if maxException is not None: raise maxException else: raise ParseException(instring, loc, "no defined alternatives to match", self) return maxMatchExp._parse(instring, loc, doActions) def __ixor__(self, other): if isinstance(other, str): other = Literal(other) return self.append(other) # Or( [ self, other ] ) def __str__(self): if hasattr(self, "name"): return self.name if self.strRepr is None: self.strRepr = "{" + " ^ ".join([str(e) for e in self.exprs]) + "}" return self.strRepr def checkRecursion(self, parseElementList): subRecCheckList = parseElementList[:] + [self] for e in self.exprs: e.checkRecursion(subRecCheckList) class MatchFirst(ParseExpression): """Requires that at least one ParseExpression is found. If two expressions match, the first one listed is the one that will match. May be constructed using the '|' operator. """ def __init__(self, exprs, savelist=False): super().__init__(exprs, savelist) if exprs: self.mayReturnEmpty = False for e in self.exprs: if e.mayReturnEmpty: self.mayReturnEmpty = True break else: self.mayReturnEmpty = True def parseImpl(self, instring, loc, doActions=True): maxExcLoc = -1 maxException = None for e in self.exprs: try: ret = e._parse(instring, loc, doActions) return ret except ParseException as err: if err.loc > maxExcLoc: maxException = err maxExcLoc = err.loc except IndexError: if len(instring) > maxExcLoc: maxException = ParseException(instring, len(instring), e.errmsg, self) maxExcLoc = len(instring) # only got here if no expression matched, raise exception for match that made it the furthest else: if maxException is not None: raise maxException else: raise ParseException(instring, loc, "no defined alternatives to match", self) def __ior__(self, other): if isinstance(other, str): other = Literal(other) return self.append(other) # MatchFirst( [ self, other ] ) def __str__(self): if hasattr(self, "name"): return self.name if self.strRepr is None: self.strRepr = "{" + " | ".join([str(e) for e in self.exprs]) + "}" return self.strRepr def checkRecursion(self, parseElementList): subRecCheckList = parseElementList[:] + [self] for e in self.exprs: e.checkRecursion(subRecCheckList) class Each(ParseExpression): """Requires all given ParseExpressions to be found, but in any order. Expressions may be separated by whitespace. May be constructed using the '&' operator. 
""" def __init__(self, exprs, savelist=True): super().__init__(exprs, savelist) self.mayReturnEmpty = True for e in self.exprs: if not e.mayReturnEmpty: self.mayReturnEmpty = False break self.skipWhitespace = True self.initExprGroups = True def parseImpl(self, instring, loc, doActions=True): if self.initExprGroups: self.optionals = [e.expr for e in self.exprs if isinstance(e, Optional)] self.multioptionals = [e.expr for e in self.exprs if isinstance(e, ZeroOrMore)] self.multirequired = [e.expr for e in self.exprs if isinstance(e, OneOrMore)] self.required = [e for e in self.exprs if not isinstance(e, (Optional, ZeroOrMore, OneOrMore))] self.required += self.multirequired self.initExprGroups = False tmpLoc = loc tmpReqd = self.required[:] tmpOpt = self.optionals[:] matchOrder = [] keepMatching = True while keepMatching: tmpExprs = tmpReqd + tmpOpt + self.multioptionals + self.multirequired failed = [] for e in tmpExprs: try: tmpLoc = e.tryParse(instring, tmpLoc) except ParseException: failed.append(e) else: matchOrder.append(e) if e in tmpReqd: tmpReqd.remove(e) elif e in tmpOpt: tmpOpt.remove(e) if len(failed) == len(tmpExprs): keepMatching = False if tmpReqd: missing = ", ".join([str(e) for e in tmpReqd]) raise ParseException(instring, loc, "Missing one or more required elements (%s)" % missing) # add any unmatched Optionals, in case they have default values defined matchOrder += list(e for e in self.exprs if isinstance(e, Optional) and e.expr in tmpOpt) resultlist = [] for e in matchOrder: loc, results = e._parse(instring, loc, doActions) resultlist.append(results) finalResults = ParseResults([]) for r in resultlist: dups = {} for k in r.keys(): if k in finalResults.keys(): tmp = ParseResults(finalResults[k]) tmp += ParseResults(r[k]) dups[k] = tmp finalResults += ParseResults(r) for k, v in dups.items(): finalResults[k] = v return loc, finalResults def __str__(self): if hasattr(self, "name"): return self.name if self.strRepr is None: self.strRepr = "{" + " & ".join([str(e) for e in self.exprs]) + "}" return self.strRepr def checkRecursion(self, parseElementList): subRecCheckList = parseElementList[:] + [self] for e in self.exprs: e.checkRecursion(subRecCheckList) class ParseElementEnhance(ParserElement): """Abstract subclass of ParserElement, for combining and post-processing parsed tokens.""" def __init__(self, expr, savelist=False): super().__init__(savelist) if isinstance(expr, str): expr = Literal(expr) self.expr = expr self.strRepr = None if expr is not None: self.mayIndexError = expr.mayIndexError self.mayReturnEmpty = expr.mayReturnEmpty self.setWhitespaceChars(expr.whiteChars) self.skipWhitespace = expr.skipWhitespace self.saveAsList = expr.saveAsList self.callPreparse = expr.callPreparse self.ignoreExprs.extend(expr.ignoreExprs) def parseImpl(self, instring, loc, doActions=True): if self.expr is not None: return self.expr._parse(instring, loc, doActions, callPreParse=False) else: raise ParseException("", loc, self.errmsg, self) def leaveWhitespace(self): self.skipWhitespace = False self.expr = self.expr.copy() if self.expr is not None: self.expr.leaveWhitespace() return self def ignore(self, other): if isinstance(other, Suppress): if other not in self.ignoreExprs: super().ignore(other) if self.expr is not None: self.expr.ignore(self.ignoreExprs[-1]) else: super().ignore(other) if self.expr is not None: self.expr.ignore(self.ignoreExprs[-1]) return self def streamline(self): super().streamline() if self.expr is not None: self.expr.streamline() return self def 
checkRecursion(self, parseElementList): if self in parseElementList: raise RecursiveGrammarException(parseElementList+[self]) subRecCheckList = parseElementList[:] + [self] if self.expr is not None: self.expr.checkRecursion(subRecCheckList) def validate(self, validateTrace=None): if validateTrace is None: validateTrace = [] tmp = validateTrace[:]+[self] if self.expr is not None: self.expr.validate(tmp) self.checkRecursion([]) def __str__(self): try: return super().__str__() except Exception: pass if self.strRepr is None and self.expr is not None: self.strRepr = f"{self.__class__.__name__}:({str(self.expr)})" return self.strRepr class FollowedBy(ParseElementEnhance): """Lookahead matching of the given parse expression. FollowedBy does *not* advance the parsing position within the input string, it only verifies that the specified parse expression matches at the current position. FollowedBy always returns a null token list.""" def __init__(self, expr): super().__init__(expr) self.mayReturnEmpty = True def parseImpl(self, instring, loc, doActions=True): self.expr.tryParse(instring, loc) return loc, [] class NotAny(ParseElementEnhance): """Lookahead to disallow matching with the given parse expression. NotAny does *not* advance the parsing position within the input string, it only verifies that the specified parse expression does *not* match at the current position. Also, NotAny does *not* skip over leading whitespace. NotAny always returns a null token list. May be constructed using the '~' operator.""" def __init__(self, expr): super().__init__(expr) self.skipWhitespace = False # do NOT use self.leaveWhitespace(), don't want to propagate to exprs self.mayReturnEmpty = True self.errmsg = "Found unwanted token, "+str(self.expr) def parseImpl(self, instring, loc, doActions=True): try: self.expr.tryParse(instring, loc) except (ParseException, IndexError): pass else: exc = self.myException exc.loc = loc exc.pstr = instring raise exc return loc, [] def __str__(self): if hasattr(self, "name"): return self.name if self.strRepr is None: self.strRepr = "~{" + str(self.expr) + "}" return self.strRepr class ZeroOrMore(ParseElementEnhance): """Optional repetition of zero or more of the given expression.""" def __init__(self, expr): super().__init__(expr) self.mayReturnEmpty = True def parseImpl(self, instring, loc, doActions=True): tokens = [] try: loc, tokens = self.expr._parse(instring, loc, doActions, callPreParse=False) hasIgnoreExprs = (len(self.ignoreExprs) > 0) while True: if hasIgnoreExprs: preloc = self._skipIgnorables(instring, loc) else: preloc = loc loc, tmptokens = self.expr._parse(instring, preloc, doActions) if tmptokens or tmptokens.keys(): tokens += tmptokens except (ParseException, IndexError): pass return loc, tokens def __str__(self): if hasattr(self, "name"): return self.name if self.strRepr is None: self.strRepr = "[" + str(self.expr) + "]..." 
return self.strRepr def setResultsName(self, name, listAllMatches=False): ret = super().setResultsName(name, listAllMatches) ret.saveAsList = True return ret class OneOrMore(ParseElementEnhance): """Repetition of one or more of the given expression.""" def parseImpl(self, instring, loc, doActions=True): # must be at least one loc, tokens = self.expr._parse(instring, loc, doActions, callPreParse=False) try: hasIgnoreExprs = (len(self.ignoreExprs) > 0) while True: if hasIgnoreExprs: preloc = self._skipIgnorables(instring, loc) else: preloc = loc loc, tmptokens = self.expr._parse(instring, preloc, doActions) if tmptokens or tmptokens.keys(): tokens += tmptokens except (ParseException, IndexError): pass return loc, tokens def __str__(self): if hasattr(self, "name"): return self.name if self.strRepr is None: self.strRepr = "{" + str(self.expr) + "}..." return self.strRepr def setResultsName(self, name, listAllMatches=False): ret = super().setResultsName(name, listAllMatches) ret.saveAsList = True return ret class _NullToken: def __bool__(self): return False __nonzero__ = __bool__ def __str__(self): return "" _optionalNotMatched = _NullToken() class Optional(ParseElementEnhance): """Optional matching of the given expression. A default return string can also be specified, if the optional expression is not found. """ def __init__(self, exprs, default=_optionalNotMatched): super().__init__(exprs, savelist=False) self.defaultValue = default self.mayReturnEmpty = True def parseImpl(self, instring, loc, doActions=True): try: loc, tokens = self.expr._parse(instring, loc, doActions, callPreParse=False) except (ParseException, IndexError): if self.defaultValue is not _optionalNotMatched: if self.expr.resultsName: tokens = ParseResults([self.defaultValue]) tokens[self.expr.resultsName] = self.defaultValue else: tokens = [self.defaultValue] else: tokens = [] return loc, tokens def __str__(self): if hasattr(self, "name"): return self.name if self.strRepr is None: self.strRepr = "[" + str(self.expr) + "]" return self.strRepr class SkipTo(ParseElementEnhance): """Token for skipping over all undefined text until the matched expression is found. If include is set to true, the matched expression is also consumed. The ignore argument is used to define grammars (typically quoted strings and comments) that might contain false matches. """ def __init__(self, other, include=False, ignore=None): super().__init__(other) if ignore is not None: self.expr = self.expr.copy() self.expr.ignore(ignore) self.mayReturnEmpty = True self.mayIndexError = False self.includeMatch = include self.asList = False self.errmsg = "No match found for "+str(self.expr) def parseImpl(self, instring, loc, doActions=True): startLoc = loc instrlen = len(instring) expr = self.expr while loc <= instrlen: try: loc = expr._skipIgnorables(instring, loc) expr._parse(instring, loc, doActions=False, callPreParse=False) if self.includeMatch: skipText = instring[startLoc:loc] loc, mat = expr._parse(instring, loc, doActions, callPreParse=False) if mat: skipRes = ParseResults(skipText) skipRes += mat return loc, [skipRes] else: return loc, [skipText] else: return loc, [instring[startLoc:loc]] except (ParseException, IndexError): loc += 1 exc = self.myException exc.loc = loc exc.pstr = instring raise exc class Forward(ParseElementEnhance): """Forward declaration of an expression to be defined later - used for recursive grammars, such as algebraic infix notation. When the expression is known, it is assigned to the Forward variable using the '<<' operator. 
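For example, a sketch of a recursively nested list of numbers (the names are hypothetical)::

    expr = Forward()
    atom = Word(nums) | Group(Suppress("(") + expr + Suppress(")"))
    expr << OneOrMore(atom)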
Note: take care when assigning to Forward not to overlook precedence of operators. Specifically, '|' has a lower precedence than '<<', so that:: fwdExpr << a | b | c will actually be evaluated as:: (fwdExpr << a) | b | c thereby leaving b and c out as parseable alternatives. It is recommended that you explicitly group the values inserted into the Forward:: fwdExpr << (a | b | c) """ def __init__(self, other=None): super().__init__(other, savelist=False) def __lshift__(self, other): if isinstance(other, str): other = Literal(other) self.expr = other self.mayReturnEmpty = other.mayReturnEmpty self.strRepr = None self.mayIndexError = self.expr.mayIndexError self.mayReturnEmpty = self.expr.mayReturnEmpty self.setWhitespaceChars(self.expr.whiteChars) self.skipWhitespace = self.expr.skipWhitespace self.saveAsList = self.expr.saveAsList self.ignoreExprs.extend(self.expr.ignoreExprs) return None def leaveWhitespace(self): self.skipWhitespace = False return self def streamline(self): if not self.streamlined: self.streamlined = True if self.expr is not None: self.expr.streamline() return self def validate(self, validateTrace=None): if validateTrace is None: validateTrace = [] if self not in validateTrace: tmp = validateTrace[:]+[self] if self.expr is not None: self.expr.validate(tmp) self.checkRecursion([]) def __str__(self): if hasattr(self, "name"): return self.name self.__class__ = _ForwardNoRecurse try: if self.expr is not None: retString = str(self.expr) else: retString = "None" finally: self.__class__ = Forward return "Forward: "+retString def copy(self): if self.expr is not None: return super().copy() else: ret = Forward() ret << self return ret class _ForwardNoRecurse(Forward): def __str__(self): return "..." class TokenConverter(ParseElementEnhance): """Abstract subclass of ParseExpression, for converting parsed results.""" def __init__(self, expr, savelist=False): super().__init__(expr) # , savelist ) self.saveAsList = False class Upcase(TokenConverter): """Converter to upper case all matching tokens.""" def __init__(self, *args): super().__init__(*args) warnings.warn( "Upcase class is deprecated, use upcaseTokens parse action instead", DeprecationWarning, stacklevel=2) def postParse(self, instring, loc, tokenlist): return list(map(string.upper, tokenlist)) class Combine(TokenConverter): """Converter to concatenate all matching tokens to a single string. By default, the matching patterns must also be contiguous in the input string; this can be disabled by specifying 'adjacent=False' in the constructor. 
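For example (an illustrative sketch)::

    version = Combine(Word(nums) + "." + Word(nums))
    version.parseString("3.14")            # -> ['3.14'], a single combined token
    version.parseString("3 .14")           # raises ParseException - the tokens are not adjacent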
""" def __init__(self, expr, joinString="", adjacent=True): super().__init__(expr) # suppress whitespace-stripping in contained parse expressions, but re-enable it on the Combine itself if adjacent: self.leaveWhitespace() self.adjacent = adjacent self.skipWhitespace = True self.joinString = joinString def ignore(self, other): if self.adjacent: ParserElement.ignore(self, other) else: super().ignore(other) return self def postParse(self, instring, loc, tokenlist): retToks = tokenlist.copy() del retToks[:] retToks += ParseResults(["".join(tokenlist._asStringList(self.joinString))], modal=self.modalResults) if self.resultsName and len(retToks.keys()) > 0: return [retToks] else: return retToks class Group(TokenConverter): """Converter to return the matched tokens as a list - useful for returning tokens of ZeroOrMore and OneOrMore expressions.""" def __init__(self, expr): super().__init__(expr) self.saveAsList = True def postParse(self, instring, loc, tokenlist): return [tokenlist] class Dict(TokenConverter): """Converter to return a repetitive expression as a list, but also as a dictionary. Each element can also be referenced using the first token in the expression as its key. Useful for tabular report scraping when the first column can be used as a item key. """ def __init__(self, exprs): super().__init__(exprs) self.saveAsList = True def postParse(self, instring, loc, tokenlist): for i, tok in enumerate(tokenlist): if len(tok) == 0: continue ikey = tok[0] if isinstance(ikey, int): ikey = str(tok[0]).strip() if len(tok) == 1: tokenlist[ikey] = _ParseResultsWithOffset("", i) elif len(tok) == 2 and not isinstance(tok[1], ParseResults): tokenlist[ikey] = _ParseResultsWithOffset(tok[1], i) else: dictvalue = tok.copy() # ParseResults(i) del dictvalue[0] if len(dictvalue) != 1 or (isinstance(dictvalue, ParseResults) and dictvalue.keys()): tokenlist[ikey] = _ParseResultsWithOffset(dictvalue, i) else: tokenlist[ikey] = _ParseResultsWithOffset(dictvalue[0], i) if self.resultsName: return [tokenlist] else: return tokenlist class Suppress(TokenConverter): """Converter for ignoring the results of a parsed expression.""" def postParse(self, instring, loc, tokenlist): return [] def suppress(self): return self class OnlyOnce: """Wrapper for parse actions, to ensure they are only called once.""" def __init__(self, methodCall): self.callable = ParserElement._normalizeParseActionArgs(methodCall) self.called = False def __call__(self, s, l, t): if not self.called: results = self.callable(s, l, t) self.called = True return results raise ParseException(s, l, "") def reset(self): self.called = False def traceParseAction(f): """Decorator for debugging parse actions.""" f = ParserElement._normalizeParseActionArgs(f) def z(*paArgs): thisFunc = f.func_name s, l, t = paArgs[-3:] if len(paArgs) > 3: thisFunc = paArgs[0].__class__.__name__ + '.' 
+ thisFunc sys.stderr.write(">>entering %s(line: '%s', %d, %s)\n" % (thisFunc, line(l, s), l, t)) try: ret = f(*paArgs) except Exception as exc: sys.stderr.write(f"<", "|".join( [ _escapeRegexChars(sym) for sym in symbols] )) try: if len(symbols) == len("".join(symbols)): return Regex("[%s]" % "".join([_escapeRegexRangeChars(sym) for sym in symbols])) else: return Regex("|".join([re.escape(sym) for sym in symbols])) except Exception: warnings.warn( "Exception creating Regex for oneOf, building MatchFirst", SyntaxWarning, stacklevel=2) # last resort, just use MatchFirst return MatchFirst([parseElementClass(sym) for sym in symbols]) def dictOf(key, value): """Helper to easily and clearly define a dictionary by specifying the respective patterns for the key and value. Takes care of defining the Dict, ZeroOrMore, and Group tokens in the proper order. The key pattern can include delimiting markers or punctuation, as long as they are suppressed, thereby leaving the significant key text. The value pattern can include named results, so that the Dict results can include named token fields. """ return Dict(ZeroOrMore(Group(key + value))) # convenience constants for positional expressions empty = Empty().setName("empty") lineStart = LineStart().setName("lineStart") lineEnd = LineEnd().setName("lineEnd") stringStart = StringStart().setName("stringStart") stringEnd = StringEnd().setName("stringEnd") _escapedPunc = Word(_bslash, r"\[]-*.$+^?()~ ", exact=2).setParseAction(lambda s, l, t: t[0][1]) _printables_less_backslash = "".join([c for c in printables if c not in r"\]"]) _escapedHexChar = Combine(Suppress(_bslash + "0x") + Word(hexnums)).setParseAction(lambda s, l, t: chr(int(t[0], 16))) _escapedOctChar = Combine(Suppress(_bslash) + Word("0", "01234567")).setParseAction(lambda s, l, t: chr(int(t[0], 8))) _singleChar = _escapedPunc | _escapedHexChar | _escapedOctChar | Word(_printables_less_backslash, exact=1) _charRange = Group(_singleChar + Suppress("-") + _singleChar) _reBracketExpr = Literal("[") + Optional("^").setResultsName("negate") + Group(OneOrMore(_charRange | _singleChar)).setResultsName("body") + "]" def _expanded(p): return (isinstance(p, ParseResults) and ''.join([chr(c) for c in range(ord(p[0]), ord(p[1])+1)]) or p) def srange(s): r"""Helper to easily define string ranges for use in Word construction. Borrows syntax from regexp '[]' string range definitions:: srange("[0-9]") -> "0123456789" srange("[a-z]") -> "abcdefghijklmnopqrstuvwxyz" srange("[a-z$_]") -> "abcdefghijklmnopqrstuvwxyz$_" The input string must be enclosed in []'s, and the returned string is the expanded character set joined into a single string. The values enclosed in the []'s may be:: a single character an escaped character with a leading backslash (such as \- or \]) an escaped hex character with a leading '\0x' (\0x21, which is a '!' character) an escaped octal character with a leading '\0' (\041, which is a '!' character) a range of any of the above, separated by a dash ('a-z', etc.) any combination of the above ('aeiouy', 'a-zA-Z0-9_$', etc.) """ try: return "".join([_expanded(part) for part in _reBracketExpr.parseString(s).body]) except Exception: return "" def matchOnlyAtCol(n): """Helper method for defining parse actions that require matching at a specific column in the input text. 
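For example (an illustrative sketch; the name numInCol5 is hypothetical)::

    # only accept a number if it starts in column 5 of its line
    numInCol5 = Word(nums).setParseAction(matchOnlyAtCol(5))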
""" def verifyCol(strg, locn, toks): if col(locn, strg) != n: raise ParseException(strg, locn, "matched token not at column %d" % n) return verifyCol def replaceWith(replStr): """Helper method for common parse actions that simply return a literal value. Especially useful when used with transformString(). """ def _replFunc(*args): return [replStr] return _replFunc def removeQuotes(s, l, t): """Helper parse action for removing quotation marks from parsed quoted strings. To use, add this parse action to quoted string using:: quotedString.setParseAction( removeQuotes ) """ return t[0][1:-1] def upcaseTokens(s, l, t): """Helper parse action to convert tokens to upper case.""" return [tt.upper() for tt in map(str, t)] def downcaseTokens(s, l, t): """Helper parse action to convert tokens to lower case.""" return [tt.lower() for tt in map(str, t)] def keepOriginalText(s, startLoc, t): """Helper parse action to preserve original parsed text, overriding any nested parse actions.""" try: endloc = getTokensEndLoc() except ParseException: raise ParseFatalException("incorrect usage of keepOriginalText - may only be called as a parse action") del t[:] t += ParseResults(s[startLoc:endloc]) return t def getTokensEndLoc(): """Method to be called from within a parse action to determine the end location of the parsed tokens.""" import inspect fstack = inspect.stack() try: # search up the stack (through intervening argument normalizers) for correct calling routine for f in fstack[2:]: if f[3] == "_parseNoCache": endloc = f[0].f_locals["loc"] return endloc else: raise ParseFatalException("incorrect usage of getTokensEndLoc - may only be called from within a parse action") finally: del fstack def _makeTags(tagStr, xml): """Internal helper to construct opening and closing tag expressions, given a tag name""" if isinstance(tagStr, str): resname = tagStr tagStr = Keyword(tagStr, caseless=not xml) else: resname = tagStr.name tagAttrName = Word(alphas, alphanums+"_-:") if xml: tagAttrValue = dblQuotedString.copy().setParseAction(removeQuotes) openTag = Suppress("<") + tagStr + \ Dict(ZeroOrMore(Group(tagAttrName + Suppress("=") + tagAttrValue))) + \ Optional("/", default=[False]).setResultsName("empty").setParseAction(lambda s, l, t: t[0] == '/') + Suppress(">") else: printablesLessRAbrack = "".join([c for c in printables if c not in ">"]) tagAttrValue = quotedString.copy().setParseAction(removeQuotes) | Word(printablesLessRAbrack) openTag = Suppress("<") + tagStr + \ Dict(ZeroOrMore(Group(tagAttrName.setParseAction(downcaseTokens) + Optional(Suppress("=") + tagAttrValue)))) + \ Optional("/", default=[False]).setResultsName("empty").setParseAction(lambda s, l, t: t[0] == '/') + Suppress(">") closeTag = Combine(_L("") openTag = openTag.setResultsName("start"+"".join(resname.replace(":", " ").title().split())).setName("<%s>" % tagStr) closeTag = closeTag.setResultsName("end"+"".join(resname.replace(":", " ").title().split())).setName("" % tagStr) return openTag, closeTag def makeHTMLTags(tagStr): """Helper to construct opening and closing tag expressions for HTML, given a tag name""" return _makeTags(tagStr, False) def makeXMLTags(tagStr): """Helper to construct opening and closing tag expressions for XML, given a tag name""" return _makeTags(tagStr, True) def withAttribute(*args, **attrDict): """Helper to create a validating parse action to be used with start tags created with makeXMLTags or makeHTMLTags. 
Use withAttribute to qualify a starting tag with a required attribute value, to avoid false matches on common tags such as <TD> or <DIV>
    . Call withAttribute with a series of attribute names and values. Specify the list of filter attributes names and values as: - keyword arguments, as in (class="Customer",align="right"), or - a list of name-value tuples, as in ( ("ns1:class", "Customer"), ("ns2:align","right") ) For attribute names with a namespace prefix, you must use the second form. Attribute names are matched insensitive to upper/lower case. To verify that the attribute exists, but without specifying a value, pass withAttribute.ANY_VALUE as the value. """ if args: attrs = args[:] else: attrs = attrDict.items() attrs = [(k, v) for k, v in attrs] def pa(s, l, tokens): for attrName, attrValue in attrs: if attrName not in tokens: raise ParseException(s, l, "no matching attribute " + attrName) if attrValue != withAttribute.ANY_VALUE and tokens[attrName] != attrValue: raise ParseException(s, l, f"attribute '{attrName}' has value '{tokens[attrName]}', must be '{attrValue}'") return pa withAttribute.ANY_VALUE = object() opAssoc = _Constants() opAssoc.LEFT = object() opAssoc.RIGHT = object() def operatorPrecedence(baseExpr, opList): """Helper method for constructing grammars of expressions made up of operators working in a precedence hierarchy. Operators may be unary or binary, left- or right-associative. Parse actions can also be attached to operator expressions. Parameters: - baseExpr - expression representing the most basic element for the nested - opList - list of tuples, one for each operator precedence level in the expression grammar; each tuple is of the form (opExpr, numTerms, rightLeftAssoc, parseAction), where: - opExpr is the pyparsing expression for the operator; may also be a string, which will be converted to a Literal; if numTerms is 3, opExpr is a tuple of two expressions, for the two operators separating the 3 terms - numTerms is the number of terms for this operator (must be 1, 2, or 3) - rightLeftAssoc is the indicator whether the operator is right or left associative, using the pyparsing-defined constants opAssoc.RIGHT and opAssoc.LEFT. 
- parseAction is the parse action to be associated with expressions matching this operator expression (the parse action tuple member may be omitted) """ ret = Forward() lastExpr = baseExpr | (Suppress('(') + ret + Suppress(')')) for operDef in opList: opExpr, arity, rightLeftAssoc, pa = (operDef + (None,))[:4] if arity == 3: if opExpr is None or len(opExpr) != 2: raise ValueError("if numterms=3, opExpr must be a tuple or list of two expressions") opExpr1, opExpr2 = opExpr thisExpr = Forward() # .setName("expr%d" % i) if rightLeftAssoc == opAssoc.LEFT: if arity == 1: matchExpr = FollowedBy(lastExpr + opExpr) + Group(lastExpr + OneOrMore(opExpr)) elif arity == 2: if opExpr is not None: matchExpr = FollowedBy(lastExpr + opExpr + lastExpr) + Group(lastExpr + OneOrMore(opExpr + lastExpr)) else: matchExpr = FollowedBy(lastExpr+lastExpr) + Group(lastExpr + OneOrMore(lastExpr)) elif arity == 3: matchExpr = FollowedBy(lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr) + \ Group(lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr) else: raise ValueError("operator must be unary (1), binary (2), or ternary (3)") elif rightLeftAssoc == opAssoc.RIGHT: if arity == 1: # try to avoid LR with this extra test if not isinstance(opExpr, Optional): opExpr = Optional(opExpr) matchExpr = FollowedBy(opExpr.expr + thisExpr) + Group(opExpr + thisExpr) elif arity == 2: if opExpr is not None: matchExpr = FollowedBy(lastExpr + opExpr + thisExpr) + Group(lastExpr + OneOrMore(opExpr + thisExpr)) else: matchExpr = FollowedBy(lastExpr + thisExpr) + Group(lastExpr + OneOrMore(thisExpr)) elif arity == 3: matchExpr = FollowedBy(lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr) + \ Group(lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr) else: raise ValueError("operator must be unary (1), binary (2), or ternary (3)") else: raise ValueError("operator must indicate right or left associativity") if pa: matchExpr.setParseAction(pa) thisExpr << (matchExpr | lastExpr) lastExpr = thisExpr ret << lastExpr return ret dblQuotedString = Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\x[0-9a-fA-F]+)|(?:\\.))*"').setName("string enclosed in double quotes") sglQuotedString = Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\x[0-9a-fA-F]+)|(?:\\.))*'").setName("string enclosed in single quotes") quotedString = Regex(r'''(?:"(?:[^"\n\r\\]|(?:"")|(?:\\x[0-9a-fA-F]+)|(?:\\.))*")|(?:'(?:[^'\n\r\\]|(?:'')|(?:\\x[0-9a-fA-F]+)|(?:\\.))*')''').setName("quotedString using single or double quotes") unicodeString = Combine(_L('u') + quotedString.copy()) def nestedExpr(opener="(", closer=")", content=None, ignoreExpr=quotedString): """Helper method for defining nested lists enclosed in opening and closing delimiters ("(" and ")" are the default). Parameters: - opener - opening character for a nested list (default="("); can also be a pyparsing expression - closer - closing character for a nested list (default=")"); can also be a pyparsing expression - content - expression for items within the nested lists (default=None) - ignoreExpr - expression for ignoring opening and closing delimiters (default=quotedString) If an expression is not provided for the content argument, the nested expression will capture all whitespace-delimited content between delimiters as a list of separate values. Use the ignoreExpr argument to define expressions that may contain opening or closing characters that should not be treated as opening or closing characters for nesting, such as quotedString or a comment expression. Specify multiple expressions using an Or or MatchFirst. 
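For example, with all arguments left at their defaults (illustrative)::

    nestedExpr().parseString("(a (b c) d)")   # -> [['a', ['b', 'c'], 'd']]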
The default is quotedString, but if no expressions are to be ignored, then pass None for this argument. """ if opener == closer: raise ValueError("opening and closing strings cannot be the same") if content is None: if isinstance(opener, str) and isinstance(closer, str): if ignoreExpr is not None: content = (Combine(OneOrMore(~ignoreExpr + CharsNotIn(opener+closer+ParserElement.DEFAULT_WHITE_CHARS, exact=1))) .setParseAction(lambda t: t[0].strip())) else: content = (empty+CharsNotIn(opener+closer+ParserElement.DEFAULT_WHITE_CHARS).setParseAction(lambda t: t[0].strip())) else: raise ValueError("opening and closing arguments must be strings if no content expression is given") ret = Forward() if ignoreExpr is not None: ret << Group(Suppress(opener) + ZeroOrMore(ignoreExpr | ret | content) + Suppress(closer)) else: ret << Group(Suppress(opener) + ZeroOrMore(ret | content) + Suppress(closer)) return ret def indentedBlock(blockStatementExpr, indentStack, indent=True): """Helper method for defining space-delimited indentation blocks, such as those used to define block statements in Python source code. Parameters: - blockStatementExpr - expression defining syntax of statement that is repeated within the indented block - indentStack - list created by caller to manage indentation stack (multiple statementWithIndentedBlock expressions within a single grammar should share a common indentStack) - indent - boolean indicating whether block must be indented beyond the current level; set to False for block of left-most statements (default=True) A valid block must contain at least one blockStatement. """ def checkPeerIndent(s, l, t): if l >= len(s): return curCol = col(l, s) if curCol != indentStack[-1]: if curCol > indentStack[-1]: raise ParseFatalException(s, l, "illegal nesting") raise ParseException(s, l, "not a peer entry") def checkSubIndent(s, l, t): curCol = col(l, s) if curCol > indentStack[-1]: indentStack.append(curCol) else: raise ParseException(s, l, "not a subentry") def checkUnindent(s, l, t): if l >= len(s): return curCol = col(l, s) if not(indentStack and curCol < indentStack[-1] and curCol <= indentStack[-2]): raise ParseException(s, l, "not an unindent") indentStack.pop() NL = OneOrMore(LineEnd().setWhitespaceChars("\t ").suppress()) INDENT = Empty() + Empty().setParseAction(checkSubIndent) PEER = Empty().setParseAction(checkPeerIndent) UNDENT = Empty().setParseAction(checkUnindent) if indent: smExpr = Group( Optional(NL) + FollowedBy(blockStatementExpr) + INDENT + (OneOrMore(PEER + Group(blockStatementExpr) + Optional(NL))) + UNDENT) else: smExpr = Group(Optional(NL) + (OneOrMore(PEER + Group(blockStatementExpr) + Optional(NL)))) blockStatementExpr.ignore("\\" + LineEnd()) return smExpr alphas8bit = srange(r"[\0xc0-\0xd6\0xd8-\0xf6\0xf8-\0xff]") punc8bit = srange(r"[\0xa1-\0xbf\0xd7\0xf7]") anyOpenTag, anyCloseTag = makeHTMLTags(Word(alphas, alphanums+"_:")) commonHTMLEntity = Combine(_L("&") + oneOf("gt lt amp nbsp quot").setResultsName("entity") + ";") _htmlEntityMap = dict(zip("gt lt amp nbsp quot".split(), "><& '")) def replaceHTMLEntity(t): return t.entity in _htmlEntityMap and _htmlEntityMap[t.entity] or None # it's easy to get these comment structures wrong - they're very common, so may as well make them available cStyleComment = Regex(r"/\*(?:[^*]*\*+)+?/").setName("C style comment") htmlComment = Regex(r"<!--[\s\S]*?-->") restOfLine = Regex(r".*").leaveWhitespace() dblSlashComment = Regex(r"\/\/(\\\n|.)*").setName("// comment") cppStyleComment =
Regex(r"/(?:\*(?:[^*]*\*+)+?/|/[^\n]*(?:\n[^\n]*)*?(?:(?" + str(tokenlist)) print("tokens = " + str(tokens)) print("tokens.columns = " + str(tokens.columns)) print("tokens.tables = " + str(tokens.tables)) print(tokens.asXML("SQL", True)) except ParseBaseException as err: print(teststring + "->") print(err.line) print(" "*(err.column-1) + "^") print(err) print() selectToken = CaselessLiteral("select") fromToken = CaselessLiteral("from") ident = Word(alphas, alphanums + "_$") columnName = delimitedList(ident, ".", combine=True).setParseAction(upcaseTokens) columnNameList = Group(delimitedList(columnName)) # .setName("columns") tableName = delimitedList(ident, ".", combine=True).setParseAction(upcaseTokens) tableNameList = Group(delimitedList(tableName)) # .setName("tables") simpleSQL = ( selectToken + ('*' | columnNameList).setResultsName("columns") + fromToken + tableNameList.setResultsName("tables")) test("SELECT * from XYZZY, ABC") test("select * from SYS.XYZZY") test("Select A from Sys.dual") test("Select AA,BB,CC from Sys.dual") test("Select A, B, C from Sys.dual") test("Select A, B, C from Sys.dual") test("Xelect A, B, C from Sys.dual") test("Select A, B, C frox Sys.dual") test("Select") test("Select ^^^ frox Sys.dual") test("Select A, B, C from Sys.dual, Table2 ") bx-python-0.8.13/lib/bx_extras/stats.py000066400000000000000000004542531415666465100200430ustar00rootroot00000000000000# Copyright (c) 1999-2002 Gary Strangman; All Rights Reserved. # # This software is distributable under the terms of the GNU # General Public License (GPL) v2, the text of which can be found at # http://www.gnu.org/copyleft/gpl.html. Installing, importing or otherwise # using this module constitutes acceptance of the terms of this License. # # Disclaimer # # This software is provided "as-is". There are no expressed or implied # warranties of any kind, including, but not limited to, the warranties # of merchantability and fittness for a given application. In no event # shall Gary Strangman be liable for any direct, indirect, incidental, # special, exemplary or consequential damages (including, but not limited # to, loss of use, data or profits, or business interruption) however # caused and on any theory of liability, whether in contract, strict # liability or tort (including negligence or otherwise) arising in any way # out of the use of this software, even if advised of the possibility of # such damage. # # Comments and/or additions are welcome (send e-mail to: # strang@nmr.mgh.harvard.edu). # """ stats.py module (Requires pstat.py module.) ################################################# ####### Written by: Gary Strangman ########### ####### Last modified: May 10, 2002 ########### ################################################# A collection of basic statistical functions for python. The function names appear below. IMPORTANT: There are really *3* sets of functions. The first set has an 'l' prefix, which can be used with list or tuple arguments. The second set has an 'a' prefix, which can accept NumPy array arguments. These latter functions are defined only when NumPy is available on the system. The third type has NO prefix (i.e., has the name that appears below). Functions of this set are members of a "Dispatch" class, c/o David Ascher. This class allows different functions to be called depending on the type of the passed arguments. 
Thus, stats.mean is a member of the Dispatch class and stats.mean(range(20)) will call stats.lmean(range(20)) while stats.mean(Numeric.arange(20)) will call stats.amean(Numeric.arange(20)). This is a handy way to keep consistent function names when different argument types require different functions to be called. Having implementated the Dispatch class, however, means that to get info on a given function, you must use the REAL function name ... that is "print stats.lmean.__doc__" or "print stats.amean.__doc__" work fine, while "print stats.mean.__doc__" will print the doc for the Dispatch class. NUMPY FUNCTIONS ('a' prefix) generally have more argument options but should otherwise be consistent with the corresponding list functions. Disclaimers: The function list is obviously incomplete and, worse, the functions are not optimized. All functions have been tested (some more so than others), but they are far from bulletproof. Thus, as with any free software, no warranty or guarantee is expressed or implied. :-) A few extra functions that don't appear in the list below can be found by interested treasure-hunters. These functions don't necessarily have both list and array versions but were deemed useful CENTRAL TENDENCY: geometricmean harmonicmean mean median medianscore mode MOMENTS: moment variation skew kurtosis skewtest (for Numpy arrays only) kurtosistest (for Numpy arrays only) normaltest (for Numpy arrays only) ALTERED VERSIONS: tmean (for Numpy arrays only) tvar (for Numpy arrays only) tmin (for Numpy arrays only) tmax (for Numpy arrays only) tstdev (for Numpy arrays only) tsem (for Numpy arrays only) describe FREQUENCY STATS: itemfreq scoreatpercentile percentileofscore histogram cumfreq relfreq VARIABILITY: obrientransform samplevar samplestdev signaltonoise (for Numpy arrays only) var stdev sterr sem z zs zmap (for Numpy arrays only) TRIMMING FCNS: threshold (for Numpy arrays only) trimboth trim1 round (round all vals to 'n' decimals; Numpy only) CORRELATION FCNS: covariance (for Numpy arrays only) correlation (for Numpy arrays only) paired pearsonr spearmanr pointbiserialr kendalltau linregress INFERENTIAL STATS: ttest_1samp ttest_ind ttest_rel chisquare ks_2samp mannwhitneyu ranksums wilcoxont kruskalwallish friedmanchisquare PROBABILITY CALCS: chisqprob erfcc zprob ksprob fprob betacf gammln betai ANOVA FUNCTIONS: F_oneway F_value SUPPORT FUNCTIONS: writecc incr sign (for Numpy arrays only) sum cumsum ss summult sumdiffsquared square_of_sums shellsort rankdata outputpairedstats findwithin """ # CHANGE LOG: # =========== # 02-11-19 ... fixed attest_ind and attest_rel for div-by-zero Overflows # 02-05-10 ... fixed lchisqprob indentation (failed when df=even) # 00-12-28 ... removed aanova() to separate module, fixed licensing to # match Python License, fixed doc string & imports # 00-04-13 ... pulled all "global" statements, except from aanova() # added/fixed lots of documentation, removed io.py dependency # changed to version 0.5 # 99-11-13 ... added asign() function # 99-11-01 ... changed version to 0.4 ... enough incremental changes now # 99-10-25 ... added acovariance and acorrelation functions # 99-10-10 ... fixed askew/akurtosis to avoid divide-by-zero errors # added aglm function (crude, but will be improved) # 99-10-04 ... upgraded acumsum, ass, asummult, asamplevar, avar, etc. to # all handle lists of 'dimension's and keepdims # REMOVED ar0, ar2, ar3, ar4 and replaced them with around # reinserted fixes for abetai to avoid math overflows # 99-09-05 ... 
rewrote achisqprob/aerfcc/aksprob/afprob/abetacf/abetai to # handle multi-dimensional arrays (whew!) # 99-08-30 ... fixed l/amoment, l/askew, l/akurtosis per D'Agostino (1990) # added anormaltest per same reference # re-wrote azprob to calc arrays of probs all at once # 99-08-22 ... edited attest_ind printing section so arrays could be rounded # 99-08-19 ... fixed amean and aharmonicmean for non-error(!) overflow on # short/byte arrays (mean of #s btw 100-300 = -150??) # 99-08-09 ... fixed asum so that the None case works for Byte arrays # 99-08-08 ... fixed 7/3 'improvement' to handle t-calcs on N-D arrays # 99-07-03 ... improved attest_ind, attest_rel (zero-division errortrap) # 99-06-24 ... fixed bug(?) in attest_ind (n1=a.shape[0]) # 04/11/99 ... added asignaltonoise, athreshold functions, changed all # max/min in array section to N.maximum/N.minimum, # fixed square_of_sums to prevent integer overflow # 04/10/99 ... !!! Changed function name ... sumsquared ==> square_of_sums # 03/18/99 ... Added ar0, ar2, ar3 and ar4 rounding functions # 02/28/99 ... Fixed aobrientransform to return an array rather than a list # 01/15/99 ... Essentially ceased updating list-versions of functions (!!!) # 01/13/99 ... CHANGED TO VERSION 0.3 # fixed bug in a/lmannwhitneyu p-value calculation # 12/31/98 ... fixed variable-name bug in ldescribe # 12/19/98 ... fixed bug in findwithin (fcns needed pstat. prefix) # 12/16/98 ... changed amedianscore to return float (not array) for 1 score # 12/14/98 ... added atmin and atmax functions # removed umath from import line (not needed) # l/ageometricmean modified to reduce chance of overflows (take # nth root first, then multiply) # 12/07/98 ... added __version__variable (now 0.2) # removed all 'stats.' from anova() fcn # 12/06/98 ... changed those functions (except shellsort) that altered # arguments in-place ... cumsum, ranksort, ... # updated (and fixed some) doc-strings # 12/01/98 ... added anova() function (requires NumPy) # incorporated Dispatch class # 11/12/98 ... added functionality to amean, aharmonicmean, ageometricmean # added 'asum' function (added functionality to N.add.reduce) # fixed both moment and amoment (two errors) # changed name of skewness and askewness to skew and askew # fixed (a)histogram (which sometimes counted points = len(inlist)/2.0: cfbin = i break LRL = smallest + binsize*cfbin # get lower read limit of that bin cfbelow = cumhist[cfbin-1] freq = float(hist[cfbin]) # frequency IN the 50%ile bin median = LRL + ((len(inlist)/2.0 - cfbelow)/float(freq))*binsize # median formula return median def lmedianscore(inlist): """ Returns the 'middle' score of the passed list. If there is an even number of scores, the mean of the 2 middle scores is returned. Usage: lmedianscore(inlist) """ newlist = sorted(copy.deepcopy(inlist)) if len(newlist) % 2 == 0: # if even number of scores, average middle 2 index = len(newlist)/2 # integer division correct median = float(newlist[index] + newlist[index-1]) / 2 else: index = len(newlist)/2 # int divsion gives mid value when count from 0 median = newlist[index] return median def lmode(inlist): """ Returns a list of the modal (most common) score(s) in the passed list. If there is more than one such score, all are returned. The bin-count for the mode(s) is also returned. 
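Example (illustrative):

    lmode([1, 2, 2, 3, 3])   # -> (2, [2, 3]); both 2 and 3 occur twice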
# MOMENTS

def lmoment(inlist, moment=1):
    """
Calculates the nth moment about the mean for a sample (defaults to
the 1st moment).  Used to calculate coefficients of skewness and kurtosis.

Usage:   lmoment(inlist,moment=1)
Returns: appropriate moment (r) from ... 1/n * SUM((inlist(i)-mean)**r)
"""
    if moment == 1:
        return 0.0
    else:
        mn = mean(inlist)
        n = len(inlist)
        s = 0
        for x in inlist:
            s = s + (x-mn)**moment
        return s/float(n)


def lvariation(inlist):
    """
Returns the coefficient of variation, as defined in CRC Standard
Probability and Statistics, p.6.

Usage:   lvariation(inlist)
"""
    return 100.0*samplestdev(inlist)/float(mean(inlist))


def lskew(inlist):
    """
Returns the skewness of a distribution, as defined in Numerical
Recipes (alternate defn in CRC Standard Probability and Statistics, p.6.)

Usage:   lskew(inlist)
"""
    return moment(inlist, 3)/pow(moment(inlist, 2), 1.5)


def lkurtosis(inlist):
    """
Returns the kurtosis of a distribution, as defined in Numerical
Recipes (alternate defn in CRC Standard Probability and Statistics, p.6.)

Usage:   lkurtosis(inlist)
"""
    return moment(inlist, 4)/pow(moment(inlist, 2), 2.0)


def ldescribe(inlist):
    """
Returns some descriptive statistics of the passed list (assumed to be 1D).

Usage:   ldescribe(inlist)
Returns: n, (min,max), mean, standard deviation, skew, kurtosis
"""
    n = len(inlist)
    mm = (min(inlist), max(inlist))
    m = mean(inlist)
    sd = stdev(inlist)
    sk = skew(inlist)
    kurt = kurtosis(inlist)
    return n, mm, m, sd, sk, kurt


# FREQUENCY STATS

def litemfreq(inlist):
    """
Returns a list of pairs.  Each pair consists of one of the scores in
inlist and its frequency count.  Assumes a 1D list is passed.

Usage:   litemfreq(inlist)
Returns: a 2D frequency table (col [0:n-1]=scores, col n=frequencies)
"""
    scores = sorted(pstat.unique(inlist))
    freq = []
    for item in scores:
        freq.append(inlist.count(item))
    return pstat.abut(scores, freq)


def lscoreatpercentile(inlist, percent):
    """
Returns the score at a given percentile relative to the distribution
given by inlist.

Usage:   lscoreatpercentile(inlist,percent)
"""
    if percent > 1:
        print("\nDividing percent>1 by 100 in lscoreatpercentile().\n")
        percent = percent / 100.0
    targetcf = percent*len(inlist)
    h, lrl, binsize, extras = histogram(inlist)
    cumhist = cumsum(copy.deepcopy(h))
    for i in range(len(cumhist)):
        if cumhist[i] >= targetcf:
            break
    score = binsize * ((targetcf - cumhist[i-1]) / float(h[i])) + (lrl+binsize*i)
    return score
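# Example (illustrative; the result is a histogram-based approximation, so
# the exact value depends on the binning):
#
#     >>> data = list(range(1, 101))
#     >>> round(lscoreatpercentile(data, 0.5))   # 50th percentile
#     50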
def lpercentileofscore(inlist, score, histbins=10, defaultlimits=None):
    """
Returns the percentile value of a score relative to the distribution
given by inlist.  Formula depends on the values used to histogram the data(!).

Usage:   lpercentileofscore(inlist,score,histbins=10,defaultlimits=None)
"""
    h, lrl, binsize, extras = histogram(inlist, histbins, defaultlimits)
    cumhist = cumsum(copy.deepcopy(h))
    i = int((score - lrl)/float(binsize))
    pct = (cumhist[i-1]+((score-(lrl+binsize*i))/float(binsize))*h[i])/float(len(inlist)) * 100
    return pct


def lhistogram(inlist, numbins=10, defaultreallimits=None, printextras=0):
    """
Returns (i) a list of histogram bin counts, (ii) the smallest value
of the histogram binning, and (iii) the bin width (the last 2 are not
necessarily integers).  Default number of bins is 10.  If no sequence object
is given for defaultreallimits, the routine picks (usually non-pretty) bins
spanning all the numbers in the inlist.

Usage:   lhistogram(inlist,numbins=10,defaultreallimits=None,printextras=0)
Returns: list of bin values, lowerreallimit, binsize, extrapoints
"""
    if (defaultreallimits is not None):
        if type(defaultreallimits) not in [list, tuple] or len(defaultreallimits) == 1:
            # only one limit given, assumed to be lower one & upper is calc'd
            lowerreallimit = defaultreallimits
            upperreallimit = 1.0001 * max(inlist)
        else:  # assume both limits given
            lowerreallimit = defaultreallimits[0]
            upperreallimit = defaultreallimits[1]
        binsize = (upperreallimit-lowerreallimit)/float(numbins)
    else:  # no limits given for histogram, both must be calc'd
        estbinwidth = (max(inlist)-min(inlist))/float(numbins) + 1  # 1=>cover all
        binsize = (max(inlist)-min(inlist)+estbinwidth)/float(numbins)
        lowerreallimit = min(inlist) - binsize/2  # lower real limit, 1st bin
    bins = [0]*(numbins)
    extrapoints = 0
    for num in inlist:
        try:
            if (num-lowerreallimit) < 0:
                extrapoints = extrapoints + 1
            else:
                bintoincrement = int((num-lowerreallimit)/float(binsize))
                bins[bintoincrement] = bins[bintoincrement] + 1
        except Exception:
            extrapoints = extrapoints + 1
    if (extrapoints > 0 and printextras == 1):
        print('\nPoints outside given histogram range =', extrapoints)
    return (bins, lowerreallimit, binsize, extrapoints)


def lcumfreq(inlist, numbins=10, defaultreallimits=None):
    """
Returns a cumulative frequency histogram, using the histogram function.

Usage:   lcumfreq(inlist,numbins=10,defaultreallimits=None)
Returns: list of cumfreq bin values, lowerreallimit, binsize, extrapoints
"""
    h, l, b, e = histogram(inlist, numbins, defaultreallimits)
    cumhist = cumsum(copy.deepcopy(h))
    return cumhist, l, b, e


def lrelfreq(inlist, numbins=10, defaultreallimits=None):
    """
Returns a relative frequency histogram, using the histogram function.

Usage:   lrelfreq(inlist,numbins=10,defaultreallimits=None)
Returns: list of relfreq bin values, lowerreallimit, binsize, extrapoints
"""
    h, l, b, e = histogram(inlist, numbins, defaultreallimits)
    for i in range(len(h)):
        h[i] = h[i]/float(len(inlist))
    return h, l, b, e
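# Example (illustrative): the bin counts plus the out-of-range count always
# account for every input point.
#
#     >>> bins, lowlim, binsize, extra = lhistogram([1, 2, 2, 3], numbins=3)
#     >>> bins
#     [1, 2, 1]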
# VARIABILITY FUNCTIONS

def lobrientransform(*args):
    """
Computes a transform on input data (any number of columns).  Used to
test for homogeneity of variance prior to running one-way stats.  From
Maxwell and Delaney, p.112.

Usage:   lobrientransform(*args)
Returns: transformed data for use in an ANOVA
"""
    TINY = 1e-10
    k = len(args)
    n = [0.0]*k
    v = [0.0]*k
    m = [0.0]*k
    nargs = []
    for i in range(k):
        nargs.append(copy.deepcopy(args[i]))
        n[i] = float(len(nargs[i]))
        v[i] = var(nargs[i])
        m[i] = mean(nargs[i])
    for j in range(k):
        for i in range(int(n[j])):  # int() needed: n[j] is a float
            t1 = (n[j]-1.5)*n[j]*(nargs[j][i]-m[j])**2
            t2 = 0.5*v[j]*(n[j]-1.0)
            t3 = (n[j]-1.0)*(n[j]-2.0)
            nargs[j][i] = (t1-t2) / float(t3)
    check = 1
    for j in range(k):
        if v[j] - mean(nargs[j]) > TINY:
            check = 0
    if check != 1:
        raise ValueError('Problem in obrientransform.')
    else:
        return nargs


def lsamplevar(inlist):
    """
Returns the variance of the values in the passed list using N for
the denominator (i.e., DESCRIBES the sample variance only).

Usage:   lsamplevar(inlist)
"""
    n = len(inlist)
    mn = mean(inlist)
    deviations = []
    for item in inlist:
        deviations.append(item-mn)
    return ss(deviations)/float(n)


def lsamplestdev(inlist):
    """
Returns the standard deviation of the values in the passed list using
N for the denominator (i.e., DESCRIBES the sample stdev only).

Usage:   lsamplestdev(inlist)
"""
    return math.sqrt(samplevar(inlist))


def lvar(inlist):
    """
Returns the variance of the values in the passed list using N-1 for
the denominator (i.e., for estimating population variance).

Usage:   lvar(inlist)
"""
    n = len(inlist)
    mn = mean(inlist)
    deviations = [0]*len(inlist)
    for i in range(len(inlist)):
        deviations[i] = inlist[i] - mn
    return ss(deviations)/float(n-1)


def lstdev(inlist):
    """
Returns the standard deviation of the values in the passed list using
N-1 in the denominator (i.e., to estimate population stdev).

Usage:   lstdev(inlist)
"""
    return math.sqrt(var(inlist))


def lsterr(inlist):
    """
Returns the standard error of the values in the passed list using N-1
in the denominator (i.e., to estimate population standard error).

Usage:   lsterr(inlist)
"""
    return stdev(inlist) / float(math.sqrt(len(inlist)))


def lsem(inlist):
    """
Returns the estimated standard error of the mean (sx-bar) of the
values in the passed list.  sem = stdev / sqrt(n)

Usage:   lsem(inlist)
"""
    sd = stdev(inlist)
    n = len(inlist)
    return sd/math.sqrt(n)


def lz(inlist, score):
    """
Returns the z-score for a given input score, given that score and the
list from which that score came.  Not appropriate for population calculations.

Usage:   lz(inlist, score)
"""
    z = (score-mean(inlist))/samplestdev(inlist)
    return z


def lzs(inlist):
    """
Returns a list of z-scores, one for each score in the passed list.

Usage:   lzs(inlist)
"""
    zscores = []
    for item in inlist:
        zscores.append(z(inlist, item))
    return zscores


# TRIMMING FUNCTIONS

def ltrimboth(l, proportiontocut):
    """
Slices off the passed proportion of items from BOTH ends of the passed
list (i.e., with proportiontocut=0.1, slices 'leftmost' 10% AND 'rightmost'
10% of scores).  Assumes list is sorted by magnitude.  Slices off LESS if
proportion results in a non-integer slice index (i.e., conservatively
slices off proportiontocut).

Usage:   ltrimboth(l,proportiontocut)
Returns: trimmed version of list l
"""
    lowercut = int(proportiontocut*len(l))
    uppercut = len(l) - lowercut
    return l[lowercut:uppercut]
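# Example (illustrative; the list must already be sorted):
#
#     >>> ltrimboth([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], 0.2)
#     [3, 4, 5, 6, 7, 8]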
def ltrim1(l, proportiontocut, tail='right'):
    """
Slices off the passed proportion of items from ONE end of the passed
list (i.e., if proportiontocut=0.1, slices off 'leftmost' or 'rightmost'
10% of scores).  Slices off LESS if proportion results in a non-integer
slice index (i.e., conservatively slices off proportiontocut).

Usage:   ltrim1(l,proportiontocut,tail='right')  or set tail='left'
Returns: trimmed version of list l
"""
    if tail == 'right':
        lowercut = 0
        uppercut = len(l) - int(proportiontocut*len(l))
    elif tail == 'left':
        lowercut = int(proportiontocut*len(l))
        uppercut = len(l)
    return l[lowercut:uppercut]


# CORRELATION FUNCTIONS

def lpaired(x, y):
    """
Interactively determines the type of data and then runs the
appropriate statistic for paired group data.

Usage:   lpaired(x,y)
Returns: appropriate statistic name, value, and probability
"""
    samples = ''
    while samples not in ['i', 'r', 'I', 'R', 'c', 'C']:
        print('\nIndependent or related samples, or correlation (i,r,c): ', end=' ')
        samples = input()

    if samples in ['i', 'I', 'r', 'R']:
        print('\nComparing variances ...', end=' ')
        # USE O'BRIEN'S TEST FOR HOMOGENEITY OF VARIANCE, Maxwell & Delaney, p.112
        r = obrientransform(x, y)
        f, p = F_oneway(pstat.colex(r, 0), pstat.colex(r, 1))
        if p < 0.05:
            vartype = 'unequal, p='+str(round(p, 4))
        else:
            vartype = 'equal'
        print(vartype)
        if samples in ['i', 'I']:
            if vartype[0] == 'e':
                t, p = ttest_ind(x, y, 0)
                print('\nIndependent samples t-test:  ', round(t, 4), round(p, 4))
            else:
                if len(x) > 20 or len(y) > 20:
                    z, p = ranksums(x, y)
                    print('\nRank Sums test (NONparametric, n>20):  ', round(z, 4), round(p, 4))
                else:
                    u, p = mannwhitneyu(x, y)
                    print('\nMann-Whitney U-test (NONparametric, ns<20):  ', round(u, 4), round(p, 4))
        else:  # RELATED SAMPLES
            if vartype[0] == 'e':
                t, p = ttest_rel(x, y, 0)
                print('\nRelated samples t-test:  ', round(t, 4), round(p, 4))
            else:
                t, p = ranksums(x, y)
                print('\nWilcoxon T-test (NONparametric):  ', round(t, 4), round(p, 4))
    else:  # CORRELATION ANALYSIS
        corrtype = ''
        while corrtype not in ['c', 'C', 'r', 'R', 'd', 'D']:
            print('\nIs the data Continuous, Ranked, or Dichotomous (c,r,d): ', end=' ')
            corrtype = input()
        if corrtype in ['c', 'C']:
            m, b, r, p, see = linregress(x, y)
            print('\nLinear regression for continuous variables ...')
            lol = [['Slope', 'Intercept', 'r', 'Prob', 'SEestimate'],
                   [round(m, 4), round(b, 4), round(r, 4), round(p, 4), round(see, 4)]]
            pstat.printcc(lol)
        elif corrtype in ['r', 'R']:
            r, p = spearmanr(x, y)
            print('\nCorrelation for ranked variables ...')
            print("Spearman's r: ", round(r, 4), round(p, 4))
        else:  # DICHOTOMOUS
            r, p = pointbiserialr(x, y)
            print('\nAssuming x contains a dichotomous variable ...')
            print('Point Biserial r: ', round(r, 4), round(p, 4))
    print('\n\n')
    return None


def lpearsonr(x, y):
    """
Calculates a Pearson correlation coefficient and the associated
probability value.  Taken from Heiman's Basic Statistics for the Behav.
Sci (2nd), p.195.

Usage:   lpearsonr(x,y)   where x and y are equal-length lists
Returns: Pearson's r value, two-tailed p-value
"""
    TINY = 1.0e-30
    if len(x) != len(y):
        raise ValueError('Input values not paired in pearsonr.  Aborting.')
    n = len(x)
    x = [float(_) for _ in x]
    y = [float(_) for _ in y]
    r_num = n*(summult(x, y)) - sum(x)*sum(y)
    r_den = math.sqrt((n*ss(x) - square_of_sums(x))*(n*ss(y)-square_of_sums(y)))
    r = (r_num / r_den)  # denominator already a float
    df = n-2
    t = r*math.sqrt(df/((1.0-r+TINY)*(1.0+r+TINY)))
    prob = betai(0.5*df, 0.5, df/float(df+t*t))
    return r, prob


def lspearmanr(x, y):
    """
Calculates a Spearman rank-order correlation coefficient.  Taken
from Heiman's Basic Statistics for the Behav. Sci (1st), p.192.

Usage:   lspearmanr(x,y)   where x and y are equal-length lists
Returns: Spearman's r, two-tailed p-value
"""
    if len(x) != len(y):
        raise ValueError('Input values not paired in spearmanr.
Aborting.') n = len(x) rankx = rankdata(x) ranky = rankdata(y) dsq = sumdiffsquared(rankx, ranky) rs = 1 - 6*dsq / float(n*(n**2-1)) t = rs * math.sqrt((n-2) / ((rs+1.0)*(1.0-rs))) df = n-2 probrs = betai(0.5*df, 0.5, df/(df+t*t)) # t already a float # probability values for rs are from part 2 of the spearman function in # Numerical Recipies, p.510. They are close to tables, but not exact. (?) return rs, probrs def lpointbiserialr(x, y): """ Calculates a point-biserial correlation coefficient and the associated probability value. Taken from Heiman's Basic Statistics for the Behav. Sci (1st), p.194. Usage: lpointbiserialr(x,y) where x,y are equal-length lists Returns: Point-biserial r, two-tailed p-value """ TINY = 1e-30 if len(x) != len(y): raise ValueError('INPUT VALUES NOT PAIRED IN pointbiserialr. ABORTING.') data = pstat.abut(x, y) categories = pstat.unique(x) if len(categories) != 2: raise ValueError("Exactly 2 categories required for pointbiserialr().") else: # there are 2 categories, continue codemap = pstat.abut(categories, range(2)) pstat.recode(data, codemap, 0) # recoded x = pstat.linexand(data, 0, categories[0]) y = pstat.linexand(data, 0, categories[1]) xmean = mean(pstat.colex(x, 1)) ymean = mean(pstat.colex(y, 1)) n = len(data) adjust = math.sqrt((len(x)/float(n))*(len(y)/float(n))) rpb = (ymean - xmean)/samplestdev(pstat.colex(data, 1))*adjust df = n-2 t = rpb*math.sqrt(df/((1.0-rpb+TINY)*(1.0+rpb+TINY))) prob = betai(0.5*df, 0.5, df/(df+t*t)) # t already a float return rpb, prob def lkendalltau(x, y): """ Calculates Kendall's tau ... correlation of ordinal data. Adapted from function kendl1 in Numerical Recipies. Needs good test-routine.@@@ Usage: lkendalltau(x,y) Returns: Kendall's tau, two-tailed p-value """ n1 = 0 n2 = 0 iss = 0 for j in range(len(x)-1): for k in range(j, len(y)): a1 = x[j] - x[k] a2 = y[j] - y[k] aa = a1 * a2 if (aa): # neither list has a tie n1 = n1 + 1 n2 = n2 + 1 if aa > 0: iss = iss + 1 else: iss = iss - 1 else: if (a1): n1 = n1 + 1 else: n2 = n2 + 1 tau = iss / math.sqrt(n1*n2) svar = (4.0*len(x)+10.0) / (9.0*len(x)*(len(x)-1)) z = tau / math.sqrt(svar) prob = erfcc(abs(z)/1.4142136) return tau, prob def llinregress(x, y): """ Calculates a regression line on x,y pairs. Usage: llinregress(x,y) x,y are equal-length lists of x-y coordinates Returns: slope, intercept, r, two-tailed prob, sterr-of-estimate """ TINY = 1.0e-20 if len(x) != len(y): raise ValueError('Input values not paired in linregress. Aborting.') n = len(x) x = [float(_) for _ in x] y = [float(_) for _ in y] xmean = mean(x) ymean = mean(y) r_num = float(n*(summult(x, y)) - sum(x)*sum(y)) r_den = math.sqrt((n*ss(x) - square_of_sums(x))*(n*ss(y)-square_of_sums(y))) r = r_num / r_den df = n-2 t = r*math.sqrt(df/((1.0-r+TINY)*(1.0+r+TINY))) prob = betai(0.5*df, 0.5, df/(df+t*t)) slope = r_num / float(n*ss(x) - square_of_sums(x)) intercept = ymean - slope*xmean sterrest = math.sqrt(1-r*r)*samplestdev(y) return slope, intercept, r, prob, sterrest # INFERENTIAL STATISTICS def lttest_1samp(a, popmean, printit=0, name='Sample', writemode='a'): """ Calculates the t-obtained for the independent samples T-test on ONE group of scores a, given a population mean. If printit=1, results are printed to the screen. If printit='filename', the results are output to 'filename' using the given writemode (default=append). Returns t-value, and prob. 
Usage:   lttest_1samp(a,popmean,printit=0,name='Sample',writemode='a')
Returns: t-value, two-tailed prob
"""
    x = mean(a)
    v = var(a)
    n = len(a)
    df = n-1
    svar = ((n-1)*v)/float(df)
    t = (x-popmean)/math.sqrt(svar*(1.0/n))
    prob = betai(0.5*df, 0.5, float(df)/(df+t*t))
    if printit != 0:
        statname = 'Single-sample T-test.'
        outputpairedstats(printit, writemode,
                          'Population', '--', popmean, 0, 0, 0,
                          name, n, x, v, min(a), max(a),
                          statname, t, prob)
    return t, prob


def lttest_ind(a, b, printit=0, name1='Samp1', name2='Samp2', writemode='a'):
    """
Calculates the t-obtained T-test on TWO INDEPENDENT samples of
scores a, and b.  From Numerical Recipes, p.483.  If printit=1, results
are printed to the screen.  If printit='filename', the results are output
to 'filename' using the given writemode (default=append).  Returns
t-value, and prob.

Usage:   lttest_ind(a,b,printit=0,name1='Samp1',name2='Samp2',writemode='a')
Returns: t-value, two-tailed prob
"""
    x1 = mean(a)
    x2 = mean(b)
    v1 = stdev(a)**2
    v2 = stdev(b)**2
    n1 = len(a)
    n2 = len(b)
    df = n1+n2-2
    svar = ((n1-1)*v1+(n2-1)*v2)/float(df)
    t = (x1-x2)/math.sqrt(svar*(1.0/n1 + 1.0/n2))
    prob = betai(0.5*df, 0.5, df/(df+t*t))
    if printit != 0:
        statname = 'Independent samples T-test.'
        outputpairedstats(printit, writemode,
                          name1, n1, x1, v1, min(a), max(a),
                          name2, n2, x2, v2, min(b), max(b),
                          statname, t, prob)
    return t, prob


def lttest_rel(a, b, printit=0, name1='Sample1', name2='Sample2', writemode='a'):
    """
Calculates the t-obtained T-test on TWO RELATED samples of scores,
a and b.  From Numerical Recipes, p.483.  If printit=1, results are
printed to the screen.  If printit='filename', the results are output to
'filename' using the given writemode (default=append).  Returns t-value,
and prob.

Usage:   lttest_rel(a,b,printit=0,name1='Sample1',name2='Sample2',writemode='a')
Returns: t-value, two-tailed prob
"""
    if len(a) != len(b):
        raise ValueError('Unequal length lists in ttest_rel.')
    x1 = mean(a)
    x2 = mean(b)
    v1 = var(a)
    v2 = var(b)
    n = len(a)
    cov = 0
    for i in range(len(a)):
        cov = cov + (a[i]-x1) * (b[i]-x2)
    df = n-1
    cov = cov / float(df)
    sd = math.sqrt((v1+v2 - 2.0*cov)/float(n))
    t = (x1-x2)/sd
    prob = betai(0.5*df, 0.5, df/(df+t*t))
    if printit != 0:
        statname = 'Related samples T-test.'
        outputpairedstats(printit, writemode,
                          name1, n, x1, v1, min(a), max(a),
                          name2, n, x2, v2, min(b), max(b),
                          statname, t, prob)
    return t, prob


def lchisquare(f_obs, f_exp=None):
    """
Calculates a one-way chi square for list of observed frequencies and
returns the result.  If no expected frequencies are given, the total N is
assumed to be equally distributed across all groups.

Usage:   lchisquare(f_obs, f_exp=None)   f_obs = list of observed cell freq.
Returns: chisquare-statistic, associated p-value
"""
    k = len(f_obs)  # number of groups
    if f_exp is None:
        f_exp = [sum(f_obs)/float(k)] * len(f_obs)  # create k bins with = freq.
    chisq = 0
    for i in range(len(f_obs)):
        chisq = chisq + (f_obs[i]-f_exp[i])**2 / float(f_exp[i])
    return chisq, chisqprob(chisq, k-1)
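# Example (illustrative): perfectly uniform observed counts give a chi-square
# of zero and p = 1.
#
#     >>> lchisquare([10, 10, 10, 10])
#     (0.0, 1.0)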
def lks_2samp(data1, data2):
    """
Computes the Kolmogorov-Smirnov statistic on 2 samples.  From
Numerical Recipes in C, page 493.

Usage:   lks_2samp(data1,data2)   data1&2 are lists of values for 2 conditions
Returns: KS D-value, associated p-value
"""
    j1 = 0
    j2 = 0
    fn1 = 0.0
    fn2 = 0.0
    n1 = len(data1)
    n2 = len(data2)
    en1 = n1
    en2 = n2
    d = 0.0
    data1.sort()
    data2.sort()
    while j1 < n1 and j2 < n2:
        d1 = data1[j1]
        d2 = data2[j2]
        if d1 <= d2:
            fn1 = (j1)/float(en1)
            j1 = j1 + 1
        if d2 <= d1:
            fn2 = (j2)/float(en2)
            j2 = j2 + 1
        dt = (fn2-fn1)
        if math.fabs(dt) > math.fabs(d):
            d = dt
    try:
        en = math.sqrt(en1*en2/float(en1+en2))
        prob = ksprob((en+0.12+0.11/en)*abs(d))
    except Exception:
        prob = 1.0
    return d, prob


def lmannwhitneyu(x, y):
    """
Calculates a Mann-Whitney U statistic on the provided scores and
returns the result.  Use only when the n in each condition is < 20 and
you have 2 independent samples of ranks.  NOTE: Mann-Whitney U is
significant if the u-obtained is LESS THAN or equal to the critical value
of U found in the tables.  Equivalent to Kruskal-Wallis H with just 2 groups.

Usage:   lmannwhitneyu(x,y)   where x and y are lists of values for 2 conditions
Returns: u-statistic, one-tailed p-value (i.e., p(z(U)))
"""
    n1 = len(x)
    n2 = len(y)
    ranked = rankdata(x+y)
    rankx = ranked[0:n1]  # get the x-ranks
    u1 = n1*n2 + (n1*(n1+1))/2.0 - sum(rankx)  # calc U for x
    u2 = n1*n2 - u1  # remainder is U for y
    bigu = max(u1, u2)
    smallu = min(u1, u2)
    T = math.sqrt(tiecorrect(ranked))  # correction factor for tied scores
    if T == 0:
        raise ValueError('All numbers are identical in lmannwhitneyu')
    sd = math.sqrt(T*n1*n2*(n1+n2+1)/12.0)
    z = abs((bigu-n1*n2/2.0) / sd)  # normal approximation for prob calc
    return smallu, 1.0 - zprob(z)


def ltiecorrect(rankvals):
    """
Corrects for ties in Mann Whitney U and Kruskal Wallis H tests.  See
Siegel, S. (1956) Nonparametric Statistics for the Behavioral Sciences.
New York: McGraw-Hill.  Code adapted from |Stat rankind.c code.

Usage:   ltiecorrect(rankvals)
Returns: T correction factor for U or H
"""
    sorted, posn = shellsort(rankvals)
    n = len(sorted)
    T = 0.0
    i = 0
    while (i < n-1):
        if sorted[i] == sorted[i+1]:
            nties = 1
            while (i < n-1) and (sorted[i] == sorted[i+1]):
                nties = nties + 1
                i = i + 1
            T = T + nties**3 - nties
        i = i+1
    T = T / float(n**3-n)
    return 1.0 - T


def lranksums(x, y):
    """
Calculates the rank sums statistic on the provided scores and returns
the result.  Use only when the n in each condition is > 20 and you have 2
independent samples of ranks.

Usage:   lranksums(x,y)
Returns: a z-statistic, two-tailed p-value
"""
    n1 = len(x)
    n2 = len(y)
    alldata = x+y
    ranked = rankdata(alldata)
    x = ranked[:n1]
    y = ranked[n1:]
    s = sum(x)
    expected = n1*(n1+n2+1) / 2.0
    z = (s - expected) / math.sqrt(n1*n2*(n1+n2+1)/12.0)
    prob = 2*(1.0 - zprob(abs(z)))
    return z, prob


def lwilcoxont(x, y):
    """
Calculates the Wilcoxon T-test for related samples and returns the
result.  A non-parametric T-test.

Usage:   lwilcoxont(x,y)
Returns: a t-statistic, two-tail probability estimate
"""
    if len(x) != len(y):
        raise ValueError('Unequal N in wilcoxont.  Aborting.')
    d = []
    for i in range(len(x)):
        diff = x[i] - y[i]
        if diff != 0:
            d.append(diff)
    count = len(d)
    absd = [abs(_) for _ in d]
    absranked = rankdata(absd)
    r_plus = 0.0
    r_minus = 0.0
    for i in range(len(absd)):
        if d[i] < 0:
            r_minus = r_minus + absranked[i]
        else:
            r_plus = r_plus + absranked[i]
    wt = min(r_plus, r_minus)
    mn = count * (count+1) * 0.25
    se = math.sqrt(count*(count+1)*(2.0*count+1.0)/24.0)
    z = math.fabs(wt-mn) / se
    prob = 2*(1.0 - zprob(abs(z)))
    return wt, prob
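# Example (illustrative; both statistics use a normal approximation for p):
#
#     >>> a = list(range(30))
#     >>> b = [v + 10 for v in a]
#     >>> z, p = lranksums(a, b)      # use ranksums when n > 20 per group
#     >>> wt, wp = lwilcoxont(a, b)   # related-samples alternative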
def lkruskalwallish(*args):
    """
The Kruskal-Wallis H-test is a non-parametric ANOVA for 3 or more
groups, requiring at least 5 subjects in each group.  This function
calculates the Kruskal-Wallis H-test for 3 or more independent samples
and returns the result.

Usage:   lkruskalwallish(*args)
Returns: H-statistic (corrected for ties), associated p-value
"""
    args = list(args)
    all = []
    n = [len(_) for _ in args]
    for i in range(len(args)):
        all = all + args[i]
    ranked = rankdata(all)
    T = tiecorrect(ranked)
    for i in range(len(args)):
        args[i] = ranked[0:n[i]]
        del ranked[0:n[i]]
    rsums = []
    for i in range(len(args)):
        rsums.append(sum(args[i])**2)
        rsums[i] = rsums[i] / float(n[i])
    ssbn = sum(rsums)
    totaln = sum(n)
    h = 12.0 / (totaln*(totaln+1)) * ssbn - 3*(totaln+1)
    df = len(args) - 1
    if T == 0:
        raise ValueError('All numbers are identical in lkruskalwallish')
    h = h / float(T)
    return h, chisqprob(h, df)


def lfriedmanchisquare(*args):
    """
Friedman Chi-Square is a non-parametric, one-way within-subjects
ANOVA.  This function calculates the Friedman Chi-square test for
repeated measures and returns the result, along with the associated
probability value.  It assumes 3 or more repeated measures.  With only
3 levels, a minimum of 10 subjects in the study is required; four levels
requires 5 subjects per level(??).

Usage:   lfriedmanchisquare(*args)
Returns: chi-square statistic, associated p-value
"""
    k = len(args)
    if k < 3:
        raise ValueError('Less than 3 levels.  Friedman test not appropriate.')
    n = len(args[0])
    data = pstat.abut(*tuple(args))
    for i in range(len(data)):
        data[i] = rankdata(data[i])
    ssbn = 0
    for i in range(k):
        ssbn = ssbn + sum(args[i])**2
    chisq = 12.0 / (k*n*(k+1)) * ssbn - 3*n*(k+1)
    return chisq, chisqprob(chisq, k-1)


# PROBABILITY CALCULATIONS

def lchisqprob(chisq, df):
    """
Returns the (1-tailed) probability value associated with the provided
chi-square value and df.  Adapted from chisq.c in Gary Perlman's |Stat.

Usage:   lchisqprob(chisq,df)
"""
    BIG = 20.0

    def ex(x):
        BIG = 20.0
        if x < -BIG:
            return 0.0
        else:
            return math.exp(x)

    if chisq <= 0 or df < 1:
        return 1.0
    a = 0.5 * chisq
    if df % 2 == 0:
        even = 1
    else:
        even = 0
    if df > 1:
        y = ex(-a)
    if even:
        s = y
    else:
        s = 2.0 * zprob(-math.sqrt(chisq))
    if (df > 2):
        chisq = 0.5 * (df - 1.0)
        if even:
            z = 1.0
        else:
            z = 0.5
        if a > BIG:
            if even:
                e = 0.0
            else:
                e = math.log(math.sqrt(math.pi))
            c = math.log(a)
            while (z <= chisq):
                e = math.log(z) + e
                s = s + ex(c*z-a-e)
                z = z + 1.0
            return s
        else:
            if even:
                e = 1.0
            else:
                e = 1.0 / math.sqrt(math.pi) / math.sqrt(a)
            c = 0.0
            while (z <= chisq):
                e = e * (a/float(z))
                c = c + e
                z = z + 1.0
            return (c*y+s)
    else:
        return s


def lerfcc(x):
    """
Returns the complementary error function erfc(x) with fractional
error everywhere less than 1.2e-7.  Adapted from Numerical Recipes.

Usage:   lerfcc(x)
"""
    z = abs(x)
    t = 1.0 / (1.0+0.5*z)
    ans = t * math.exp(-z*z-1.26551223 + t*(1.00002368+t*(0.37409196+t*(0.09678418+t*(-0.18628806+t*(0.27886807+t*(-1.13520398+t*(1.48851587+t*(-0.82215223+t*0.17087277)))))))))
    if x >= 0:
        return ans
    else:
        return 2.0 - ans
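# Example (illustrative): erfcc(0) should be 1.0 to within the stated
# ~1.2e-7 fractional error of this approximation.
#
#     >>> round(lerfcc(0.0), 6)
#     1.0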
Usage:   lzprob(z)
"""
    Z_MAX = 6.0  # maximum meaningful z-value
    if z == 0.0:
        x = 0.0
    else:
        y = 0.5 * math.fabs(z)
        if y >= (Z_MAX*0.5):
            x = 1.0
        elif (y < 1.0):
            w = y*y
            x = ((((((((0.000124818987 * w - 0.001075204047) * w + 0.005198775019) * w - 0.019198292004) * w + 0.059054035642) * w - 0.151968751364) * w + 0.319152932694) * w - 0.531923007300) * w + 0.797884560593) * y * 2.0
        else:
            y = y - 2.0
            x = (((((((((((((-0.000045255659 * y + 0.000152529290) * y - 0.000019538132) * y - 0.000676904986) * y + 0.001390604284) * y - 0.000794620820) * y - 0.002034254874) * y + 0.006549791214) * y - 0.010557625006) * y + 0.011630447319) * y - 0.009279453341) * y + 0.005353579108) * y - 0.002141268741) * y + 0.000535310849) * y + 0.999936657524
    if z > 0.0:
        prob = ((x+1.0)*0.5)
    else:
        prob = ((1.0-x)*0.5)
    return prob


def lksprob(alam):
    """
Computes a Kolmogorov-Smirnov test significance level.  Adapted from
Numerical Recipes.

Usage:   lksprob(alam)
"""
    fac = 2.0
    sum = 0.0
    termbf = 0.0
    a2 = -2.0*alam*alam
    for j in range(1, 201):
        term = fac*math.exp(a2*j*j)
        sum = sum + term
        if math.fabs(term) <= (0.001*termbf) or math.fabs(term) < (1.0e-8*sum):
            return sum
        fac = -fac
        termbf = math.fabs(term)
    return 1.0  # Get here only if fails to converge; was 0.0!!


def lfprob(dfnum, dfden, F):
    """
Returns the (1-tailed) significance level (p-value) of an F
statistic given the degrees of freedom for the numerator (dfR-dfF) and
the degrees of freedom for the denominator (dfF).

Usage:   lfprob(dfnum, dfden, F)   where usually dfnum=dfbn, dfden=dfwn
"""
    p = betai(0.5*dfden, 0.5*dfnum, dfden/float(dfden+dfnum*F))
    return p


def lbetacf(a, b, x):
    """
This function evaluates the continued fraction form of the incomplete
Beta function, betai.  (Adapted from: Numerical Recipes in C.)

Usage:   lbetacf(a,b,x)
"""
    ITMAX = 200
    EPS = 3.0e-7

    bm = az = am = 1.0
    qab = a+b
    qap = a+1.0
    qam = a-1.0
    bz = 1.0-qab*x/qap
    for i in range(ITMAX+1):
        em = float(i+1)
        tem = em + em
        d = em*(b-em)*x/((qam+tem)*(a+tem))
        ap = az + d*am
        bp = bz+d*bm
        d = -(a+em)*(qab+em)*x/((qap+tem)*(a+tem))
        app = ap+d*az
        bpp = bp+d*bz
        aold = az
        am = ap/bpp
        bm = bp/bpp
        az = app/bpp
        bz = 1.0
        if (abs(az-aold) < (EPS*abs(az))):
            return az
    print('a or b too big, or ITMAX too small in Betacf.')


def lgammln(xx):
    """
Returns the gamma function of xx.
    Gamma(z) = Integral(0,infinity) of t^(z-1)exp(-t) dt.
(Adapted from: Numerical Recipes in C.)

Usage:   lgammln(xx)
"""
    coeff = [76.18009173, -86.50532033, 24.01409822, -1.231739516,
             0.120858003e-2, -0.536382e-5]
    x = xx - 1.0
    tmp = x + 5.5
    tmp = tmp - (x+0.5)*math.log(tmp)
    ser = 1.0
    for j in range(len(coeff)):
        x = x + 1
        ser = ser + coeff[j]/x
    return -tmp + math.log(2.50662827465*ser)


def lbetai(a, b, x):
    """
Returns the incomplete beta function:

    I-sub-x(a,b) = 1/B(a,b)*(Integral(0,x) of t^(a-1)(1-t)^(b-1) dt)

where a,b>0 and B(a,b) = G(a)*G(b)/(G(a+b)) where G(a) is the gamma
function of a.  The continued fraction formulation is implemented here,
using the betacf function.  (Adapted from: Numerical Recipes in C.)

Usage:   lbetai(a,b,x)
"""
    if (x < 0.0 or x > 1.0):
        raise ValueError('Bad x in lbetai')
    if (x == 0.0 or x == 1.0):
        bt = 0.0
    else:
        bt = math.exp(gammln(a+b)-gammln(a)-gammln(b)+a*math.log(x)+b * math.log(1.0-x))
    if (x < (a+1.0)/(a+b+2.0)):
        return bt*betacf(a, b, x)/float(a)
    else:
        return 1.0-bt*betacf(b, a, 1.0-x)/float(b)
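# Example (illustrative): I_x(a,b) with a=b=1 is the uniform CDF, so
# betai(1,1,x) should come back as x itself.
#
#     >>> round(lbetai(1.0, 1.0, 0.25), 6)
#     0.25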
# ANOVA CALCULATIONS

def lF_oneway(*lists):
    """
Performs a 1-way ANOVA, returning an F-value and probability given
any number of groups.  From Heiman, pp.394-7.

Usage:   F_oneway(*lists)   where *lists is any number of lists, one per
                            treatment group
Returns: F value, one-tailed p-value
"""
    a = len(lists)  # ANOVA on 'a' groups, each in its own list
    alldata = []
    for i in range(len(lists)):
        alldata = alldata + lists[i]
    alldata = N.array(alldata)
    bign = len(alldata)
    sstot = ass(alldata)-(asquare_of_sums(alldata)/float(bign))
    ssbn = 0
    for list in lists:
        ssbn = ssbn + asquare_of_sums(N.array(list))/float(len(list))
    ssbn = ssbn - (asquare_of_sums(alldata)/float(bign))
    sswn = sstot-ssbn
    dfbn = a-1
    dfwn = bign - a
    msb = ssbn/float(dfbn)
    msw = sswn/float(dfwn)
    f = msb/msw
    prob = fprob(dfbn, dfwn, f)
    return f, prob


def lF_value(ER, EF, dfnum, dfden):
    """
Returns an F-statistic given the following:
        ER  = error associated with the null hypothesis (the Restricted model)
        EF  = error associated with the alternate hypothesis (the Full model)
        dfR-dfF = degrees of freedom of the numerator
        dfF = degrees of freedom associated with the denominator/Full model

Usage:   lF_value(ER,EF,dfnum,dfden)
"""
    return ((ER-EF)/float(dfnum) / (EF/float(dfden)))


# SUPPORT FUNCTIONS

def writecc(listoflists, file, writetype='w', extra=2):
    """
Writes a list of lists to a file in columns, customized by the max
size of items within the columns (max size of items in col, +2 characters)
to specified file.  File-overwrite is the default.

Usage:   writecc(listoflists,file,writetype='w',extra=2)
Returns: None
"""
    if type(listoflists[0]) not in [list, tuple]:
        listoflists = [listoflists]
    outfile = open(file, writetype)
    rowstokill = []
    list2print = copy.deepcopy(listoflists)
    for i in range(len(listoflists)):
        if listoflists[i] == ['\n'] or listoflists[i] == '\n' or listoflists[i] == 'dashes':
            rowstokill = rowstokill + [i]
    rowstokill.reverse()
    for row in rowstokill:
        del list2print[row]
    maxsize = [0]*len(list2print[0])
    for col in range(len(list2print[0])):
        items = pstat.colex(list2print, col)
        items = [pstat.makestr(_) for _ in items]
        maxsize[col] = max(map(len, items)) + extra
    for row in listoflists:
        if row == ['\n'] or row == '\n':
            outfile.write('\n')
        elif row == ['dashes'] or row == 'dashes':
            dashes = [0]*len(maxsize)
            for j in range(len(maxsize)):
                dashes[j] = '-'*(maxsize[j]-2)
            outfile.write(pstat.lineincustcols(dashes, maxsize))
        else:
            outfile.write(pstat.lineincustcols(row, maxsize))
        outfile.write('\n')
    outfile.close()
    return None


def lincr(l, cap):  # to increment a list up to a max-list of 'cap'
    """
Simulate a counting system from an n-dimensional list.

Usage:   lincr(l,cap)   l=list to increment, cap=max values for each list pos'n
Returns: next set of values for list l, OR -1 (if overflow)
"""
    l[0] = l[0] + 1  # e.g., [0,0,0] --> [2,4,3] (=cap)
    for i in range(len(l)):
        if l[i] > cap[i] and i < len(l)-1:  # if carryover AND not done
            l[i] = 0
            l[i+1] = l[i+1] + 1
        elif l[i] > cap[i] and i == len(l)-1:  # overflow past last column, must be finished
            l = -1
    return l


def lsum(inlist):
    """
Returns the sum of the items in the passed list.

Usage:   lsum(inlist)
"""
    s = 0
    for item in inlist:
        s = s + item
    return s


def lcumsum(inlist):
    """
Returns a list consisting of the cumulative sum of the items in the
passed list.

Usage:   lcumsum(inlist)
"""
    newlist = copy.deepcopy(inlist)
    for i in range(1, len(newlist)):
        newlist[i] = newlist[i] + newlist[i-1]
    return newlist
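# Example (illustrative):
#
#     >>> lcumsum([1, 2, 3, 4])
#     [1, 3, 6, 10]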
def lss(inlist):
    """
Squares each value in the passed list, adds up these squares and
returns the result.

Usage:   lss(inlist)
"""
    ss = 0
    for item in inlist:
        ss = ss + item*item
    return ss


def lsummult(list1, list2):
    """
Multiplies elements in list1 and list2, element by element, and
returns the sum of all resulting multiplications.  Must provide equal
length lists.

Usage:   lsummult(list1,list2)
"""
    if len(list1) != len(list2):
        raise ValueError("Lists not equal length in summult.")
    s = 0
    for item1, item2 in pstat.abut(list1, list2):
        s = s + item1*item2
    return s


def lsumdiffsquared(x, y):
    """
Takes pairwise differences of the values in lists x and y, squares
these differences, and returns the sum of these squares.

Usage:   lsumdiffsquared(x,y)
Returns: sum[(x[i]-y[i])**2]
"""
    sds = 0
    for i in range(len(x)):
        sds = sds + (x[i]-y[i])**2
    return sds


def lsquare_of_sums(inlist):
    """
Adds the values in the passed list, squares the sum, and returns the
result.

Usage:   lsquare_of_sums(inlist)
Returns: sum(inlist[i])**2
"""
    s = sum(inlist)
    return float(s)*s


def lshellsort(inlist):
    """
Shellsort algorithm.  Sorts a 1D-list.

Usage:   lshellsort(inlist)
Returns: sorted-inlist, sorting-index-vector (for original list)
"""
    n = len(inlist)
    svec = copy.deepcopy(inlist)
    ivec = list(range(n))
    gap = n // 2  # integer division needed
    while gap > 0:
        for i in range(gap, n):
            for j in range(i-gap, -1, -gap):
                while j >= 0 and svec[j] > svec[j+gap]:
                    temp = svec[j]
                    svec[j] = svec[j+gap]
                    svec[j+gap] = temp
                    itemp = ivec[j]
                    ivec[j] = ivec[j+gap]
                    ivec[j+gap] = itemp
        gap = gap // 2  # integer division needed
    # svec is now sorted inlist, and ivec has the order svec[i] = vec[ivec[i]]
    return svec, ivec


def lrankdata(inlist):
    """
Ranks the data in inlist, dealing with ties appropriately.  Assumes
a 1D inlist.  Adapted from Gary Perlman's |Stat ranksort.

Usage:   lrankdata(inlist)
Returns: a list of length equal to inlist, containing rank scores
"""
    n = len(inlist)
    svec, ivec = shellsort(inlist)
    sumranks = 0
    dupcount = 0
    newlist = [0]*n
    for i in range(n):
        sumranks = sumranks + i
        dupcount = dupcount + 1
        if i == n-1 or svec[i] != svec[i+1]:
            averank = sumranks / float(dupcount) + 1
            for j in range(i-dupcount+1, i+1):
                newlist[ivec[j]] = averank
            sumranks = 0
            dupcount = 0
    return newlist


def outputpairedstats(fname, writemode, name1, n1, m1, se1, min1, max1,
                      name2, n2, m2, se2, min2, max2, statname, stat, prob):
    """
Prints or writes to a file stats for two groups, using the name, n,
mean, sterr, min and max for each group, as well as the statistic name,
its value, and the associated p-value.
Usage: outputpairedstats(fname,writemode, name1,n1,mean1,stderr1,min1,max1, name2,n2,mean2,stderr2,min2,max2, statname,stat,prob) Returns: None """ suffix = '' # for *s after the p-value try: prob.shape prob = prob[0] except Exception: pass if prob < 0.001: suffix = ' ***' elif prob < 0.01: suffix = ' **' elif prob < 0.05: suffix = ' *' title = [['Name', 'N', 'Mean', 'SD', 'Min', 'Max']] lofl = title+[[name1, n1, round(m1, 3), round(math.sqrt(se1), 3), min1, max1], [name2, n2, round(m2, 3), round(math.sqrt(se2), 3), min2, max2]] if not isinstance(fname, str) or len(fname) == 0: print() print(statname) print() pstat.printcc(lofl) print() try: if stat.shape == (): stat = stat[0] if prob.shape == (): prob = prob[0] except Exception: pass print('Test statistic = ', round(stat, 3), ' p = ', round(prob, 3), suffix) print() else: file = open(fname, writemode) file.write('\n'+statname+'\n\n') file.close() writecc(lofl, fname, 'a') file = open(fname, 'a') try: if stat.shape == (): stat = stat[0] if prob.shape == (): prob = prob[0] except Exception: pass file.write(pstat.list2string(['\nTest statistic = ', round(stat, 4), ' p = ', round(prob, 4), suffix, '\n\n'])) file.close() return None def lfindwithin(data): """ Returns an integer representing a binary vector, where 1=within- subject factor, 0=between. Input equals the entire data 2D list (i.e., column 0=random factor, column -1=measured values (those two are skipped). Note: input data is in |Stat format ... a list of lists ("2D list") with one row per measured value, first column=subject identifier, last column= score, one in-between column per factor (these columns contain level designations on each factor). See also stats.anova.__doc__. Usage: lfindwithin(data) data in |Stat format """ numfact = len(data[0])-1 withinvec = 0 for col in range(1, numfact): examplelevel = pstat.unique(pstat.colex(data, col))[0] rows = pstat.linexand(data, col, examplelevel) # get 1 level of this factor factsubjs = pstat.unique(pstat.colex(rows, 0)) allsubjs = pstat.unique(pstat.colex(data, 0)) if len(factsubjs) == len(allsubjs): # fewer Ss than scores on this factor? 
withinvec = withinvec + (1 << col) return withinvec # DISPATCH LISTS AND TUPLES TO ABOVE FCNS # CENTRAL TENDENCY: geometricmean = Dispatch((lgeometricmean, (list, tuple)), ) harmonicmean = Dispatch((lharmonicmean, (list, tuple)), ) mean = Dispatch((lmean, (list, tuple)), ) median = Dispatch((lmedian, (list, tuple)), ) medianscore = Dispatch((lmedianscore, (list, tuple)), ) mode = Dispatch((lmode, (list, tuple)), ) # MOMENTS: moment = Dispatch((lmoment, (list, tuple)), ) variation = Dispatch((lvariation, (list, tuple)), ) skew = Dispatch((lskew, (list, tuple)), ) kurtosis = Dispatch((lkurtosis, (list, tuple)), ) describe = Dispatch((ldescribe, (list, tuple)), ) # FREQUENCY STATISTICS: itemfreq = Dispatch((litemfreq, (list, tuple)), ) scoreatpercentile = Dispatch((lscoreatpercentile, (list, tuple)), ) percentileofscore = Dispatch((lpercentileofscore, (list, tuple)), ) histogram = Dispatch((lhistogram, (list, tuple)), ) cumfreq = Dispatch((lcumfreq, (list, tuple)), ) relfreq = Dispatch((lrelfreq, (list, tuple)), ) # VARIABILITY: obrientransform = Dispatch((lobrientransform, (list, tuple)), ) samplevar = Dispatch((lsamplevar, (list, tuple)), ) samplestdev = Dispatch((lsamplestdev, (list, tuple)), ) var = Dispatch((lvar, (list, tuple)), ) stdev = Dispatch((lstdev, (list, tuple)), ) sterr = Dispatch((lsterr, (list, tuple)), ) sem = Dispatch((lsem, (list, tuple)), ) z = Dispatch((lz, (list, tuple)), ) zs = Dispatch((lzs, (list, tuple)), ) # TRIMMING FCNS: trimboth = Dispatch((ltrimboth, (list, tuple)), ) trim1 = Dispatch((ltrim1, (list, tuple)), ) # CORRELATION FCNS: paired = Dispatch((lpaired, (list, tuple)), ) pearsonr = Dispatch((lpearsonr, (list, tuple)), ) spearmanr = Dispatch((lspearmanr, (list, tuple)), ) pointbiserialr = Dispatch((lpointbiserialr, (list, tuple)), ) kendalltau = Dispatch((lkendalltau, (list, tuple)), ) linregress = Dispatch((llinregress, (list, tuple)), ) # INFERENTIAL STATS: ttest_1samp = Dispatch((lttest_1samp, (list, tuple)), ) ttest_ind = Dispatch((lttest_ind, (list, tuple)), ) ttest_rel = Dispatch((lttest_rel, (list, tuple)), ) chisquare = Dispatch((lchisquare, (list, tuple)), ) ks_2samp = Dispatch((lks_2samp, (list, tuple)), ) mannwhitneyu = Dispatch((lmannwhitneyu, (list, tuple)), ) ranksums = Dispatch((lranksums, (list, tuple)), ) tiecorrect = Dispatch((ltiecorrect, (list, tuple)), ) wilcoxont = Dispatch((lwilcoxont, (list, tuple)), ) kruskalwallish = Dispatch((lkruskalwallish, (list, tuple)), ) friedmanchisquare = Dispatch((lfriedmanchisquare, (list, tuple)), ) # PROBABILITY CALCS: chisqprob = Dispatch((lchisqprob, (int, float)), ) zprob = Dispatch((lzprob, (int, float)), ) ksprob = Dispatch((lksprob, (int, float)), ) fprob = Dispatch((lfprob, (int, float)), ) betacf = Dispatch((lbetacf, (int, float)), ) betai = Dispatch((lbetai, (int, float)), ) erfcc = Dispatch((lerfcc, (int, float)), ) gammln = Dispatch((lgammln, (int, float)), ) # ANOVA FUNCTIONS: F_oneway = Dispatch((lF_oneway, (list, tuple)), ) F_value = Dispatch((lF_value, (list, tuple)), ) # SUPPORT FUNCTIONS: incr = Dispatch((lincr, (list, tuple)), ) sum = Dispatch((lsum, (list, tuple)), ) cumsum = Dispatch((lcumsum, (list, tuple)), ) ss = Dispatch((lss, (list, tuple)), ) summult = Dispatch((lsummult, (list, tuple)), ) square_of_sums = Dispatch((lsquare_of_sums, (list, tuple)), ) sumdiffsquared = Dispatch((lsumdiffsquared, (list, tuple)), ) shellsort = Dispatch((lshellsort, (list, tuple)), ) rankdata = Dispatch((lrankdata, (list, tuple)), ) findwithin = Dispatch((lfindwithin, (list, tuple)), ) # 
=============  THE ARRAY-VERSION OF THE STATS FUNCTIONS  ===============
# =============  THE ARRAY-VERSION OF THE STATS FUNCTIONS  ===============
# =============  THE ARRAY-VERSION OF THE STATS FUNCTIONS  ===============

try:  # DEFINE THESE *ONLY* IF NUMERIC IS AVAILABLE
    import Numeric
    N = Numeric
    import LinearAlgebra
    LA = LinearAlgebra

    # ACENTRAL TENDENCY

    def ageometricmean(inarray, dimension=None, keepdims=0):
        """
Calculates the geometric mean of the values in the passed array.
That is:  n-th root of (x1 * x2 * ... * xn).  Defaults to ALL values in
the passed array.  Use dimension=None to flatten array first.  REMEMBER: if
dimension=0, it collapses over dimension 0 ('rows' in a 2D array) only, and
if dimension is a sequence, it collapses over all specified dimensions.  If
keepdims is set to 1, the resulting array will have as many dimensions as
inarray, with only 1 'level' per dim that was collapsed over.

Usage:   ageometricmean(inarray,dimension=None,keepdims=0)
Returns: geometric mean computed over dim(s) listed in dimension
"""
        inarray = N.array(inarray, N.Float)
        if dimension is None:
            inarray = N.ravel(inarray)
            size = len(inarray)
            mult = N.power(inarray, 1.0/size)
            mult = N.multiply.reduce(mult)
        elif type(dimension) in [int, float]:
            size = inarray.shape[dimension]
            mult = N.power(inarray, 1.0/size)
            mult = N.multiply.reduce(mult, dimension)
            if keepdims == 1:
                shp = list(inarray.shape)
                shp[dimension] = 1
                mult = N.reshape(mult, shp)  # was N.reshape(sum, shp), which discarded the result
        else:  # must be a SEQUENCE of dims to average over
            dims = sorted(dimension)
            dims.reverse()
            size = N.array(N.multiply.reduce(N.take(inarray.shape, dims)), N.Float)
            mult = N.power(inarray, 1.0/size)
            for dim in dims:
                mult = N.multiply.reduce(mult, dim)
            if keepdims == 1:
                shp = list(inarray.shape)
                for dim in dims:
                    shp[dim] = 1
                mult = N.reshape(mult, shp)
        return mult

    def aharmonicmean(inarray, dimension=None, keepdims=0):
        """
Calculates the harmonic mean of the values in the passed array.
That is:  n / (1/x1 + 1/x2 + ... + 1/xn).  Defaults to ALL values in
the passed array.  Use dimension=None to flatten array first.  REMEMBER: if
dimension=0, it collapses over dimension 0 ('rows' in a 2D array) only, and
if dimension is a sequence, it collapses over all specified dimensions.  If
keepdims is set to 1, the resulting array will have as many dimensions as
inarray, with only 1 'level' per dim that was collapsed over.
Usage: aharmonicmean(inarray,dimension=None,keepdims=0) Returns: harmonic mean computed over dim(s) in dimension """ inarray = inarray.astype(N.Float) if dimension is None: inarray = N.ravel(inarray) size = len(inarray) s = N.add.reduce(1.0 / inarray) elif type(dimension) in [int, float]: size = float(inarray.shape[dimension]) s = N.add.reduce(1.0/inarray, dimension) if keepdims == 1: shp = list(inarray.shape) shp[dimension] = 1 s = N.reshape(s, shp) else: # must be a SEQUENCE of dims to average over dims = sorted(dimension) nondims = [] for i in range(len(inarray.shape)): if i not in dims: nondims.append(i) tinarray = N.transpose(inarray, nondims+dims) # put keep-dims first idx = [0] * len(nondims) if idx == []: size = len(N.ravel(inarray)) s = asum(1.0 / inarray) if keepdims == 1: s = N.reshape([s], N.ones(len(inarray.shape))) else: idx[0] = -1 loopcap = N.array(tinarray.shape[0:len(nondims)]) - 1 s = N.zeros(loopcap+1, N.Float) while incr(idx, loopcap) != -1: s[idx] = asum(1.0/tinarray[idx]) size = N.multiply.reduce(N.take(inarray.shape, dims)) if keepdims == 1: shp = list(inarray.shape) for dim in dims: shp[dim] = 1 s = N.reshape(s, shp) return size / s def amean(inarray, dimension=None, keepdims=0): """ Calculates the arithmatic mean of the values in the passed array. That is: 1/n * (x1 + x2 + ... + xn). Defaults to ALL values in the passed array. Use dimension=None to flatten array first. REMEMBER: if dimension=0, it collapses over dimension 0 ('rows' in a 2D array) only, and if dimension is a sequence, it collapses over all specified dimensions. If keepdims is set to 1, the resulting array will have as many dimensions as inarray, with only 1 'level' per dim that was collapsed over. Usage: amean(inarray,dimension=None,keepdims=0) Returns: arithematic mean calculated over dim(s) in dimension """ if inarray.typecode() in ['l', 's', 'b']: inarray = inarray.astype(N.Float) if dimension is None: inarray = N.ravel(inarray) sum = N.add.reduce(inarray) denom = float(len(inarray)) elif type(dimension) in [int, float]: sum = asum(inarray, dimension) denom = float(inarray.shape[dimension]) if keepdims == 1: shp = list(inarray.shape) shp[dimension] = 1 sum = N.reshape(sum, shp) else: # must be a TUPLE of dims to average over dims = sorted(dimension) dims.reverse() sum = inarray * 1.0 for dim in dims: sum = N.add.reduce(sum, dim) denom = N.array(N.multiply.reduce(N.take(inarray.shape, dims)), N.Float) if keepdims == 1: shp = list(inarray.shape) for dim in dims: shp[dim] = 1 sum = N.reshape(sum, shp) return sum/denom def amedian(inarray, numbins=1000): """ Calculates the COMPUTED median value of an array of numbers, given the number of bins to use for the histogram (more bins approaches finding the precise median value of the array; default number of bins = 1000). From G.W. Heiman's Basic Stats, or CRC Probability & Statistics. NOTE: THIS ROUTINE ALWAYS uses the entire passed array (flattens it first). Usage: amedian(inarray,numbins=1000) Returns: median calculated over ALL values in inarray """ inarray = N.ravel(inarray) (hist, smallest, binsize, extras) = ahistogram(inarray, numbins) cumhist = N.cumsum(hist) # make cumulative histogram otherbins = N.greater_equal(cumhist, len(inarray)/2.0) otherbins = list(otherbins) # list of 0/1s, 1s start at median bin cfbin = otherbins.index(1) # get 1st(!) index holding 50%ile score LRL = smallest + binsize*cfbin # get lower read limit of that bin cfbelow = N.add.reduce(hist[0:cfbin]) # cum. freq. 
below bin freq = hist[cfbin] # frequency IN the 50%ile bin median = LRL + ((len(inarray)/2.0-cfbelow)/float(freq))*binsize # MEDIAN return median def amedianscore(inarray, dimension=None): """ Returns the 'middle' score of the passed array. If there is an even number of scores, the mean of the 2 middle scores is returned. Can function with 1D arrays, or on the FIRST dimension of 2D arrays (i.e., dimension can be None, to pre-flatten the array, or else dimension must equal 0). Usage: amedianscore(inarray,dimension=None) Returns: 'middle' score of the array, or the mean of the 2 middle scores """ if dimension is None: inarray = N.ravel(inarray) dimension = 0 inarray = N.sort(inarray, dimension) if inarray.shape[dimension] % 2 == 0: # if even number of elements indx = inarray.shape[dimension]/2 # integer division correct median = N.asarray(inarray[indx]+inarray[indx-1]) / 2.0 else: indx = inarray.shape[dimension] / 2 # integer division correct median = N.take(inarray, [indx], dimension) if median.shape == (1,): median = median[0] return median def amode(a, dimension=None): """ Returns an array of the modal (most common) score in the passed array. If there is more than one such score, ONLY THE FIRST is returned. The bin-count for the modal values is also returned. Operates on whole array (dimension=None), or on a given dimension. Usage: amode(a, dimension=None) Returns: array of bin-counts for mode(s), array of corresponding modal values """ if dimension is None: a = N.ravel(a) dimension = 0 scores = pstat.aunique(N.ravel(a)) # get ALL unique values testshape = list(a.shape) testshape[dimension] = 1 oldmostfreq = N.zeros(testshape) oldcounts = N.zeros(testshape) for score in scores: template = N.equal(a, score) counts = asum(template, dimension, 1) mostfrequent = N.where(N.greater(counts, oldcounts), score, oldmostfreq) oldcounts = N.where(N.greater(counts, oldcounts), counts, oldcounts) oldmostfreq = mostfrequent return oldcounts, mostfrequent def atmean(a, limits=None, inclusive=(1, 1)): """ Returns the arithmetic mean of all values in an array, ignoring values strictly outside the sequence passed to 'limits'. Note: either limit in the sequence, or the value of limits itself, can be set to None. The inclusive list/tuple determines whether the lower and upper limiting bounds (respectively) are open/exclusive (0) or closed/inclusive (1). Usage: atmean(a,limits=None,inclusive=(1,1)) """ if a.typecode() in ['l', 's', 'b']: a = a.astype(N.Float) if limits is None: return mean(a) assert type(limits) in [list, tuple, N.ArrayType], "Wrong type for limits in atmean" if inclusive[0]: lowerfcn = N.greater_equal else: lowerfcn = N.greater if inclusive[1]: upperfcn = N.less_equal else: upperfcn = N.less if limits[0] > N.maximum.reduce(N.ravel(a)) or limits[1] < N.minimum.reduce(N.ravel(a)): raise ValueError("No array values within given limits (atmean).") elif limits[0] is None and limits[1] is not None: mask = upperfcn(a, limits[1]) elif limits[0] is not None and limits[1] is None: mask = lowerfcn(a, limits[0]) elif limits[0] is not None and limits[1] is not None: mask = lowerfcn(a, limits[0])*upperfcn(a, limits[1]) s = float(N.add.reduce(N.ravel(a*mask))) n = float(N.add.reduce(N.ravel(mask))) return s/n def atvar(a, limits=None, inclusive=(1, 1)): """ Returns the sample variance of values in an array, (i.e., using N-1), ignoring values strictly outside the sequence passed to 'limits'. Note: either limit in the sequence, or the value of limits itself, can be set to None. 
The inclusive list/tuple determines whether the lower and upper limiting bounds (respectively) are open/exclusive (0) or closed/inclusive (1). Usage: atvar(a,limits=None,inclusive=(1,1)) """ a = a.astype(N.Float) if limits is None or limits == [None, None]: term1 = N.add.reduce(N.ravel(a*a)) n = float(len(N.ravel(a))) - 1 term2 = N.add.reduce(N.ravel(a))**2 / n print(term1, term2, n) return (term1 - term2) / n assert type(limits) in [list, tuple, N.ArrayType], "Wrong type for limits in atvar" if inclusive[0]: lowerfcn = N.greater_equal else: lowerfcn = N.greater if inclusive[1]: upperfcn = N.less_equal else: upperfcn = N.less if limits[0] > N.maximum.reduce(N.ravel(a)) or limits[1] < N.minimum.reduce(N.ravel(a)): raise ValueError("No array values within given limits (atvar).") elif limits[0] is None and limits[1] is not None: mask = upperfcn(a, limits[1]) elif limits[0] is not None and limits[1] is None: mask = lowerfcn(a, limits[0]) elif limits[0] is not None and limits[1] is not None: mask = lowerfcn(a, limits[0])*upperfcn(a, limits[1]) term1 = N.add.reduce(N.ravel(a*a*mask)) n = float(N.add.reduce(N.ravel(mask))) - 1 term2 = N.add.reduce(N.ravel(a*mask))**2 / n print(term1, term2, n) return (term1 - term2) / n def atmin(a, lowerlimit=None, dimension=None, inclusive=1): """ Returns the minimum value of a, along dimension, including only values less than (or equal to, if inclusive=1) lowerlimit. If the limit is set to None, all values in the array are used. Usage: atmin(a,lowerlimit=None,dimension=None,inclusive=1) """ if inclusive: lowerfcn = N.greater else: lowerfcn = N.greater_equal if dimension is None: a = N.ravel(a) dimension = 0 if lowerlimit is None: lowerlimit = N.minimum.reduce(N.ravel(a))-11 biggest = N.maximum.reduce(N.ravel(a)) ta = N.where(lowerfcn(a, lowerlimit), a, biggest) return N.minimum.reduce(ta, dimension) def atmax(a, upperlimit, dimension=None, inclusive=1): """ Returns the maximum value of a, along dimension, including only values greater than (or equal to, if inclusive=1) upperlimit. If the limit is set to None, a limit larger than the max value in the array is used. Usage: atmax(a,upperlimit,dimension=None,inclusive=1) """ if inclusive: upperfcn = N.less else: upperfcn = N.less_equal if dimension is None: a = N.ravel(a) dimension = 0 if upperlimit is None: upperlimit = N.maximum.reduce(N.ravel(a))+1 smallest = N.minimum.reduce(N.ravel(a)) ta = N.where(upperfcn(a, upperlimit), a, smallest) return N.maximum.reduce(ta, dimension) def atstdev(a, limits=None, inclusive=(1, 1)): """ Returns the standard deviation of all values in an array, ignoring values strictly outside the sequence passed to 'limits'. Note: either limit in the sequence, or the value of limits itself, can be set to None. The inclusive list/tuple determines whether the lower and upper limiting bounds (respectively) are open/exclusive (0) or closed/inclusive (1). Usage: atstdev(a,limits=None,inclusive=(1,1)) """ return N.sqrt(tvar(a, limits, inclusive)) def atsem(a, limits=None, inclusive=(1, 1)): """ Returns the standard error of the mean for the values in an array, (i.e., using N for the denominator), ignoring values strictly outside the sequence passed to 'limits'. Note: either limit in the sequence, or the value of limits itself, can be set to None. The inclusive list/tuple determines whether the lower and upper limiting bounds (respectively) are open/exclusive (0) or closed/inclusive (1). 
Usage: atsem(a,limits=None,inclusive=(1,1)) """ sd = tstdev(a, limits, inclusive) if limits is None or limits == [None, None]: n = float(len(N.ravel(a))) assert type(limits) in [list, tuple, N.ArrayType], "Wrong type for limits in atsem" if inclusive[0]: lowerfcn = N.greater_equal else: lowerfcn = N.greater if inclusive[1]: upperfcn = N.less_equal else: upperfcn = N.less if limits[0] > N.maximum.reduce(N.ravel(a)) or limits[1] < N.minimum.reduce(N.ravel(a)): raise ValueError("No array values within given limits (atsem).") elif limits[0] is None and limits[1] is not None: mask = upperfcn(a, limits[1]) elif limits[0] is not None and limits[1] is None: mask = lowerfcn(a, limits[0]) elif limits[0] is not None and limits[1] is not None: mask = lowerfcn(a, limits[0])*upperfcn(a, limits[1]) N.add.reduce(N.ravel(a*a*mask)) n = float(N.add.reduce(N.ravel(mask))) return sd/math.sqrt(n) # AMOMENTS def amoment(a, moment=1, dimension=None): """ Calculates the nth moment about the mean for a sample (defaults to the 1st moment). Generally used to calculate coefficients of skewness and kurtosis. Dimension can equal None (ravel array first), an integer (the dimension over which to operate), or a sequence (operate over multiple dimensions). Usage: amoment(a,moment=1,dimension=None) Returns: appropriate moment along given dimension """ if dimension is None: a = N.ravel(a) dimension = 0 if moment == 1: return 0.0 else: mn = amean(a, dimension, 1) # 1=keepdims s = N.power((a-mn), moment) return amean(s, dimension) def avariation(a, dimension=None): """ Returns the coefficient of variation, as defined in CRC Standard Probability and Statistics, p.6. Dimension can equal None (ravel array first), an integer (the dimension over which to operate), or a sequence (operate over multiple dimensions). Usage: avariation(a,dimension=None) """ return 100.0*asamplestdev(a, dimension)/amean(a, dimension) def askew(a, dimension=None): """ Returns the skewness of a distribution (normal ==> 0.0; >0 means extra weight in left tail). Use askewtest() to see if it's close enough. Dimension can equal None (ravel array first), an integer (the dimension over which to operate), or a sequence (operate over multiple dimensions). Usage: askew(a, dimension=None) Returns: skew of vals in a along dimension, returning ZERO where all vals equal """ denom = N.power(amoment(a, 2, dimension), 1.5) zero = N.equal(denom, 0) if isinstance(denom, N.ArrayType) and asum(zero) != 0: print("Number of zeros in askew: ", asum(zero)) denom = denom + zero # prevent divide-by-zero return N.where(zero, 0, amoment(a, 3, dimension)/denom) def akurtosis(a, dimension=None): """ Returns the kurtosis of a distribution (normal ==> 3.0; >3 means heavier in the tails, and usually more peaked). Use akurtosistest() to see if it's close enough. Dimension can equal None (ravel array first), an integer (the dimension over which to operate), or a sequence (operate over multiple dimensions). Usage: akurtosis(a,dimension=None) Returns: kurtosis of values in a along dimension, and ZERO where all vals equal """ denom = N.power(amoment(a, 2, dimension), 2) zero = N.equal(denom, 0) if isinstance(denom, N.ArrayType) and asum(zero) != 0: print("Number of zeros in akurtosis: ", asum(zero)) denom = denom + zero # prevent divide-by-zero return N.where(zero, 0, amoment(a, 4, dimension)/denom) def adescribe(inarray, dimension=None): """ Returns several descriptive statistics of the passed array. 
Dimension can equal None (ravel array first), an integer (the dimension over which to operate), or a sequence (operate over multiple dimensions). Usage: adescribe(inarray,dimension=None) Returns: n, (min,max), mean, standard deviation, skew, kurtosis """ if dimension is None: inarray = N.ravel(inarray) dimension = 0 n = inarray.shape[dimension] mm = (N.minimum.reduce(inarray), N.maximum.reduce(inarray)) m = amean(inarray, dimension) sd = astdev(inarray, dimension) skew = askew(inarray, dimension) kurt = akurtosis(inarray, dimension) return n, mm, m, sd, skew, kurt # NORMALITY TESTS def askewtest(a, dimension=None): """ Tests whether the skew is significantly different from a normal distribution. Dimension can equal None (ravel array first), an integer (the dimension over which to operate), or a sequence (operate over multiple dimensions). Usage: askewtest(a,dimension=None) Returns: z-score and 2-tail z-probability """ if dimension is None: a = N.ravel(a) dimension = 0 b2 = askew(a, dimension) n = float(a.shape[dimension]) y = b2 * N.sqrt(((n+1)*(n+3)) / (6.0*(n-2))) beta2 = (3.0*(n*n+27*n-70)*(n+1)*(n+3)) / ((n-2.0)*(n+5)*(n+7)*(n+9)) W2 = -1 + N.sqrt(2*(beta2-1)) delta = 1/N.sqrt(N.log(N.sqrt(W2))) alpha = N.sqrt(2/(W2-1)) y = N.where(N.equal(y, 0), 1, y) Z = delta*N.log(y/alpha + N.sqrt((y/alpha)**2+1)) return Z, (1.0-zprob(Z))*2 def akurtosistest(a, dimension=None): """ Tests whether a dataset has normal kurtosis (i.e., kurtosis=3(n-1)/(n+1)) Valid only for n>20. Dimension can equal None (ravel array first), an integer (the dimension over which to operate), or a sequence (operate over multiple dimensions). Usage: akurtosistest(a,dimension=None) Returns: z-score and 2-tail z-probability, returns 0 for bad pixels """ if dimension is None: a = N.ravel(a) dimension = 0 n = float(a.shape[dimension]) if n < 20: print("akurtosistest only valid for n>=20 ... continuing anyway, n=", n) b2 = akurtosis(a, dimension) E = 3.0*(n-1) / (n+1) varb2 = 24.0*n*(n-2)*(n-3) / ((n+1)*(n+1)*(n+3)*(n+5)) x = (b2-E)/N.sqrt(varb2) sqrtbeta1 = 6.0*(n*n-5*n+2)/((n+7)*(n+9)) * N.sqrt((6.0*(n+3)*(n+5)) / (n*(n-2)*(n-3))) A = 6.0 + 8.0/sqrtbeta1 * (2.0/sqrtbeta1 + N.sqrt(1+4.0/(sqrtbeta1**2))) term1 = 1 - 2/(9.0*A) denom = 1 + x*N.sqrt(2/(A-4.0)) denom = N.where(N.less(denom, 0), 99, denom) term2 = N.where(N.equal(denom, 0), term1, N.power((1-2.0/A)/denom, 1/3.0)) Z = (term1 - term2) / N.sqrt(2/(9.0*A)) Z = N.where(N.equal(denom, 99), 0, Z) return Z, (1.0-zprob(Z))*2 def anormaltest(a, dimension=None): """ Tests whether skew and/OR kurtosis of dataset differs from normal curve. Can operate over multiple dimensions. Dimension can equal None (ravel array first), an integer (the dimension over which to operate), or a sequence (operate over multiple dimensions). Usage: anormaltest(a,dimension=None) Returns: z-score and 2-tail probability """ if dimension is None: a = N.ravel(a) dimension = 0 s, p = askewtest(a, dimension) k, p = akurtosistest(a, dimension) k2 = N.power(s, 2) + N.power(k, 2) return k2, achisqprob(k2, 2) # AFREQUENCY FUNCTIONS def aitemfreq(a): """ Returns a 2D array of item frequencies. Column 1 contains item values, column 2 contains their respective counts. Assumes a 1D array is passed. 
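Illustrative sketch (hypothetical input; each row of the result pairs one distinct score with its count):
>>> table = aitemfreq(N.array([1, 1, 2, 3, 3, 3]))  # rows: (1, 2), (2, 1), (3, 3)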
Usage: aitemfreq(a) Returns: a 2D frequency table (col [0:n-1]=scores, col n=frequencies) """ scores = pstat.aunique(a) scores = N.sort(scores) freq = N.zeros(len(scores)) for i in range(len(scores)): freq[i] = N.add.reduce(N.equal(a, scores[i])) return N.array(pstat.aabut(scores, freq)) def ascoreatpercentile(inarray, percent): """ Usage: ascoreatpercentile(inarray,percent) 0<percent<100 Returns: score at given percentile, relative to inarray distribution """ percent = percent / 100.0 targetcf = percent*len(inarray) h, lrl, binsize, extras = histogram(inarray) cumhist = cumsum(h*1) for i in range(len(cumhist)): if cumhist[i] >= targetcf: break score = binsize * ((targetcf - cumhist[i-1]) / float(h[i])) + (lrl+binsize*i) return score def apercentileofscore(inarray, score, histbins=10, defaultlimits=None): """ Note: result of this function depends on the values used to histogram the data(!). Usage: apercentileofscore(inarray,score,histbins=10,defaultlimits=None) Returns: percentile-position of score (0-100) relative to inarray """ h, lrl, binsize, extras = histogram(inarray, histbins, defaultlimits) cumhist = cumsum(h*1) i = int((score - lrl)/float(binsize)) pct = (cumhist[i-1]+((score-(lrl+binsize*i))/float(binsize))*h[i])/float(len(inarray)) * 100 return pct def ahistogram(inarray, numbins=10, defaultlimits=None, printextras=1): """ Returns (i) an array of histogram bin counts, (ii) the smallest value of the histogram binning, and (iii) the bin width (the last 2 are not necessarily integers). Default number of bins is 10. Defaultlimits can be None (the routine picks bins spanning all the numbers in the inarray) or a 2-sequence (lowerlimit, upperlimit). Returns all of the following: array of bin values, lowerreallimit, binsize, extrapoints. Usage: ahistogram(inarray,numbins=10,defaultlimits=None,printextras=1) Returns: (array of bin counts, bin-minimum, min-width, #-points-outside-range) """ inarray = N.ravel(inarray) # flatten any >1D arrays if (defaultlimits is not None): lowerreallimit = defaultlimits[0] upperreallimit = defaultlimits[1] binsize = (upperreallimit-lowerreallimit) / float(numbins) else: Min = N.minimum.reduce(inarray) Max = N.maximum.reduce(inarray) estbinwidth = float(Max - Min)/float(numbins) + 1 binsize = (Max-Min+estbinwidth)/float(numbins) lowerreallimit = Min - binsize/2.0 # lower real limit,1st bin bins = N.zeros(numbins) extrapoints = 0 for num in inarray: try: if (num-lowerreallimit) < 0: extrapoints = extrapoints + 1 else: bintoincrement = int((num-lowerreallimit) / float(binsize)) bins[bintoincrement] = bins[bintoincrement] + 1 except Exception: # point outside lower/upper limits extrapoints = extrapoints + 1 if (extrapoints > 0 and printextras == 1): print('\nPoints outside given histogram range =', extrapoints) return (bins, lowerreallimit, binsize, extrapoints) def acumfreq(a, numbins=10, defaultreallimits=None): """ Returns a cumulative frequency histogram, using the histogram function. Defaultreallimits can be None (use all data), or a 2-sequence containing lower and upper limits on values to include. Usage: acumfreq(a,numbins=10,defaultreallimits=None) Returns: array of cumfreq bin values, lowerreallimit, binsize, extrapoints """ h, l, b, e = histogram(a, numbins, defaultreallimits) cumhist = cumsum(h*1) return cumhist, l, b, e def arelfreq(a, numbins=10, defaultreallimits=None): """ Returns a relative frequency histogram, using the histogram function. Defaultreallimits can be None (use all data), or a 2-sequence containing lower and upper limits on values to include.
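A minimal sketch (hypothetical data; the bin counts are divided by the number of scores, so they sum to roughly 1.0):
>>> h, lrl, binsize, extras = arelfreq(N.array([1., 1., 2., 3.]), numbins=3)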
Usage: arelfreq(a,numbins=10,defaultreallimits=None) Returns: array of relative freq bin values, lowerreallimit, binsize, extrapoints """ h, l, b, e = histogram(a, numbins, defaultreallimits) h = N.array(h/float(a.shape[0])) return h, l, b, e # AVARIABILITY FUNCTIONS def aobrientransform(*args): """ Computes a transform on input data (any number of columns). Used to test for homogeneity of variance prior to running one-way stats. Each array in *args is one level of a factor. If an F_oneway() run on the transformed data is significant, variances are unequal. From Maxwell and Delaney, p.112. Usage: aobrientransform(*args) *args = 1D arrays, one per level of factor Returns: transformed data for use in an ANOVA """ TINY = 1e-10 k = len(args) n = N.zeros(k, N.Float) v = N.zeros(k, N.Float) m = N.zeros(k, N.Float) nargs = [] for i in range(k): nargs.append(args[i].astype(N.Float)) n[i] = float(len(nargs[i])) v[i] = var(nargs[i]) m[i] = mean(nargs[i]) for j in range(k): for i in range(int(n[j])): t1 = (n[j]-1.5)*n[j]*(nargs[j][i]-m[j])**2 t2 = 0.5*v[j]*(n[j]-1.0) t3 = (n[j]-1.0)*(n[j]-2.0) nargs[j][i] = (t1-t2) / float(t3) check = 1 for j in range(k): if v[j] - mean(nargs[j]) > TINY: check = 0 if check != 1: raise ValueError('Lack of convergence in obrientransform.') else: return N.array(nargs) def asamplevar(inarray, dimension=None, keepdims=0): """ Returns the sample variance of the values in the passed array (i.e., using N for the denominator). Dimension can equal None (ravel array first), an integer (the dimension over which to operate), or a sequence (operate over multiple dimensions). Set keepdims=1 to return an array with the same number of dimensions as inarray. Usage: asamplevar(inarray,dimension=None,keepdims=0) """ if dimension is None: inarray = N.ravel(inarray) dimension = 0 if dimension == 1: mn = amean(inarray, dimension)[:, N.NewAxis] else: mn = amean(inarray, dimension, keepdims=1) deviations = inarray - mn if isinstance(dimension, list): n = 1 for d in dimension: n = n*inarray.shape[d] else: n = inarray.shape[dimension] svar = ass(deviations, dimension, keepdims) / float(n) return svar def asamplestdev(inarray, dimension=None, keepdims=0): """ Returns the sample standard deviation of the values in the passed array (i.e., using N). Dimension can equal None (ravel array first), an integer (the dimension over which to operate), or a sequence (operate over multiple dimensions). Set keepdims=1 to return an array with the same number of dimensions as inarray. Usage: asamplestdev(inarray,dimension=None,keepdims=0) """ return N.sqrt(asamplevar(inarray, dimension, keepdims)) def asignaltonoise(instack, dimension=0): """ Calculates signal-to-noise. Dimension can equal None (ravel array first), an integer (the dimension over which to operate), or a sequence (operate over multiple dimensions). Usage: asignaltonoise(instack,dimension=0) Returns: array containing the value of (mean/stdev) along dimension, or 0 when stdev=0 """ m = mean(instack, dimension) sd = stdev(instack, dimension) return N.where(N.equal(sd, 0), 0, m/sd) def avar(inarray, dimension=None, keepdims=0): """ Returns the estimated population variance of the values in the passed array (i.e., N-1). Dimension can equal None (ravel array first), an integer (the dimension over which to operate), or a sequence (operate over multiple dimensions). Set keepdims=1 to return an array with the same number of dimensions as inarray.
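For example (hypothetical array; contrast with asamplevar, which divides by N rather than N-1):
>>> a = N.array([1., 2., 3., 4.])
>>> sv = asamplevar(a)  # sum of squared deviations / N   -> 5.0/4 = 1.25
>>> v = avar(a)         # sum of squared deviations / (N-1) -> 5.0/3 ~= 1.667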
Usage: avar(inarray,dimension=None,keepdims=0) """ if dimension is None: inarray = N.ravel(inarray) dimension = 0 mn = amean(inarray, dimension, 1) deviations = inarray - mn if isinstance(dimension, list): n = 1 for d in dimension: n = n*inarray.shape[d] else: n = inarray.shape[dimension] var = ass(deviations, dimension, keepdims)/float(n-1) return var def astdev(inarray, dimension=None, keepdims=0): """ Returns the estimated population standard deviation of the values in the passed array (i.e., N-1). Dimension can equal None (ravel array first), an integer (the dimension over which to operate), or a sequence (operate over multiple dimensions). Set keepdims=1 to return an array with the same number of dimensions as inarray. Usage: astdev(inarray,dimension=None,keepdims=0) """ return N.sqrt(avar(inarray, dimension, keepdims)) def asterr(inarray, dimension=None, keepdims=0): """ Returns the estimated population standard error of the values in the passed array (i.e., N-1). Dimension can equal None (ravel array first), an integer (the dimension over which to operate), or a sequence (operate over multiple dimensions). Set keepdims=1 to return an array with the same number of dimensions as inarray. Usage: asterr(inarray,dimension=None,keepdims=0) """ if dimension is None: inarray = N.ravel(inarray) dimension = 0 return astdev(inarray, dimension, keepdims) / float(N.sqrt(inarray.shape[dimension])) def asem(inarray, dimension=None, keepdims=0): """ Returns the standard error of the mean (i.e., using N) of the values in the passed array. Dimension can equal None (ravel array first), an integer (the dimension over which to operate), or a sequence (operate over multiple dimensions). Set keepdims=1 to return an array with the same number of dimensions as inarray. Usage: asem(inarray,dimension=None, keepdims=0) """ if dimension is None: inarray = N.ravel(inarray) dimension = 0 if isinstance(dimension, list): n = 1 for d in dimension: n = n*inarray.shape[d] else: n = inarray.shape[dimension] s = asamplestdev(inarray, dimension, keepdims) / N.sqrt(n-1) return s def az(a, score): """ Returns the z-score of a given input score, given the array from which that score came. Not appropriate for population calculations, nor for arrays > 1D. Usage: az(a, score) """ z = (score-amean(a)) / asamplestdev(a) return z def azs(a): """ Returns a 1D array of z-scores, one for each score in the passed array, computed relative to the passed array. Usage: azs(a) """ zscores = [] for item in a: zscores.append(z(a, item)) return N.array(zscores) def azmap(scores, compare, dimension=0): """ Returns an array of z-scores the shape of scores (e.g., [x,y]), compared to array passed to compare (e.g., [time,x,y]). Assumes collapsing over dim 0 of the compare array. Usage: azmap(scores, compare, dimension=0) """ mns = amean(compare, dimension) sstd = asamplestdev(compare, 0) return (scores - mns) / sstd # ATRIMMING FUNCTIONS def around(a, digits=1): """ Rounds all values in array a to 'digits' decimal places.
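Illustrative sketch (hypothetical values):
>>> b = around(N.array([1.234, 5.678]), 2)  # b is approximately [1.23 5.68]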
Usage: around(a,digits) Returns: a, where each value is rounded to 'digits' decimals """ def ar(x, d=digits): return round(x, d) if not isinstance(a, N.ArrayType): try: a = N.array(a) except Exception: a = N.array(a, 'O') shp = a.shape if a.typecode() in ['f', 'F', 'd', 'D']: b = N.ravel(a) b = N.array([ar(_) for _ in b]) b.shape = shp elif a.typecode() in ['o', 'O']: b = N.ravel(a)*1 for i in range(len(b)): if isinstance(b[i], float): b[i] = round(b[i], digits) b.shape = shp else: # not a float, double or Object array b = a*1 return b def athreshold(a, threshmin=None, threshmax=None, newval=0): """ Like Numeric.clip() except that values <threshmin or >threshmax are replaced by newval instead of by threshmin/threshmax (respectively). Usage: athreshold(a,threshmin=None,threshmax=None,newval=0) Returns: a, with values <threshmin or >threshmax replaced with newval """ mask = N.zeros(a.shape) if threshmin is not None: mask = mask + N.where(N.less(a, threshmin), 1, 0) if threshmax is not None: mask = mask + N.where(N.greater(a, threshmax), 1, 0) mask = N.clip(mask, 0, 1) return N.where(mask, newval, a) def atrimboth(a, proportiontocut): """ Slices off the passed proportion of items from BOTH ends of the passed array (i.e., with proportiontocut=0.1, slices 'leftmost' 10% AND 'rightmost' 10% of scores). You must pre-sort the array if you want "proper" trimming. Slices off LESS if proportion results in a non-integer slice index (i.e., conservatively slices off proportiontocut). Usage: atrimboth (a,proportiontocut) Returns: trimmed version of array a """ lowercut = int(proportiontocut*len(a)) uppercut = len(a) - lowercut return a[lowercut:uppercut] def atrim1(a, proportiontocut, tail='right'): """ Slices off the passed proportion of items from ONE end of the passed array (i.e., if proportiontocut=0.1, slices off 'leftmost' or 'rightmost' 10% of scores). Slices off LESS if proportion results in a non-integer slice index (i.e., conservatively slices off proportiontocut). Usage: atrim1(a,proportiontocut,tail='right') or set tail='left' Returns: trimmed version of array a """ if tail.lower() == 'right': lowercut = 0 uppercut = len(a) - int(proportiontocut*len(a)) elif tail.lower() == 'left': lowercut = int(proportiontocut*len(a)) uppercut = len(a) return a[lowercut:uppercut] # ACORRELATION FUNCTIONS def acovariance(X): """ Computes the covariance matrix of a matrix X. Requires a 2D matrix input. Usage: acovariance(X) Returns: covariance matrix of X """ if len(X.shape) != 2: raise TypeError("acovariance requires 2D matrices") n = X.shape[0] mX = amean(X, 0) return N.dot(N.transpose(X), X) / float(n) - N.multiply.outer(mX, mX) def acorrelation(X): """ Computes the correlation matrix of a matrix X. Requires a 2D matrix input. Usage: acorrelation(X) Returns: correlation matrix of X """ C = acovariance(X) V = N.diagonal(C) return C / N.sqrt(N.multiply.outer(V, V)) def apaired(x, y): """ Interactively determines the type of data in x and y, and then runs the appropriate statistic for paired group data.
Usage: apaired(x,y) x,y = the two arrays of values to be compared Returns: appropriate statistic name, value, and probability """ samples = '' while samples not in ['i', 'r', 'I', 'R', 'c', 'C']: print('\nIndependent or related samples, or correlation (i,r,c): ', end=' ') samples = input() if samples in ['i', 'I', 'r', 'R']: print('\nComparing variances ...', end=' ') # USE O'BRIEN'S TEST FOR HOMOGENEITY OF VARIANCE, Maxwell & delaney, p.112 r = obrientransform(x, y) f, p = F_oneway(pstat.colex(r, 0), pstat.colex(r, 1)) if p < 0.05: vartype = 'unequal, p='+str(round(p, 4)) else: vartype = 'equal' print(vartype) if samples in ['i', 'I']: if vartype[0] == 'e': t, p = ttest_ind(x, y, None, 0) print('\nIndependent samples t-test: ', round(t, 4), round(p, 4)) else: if len(x) > 20 or len(y) > 20: z, p = ranksums(x, y) print('\nRank Sums test (NONparametric, n>20): ', round(z, 4), round(p, 4)) else: u, p = mannwhitneyu(x, y) print('\nMann-Whitney U-test (NONparametric, ns<20): ', round(u, 4), round(p, 4)) else: # RELATED SAMPLES if vartype[0] == 'e': t, p = ttest_rel(x, y, 0) print('\nRelated samples t-test: ', round(t, 4), round(p, 4)) else: t, p = ranksums(x, y) print('\nWilcoxon T-test (NONparametric): ', round(t, 4), round(p, 4)) else: # CORRELATION ANALYSIS corrtype = '' while corrtype not in ['c', 'C', 'r', 'R', 'd', 'D']: print('\nIs the data Continuous, Ranked, or Dichotomous (c,r,d): ', end=' ') corrtype = input() if corrtype in ['c', 'C']: m, b, r, p, see = linregress(x, y) print('\nLinear regression for continuous variables ...') lol = [['Slope', 'Intercept', 'r', 'Prob', 'SEestimate'], [round(m, 4), round(b, 4), round(r, 4), round(p, 4), round(see, 4)]] pstat.printcc(lol) elif corrtype in ['r', 'R']: r, p = spearmanr(x, y) print('\nCorrelation for ranked variables ...') print("Spearman's r: ", round(r, 4), round(p, 4)) else: # DICHOTOMOUS r, p = pointbiserialr(x, y) print('\nAssuming x contains a dichotomous variable ...') print('Point Biserial r: ', round(r, 4), round(p, 4)) print('\n\n') return None def apearsonr(x, y, verbose=1): """ Calculates a Pearson correlation coefficient and returns p. Taken from Heiman's Basic Statistics for the Behav. Sci (2nd), p.195. Usage: apearsonr(x,y,verbose=1) where x,y are equal length arrays Returns: Pearson's r, two-tailed p-value """ TINY = 1.0e-20 n = len(x) r_num = n*(N.add.reduce(x*y)) - N.add.reduce(x)*N.add.reduce(y) r_den = math.sqrt((n*ass(x) - asquare_of_sums(x))*(n*ass(y)-asquare_of_sums(y))) r = (r_num / r_den) df = n-2 t = r*math.sqrt(df/((1.0-r+TINY)*(1.0+r+TINY))) prob = abetai(0.5*df, 0.5, df/(df+t*t), verbose) return r, prob def aspearmanr(x, y): """ Calculates a Spearman rank-order correlation coefficient. Taken from Heiman's Basic Statistics for the Behav. Sci (1st), p.192. Usage: aspearmanr(x,y) where x,y are equal-length arrays Returns: Spearman's r, two-tailed p-value """ n = len(x) rankx = rankdata(x) ranky = rankdata(y) dsq = N.add.reduce((rankx-ranky)**2) rs = 1 - 6*dsq / float(n*(n**2-1)) t = rs * math.sqrt((n-2) / ((rs+1.0)*(1.0-rs))) df = n-2 probrs = abetai(0.5*df, 0.5, df/(df+t*t)) # probability values for rs are from part 2 of the spearman function in # Numerical Recipies, p.510. They close to tables, but not exact.(?) return rs, probrs def apointbiserialr(x, y): """ Calculates a point-biserial correlation coefficient and the associated probability value. Taken from Heiman's Basic Statistics for the Behav. Sci (1st), p.194. 
Usage: apointbiserialr(x,y) where x,y are equal length arrays Returns: Point-biserial r, two-tailed p-value """ TINY = 1e-30 categories = pstat.aunique(x) data = pstat.aabut(x, y) if len(categories) != 2: raise ValueError("Exactly 2 categories required (in x) for pointbiserialr().") else: # there are 2 categories, continue codemap = pstat.aabut(categories, N.arange(2)) pstat.arecode(data, codemap, 0) # recoded x = pstat.alinexand(data, 0, categories[0]) y = pstat.alinexand(data, 0, categories[1]) xmean = amean(pstat.acolex(x, 1)) ymean = amean(pstat.acolex(y, 1)) n = len(data) adjust = math.sqrt((len(x)/float(n))*(len(y)/float(n))) rpb = (ymean - xmean)/asamplestdev(pstat.acolex(data, 1))*adjust df = n-2 t = rpb*math.sqrt(df/((1.0-rpb+TINY)*(1.0+rpb+TINY))) prob = abetai(0.5*df, 0.5, df/(df+t*t)) return rpb, prob def akendalltau(x, y): """ Calculates Kendall's tau ... correlation of ordinal data. Adapted from function kendl1 in Numerical Recipies. Needs good test-cases.@@@ Usage: akendalltau(x,y) Returns: Kendall's tau, two-tailed p-value """ n1 = 0 n2 = 0 iss = 0 for j in range(len(x)-1): for k in range(j, len(y)): a1 = x[j] - x[k] a2 = y[j] - y[k] aa = a1 * a2 if (aa): # neither array has a tie n1 = n1 + 1 n2 = n2 + 1 if aa > 0: iss = iss + 1 else: iss = iss - 1 else: if (a1): n1 = n1 + 1 else: n2 = n2 + 1 tau = iss / math.sqrt(n1*n2) svar = (4.0*len(x)+10.0) / (9.0*len(x)*(len(x)-1)) z = tau / math.sqrt(svar) prob = erfcc(abs(z)/1.4142136) return tau, prob def alinregress(*args): """ Calculates a regression line on two arrays, x and y, corresponding to x,y pairs. If a single 2D array is passed, alinregress finds dim with 2 levels and splits data into x,y pairs along that dim. Usage: alinregress(*args) args=2 equal-length arrays, or one 2D array Returns: slope, intercept, r, two-tailed prob, sterr-of-the-estimate """ TINY = 1.0e-20 if len(args) == 1: # more than 1D array? args = args[0] if len(args) == 2: x = args[0] y = args[1] else: x = args[:, 0] y = args[:, 1] else: x = args[0] y = args[1] n = len(x) xmean = amean(x) ymean = amean(y) r_num = n*(N.add.reduce(x*y)) - N.add.reduce(x)*N.add.reduce(y) r_den = math.sqrt((n*ass(x) - asquare_of_sums(x))*(n*ass(y)-asquare_of_sums(y))) r = r_num / r_den df = n-2 t = r*math.sqrt(df/((1.0-r+TINY)*(1.0+r+TINY))) prob = abetai(0.5*df, 0.5, df/(df+t*t)) slope = r_num / (float(n)*ass(x) - asquare_of_sums(x)) intercept = ymean - slope*xmean sterrest = math.sqrt(1-r*r)*asamplestdev(y) return slope, intercept, r, prob, sterrest # AINFERENTIAL STATISTICS def attest_1samp(a, popmean, printit=0, name='Sample', writemode='a'): """ Calculates the t-obtained for the independent samples T-test on ONE group of scores a, given a population mean. If printit=1, results are printed to the screen. If printit='filename', the results are output to 'filename' using the given writemode (default=append). Returns t-value, and prob. Usage: attest_1samp(a,popmean,Name='Sample',printit=0,writemode='a') Returns: t-value, two-tailed prob """ if not isinstance(a, N.ArrayType): a = N.array(a) x = amean(a) v = avar(a) n = len(a) df = n-1 svar = ((n-1)*v) / float(df) t = (x-popmean)/math.sqrt(svar*(1.0/n)) prob = abetai(0.5*df, 0.5, df/(df+t*t)) if printit != 0: statname = 'Single-sample T-test.' 
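# The call below prints/writes a two-row summary: the population appears as a degenerate comparison group (its n is shown as '--' and its variance, min and max as 0), followed by the sample's own summary statistics.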
outputpairedstats(printit, writemode, 'Population', '--', popmean, 0, 0, 0, name, n, x, v, N.minimum.reduce(N.ravel(a)), N.maximum.reduce(N.ravel(a)), statname, t, prob) return t, prob def attest_ind(a, b, dimension=None, printit=0, name1='Samp1', name2='Samp2', writemode='a'): """ Calculates the t-obtained T-test on TWO INDEPENDENT samples of scores a, and b. From Numerical Recipies, p.483. If printit=1, results are printed to the screen. If printit='filename', the results are output to 'filename' using the given writemode (default=append). Dimension can equal None (ravel array first), or an integer (the dimension over which to operate on a and b). Usage: attest_ind (a,b,dimension=None,printit=0, Name1='Samp1',Name2='Samp2',writemode='a') Returns: t-value, two-tailed p-value """ if dimension is None: a = N.ravel(a) b = N.ravel(b) dimension = 0 x1 = amean(a, dimension) x2 = amean(b, dimension) v1 = avar(a, dimension) v2 = avar(b, dimension) n1 = a.shape[dimension] n2 = b.shape[dimension] df = n1+n2-2 svar = ((n1-1)*v1+(n2-1)*v2) / float(df) zerodivproblem = N.equal(svar, 0) svar = N.where(zerodivproblem, 1, svar) # avoid zero-division in 1st place t = (x1-x2)/N.sqrt(svar*(1.0/n1 + 1.0/n2)) # N-D COMPUTATION HERE!!!!!! t = N.where(zerodivproblem, 1.0, t) # replace NaN/wrong t-values with 1.0 probs = abetai(0.5*df, 0.5, float(df)/(df+t*t)) if isinstance(t, N.ArrayType): probs = N.reshape(probs, t.shape) if len(probs) == 1: probs = probs[0] if printit != 0: if isinstance(t, N.ArrayType): t = t[0] if isinstance(probs, N.ArrayType): probs = probs[0] statname = 'Independent samples T-test.' outputpairedstats(printit, writemode, name1, n1, x1, v1, N.minimum.reduce(N.ravel(a)), N.maximum.reduce(N.ravel(a)), name2, n2, x2, v2, N.minimum.reduce(N.ravel(b)), N.maximum.reduce(N.ravel(b)), statname, t, probs) return return t, probs def attest_rel(a, b, dimension=None, printit=0, name1='Samp1', name2='Samp2', writemode='a'): """ Calculates the t-obtained T-test on TWO RELATED samples of scores, a and b. From Numerical Recipies, p.483. If printit=1, results are printed to the screen. If printit='filename', the results are output to 'filename' using the given writemode (default=append). Dimension can equal None (ravel array first), or an integer (the dimension over which to operate on a and b). Usage: attest_rel(a,b,dimension=None,printit=0, name1='Samp1',name2='Samp2',writemode='a') Returns: t-value, two-tailed p-value """ if dimension is None: a = N.ravel(a) b = N.ravel(b) dimension = 0 if len(a) != len(b): raise ValueError('Unequal length arrays.') x1 = amean(a, dimension) x2 = amean(b, dimension) v1 = avar(a, dimension) v2 = avar(b, dimension) n = a.shape[dimension] df = float(n-1) d = (a-b).astype('d') denom = N.sqrt((n*N.add.reduce(d*d, dimension) - N.add.reduce(d, dimension)**2) / df) zerodivproblem = N.equal(denom, 0) denom = N.where(zerodivproblem, 1, denom) # avoid zero-division in 1st place t = N.add.reduce(d, dimension) / denom # N-D COMPUTATION HERE!!!!!! t = N.where(zerodivproblem, 1.0, t) # replace NaN/wrong t-values with 1.0 probs = abetai(0.5*df, 0.5, float(df)/(df+t*t)) if isinstance(t, N.ArrayType): probs = N.reshape(probs, t.shape) if len(probs) == 1: probs = probs[0] if printit != 0: statname = 'Related samples T-test.' 
outputpairedstats(printit, writemode, name1, n, x1, v1, N.minimum.reduce(N.ravel(a)), N.maximum.reduce(N.ravel(a)), name2, n, x2, v2, N.minimum.reduce(N.ravel(b)), N.maximum.reduce(N.ravel(b)), statname, t, probs) return return t, probs def achisquare(f_obs, f_exp=None): """ Calculates a one-way chi square for array of observed frequencies and returns the result. If no expected frequencies are given, the total N is assumed to be equally distributed across all groups. Usage: achisquare(f_obs, f_exp=None) f_obs = array of observed cell freq. Returns: chisquare-statistic, associated p-value """ k = len(f_obs) if f_exp is None: f_exp = N.array([sum(f_obs)/float(k)] * len(f_obs), N.Float) f_exp = f_exp.astype(N.Float) chisq = N.add.reduce((f_obs-f_exp)**2 / f_exp) return chisq, chisqprob(chisq, k-1) def aks_2samp(data1, data2): """ Computes the Kolmogorov-Smirnof statistic on 2 samples. Modified from Numerical Recipies in C, page 493. Returns KS D-value, prob. Not ufunc- like. Usage: aks_2samp(data1,data2) where data1 and data2 are 1D arrays Returns: KS D-value, p-value """ j1 = 0 # N.zeros(data1.shape[1:]) TRIED TO MAKE THIS UFUNC-LIKE j2 = 0 # N.zeros(data2.shape[1:]) fn1 = 0.0 # N.zeros(data1.shape[1:],N.Float) fn2 = 0.0 # N.zeros(data2.shape[1:],N.Float) n1 = data1.shape[0] n2 = data2.shape[0] en1 = n1*1 en2 = n2*1 d = N.zeros(data1.shape[1:], N.Float) data1 = N.sort(data1, 0) data2 = N.sort(data2, 0) while j1 < n1 and j2 < n2: d1 = data1[j1] d2 = data2[j2] if d1 <= d2: fn1 = (j1)/float(en1) j1 = j1 + 1 if d2 <= d1: fn2 = (j2)/float(en2) j2 = j2 + 1 dt = (fn2-fn1) if abs(dt) > abs(d): d = dt try: en = math.sqrt(en1*en2/float(en1+en2)) prob = aksprob((en+0.12+0.11/en)*N.fabs(d)) except Exception: prob = 1.0 return d, prob def amannwhitneyu(x, y): """ Calculates a Mann-Whitney U statistic on the provided scores and returns the result. Use only when the n in each condition is < 20 and you have 2 independent samples of ranks. REMEMBER: Mann-Whitney U is significant if the u-obtained is LESS THAN or equal to the critical value of U. Usage: amannwhitneyu(x,y) where x,y are arrays of values for 2 conditions Returns: u-statistic, one-tailed p-value (i.e., p(z(U))) """ n1 = len(x) n2 = len(y) ranked = rankdata(N.concatenate((x, y))) rankx = ranked[0:n1] # get the x-ranks u1 = n1*n2 + (n1*(n1+1))/2.0 - sum(rankx) # calc U for x u2 = n1*n2 - u1 # remainder is U for y bigu = max(u1, u2) smallu = min(u1, u2) T = math.sqrt(tiecorrect(ranked)) # correction factor for tied scores if T == 0: raise ValueError('All numbers are identical in amannwhitneyu') sd = math.sqrt(T*n1*n2*(n1+n2+1)/12.0) z = abs((bigu-n1*n2/2.0) / sd) # normal approximation for prob calc return smallu, 1.0 - zprob(z) def atiecorrect(rankvals): """ Tie-corrector for ties in Mann Whitney U and Kruskal Wallis H tests. See Siegel, S. (1956) Nonparametric Statistics for the Behavioral Sciences. New York: McGraw-Hill. Code adapted from |Stat rankind.c code. Usage: atiecorrect(rankvals) Returns: T correction factor for U or H """ sorted, posn = ashellsort(N.array(rankvals)) n = len(sorted) T = 0.0 i = 0 while (i < n-1): if sorted[i] == sorted[i+1]: nties = 1 while (i < n-1) and (sorted[i] == sorted[i+1]): nties = nties + 1 i = i + 1 T = T + nties**3 - nties i = i+1 T = T / float(n**3-n) return 1.0 - T def aranksums(x, y): """ Calculates the rank sums statistic on the provided scores and returns the result. 
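A minimal sketch (hypothetical 1D arrays; a large |z| and small p suggest the two conditions differ):
>>> z, p = aranksums(N.array([1., 2., 3., 4.]), N.array([5., 6., 7., 8.]))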
Usage: aranksums(x,y) where x,y are arrays of values for 2 conditions Returns: z-statistic, two-tailed p-value """ n1 = len(x) n2 = len(y) alldata = N.concatenate((x, y)) ranked = arankdata(alldata) x = ranked[:n1] y = ranked[n1:] s = sum(x) expected = n1*(n1+n2+1) / 2.0 z = (s - expected) / math.sqrt(n1*n2*(n1+n2+1)/12.0) prob = 2*(1.0 - zprob(abs(z))) return z, prob def awilcoxont(x, y): """ Calculates the Wilcoxon T-test for related samples and returns the result. A non-parametric T-test. Usage: awilcoxont(x,y) where x,y are equal-length arrays for 2 conditions Returns: t-statistic, two-tailed p-value """ if len(x) != len(y): raise ValueError('Unequal N in awilcoxont. Aborting.') d = x-y d = N.compress(N.not_equal(d, 0), d) # Keep all non-zero differences count = len(d) absd = abs(d) absranked = arankdata(absd) r_plus = 0.0 r_minus = 0.0 for i in range(len(absd)): if d[i] < 0: r_minus = r_minus + absranked[i] else: r_plus = r_plus + absranked[i] wt = min(r_plus, r_minus) mn = count * (count+1) * 0.25 se = math.sqrt(count*(count+1)*(2.0*count+1.0)/24.0) z = math.fabs(wt-mn) / se prob = 2*(1.0 - zprob(abs(z))) return wt, prob def akruskalwallish(*args): """ The Kruskal-Wallis H-test is a non-parametric ANOVA for 3 or more groups, requiring at least 5 subjects in each group. This function calculates the Kruskal-Wallis H and associated p-value for 3 or more independent samples. Usage: akruskalwallish(*args) args are separate arrays for 3+ conditions Returns: H-statistic (corrected for ties), associated p-value """ assert len(args) >= 3, "Need at least 3 groups in stats.akruskalwallish()" args = list(args) n = [len(_) for _ in args] all = [] for i in range(len(args)): all = all + args[i].tolist() ranked = rankdata(all) T = tiecorrect(ranked) for i in range(len(args)): args[i] = ranked[0:n[i]] del ranked[0:n[i]] rsums = [] for i in range(len(args)): rsums.append(sum(args[i])**2) rsums[i] = rsums[i] / float(n[i]) ssbn = sum(rsums) totaln = sum(n) h = 12.0 / (totaln*(totaln+1)) * ssbn - 3*(totaln+1) df = len(args) - 1 if T == 0: raise ValueError('All numbers are identical in akruskalwallish') h = h / float(T) return h, chisqprob(h, df) def afriedmanchisquare(*args): """ Friedman Chi-Square is a non-parametric, one-way within-subjects ANOVA. This function calculates the Friedman Chi-square test for repeated measures and returns the result, along with the associated probability value. It assumes 3 or more repeated measures. Only 3 levels requires a minimum of 10 subjects in the study. Four levels requires 5 subjects per level(??). Usage: afriedmanchisquare(*args) args are separate arrays for 2+ conditions Returns: chi-square statistic, associated p-value """ k = len(args) if k < 3: raise ValueError('\nLess than 3 levels. Friedman test not appropriate.\n') n = len(args[0]) data = pstat.aabut(*args) data = data.astype(N.Float) for i in range(len(data)): data[i] = arankdata(data[i]) ssbn = asum(asum(data, 0)**2) # sum of squared rank sums, one rank sum per condition chisq = 12.0 / (k*n*(k+1)) * ssbn - 3*n*(k+1) return chisq, chisqprob(chisq, k-1) # APROBABILITY CALCULATIONS def achisqprob(chisq, df): """ Returns the (1-tail) probability value associated with the provided chi-square value and df. Heavily modified from chisq.c in Gary Perlman's |Stat. Can handle multiple dimensions.
Usage: achisqprob(chisq,df) chisq=chisquare stat., df=degrees of freedom """ BIG = 200.0 def ex(x): BIG = 200.0 exponents = N.where(N.less(x, -BIG), -BIG, x) return N.exp(exponents) if not isinstance(chisq, N.ArrayType): chisq = N.array([chisq]) if df < 1: return N.ones(chisq.shape, N.float) probs = N.zeros(chisq.shape, N.Float) probs = N.where(N.less_equal(chisq, 0), 1.0, probs) # set prob=1 for chisq<0 a = 0.5 * chisq if df > 1: y = ex(-a) if df % 2 == 0: even = 1 s = y*1 s2 = s*1 else: even = 0 s = 2.0 * azprob(-N.sqrt(chisq)) s2 = s*1 if (df > 2): chisq = 0.5 * (df - 1.0) if even: z = N.ones(probs.shape, N.Float) else: z = 0.5 * N.ones(probs.shape, N.Float) if even: e = N.zeros(probs.shape, N.Float) else: e = N.log(N.sqrt(N.pi)) * N.ones(probs.shape, N.Float) c = N.log(a) mask = N.zeros(probs.shape) a_big = N.greater(a, BIG) a_big_frozen = -1 * N.ones(probs.shape, N.Float) totalelements = N.multiply.reduce(N.array(probs.shape)) while asum(mask) != totalelements: e = N.log(z) + e s = s + ex(c*z-a-e) z = z + 1.0 # print z, e, s newmask = N.greater(z, chisq) a_big_frozen = N.where(newmask*N.equal(mask, 0)*a_big, s, a_big_frozen) mask = N.clip(newmask+mask, 0, 1) if even: z = N.ones(probs.shape, N.Float) e = N.ones(probs.shape, N.Float) else: z = 0.5 * N.ones(probs.shape, N.Float) e = 1.0 / N.sqrt(N.pi) / N.sqrt(a) * N.ones(probs.shape, N.Float) c = 0.0 mask = N.zeros(probs.shape) a_notbig_frozen = -1 * N.ones(probs.shape, N.Float) while asum(mask) != totalelements: e = e * (a/z.astype(N.Float)) c = c + e z = z + 1.0 # print '#2', z, e, c, s, c*y+s2 newmask = N.greater(z, chisq) a_notbig_frozen = N.where(newmask*N.equal(mask, 0)*(1-a_big), c*y+s2, a_notbig_frozen) mask = N.clip(newmask+mask, 0, 1) probs = N.where(N.equal(probs, 1), 1, N.where(N.greater(a, BIG), a_big_frozen, a_notbig_frozen)) return probs else: return s def aerfcc(x): """ Returns the complementary error function erfc(x) with fractional error everywhere less than 1.2e-7. Adapted from Numerical Recipies. Can handle multiple dimensions. Usage: aerfcc(x) """ z = abs(x) t = 1.0 / (1.0+0.5*z) ans = t * N.exp(-z*z-1.26551223 + t*(1.00002368+t*(0.37409196+t*(0.09678418+t*(-0.18628806+t*(0.27886807+t*(-1.13520398+t*(1.48851587+t*(-0.82215223+t*0.17087277))))))))) return N.where(N.greater_equal(x, 0), ans, 2.0-ans) def azprob(z): """ Returns the area under the normal curve 'to the left of' the given z value. Thus, for z<0, zprob(z) = 1-tail probability for z>0, 1.0-zprob(z) = 1-tail probability for any z, 2.0*(1.0-zprob(abs(z))) = 2-tail probability Adapted from z.c in Gary Perlman's |Stat. Can handle multiple dimensions. 
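A quick sanity check (illustrative; the argument is an array here because this version operates elementwise):
>>> p = azprob(N.array([0.0]))  # p[0] is 0.5: half the area lies to the left of the mean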
Usage: azprob(z) where z is a z-value """ def yfunc(y): x = (((((((((((((-0.000045255659 * y + 0.000152529290) * y - 0.000019538132) * y - 0.000676904986) * y + 0.001390604284) * y - 0.000794620820) * y - 0.002034254874) * y + 0.006549791214) * y - 0.010557625006) * y + 0.011630447319) * y - 0.009279453341) * y + 0.005353579108) * y - 0.002141268741) * y + 0.000535310849) * y + 0.999936657524 return x def wfunc(w): x = ((((((((0.000124818987 * w - 0.001075204047) * w + 0.005198775019) * w - 0.019198292004) * w + 0.059054035642) * w - 0.151968751364) * w + 0.319152932694) * w - 0.531923007300) * w + 0.797884560593) * N.sqrt(w) * 2.0 return x Z_MAX = 6.0 # maximum meaningful z-value x = N.zeros(z.shape, N.Float) # initialize y = 0.5 * N.fabs(z) x = N.where(N.less(y, 1.0), wfunc(y*y), yfunc(y-2.0)) # get x's x = N.where(N.greater(y, Z_MAX*0.5), 1.0, x) # kill those with big Z prob = N.where(N.greater(z, 0), (x+1)*0.5, (1-x)*0.5) return prob def aksprob(alam): """ Returns the probability value for a K-S statistic computed via ks_2samp. Adapted from Numerical Recipies. Can handle multiple dimensions. Usage: aksprob(alam) """ if isinstance(alam, N.ArrayType): frozen = -1 * N.ones(alam.shape, N.Float64) alam = alam.astype(N.Float64) arrayflag = 1 else: arrayflag = 0 frozen = N.array(-1.) alam = N.array(alam, N.Float64) mask = N.zeros(alam.shape) fac = 2.0 * N.ones(alam.shape, N.Float) sum = N.zeros(alam.shape, N.Float) termbf = N.zeros(alam.shape, N.Float) a2 = N.array(-2.0*alam*alam, N.Float64) totalelements = N.multiply.reduce(N.array(mask.shape)) for j in range(1, 201): if asum(mask) == totalelements: break exponents = (a2*j*j) overflowmask = N.less(exponents, -746) frozen = N.where(overflowmask, 0, frozen) mask = mask+overflowmask term = fac*N.exp(exponents) sum = sum + term newmask = N.where(N.less_equal(abs(term), (0.001*termbf)) + N.less(abs(term), 1.0e-8*sum), 1, 0) frozen = N.where(newmask*N.equal(mask, 0), sum, frozen) mask = N.clip(mask+newmask, 0, 1) fac = -fac termbf = abs(term) if arrayflag: return N.where(N.equal(frozen, -1), 1.0, frozen) # 1.0 if doesn't converge else: return N.where(N.equal(frozen, -1), 1.0, frozen)[0] # 1.0 if doesn't converge def afprob(dfnum, dfden, F): """ Returns the 1-tailed significance level (p-value) of an F statistic given the degrees of freedom for the numerator (dfR-dfF) and the degrees of freedom for the denominator (dfF). Can handle multiple dims for F. Usage: afprob(dfnum, dfden, F) where usually dfnum=dfbn, dfden=dfwn """ if isinstance(F, N.ArrayType): return abetai(0.5*dfden, 0.5*dfnum, dfden/(1.0*dfden+dfnum*F)) else: return abetai(0.5*dfden, 0.5*dfnum, dfden/float(dfden+dfnum*F)) def abetacf(a, b, x, verbose=1): """ Evaluates the continued fraction form of the incomplete Beta function, betai. (Adapted from: Numerical Recipies in C.) Can handle multiple dimensions for x.
Usage: abetacf(a,b,x,verbose=1) """ ITMAX = 200 EPS = 3.0e-7 arrayflag = 1 if isinstance(x, N.ArrayType): frozen = N.ones(x.shape, N.Float) * -1 # start out w/ -1s, should replace all else: arrayflag = 0 frozen = N.array([-1]) x = N.array([x]) mask = N.zeros(x.shape) bm = az = am = 1.0 qab = a+b qap = a+1.0 qam = a-1.0 bz = 1.0-qab*x/qap for i in range(ITMAX+1): if N.sum(N.ravel(N.equal(frozen, -1))) == 0: break em = float(i+1) tem = em + em d = em*(b-em)*x/((qam+tem)*(a+tem)) ap = az + d*am bp = bz+d*bm d = -(a+em)*(qab+em)*x/((qap+tem)*(a+tem)) app = ap+d*az bpp = bp+d*bz aold = az*1 am = ap/bpp bm = bp/bpp az = app/bpp bz = 1.0 newmask = N.less(abs(az-aold), EPS*abs(az)) frozen = N.where(newmask*N.equal(mask, 0), az, frozen) mask = N.clip(mask+newmask, 0, 1) noconverge = asum(N.equal(frozen, -1)) if noconverge != 0 and verbose: print('a or b too big, or ITMAX too small in Betacf for ', noconverge, ' elements') if arrayflag: return frozen else: return frozen[0] def agammln(xx): """ Returns the gamma function of xx. Gamma(z) = Integral(0,infinity) of t^(z-1)exp(-t) dt. Adapted from: Numerical Recipies in C. Can handle multiple dims ... but probably doesn't normally have to. Usage: agammln(xx) """ coeff = [76.18009173, -86.50532033, 24.01409822, -1.231739516, 0.120858003e-2, -0.536382e-5] x = xx - 1.0 tmp = x + 5.5 tmp = tmp - (x+0.5)*N.log(tmp) ser = 1.0 for j in range(len(coeff)): x = x + 1 ser = ser + coeff[j]/x return -tmp + N.log(2.50662827465*ser) def abetai(a, b, x, verbose=1): """ Returns the incomplete beta function: I-sub-x(a,b) = 1/B(a,b)*(Integral(0,x) of t^(a-1)(1-t)^(b-1) dt) where a,b>0 and B(a,b) = G(a)*G(b)/(G(a+b)) where G(a) is the gamma function of a. The continued fraction formulation is implemented here, using the betacf function. (Adapted from: Numerical Recipies in C.) Can handle multiple dimensions. Usage: abetai(a,b,x,verbose=1) """ TINY = 1e-15 if isinstance(a, N.ArrayType): if asum(N.less(x, 0)+N.greater(x, 1)) != 0: raise ValueError('Bad x in abetai') x = N.where(N.equal(x, 0), TINY, x) x = N.where(N.equal(x, 1.0), 1-TINY, x) bt = N.where(N.equal(x, 0)+N.equal(x, 1), 0, -1) exponents = (gammln(a+b)-gammln(a)-gammln(b)+a*N.log(x)+b * N.log(1.0-x)) # 746 (below) is the MAX POSSIBLE BEFORE OVERFLOW exponents = N.where(N.less(exponents, -740), -740, exponents) bt = N.exp(exponents) if isinstance(x, N.ArrayType): ans = N.where(N.less(x, (a+1)/(a+b+2.0)), bt*abetacf(a, b, x, verbose)/float(a), 1.0-bt*abetacf(b, a, 1.0-x, verbose)/float(b)) else: if x < (a+1)/(a+b+2.0): ans = bt*abetacf(a, b, x, verbose)/float(a) else: ans = 1.0-bt*abetacf(b, a, 1.0-x, verbose)/float(b) return ans # AANOVA CALCULATIONS import LinearAlgebra LA = LinearAlgebra def aglm(data, para): """ Calculates a linear model fit ... anova/ancova/lin-regress/t-test/etc. Taken from: Peterson et al. Statistical limitations in functional neuroimaging I. Non-inferential methods and statistical models. Phil Trans Royal Soc Lond B 354: 1239-1260. Usage: aglm(data,para) Returns: statistic, p-value ??? 
""" if len(para) != len(data): print("data and para must be same length in aglm") return n = len(para) p = pstat.aunique(para) x = N.zeros((n, len(p))) # design matrix for l in range(len(p)): x[:, l] = N.equal(para, p[l]) b = N.dot(N.dot(LA.inverse(N.dot(N.transpose(x), x)), # i.e., b=inv(X'X)X'Y N.transpose(x)), data) diffs = (data - N.dot(x, b)) s_sq = 1./(n-len(p)) * N.dot(N.transpose(diffs), diffs) if len(p) == 2: # ttest_ind c = N.array([1, -1]) df = n-2 fact = asum(1.0/asum(x, 0)) # i.e., 1/n1 + 1/n2 + 1/n3 ... t = N.dot(c, b) / N.sqrt(s_sq*fact) probs = abetai(0.5*df, 0.5, float(df)/(df+t*t)) return t, probs def aF_oneway(*args): """ Performs a 1-way ANOVA, returning an F-value and probability given any number of groups. From Heiman, pp.394-7. Usage: aF_oneway (*args) where *args is 2 or more arrays, one per treatment group Returns: f-value, probability """ na = len(args) # ANOVA on 'na' groups, each in it's own array alldata = [] alldata = N.concatenate(args) bign = len(alldata) sstot = ass(alldata)-(asquare_of_sums(alldata)/float(bign)) ssbn = 0 for a in args: ssbn = ssbn + asquare_of_sums(N.array(a))/float(len(a)) ssbn = ssbn - (asquare_of_sums(alldata)/float(bign)) sswn = sstot-ssbn dfbn = na-1 dfwn = bign - na msb = ssbn/float(dfbn) msw = sswn/float(dfwn) f = msb/msw prob = fprob(dfbn, dfwn, f) return f, prob def aF_value(ER, EF, dfR, dfF): """ Returns an F-statistic given the following: ER = error associated with the null hypothesis (the Restricted model) EF = error associated with the alternate hypothesis (the Full model) dfR = degrees of freedom the Restricted model dfF = degrees of freedom associated with the Restricted model """ return ((ER-EF)/float(dfR-dfF) / (EF/float(dfF))) def outputfstats(Enum, Eden, dfnum, dfden, f, prob): Enum = round(Enum, 3) Eden = round(Eden, 3) dfnum = round(Enum, 3) dfden = round(dfden, 3) f = round(f, 3) prob = round(prob, 3) suffix = '' # for *s after the p-value if prob < 0.001: suffix = ' ***' elif prob < 0.01: suffix = ' **' elif prob < 0.05: suffix = ' *' title = [['EF/ER', 'DF', 'Mean Square', 'F-value', 'prob', '']] lofl = title+[[Enum, dfnum, round(Enum/float(dfnum), 3), f, prob, suffix], [Eden, dfden, round(Eden/float(dfden), 3), '', '', '']] pstat.printcc(lofl) return def F_value_multivariate(ER, EF, dfnum, dfden): """ Returns an F-statistic given the following: ER = error associated with the null hypothesis (the Restricted model) EF = error associated with the alternate hypothesis (the Full model) dfR = degrees of freedom the Restricted model dfF = degrees of freedom associated with the Restricted model where ER and EF are matrices from a multivariate F calculation. """ if type(ER) in [int, float]: ER = N.array([[ER]]) if type(EF) in [int, float]: EF = N.array([[EF]]) n_um = (LA.determinant(ER) - LA.determinant(EF)) / float(dfnum) d_en = LA.determinant(EF) / float(dfden) return n_um / d_en # ASUPPORT FUNCTIONS def asign(a): """ Usage: asign(a) Returns: array shape of a, with -1 where a<0 and +1 where a>=0 """ a = N.asarray(a) if ((isinstance(a, float)) or (isinstance(a, int))): return a-a-N.less(a, 0)+N.greater(a, 0) else: return N.zeros(N.shape(a))-N.less(a, 0)+N.greater(a, 0) def asum(a, dimension=None, keepdims=0): """ An alternative to the Numeric.add.reduce function, which allows one to (1) collapse over multiple dimensions at once, and/or (2) to retain all dimensions in the original array (squashing one down to size. 
Dimension can equal None (ravel array first), an integer (the dimension over which to operate), or a sequence (operate over multiple dimensions). If keepdims=1, the resulting array will have as many dimensions as the input array. Usage: asum(a, dimension=None, keepdims=0) Returns: array summed along 'dimension'(s), same _number_ of dims if keepdims=1 """ if isinstance(a, N.ArrayType) and a.typecode() in ['l', 's', 'b']: a = a.astype(N.Float) if dimension is None: s = N.sum(N.ravel(a)) elif type(dimension) in [int, float]: s = N.add.reduce(a, dimension) if keepdims == 1: shp = list(a.shape) shp[dimension] = 1 s = N.reshape(s, shp) else: # must be a SEQUENCE of dims to sum over dims = sorted(dimension) dims.reverse() s = a * 1.0 for dim in dims: s = N.add.reduce(s, dim) if keepdims == 1: shp = list(a.shape) for dim in dims: shp[dim] = 1 s = N.reshape(s, shp) return s def acumsum(a, dimension=None): """ Returns an array consisting of the cumulative sum of the items in the passed array. Dimension can equal None (ravel array first), an integer (the dimension over which to operate), or a sequence (operate over multiple dimensions, but this last one just barely makes sense). Usage: acumsum(a,dimension=None) """ if dimension is None: a = N.ravel(a) dimension = 0 if type(dimension) in [list, tuple, N.ArrayType]: dimension = sorted(dimension) dimension.reverse() for d in dimension: a = N.add.accumulate(a, d) return a else: return N.add.accumulate(a, dimension) def ass(inarray, dimension=None, keepdims=0): """ Squares each value in the passed array, adds these squares & returns the result. Unfortunate function name. :-) Defaults to ALL values in the array. Dimension can equal None (ravel array first), an integer (the dimension over which to operate), or a sequence (operate over multiple dimensions). Set keepdims=1 to maintain the original number of dimensions. Usage: ass(inarray, dimension=None, keepdims=0) Returns: sum-along-'dimension' for (inarray*inarray) """ if dimension is None: inarray = N.ravel(inarray) dimension = 0 return asum(inarray*inarray, dimension, keepdims) def asummult(array1, array2, dimension=None, keepdims=0): """ Multiplies elements in array1 and array2, element by element, and returns the sum (along 'dimension') of all resulting multiplications. Dimension can equal None (ravel array first), an integer (the dimension over which to operate), or a sequence (operate over multiple dimensions). A trivial function, but included for completeness. Usage: asummult(array1,array2,dimension=None,keepdims=0) """ if dimension is None: array1 = N.ravel(array1) array2 = N.ravel(array2) dimension = 0 return asum(array1*array2, dimension, keepdims) def asquare_of_sums(inarray, dimension=None, keepdims=0): """ Adds the values in the passed array, squares that sum, and returns the result. Dimension can equal None (ravel array first), an integer (the dimension over which to operate), or a sequence (operate over multiple dimensions). If keepdims=1, the returned array will have the same NUMBER of dimensions as the original. 
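Illustrative sketch (hypothetical input):
>>> sq = asquare_of_sums(N.array([1., 2., 3.]))  # (1+2+3)**2 == 36.0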
Usage: asquare_of_sums(inarray, dimension=None, keepdims=0) Returns: the square of the sum over dim(s) in dimension """ if dimension is None: inarray = N.ravel(inarray) dimension = 0 s = asum(inarray, dimension, keepdims) if isinstance(s, N.ArrayType): return s.astype(N.Float)*s else: return float(s)*s def asumdiffsquared(a, b, dimension=None, keepdims=0): """ Takes pairwise differences of the values in arrays a and b, squares these differences, and returns the sum of these squares. Dimension can equal None (ravel array first), an integer (the dimension over which to operate), or a sequence (operate over multiple dimensions). keepdims=1 means the return shape = len(a.shape) = len(b.shape) Usage: asumdiffsquared(a,b) Returns: sum[ravel(a-b)**2] """ if dimension is None: N.ravel(a) # inarray dimension = 0 return asum((a-b)**2, dimension, keepdims) def ashellsort(inarray): """ Shellsort algorithm. Sorts a 1D-array. Usage: ashellsort(inarray) Returns: sorted-inarray, sorting-index-vector (for original array) """ n = len(inarray) svec = inarray * 1.0 ivec = list(range(n)) gap = n // 2 # integer division needed while gap > 0: for i in range(gap, n): for j in range(i-gap, -1, -gap): while j >= 0 and svec[j] > svec[j+gap]: temp = svec[j] svec[j] = svec[j+gap] svec[j+gap] = temp itemp = ivec[j] ivec[j] = ivec[j+gap] ivec[j+gap] = itemp gap = gap // 2 # integer division needed # svec is now sorted input vector, ivec has the order svec[i] = vec[ivec[i]] return svec, ivec def arankdata(inarray): """ Ranks the data in inarray, dealing with ties appropriately. Assumes a 1D inarray. Adapted from Gary Perlman's |Stat ranksort. Usage: arankdata(inarray) Returns: array of length equal to inarray, containing rank scores """ n = len(inarray) svec, ivec = ashellsort(inarray) sumranks = 0 dupcount = 0 newarray = N.zeros(n, N.Float) for i in range(n): sumranks = sumranks + i dupcount = dupcount + 1 if i == n-1 or svec[i] != svec[i+1]: averank = sumranks / float(dupcount) + 1 for j in range(i-dupcount+1, i+1): newarray[ivec[j]] = averank sumranks = 0 dupcount = 0 return newarray def afindwithin(data): """ Returns a binary vector, 1=within-subject factor, 0=between. Input equals the entire data array (i.e., column 1=random factor, last column = measured values.
Usage: afindwithin(data) data in |Stat format """ numfact = len(data[0])-2 withinvec = [0]*numfact for col in range(1, numfact+1): rows = pstat.linexand(data, col, pstat.unique(pstat.colex(data, 1))[0]) # get 1 level of this factor if len(pstat.unique(pstat.colex(rows, 0))) < len(rows): # if fewer subjects than scores on this factor withinvec[col-1] = 1 return withinvec # RE-DEFINE DISPATCHES TO INCLUDE ARRAYS # CENTRAL TENDENCY: geometricmean = Dispatch((lgeometricmean, (list, tuple)), (ageometricmean, (N.ArrayType,))) harmonicmean = Dispatch((lharmonicmean, (list, tuple)), (aharmonicmean, (N.ArrayType,))) mean = Dispatch((lmean, (list, tuple)), (amean, (N.ArrayType,))) median = Dispatch((lmedian, (list, tuple)), (amedian, (N.ArrayType,))) medianscore = Dispatch((lmedianscore, (list, tuple)), (amedianscore, (N.ArrayType,))) mode = Dispatch((lmode, (list, tuple)), (amode, (N.ArrayType,))) tmean = Dispatch((atmean, (N.ArrayType,))) tvar = Dispatch((atvar, (N.ArrayType,))) tstdev = Dispatch((atstdev, (N.ArrayType,))) tsem = Dispatch((atsem, (N.ArrayType,))) # VARIATION: moment = Dispatch((lmoment, (list, tuple)), (amoment, (N.ArrayType,))) variation = Dispatch((lvariation, (list, tuple)), (avariation, (N.ArrayType,))) skew = Dispatch((lskew, (list, tuple)), (askew, (N.ArrayType,))) kurtosis = Dispatch((lkurtosis, (list, tuple)), (akurtosis, (N.ArrayType,))) describe = Dispatch((ldescribe, (list, tuple)), (adescribe, (N.ArrayType,))) # DISTRIBUTION TESTS skewtest = Dispatch((askewtest, (list, tuple)), (askewtest, (N.ArrayType,))) kurtosistest = Dispatch((akurtosistest, (list, tuple)), (akurtosistest, (N.ArrayType,))) normaltest = Dispatch((anormaltest, (list, tuple)), (anormaltest, (N.ArrayType,))) # FREQUENCY STATS: itemfreq = Dispatch((litemfreq, (list, tuple)), (aitemfreq, (N.ArrayType,))) scoreatpercentile = Dispatch((lscoreatpercentile, (list, tuple)), (ascoreatpercentile, (N.ArrayType,))) percentileofscore = Dispatch((lpercentileofscore, (list, tuple)), (apercentileofscore, (N.ArrayType,))) histogram = Dispatch((lhistogram, (list, tuple)), (ahistogram, (N.ArrayType,))) cumfreq = Dispatch((lcumfreq, (list, tuple)), (acumfreq, (N.ArrayType,))) relfreq = Dispatch((lrelfreq, (list, tuple)), (arelfreq, (N.ArrayType,))) # VARIABILITY: obrientransform = Dispatch((lobrientransform, (list, tuple)), (aobrientransform, (N.ArrayType,))) samplevar = Dispatch((lsamplevar, (list, tuple)), (asamplevar, (N.ArrayType,))) samplestdev = Dispatch((lsamplestdev, (list, tuple)), (asamplestdev, (N.ArrayType,))) signaltonoise = Dispatch((asignaltonoise, (N.ArrayType,)),) var = Dispatch((lvar, (list, tuple)), (avar, (N.ArrayType,))) stdev = Dispatch((lstdev, (list, tuple)), (astdev, (N.ArrayType,))) sterr = Dispatch((lsterr, (list, tuple)), (asterr, (N.ArrayType,))) sem = Dispatch((lsem, (list, tuple)), (asem, (N.ArrayType,))) z = Dispatch((lz, (list, tuple)), (az, (N.ArrayType,))) zs = Dispatch((lzs, (list, tuple)), (azs, (N.ArrayType,))) # TRIMMING FCNS: threshold = Dispatch((athreshold, (N.ArrayType,)),) trimboth = Dispatch((ltrimboth, (list, tuple)), (atrimboth, (N.ArrayType,))) trim1 = Dispatch((ltrim1, (list, tuple)), (atrim1, (N.ArrayType,))) # CORRELATION FCNS: paired = Dispatch((lpaired, (list, tuple)), (apaired, (N.ArrayType,))) pearsonr = Dispatch((lpearsonr, (list, tuple)), (apearsonr, (N.ArrayType,))) spearmanr = Dispatch((lspearmanr, (list, tuple)), (aspearmanr, (N.ArrayType,))) pointbiserialr = Dispatch((lpointbiserialr, (list, tuple)), (apointbiserialr, (N.ArrayType,))) kendalltau = 
Dispatch((lkendalltau, (list, tuple)), (akendalltau, (N.ArrayType,))) linregress = Dispatch((llinregress, (list, tuple)), (alinregress, (N.ArrayType,))) # INFERENTIAL STATS: ttest_1samp = Dispatch((lttest_1samp, (list, tuple)), (attest_1samp, (N.ArrayType,))) ttest_ind = Dispatch((lttest_ind, (list, tuple)), (attest_ind, (N.ArrayType,))) ttest_rel = Dispatch((lttest_rel, (list, tuple)), (attest_rel, (N.ArrayType,))) chisquare = Dispatch((lchisquare, (list, tuple)), (achisquare, (N.ArrayType,))) ks_2samp = Dispatch((lks_2samp, (list, tuple)), (aks_2samp, (N.ArrayType,))) mannwhitneyu = Dispatch((lmannwhitneyu, (list, tuple)), (amannwhitneyu, (N.ArrayType,))) tiecorrect = Dispatch((ltiecorrect, (list, tuple)), (atiecorrect, (N.ArrayType,))) ranksums = Dispatch((lranksums, (list, tuple)), (aranksums, (N.ArrayType,))) wilcoxont = Dispatch((lwilcoxont, (list, tuple)), (awilcoxont, (N.ArrayType,))) kruskalwallish = Dispatch((lkruskalwallish, (list, tuple)), (akruskalwallish, (N.ArrayType,))) friedmanchisquare = Dispatch((lfriedmanchisquare, (list, tuple)), (afriedmanchisquare, (N.ArrayType,))) # PROBABILITY CALCS: chisqprob = Dispatch((lchisqprob, (int, float)), (achisqprob, (N.ArrayType,))) zprob = Dispatch((lzprob, (int, float)), (azprob, (N.ArrayType,))) ksprob = Dispatch((lksprob, (int, float)), (aksprob, (N.ArrayType,))) fprob = Dispatch((lfprob, (int, float)), (afprob, (N.ArrayType,))) betacf = Dispatch((lbetacf, (int, float)), (abetacf, (N.ArrayType,))) betai = Dispatch((lbetai, (int, float)), (abetai, (N.ArrayType,))) erfcc = Dispatch((lerfcc, (int, float)), (aerfcc, (N.ArrayType,))) gammln = Dispatch((lgammln, (int, float)), (agammln, (N.ArrayType,))) # ANOVA FUNCTIONS: F_oneway = Dispatch((lF_oneway, (list, tuple)), (aF_oneway, (N.ArrayType,))) F_value = Dispatch((lF_value, (list, tuple)), (aF_value, (N.ArrayType,))) # SUPPORT FUNCTIONS: incr = Dispatch((lincr, (list, tuple, N.ArrayType)), ) sum = Dispatch((lsum, (list, tuple)), (asum, (N.ArrayType,))) cumsum = Dispatch((lcumsum, (list, tuple)), (acumsum, (N.ArrayType,))) ss = Dispatch((lss, (list, tuple)), (ass, (N.ArrayType,))) summult = Dispatch((lsummult, (list, tuple)), (asummult, (N.ArrayType,))) square_of_sums = Dispatch((lsquare_of_sums, (list, tuple)), (asquare_of_sums, (N.ArrayType,))) sumdiffsquared = Dispatch((lsumdiffsquared, (list, tuple)), (asumdiffsquared, (N.ArrayType,))) shellsort = Dispatch((lshellsort, (list, tuple)), (ashellsort, (N.ArrayType,))) rankdata = Dispatch((lrankdata, (list, tuple)), (arankdata, (N.ArrayType,))) findwithin = Dispatch((lfindwithin, (list, tuple)), (afindwithin, (N.ArrayType,))) # END OF NUMERIC FUNCTION BLOCK # END OF STATISTICAL FUNCTIONS except ImportError: pass bx-python-0.8.13/lib/psyco_full.py000066400000000000000000000001761415666465100170540ustar00rootroot00000000000000""" Attempt to call psyco.full, but ignore any errors. 
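Psyco is an optional, Python 2-only JIT compiler; the broad try/except below simply turns this module into a no-op on interpreters where psyco cannot be imported or enabled.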
""" try: import psyco psyco.full() except Exception: pass bx-python-0.8.13/pyproject.toml000066400000000000000000000022011415666465100164600ustar00rootroot00000000000000[build-system] requires = ["cython", "oldest-supported-numpy", "setuptools", "wheel"] build-backend = "setuptools.build_meta" [tool.cibuildwheel] test-command = "python -c 'import bx, bx.align, bx.align.sitemask, bx.align.tools, bx.arrays, bx.bbi, bx.cookbook, bx.intervals, bx.intervals.operations, bx.intseq, bx.misc, bx.motif, bx.motif.io, bx.motif.logo, bx.phylo, bx.pwm, bx.seq, bx.tabular, bx_extras'" [tool.cibuildwheel.linux] before-all = """ if command -v yum; then yum -y install zlib-devel elif command -v apk; then apk add zlib-dev else apt-get -y install libz-dev fi """ [tool.cibuildwheel.macos] # If there is no wheel for numpy on macOS (e.g. for the oldest supported numpy # for PyPy3.7), we need to build it using OpenBLAS (both before building the # bx-python wheel and when testing it), see # https://github.com/numpy/numpy/issues/15947#issuecomment-683355728 before-all = """ brew install openblas && cat > ~/.numpy-site.cfg < 2: out_file = open(args[2], 'w') else: out_file = sys.stdout binned = bool(options.binned) mask_fname = options.mask except Exception: doc_optparse.exit() if binned: scores_by_chrom = load_scores_ba_dir(score_fname) else: scores_by_chrom = load_scores_wiggle(score_fname) if mask_fname: masks = binned_bitsets_from_file(open(mask_fname)) else: masks = None for line in open(interval_fname): fields = line.split() chrom, start, stop = fields[0], int(fields[1]), int(fields[2]) total = 0 count = 0 min_score = 100000000 max_score = -100000000 for i in range(start, stop): if chrom in scores_by_chrom and scores_by_chrom[chrom][i]: # Skip if base is masked if masks and chrom in masks: if masks[chrom][i]: continue # Get the score, only count if not 'nan' score = scores_by_chrom[chrom][i] if not isNaN(score): total += score count += 1 max_score = max(score, max_score) min_score = min(score, min_score) if count > 0: avg = total/count else: avg = "nan" min_score = "nan" max_score = "nan" print("\t".join(map(str, [chrom, start, stop, avg, min_score, max_score])), file=out_file) out_file.close() if __name__ == "__main__": main() bx-python-0.8.13/scripts/align_print_template.py000077500000000000000000000020461415666465100220200ustar00rootroot00000000000000#!/usr/bin/env python """ Read an alignment from stdin and for each block print the result of evaluating `template_string` (in cheetah template format). The alignment block will be placed in the template context as `a` and the list of components as `c`. usage: %prog template [options] -f, --format = maf: Input format, maf (default) or axt """ import sys from bx import align from bx.cookbook import doc_optparse try: from Cheetah.Template import Template except ImportError: print("This script requires the Cheetah template modules", file=sys.stderr) sys.exit(-1) def main(): # Parse command line arguments options, args = doc_optparse.parse(__doc__) try: template = Template(args[0]) format = options.format if not format: format = "maf" except Exception: doc_optparse.exception() reader = align.get_reader(format, sys.stdin) for a in reader: template.a = a template.c = a.components print(template) if __name__ == "__main__": main() bx-python-0.8.13/scripts/axt_extract_ranges.py000077500000000000000000000034601415666465100215050ustar00rootroot00000000000000#!/usr/bin/env python """ Reads a list of intervals and an axt. 
Produces a new axt containing the portions of the original that overlapped the intervals usage: %prog interval_file refindex [options] < axt_file -m, --mincols=10: Minimum length (columns) required for alignment to be output """ import sys import bx.align.axt from bx import intervals from bx.cookbook import doc_optparse def __main__(): options, args = doc_optparse.parse(__doc__) try: range_filename = args[0] refindex = int(args[1]) if options.mincols: mincols = int(options.mincols) else: mincols = 10 except Exception: doc_optparse.exit() # Load Intervals intersecter = intervals.Intersecter() for line in open(range_filename): fields = line.split() intersecter.add_interval(intervals.Interval(int(fields[0]), int(fields[1]))) # Start axt on stdout out = bx.align.axt.Writer(sys.stdout) # Iterate over input axt for axt in bx.align.axt.Reader(sys.stdin): ref_component = axt.components[refindex] # Find overlap with reference component intersections = sorted(intersecter.find(ref_component.start, ref_component.end)) # Keep output axt ordered # Write each intersecting block for interval in intersections: start = max(interval.start, ref_component.start) end = min(interval.end, ref_component.end) sliced = axt.slice_by_component(refindex, start, end) good = True for c in sliced.components: if c.size < 1: good = False if good and sliced.text_size > mincols: out.write(sliced) # Close output axt out.close() if __name__ == "__main__": __main__() bx-python-0.8.13/scripts/axt_to_fasta.py000077500000000000000000000023321415666465100202710ustar00rootroot00000000000000#!/usr/bin/env python """ Application to convert AXT file to FASTA file. Reads an AXT file from standard input and writes a FASTA file to standard out. usage: %prog < axt_file > fasta_file """ __author__ = "Bob Harris (rsharris@bx.psu.edu)" import sys import bx.align.axt def usage(s=None): message = """ axt_to_fasta < axt_file > fasta_file """ if s is None: sys.exit(message) else: sys.exit(f"{s}\n{message}") def main(): # check the command line if len(sys.argv) > 1: usage("give me no arguments") # convert the alignment blocks reader = bx.align.axt.Reader(sys.stdin, support_ids=True, species1="", species2="") for a in reader: if "id" in a.attributes: id = a.attributes["id"] else: id = None print_component_as_fasta(a.components[0], id) print_component_as_fasta(a.components[1], id) print() # $$$ this should be moved to a bx.align.fasta module def print_component_as_fasta(c, id=None): header = f">{c.src}_{c.start}_{c.start + c.size}" if id is not None: header += " " + id print(header) print(c.text) if __name__ == "__main__": main() bx-python-0.8.13/scripts/axt_to_lav.py000077500000000000000000000104271415666465100177610ustar00rootroot00000000000000#!/usr/bin/env python """ Application to convert AXT file to LAV file. Reads an AXT file from standard input and writes a LAV file to standard out; some statistics are written to standard error. usage: %prog primary_spec secondary_spec [--silent] < axt_file > lav_file Each spec is of the form seq_file[:species_name]:lengths_file. - seq_file should be a format string for the file names for the individual sequences, with %s to be replaced by the alignment's src field. For example, "hg18/%s.nib" would prescribe files named "hg18/chr1.nib", "hg18/chr2.nib", etc. - species_name is optional. If present, it is prepended to the alignment's src field. - Lengths files provide the length of each chromosome (lav format needs this information but axt file does not contain it). 
The format is a series of lines of the form:

    <chromosome name> <length>

The chromosome field in each axt block must match some in the lengths file.
"""
__author__ = "Bob Harris (rsharris@bx.psu.edu)"

import sys

import bx.align.axt
import bx.align.lav


def usage(s=None):
    message = __doc__
    if s is None:
        sys.exit(message)
    else:
        sys.exit(f"{s}\n{message}")


def main():
    global debug

    primary = None
    secondary = None
    silent = False

    # pick off options
    args = sys.argv[1:]
    while len(args) > 0:
        arg = args.pop(0)
        val = None
        fields = arg.split("=", 1)
        if len(fields) == 2:
            arg = fields[0]
            val = fields[1]
            if val == "":
                usage("missing a value in %s=" % arg)
        if arg == "--silent" and val is None:
            silent = True
        elif primary is None and val is None:
            primary = arg
        elif secondary is None and val is None:
            secondary = arg
        else:
            usage("unknown argument: %s" % arg)

    if primary is None:
        usage("missing primary file name and length")
    if secondary is None:
        usage("missing secondary file name and length")

    try:
        (primaryFile, primary, primaryLengths) = parse_spec(primary)
    except Exception:
        usage("bad primary spec (must be seq_file[:species_name]:lengths_file)")
    try:
        (secondaryFile, secondary, secondaryLengths) = parse_spec(secondary)
    except Exception:
        usage("bad secondary spec (must be seq_file[:species_name]:lengths_file)")

    # read the lengths
    speciesToLengths = {}
    speciesToLengths[primary] = read_lengths(primaryLengths)
    speciesToLengths[secondary] = read_lengths(secondaryLengths)

    # read the alignments
    out = bx.align.lav.Writer(
        sys.stdout,
        attributes={
            "name_format_1": primaryFile,
            "name_format_2": secondaryFile})

    axtsRead = 0
    axtsWritten = 0
    for axtBlock in bx.align.axt.Reader(
            sys.stdin,
            species_to_lengths=speciesToLengths,
            species1=primary,
            species2=secondary,
            support_ids=True):
        axtsRead += 1
        out.write(axtBlock)
        axtsWritten += 1

    out.close()

    if not silent:
        sys.stderr.write("%d blocks read, %d written\n" % (axtsRead, axtsWritten))


def parse_spec(spec):
    # returns (seq_file,species_name,lengths_file)
    fields = spec.split(":")
    if len(fields) == 2:
        return (fields[0], "", fields[1])
    elif len(fields) == 3:
        return (fields[0], fields[1], fields[2])
    else:
        raise ValueError


def read_lengths(fileName):
    chromToLength = {}
    f = open(fileName)
    for lineNumber, line in enumerate(f):
        line = line.strip()
        if line == "":
            continue
        if line.startswith("#"):
            continue
        fields = line.split()
        if len(fields) != 2:
            raise ValueError("bad lengths line (%s:%d): %s" % (fileName, lineNumber, line))
        chrom = fields[0]
        try:
            length = int(fields[1])
        except ValueError:
            raise ValueError("bad lengths line (%s:%d): %s" % (fileName, lineNumber, line))
        if chrom in chromToLength:
            raise ValueError("%s appears more than once (%s:%d): %s" % (chrom, fileName, lineNumber, line))
        chromToLength[chrom] = length
    f.close()
    return chromToLength


if __name__ == "__main__":
    main()
bx-python-0.8.13/scripts/axt_to_maf.py000077500000000000000000000102011415666465100177300ustar00rootroot00000000000000
#!/usr/bin/env python
"""
Application to convert AXT file to MAF file. Reads an AXT file from standard
input and writes a MAF file to standard out; some statistics are written to
standard error.

axt_to_maf primary:lengths_file secondary:lengths_file < axt_file > maf_file
  --silent: prevents stats report

Lengths files provide the length of each chromosome (maf format needs this
information but axt file does not contain it). The format is a series of
lines of the form:

    <chromosome name> <length>

The chromosome field in each axt block must match some in the lengths file.
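For example, a lengths file might contain lines like (values illustrative):

    chr1 247249719
    chr2 242951149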
""" __author__ = "Bob Harris (rsharris@bx.psu.edu)" import copy import sys import bx.align.axt import bx.align.maf def usage(s=None): message = __doc__ if (s is None): sys.exit(message) else: sys.exit(f"{s}\n{message}") def main(): global debug ########## # parse the command line ########## primary = None secondary = None silent = False # pick off options args = sys.argv[1:] while (len(args) > 0): arg = args.pop(0) val = None fields = arg.split("=", 1) if (len(fields) == 2): arg = fields[0] val = fields[1] if (val == ""): usage("missing a value in %s=" % arg) if (arg == "--silent") and (val is None): silent = True elif (primary is None) and (val is None): primary = arg elif (secondary is None) and (val is None): secondary = arg else: usage("unknown argument: %s" % arg) if (primary is None): usage("missing primary species") if (secondary is None): usage("missing secondary species") fields = primary.split(":") if (len(fields) != 2): usage("bad primary species (must be species:lengths_file") primary = fields[0] primaryLengths = fields[1] fields = secondary.split(":") if (len(fields) != 2): usage("bad secondary species (must be species:lengths_file") secondary = fields[0] secondaryLengths = fields[1] ########## # read the lengths ########## speciesToLengths = {} speciesToLengths[primary] = read_lengths(primaryLengths) speciesToLengths[secondary] = read_lengths(secondaryLengths) ########## # read the alignments ########## out = bx.align.maf.Writer(sys.stdout) axtsRead = 0 axtsWritten = 0 for axtBlock in bx.align.axt.Reader( sys.stdin, species_to_lengths=speciesToLengths, species1=primary, species2=secondary): axtsRead += 1 p = axtBlock.get_component_by_src_start(primary) if (p is None): continue s = axtBlock.get_component_by_src_start(secondary) if (s is None): continue mafBlock = bx.align.Alignment(axtBlock.score, axtBlock.attributes) mafBlock.add_component(clone_component(p)) mafBlock.add_component(clone_component(s)) out.write(mafBlock) axtsWritten += 1 if (not silent): sys.stderr.write("%d blocks read, %d written\n" % (axtsRead, axtsWritten)) def clone_component(c): return bx.align.Component(c.src, c.start, c.size, c.strand, c.src_size, copy.copy(c.text)) def read_lengths(fileName): chromToLength = {} f = open(fileName) for lineNumber, line in enumerate(f): line = line.strip() if (line == ""): continue if (line.startswith("#")): continue fields = line.split() if (len(fields) != 2): raise ValueError("bad lengths line (%s:%d): %s" % (fileName, lineNumber, line)) chrom = fields[0] try: length = int(fields[1]) except ValueError: raise ValueError("bad lengths line (%s:%d): %s" % (fileName, lineNumber, line)) if (chrom in chromToLength): raise ValueError("%s appears more than once (%s:%d): %s" % (chrom, fileName, lineNumber, line)) chromToLength[chrom] = length f.close() return chromToLength if __name__ == "__main__": main() bx-python-0.8.13/scripts/bed_bigwig_profile.py000077500000000000000000000020171415666465100214170ustar00rootroot00000000000000#!/usr/bin/env python """ Create a site profile vector showing the average signal accumulated from a bigwig file around the center of each interval from a BED file. Output is the average signal value at that relative position across the intervals. 
usage: %prog bigwig_file.bw padding < bed_file.bed """ import sys from numpy import ( float64, floor, int32, isnan, savetxt, zeros ) from bx.bbi.bigwig_file import BigWigFile from bx.intervals.io import GenomicIntervalReader bw = BigWigFile(open(sys.argv[1])) padding = int(sys.argv[2]) totals = zeros(padding*2, dtype=float64) valid = zeros(padding*2, dtype=int32) for interval in GenomicIntervalReader(sys.stdin): center = floor((interval.start + interval.end) / 2) values = bw.get_as_array(interval.chrom, center - padding, center + padding) # Determine which positions had data and mask the rest for totalling invalid = isnan(values) values[invalid] = 0 totals += values valid += (~ invalid) savetxt(sys.stdout, totals/valid) bx-python-0.8.13/scripts/bed_build_windows.py000077500000000000000000000031271415666465100213030ustar00rootroot00000000000000#!/usr/bin/env python """ Build windows of length `window_size` over the sequences defined by `len_file` excluding regions in `gap_file`. After removing the gaps, windows of exactly `window_size` units will be placed in the remaining regions, with the extra space evenly placed between the windows. `len_file` is LEN format (name length) and `gap_file is BED (name start stop). usage: %prog len_file gap_file window_size """ import sys from bx.bitset_builders import binned_bitsets_from_file def main(): region_fname, exclude_fname, window_size = sys.argv[1], sys.argv[2], int(sys.argv[3]) exclude_bitsets = binned_bitsets_from_file(open(exclude_fname)) for line in open(region_fname): fields = line.split() chr, start, end = fields[0], 0, int(fields[1]) if chr not in exclude_bitsets: do_windows(chr, start, end, window_size) else: bits = exclude_bitsets[chr] assert end < bits.size e = 0 while True: s = bits.next_clear(e) if s > end: break e = bits.next_set(s) do_windows(chr, s, min(e, end), window_size) def do_windows(chr, start, end, window_size): length = end - start window_count = length // window_size if window_count == 0: return lost = length % window_size skip_amount = lost // window_count s = 0 for i in range(0, window_count): s += skip_amount print(chr, start + s, start + s + window_size) s += window_size if __name__ == "__main__": main() bx-python-0.8.13/scripts/bed_complement.py000077500000000000000000000024621415666465100205760ustar00rootroot00000000000000#!/usr/bin/env python """ Complement the regions of a bed file. Requires a file that maps source names to sizes. This should be in the simple LEN file format (each line contains a source name followed by a size, separated by whitespace). 
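For example, a LEN file might begin (values illustrative):

    chr1 247249719
    chr2 242951149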
usage: %prog bed_file chrom_length_file """ from bx.bitset_builders import binned_bitsets_from_file from bx.cookbook import doc_optparse def read_len(f): """Read a 'LEN' file and return a mapping from chromosome to length""" mapping = dict() for line in f: fields = line.split() mapping[fields[0]] = int(fields[1]) return mapping options, args = doc_optparse.parse(__doc__) try: in_fname, len_fname = args except Exception: doc_optparse.exit() bitsets = binned_bitsets_from_file(open(in_fname)) lens = read_len(open(len_fname)) for chrom in lens: if chrom in bitsets: bits = bitsets[chrom] bits.invert() len = lens[chrom] end = 0 while True: start = bits.next_set(end) if start == bits.size: break end = bits.next_clear(start) if end > len: end = len print("%s\t%d\t%d" % (chrom, start, end)) if end == len: break else: print("%s\t%d\t%d" % (chrom, 0, lens[chrom])) bx-python-0.8.13/scripts/bed_count_by_interval.py000077500000000000000000000014421415666465100221560ustar00rootroot00000000000000#!/usr/bin/env python """ For each interval in `bed1` count the number of intersecting regions in `bed2`. usage: %prog bed1 bed2 """ import sys from bx.intervals import ( Intersecter, Interval ) bed1, bed2 = sys.argv[1:3] ranges = {} for line in open(bed2): fields = line.strip().split() chrom, start, end, = fields[0], int(fields[1]), int(fields[2]) if chrom not in ranges: ranges[chrom] = Intersecter() ranges[chrom].add_interval(Interval(start, end)) for line in open(bed1): fields = line.strip().split() chrom, start, end = fields[0], int(fields[1]), int(fields[2]) other = " ".join(fields[3:]) out = " ".join(fields[:3] + [other]) if chrom in ranges: print(out, len(ranges[chrom].find(start, end))) else: print(out, 0) bx-python-0.8.13/scripts/bed_count_overlapping.py000077500000000000000000000014421415666465100221660ustar00rootroot00000000000000#!/usr/bin/env python """ For each interval in `bed1` count the number of intersecting regions in `bed2`. usage: %prog bed1 bed2 """ import sys from bx.intervals import ( Intersecter, Interval ) bed1, bed2 = sys.argv[1:3] ranges = {} for line in open(bed2): fields = line.strip().split() chrom, start, end, = fields[0], int(fields[1]), int(fields[2]) if chrom not in ranges: ranges[chrom] = Intersecter() ranges[chrom].add_interval(Interval(start, end)) for line in open(bed1): fields = line.strip().split() chrom, start, end = fields[0], int(fields[1]), int(fields[2]) other = " ".join(fields[3:]) out = " ".join(fields[:3] + [other]) if chrom in ranges: print(out, len(ranges[chrom].find(start, end))) else: print(out, 0) bx-python-0.8.13/scripts/bed_coverage.py000077500000000000000000000011441415666465100202220ustar00rootroot00000000000000#!/usr/bin/env python """ Print number of bases covered by all intervals in a bed file (bases covered by more than one interval are counted only once). Multiple bed files can be provided on the command line or to stdin. usage: %prog bed files ... """ import fileinput import sys from bx.bitset_builders import binned_bitsets_from_file bed_filenames = sys.argv[1:] if bed_filenames: input = fileinput.input(bed_filenames) else: input = sys.stdin bitsets = binned_bitsets_from_file(input) total = 0 for chrom in bitsets: total += bitsets[chrom].count_range(0, bitsets[chrom].size) print(total) bx-python-0.8.13/scripts/bed_coverage_by_interval.py000077500000000000000000000024501415666465100226210ustar00rootroot00000000000000#!/usr/bin/env python """ For each interval in `bed1` print the fraction of bases covered by `bed2`. 
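If the optional mask file is given, masked bases are excluded from both the covered count and the interval length, so the reported fraction is relative to unmasked bases only.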
usage: %prog bed1 bed2 [mask] """ import sys from bx.bitset import BinnedBitSet from bx.bitset_builders import binned_bitsets_from_file bed1_fname, bed2_fname = sys.argv[1:3] bitsets = binned_bitsets_from_file(open(bed2_fname)) def clone(bits): b = BinnedBitSet(bits.size) b.ior(bits) return b if len(sys.argv) > 3: mask_fname = sys.argv[3] mask = binned_bitsets_from_file(open(mask_fname)) new_bitsets = dict() for key in bitsets: if key in mask: b = clone(mask[key]) b.invert() b.iand(bitsets[key]) new_bitsets[key] = b bitsets = new_bitsets else: mask = None for line in open(bed1_fname): fields = line.split() chr, start, end = fields[0], int(fields[1]), int(fields[2]) bases_covered = 0 if chr in bitsets: bases_covered = bitsets[chr].count_range(start, end-start) length = end - start if mask and chr in mask: bases_masked = mask[chr].count_range(start, end-start) length -= bases_masked assert bases_covered <= length, f"{bases_covered!r}, {bases_masked!r}, {length!r}" if length == 0: print(0.0) else: print(bases_covered / length) bx-python-0.8.13/scripts/bed_diff_basewise_summary.py000077500000000000000000000020371415666465100230000ustar00rootroot00000000000000#!/usr/bin/env python """ Given two bed files print the number of bases covered 1) by both, 2) only by the first, and 3) only by the second. usage: %prog bed_file_1 bed_file_2 """ from bx.bitset_builders import binned_bitsets_from_file from bx.cookbook import doc_optparse def coverage(bitsets): total = 0 for chrom in bitsets: total += bitsets[chrom].count_range(0, bitsets[chrom].size) return total options, args = doc_optparse.parse(__doc__) try: in_fname, in2_fname = args except ValueError: doc_optparse.exit() bits1 = binned_bitsets_from_file(open(in_fname)) bits2 = binned_bitsets_from_file(open(in2_fname)) bits1_covered = coverage(bits1) bits2_covered = coverage(bits2) bitsets = dict() for key in bits1: if key in bits2: bits1[key].iand(bits2[key]) bitsets[key] = bits1[key] both_covered = coverage(bitsets) print("in both: \t%d" % both_covered) print("only in %s:\t%d" % (in_fname, bits1_covered - both_covered)) print("only in %s:\t%d" % (in2_fname, bits2_covered - both_covered)) bx-python-0.8.13/scripts/bed_extend_to.py000077500000000000000000000020541415666465100204210ustar00rootroot00000000000000#!/usr/bin/env python """ Read BED file and extend each record to the specified minimum length. If chromosome size information is provided trim extended intervals. usage: %prog amount [ chrom_file ] < bed_file """ import sys from bx.intervals.io import GenomicIntervalReader length = int(sys.argv[1]) chrom_len = None if len(sys.argv) > 2: chrom_len = {fields[0]: int(fields[1]) for fields in map(str.split, open(sys.argv[2]))} for interval in GenomicIntervalReader(sys.stdin): if interval.end - interval.start < length: start = interval.start end = interval.end # Extend in positive direction on strand if interval.strand == "+": end = start + length else: start = end - length # Trim if start < 0: start = 0 if chrom_len and end > chrom_len[interval.chrom]: end = chrom_len[interval.chrom] # Set new start and end interval.start = start interval.end = end # Output possibly adjusted interval print(interval) bx-python-0.8.13/scripts/bed_intersect.py000077500000000000000000000036451415666465100204370ustar00rootroot00000000000000#!/usr/bin/env python """ Find regions of first bed file that overlap regions in a second bed file. The output preserves all fields from the input. NOTE: -u and -d options are currently not functional! 
usage: %prog bed_file_1 bed_file_2 -m, --mincols=N: Require this much overlap (default 1bp) -u, --upstream_pad=N: upstream interval padding (default 0bp) -d, --downstream_pad=N: downstream interval padding (default 0bp) -v, --reverse: Print regions that DO NOT overlap -b, --booleans: Just print '1' if interval overlaps or '0' otherwise """ from warnings import warn from bx.bitset_builders import binned_bitsets_from_file from bx.cookbook import doc_optparse mincols = 1 upstream_pad = 0 downstream_pad = 0 options, args = doc_optparse.parse(__doc__) try: if options.mincols: mincols = int(options.mincols) if options.upstream_pad: upstream_pad = int(options.upstream_pad) if options.downstream_pad: downstream_pad = int(options.downstream_pad) reverse = bool(options.reverse) booleans = bool(options.booleans) in_fname, in2_fname = args except Exception: doc_optparse.exit() # Read first bed into some bitsets bitsets = binned_bitsets_from_file(open(in2_fname)) # Read second BED and intersect for line in open(in_fname): if line.startswith("#") or line.isspace(): continue fields = line.split() start, end = int(fields[1]), int(fields[2]) if start > end: warn("Bed interval start after end!") if fields[0] in bitsets and bitsets[fields[0]].count_range(start, end-start) >= mincols: if booleans: if reverse: print(0) else: print(1) elif not reverse: print(line, end=' ') else: if booleans: if reverse: print(1) else: print(0) elif reverse: print(line, end=' ') bx-python-0.8.13/scripts/bed_intersect_basewise.py000077500000000000000000000017031415666465100223120ustar00rootroot00000000000000#!/usr/bin/env python """ Find regions of first bed file that overlap regions in a second bed file. This program performs a base-by-base intersection, so only runs of bases that are covered in both of the inputs will be output. usage: %prog bed_file_1 bed_file_2 """ from bx.bitset_builders import binned_bitsets_from_file from bx.cookbook import doc_optparse options, args = doc_optparse.parse(__doc__) try: in_fname, in2_fname = args except ValueError: doc_optparse.exit() bits1 = binned_bitsets_from_file(open(in_fname)) bits2 = binned_bitsets_from_file(open(in2_fname)) bitsets = dict() for key in bits1: if key in bits2: bits1[key].iand(bits2[key]) bitsets[key] = bits1[key] for chrom in bitsets: bits = bitsets[chrom] end = 0 while True: start = bits.next_set(end) if start == bits.size: break end = bits.next_clear(start) print("%s\t%d\t%d" % (chrom, start, end)) bx-python-0.8.13/scripts/bed_merge_overlapping.py000077500000000000000000000014011415666465100221300ustar00rootroot00000000000000#!/usr/bin/env python """ Merge any overlapping regions of bed files. Bed files can be provided on the command line or on stdin. Merged regions are always reported on the '+' strand, and any fields beyond chrom/start/stop are lost. usage: %prog bed files ... 
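For example, the records "chr1 10 30" and "chr1 20 40" would be reported as the single merged record "chr1 10 40" (coordinates illustrative).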
""" import fileinput import sys from bx.bitset_builders import binned_bitsets_from_bed_file bed_filenames = sys.argv[1:] if bed_filenames: input = fileinput.input(bed_filenames) else: input = sys.stdin bitsets = binned_bitsets_from_bed_file(input) for chrom in bitsets: bits = bitsets[chrom] end = 0 while True: start = bits.next_set(end) if start == bits.size: break end = bits.next_clear(start) print("%s\t%d\t%d" % (chrom, start, end)) bx-python-0.8.13/scripts/bed_rand_intersect.py000077500000000000000000000127361415666465100214440ustar00rootroot00000000000000#!/usr/bin/env python """ From a set of regions and two sets of intervals inside those regions compute (for each region separately) the overlap between the two sets of intervals and the overlap in `nsamples` random coverings of the regions with intervals having the same lengths. Prints the z-score relative to the mean and sample stdev of the random coverings. Currently intervals must be in bed 3+ format. TODO: There are a few versions of this floating around, including a better/faster one using gap lists instead of bitsets. Need to track that down and merge as necessary. usage: %prog bounding_region_file intervals1 intervals2 nsamples """ import sys from numpy import zeros from bx.bitset import BitSet from bx.intervals.random_intervals import throw_random_bits from bx_extras import stats maxtries = 10 class MaxtriesException(Exception): pass def bit_clone(bits): """ Clone a bitset """ new = BitSet(bits.size) new.ior(bits) return new def throw_random(lengths, mask): """ Try multiple times to run 'throw_random' """ saved = None for i in range(maxtries): try: return throw_random_bits(lengths, mask) except MaxtriesException as e: saved = e continue raise saved def as_bits(region_start, region_length, intervals): """ Convert a set of intervals overlapping a region of a chromosome into a bitset for just that region with the bits covered by the intervals set. """ bits = BitSet(region_length) for chr, start, stop in intervals: bits.set_range(start - region_start, stop - start) return bits def interval_lengths(bits): """ Get the length distribution of all contiguous runs of set bits from """ end = 0 while True: start = bits.next_set(end) if start == bits.size: break end = bits.next_clear(start) yield end - start def count_overlap(bits1, bits2): """ Count the number of bits that overlap between two sets """ b = BitSet(bits1.size) b |= bits1 b &= bits2 return b.count_range(0, b.size) def overlapping_in_bed(fname, r_chr, r_start, r_stop): """ Get from a bed all intervals that overlap the region defined by r_chr, r_start, r_stop. 
""" rval = [] for line in open(fname): if line.startswith("#") or line.startswith("track"): continue fields = line.split() chr, start, stop = fields[0], int(fields[1]), int(fields[2]) if chr == r_chr and start < r_stop and stop >= r_start: rval.append((chr, max(start, r_start), min(stop, r_stop))) return rval def main(): region_fname = sys.argv[1] mask_fname = sys.argv[2] nsamples = int(sys.argv[3]) intervals1_fname = sys.argv[4] intervals2_fnames = sys.argv[5:] nfeatures = len(intervals2_fnames) total_actual = zeros(nfeatures) # total_lengths1 = 0 total_lengths2 = zeros(nfeatures) total_samples = zeros((nsamples, nfeatures)) for line in open(region_fname): # Load lengths for all intervals overlapping region fields = line.split() print("Processing region:", fields[3], file=sys.stderr) r_chr, r_start, r_stop = fields[0], int(fields[1]), int(fields[2]) r_length = r_stop - r_start # Load the mask mask = overlapping_in_bed(mask_fname, r_chr, r_start, r_stop) bits_mask = as_bits(r_start, r_length, mask) bits_not_masked = bit_clone(bits_mask) bits_not_masked.invert() # Load the first set intervals1 = overlapping_in_bed(intervals1_fname, r_chr, r_start, r_stop) bits1 = as_bits(r_start, r_length, intervals1) # Intersect it with the mask bits1.iand(bits_not_masked) # Sanity checks assert count_overlap(bits1, bits_mask) == 0 # For each data set for featnum, intervals2_fname in enumerate(intervals2_fnames): print(intervals2_fname, file=sys.stderr) intervals2 = overlapping_in_bed(intervals2_fname, r_chr, r_start, r_stop) bits2 = as_bits(r_start, r_length, intervals2) bits2.iand(bits_not_masked) assert count_overlap(bits2, bits_mask) == 0 # Observed values actual_overlap = count_overlap(bits1, bits2) total_actual[featnum] += actual_overlap # Sample lengths2 = list(interval_lengths(bits2)) total_lengths2[featnum] += sum(lengths2) for i in range(nsamples): # Build randomly covered bitmask for second set random2 = throw_random(lengths2, bits_mask) # Find intersection random2 &= bits1 # Print amount intersecting total_samples[i, featnum] += random2.count_range(0, random2.size) print(total_samples[i, featnum], file=sys.stderr) fraction_overlap = total_samples / total_lengths2 print("\t".join(intervals2_fnames)) print("\t".join(map(str, total_actual/total_lengths2))) for row in fraction_overlap: print("\t".join(map(str, row))) print("observed overlap: %d, sample mean: %d, sample stdev: %d" % (total_actual, stats.amean(total_samples), stats.asamplestdev(total_samples))) print("z-score:", (total_actual - stats.amean(total_samples)) / stats.asamplestdev(total_samples)) print("percentile:", sum(total_actual > total_samples) / nsamples) if __name__ == "__main__": main() bx-python-0.8.13/scripts/bed_subtract_basewise.py000077500000000000000000000017711415666465100221460ustar00rootroot00000000000000#!/usr/bin/env python """ Find continuous regions that are covered by the first bed file (`bed_file_1`) but not by the second bed file (`bed_file_2`) usage: %prog bed_file_1 bed_file_2 """ from bx.bitset_builders import binned_bitsets_from_file from bx.cookbook import doc_optparse def print_bits_as_bed(bits): end = 0 while True: start = bits.next_set(end) if start == bits.size: break end = bits.next_clear(start) print("%s\t%d\t%d" % (chrom, start, end)) options, args = doc_optparse.parse(__doc__) try: in_fname, in2_fname = args except ValueError: doc_optparse.exit() # Read first bed into some bitsets bitsets1 = binned_bitsets_from_file(open(in_fname)) bitsets2 = binned_bitsets_from_file(open(in2_fname)) for chrom in 
bitsets1: if chrom not in bitsets1: continue bits1 = bitsets1[chrom] if chrom in bitsets2: bits2 = bitsets2[chrom] bits2.invert() bits1.iand(bits2) print_bits_as_bed(bits1) bx-python-0.8.13/scripts/bnMapper.py000077500000000000000000000365611415666465100173740ustar00rootroot00000000000000#!/usr/bin/env python """Map features from the target species to the query species of a chain alignment file. This is intended for mapping relatively short features such as Chip-Seq peaks on TF binding events. Features that when mapped span multiple chains or multiple chromosomes are silently filtered out. TODO: (1)for narrowPeak input, map the predicted peak position. """ import logging import os import sys from functools import reduce from itertools import groupby from operator import attrgetter, itemgetter import numpy as np from bx.align import epo from bx.align.epo import bed_union as elem_u from bx.cookbook import argparse from bx.intervals.intersection import ( Interval, IntervalTree, ) elem_t = np.dtype([('chrom', np.str_, 30), ('start', np.int64), ('end', np.int64), ('id', np.str_, 100)]) narrowPeak_t = np.dtype([('chrom', np.str_, 30), ('start', np.int64), ('end', np.int64), ('id', np.str_, 100), ('score', np.int64), ('strand', np.str_, 1), ('signalValue', np.float), ('pValue', np.float), ('qValue', np.float), ('peak', np.int64)]) LOG_LEVELS = {"info": logging.INFO, "debug": logging.DEBUG, "silent": logging.ERROR} logging.basicConfig() log = logging.getLogger() class GIntervalTree(IntervalTree): """a set of IntervalTrees that is indexed by chromosomes""" def __init__(self): self._trees = {} def add(self, chrom, element): """insert an element. use this method as the IntervalTree one. this will simply call the IntervalTree.add method on the right tree :param chrom: chromosome :param element: the argument of IntervalTree.insert_interval :return: None """ self._trees.setdefault(chrom, IntervalTree()).insert_interval(element) def find(self, chrom, start, end): """find the intersecting elements :param chrom: chromosome :param start: start :param end: end :return: a list of intersecting elements""" tree = self._trees.get(chrom, None) if tree: return tree.find(start, end) # return always a list return [] def transform(elem, chain_CT_CQ, max_gap): """transform the coordinates of this elem into the other species. elem intersects this chain's ginterval. :return: a list of the type [(to_chr, start, end, elem[id]) ... 
]""" (chain, CT, CQ) = chain_CT_CQ start, end = max(elem['start'], chain.tStart) - chain.tStart, min(elem['end'], chain.tEnd) - chain.tStart assert np.all((CT[:, 1] - CT[:, 0]) == (CQ[:, 1] - CQ[:, 0])) to_chrom = chain.qName to_gab_start = chain.qStart start_idx = np.where(CT[:, 1] > start)[0][0] end_idx = np.where(CT[:, 0] < end)[0][-1] if start_idx > end_idx: # maps to a gap region on the other species return [] # apply the gap threshold if max_gap >= 0 and start_idx < end_idx - 1: if np.max(CT[(start_idx+1):end_idx, 0] - CT[start_idx:(end_idx-1), 1]) > max_gap or np.max(CQ[(start_idx+1):end_idx, 0] - CQ[start_idx:(end_idx-1), 1]) > max_gap: return [] assert start < CT[start_idx, 1] assert CT[end_idx, 0] < end to_start = CQ[start_idx, 0] + max(0, start - CT[start_idx, 0]) # correct if on middle of interval to_end = CQ[end_idx, 1] - max(0, CT[end_idx, 1] - end) # idem if start_idx == end_idx: # elem falls in a single run of matches slices = [(to_start, to_end)] else: slices = [(to_start, CQ[start_idx, 1])] slices += [(CQ[i, 0], CQ[i, 1]) for i in range(start_idx+1, end_idx)] slices.append((CQ[end_idx, 0], to_end)) if chain.qStrand == '-': Sz = chain.qEnd - chain.qStart slices = [(Sz-t[1], Sz-t[0]) for t in slices] return [(to_chrom, to_gab_start + t[0], to_gab_start + t[1], elem['id']) for t in slices] def union_elements(elements): """elements = [(chr, s, e, id), ...], this is to join elements that have a deletion in the 'to' species """ if len(elements) < 2: return elements assert {e[3] for e in elements} == {elements[0][3]}, "more than one id" el_id = elements[0][3] unioned_elements = [] for ch, chgrp in groupby(elements, key=itemgetter(0)): for (s, e) in elem_u(np.array([itemgetter(1, 2)(_) for _ in chgrp], dtype=np.uint)): if s < e: unioned_elements.append((ch, s, e, el_id)) assert len(unioned_elements) <= len(elements) return unioned_elements def transform_by_chrom(all_epo, from_elem_list, tree, chrom, opt, out_fd): BED4_FRM = "%s\t%d\t%d\t%s\n" BED12_FRM = "%s\t%d\t%d\t%s\t1000\t+\t%d\t%d\t0,0,0\t%d\t%s\t%s\n" NPEAK_FRM = "%s\t%d\t%d\t%s\t%d\t%s\t%f\t%f\t%f\t%d\n" assert len(set(from_elem_list['chrom'])) <= 1 mapped_elem_count = 0 mapped_summit_count = 0 for from_elem in from_elem_list: matching_block_ids = [attrgetter("value")(_) for _ in tree.find(chrom, from_elem['start'], from_elem['end'])] # do the actual mapping to_elem_slices = [_ for _ in (transform(from_elem, all_epo[i], opt.gap) for i in matching_block_ids) if _] """ # Original version: silently discard split alignments if len(to_elem_slices) > 1 or len(to_elem_slices) == 0: log.debug("%s no match or in different chain/chromosomes" % (str(from_elem))) continue to_elem_slices = to_elem_slices[0] """ """ Modified version below allows liftOver-like behavior of keeping the longest alignment when alignments are split across multiple chains. Added by Adam Diehl (adadiehl@umich.edu) """ max_elem_idx = 0 if len(to_elem_slices) == 0: log.debug("%s: no match in target: discarding." % (str(from_elem))) continue elif len(to_elem_slices) > 1 and opt.keep_split: log.debug("%s spans multiple chains/chromosomes. Using longest alignment." % (str(from_elem))) max_elem_len = 0 for i in range(len(to_elem_slices)): elem_len = to_elem_slices[i][-1][2] - to_elem_slices[i][0][2] if elem_len > max_elem_len: max_elem_len = elem_len max_elem_idx = i elif len(to_elem_slices) > 1: log.debug("%s spans multiple chains/chromosomes: discarding." 
% (str(from_elem))) continue to_elem_slices = to_elem_slices[max_elem_idx] """ End AGD modifications """ # apply threshold if (from_elem[2] - from_elem[1]) * opt.threshold > reduce(lambda b, a: a[2]-a[1] + b, to_elem_slices, 0): log.debug("%s did not pass threshold" % (str(from_elem))) continue # if to_species had insertions you can join elements to_elem_list = sorted(union_elements(to_elem_slices), key=lambda a: a[1]) if to_elem_list: mapped_elem_count += 1 log.debug("\tjoined to %d elements" % (len(to_elem_list))) start = to_elem_list[0][1] end = to_elem_list[-1][2] if opt.format == "BED4": for tel in to_elem_list: out_fd.write(BED4_FRM % tel) elif opt.format == "BED12": out_fd.write(BED12_FRM % ( to_elem_list[0][0], start, end, from_elem['id'], start, end, len(to_elem_list), ",".join("%d" % (e[2]-e[1]) for e in to_elem_list), ",".join("%d" % (e[1]-start) for e in to_elem_list))) else: # narrowPeak convention is to report the peak location relative to start peak = int((start + end)/2) - start if opt.in_format == "narrowPeak": # Map the peak location # sys.stderr.write("{}\n".format(from_elem)) matching_block_ids = [attrgetter("value")(_) for _ in tree.find(chrom, from_elem['peak'], from_elem['peak'])] p_elem_slices = [_ for _ in (transform(np.array((chrom, from_elem['peak'], from_elem['peak'], '.'), dtype=elem_t), all_epo[i], opt.gap) for i in matching_block_ids) if _] if len(p_elem_slices) >= 1: mapped_summit_count += 1 sys.stderr.write(f"{p_elem_slices}\n") # Make sure the peak is between the start and end positions if p_elem_slices[0][0][1] >= start and p_elem_slices[0][0][1] <= end: peak = p_elem_slices[0][0][1] - start else: mapped_summit_count -= 1 log.debug(f"Warning: elem {from_elem} summit mapped location falls outside the mapped element start and end. Using the mapped elem midpoint instead.") else: log.debug(f"Warning: elem {from_elem} summit maps to a gap region in the target alignment. Using the mapped elem midpoint instead.") out_fd.write(NPEAK_FRM % (to_elem_list[0][0], start, end, from_elem['id'], from_elem['score'], from_elem['strand'], from_elem['signalValue'], from_elem['pValue'], from_elem['qValue'], peak)) log.info("%s: %d of %d elements mapped" % (chrom, mapped_elem_count, from_elem_list.shape[0])) if opt.format == "narrowPeak" and opt.in_format == "narrowPeak": log.info("%s: %d peak summits from %d mapped elements mapped" % (chrom, mapped_summit_count, mapped_elem_count)) def transform_file(ELEMS, ofname, EPO, TREE, opt): "transform/map the elements of this file and dump the output on 'ofname'" BED4_FRM = "%s\t%d\t%d\t%s\n" log.info("%s (%d) elements ..." % (opt.screen and "screening" or "transforming", ELEMS.shape[0])) with open(ofname, 'w') as out_fd: if opt.screen: for elem in ELEMS.flat: matching_blocks = [attrgetter("value")(_) for _ in TREE.find(elem['chrom'], elem['start'], elem['end'])] assert set(matching_blocks) <= set(EPO.keys()) if matching_blocks: out_fd.write(BED4_FRM % elem) else: for chrom in set(ELEMS['chrom']): transform_by_chrom(EPO, ELEMS[ELEMS['chrom'] == chrom], TREE, chrom, opt, out_fd) log.info("DONE!") def loadChains(path): "name says it." 
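    # Returns a list of (chain_header, target_intervals, query_intervals)
    # triples, with coordinates normalized to the forward strand below.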
    EPO = epo.Chain._parse_file(path, True)

    # convert coordinates w.r.t the forward strand (into slices)
    # compute cummulative intervals
    for i in range(len(EPO)):
        ch, S, T, Q = EPO[i]
        if ch.tStrand == '-':
            ch = ch._replace(tEnd=ch.tSize - ch.tStart, tStart=ch.tSize - ch.tEnd)
        if ch.qStrand == '-':
            ch = ch._replace(qEnd=ch.qSize - ch.qStart, qStart=ch.qSize - ch.qEnd)
        EPO[i] = (ch, epo.cummulative_intervals(S, T), epo.cummulative_intervals(S, Q))

    # now each element of epo is (chain_header, target_intervals, query_intervals)
    assert all(t[0].tStrand == '+' for t in EPO), "all target strands should be +"
    return EPO


def loadFeatures(path, opt):
    """
    Load features. For BED, only BED4 columns are loaded.
    For narrowPeak, all columns are loaded.
    """

    log.info("loading from %s ..." % path)
    data = []
    if opt.in_format == "BED":
        with open(path) as fd:
            for line in fd:
                cols = line.split()
                data.append((cols[0], int(cols[1]), int(cols[2]), cols[3]))
        data = np.array(data, dtype=elem_t)
    else:
        with open(path) as fd:
            for line in fd:
                cols = line.split()
                data.append((
                    cols[0], int(cols[1]), int(cols[2]), cols[3], int(cols[4]),
                    cols[5], float(cols[6]), float(cols[7]), float(cols[8]),
                    int(cols[-1])+int(cols[1])))
        data = np.array(data, dtype=narrowPeak_t)
    return data


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description=__doc__, epilog="Olgert Denas (Taylor Lab)",
                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter)

    parser.add_argument("input", nargs='+',
                        help="Input to process. If more than one file is specified, all files will be mapped and placed on --output, which should be a directory.")
    parser.add_argument("alignment", help="Alignment file (.chain or .pkl)")
    parser.add_argument("-f", '--format', choices=("BED4", "BED12", "narrowPeak"), default="BED4",
                        help="Output format. BED4 output reports all aligned blocks as separate BED records. BED12 reports a single BED record for each mapped element, with individual blocks given in the BED12 fields. NarrowPeak reports a single narrowPeak record for each mapped element, in which the chromosome, start, end, and peak positions are mapped to the target species and all other columns are passed through unchanged.")
    parser.add_argument("-o", '--output', metavar="FILE", default='stdout',
                        type=lambda s: ((s in ('stdout', '-') and "/dev/stdout") or s),
                        help="Output file. Mandatory if more than one file is given as input.")
    parser.add_argument("-t", '--threshold', metavar="FLOAT", default=0., type=float,
                        help="Mapping threshold i.e., |elem| * threshold <= |mapped_elem|")
    parser.add_argument("-s", '--screen', default=False, action='store_true',
                        help="Only report elements in the alignment (without mapping). -t has no effect here (TODO)")
    parser.add_argument('-g', '--gap', type=int, default=-1,
                        help="Ignore elements with an insertion/deletion of this or bigger size.")
    parser.add_argument('-v', '--verbose', type=str, choices=list(LOG_LEVELS.keys()), default='info',
                        help='Verbosity level')
    parser.add_argument("-k", '--keep_split', default=False, action='store_true',
                        help="If elements span multiple chains, report the segment with the longest overlap instead of silently dropping them. (This is the default behavior for liftOver.)")
    parser.add_argument("-i", "--in_format", choices=["BED", "narrowPeak"], default="BED",
                        help="Input file format.")

    opt = parser.parse_args()
    log.setLevel(LOG_LEVELS[opt.verbose])

    # if there are multiple input files, output must be a directory
    if len(opt.input) > 1 and (not os.path.isdir(opt.output)):
        parser.error("For multiple inputs, output is mandatory and should be a dir.")

    # loading alignments from opt.alignment
    EPO = {ch[0].id: ch for ch in loadChains(opt.alignment)}

    # create an interval tree based on chain headers (from_species side)
    # for fast feature-to-chain_header searching
    log.info("indexing %d chains ..." % (len(EPO),))
    TREE = GIntervalTree()
    for gabid in EPO:
        chain, t, q = EPO[gabid]
        TREE.add(chain.tName, Interval(chain.tStart, chain.tEnd, chain.id))

    # transform elements
    if len(opt.input) > 1:
        for inpath in opt.input:
            if not os.path.isfile(inpath):
                log.warning("skipping %s (not a file) ..." % inpath)
                continue
            outpath = os.path.join(opt.output, os.path.basename(inpath))
            if os.path.isfile(outpath):
                log.warning("overwriting %s ..." % outpath)
            transform_file(loadFeatures(inpath, opt), outpath, EPO, TREE, opt)
    else:
        transform_file(loadFeatures(opt.input[0], opt), opt.output, EPO, TREE, opt)
bx-python-0.8.13/scripts/div_snp_table_chr.py000077500000000000000000000115301415666465100212620ustar00rootroot00000000000000
#!/usr/bin/env python
"""
FIXME!

usage: %prog feature.bed ar.bed snp.bed div_directory [options]
    -m, --mask=M: Mask AR and features with this file
    -s, --suffix=S: append suffix to chromosomes to get filenames from div_directory
    -l, --lens=l: Set chromosome ends using LEN file
"""

import sys

from bx.bitset import BinnedBitSet
from bx.bitset_builders import binned_bitsets_from_file
from bx.cookbook import doc_optparse


def main():
    options, args = doc_optparse.parse(__doc__)
    try:
        lens = {}
        if options.lens:
            for line in open(options.lens):
                chrom, length = line.split()
                lens[chrom] = int(length)

        if options.suffix:
            suffix = options.suffix
        else:
            suffix = ""

        print("\nReading feature", end=' ', file=sys.stderr)
        interval_file = open(args[0])
        feature = binned_bitsets_from_file(interval_file, lens=lens)
        interval_file.close()

        # reuse interval file
        intervals = {}
        interval_file = open(args[0])
        for line in interval_file:
            fields = line.split()
            chrom, start, end = fields[0], int(fields[1]), int(fields[2])
            if chrom not in intervals:
                intervals[chrom] = []
            intervals[chrom].append([start, end])
        interval_file.close()

        print("\nReading ar", end=' ', file=sys.stderr)
        ar = binned_bitsets_from_file(open(args[1]), lens=lens)

        print("\nReading snps", end=' ', file=sys.stderr)
        snp = binned_bitsets_from_file(open(args[2]), lens=lens)
        snp_mask = clone_inverted(snp)
        snp_copy = clone(snp)

        print("\nMasking AR", end=' ', file=sys.stderr)
        ar_mask = clone_inverted(ar)
        print(file=sys.stderr)

        dirname = args[3]

        if options.mask:
            mask = binned_bitsets_from_file(open(options.mask), lens=lens)
        else:
            mask = None
    except Exception:
        doc_optparse.exit()

    if mask:
        for chrom in mask.keys():
            if chrom in feature:
                feature[chrom].iand(mask[chrom])
            if chrom in ar:
                ar[chrom].iand(mask[chrom])

    # divergence and snp counts for all features
    feature_div_count = 0
    feature_snp_count = 0
    ar_div_count = 0
    ar_snp_count = 0

    # collect snp and div
    for chr in feature.keys():
        if chr not in snp:
            continue
        if chr not in ar:
            continue

        print("reading %s ..."
% chr, end=' ', file=sys.stderr) try: div = binned_bitsets_from_file(open(dirname + "/%s.bed" % (chr+suffix)), lens=lens) except Exception: print("%s.bed not found" % chr, file=sys.stderr) continue div[chr].iand(snp_mask[chr]) # div/snp sites count snp-only div_copy = clone(div) print("AR:", chr, end=' ', file=sys.stderr) snp[chr].iand(ar[chr]) div[chr].iand(ar[chr]) snp_count = snp[chr].count_range(0, snp[chr].size) ar_snp_count += snp_count print(snp_count, end=' ', file=sys.stderr) try: div_count = div[chr].count_range(0, div[chr].size) ar_div_count += div_count print(div_count, file=sys.stderr) except Exception: print(chr, "failed", file=sys.stderr) div = div_copy snp[chr] = snp_copy[chr] print("feature:", chr, end=' ', file=sys.stderr) feature[chr].iand(ar_mask[chr]) # clip to non-AR only snp[chr].iand(feature[chr]) div[chr].iand(feature[chr]) feature_snp_count += snp[chr].count_range(0, snp[chr].size) print(snp[chr].count_range(0, snp[chr].size), div[chr].count_range(0, div[chr].size), file=sys.stderr) feature_div_count += div[chr].count_range(0, div[chr].size) print(snp[chr].count_range(0, snp[chr].size), div[chr].count_range(0, div[chr].size), file=sys.stderr) # Note: can loop over feature intervals here for individual counts if chr in intervals: for start, end in intervals[chr]: ind_div_count = div[chr].count_range(start, end-start) ind_snp_count = snp[chr].count_range(start, end-start) print(chr, start, end, ind_div_count, ind_snp_count) print("feature snp\t%d" % feature_snp_count) print("feature div\t%d" % feature_div_count) print("ar snp\t%d" % ar_snp_count) print("ar div\t%d" % ar_div_count) # copies a dictionary of bitsets def copybits(binnedbits): bitset = BinnedBitSet(binnedbits.size) bitset.ior(binnedbits) return bitset def clone(bitsets): r = {} for k, b in bitsets.items(): r[k] = copybits(b) return r def clone_inverted(bitsets): r = {} for k, b in bitsets.items(): r[k] = copybits(b) r[k].invert() return r main() bx-python-0.8.13/scripts/find_in_sorted_file.py000077500000000000000000000036351415666465100216110ustar00rootroot00000000000000#!/usr/bin/env python """ Extract ranges of scores from a sorted file in which each line contains a position followed by a score. TODO: The finder class might actually be useful, it strides through a file and builds an index based on the first line. Maybe move it into the library and get rid of this very specific script? 
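In outline (as implemented below): the index is built by seeking to evenly spaced byte offsets, discarding the partial line at each offset, and recording the file offset and the position value of the next complete line; scores_in_range then begins scanning from the last indexed offset whose recorded position does not exceed the requested start.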
usage: %prog start_pos stop_pos """ import sys max_cats = 1000 class Finder: def __init__(self, file, segments): self.file = file self.segments = segments self.make_index() def make_index(self): self.values = [] self.positions = [] file.seek(0, 2) end = file.tell() step = end / (self.segments - 1) for i in range(0, self.segments - 1): file.seek(i * step, 0) file.readline() position = file.tell() fields = file.readline().split() self.values.append(int(fields[0])) self.positions.append(position) def scores_in_range(self, start, end): position = self.positions[-1] for i in range(1, len(self.values)): if self.values[i] > start: position = self.positions[i - 1] break self.file.seek(position, 0) result = [] while True: line = file.readline() if line == "": break fields = line.split() pos = int(fields[0]) if pos < start: continue if pos > end: break result.append((pos, fields[1])) return result file = open(sys.argv[1]) finder = Finder(file, 100) scores = finder.scores_in_range(int(sys.argv[2]), int(sys.argv[3])) rng = scores[-1][0] - scores[0][0] if rng > max_cats: stride = rng // max_cats else: stride = 1 for score in scores: if score[0] % stride == 0: print(score[0], score[1]) bx-python-0.8.13/scripts/gene_fourfold_sites.py000077500000000000000000000155661415666465100216570ustar00rootroot00000000000000#!/usr/bin/python """ Returns a bed-like translation of a CDS in which each record corresponds to a single site in the CDS and includes additional fields for site degenaracy, position ind CDS, and amino acid encoded. usage: %prog nibdir genefile [options] -o, --outfile=o: output file -f, --format=f: format bed (default), or gtf|gff -a, --allpositions: 1st, 2nd and 3rd positions are evaluated for degeneracy given the sequence at the other two positions. Many 1d sites in 1st codon positions become 2d sites when considered this way. 
-n, --include_name: include the 'name' or 'id' field from the source file on every line of output """ import os import re import string import sys from bx.cookbook import doc_optparse from bx.gene_reader import CDSReader from bx.seq import nib GENETIC_CODE = """ TTT (Phe/F)Phenylalanine TTC (Phe/F)Phenylalanine TTA (Leu/L)Leucine TTG (Leu/L)Leucine, Start TCT (Ser/S)Serine TCC (Ser/S)Serine TCA (Ser/S)Serine TCG (Ser/S)Serine TAT (Tyr/Y)Tyrosine TAC (Tyr/Y)Tyrosine TAA Ochre (Stop) TAG Amber (Stop) TGT (Cys/C)Cysteine TGC (Cys/C)Cysteine TGA Opal (Stop) TGG (Trp/W)Tryptophan CTT (Leu/L)Leucine CTC (Leu/L)Leucine CTA (Leu/L)Leucine CTG (Leu/L)Leucine, Start CCT (Pro/P)Proline CCC (Pro/P)Proline CCA (Pro/P)Proline CCG (Pro/P)Proline CAT (His/H)Histidine CAC (His/H)Histidine CAA (Gln/Q)Glutamine CAG (Gln/Q)Glutamine CGT (Arg/R)Arginine CGC (Arg/R)Arginine CGA (Arg/R)Arginine CGG (Arg/R)Arginine ATT (Ile/I)Isoleucine, Start2 ATC (Ile/I)Isoleucine ATA (Ile/I)Isoleucine ATG (Met/M)Methionine, Start1 ACT (Thr/T)Threonine ACC (Thr/T)Threonine ACA (Thr/T)Threonine ACG (Thr/T)Threonine AAT (Asn/N)Asparagine AAC (Asn/N)Asparagine AAA (Lys/K)Lysine AAG (Lys/K)Lysine AGT (Ser/S)Serine AGC (Ser/S)Serine AGA (Arg/R)Arginine AGG (Arg/R)Arginine GTT (Val/V)Valine GTC (Val/V)Valine GTA (Val/V)Valine GTG (Val/V)Valine, Start2 GCT (Ala/A)Alanine GCC (Ala/A)Alanine GCA (Ala/A)Alanine GCG (Ala/A)Alanine GAT (Asp/D)Aspartic acid GAC (Asp/D)Aspartic acid GAA (Glu/E)Glutamic acid GAG (Glu/E)Glutamic acid GGT (Gly/G)Glycine GGC (Gly/G)Glycine GGA (Gly/G)Glycine GGG (Gly/G)Glycine """ def translate(codon, genetic_code): c1, c2, c3 = codon return genetic_code[c1][c2][c3] """ parse the doc string to hash the genetic code""" GEN_CODE = {} for line in GENETIC_CODE.split('\n'): if line.strip() == '': continue f = re.split(r'\s|\(|\)|\/', line) codon = f[0] c1, c2, c3 = codon aminoacid = f[3] if c1 not in GEN_CODE: GEN_CODE[c1] = {} if c2 not in GEN_CODE[c1]: GEN_CODE[c1][c2] = {} GEN_CODE[c1][c2][c3] = aminoacid def getnib(nibdir): seqs = {} for nibf in os.listdir(nibdir): if not nibf.endswith('.nib'): continue chr = nibf.replace('.nib', '') file = os.path.join(nibdir, nibf) seqs[chr] = nib.NibFile(open(file)) return seqs REVMAP = string.maketrans("ACGTacgt", "TGCAtgca") def revComp(seq): return seq[::-1].translate(REVMAP) def Comp(seq): return seq.translate(REVMAP) def main(): options, args = doc_optparse.parse(__doc__) try: if options.outfile: out = open(options.outfile, "w") else: out = sys.stdout if options.format: format = options.format else: format = 'bed' allpositions = bool(options.allpositions) include_name = bool(options.include_name) nibdir = args[0] bedfile = args[1] except Exception: doc_optparse.exit() nibs = getnib(nibdir) for chrom, strand, cds_exons, name in CDSReader(open(bedfile), format=format): cds_seq = '' # genome_seq_index maps the position in CDS to position on the genome genome_seq_index = [] for (c_start, c_end) in cds_exons: cds_seq += nibs[chrom].get(c_start, c_end-c_start) for i in range(c_start, c_end): genome_seq_index.append(i) cds_seq = cds_seq.upper() if strand == '+': frsts = range(0, len(cds_seq), 3) offsign = 1 else: cds_seq = Comp(cds_seq) frsts = range(2, len(cds_seq), 3) offsign = -1 offone = 1 * offsign offtwo = 2 * offsign all = ['A', 'C', 'G', 'T'] for first_pos in frsts: c1 = first_pos c2 = first_pos + offone c3 = first_pos + offtwo try: assert c3 < len(cds_seq) except AssertionError: print("out of sequence at %d for %s, %d" % (c3, chrom, genome_seq_index[first_pos]), 
file=sys.stderr) continue codon = cds_seq[c1], cds_seq[c2], cds_seq[c3] aa = translate(codon, GEN_CODE) degeneracy3 = str(list(GEN_CODE[codon[0]][codon[1]].values()).count(aa)) + "d" if not include_name: name_text = '' else: name_text = name.replace(' ', '_') if allpositions: try: degeneracy1 = str([GEN_CODE[k][codon[1]][codon[2]] for k in all].count(aa)) + "d" degeneracy2 = str([GEN_CODE[codon[0]][k][codon[2]] for k in all].count(aa)) + "d" except TypeError as s: print(list(GEN_CODE.values()), file=sys.stderr) raise TypeError(s) if strand == '+': print(chrom, genome_seq_index[c1], genome_seq_index[c1] + 1, cds_seq[c1], degeneracy1, aa, name_text, file=out) print(chrom, genome_seq_index[c2], genome_seq_index[c2] + 1, cds_seq[c2], degeneracy2, aa, name_text, file=out) print(chrom, genome_seq_index[c3], genome_seq_index[c3] + 1, cds_seq[c3], degeneracy3, aa, name_text, file=out) else: print(chrom, genome_seq_index[c3], genome_seq_index[c3] + 1, cds_seq[c3], degeneracy3, aa, name_text, file=out) print(chrom, genome_seq_index[c2], genome_seq_index[c2] + 1, cds_seq[c2], degeneracy2, aa, name_text, file=out) print(chrom, genome_seq_index[c1], genome_seq_index[c1] + 1, cds_seq[c1], degeneracy1, aa, name_text, file=out) else: if strand == '+': for b in c1, c2: print(chrom, genome_seq_index[b], genome_seq_index[b] + 1, cds_seq[b], "1d", aa, name_text, file=out) print(chrom, genome_seq_index[c3], genome_seq_index[c3] + 1, cds_seq[c3], degeneracy3, aa, name_text, file=out) else: print(chrom, genome_seq_index[c3], genome_seq_index[c3] + 1, cds_seq[c3], degeneracy3, aa, name_text, file=out) for b in c2, c1: print(chrom, genome_seq_index[b], genome_seq_index[b] + 1, cds_seq[b], "1d", aa, name_text, file=out) out.close() if __name__ == '__main__': main() bx-python-0.8.13/scripts/get_scores_in_intervals.py000077500000000000000000000030301415666465100225230ustar00rootroot00000000000000#!/usr/bin/env python """ Read scores in "wiggle" format from `score_file` and intervals in "bed" format from `interval_file` and print all scores overlapping intervals. TODO: Support binned array format scores also. usage: %prog score_file interval_file [out_file] """ import sys import bx.wiggle from bx import misc from bx.binned_array import BinnedArray from bx.cookbook import doc_optparse def read_scores(f): scores_by_chrom = dict() for chrom, pos, val in bx.wiggle.Reader(f): if chrom not in scores_by_chrom: scores_by_chrom[chrom] = BinnedArray() scores_by_chrom[chrom][pos] = val return scores_by_chrom def main(): # Parse command line options, args = doc_optparse.parse(__doc__) try: score_file = open(args[0]) interval_file = open(args[1]) if len(args) > 2: out_file = open(args[2], 'w') else: out_file = sys.stdout except Exception: doc_optparse.exit() scores_by_chrom = read_scores(misc.open_compressed(sys.argv[1])) for line in open(sys.argv[2]): fields = line.split() chrom, start, stop = fields[0], int(fields[1]), int(fields[2]) if chrom in scores_by_chrom: ba = scores_by_chrom[chrom] scores = [ba[i] for i in range(start, stop)] else: scores = [] print(" ".join(fields), " ".join(map(str, scores)), file=out_file) score_file.close() interval_file.close() out_file.close() if __name__ == "__main__": main() bx-python-0.8.13/scripts/int_seqs_to_char_strings.py000077500000000000000000000011661415666465100227160ustar00rootroot00000000000000#!/usr/bin/env python """ Translate lists of space separated integers (magnitude less than 62) and print as strings of alphanumeric characters. 
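For example, under the translation table defined below, the input line "0 10 35" becomes the string "0Ba".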
This is useful mainly for some machine learning algorithms that only take string input. usage: %prog < int_seqs > strings """ import sys table = "012345678ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" def main(): for line in sys.stdin: ints = [int(f) for f in line.split()] if max(ints) > len(table): raise ValueError("Alphabet size too large!") print(str.join('', [table[i] for i in ints])) if __name__ == "__main__": main() bx-python-0.8.13/scripts/interval_count_intersections.py000077500000000000000000000020731415666465100236240ustar00rootroot00000000000000#!/usr/bin/env python """ Read two lists of intervals (with chromosomes) and count the number of entries in the second set that intersect any entry in the first set. TODO: This could use bitsets rather than the intervals package, would it be faster? usage: %prog bed1 bed2 > out """ import sys from bx import intervals from bx import misc def main(): intersecters = {} # Read ranges for chr, start, end in read_intervals(misc.open_compressed(sys.argv[1])): if chr not in intersecters: intersecters[chr] = intervals.Intersecter() intersecters[chr].add_interval(intervals.Interval(start, end)) # Count intersection total = 0 for chr, start, end in read_intervals(misc.open_compressed(sys.argv[2])): if chr in intersecters: intersection = intersecters[chr].find(start, end) if intersection: total += 1 print(total) def read_intervals(input): for line in input: fields = line.split() yield fields[0], int(fields[1]), int(fields[2]) main() bx-python-0.8.13/scripts/interval_join.py000077500000000000000000000017461415666465100204700ustar00rootroot00000000000000#!/usr/bin/env python """ Match up intersecting intervals from two files. This performs a "full join", any pair of intervals with any basewise overlap will be printed side-by-side. usage: %prog bed1 bed2 """ import sys import bx.intervals.intersection import bx.intervals.io def main(): intersecters = {} # Read second set into intersecter for interval in bx.intervals.io.GenomicIntervalReader(open(sys.argv[2])): if interval.chrom not in intersecters: intersecters[interval.chrom] = bx.intervals.Intersecter() intersecters[interval.chrom].add_interval(interval) # Join with first set for interval in bx.intervals.io.GenomicIntervalReader(open(sys.argv[1])): if interval.chrom in intersecters: intersection = intersecters[interval.chrom].find(interval.start, interval.end) for interval2 in intersection: print("\t".join([str(interval), str(interval2)])) if __name__ == "__main__": main() bx-python-0.8.13/scripts/lav_to_axt.py000077500000000000000000000023461415666465100177620ustar00rootroot00000000000000#!/usr/bin/env python """ Application to convert LAV file to AXT file. Reads a LAV file from standard input and writes a AXT file to standard out; some statistics are written to standard error. 
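The optional path=replacement arguments (see usage below) are passed to bx.align.lav.Reader as path_subs; each pair is intended as a textual substitution applied to the sequence-file paths named in the LAV input.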
usage: lav_to_axt [--silent] [path=replacement] < lav_file > axt_file """ __author__ = "Bob Harris (rsharris@bx.psu.edu)" import sys import bx.align.axt import bx.align.lav def usage(s=None): message = __doc__ if s is None: sys.exit(message) else: sys.exit(f"{s}\n{message}") def main(): # parse the command line silent = False pathSubs = [] for arg in sys.argv[1:]: if "=" in arg: ix = arg.find("=") pathSubs.append((arg[:ix], arg[ix+1:])) elif arg == "--silent": silent = True else: usage("unrecognized argument: " + arg) # read the alignments and other info out = bx.align.axt.Writer(sys.stdout) lavsRead = axtsWritten = 0 for lavBlock in bx.align.lav.Reader(sys.stdin, path_subs=pathSubs): lavsRead += 1 out.write(lavBlock) axtsWritten += 1 if not silent: sys.stderr.write("%d blocks read, %d written\n" % (lavsRead, axtsWritten)) if __name__ == "__main__": main() bx-python-0.8.13/scripts/lav_to_maf.py000077500000000000000000000022641415666465100177300ustar00rootroot00000000000000#!/usr/bin/env python """ Application to convert LAV file to MAF file. Reads a LAV file from standard input and writes a MAF file to standard out; some statistics are written to standard error. usage: lav_to_maf [--silent] [path=replacement] < lav_file > maf_file """ import sys import bx.align.lav import bx.align.maf def usage(s=None): message = __doc__ if s is None: sys.exit(message) else: sys.exit(f"{s}\n{message}") def main(): # parse the command line silent = False pathSubs = [] for arg in sys.argv[1:]: if "=" in arg: ix = arg.find("=") pathSubs.append((arg[:ix], arg[ix+1:])) elif arg == "--silent": silent = True else: usage("unrecognized argument: " + arg) # read the alignments and other info out = bx.align.maf.Writer(sys.stdout) lavsRead = mafsWritten = 0 for lavBlock in bx.align.lav.Reader(sys.stdin, path_subs=pathSubs): lavsRead += 1 out.write(lavBlock) mafsWritten += 1 if not silent: sys.stderr.write("%d blocks read, %d written\n" % (lavsRead, mafsWritten)) if __name__ == "__main__": main() bx-python-0.8.13/scripts/line_select.py000077500000000000000000000012141415666465100201010ustar00rootroot00000000000000#!/usr/bin/env python """ Read a file containing a 0 or 1 on each line (`feature_file`), output all lines from stdin for which that value was 1 TODO: no need to read the feature_file into memory here, just iterate in parallel. usage: %prog feature_file < ... """ import sys def __main__(): feature_file = sys.argv[1] if len(sys.argv) > 2: match = int(sys.argv[2]) else: match = 1 feature_vector = [int(line) for line in open(feature_file)] for index, line in enumerate(sys.stdin): if feature_vector[index] == match: print(line, end='') if __name__ == "__main__": __main__() bx-python-0.8.13/scripts/lzop_build_offset_table.py000077500000000000000000000054661415666465100225100ustar00rootroot00000000000000#!/usr/bin/env python """ Read a compressed file as created by 'lzop' from stdin and write a table to stdout containing the blocksize and the start offset (in bytes) of each compressed block. 
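Each line of the table is either 's <uncompressed-block-size>' (printed once, for the first block) or 'o <file-offset> <compressed-size> <uncompressed-size>' (printed for every block).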
usage: %prog < FILENAME.lzo > FILENAME.lzot """ import struct import sys MAGIC = b"\x89\x4c\x5a\x4f\x00\x0d\x0a\x1a\x0a" F_ADLER32_D = 0x00000001 F_ADLER32_C = 0x00000002 F_H_EXTRA_FIELD = 0x00000040 F_H_GMTDIFF = 0x00000080 F_CRC32_D = 0x00000100 F_CRC32_C = 0x00000200 F_MULTIPART = 0x00000400 F_H_FILTER = 0x00000800 F_H_CRC32 = 0x00001000 assert struct.calcsize("!H") == 2 assert struct.calcsize("!I") == 4 class UnpackWrapper: def __init__(self, file): self.file = file def read(self, amt): return self.file.read(amt) def get(self, fmt): t = struct.unpack(fmt, self.file.read(struct.calcsize(fmt))) return t[0] def main(): try: binary_stdin = sys.stdin.buffer except AttributeError: # Python 2 binary_stdin = sys.stdin f = UnpackWrapper(binary_stdin) # Read header magic = f.read(9) assert magic == MAGIC, "Not LZOP file" version = f.get("!H") f.get("!H") # lib_version if version >= 0x0940: f.get("!H") # extract_version method = f.get("!B") assert 1 <= method <= 3, "Only LZO compression is currently supported" f.get("!B") # level flags = f.get("!I") assert not(flags & F_H_FILTER), "LZOP filters not supported" has_compressed_crc = (flags & F_CRC32_C or flags & F_ADLER32_C) has_uncompressed_crc = (flags & F_CRC32_D or flags & F_ADLER32_D) f.get("!I") # mode f.get("!I") # time f.get("!I") # time_offset fname_len = f.get("!B") fname = f.read(fname_len) assert len(fname) == fname_len, "EOF reading filename" f.get("!I") # header_crc if flags & F_H_EXTRA_FIELD: extra_len = f.get("!I") extra = f.read(extra_len) assert len(extra) == extra_len, "EOF reading extra field" # Done with header block_size = None expect_no_more = False # Read blocks while True: size = f.get("!I") if size == 0: break assert not(expect_no_more), \ "Encountered an undersized block that was not the last block" if block_size is None: print("s", size) block_size = size else: if size < block_size: expect_no_more = True compressed_size = f.get("!I") if has_uncompressed_crc: f.get("!I") # crc if has_compressed_crc: f.get("!I") # compressed_crc print("o", f.file.tell(), compressed_size, size) compressed_data = f.read(compressed_size) assert len(compressed_data) == compressed_size, \ "EOF reading compressed data" if __name__ == "__main__": main() bx-python-0.8.13/scripts/mMK_bitset.py000066400000000000000000000122071415666465100176520ustar00rootroot00000000000000#!/usr/bin/env python from optparse import OptionParser from rpy import r import bx.align.maf import bx.bitset from bx.bitset_builders import binned_bitsets_from_file def main(): parser = OptionParser(usage="usage: %prog [options] maf_file snp_file neutral_file window_size step_size") parser.add_option("-o", "--outfile", help="Specify file for output") parser.add_option("-s", "--species", type="string", default="panTro2") parser.add_option("-b", "--build", type="string", default="hg18") (options, args) = parser.parse_args() if len(args) != 5: parser.error("Incorrect number of arguments") else: maf_filename = args[0] snp_filename = args[1] neutral_filename = args[2] window_size = int(args[3]) step_size = int(args[4]) if options.outfile is not None: out_file = open(options.outfile, 'w') # Generate snp and neutral bitsets AR_snp_bitsets = binned_bitsets_from_file(open(snp_filename)) neutral_bitsets = binned_bitsets_from_file(open(neutral_filename)) # Generate divergence bitset from maf file AR_div_bitsets = dict() chr_lens = dict() reader = bx.align.maf.Reader(open(maf_filename)) for block in reader: comp1 = block.get_component_by_src_start(options.build) comp2 = 
block.get_component_by_src_start(options.species) if comp1 is None or comp2 is None: continue # Chromosome, start, and stop of reference species alignment chr = comp1.src.split('.')[1] start = comp1.start # Get or create bitset for this chromosome if chr in AR_div_bitsets: bitset = AR_div_bitsets[chr] else: bitset = AR_div_bitsets[chr] = bx.bitset.BinnedBitSet() chr_lens[chr] = comp1.get_src_size() # Iterate over text and set diverged bit pos = start for ch1, ch2 in zip(comp1.text.upper(), comp2.text.upper()): if ch1 == '-': continue if ch2 == '-': pos += 1 continue if ch1 != ch2 and not AR_snp_bitsets[chr][pos]: bitset.set(pos) pos += 1 # Debugging Code # for chr in AR_div_bitsets: # for pos in range(0, AR_div_bitsets[chr].size): # if AR_div_bitsets[pos]: # print >> sys.stderr, chr, pos, pos+1 # Copy div and snp bitsets nonAR_snp_bitsets = dict() for chr in AR_snp_bitsets: nonAR_snp_bitsets[chr] = bx.bitset.BinnedBitSet() nonAR_snp_bitsets[chr].ior(AR_snp_bitsets[chr]) nonAR_div_bitsets = dict() for chr in AR_div_bitsets: nonAR_div_bitsets[chr] = bx.bitset.BinnedBitSet() nonAR_div_bitsets[chr].ior(AR_div_bitsets[chr]) # Generates AR snps by intersecting with neutral intervals for chr in AR_snp_bitsets: AR_snp_bitsets[chr].iand(neutral_bitsets[chr]) # Generates AR divs by intersecting with neutral intervals for chr in AR_div_bitsets: AR_div_bitsets[chr].iand(neutral_bitsets[chr]) # Inverts the neutral intervals so now represents nonAR for chr in neutral_bitsets: neutral_bitsets[chr].invert() # Generates nonAR snps by intersecting with masked neutral intervals for chr in nonAR_snp_bitsets: nonAR_snp_bitsets[chr].iand(neutral_bitsets[chr]) # Generates nonAR divs by intersecting with masked neutral intervals for chr in nonAR_div_bitsets: nonAR_div_bitsets[chr].iand(neutral_bitsets[chr]) for chr in AR_div_bitsets: for window in range(0, chr_lens[chr] - window_size, step_size): # neutral_size = neutral_bitsets[chr].count_range(window, window_size) # if neutral_size < 9200: continue AR_snp = AR_snp_bitsets[chr].count_range(window, window_size) AR_div = AR_div_bitsets[chr].count_range(window, window_size) nonAR_snp = nonAR_snp_bitsets[chr].count_range(window, window_size) nonAR_div = nonAR_div_bitsets[chr].count_range(window, window_size) if nonAR_snp >= 6 and nonAR_div >= 6 and AR_snp >= 6 and AR_div >= 6: MK_pval = MK_chi_pvalue(nonAR_snp, nonAR_div, AR_snp, AR_div) else: MK_pval = MK_fisher_pvalue(nonAR_snp, nonAR_div, AR_snp, AR_div) if options.outfile is not None: out_file.write("%s\t%d\t%d\t%d\t%d\t%d\t%d\t%1.15f\n" % (chr, window, window+window_size, nonAR_snp, nonAR_div, AR_snp, AR_div, MK_pval)) else: print("%s\t%d\t%d\t%d\t%d\t%d\t%d\t%1.15f" % (chr, window, window+window_size, nonAR_snp, nonAR_div, AR_snp, AR_div, MK_pval)) if options.outfile is not None: out_file.close() def MK_fisher_pvalue(win_snp, win_div, AR_snp, AR_div): if win_snp == 0 and win_div == 0 and AR_snp == 0 and AR_div == 0: return 1.0 fisher_result = r.fisher_test(r.matrix(r.c([win_snp, win_div, AR_snp, AR_div]), nr=2)) return fisher_result['p.value'] def MK_chi_pvalue(win_snp, win_div, AR_snp, AR_div): chi_result = r.chisq_test(r.matrix(r.c([win_snp, win_div, AR_snp, AR_div]), nr=2)) return chi_result['p.value'] main() bx-python-0.8.13/scripts/maf_build_index.py000077500000000000000000000053421415666465100207320ustar00rootroot00000000000000#!/usr/bin/env python """ Build an index file for a set of MAF alignment blocks. If index_file is not provided maf_file.index is used. 
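Once built, the index supports random access from Python as well as from the other maf_* scripts, e.g. (a minimal sketch; the file and src names are illustrative):

    import sys
    import bx.align.maf
    index = bx.align.maf.Indexed("aln.maf", "aln.maf.index")
    out = bx.align.maf.Writer(sys.stdout)
    for block in index.get("hg18.chr1", 1000, 2000):
        out.write(block)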
usage: %prog maf_file index_file
    -s, --species=a,b,c: only index the position of the block in the listed species
"""
import os.path
from io import TextIOWrapper

import bx.align.maf
from bx import interval_index_file
from bx.cookbook import doc_optparse
from bx.misc.seekbzip2 import SeekableBzip2File
from bx.misc.seeklzop import SeekableLzopFile


def main():
    options, args = doc_optparse.parse(__doc__)

    try:
        maf_file = args[0]
        # If it appears to be a bz2 file, attempt to open with table
        if maf_file.endswith(".bz2"):
            table_file = maf_file + "t"
            if not os.path.exists(table_file):
                doc_optparse.exit("To index bz2 compressed files first "
                                  "create a bz2t file with bzip-table.")
            # Open with SeekableBzip2File so we have tell support
            maf_in = SeekableBzip2File(maf_file, table_file)
            # Strip .bz2 from the filename before adding ".index"
            maf_file = maf_file[:-4]
        elif maf_file.endswith(".lzo"):
            table_file = maf_file + "t"
            if not os.path.exists(table_file):
                doc_optparse.exit("To index lzo compressed files first "
                                  "create a lzot file with lzop_build_offset_table.")
            # Open with SeekableLzopFile so we have tell support
            maf_in = SeekableLzopFile(maf_file, table_file)
            # Strip .lzo from the filename before adding ".index"
            maf_file = maf_file[:-4]
        else:
            maf_in = open(maf_file, "rb")
        # Determine the name of the index file
        if len(args) > 1:
            index_file = args[1]
        else:
            index_file = maf_file + ".index"
        if options.species:
            species = options.species.split(",")
        else:
            species = None
    except Exception:
        doc_optparse.exception()

    maf_in = TextIOWrapper(maf_in, encoding="ascii")
    maf_reader = bx.align.maf.Reader(maf_in, parse_e_rows=True)

    indexes = interval_index_file.Indexes()

    # Need to be a bit tricky in our iteration here to get the 'tells' right
    while True:
        pos = maf_reader.file.tell()
        block = next(maf_reader)
        if block is None:
            break
        for c in block.components:
            if species is not None and c.src.split('.')[0] not in species:
                continue
            indexes.add(c.src, c.forward_strand_start, c.forward_strand_end, pos, max=c.src_size)

    out = open(index_file, 'wb')
    indexes.write(out)
    out.close()


if __name__ == "__main__":
    main()
bx-python-0.8.13/scripts/maf_chop.py000077500000000000000000000027321415666465100173750ustar00rootroot00000000000000#!/usr/bin/env python
"""
Chops alignments in a MAF file into pieces of a specified length.
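For example, chopping blocks of 250 columns to length 100 yields two 100-column chunks per block, with the 50 leftover columns distributed as random skips before, between, and after the chunks.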
A random set of non overlapping chunks of exactly the specified chop length will be produced usage: %prog [options] < maf > maf -l, --length: Chop to exactly this length in columns (default 100) """ import random import sys from optparse import OptionParser import bx.align.maf def main(): # Parse command line arguments parser = OptionParser() parser.add_option("-l", "--length", action="store", type="int", default=100, help="") (options, args) = parser.parse_args() length = options.length maf_reader = bx.align.maf.Reader(sys.stdin) maf_writer = bx.align.maf.Writer(sys.stdout) for m in maf_reader: for chopped in chop(m, length): maf_writer.write(chopped) def chop(m, length): maf_length = m.text_size chunk_count = maf_length // length lost_bases = maf_length % length skip_amounts = [0] * (chunk_count + 1) for i in range(0, lost_bases): skip_amounts[random.randrange(0, chunk_count + 1)] += 1 start = 0 rval = [] for i in range(0, chunk_count): start += skip_amounts[i] n = m.slice(start, start + length) if check_len(n): rval.append(m.slice(start, start + length)) start += length return rval def check_len(a): for c in a.components: if c.size == 0: return False return True if __name__ == "__main__": main() bx-python-0.8.13/scripts/maf_chunk.py000077500000000000000000000045441415666465100175570ustar00rootroot00000000000000#!/usr/bin/env python """ Read a MAF from stdin and break into several new mafs containing no more than `chunk_size` columns. The new mafs will be written to `out_dir` along with a file "intervals.txt" specifying the range covered by each new maf file. A probability for writing each chunk can optionally be specified, resulting in a random fraction of chunks from the input MAF being produced. usage: %prog [options] chunk_size out_dir < maf --prob: probability of writing versus skipping each chunk. 
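For example, '%prog --prob 0.5 30000 out < in.maf' writes a random ~50% of the chunks as out/000000000.maf, out/000000001.maf, ..., and out/intervals.txt lists the reference range covered by each chunk that was written.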
""" import random import sys from optparse import OptionParser import numpy as np import bx.align.maf INF = np.inf def __main__(): parser = OptionParser("usage: %prog chunk_size out_dir") parser.add_option("--prob", action="store", default=None, type="float", help="Probability of writing a given chunk") (options, args) = parser.parse_args() chunk_size = int(args[0]) out_dir = args[1] prob = options.prob maf_reader = bx.align.maf.Reader(sys.stdin, parse_e_rows=True) maf_writer = None count = 0 current_chunk = -1 chunk_min = INF chunk_max = 0 write_current_chunk = True interval_file = open("%s/intervals.txt" % out_dir, "w") for m in maf_reader: if not maf_writer or count + m.text_size > chunk_size: current_chunk += 1 # Finish the last chunk if maf_writer: maf_writer.close() interval_file.write(f"{chunk_min} {chunk_max}\n") chunk_min = INF chunk_max = 0 # Decide if the new chunk will be written if prob: write_current_chunk = bool(random.random() <= prob) else: write_current_chunk = True if write_current_chunk: maf_writer = bx.align.maf.Writer(open("%s/%09d.maf" % (out_dir, current_chunk), "w")) else: maf_writer = None count = 0 if maf_writer: maf_writer.write(m) # count += m.text_size count += m.components[0].size chunk_min = min(chunk_min, m.components[0].start) chunk_max = max(chunk_max, m.components[0].end) if maf_writer: maf_writer.close() interval_file.write(f"{chunk_min} {chunk_max}\n") interval_file.close() if __name__ == "__main__": __main__() bx-python-0.8.13/scripts/maf_col_counts.py000077500000000000000000000016511415666465100206130ustar00rootroot00000000000000#!/usr/bin/env python """ For every column that occurs in a multiple alignment print the column and the number of times it occurs (one column/count per line, tab separated), sorted by count descending. Note: all blocks must have exactly the same number of species. usage: %prog < maf > column_counts """ import sys import bx.align.maf counts = {} nspecies = None for block in bx.align.maf.Reader(sys.stdin): # Ensure all blocks have the same number of rows if nspecies: assert len(block.components) == nspecies else: nspecies = len(block.components) # Increment count for each column for col in zip(* [iter(comp.text.upper()) for comp in block.components]): try: counts[col] += 1 except Exception: counts[col] = 1 counts = sorted((value, key) for key, value in counts.items()) counts.reverse() for count, col in counts: print("".join(col), count) bx-python-0.8.13/scripts/maf_col_counts_all.py000077500000000000000000000030351415666465100214410ustar00rootroot00000000000000#!/usr/bin/env python """ For every column that occurs in a multiple alignment print the column and the number of times it occurs (one column/count per line, tab separated), sorted by count descending. This version allows special handling of the 'wildcard' symbol in alignments. Note: all blocks must have exactly the same number of species. 
usage: %prog [options] < maf > column_counts -w, --wildcard: include wildcards -m, --maxwildcards=N: only allow N missing species """ import sys import bx.align.maf from bx.cookbook import ( cross_lists, doc_optparse, ) counts = {} nspecies = None for block in bx.align.maf.Reader(sys.stdin): # Ensure all blocks have the same number of rows if nspecies: assert len(block.components) == nspecies else: nspecies = len(block.components) # Increment count for each column for col in zip(* [iter(comp.text.upper()) for comp in block.components]): col = ''.join(col) try: counts[col] += 1 except Exception: counts[col] = 1 options, args = doc_optparse.parse(__doc__) wildcard = False if options.wildcard: wildcard = True max_wildcard = nspecies - 1 if options.maxwildcards: wildcard = True max_wildcard = int(options.maxwildcards) nucs = "ACGT-" if wildcard: nucs += "*" for col in cross_lists(*([nucs] * nspecies)): col = ''.join(col) if wildcard and col.count("*") > max_wildcard: continue if col.count("-") == nspecies: continue print(col, counts.get(col, 0)) bx-python-0.8.13/scripts/maf_count.py000077500000000000000000000031301415666465100175650ustar00rootroot00000000000000#!/usr/bin/env python """ Read a MAF from standard input and print counts of alignments, bases, or columns. usage: %prog [options] -c, --cols: count alignment columns rather than number of alignments -b, --bases: count bases in first species rather than number of alignments -s, --skip=N: when counting bases, skip this base -e, --each: print a count for each alignment rather than whole file -r, --ref=N: reference sequence (first by default, 0..n) """ import sys import bx.align.maf from bx.cookbook import doc_optparse def __main__(): options, args = doc_optparse.parse(__doc__) try: if options.cols: action = "cols" elif options.bases: action = "bases" else: action = "aligns" print_each = bool(options.each) if options.ref: ref = int(options.ref) else: ref = 0 if options.skip: skip = options.skip else: skip = None except Exception: doc_optparse.exit() maf_reader = bx.align.maf.Reader(sys.stdin) count = 0 for m in maf_reader: if action == "aligns": count += 1 elif action == "cols": count += m.text_size elif action == "bases": if skip: count += (m.components[ref].size - m.components[ref].text.count(skip)) else: count += m.components[ref].size if print_each: print(count) count = 0 if not print_each: print(count) if __name__ == "__main__": __main__() bx-python-0.8.13/scripts/maf_covered_ranges.py000077500000000000000000000026601415666465100214320ustar00rootroot00000000000000#!/usr/bin/env python """ usage: %prog species1,species2,... 
nrequired < maf """ import sys import bx.align.maf from bx.cookbook import doc_optparse SPAN = 100 MIN = 100 def main(): options, args = doc_optparse.parse(__doc__) try: species = args[0].split(',') nrequired = int(args[1]) except Exception: doc_optparse.exit() maf_reader = bx.align.maf.Reader(sys.stdin) interval_start = None interval_end = None for m in maf_reader: ref = m.components[0] # Does this alignment have enough of the required species if nrequired <= len([comp for comp in m.components if comp.src.split('.')[0] in species]): if interval_start is None: interval_start = ref.start interval_end = ref.end else: if ref.start - interval_end < SPAN: interval_end = ref.end else: if interval_end - interval_start >= MIN: print(ref.src.split('.')[1], interval_start, interval_end) interval_start = ref.start interval_end = ref.end else: if interval_start is not None and interval_end - interval_start >= MIN: print(ref.src.split('.')[1], interval_start, interval_end) interval_start = None interval_end = None if __name__ == "__main__": main() bx-python-0.8.13/scripts/maf_covered_regions.py000077500000000000000000000031061415666465100216150ustar00rootroot00000000000000#!/usr/bin/env python """ Read a maf file and print the regions covered to a set of bed files (one for each sequence source referenced in the maf). Only blocks with a positive percent identity are written out. TODO: Can this be generalized to be made more useful? usage: %prog bed_outfile_prefix < maf """ import sys import bx.align.maf def block_pid(comp1, comp2): match = 0 total = 0 t1 = comp1.text.lower() t2 = comp2.text.lower() for i in range(0, len(t1)): a, b = t1[i], t2[i] if a == '-' or b == '-': continue elif a == b: match += 1 total += 1 if total == 0: return None return (match / total) def main(): out_prefix = sys.argv[1] print(out_prefix) out_files = dict() for block in bx.align.maf.Reader(sys.stdin): ref_comp = block.components[0] ref_chrom = ref_comp.src.split('.')[1] for comp in block.components[1:]: comp_species, comp_chrom = comp.src.split('.')[:2] if comp_species not in out_files: f = open(f"{out_prefix}{comp_species}.bed", "w") out_files[comp_species] = f pid = block_pid(ref_comp, comp) if pid: out_files[comp_species].write( "%s\t%d\t%d\t%s:%d-%d,%s\t%f\n" % (ref_chrom, ref_comp.forward_strand_start, ref_comp.forward_strand_end, comp_chrom, comp.start, comp.end, comp.strand, pid)) for f in out_files.values(): f.close() if __name__ == "__main__": main() bx-python-0.8.13/scripts/maf_div_sites.py000077500000000000000000000026361415666465100204400ustar00rootroot00000000000000#!/usr/bin/env python """ Create a bed file listing all the divergent sites between two specific species in a maf. 
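A site is reported only when both species have a valid aligned base: columns where the reference has a gap are skipped, and mismatches involving 'N' or a gap in the other species are ignored.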
usage: %prog maf_file reference_species_name other_species_name """ import sys import bx.align.maf import bx.bitset def main(): bitsets = {} maf = sys.argv[1] reference_sp, other_sp = sys.argv[2], sys.argv[3] for block in bx.align.maf.Reader(open(maf)): ref = block.get_component_by_src_start(reference_sp) other = block.get_component_by_src_start(other_sp) if not ref or not other: continue ref_chrom = ref.src.split('.')[1] ref_start = ref.start chrom_size = ref.get_src_size() if ref_chrom not in bitsets: bitsets[ref_chrom] = bx.bitset.BinnedBitSet(chrom_size) pos = ref_start for i, j in zip(ref.text.upper(), other.text.upper()): if i != '-': if i != j: # mismatch if i != 'N' and j != 'N' and j != '-': # set if all valid chars bitsets[ref_chrom].set(pos) pos += 1 # bits --> bed file for chrom in bitsets: bits = bitsets[chrom] end = 0 while True: start = bits.next_set(end) if start == bits.size: break end = bits.next_clear(start) print("%s\t%d\t%d" % (chrom, start, end)) main() bx-python-0.8.13/scripts/maf_drop_overlapping.py000077500000000000000000000022321415666465100220110ustar00rootroot00000000000000#!/usr/bin/env python """ Remove any blocks from a maf that overlap any of a set of intervals. usage: %prog interval files... < maf """ import sys import bx.align.maf from bx import intervals from bx.cookbook import doc_optparse def __main__(): options, args = doc_optparse.parse(__doc__) try: assert len(args) > 0 except AssertionError: doc_optparse.exit() # Load Intervals intersector = intervals.Intersecter() for f in args: for line in open(f): if line.startswith("#") or line.isspace(): continue fields = line.split() intersector.add_interval(intervals.Interval(int(fields[0]), int(fields[1]))) # Start MAF on stdout out = bx.align.maf.Writer(sys.stdout) # Iterate over input MAF for maf in bx.align.maf.Reader(sys.stdin): # Find overlap with reference component intersections = intersector.find(maf.components[0].start, maf.components[0].end) # Write only if no overlap if len(intersections) == 0: out.write(maf) # Close output MAF out.close() if __name__ == "__main__": __main__() bx-python-0.8.13/scripts/maf_extract_chrom_ranges.py000077500000000000000000000052641415666465100226500ustar00rootroot00000000000000#!/usr/bin/env python """ Reads a list of intervals and a maf. Produces a new maf containing the portions of the original that overlapped the intervals NOTE: See maf_extract_ranges_indexed.py which works better / faster for many use cases. TODO: Combine with maf_extract_ranges, and possibly share some code with maf_extract_ranges_indexed. 
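For example (illustrative names), if ranges.txt lists 'chr1 1000 2000', then '%prog ranges.txt hg18 -p hg18. < in.maf > out.maf' matches that interval against blocks whose reference component src is 'hg18.chr1'.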
usage: %prog interval_file refname|refindex [options] < maf_file -m, --mincols=10: Minimum length (columns) required for alignment to be output -p, --prefix=PREFIX: Prefix """ import sys import bx.align.maf from bx import intervals from bx.cookbook import doc_optparse def __main__(): options, args = doc_optparse.parse(__doc__) try: range_filename = args[0] try: refindex = int(args[1]) refname = None except ValueError: refindex = None refname = args[1] if options.mincols: mincols = int(options.mincols) else: mincols = 10 if options.prefix: prefix = options.prefix else: prefix = "" except Exception: doc_optparse.exit() # Load Intervals intersecters = dict() for line in open(range_filename): fields = line.split() src = prefix + fields[0] if src not in intersecters: intersecters[src] = intervals.Intersecter() intersecters[src].add_interval(intervals.Interval(int(fields[1]), int(fields[2]))) # Start MAF on stdout out = bx.align.maf.Writer(sys.stdout) # Iterate over input MAF for maf in bx.align.maf.Reader(sys.stdin): if refname: sourcenames = [cmp.src.split('.')[0] for cmp in maf.components] try: refindex = sourcenames.index(refname) except Exception: continue ref_component = maf.components[refindex] # Find overlap with reference component if ref_component.src not in intersecters: continue intersections = sorted(intersecters[ref_component.src].find(ref_component.start, ref_component.end)) # Keep output maf ordered # Write each intersecting block for interval in intersections: start = max(interval.start, ref_component.start) end = min(interval.end, ref_component.end) sliced = maf.slice_by_component(refindex, start, end) good = True for c in sliced.components: if c.size < 1: good = False if good and sliced.text_size > mincols: out.write(sliced) # Close output MAF out.close() if __name__ == "__main__": __main__() bx-python-0.8.13/scripts/maf_extract_ranges.py000077500000000000000000000042471415666465100214600ustar00rootroot00000000000000#!/usr/bin/env python """ Reads a list of intervals (start, stop) and a maf. Produces a new maf containing the blocks from the original that overlapped the intervals. NOTE: See maf_extract_ranges_indexed.py which works better / faster for many use cases. NOTE: chromosome/src information in the MAF is ignored by this variant. NOTE: if a single alignment in a block become empty during slicing, the block is ignored. 
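For example, an interval (1000, 2000) against a block whose reference row spans 1500-2500 yields the slice for reference positions 1500-2000, which is written only if it is longer than mincols columns and no non-empty row is reduced to zero length.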
usage: %prog interval_file refindex [options] < maf_file -m, --mincols=10: Minimum length (columns) required for alignment to be output """ import sys import bx.align.maf from bx import intervals from bx.cookbook import doc_optparse def __main__(): # Parse Command Line options, args = doc_optparse.parse(__doc__) try: range_filename = args[0] refindex = int(args[1]) if options.mincols: mincols = int(options.mincols) else: mincols = 10 except Exception: doc_optparse.exit() # Load Intervals intersecter = intervals.Intersecter() for line in open(range_filename): fields = line.split() intersecter.add_interval(intervals.Interval(int(fields[0]), int(fields[1]))) # Start MAF on stdout out = bx.align.maf.Writer(sys.stdout) # Iterate over input MAF for maf in bx.align.maf.Reader(sys.stdin, parse_e_rows=True): ref = maf.components[refindex] # Find overlap with reference component intersections = sorted(intersecter.find(ref.get_forward_strand_start(), ref.get_forward_strand_end())) # Keep output maf ordered # Write each intersecting block for interval in intersections: start = max(interval.start, ref.get_forward_strand_start()) end = min(interval.end, ref.get_forward_strand_end()) sliced = maf.slice_by_component(refindex, start, end) good = True for c in sliced.components: if c.size < 1 and not c.empty: good = False if good and sliced.text_size > mincols: out.write(sliced) # Close output MAF out.close() if __name__ == "__main__": __main__() bx-python-0.8.13/scripts/maf_extract_ranges_indexed.py000077500000000000000000000110341415666465100231500ustar00rootroot00000000000000#!/usr/bin/env python """ Reads a list of intervals and a maf. Produces a new maf containing the blocks or parts of blocks in the original that overlapped the intervals. It is assumed that each file `maf_fname` has a corresponding `maf_fname`.index file. NOTE: If two intervals overlap the same block it will be written twice. With non-overlapping intervals and --chop this is never a problem. NOTE: Intervals are origin-zero, half-open. For example, the interval 100,150 is 50 bases long, and there are 100 bases to its left in the sequence. NOTE: Intervals are relative to the + strand, regardless of the strands in the alignments. WARNING: bz2/bz2t support and file cache support are new and not as well tested. usage: %prog maf_fname1 maf_fname2 ... [options] < interval_file -m, --mincols=0: Minimum length (columns) required for alignment to be output -c, --chop: Should blocks be chopped to only portion overlapping (no by default) -s, --src=s: Use this src for all intervals -p, --prefix=p: Prepend this to each src before lookup -d, --dir=d: Write each interval as a separate file in this directory -S, --strand: Strand is included as an additional column, and the blocks are reverse complemented (if necessary) so that they are always on that strand w/r/t the src species. 
-C, --usecache: Use a cache that keeps blocks of the MAF files in memory (requires ~20MB per MAF) """ import os import sys import bx.align.maf from bx.cookbook import doc_optparse def main(): # Parse Command Line options, args = doc_optparse.parse(__doc__) try: maf_files = args if options.mincols: mincols = int(options.mincols) else: mincols = 0 if options.src: fixed_src = options.src else: fixed_src = None if options.prefix: prefix = options.prefix else: prefix = None if options.dir: dir = options.dir else: dir = None chop = bool(options.chop) do_strand = bool(options.strand) use_cache = bool(options.usecache) except Exception: doc_optparse.exit() # Open indexed access to mafs index = bx.align.maf.MultiIndexed(maf_files, keep_open=True, parse_e_rows=True, use_cache=use_cache) # Start MAF on stdout if dir is None: out = bx.align.maf.Writer(sys.stdout) # Iterate over input ranges for line in sys.stdin: strand = None fields = line.split() if fixed_src: src, start, end = fixed_src, int(fields[0]), int(fields[1]) if do_strand: strand = fields[2] else: src, start, end = fields[0], int(fields[1]), int(fields[2]) if do_strand: strand = fields[3] if prefix: src = prefix + src # Find overlap with reference component blocks = index.get(src, start, end) # Open file if needed if dir: out = bx.align.maf.Writer(open(os.path.join(dir, "%s:%09d-%09d.maf" % (src, start, end)), 'w')) # Write each intersecting block if chop: for block in blocks: for ref in block.get_components_by_src(src): slice_start = max(start, ref.get_forward_strand_start()) slice_end = min(end, ref.get_forward_strand_end()) if slice_end <= slice_start: continue sliced = block.slice_by_component(ref, slice_start, slice_end) # If the block is shorter than the minimum allowed size, stop if mincols and (sliced.text_size < mincols): continue # If the reference component is empty, don't write the block if sliced.get_component_by_src(src).size < 1: continue # Keep only components that are not empty sliced.components = [c for c in sliced.components if c.size > 0 or c.empty] # Reverse complement if needed if strand is not None and ref.strand != strand: sliced = sliced.reverse_complement() # Write the block out.write(sliced) else: for block in blocks: out.write(block) if dir: out.close() # Close output MAF out.close() index.close() if __name__ == "__main__": main() bx-python-0.8.13/scripts/maf_filter.py000077500000000000000000000030441415666465100177260ustar00rootroot00000000000000#!/usr/bin/env python """ Filter each block in a maf file. Can filter blocks for a minimum number of components (rows), a minimum length in columns, or an arbitrary python expression (which will be evaluated for each block with the variable 'm' containing that block). 
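For example, '-e "m.score >= 100"' keeps only blocks scoring at least 100, and '--expr "len(m.components) > 2"' keeps blocks with more than two rows.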
usage: %prog [options] < maf > maf --component_count=N: Minimum number of components (rows) --min_cols=N: Minimum number of columns -e, --expr=EXPR: Python expression that must evaulate to true """ import sys from optparse import OptionParser from bx.align import maf def __main__(): # Parse command line arguments parser = OptionParser() parser.add_option("--component_count", action="store", default=None, type="int", help="") parser.add_option("--min_cols", action="store", default=None, type="int", help="") parser.add_option("-e", "--expr", action="store", default=None) (options, args) = parser.parse_args() component_count = options.component_count min_cols = options.min_cols expr = options.expr # Compile expression for SPEED if expr: expr = compile(expr, '', 'eval') maf_reader = maf.Reader(sys.stdin, parse_e_rows=True) maf_writer = maf.Writer(sys.stdout) for m in maf_reader: if component_count and len(m.components) != component_count: continue if min_cols and m.text_size < min_cols: continue if expr and not bool(eval(expr, {"m": m, "maf": m})): continue maf_writer.write(m) if __name__ == "__main__": __main__() bx-python-0.8.13/scripts/maf_filter_max_wc.py000077500000000000000000000014541415666465100212670ustar00rootroot00000000000000#!/usr/bin/env python """ Filter maf blocks for presence of wildcard columns. Blocks must meet the criteria of having at least `min_good` columns, each of which has more than `min_species` rows that are NOT wildcard bases ('*'). TODO: Allow specifying the character of the wildcard base. usage: %prog min_good min_species < maf > maf """ import sys from bx.align import maf def main(): min_good = int(sys.argv[1]) min_species = int(sys.argv[2]) maf_reader = maf.Reader(sys.stdin, parse_e_rows=True) maf_writer = maf.Writer(sys.stdout) for m in maf_reader: good = 0 for col in m.column_iter(): if col.count('*') <= min_species: good += 1 if good >= min_good: maf_writer.write(m) if __name__ == "__main__": main() bx-python-0.8.13/scripts/maf_gap_frequency.py000077500000000000000000000006351415666465100212740ustar00rootroot00000000000000#!/usr/bin/env python """ Read a MAF from standard input and print the fraction of gap columns in each block. usage: %prog < maf > out """ import sys import bx.align.maf def main(): for m in bx.align.maf.Reader(sys.stdin): gaps = 0 for col in m.column_iter(): if '-' in col: gaps += 1 print(gaps / m.text_size) if __name__ == "__main__": main() bx-python-0.8.13/scripts/maf_gc_content.py000077500000000000000000000011171415666465100205630ustar00rootroot00000000000000#!/usr/bin/env python """ Read a MAF from standard input and print average GC content of each alignment usage: %prog < maf > out """ import sys from bx.align import maf def __main__(): maf_reader = maf.Reader(sys.stdin) for m in maf_reader: gc = 0 bases = 0 for c in m.components: gc += c.text.count('G') gc += c.text.count('C') gc += c.text.count('g') gc += c.text.count('c') bases += (len(c.text) - c.text.count('-')) print(gc / bases) if __name__ == "__main__": __main__() bx-python-0.8.13/scripts/maf_interval_alignibility.py000077500000000000000000000075631415666465100230370ustar00rootroot00000000000000#!/usr/bin/env python """ WARNING: bz2/bz2t support and file cache support are new and not as well tested. 
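For each BED3+ interval read from stdin one line is printed: chrom, start, end, then for each species either the fraction of the interval's non-missing bases that are aligned, or 'NA' when too much of the interval is annotated as missing data.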
usage: %prog maf_files [options] < interval_file -s, --species=SPECIES: Comma separated list of species to include -p, --prefix=PREFIX: Prefix to add to each interval chrom (usually reference species) -C, --usecache: Use a cache that keeps blocks of the MAF files in memory (requires ~20MB per MAF) """ import sys from numpy import zeros import bx.align.maf from bx.cookbook import doc_optparse def main(): # Parse Command Line options, args = doc_optparse.parse(__doc__) try: maf_files = args species = options.species.split(",") prefix = options.prefix use_cache = bool(options.usecache) if not prefix: prefix = "" except Exception: doc_optparse.exit() # Open indexed access to mafs index = bx.align.maf.MultiIndexed(maf_files, parse_e_rows=True, use_cache=use_cache) # Print header print("#chr", "start", "end", end=' ') for s in species: print(s, end=' ') print() # Iterate over input ranges for line in sys.stdin: fields = line.split() # Input is BED3+ chr, start, end = fields[0], int(fields[1]), int(fields[2]) length = end - start assert length > 0, "Interval has length less than one" # Prepend prefix if specified src = prefix + chr # Keep a bitset for each species noting covered pieces aligned_bits = [] missing_bits = [] for s in species: aligned_bits.append(zeros(length, dtype=bool)) missing_bits.append(zeros(length, dtype=bool)) # Find overlap with reference component blocks = index.get(src, start, end) # Determine alignability for each position for block in blocks: # Determine the piece of the human interval this block covers, # relative to the start of the interval of interest ref = block.get_component_by_src(src) assert ref.strand == "+", \ "Reference species blocks must be on '+' strand" rel_start = max(start, ref.start) - start rel_end = min(end, ref.end) - start # Check alignability for each species for i, s in enumerate(species): other = block.get_component_by_src_start(s) # Species does not appear at all indicates unaligned (best we # can do here?) if other is None: continue # An empty component might indicate missing data, all other # cases (even contiguous) we count as not aligned if other.empty: if other.synteny_empty == bx.align.maf.MAF_MISSING_STATUS: missing_bits[i][rel_start:rel_end] = True # Otherwise we have a local alignment with some text, call # it aligned else: aligned_bits[i][rel_start:rel_end] = True # Now determine the total alignment coverage of each interval print(chr, start, end, end=' ') for i, s in enumerate(species): aligned = sum(aligned_bits[i]) missing = sum(missing_bits[i]) # An interval will be called missing if it is < 100bp and <50% # present, or more than 100bp and less that 50bp present (yes, # arbitrary) if length < 100 and missing > (length / 2): print("NA", end=' ') elif length >= 100 and missing > 50: print("NA", end=' ') else: print(aligned / (length - missing), end=' ') print() # Close MAF files index.close() if __name__ == "__main__": main() bx-python-0.8.13/scripts/maf_limit_to_species.py000077500000000000000000000015401415666465100217730ustar00rootroot00000000000000#!/usr/bin/env python """ Read a maf file from stdin and write out a new maf with only blocks having all of the required in species, after dropping any other species and removing columns containing only gaps. usage: %prog species,species2,... 
< maf """ import sys import bx.align.maf def main(): species = sys.argv[1].split(',') maf_reader = bx.align.maf.Reader(sys.stdin, parse_e_rows=True) maf_writer = bx.align.maf.Writer(sys.stdout) for m in maf_reader: new_components = [] for comp in m.components: if comp.src.split('.')[0] in species: new_components.append(comp) m.components = new_components m.remove_all_gap_columns() if len(m.components) > 1: maf_writer.write(m) maf_reader.close() maf_writer.close() if __name__ == "__main__": main() bx-python-0.8.13/scripts/maf_mapping_word_frequency.py000077500000000000000000000032071415666465100232110ustar00rootroot00000000000000#!/usr/bin/env python """ Reads a maf file from stdin and applies the mapping file specified by `mapping_file` to produce a sequence of integers. Then for each possible word of length `motif_len` in this integer alphabet print the number of times that word occurs in the block. usage: %prog motif_len mapping_file < maf_file > counts """ import sys from numpy import ( zeros ) import bx.align.maf from bx import seqmapping def main(): word_length = int(sys.argv[1]) with open(sys.argv[2]) as f: align_count, alpha_map = seqmapping.alignment_mapping_from_file(f) for maf in bx.align.maf.Reader(sys.stdin): assert len(maf.components) == align_count # Translate alignment to ints ints = seqmapping.DNA.translate_list([c.text for c in maf.components]) # Apply mapping ints = alpha_map.translate(ints) # Count words radix = alpha_map.get_out_size() counts = zeros(radix ** word_length, int) total = 0 for i in range(word_length, len(ints)): index = 0 factor = 1 skip = False for j in range(word_length): assert 0 < i-j < len(ints) letter = ints[i-j] if letter < 0: skip = True break index += letter * factor factor *= radix if skip: continue else: counts[index] += 1 total += 1 # Write ints separated by tabs print('\t'.join([str(total)] + [str(_) for _ in counts])) if __name__ == "__main__": main() bx-python-0.8.13/scripts/maf_mask_cpg.py000066400000000000000000000021211415666465100202150ustar00rootroot00000000000000#!/usr/bin/env python """ Mask out potential CpG sites from a maf. Restricted or inclusive definition of CpG sites can be used. The total fraction masked is printed to stderr. usage: %prog < input > output -m, --mask=N: Character to use as mask ('?' is default) -r, --restricted: Use restricted definition of CpGs """ import sys import bx.align import bx.align.maf import bx.align.sitemask.cpg from bx.cookbook import doc_optparse def main(): options, args = doc_optparse.parse(__doc__) try: if options.mask: mask = options.mask else: mask = "?" except Exception: doc_optparse.exception() reader = bx.align.maf.Reader(sys.stdin) writer = bx.align.maf.Writer(sys.stdout) if options.restricted: cpgfilter = bx.align.sitemask.cpg.Restricted(mask=mask) else: cpgfilter = bx.align.sitemask.cpg.Inclusive(mask=mask) cpgfilter.run(reader, writer.write) print(str(float(cpgfilter.masked)/float(cpgfilter.total) * 100) + "% bases masked.", file=sys.stderr) if __name__ == "__main__": main() bx-python-0.8.13/scripts/maf_mean_length_ungapped_piece.py000077500000000000000000000013201415666465100237450ustar00rootroot00000000000000#!/usr/bin/env python """ Read a MAF from standard input and determine the mean length of ungapped pieces in each block. 
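A column counts as gapped if any row contains '-'. For example, a block whose columns are UU-UUU-U (U = ungapped) has 6 ungapped columns in 3 runs, so 2.0 is printed.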
usage: %prog < maf > out """ import sys import bx.align.maf def main(): for m in bx.align.maf.Reader(sys.stdin): ungapped_columns = 0 ungapped_runs = 0 in_ungapped = False for col in m.column_iter(): is_gap = ('-' in col) if not is_gap: ungapped_columns += 1 if in_ungapped and is_gap: ungapped_runs += 1 in_ungapped = not is_gap if in_ungapped: ungapped_runs += 1 print(ungapped_columns / ungapped_runs) if __name__ == "__main__": main() bx-python-0.8.13/scripts/maf_percent_columns_matching.py000077500000000000000000000012061415666465100235110ustar00rootroot00000000000000#!/usr/bin/env python """ Read a PAIRWISE maf from stdin and print the fraction of columns whose bases match for each alignment. TODO: generalize for more than two speceis. usage: %prog < maf > out """ import sys from bx.align import maf def __main__(): maf_reader = maf.Reader(sys.stdin) for m in maf_reader: match = 0 total = 0 for i in range(0, m.text_size): a = m.components[0].text[i].lower() b = m.components[1].text[i].lower() if a == b: match += 1 total += 1 print(match / total) if __name__ == "__main__": __main__() bx-python-0.8.13/scripts/maf_percent_identity.py000077500000000000000000000014411415666465100220110ustar00rootroot00000000000000#!/usr/bin/env python """ Read a PAIRWISE maf from stdin and print the percent identity of each alignment, where percent identity is defined as the number of matching columns over the number of aligned (non-gap) columns. TODO: Generalize for more than two species usage: %prog < maf > out """ import sys from bx.align import maf def __main__(): maf_reader = maf.Reader(sys.stdin) for m in maf_reader: match = 0 total = 0 for i in range(0, m.text_size): a = m.components[0].text[i].lower() b = m.components[1].text[i].lower() if a == '-' or b == '-': continue elif a == b: match += 1 total += 1 print(match / total) if __name__ == "__main__": __main__() bx-python-0.8.13/scripts/maf_print_chroms.py000077500000000000000000000014231415666465100211470ustar00rootroot00000000000000#!/usr/bin/env python """ Read a maf from stdin and print the chromosome number for each alignment. It searches for 'chr' in each alignment block src, and may not be robust if other src formats are used. NOTE: See 'align_print_template.py' for a more general variation of this program. usage: %prog refindex [options] """ import sys from bx.align import maf from bx.cookbook import doc_optparse def __main__(): # Parse command line arguments options, args = doc_optparse.parse(__doc__) try: refindex = int(args[0]) except Exception: doc_optparse.exit() maf_reader = maf.Reader(sys.stdin) for m in maf_reader: c = m.components[refindex].src print(c[c.rfind("chr") + 3:]) if __name__ == "__main__": __main__() bx-python-0.8.13/scripts/maf_print_scores.py000077500000000000000000000027261415666465100211610ustar00rootroot00000000000000#!/usr/bin/env python """ Read a MAF from standard input and print the score of each block. It can optionally recalculate each score using the hox70 matrix, and normalize the score by the number of columns in the alignment. TODO: Should be able to read an arbitrary scoring matrix. 
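For example, '%prog -r -l < in.maf' prints each block's recalculated hox70 score divided by its column count; blocks with zero columns print NA.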
usage: %prog [options] -r, --recalculate: don't use the score from the maf, recalculate (using hox70 matrix) -l, --lnorm: divide (normalize) score by alignment text length """ import sys from bx.align import maf, score from bx.cookbook import doc_optparse def main(): # Parse command line arguments options, args = doc_optparse.parse(__doc__) try: lnorm = bool(options.lnorm) recalculate = bool(options.recalculate) except Exception: doc_optparse.exit() hox70 = score.build_scoring_scheme(""" A C G T 91 -114 -31 -123 -114 100 -125 -31 -31 -125 100 -114 -123 -31 -114 91 """, 400, 30, default=0) maf_reader = maf.Reader(sys.stdin) for m in maf_reader: if m.text_size == 0: print("NA") continue s = m.score # Recalculate? if recalculate: s = hox70.score_alignment(m) # Normalize? if lnorm: s = s / m.text_size # Print print(s) if __name__ == "__main__": main() bx-python-0.8.13/scripts/maf_randomize.py000077500000000000000000000012651415666465100204340ustar00rootroot00000000000000#!/usr/bin/env python """ Randomize the order of blocks in a MAF file. If `sample_size` is specified, that many random blocks will be kept from the original maf usage: %prog [sample_size] < maf > maf """ import random import sys from bx.align import maf def __main__(): if len(sys.argv) > 1: sample_size = int(sys.argv[1]) maf_reader = maf.Reader(sys.stdin) maf_writer = maf.Writer(sys.stdout) mafs = list(maf_reader) # for m in maf_reader: mafs.append( m ) random.shuffle(mafs) if not sample_size: sample_size = len(mafs) for i in range(0, sample_size): maf_writer.write(mafs[i]) if __name__ == "__main__": __main__() bx-python-0.8.13/scripts/maf_region_coverage_by_src.py000077500000000000000000000034551415666465100231460ustar00rootroot00000000000000#!/usr/bin/env python """ Reads a list of intervals and a set of indexed mafs. For each interval print the amount covered by each species other than the reference. 
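Each input line is echoed, followed by one indented line per non-reference species found in the overlapping blocks, giving the fraction of the interval covered by that species' rows.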
usage: %prog maf_files [options] < interval_file -s, --src=s: Use this src for all intervals -p, --prefix=p: Prepend this to each src before lookup """ import sys from collections import defaultdict import bx.align.maf from bx.cookbook import doc_optparse def __main__(): # Parse Command Line options, args = doc_optparse.parse(__doc__) try: maf_files = args if options.prefix: prefix = options.prefix else: prefix = None except Exception: doc_optparse.exit() # Open indexed access to mafs indexes = [bx.align.maf.Indexed(maf_file, maf_file + ".index") for maf_file in maf_files] # Iterate over input ranges for line in sys.stdin: fields = line.split() src, start, end = fields[0], int(fields[1]), int(fields[2]) if prefix: src = prefix + src total_length = end - start # Find overlap with reference component blocks = [] for index in indexes: blocks += index.get(src, start, end) coverage = defaultdict(int) for block in blocks: overlap_start = max(start, block.components[0].start) overlap_end = min(end, block.components[0].end) length = overlap_end - overlap_start assert length > 0 for c in block.components[1:]: species = c.src.split('.')[0] coverage[species] += length print(line, end=' ') for key, value in coverage.items(): print(" ", key.ljust(10), "%0.2f" % (value / total_length)) if __name__ == "__main__": __main__() bx-python-0.8.13/scripts/maf_select.py000077500000000000000000000013271415666465100177220ustar00rootroot00000000000000#!/usr/bin/env python """ Read a feature file containing a 0 or 1 on each line, output all mafs whose index in maf_file corresponds to a row having a 1 usage: %prog feature_file < maf_file """ import sys import bx.align.maf def __main__(): feature_file = sys.argv[1] if len(sys.argv) > 2: match = int(sys.argv[2]) else: match = 1 feature_vector = [int(line) for line in open(feature_file)] maf_reader = bx.align.maf.Reader(sys.stdin, parse_e_rows=True) maf_writer = bx.align.maf.Writer(sys.stdout) index = 0 for m in maf_reader: if feature_vector[index] == match: maf_writer.write(m) index += 1 if __name__ == "__main__": __main__() bx-python-0.8.13/scripts/maf_shuffle_columns.py000077500000000000000000000011201415666465100216260ustar00rootroot00000000000000#!/usr/bin/env python """ Randomly shuffle the columns of each block of a maf file. Note that this does not change any other features of the maf block, thus the text of each row no longer will match the sequence refered to by the other row attributes! usage: %prog < maf > maf """ import sys from bx import align from bx.align import maf def __main__(): maf_reader = maf.Reader(sys.stdin, parse_e_rows=True) maf_writer = maf.Writer(sys.stdout) for m in maf_reader: align.shuffle_columns(m) maf_writer.write(m) if __name__ == "__main__": __main__() bx-python-0.8.13/scripts/maf_species_in_all_files.py000077500000000000000000000010661415666465100225760ustar00rootroot00000000000000#!/usr/bin/env python """ Takes a list of maf filenames on the command line and prints a comma separated list of the species that occur in all of the mafs. usage %prog maf1 maf2 ... 
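One dot per input file is written to stderr as a progress indicator; the comma separated intersection (for example 'hg18,panTro2,mm8', illustrative species names) goes to stdout.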
""" import operator import sys from functools import reduce import bx.align.maf files = sys.argv[1:] sets = [] for file in files: sys.stderr.write(".") s = set() for block in bx.align.maf.Reader(open(file)): for comp in block.components: s.add(comp.src.split('.')[0]) sets.append(s) inter = reduce(operator.and_, sets) print(",".join(inter)) bx-python-0.8.13/scripts/maf_split_by_src.py000077500000000000000000000030641415666465100211370ustar00rootroot00000000000000#!/usr/bin/env python """ Read a MAF from stdin and break into several mafs based on the source of each block. If the `component` option is provided then only that component will be used to determine the new file for each block, otherwise the src for *all* components will be used. TODO: Should be able to specify component by species/prefix? usage: %prog [options] < maf -o, --outprefix: prepend this to the name of each generate maf -c, --component: use only this component (by index!) to split """ import string import sys from optparse import OptionParser import bx.align.maf INF = "inf" def __main__(): # Parse command line arguments parser = OptionParser() parser.add_option("-o", "--outprefix", action="store", default="") parser.add_option("-c", "--component", action="store", default=None) (options, args) = parser.parse_args() out_prefix = options.outprefix comp = options.component if comp is not None: comp = int(comp) maf_reader = bx.align.maf.Reader(sys.stdin) writers = {} for m in maf_reader: if comp is None: writer_key = string.join([c.src for c in m.components], '_') else: writer_key = m.components[comp].src if writer_key not in writers: writer = bx.align.maf.Writer(open(f"{out_prefix}{writer_key}.maf", "w")) writers[writer_key] = writer else: writer = writers[writer_key] writer.write(m) for key in writers: writers[key].close() if __name__ == "__main__": __main__() bx-python-0.8.13/scripts/maf_thread_for_species.py000077500000000000000000000027601415666465100222750ustar00rootroot00000000000000#!/usr/bin/env python """ Read a maf file from stdin and write out a new maf with only blocks having all of the passed in species, after dropping any other species and removing columns containing only gaps. By default this will attempt to fuse together any blocks which are adjacent after the unwanted species have been dropped. usage: %prog species1 species2 ... < maf -n, --nofuse: Don't attempt to join blocks, just remove rows. """ import sys import bx.align.maf from bx.align.tools.fuse import FusingAlignmentWriter from bx.align.tools.thread import ( get_components_for_species ) from bx.cookbook import doc_optparse def main(): options, args = doc_optparse.parse(__doc__) try: species = args # Allow a comma separated list, TODO: allow a newick format tree if len(species) == 1 and ',' in species[0]: species = species[0].split(',') fuse = not(bool(options.nofuse)) except Exception: doc_optparse.exit() maf_reader = bx.align.maf.Reader(sys.stdin, parse_e_rows=True) maf_writer = bx.align.maf.Writer(sys.stdout) if fuse: maf_writer = FusingAlignmentWriter(maf_writer) for m in maf_reader: new_components = get_components_for_species(m, species) if new_components: m.components = new_components m.score = 0.0 m.remove_all_gap_columns() maf_writer.write(m) maf_reader.close() maf_writer.close() if __name__ == "__main__": main() bx-python-0.8.13/scripts/maf_tile.py000077500000000000000000000103121415666465100173720ustar00rootroot00000000000000#!/usr/bin/env python """ 'Tile' the blocks of a maf file over each of a set of intervals. 
The highest scoring block that covers any part of a region will be used, and pieces not covered by any block filled with "-" or optionally "*". The list of species to tile is specified by `tree` (either a tree or just a comma separated list). The `seq_db` is a lookup table mapping chromosome names to nib file for filling in the reference species. Maf files must be indexed. NOTE: See maf_tile_2.py for a more sophisticated version of this program, I think this one will be eliminated in the future. usage: %prog tree maf_files... -m, --missingData: Inserts wildcards for missing block rows instead of '-' """ import string import sys import bx.align as align import bx.align.maf import bx.seq.nib from bx.cookbook import doc_optparse tree_tx = string.maketrans("(),", " ") def main(): options, args = doc_optparse.parse(__doc__) try: sources = args[0].translate(tree_tx).split() seq_db = load_seq_db(args[1]) index = bx.align.maf.MultiIndexed(args[2:]) out = bx.align.maf.Writer(sys.stdout) missing_data = bool(options.missingData) except Exception: doc_optparse.exception() for line in sys.stdin: ref_src, start, end = line.split()[0:3] do_interval(sources, index, out, ref_src, int(start), int(end), seq_db, missing_data) out.close() def load_seq_db(fname): db = {} for line in open(fname): fields = line.split(',') src = fields[1] + "." + fields[2] seq = fields[4] db[src] = seq.strip() return db def do_interval(sources, index, out, ref_src, start, end, seq_db, missing_data): assert sources[0].split('.')[0] == ref_src.split('.')[0], "{} != {}".format(sources[0].split('.')[0], ref_src.split('.')[0]) base_len = end - start blocks = index.get(ref_src, start, end) # From low to high score blocks.sort(key=lambda _: _.score) mask = [-1] * base_len ref_src_size = None for i, block in enumerate(blocks): ref = block.get_component_by_src_start(ref_src) ref_src_size = ref.src_size assert ref.strand == "+" slice_start = max(start, ref.start) slice_end = min(end, ref.end) for j in range(slice_start, slice_end): mask[j-start] = i tiled = [] for i in range(len(sources)): tiled.append([]) for ss, ee, index in intervals_from_mask(mask): if index < 0: tiled[0].append(bx.seq.nib.NibFile(open(seq_db[ref_src])).get(start+ss, ee-ss)) for row in tiled[1:]: if missing_data: row.append("*" * (ee - ss)) else: row.append("-" * (ee - ss)) else: slice_start = start + ss slice_end = start + ee block = blocks[index] ref = block.get_component_by_src_start(ref_src) sliced = block.slice_by_component(ref, slice_start, slice_end) sliced = sliced.limit_to_species(sources) sliced.remove_all_gap_columns() for i, src in enumerate(sources): comp = sliced.get_component_by_src_start(src) if comp: tiled[i].append(comp.text) else: if missing_data: tiled[i].append("*" * sliced.text_size) else: tiled[i].append("-" * sliced.text_size) a = align.Alignment() for i, name in enumerate(sources): text = "".join(tiled[i]) size = len(text) - text.count("-") if i == 0: if ref_src_size is None: ref_src_size = bx.seq.nib.NibFile(open(seq_db[ref_src])).length c = align.Component(ref_src, start, end-start, "+", ref_src_size, text) else: c = align.Component(name + ".fake", 0, size, "?", size, text) a.add_component(c) out.write(a) def intervals_from_mask(mask): start = 0 last = mask[0] for i in range(1, len(mask)): if mask[i] != last: yield start, i, last start = i last = mask[i] yield start, len(mask), last main() bx-python-0.8.13/scripts/maf_tile_2.py000077500000000000000000000262311415666465100176220ustar00rootroot00000000000000#!/usr/bin/env python """ 'Tile' the 
blocks of a maf file over each of a set of intervals. The highest scoring
block that covers any part of a region will be used, and pieces not covered
by any block filled with "-" or optionally "*".

This version uses synteny annotation if found on the alignment blocks, and
will attempt to fill gaps with special characters depending on the type of
gap, similar to the projected alignment display of the UCSC genome browser:
'*' for new, '=' for inverse/inset, '#' for contig, 'X' for missing.

- The list of species to tile is specified by the first argument (either a
  newick tree or just a comma separated list).
- The `seq_db` is a lookup table mapping species and chromosome names to nib
  files for filling in the reference species sequence. In this file column 1
  contains the species, column 2 the chromosome or contig, and column 4 the
  directory containing the sequences in nib format.
- The remaining arguments are a list of maf files which must have
  corresponding ".index" files.

TODO: The seq_db format is specific to something old and obscure at PSU,
need to standardize.

usage: %prog list,of,species,to,keep seq_db_file indexed_maf_files ...
    -m, --missingData: Inserts wildcards for missing block rows instead of '-'
    -s, --strand: Use strand information for intervals, reverse complement if '-'
"""

import sys

import bx.align as align
import bx.align.maf as maf
import bx.seq.nib
from bx.cookbook import doc_optparse

tree_tx = str.maketrans("(),", "   ")


def main():
    options, args = doc_optparse.parse(__doc__)
    try:
        sources = args[0].translate(tree_tx).split()
        seq_db = load_seq_db(args[1])
        index = maf.MultiIndexed(args[2:])
        out = maf.Writer(sys.stdout)
        missing_data = bool(options.missingData)
        use_strand = bool(options.strand)
    except Exception:
        doc_optparse.exception()
    for line in sys.stdin:
        fields = line.split()
        ref_src, start, end = fields[0:3]
        if use_strand and len(fields) > 5:
            strand = fields[5]
        else:
            strand = '+'
        do_interval(sources, index, out, ref_src, int(start), int(end), seq_db, missing_data, strand)
    out.close()


def load_seq_db(fname):
    db = {}
    for line in open(fname):
        fields = line.split(',')
        src = fields[1] + "." + fields[2]
        seq = fields[4]
        db[src] = seq.strip()
    return db


def get_fill_char(maf_status):
    """
    Return the character that should be used to fill between blocks
    having a given status
    """
    # assert maf_status not in (maf.MAF_CONTIG_NESTED_STATUS, maf.MAF_NEW_NESTED_STATUS,
    #                           maf.MAF_MAYBE_NEW_NESTED_STATUS ), \
    #     "Nested rows do not make sense in a single coverage MAF (or do they?)"
    if maf_status in (maf.MAF_NEW_STATUS, maf.MAF_MAYBE_NEW_STATUS,
                      maf.MAF_NEW_NESTED_STATUS, maf.MAF_MAYBE_NEW_NESTED_STATUS):
        return "*"
    elif maf_status in (maf.MAF_INVERSE_STATUS, maf.MAF_INSERT_STATUS):
        return "="
    elif maf_status in (maf.MAF_CONTIG_STATUS, maf.MAF_CONTIG_NESTED_STATUS):
        return "#"
    elif maf_status == maf.MAF_MISSING_STATUS:
        return "X"
    else:
        raise ValueError("Unknown maf status")


def guess_fill_char(left_comp, right_comp):
    """
    For the case where there is no annotated synteny we will try to guess it
    """
    # No left component, obviously new
    if left_comp is None:
        return "*"
    # First check that the blocks have the same src (not just species) and
    # orientation
    if (left_comp.src == right_comp.src and left_comp.strand == right_comp.strand):
        # Are they completely contiguous?
        # Easy to call that a gap
        if left_comp.end == right_comp.start:
            return "-"
        # TODO: should be able to make some guesses about short insertions
        # here
    # All other cases we have no clue about
    return "*"


def remove_all_gap_columns(texts):
    """
    Remove any columns containing only gaps from alignment texts
    """
    seqs = [list(t) for t in texts]
    i = 0
    text_size = len(texts[0])
    while i < text_size:
        all_gap = True
        for seq in seqs:
            if seq[i] not in ('-', '#', '*', '=', 'X', '@'):
                all_gap = False
        if all_gap:
            for seq in seqs:
                del seq[i]
            text_size -= 1
        else:
            i += 1
    return [''.join(s) for s in seqs]


def do_interval(sources, index, out, ref_src, start, end, seq_db, missing_data, strand):
    """
    Join together alignment blocks to create a semi-projected local
    alignment (small reference sequence deletions are kept as supported by
    the local alignment).
    """
    ref_src_size = None
    # Make sure the reference component is also the first in the source list
    assert sources[0].split('.')[0] == ref_src.split('.')[0], "%s != %s" \
        % (sources[0].split('.')[0], ref_src.split('.')[0])
    # Counter for the last reference species base we have processed
    last_stop = start
    # Rows in maf blocks come in arbitrary order, we'll convert things
    # to the desired order of the tiled block
    source_to_index = {name: i for (i, name) in enumerate(sources)}
    # This gets all the maf blocks overlapping our interval of interest
    # NOTE: Unlike maf_tile we're expecting
    # things to be single coverage in the reference species, so we won't
    # sort by score and lay down.
    blocks = index.get(ref_src, start, end)
    # The last component seen for each species onto which we are tiling
    last_components = [None] * len(sources)
    last_status = [None] * len(sources)
    cols_needing_fill = [0] * len(sources)
    # The list of strings in which we build up the tiled alignment
    tiled_rows = ["" for i in range(len(sources))]
    # Enumerate the (ordered) list of blocks
    for i, block in enumerate(blocks):
        # Check for overlap in reference species
        ref = block.get_component_by_src_start(ref_src)
        if ref.start < last_stop:
            if ref.end < last_stop:
                continue
            block = block.slice_by_component(ref, last_stop, min(end, ref.end))
            ref = block.get_component_by_src_start(ref_src)
        block = block.slice_by_component(ref, max(start, ref.start), min(end, ref.end))
        ref = block.get_component_by_src_start(ref_src)
        # print block
        assert last_components[0] is None or ref.start >= last_components[0].end, \
            "MAF must be sorted and single coverage in reference species!"
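        # (If blocks overlapped on the reference, a reference column could be
        # claimed by more than one block and the tiling would be ambiguous,
        # hence the single-coverage requirement above.)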
assert ref.strand == "+", \ "MAF must have all reference species blocks on the plus strand" # Store the size of the reference sequence for building fake block if ref_src_size is None: ref_src_size = ref.src_size # Handle the reference component seperately, it has no synteny status # but we will try to fill in missing sequence if ref.start > last_stop: # Need to fill in some reference sequence chunk_len = ref.start - last_stop text = bx.seq.nib.NibFile(open(seq_db[ref_src])).get(last_stop, chunk_len) tiled_rows[0] += text for source in sources[1:]: cols_needing_fill[source_to_index[source]] += chunk_len # Do reference component chunk_len = len(ref.text) tiled_rows[0] += ref.text # Do each other component for source in sources[1:]: source_index = source_to_index[source] comp = block.get_component_by_src_start(source) if comp: if comp.synteny_left is None: left_status, left_length = None, -1 else: left_status, left_length = comp.synteny_left if comp.synteny_right is None: right_status, right_length = None, -1 else: right_status, right_length = comp.synteny_right # We have a component, do we need to do some filling? cols_to_fill = cols_needing_fill[source_index] if cols_to_fill > 0: # Adjacent components should have matching status # assert last_status[ source_index ] is None or last_status[ source_index ] == left_status, \ # "left status (%s) does not match right status (%s) of last component for %s" \ # % ( left_status, last_status[ source_index ], source ) if left_status is None: fill_char = guess_fill_char(last_components[source_index], comp) else: fill_char = get_fill_char(left_status) tiled_rows[source_index] += (fill_char * cols_to_fill) cols_needing_fill[source_index] = 0 # Okay, filled up to current position, now append the text tiled_rows[source_index] += comp.text assert len(tiled_rows[source_index]) == len(tiled_rows[0]), \ "length of tiled row should match reference row" last_components[source_index] = comp last_status[source_index] = right_status else: # No component, we'll have to fill this region when we know # the status cols_needing_fill[source_index] += chunk_len last_stop = ref.end # No more components, clean up the ends if last_stop < end: # Need to fill in some reference sequence chunk_len = end - last_stop tiled_rows[0] += bx.seq.nib.NibFile(open(seq_db[ref_src])).get(last_stop, chunk_len) for source in sources[1:]: cols_needing_fill[source_to_index[source]] += chunk_len # Any final filling that needs to be done? for source in sources[1:]: source_index = source_to_index[source] fill_needed = cols_needing_fill[source_index] if fill_needed > 0: if last_components[source_index] is None: # print >>sys.stderr, "Never saw any components for %s, filling with @" % source fill_char = '@' else: if last_status[source_index] is None: fill_char = '*' else: fill_char = get_fill_char(last_status[source_index]) tiled_rows[source_index] += fill_char * fill_needed assert len(tiled_rows[source_index]) == len(tiled_rows[0]), \ "length of tiled row should match reference row" # Okay, now make up the fake alignment from the tiled rows. 
    tiled_rows = remove_all_gap_columns(tiled_rows)
    a = align.Alignment()
    for i, name in enumerate(sources):
        text = "".join(tiled_rows[i])
        size = len(text) - text.count("-")
        if i == 0:
            if ref_src_size is None:
                ref_src_size = bx.seq.nib.NibFile(open(seq_db[ref_src])).length
            c = align.Component(ref_src, start, end-start, "+", ref_src_size, text)
        else:
            c = align.Component(name + ".fake", 0, size, "?", size, text)
        a.add_component(c)
    if strand == '-':
        a = a.reverse_complement()
    out.write(a)


main()
bx-python-0.8.13/scripts/maf_tile_2bit.py000077500000000000000000000256321415666465100203250ustar00rootroot00000000000000#!/usr/bin/env python
"""
'Tile' the blocks of a maf file over each of a set of intervals. The highest
scoring block that covers any part of a region will be used, and pieces not
covered by any block filled with "-" or optionally "*".

This version uses synteny annotation if found on the alignment blocks, and
will attempt to fill gaps with special characters depending on the type of
gap, similar to the projected alignment display of the UCSC genome browser:
'*' for new, '=' for inverse/inset, '#' for contig, 'X' for missing.

- The list of species to tile is specified by the first argument (either a
  newick tree or just a comma separated list).
- A 2bit file is expected for the reference species to fill in missing
  sequence.
- The remaining arguments are a list of maf files which must have
  corresponding ".index" files.

TODO: The seq_db format is specific to something old and obscure at PSU,
need to standardize.

usage: %prog list,of,species,to,keep ref.2bit indexed_maf_files ...
    -m, --missingData: Inserts wildcards for missing block rows instead of '-'
    -s, --strand: Use strand information for intervals, reverse complement if '-'
"""

import sys

import bx.align as align
import bx.align.maf as maf
import bx.seq.nib
import bx.seq.twobit
from bx.cookbook import doc_optparse

tree_tx = str.maketrans("(),", "   ")


def main():
    options, args = doc_optparse.parse(__doc__)
    try:
        sources = args[0].translate(tree_tx).split()
        ref_2bit = bx.seq.twobit.TwoBitFile(open(args[1], "rb"))
        index = maf.MultiIndexed(args[2:])
        out = maf.Writer(sys.stdout)
        missing_data = bool(options.missingData)
        use_strand = bool(options.strand)
    except Exception:
        doc_optparse.exception()
    for line in sys.stdin:
        fields = line.split()
        ref_src, start, end = fields[0:3]
        if use_strand and len(fields) > 5:
            strand = fields[5]
        else:
            strand = '+'
        do_interval(sources, index, out, ref_src, int(start), int(end), ref_2bit, missing_data, strand)
    out.close()


def get_fill_char(maf_status):
    """
    Return the character that should be used to fill between blocks
    having a given status
    """
    # assert maf_status not in (maf.MAF_CONTIG_NESTED_STATUS, maf.MAF_NEW_NESTED_STATUS,
    #                           maf.MAF_MAYBE_NEW_NESTED_STATUS ), \
    #     "Nested rows do not make sense in a single coverage MAF (or do they?)"
    if maf_status in (maf.MAF_NEW_STATUS, maf.MAF_MAYBE_NEW_STATUS,
                      maf.MAF_NEW_NESTED_STATUS, maf.MAF_MAYBE_NEW_NESTED_STATUS):
        return "*"
    elif maf_status in (maf.MAF_INVERSE_STATUS, maf.MAF_INSERT_STATUS):
        return "="
    elif maf_status in (maf.MAF_CONTIG_STATUS, maf.MAF_CONTIG_NESTED_STATUS):
        return "#"
    elif maf_status == maf.MAF_MISSING_STATUS:
        return "X"
    else:
        raise ValueError("Unknown maf status")


def guess_fill_char(left_comp, right_comp):
    """
    For the case where there is no annotated synteny we will try to guess it
    """
    # No left component, obviously new
    if left_comp is None:
        return "*"
    # First check that the blocks have the same src (not just species) and
    # orientation
    if (left_comp.src ==
            right_comp.src and left_comp.strand == right_comp.strand):
        # Are they completely contiguous? Easy to call that a gap
        if left_comp.end == right_comp.start:
            return "-"
        # TODO: should be able to make some guesses about short insertions
        # here
    # All other cases we have no clue about
    return "*"


def remove_all_gap_columns(texts):
    """
    Remove any columns containing only gaps from alignment texts
    """
    seqs = [list(t) for t in texts]
    i = 0
    text_size = len(texts[0])
    while i < text_size:
        all_gap = True
        for seq in seqs:
            if seq[i] not in ('-', '#', '*', '=', 'X', '@'):
                all_gap = False
        if all_gap:
            for seq in seqs:
                del seq[i]
            text_size -= 1
        else:
            i += 1
    return [''.join(s) for s in seqs]


def do_interval(sources, index, out, ref_src, start, end, ref_2bit, missing_data, strand):
    """
    Join together alignment blocks to create a semi-projected local
    alignment (small reference sequence deletions are kept as supported by
    the local alignment).
    """
    ref_src_size = None
    # Make sure the reference component is also the first in the source list
    assert sources[0].split('.')[0] == ref_src.split('.')[0], "%s != %s" \
        % (sources[0].split('.')[0], ref_src.split('.')[0])
    # Extract non-species part from ref_src for grabbing sequence
    ref_chr = ref_src
    if "." in ref_src:
        ref_chr = ref_src[ref_src.index(".")+1:]
    # Counter for the last reference species base we have processed
    last_stop = start
    # Rows in maf blocks come in arbitrary order, we'll convert things
    # to the desired order of the tiled block
    source_to_index = {name: i for (i, name) in enumerate(sources)}
    # This gets all the maf blocks overlapping our interval of interest
    # NOTE: Unlike maf_tile we're expecting
    # things to be single coverage in the reference species, so we won't
    # sort by score and lay down.
    blocks = index.get(ref_src, start, end)
    # The last component seen for each species onto which we are tiling
    last_components = [None] * len(sources)
    last_status = [None] * len(sources)
    cols_needing_fill = [0] * len(sources)
    # The list of strings in which we build up the tiled alignment
    tiled_rows = ["" for i in range(len(sources))]
    # Enumerate the (ordered) list of blocks
    for i, block in enumerate(blocks):
        # Check for overlap in reference species
        ref = block.get_component_by_src_start(ref_src)
        if ref.start < last_stop:
            if ref.end < last_stop:
                continue
            block = block.slice_by_component(ref, last_stop, min(end, ref.end))
            ref = block.get_component_by_src_start(ref_src)
        block = block.slice_by_component(ref, max(start, ref.start), min(end, ref.end))
        ref = block.get_component_by_src_start(ref_src)
        # print block
        assert last_components[0] is None or ref.start >= last_components[0].end, \
            "MAF must be sorted and single coverage in reference species!"
assert ref.strand == "+", \ "MAF must have all reference species blocks on the plus strand" # Store the size of the reference sequence for building fake block if ref_src_size is None: ref_src_size = ref.src_size # Handle the reference component seperately, it has no synteny status # but we will try to fill in missing sequence if ref.start > last_stop: # Need to fill in some reference sequence chunk_len = ref.start - last_stop text = ref_2bit[ref_chr].get(last_stop, last_stop + chunk_len) tiled_rows[0] += text for source in sources[1:]: cols_needing_fill[source_to_index[source]] += chunk_len # Do reference component chunk_len = len(ref.text) tiled_rows[0] += ref.text # Do each other component for source in sources[1:]: source_index = source_to_index[source] comp = block.get_component_by_src_start(source) if comp: if comp.synteny_left is None: left_status, left_length = None, -1 else: left_status, left_length = comp.synteny_left if comp.synteny_right is None: right_status, right_length = None, -1 else: right_status, right_length = comp.synteny_right # We have a component, do we need to do some filling? cols_to_fill = cols_needing_fill[source_index] if cols_to_fill > 0: # Adjacent components should have matching status # assert last_status[ source_index ] is None or last_status[ source_index ] == left_status, \ # "left status (%s) does not match right status (%s) of last component for %s" \ # % ( left_status, last_status[ source_index ], source ) if left_status is None: fill_char = guess_fill_char(last_components[source_index], comp) else: fill_char = get_fill_char(left_status) tiled_rows[source_index] += (fill_char * cols_to_fill) cols_needing_fill[source_index] = 0 # Okay, filled up to current position, now append the text tiled_rows[source_index] += comp.text assert len(tiled_rows[source_index]) == len(tiled_rows[0]), \ "length of tiled row should match reference row" last_components[source_index] = comp last_status[source_index] = right_status else: # No component, we'll have to fill this region when we know # the status cols_needing_fill[source_index] += chunk_len last_stop = ref.end # No more components, clean up the ends if last_stop < end: # Need to fill in some reference sequence chunk_len = end - last_stop tiled_rows[0] += ref_2bit[ref_chr].get(last_stop, last_stop + chunk_len) for source in sources[1:]: cols_needing_fill[source_to_index[source]] += chunk_len # Any final filling that needs to be done? for source in sources[1:]: source_index = source_to_index[source] fill_needed = cols_needing_fill[source_index] if fill_needed > 0: if last_components[source_index] is None: # print >>sys.stderr, "Never saw any components for %s, filling with @" % source fill_char = '@' else: if last_status[source_index] is None: fill_char = '*' else: fill_char = get_fill_char(last_status[source_index]) tiled_rows[source_index] += fill_char * fill_needed assert len(tiled_rows[source_index]) == len(tiled_rows[0]), \ "length of tiled row should match reference row" # Okay, now make up the fake alignment from the tiled rows. 
tiled_rows = remove_all_gap_columns(tiled_rows) a = align.Alignment() for i, name in enumerate(sources): text = "".join(tiled_rows[i]) size = len(text) - text.count("-") if i == 0: if ref_src_size is None: ref_src_size = ref_2bit[ref_chr].length c = align.Component(ref_src, start, end-start, "+", ref_src_size, text) else: c = align.Component(name + ".fake", 0, size, "?", size, text) a.add_component(c) if strand == '-': a = a.reverse_complement() out.write(a) main() bx-python-0.8.13/scripts/maf_to_axt.py000077500000000000000000000052671415666465100177500ustar00rootroot00000000000000#!/usr/bin/env python """ Application to convert MAF file to AXT file, projecting to any two species. Reads a MAF file from standard input and writes an AXT file to standard out; some statistics are written to standard error. The user must specify the two species of interest. usage: %prog primary_species secondary_species < maf_file > axt_file """ __author__ = "Bob Harris (rsharris@bx.psu.edu)" import copy import sys import bx.align.axt import bx.align.maf def usage(s=None): message = """ maf_to_axt primary_species secondary_species < maf_file > axt_file """ if s is None: sys.exit(message) else: sys.exit(f"{s}\n{message}") def main(): primary = None secondary = None args = sys.argv[1:] while len(args) > 0: arg = args.pop(0) val = None fields = arg.split("=", 1) if len(fields) == 2: arg = fields[0] val = fields[1] if val == "": usage("missing a value in %s=" % arg) if primary is None and val is None: primary = arg elif secondary is None and val is None: secondary = arg else: usage("unknown argument: %s" % arg) if primary is None: usage("missing primary species") if secondary is None: usage("missing secondary species") # read the alignments and other info out = bx.align.axt.Writer(sys.stdout) axtsRead = 0 mafsWritten = 0 for mafBlock in bx.align.maf.Reader(sys.stdin): axtsRead += 1 p = mafBlock.get_component_by_src_start(primary) if p is None: continue s = mafBlock.get_component_by_src_start(secondary) if s is None: continue axtBlock = bx.align.Alignment(mafBlock.score, mafBlock.attributes) axtBlock.add_component(clone_component(p)) axtBlock.add_component(clone_component(s)) remove_mutual_gaps(axtBlock) if axtBlock.text_size == 0: continue out.write(axtBlock) mafsWritten += 1 sys.stderr.write("%d blocks read, %d written\n" % (axtsRead, mafsWritten)) def clone_component(c): return bx.align.Component(c.src, c.start, c.size, c.strand, c.src_size, copy.copy(c.text)) def remove_mutual_gaps(block): if len(block.components) == 0: return nonGaps = [] for c in block.components: for ix in range(0, block.text_size): if ix not in nonGaps and c.text[ix] != "-": nonGaps.append(ix) nonGaps.sort() for c in block.components: c.text = "".join([c.text[ix] for ix in nonGaps]) block.text_size = len(nonGaps) if __name__ == "__main__": main() bx-python-0.8.13/scripts/maf_to_concat_fasta.py000077500000000000000000000033061415666465100215710ustar00rootroot00000000000000#!/usr/bin/env python """ Read a maf and print the text as a fasta file, concatenating blocks. A specific subset of species can be chosen. usage %prog [options] species1,species2,... 
< maf_file > fasta_file
    --fill="expression": Insert this between blocks
    --wrap=columns: Wrap FASTA to this many columns
    --nowrap: Don't wrap the FASTA output
"""

import sys
from optparse import OptionParser

from bx.align import maf


def __main__():
    # Parse command line arguments
    parser = OptionParser()
    parser.add_option("--fill", action="store", default=None, type="string", help="")
    parser.add_option("--wrap", action="store", default=None, type="int", help="")
    parser.add_option("--nowrap", action="store_true", default=False, dest="nowrap", help="")
    (options, args) = parser.parse_args()

    species = []
    for arg in args:
        species.extend(arg.split(','))

    fill = ""
    if options.fill:
        fill = eval(options.fill)

    wrap = 50
    if options.wrap is not None:
        wrap = options.wrap
    elif options.nowrap:
        wrap = 0

    # create the concatenated sequences
    texts = {}
    for s in species:
        texts[s] = []
    maf_reader = maf.Reader(sys.stdin)
    for m in maf_reader:
        for s in species:
            c = m.get_component_by_src_start(s)
            if c:
                texts[s].append(c.text)
            else:
                texts[s].append("-" * m.text_size)
    for s in species:
        print(">" + s)
        print_n(fill.join(texts[s]), wrap)


def print_n(s, n, f=sys.stdout):
    if n <= 0:
        print(s, file=f)
    else:
        p = 0
        while p < len(s):
            print(s[p:min(p+n, len(s))], file=f)
            p += n


if __name__ == "__main__":
    __main__()
bx-python-0.8.13/scripts/maf_to_fasta.py000077500000000000000000000014521415666465100202420ustar00rootroot00000000000000#!/usr/bin/env python
"""
Read a maf and print the text as a fasta file.

usage: %prog < maf > fasta
"""

import sys

from bx.align import maf


def __main__():
    maf_reader = maf.Reader(sys.stdin)

    # Confusing since maf_to_concat_fasta takes names.
    # if len( sys.argv ) > 1:
    #     comps = map( int, sys.argv[1:] )
    # else:
    #     comps = None
    comps = None

    for m in maf_reader:
        if comps:
            l = [m.components[i] for i in comps]
        else:
            l = m.components
        for c in l:
            print(">%s:%d-%d" % (c.src, c.start, c.end))
            print(c.text)


def print_n(s, n, f=sys.stdout):
    p = 0
    while p < len(s):
        print(s[p:min(p+n, len(s))], file=f)
        p += n


if __name__ == "__main__":
    __main__()
bx-python-0.8.13/scripts/maf_to_int_seqs.py000077500000000000000000000017221415666465100207710ustar00rootroot00000000000000#!/usr/bin/env python
"""
For each block in a maf file (read from stdin) write a sequence of ints
corresponding to the columns of the block after applying the provided
sequence mapping.

The 'correct' number of species is determined by the mapping file, blocks
not having this number of species will be ignored.

usage: %prog mapping_file
"""

import sys

import bx.align.maf
from bx import seqmapping


def main():
    if len(sys.argv) > 1:
        _, alpha_map = seqmapping.alignment_mapping_from_file(open(sys.argv[1]))
    else:
        alpha_map = None

    for maf in bx.align.maf.Reader(sys.stdin):
        # Translate alignment to ints
        int_seq = seqmapping.DNA.translate_list([c.text for c in maf.components])
        # Apply mapping
        if alpha_map:
            int_seq = alpha_map.translate(int_seq)
        # Write ints separated by spaces
        for i in int_seq:
            print(i, end=' ')
        print()


if __name__ == "__main__":
    main()
bx-python-0.8.13/scripts/maf_translate_chars.py000077500000000000000000000013371415666465100216210ustar00rootroot00000000000000#!/usr/bin/env python
"""
Translate a maf file containing gap ambiguity characters as produced by
'maf_tile_2.py' to a new file in which "#" (contiguous) is replaced by "-"
and all other types are replaced by "*".

TODO: This could be much more general, should just take the translation
table from the command line.
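The fixed table used below maps '#' to '-' and each of '=', 'X' and '@'
to '*'.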
usage: %prog < maf > maf
"""

import sys

from bx.align import maf

table = str.maketrans("#=X@", "-***")


def main():
    maf_reader = maf.Reader(sys.stdin)
    maf_writer = maf.Writer(sys.stdout)

    for m in maf_reader:
        for c in m.components:
            c.text = c.text.translate(table)
        maf_writer.write(m)

    maf_writer.close()


if __name__ == "__main__":
    main()
bx-python-0.8.13/scripts/maf_truncate.py000077500000000000000000000013751415666465100202710ustar00rootroot00000000000000#!/usr/bin/env python
"""
Pass through blocks from a maf file until a certain number of columns
have been passed.

usage: %prog -c cols < maf > maf
"""

import sys
from optparse import OptionParser

from bx.align import maf


def __main__():
    parser = OptionParser()
    parser.add_option("-c", "--cols", action="store")
    (options, args) = parser.parse_args()

    maf_reader = maf.Reader(sys.stdin, parse_e_rows=True)
    maf_writer = maf.Writer(sys.stdout)

    if not options.cols:
        raise Exception("Cols argument is required")
    cols = int(options.cols)

    count = 0

    for m in maf_reader:
        maf_writer.write(m)
        count += m.text_size
        if count >= cols:
            return


if __name__ == "__main__":
    __main__()
bx-python-0.8.13/scripts/maf_word_frequency.py000077500000000000000000000017511415666465100214760ustar00rootroot00000000000000#!/usr/bin/env python
"""
Read a MAF and print counts and frequencies of all n-mers
(words composed of n consecutive alignment columns)

TODO: reconcile this and maf_mapping_word_frequency.py

usage: %prog n < maf_file
"""

import sys

from bx.align import maf


def __main__():
    motif_len = int(sys.argv[1])

    big_map = {}
    total = 0

    maf_reader = maf.Reader(sys.stdin)

    for m in maf_reader:
        texts = [c.text.upper() for c in m.components]
        for i in range(m.text_size - motif_len):
            motif = " ".join(text[i: i + motif_len] for text in texts)
            if motif in big_map:
                big_map[motif] += 1
            else:
                big_map[motif] = 1
            total += 1

    items = sorted(zip(big_map.values(), big_map.keys()))
    items.reverse()

    for count, motif in items:
        print("%d\t%0.10f\t%s" % (count, count / total, motif))


if __name__ == "__main__":
    __main__()
bx-python-0.8.13/scripts/mask_quality.py000066400000000000000000000056421415666465100203240ustar00rootroot00000000000000#!/usr/bin/env python
"""
Masks an AXT or MAF file based on quality (from a binned_array) and outputs
AXT or MAF.

Binned array form of quality scores can be generated with `qv_to_bqv.py`.

usage: %prog input output
    -i, --input=N: Format of input (axt or maf)
    -o, --output=N: Format of output (axt or maf)
    -m, --mask=N: Character to use as mask character
    -q, --quality=N: Min quality allowed
    -t, --type=N: base_pair or nqs
    -l, --list=N: colon separated list of species,len_file[,qualityfile].
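    For example (hypothetical file names):
    -l hg18,hg18.len,hg18.qual.bqv:mm8,mm8.len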
""" import fileinput import sys import bx.align.axt import bx.align.maf import bx.binned_array from bx.align.sitemask.quality import Simple from bx.cookbook import doc_optparse def main(): options, args = doc_optparse.parse(__doc__) try: inputformat = options.input outputformat = options.output mask = options.mask minqual = int(options.quality) speciesAndLens = options.list inputfile = args[0] outputfile = args[1] except Exception: doc_optparse.exception() outstream = open(outputfile, "w") instream = open(inputfile) qualfiles = {} # read lens specieslist = speciesAndLens.split(":") species_to_lengths = {} for entry in specieslist: fields = entry.split(",") lenstream = fileinput.FileInput(fields[1]) lendict = dict() for line in lenstream: region = line.split() lendict[region[0]] = int(region[1]) species_to_lengths[fields[0]] = lendict if len(fields) >= 3: qualfiles[fields[0]] = fields[2] specieslist = [a.split(":")[0] for a in specieslist] # open quality binned_arrays reader = None writer = None if inputformat == "axt": # load axt if len(specieslist) != 2: print("AXT is pairwise only.") sys.exit() reader = bx.align.axt.Reader(instream, species1=specieslist[0], species2=specieslist[1], species_to_lengths=species_to_lengths) elif outputformat == "maf": # load maf reader = bx.align.maf.Reader(instream, species_to_lengths=species_to_lengths) if outputformat == "axt": # setup axt if len(specieslist) != 2: print("AXT is pairwise only.") sys.exit() writer = bx.align.axt.Writer(outstream, attributes=reader.attributes) elif outputformat == "maf": # setup maf writer = bx.align.maf.Writer(outstream, attributes=reader.attributes) qualfilter = Simple(mask=mask, qualspecies=species_to_lengths, qualfiles=qualfiles, minqual=minqual, cache=50) qualfilter.run(reader, writer.write) print("For "+str(qualfilter.total)+" base pairs, "+str(qualfilter.masked)+" base pairs were masked.") print(str(float(qualfilter.masked)/float(qualfilter.total) * 100)+"%") if __name__ == "__main__": main() bx-python-0.8.13/scripts/nib_chrom_intervals_to_fasta.py000077500000000000000000000020261415666465100235240ustar00rootroot00000000000000#!/usr/bin/env python """ Read a set of ranges and a nib file, print portions of nib overlapping those ranges to stdout TODO: General sequence handling would be nice, as well as merging with 'nib_intervals_to_fasta.py'. 
usage: %prog nib_dir < range_file """ import sys import bx.seq.nib from bx.cookbook import doc_optparse def __main__(): options, args = doc_optparse.parse(__doc__) try: nib_dir = args[0] except IndexError: doc_optparse.exit() nibs = {} for line in sys.stdin: fields = line.split() chrom, start, end = fields[0], int(fields[1]), int(fields[2]) print(">", chrom, start, end) if chrom in nibs: nib = nibs[chrom] else: nibs[chrom] = nib = bx.seq.nib.NibFile(open(f"{nib_dir}/{chrom}.nib")) print_wrapped(nib.get(start, end - start)) def print_wrapped(s): l = len(s) c = 0 while c < l: b = min(c + 50, l) print(s[c:b]) c = b if __name__ == "__main__": __main__() bx-python-0.8.13/scripts/nib_intervals_to_fasta.py000077500000000000000000000014601415666465100223350ustar00rootroot00000000000000#!/usr/bin/env python """ Read a set of ranges and a nib file, print portions of nib overlapping those ranges to stdout usage: %prog range_file nib_file """ import bx.seq.nib from bx.cookbook import doc_optparse def __main__(): options, args = doc_optparse.parse(__doc__) try: range_file = open(args[0]) nib_file = open(args[1]) except Exception: doc_optparse.exit() nib = bx.seq.nib.NibFile(nib_file) for line in range_file: fields = line.split() start, end = int(fields[0]), int(fields[1]) print(">", start, end) print_wrapped(nib.get(start, end - start)) def print_wrapped(s): l = len(s) c = 0 while c < l: b = min(c + 50, l) print(s[c:b]) c = b if __name__ == "__main__": __main__() bx-python-0.8.13/scripts/nib_length.py000077500000000000000000000003331415666465100177250ustar00rootroot00000000000000#!/usr/bin/env python """ Print the number of bases in a nib file. usage: %prog nib_file """ import sys from bx.seq import nib as seq_nib with open(sys.argv[1]) as f: nib = seq_nib.NibFile(f) print(nib.length) bx-python-0.8.13/scripts/one_field_per_line.py000077500000000000000000000003531415666465100214170ustar00rootroot00000000000000#!/usr/bin/env python """ Read a file from stdin, split each line and write fields one per line to stdout. TODO: is this really that useful? """ import sys for line in sys.stdin: for field in line.split(): print(field) bx-python-0.8.13/scripts/out_to_chain.py000077500000000000000000000046661415666465100203040ustar00rootroot00000000000000#!/usr/bin/env python import logging import sys from collections import OrderedDict from itertools import product import numpy as np from bx.align.epo import Chain, EPOitem from bx.cookbook import argparse logging.basicConfig(level=logging.INFO) log = logging.getLogger() def outFile(s): if (s in ('-', 'stdout')) or (s is None): return sys.stdout return open(s, 'w') def loadChrSizes(path): data = OrderedDict() with open(path) as fd: for ch, s in (l.split() for l in fd): data[ch] = int(s) return data def convert_action(trg_comp, qr_comp, ts, qs, opt): for i, (a, b) in enumerate(product(trg_comp, qr_comp)): try: ch, S, T, Q = Chain._make_from_epo(a, b, ts, qs) if np.sum(S) == 0: log.info("insignificant genomic alignment block %s ..." 
                         % ch.id)
                continue
            new_id = "%si%d" % (ch.id, i)
            print(str(ch._replace(id=new_id)), file=opt.output)
            for s, t, q in zip(S, T, Q):
                print("%d %d %d" % (s, t, q), file=opt.output)
            print("%d\n" % S[-1], file=opt.output)
        except KeyError:
            log.warning(f"skipping chromosome/contig ({a.chrom}, {b.chrom})")


if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description="""EPO alignments (.out) to .chain converter.""",
        epilog="Olgert Denas (Taylor Lab)",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)

    parser.add_argument("input", help="File to process.")
    parser.add_argument("--species", nargs=2, default=["homo_sapiens", "mus_musculus"],
                        help="Names of target and query species (respectively) in the alignment.")
    parser.add_argument("--chrsizes", nargs=2, required=True,
                        help="Chromosome sizes for the given species.")
    parser.add_argument("-o", '--output', metavar="FILE", default='stdout',
                        type=outFile, help="Output file")

    opt = parser.parse_args()

    log.info("loading sizes ...")
    tsizes = loadChrSizes(opt.chrsizes[0])
    qsizes = loadChrSizes(opt.chrsizes[1])

    log.info("loading alignments ...")
    data = OrderedDict(sorted(EPOitem._parse_epo(opt.input).items()))

    log.info("dumping ...")
    for k in data:
        components = data[k]
        trg_comp = [c for c in components if c.species == opt.species[0]]
        qr_comp = [c for c in components if c.species == opt.species[1]]
        convert_action(trg_comp, qr_comp, tsizes, qsizes, opt)
bx-python-0.8.13/scripts/prefix_lines.py000077500000000000000000000002411415666465100203010ustar00rootroot00000000000000#!/usr/bin/env python
"""
Simple script to add a prefix to every line in a file.
"""

import sys

for line in sys.stdin:
    print(sys.argv[1] + line, end='')
bx-python-0.8.13/scripts/pretty_table.py000077500000000000000000000016441415666465100203160ustar00rootroot00000000000000#!/usr/bin/env python
"""
Read some whitespace separated data from stdin and pretty print it so
that the columns line up.
"""

import sys


def main():
    pad = "\t"
    align = None
    if len(sys.argv) > 1:
        pad = " " * int(sys.argv[1])
    if len(sys.argv) > 2:
        align = sys.argv[2]
    rows = [line.split() for line in sys.stdin]
    print_tabular(rows, pad, align)


def print_tabular(rows, pad, align=None):
    if len(rows) == 0:
        return
    # Column widths must account for every row, not just the first one
    lengths = [0] * max(len(row) for row in rows)
    for row in rows:
        for i in range(0, len(row)):
            lengths[i] = max(lengths[i], len(row[i]))
    for row in rows:
        rval = ""
        for i in range(0, len(row)):
            if align and align[i] == "l":
                rval += row[i].ljust(lengths[i])
            else:
                rval += row[i].rjust(lengths[i])
            rval += pad
        print(rval)


main()
bx-python-0.8.13/scripts/qv_to_bqv.py000066400000000000000000000040551415666465100176160ustar00rootroot00000000000000#!/usr/bin/env python
"""
Convert a qual (qv) file to several BinnedArray files for fast seek.
This script takes approximately 4 seconds per 1 million base pairs.

The input format is fasta style quality -- fasta headers followed by
whitespace separated integers.
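For example, a (hypothetical) input might look like:

>chr1
40 40 38 37 36 35 40 40

One BinnedArray file is written per fasta record, named
<output_file>.<region>.bqv.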
usage: %prog qual_file output_file
"""

import fileinput
import sys

from bx.binned_array import BinnedArrayWriter


def main():
    args = sys.argv[1:]
    try:
        qual_file = args[0]
        output_file = args[1]
    except IndexError:
        print("usage: qual_file output_file")
        sys.exit()

    qual = fileinput.FileInput(qual_file)
    outfile = None
    outbin = None
    base_count = 0
    mega_count = 0
    region = ""

    for line in qual:
        line = line.rstrip("\r\n")
        if line.startswith(">"):
            # close old
            if outbin and outfile:
                print("\nFinished region " + region + " at " + str(base_count) + " base pairs.")
                outbin.finish()
                outfile.close()
            # start new file
            region = line.lstrip(">")
            outfname = output_file + "." + region + ".bqv"
            print("Writing region " + region + " to file " + outfname)
            outfile = open(outfname, "wb")
            outbin = BinnedArrayWriter(outfile, typecode='b', default=0)
            base_count = 0
            mega_count = 0
        else:
            if outfile and outbin:
                nums = line.split()
                for val in nums:
                    outval = int(val)
                    assert 0 <= outval <= 255
                    outbin.write(outval)
                    base_count += 1
                    if (mega_count * 1000000) <= base_count:
                        sys.stdout.write(str(mega_count) + " ")
                        sys.stdout.flush()
                        mega_count = base_count // 1000000 + 1

    if outbin and outfile:
        print("\nFinished region " + region + " at " + str(base_count) + " base pairs.")
        outbin.finish()
        outfile.close()


if __name__ == "__main__":
    main()
bx-python-0.8.13/scripts/random_lines.py000077500000000000000000000004561415666465100202720ustar00rootroot00000000000000#!/usr/bin/env python
"""
Script to select random lines from a file. Reads entire file into
memory!

TODO: Replace this with a more elegant implementation.
"""

import random
import sys

ndesired = int(sys.argv[1])
for line in random.sample(sys.stdin.readlines(), ndesired):
    print(line, end='')
bx-python-0.8.13/scripts/table_add_column.py000077500000000000000000000022741415666465100210740ustar00rootroot00000000000000#!/usr/bin/env python
"""
Tool for adding a column to a table. Expressions for the column are similar
to those supported by table_filter.py

usage: %prog expression colname < table
    -H, --header: keep header in output
    -C, --comments: keep comments in output
"""

import sys

import bx.tabular.io
from bx.cookbook import doc_optparse


def __main__():
    # Parse command line arguments
    options, args = doc_optparse.parse(__doc__)
    try:
        keep_header = bool(options.header)
        keep_comments = bool(options.comments)
        expr = args[0]
        colname = args[1]
    except Exception:
        doc_optparse.exception()

    # Compile expression for SPEED
    if expr:
        expr = compile(expr, '', 'eval')

    for element in bx.tabular.io.Reader(sys.stdin):
        if isinstance(element, bx.tabular.io.Header):
            if keep_header:
                print(str(element) + "\t" + colname)
        elif isinstance(element, bx.tabular.io.Comment):
            if keep_comments:
                print(element)
        else:
            val = eval(expr, dict(row=element))
            print(str(element) + "\t" + str(val))


if __name__ == "__main__":
    __main__()
bx-python-0.8.13/scripts/table_filter.py000077500000000000000000000042561415666465100202560ustar00rootroot00000000000000#!/usr/bin/env python
"""
Tool for filtering a tabular data file. Fields are separated by tabs, the
header line is denoted by a '#' in the first byte, comments are denoted by
a '#' at the start of any subsequent line.

Expressions can use column names as well as numbers. The -c option allows
cutting (column selection), again using field names or numbers.
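For example, assuming the table has a header line defining a column named
'score', one could keep high-scoring rows and cut three columns with:

    table_filter.py 'float(row["score"]) > 0.5' -c 1,2,3 < scores.tab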
usage: %prog expression < table -H, --header: keep header in output -C, --comments: keep comments in output --force-header: assume the first line is a header even if it does not start with "#" -c, --cols=1,2: names or indexes of columns to keep """ import sys import bx.tabular.io from bx.cookbook import doc_optparse def __main__(): # Parse command line arguments options, args = doc_optparse.parse(__doc__) try: keep_header = bool(options.header) keep_comments = bool(options.comments) cols = [] if options.cols: for c in options.cols.split(','): try: v = int(c) except ValueError: v = c cols.append(v) if len(args) > 0: expr = args[0] else: expr = None if options.force_header: force_header = bx.tabular.io.FIRST_LINE_IS_HEADER else: force_header = None except Exception: doc_optparse.exception() # Compile expression for SPEED if expr: expr = compile(expr, '', 'eval') for element in bx.tabular.io.TableReader(sys.stdin, force_header=force_header): if isinstance(element, bx.tabular.io.Header): if keep_header: if cols: print("#" + "\t".join(element[c] for c in cols)) else: print(element) elif isinstance(element, bx.tabular.io.Comment): if keep_comments: print(element) else: if expr is None or bool(eval(expr, dict(row=element))): if cols: print("\t".join(element[c] for c in cols)) else: print(element) if __name__ == "__main__": __main__() bx-python-0.8.13/scripts/tfloc_summary.py000077500000000000000000000011771415666465100205070ustar00rootroot00000000000000#!/usr/bin/env python """ Read TFLOC output from stdin and write out a summary in which the nth line contains the number of sites found in the nth alignment of the input. TODO: This is very special case, should it be here? """ import sys from collections import defaultdict counts = defaultdict(int) max_index = -1 for line in sys.stdin: if line[0].isdigit(): current_index = int(line) max_index = max(current_index, max_index) elif line[0] == "'": counts[current_index] += 1 else: raise ValueError("Invalid input line " + line) for i in range(max_index + 1): print(counts.get(i, 0)) bx-python-0.8.13/scripts/ucsc_gene_table_to_intervals.py000077500000000000000000000067661415666465100235270ustar00rootroot00000000000000#!/usr/bin/env python """ Read a table dump in the UCSC gene table format and print a tab separated list of intervals corresponding to requested features of each gene. 
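Output is written to stdout, one interval per line: chrom, start and end
(and, with -s, strand), separated by tabs.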
usage: ucsc_gene_table_to_intervals.py [options] < gene_table.txt

options:
  -h, --help            show this help message and exit
  -rREGION, --region=REGION
                        Limit to region: one of coding, utr3, utr5,
                        transcribed [default]
  -e, --exons           Only print intervals overlapping an exon
"""

import optparse
import sys


def main():
    # Parse command line
    parser = optparse.OptionParser(usage="%prog [options] < gene_table.txt")
    parser.add_option("-r", "--region", dest="region", default="transcribed",
                      help="Limit to region: one of coding, utr3, utr5, transcribed [default]")
    parser.add_option("-e", "--exons", action="store_true", dest="exons",
                      help="Only print intervals overlapping an exon")
    parser.add_option("-s", "--strand", action="store_true", dest="strand",
                      help="Print strand after interval")
    parser.add_option("-b", "--nobin", action="store_false", dest="discard_first_column", default=True,
                      help="file doesn't contain a 'bin' column (use this for pre-hg18 files)")
    options, args = parser.parse_args()
    assert options.region in ('coding', 'utr3', 'utr5', 'transcribed'), "Invalid region argument"

    # Read table from stdin and handle each gene
    for line in sys.stdin:
        # Parse fields from gene table
        fields = line.split('\t')
        if options.discard_first_column:
            fields.pop(0)
        chrom = fields[1]
        strand = fields[2]
        tx_start = int(fields[3])
        tx_end = int(fields[4])
        cds_start = int(fields[5])
        cds_end = int(fields[6])

        # Determine the subset of the transcribed region we are interested in
        if options.region == 'utr3':
            if strand == '-':
                region_start, region_end = tx_start, cds_start
            else:
                region_start, region_end = cds_end, tx_end
        elif options.region == 'utr5':
            if strand == '-':
                region_start, region_end = cds_end, tx_end
            else:
                region_start, region_end = tx_start, cds_start
        elif options.region == 'coding':
            region_start, region_end = cds_start, cds_end
        else:
            region_start, region_end = tx_start, tx_end

        # If only interested in exons, print the portion of each exon overlapping
        # the region of interest, otherwise print the span of the region
        if options.exons:
            exon_starts = [int(_) for _ in fields[8].rstrip(',\n').split(',')]
            exon_ends = [int(_) for _ in fields[9].rstrip(',\n').split(',')]
            for start, end in zip(exon_starts, exon_ends):
                start = max(start, region_start)
                end = min(end, region_end)
                if start < end:
                    if options.strand:
                        print_tab_sep(chrom, start, end, strand)
                    else:
                        print_tab_sep(chrom, start, end)
        else:
            if options.strand:
                print_tab_sep(chrom, region_start, region_end, strand)
            else:
                print_tab_sep(chrom, region_start, region_end)


def print_tab_sep(*args):
    """Print `args` to stdout separated by tabs"""
    print("\t".join(str(f) for f in args))


if __name__ == "__main__":
    main()
bx-python-0.8.13/scripts/wiggle_to_array_tree.py000077500000000000000000000014231415666465100220120ustar00rootroot00000000000000#!/usr/bin/env python
"""
Read data in UCSC wiggle format and write it to an "array tree" file.
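The first argument is a chromosome lengths file: two whitespace-separated
columns, sequence name and length.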
usage: %prog chrom_length_file output.array_tree < input.wig
"""

import sys

from bx.arrays.array_tree import (
    array_tree_dict_from_reader,
    FileArrayTreeDict
)
from bx.arrays.wiggle import WiggleReader


def main():
    sizes_fname = sys.argv[1]
    out_fname = sys.argv[2]

    sizes = {}
    for line in open(sizes_fname):
        fields = line.split()
        sizes[fields[0]] = int(fields[1])

    # Fill array from wiggle
    d = array_tree_dict_from_reader(WiggleReader(sys.stdin), sizes)

    for value in d.values():
        value.root.build_summary()

    with open(out_fname, "wb") as f:
        FileArrayTreeDict.dict_to_file(d, f)


if __name__ == "__main__":
    main()
bx-python-0.8.13/scripts/wiggle_to_binned_array.py000077500000000000000000000024151415666465100223140ustar00rootroot00000000000000#!/usr/bin/env python
"""
Convert wiggle data to a binned array. This assumes the input data is on a
single chromosome and does no sanity checks!

usage: %prog wiggle_file out_file
    -c, --comp=type: compression type (none, zlib, lzo)
"""

import bx.wiggle
from bx import misc
from bx.binned_array import BinnedArray
from bx.cookbook import doc_optparse


def main():
    # Parse command line
    options, args = doc_optparse.parse(__doc__)
    try:
        if options.comp:
            comp_type = options.comp
        else:
            comp_type = None
        score_fname = args[0]
        out_fname = args[1]
    except Exception:
        doc_optparse.exit()

    scores = BinnedArray()

    for i, (chrom, pos, val) in enumerate(bx.wiggle.Reader(misc.open_compressed(score_fname))):
        # if last_chrom is None:
        #     last_chrom = chrom
        # else:
        #     assert chrom == last_chrom, "This script expects a 'wiggle' input on only one chromosome"
        scores[pos] = val
        # Status
        if i % 10000 == 0:
            print(i, "scores processed")

    out = open(out_fname, "wb")
    if comp_type:
        scores.to_file(out, comp_type=comp_type)
    else:
        scores.to_file(out)
    out.close()


if __name__ == "__main__":
    main()
bx-python-0.8.13/scripts/wiggle_to_chr_binned_array.py000077500000000000000000000013431415666465100231470ustar00rootroot00000000000000#!/usr/bin/env python
"""
Writes compressed data from a wiggle file by chromosome.

usage: %prog wiggle_file
"""

import sys

import bx.wiggle
from bx.binned_array import BinnedArray
from bx.cookbook import doc_optparse


def main():
    options, args = doc_optparse.parse(__doc__)
    scores = {}
    for i, (chrom, pos, val) in enumerate(bx.wiggle.Reader(open(sys.argv[1]))):
        if chrom not in scores:
            scores[chrom] = BinnedArray()
        scores[chrom][pos] = val

        # Status
        if i % 10000 == 0:
            print(i, "scores processed")

    for chr in scores.keys():
        out = open(chr, "wb")
        scores[chr].to_file(out)
        out.close()


if __name__ == "__main__":
    main()
bx-python-0.8.13/scripts/wiggle_to_simple.py000077500000000000000000000010031415666465100211400ustar00rootroot00000000000000#!/usr/bin/env python
"""
Read a wiggle track and print out a series of lines containing
"chrom position score". Ignores track lines, handles bed, variableStep
and fixedStep wiggle lines.
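For example, a variableStep input such as:

track type=wiggle_0
variableStep chrom=chr1
11 1.5
12 2.0

yields one "chrom position score" line per data point (positions as parsed
by bx.wiggle.Reader).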
""" import sys import bx.wiggle if len(sys.argv) > 1: in_file = open(sys.argv[1]) else: in_file = sys.stdin if len(sys.argv) > 2: out_file = open(sys.argv[2], "w") else: out_file = sys.stdout for fields in bx.wiggle.Reader(in_file): print(" ".join(map(str, fields))) in_file.close() out_file.close() bx-python-0.8.13/setup.cfg000066400000000000000000000032001415666465100153650ustar00rootroot00000000000000[metadata] author = James Taylor, Bob Harris, David King, Brent Pedersen, Kanwei Li, Nicola Soranzo, and others author_email = james@jamestaylor.org classifiers = Development Status :: 5 - Production/Stable Intended Audience :: Developers Intended Audience :: Science/Research License :: OSI Approved :: MIT License Operating System :: POSIX Programming Language :: Python :: 3 Programming Language :: Python :: 3.7 Programming Language :: Python :: 3.8 Programming Language :: Python :: 3.9 Programming Language :: Python :: 3.10 Topic :: Scientific/Engineering :: Bio-Informatics Topic :: Software Development :: Libraries :: Python Modules name = bx-python description = Tools for manipulating biological data, particularly multiple sequence alignments license = MIT long_description = file: README.md long_description_content_type = text/markdown project_urls = Bug Tracker = https://github.com/bxlab/bx-python/issues Source Code = https://github.com/bxlab/bx-python url = https://github.com/bxlab/bx-python version = attr: bx.__version__ [options] install_requires = numpy packages = find: package_dir = =lib py_modules = psyco_full python_requires = >=3.7 zip_safe = False [options.package_data] * = *.ps [options.packages.find] where=lib [aliases] snapshot = egg_info -rb_DEV bdist_egg rotate -m.egg -k1 build_docs = build_sphinx build_apidocs [flake8] ignore = E226,E501,E741,W503 exclude = .git,.tox,.venv,build,doc/source/conf.py import-order-style = smarkets application-import-names = bx,bx_extras [build_sphinx] source-dir = doc/source build-dir = doc/docbuild all_files = 1 bx-python-0.8.13/setup.py000066400000000000000000000157361415666465100152770ustar00rootroot00000000000000import os import os.path import platform import sys from distutils.core import Command from glob import glob from setuptools import ( Extension, setup, ) from setuptools.command.sdist import sdist def main(): metadata = dict( scripts=glob("scripts/*.py"), cmdclass=command_classes) if len(sys.argv) >= 2 and \ ('--help' in sys.argv[1:] or sys.argv[1] in ('--help-commands', 'egg_info', '--version', 'clean')): # For these actions, NumPy is not required. # # They are required to succeed without Numpy for example when # pip is used to install when Numpy is not yet present in # the system. 
pass else: try: import numpy # Suppress numpy tests numpy.test = None except Exception as e: raise Exception(f"numpy must be installed to build: {e}") metadata['ext_modules'] = get_extension_modules(numpy_include=numpy.get_include()) setup(**metadata) # ---- Commands ------------------------------------------------------------- # Use build_ext from Cython if found command_classes = {} try: import Cython.Distutils command_classes['build_ext'] = Cython.Distutils.build_ext class build_ext_sdist(sdist): def run(self): # Make sure the compiled Cython files in the distribution are up-to-date self.run_command("build_ext") super().run() command_classes['sdist'] = build_ext_sdist except ImportError: pass # Use epydoc if found try: import epydoc.cli # Create command class to build API documentation class BuildAPIDocs(Command): user_options = [] def initialize_options(self): pass def finalize_options(self): pass def run(self): # Save working directory and args old_argv = sys.argv old_cwd = os.getcwd() # Build command line for Epydoc sys.argv = """epydoc.py bx --verbose --html --simple-term --exclude=._ --exclude=_tests --docformat=reStructuredText --output=../doc/docbuild/html/apidoc""".split() # Make output directory if not os.path.exists("./doc/docbuild/html/apidoc"): os.mkdir("./doc/docbuild/html/apidoc") # Move to lib directory (so bx package is in current directory) os.chdir("./lib") # Invoke epydoc epydoc.cli.cli() # Restore args and working directory sys.argv = old_argv os.chdir(old_cwd) # Add to extra_commands command_classes['build_apidocs'] = BuildAPIDocs except Exception: pass # ---- Extension Modules ---------------------------------------------------- # # suppress C++ #warning, e.g., to silence NumPy deprecation warnings: # from functools import partial # _Extension = Extension # Extension = partial(_Extension, extra_compile_args=["-Wno-cpp"]) def get_extension_modules(numpy_include=None): extensions = [] # Bitsets extensions.append(Extension("bx.bitset", ["lib/bx/bitset.pyx", "src/binBits.c", "src/kent/bits.c", "src/kent/common.c"], include_dirs=["src/kent", "src"])) # Interval intersection extensions.append(Extension("bx.intervals.intersection", ["lib/bx/intervals/intersection.pyx"])) # Alignment object speedups extensions.append(Extension("bx.align._core", ["lib/bx/align/_core.pyx"])) # NIB reading speedups extensions.append(Extension("bx.seq._nib", ["lib/bx/seq/_nib.pyx"])) # 2bit reading speedups extensions.append(Extension("bx.seq._twobit", ["lib/bx/seq/_twobit.pyx"])) # Translation if character / integer strings extensions.append(Extension("bx._seqmapping", ["lib/bx/_seqmapping.pyx"])) # BGZF extensions.append(Extension("bx.misc.bgzf", ["lib/bx/misc/bgzf.pyx", "src/samtools/bgzf.c"], include_dirs=["src/samtools"], libraries=['z'])) # The following extensions won't (currently) compile on windows if platform.system() not in ('Microsoft', 'Windows'): # Interval clustering extensions.append(Extension("bx.intervals.cluster", ["lib/bx/intervals/cluster.pyx", "src/cluster.c"], include_dirs=["src"])) # Position weight matrices extensions.append(Extension("bx.pwm._position_weight_matrix", ["lib/bx/pwm/_position_weight_matrix.pyx", "src/pwm_utils.c"], include_dirs=["src"])) extensions.append(Extension("bx.motif._pwm", ["lib/bx/motif/_pwm.pyx"], include_dirs=[numpy_include])) # Sparse arrays with summaries organized as trees on disk extensions.append(Extension("bx.arrays.array_tree", ["lib/bx/arrays/array_tree.pyx"], include_dirs=[numpy_include])) # Reading UCSC "big binary index" files 
extensions.append(Extension("bx.bbi.bpt_file", ["lib/bx/bbi/bpt_file.pyx"])) extensions.append(Extension("bx.bbi.cirtree_file", ["lib/bx/bbi/cirtree_file.pyx"])) extensions.append(Extension("bx.bbi.bbi_file", ["lib/bx/bbi/bbi_file.pyx"], include_dirs=[numpy_include])) extensions.append(Extension("bx.bbi.bigwig_file", ["lib/bx/bbi/bigwig_file.pyx"], include_dirs=[numpy_include])) extensions.append(Extension("bx.bbi.bigbed_file", ["lib/bx/bbi/bigbed_file.pyx"], include_dirs=[numpy_include])) # EPO and Chain arithmetics and IO speedups extensions.append(Extension("bx.align._epo", ["lib/bx/align/_epo.pyx"], include_dirs=[numpy_include])) # Reading UCSC bed and wiggle formats extensions.append(Extension("bx.arrays.bed", ["lib/bx/arrays/bed.pyx"])) extensions.append(Extension("bx.arrays.wiggle", ["lib/bx/arrays/wiggle.pyx"])) # CpG masking extensions.append(Extension("bx.align.sitemask._cpg", ["lib/bx/align/sitemask/_cpg.pyx", "lib/bx/align/sitemask/find_cpg.c"])) # Counting n-grams in integer strings extensions.append(Extension("bx.intseq.ngramcount", ["lib/bx/intseq/ngramcount.pyx"], include_dirs=["src"])) # Seekable access to bzip2 files extensions.append(Extension("bx.misc._seekbzip2", ["lib/bx/misc/_seekbzip2.pyx", "src/bunzip/micro-bunzip.c"], include_dirs=["src/bunzip"])) return extensions if __name__ == "__main__": main() bx-python-0.8.13/src/000077500000000000000000000000001415666465100143405ustar00rootroot00000000000000bx-python-0.8.13/src/binBits.c000066400000000000000000000164051415666465100161040ustar00rootroot00000000000000#include "common.h" #include "bits.h" #include "binBits.h" static Bits * ALL_ZERO = NULL; static Bits * ALL_ONE = ( Bits * ) &"ONE"; struct BinBits* binBitsAlloc( int size, int granularity ) { struct BinBits * bb; AllocVar(bb); bb->size = size; bb->bin_size = (int) ceil( size / (float) granularity ); bb->nbins = (int) ceil( size / (float) bb->bin_size ); AllocArray( bb->bins, bb->nbins ); return bb; } void binBitsFree( struct BinBits *bb ) { int i; for ( i = 0; i < bb->nbins; i++ ) { if ( ( bb->bins[i] != ALL_ZERO ) && ( bb->bins[i] != ALL_ONE ) ) { bitFree( &(bb->bins[i]) ); } } freeMem( bb->bins ); freeMem( bb ); } #ifdef _MSC_VER #define INLINE static __inline #else #define INLINE static inline #endif INLINE int binBitsGetBin( struct BinBits * bb, int pos ) { return pos / bb->bin_size; } INLINE int binBitsGetOffset( struct BinBits * bb, int pos ) { return pos % bb->bin_size; } boolean binBitsReadOne( struct BinBits * bb, int pos ) { int bin = binBitsGetBin( bb, pos ); if ( bb->bins[bin] == ALL_ZERO ) { return 0; } else if ( bb->bins[bin] == ALL_ONE ) { return 1; } else { return bitReadOne( bb->bins[bin], binBitsGetOffset( bb, pos ) ); } } void binBitsSetOne( struct BinBits * bb, int pos ) { int bin = binBitsGetBin( bb, pos ); int offset = binBitsGetOffset( bb, pos ); if ( bb->bins[bin] == ALL_ONE ) { return; } if ( bb->bins[bin] == ALL_ZERO ) { bb->bins[bin] = bitAlloc( bb->bin_size ); } bitSetOne( bb->bins[bin], offset ); } void binBitsClearOne( struct BinBits * bb, int pos ) { int bin = binBitsGetBin( bb, pos ); int offset = binBitsGetOffset( bb, pos ); if ( bb->bins[bin] == ALL_ZERO ) { return; } if ( bb->bins[bin] == ALL_ONE ) { bb->bins[bin] = bitAlloc( bb->bin_size ); bitSetRange( bb->bins[bin], 0, bb->bin_size ); } bitClearOne( bb->bins[bin], offset ); } void binBitsSetRange( struct BinBits *bb, int start, int size ) { int bin, offset, delta; while ( size > 0 ) { bin = binBitsGetBin( bb, start ); offset = binBitsGetOffset( bb, start ); delta = 
bb->bin_size - offset; if ( bb->bins[bin] == ALL_ZERO ) { bb->bins[bin] = bitAlloc( bb->bin_size ); } if ( delta < size ) { if ( bb->bins[bin] != ALL_ONE ) { bitSetRange( bb->bins[bin], offset, delta ); } size -= delta; start += delta; } else { if ( bb->bins[bin] != ALL_ONE ) { bitSetRange( bb->bins[bin], offset, size ); } size = 0; } } } int binBitsCountRange( struct BinBits *bb, int start, int size ) { int delta; int count = 0; while ( size > 0 ) { int bin = binBitsGetBin( bb, start ); int offset = binBitsGetOffset( bb, start ); delta = bb->bin_size - offset; if ( bb->bins[bin] == ALL_ZERO ) { if ( delta < size ) { size -= delta; start += delta; } else { size = 0; } } else if ( bb->bins[bin] == ALL_ONE ) { if ( delta < size ) { count += ( delta - offset ); size -= delta; start += delta; } else { count += ( size - offset ); size = 0; } } else if ( delta < size ) { count += bitCountRange( bb->bins[bin], offset, delta ); size -= delta; start += delta; } else { count += bitCountRange( bb->bins[bin], offset, size ); size = 0; } } return count; } int binBitsFindSet( struct BinBits *bb, int start ) { int ns; int bin = binBitsGetBin( bb, start ); int offset = binBitsGetOffset( bb, start ); while ( bin < bb->nbins ) { if ( bb->bins[bin] == ALL_ONE ) { return bin * bb->bin_size + offset; } else if ( bb->bins[bin] != ALL_ZERO ) { ns = bitFindSet( bb->bins[bin], offset, bb->bin_size ); if ( ns < bb->bin_size ) { return bin * bb->bin_size + ns; } } bin += 1; offset = 0; } return bb->size; } int binBitsFindClear( struct BinBits *bb, int start ) { int ns; int bin = binBitsGetBin( bb, start ); int offset = binBitsGetOffset( bb, start ); while ( bin < bb->nbins ) { if ( bb->bins[bin] == ALL_ZERO ) { return bin*bb->bin_size + offset; } else if ( bb->bins[bin] != ALL_ONE ) { ns = bitFindClear( bb->bins[bin], offset, bb->bin_size ); if ( ns < bb->bin_size ) { return bin*bb->bin_size + ns; } } bin += 1; offset = 0; } return bb->size; } void binBitsAnd( struct BinBits *bb1, struct BinBits *bb2 ) { int i; assert( bb1->bin_size == bb2->bin_size && bb1->nbins == bb2->nbins && bb1->size == bb2->size ); for ( i = 0; i < bb1->nbins; i++ ) { if ( bb1->bins[i] == ALL_ZERO ) { // Do nothing } else if ( bb2->bins[i] == ALL_ZERO ) { if ( bb1->bins[i] != ALL_ONE ) { bitFree( &bb1->bins[i] ); } bb1->bins[i] = ALL_ZERO; } else if ( bb2->bins[i] == ALL_ONE ) { // Do nothing } else if ( bb1->bins[i] == ALL_ONE ) { bb1->bins[i] = bitClone( bb2->bins[i], bb1->bin_size ); } else { bitAnd( bb1->bins[i], bb2->bins[i], bb1->bin_size ); } } } void binBitsOr( struct BinBits *bb1, struct BinBits *bb2 ) { int i; assert( bb1->bin_size == bb2->bin_size && bb1->nbins == bb2->nbins && bb1->size == bb2->size ); for ( i = 0; i < bb1->nbins; i++ ) { if ( bb1->bins[i] == ALL_ONE ) { // Do nothing } else if ( bb2->bins[i] == ALL_ONE ) { if ( bb1->bins[i] != ALL_ZERO ) { bitFree( &bb1->bins[i] ); } bb1->bins[i] = ALL_ONE; } else if ( bb2->bins[i] == ALL_ZERO ) { // Do nothing } else if ( bb1->bins[i] == ALL_ZERO ) { bb1->bins[i] = bitClone( bb2->bins[i], bb1->bin_size ); } else { bitOr( bb1->bins[i], bb2->bins[i], bb1->bin_size ); } } } void binBitsNot( struct BinBits *bb ) { int i; for ( i = 0; i < bb->nbins; i++ ) { if ( bb->bins[i] == ALL_ONE ) { bb->bins[i] = ALL_ZERO; } else if ( bb->bins[i] == ALL_ZERO ) { bb->bins[i] = ALL_ONE; } else { bitNot( bb->bins[i], bb->bin_size ); } } } bx-python-0.8.13/src/binBits.h000066400000000000000000000014761415666465100161130ustar00rootroot00000000000000#ifndef BINBITS_H #define BINBITS_H #include 
"common.h" #include "bits.h" struct BinBits { int size; int bin_size; int nbins; Bits ** bins; }; struct BinBits* binBitsAlloc( int size, int granularity ); void binBitsFree( struct BinBits *bb ); boolean binBitsReadOne( struct BinBits * bb, int pos ); void binBitsSetOne( struct BinBits * bb, int pos ); void binBitsClearOne( struct BinBits * bb, int pos ); void binBitsSetRange( struct BinBits *bb, int start, int size ); int binBitsCountRange( struct BinBits *bb, int start, int size ); int binBitsFindSet( struct BinBits *bb, int start ); int binBitsFindClear( struct BinBits *bb, int start ); void binBitsAnd( struct BinBits *bb1, struct BinBits *bb2 ); void binBitsOr( struct BinBits *bb1, struct BinBits *bb2 ); void binBitsNot( struct BinBits *bb ); #endif bx-python-0.8.13/src/bunzip/000077500000000000000000000000001415666465100156475ustar00rootroot00000000000000bx-python-0.8.13/src/bunzip/micro-bunzip.c000066400000000000000000000630121415666465100204330ustar00rootroot00000000000000/* vi: set sw=4 ts=4: */ /* Small bzip2 deflate implementation, by Rob Landley (rob@landley.net). Based on bzip2 decompression code by Julian R Seward (jseward@acm.org), which also acknowledges contributions by Mike Burrows, David Wheeler, Peter Fenwick, Alistair Moffat, Radford Neal, Ian H. Witten, Robert Sedgewick, and Jon L. Bentley. This code is licensed under the LGPLv2: LGPL (http://www.gnu.org/copyleft/lgpl.html */ /* Size and speed optimizations by Manuel Novoa III (mjn3@codepoet.org). More efficient reading of huffman codes, a streamlined read_bunzip() function, and various other tweaks. In (limited) tests, approximately 20% faster than bzcat on x86 and about 10% faster on arm. Note that about 2/3 of the time is spent in read_unzip() reversing the Burrows-Wheeler transformation. Much of that time is delay resulting from cache misses. I would ask that anyone benefiting from this work, especially those using it in commercial products, consider making a donation to my local non-profit hospice organization (see www.hospiceacadiana.com) in the name of the woman I loved, Toni W. Hagan, who passed away Feb. 12, 2003. Manuel */ #include #include #include #include #include #include /* Constants for huffman coding */ #define MAX_GROUPS 6 #define GROUP_SIZE 50 /* 64 would have been more efficient */ #define MAX_HUFCODE_BITS 20 /* Longest huffman code allowed */ #define MAX_SYMBOLS 258 /* 256 literals + RUNA + RUNB */ #define SYMBOL_RUNA 0 #define SYMBOL_RUNB 1 /* Status return values */ #define RETVAL_OK 0 #define RETVAL_LAST_BLOCK (-1) #define RETVAL_NOT_BZIP_DATA (-2) #define RETVAL_UNEXPECTED_INPUT_EOF (-3) #define RETVAL_UNEXPECTED_OUTPUT_EOF (-4) #define RETVAL_DATA_ERROR (-5) #define RETVAL_OUT_OF_MEMORY (-6) #define RETVAL_OBSOLETE_INPUT (-7) #define RETVAL_END_OF_BLOCK (-8) #define RETVAL_STOPCHAR (-9) #define RETVAL_BUFFER_FULL (-10) /* Other housekeeping constants */ #define IOBUF_SIZE 4096 /* This is what we know about each huffman coding group */ struct group_data { /* We have an extra slot at the end of limit[] for a sentinal value. */ int limit[MAX_HUFCODE_BITS+1],base[MAX_HUFCODE_BITS],permute[MAX_SYMBOLS]; int minLen, maxLen; }; /* Structure holding all the housekeeping data, including IO buffers and memory that persists between calls to bunzip */ typedef struct { /* State for interrupting output loop */ int writeCopies,writePos,writeRunCountdown,writeCount,writeCurrent; /* I/O tracking data (file handles, buffers, positions, etc.) 
*/ int in_fd,out_fd,inbufCount,inbufPos /*,outbufPos*/; unsigned char *inbuf /*,*outbuf*/; unsigned int inbufBitCount, inbufBits; /* The CRC values stored in the block header and calculated from the data */ unsigned int crc32Table[256],headerCRC, totalCRC, writeCRC; /* Intermediate buffer and its size (in bytes) */ unsigned int *dbuf, dbufSize; /* These things are a bit too big to go on the stack */ unsigned char selectors[32768]; /* nSelectors=15 bits */ struct group_data groups[MAX_GROUPS]; /* huffman coding tables */ /* For I/O error handling */ jmp_buf jmpbuf; } bunzip_data; /* Return the next nnn bits of input. All reads from the compressed input are done through this function. All reads are big endian */ unsigned int get_bits(bunzip_data *bd, char bits_wanted) { unsigned int bits=0; /* If we need to get more data from the byte buffer, do so. (Loop getting one byte at a time to enforce endianness and avoid unaligned access.) */ while (bd->inbufBitCountinbufPos==bd->inbufCount) { if((bd->inbufCount = read(bd->in_fd, bd->inbuf, IOBUF_SIZE)) <= 0) longjmp(bd->jmpbuf,RETVAL_UNEXPECTED_INPUT_EOF); bd->inbufPos=0; } /* Avoid 32-bit overflow (dump bit buffer to top of output) */ if(bd->inbufBitCount>=24) { bits=bd->inbufBits&((1<inbufBitCount)-1); bits_wanted-=bd->inbufBitCount; bits<<=bits_wanted; bd->inbufBitCount=0; } /* Grab next 8 bits of input from buffer. */ bd->inbufBits=(bd->inbufBits<<8)|bd->inbuf[bd->inbufPos++]; bd->inbufBitCount+=8; } /* Calculate result */ bd->inbufBitCount-=bits_wanted; bits|=(bd->inbufBits>>bd->inbufBitCount)&((1<dbuf; dbufSize=bd->dbufSize; selectors=bd->selectors; /* Reset longjmp I/O error handling */ i=setjmp(bd->jmpbuf); if(i) return i; /* Read in header signature and CRC, then validate signature. (last block signature means CRC is for whole file, return now) */ i = get_bits(bd,24); j = get_bits(bd,24); bd->headerCRC=get_bits(bd,32); if ((i == 0x177245) && (j == 0x385090)) return RETVAL_LAST_BLOCK; if ((i != 0x314159) || (j != 0x265359)) return RETVAL_NOT_BZIP_DATA; /* We can add support for blockRandomised if anybody complains. There was some code for this in busybox 1.0.0-pre3, but nobody ever noticed that it didn't actually work. */ if(get_bits(bd,1)) return RETVAL_OBSOLETE_INPUT; if((origPtr=get_bits(bd,24)) > dbufSize) return RETVAL_DATA_ERROR; /* mapping table: if some byte values are never used (encoding things like ascii text), the compression code removes the gaps to have fewer symbols to deal with, and writes a sparse bitfield indicating which values were present. We make a translation table to convert the symbols back to the corresponding bytes. */ t=get_bits(bd, 16); symTotal=0; for (i=0;i<16;i++) { if(t&(1<<(15-i))) { k=get_bits(bd,16); for(j=0;j<16;j++) if(k&(1<<(15-j))) symToByte[symTotal++]=(16*i)+j; } } /* How many different huffman coding groups does this block use? */ groupCount=get_bits(bd,3); if (groupCount<2 || groupCount>MAX_GROUPS) return RETVAL_DATA_ERROR; /* nSelectors: Every GROUP_SIZE many symbols we select a new huffman coding group. Read in the group selector list, which is stored as MTF encoded bit runs. (MTF=Move To Front, as each value is used it's moved to the start of the list.) 
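To make the decode concrete: with the list initially (0 1 2 3), reading the
value 2 selects group 2 and the list becomes (2 0 1 3); reading 2 again now
selects group 1 and the list becomes (1 2 0 3), so recently used groups end
up encoded with the smallest values.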
*/ if(!(nSelectors=get_bits(bd, 15))) return RETVAL_DATA_ERROR; for(i=0; i=groupCount) return RETVAL_DATA_ERROR; /* Decode MTF to get the next selector */ uc = mtfSymbol[j]; for(;j;j--) mtfSymbol[j] = mtfSymbol[j-1]; mtfSymbol[0]=selectors[i]=uc; } /* Read the huffman coding tables for each group, which code for symTotal literal symbols, plus two run symbols (RUNA, RUNB) */ symCount=symTotal+2; for (j=0; j (MAX_HUFCODE_BITS-1)) return RETVAL_DATA_ERROR; /* If first bit is 0, stop. Else second bit indicates whether to increment or decrement the value. Optimization: grab 2 bits and unget the second if the first was 0. */ k = get_bits(bd,2); if (k < 2) { bd->inbufBitCount++; break; } /* Add one if second bit 1, else subtract 1. Avoids if/else */ t+=(((k+1)&2)-1); } /* Correct for the initial -1, to get the final symbol length */ length[i]=t+1; } /* Find largest and smallest lengths in this group */ minLen=maxLen=length[0]; for(i = 1; i < symCount; i++) { if(length[i] > maxLen) maxLen = length[i]; else if(length[i] < minLen) minLen = length[i]; } /* Calculate permute[], base[], and limit[] tables from length[]. * * permute[] is the lookup table for converting huffman coded symbols * into decoded symbols. base[] is the amount to subtract from the * value of a huffman symbol of a given length when using permute[]. * * limit[] indicates the largest numerical value a symbol with a given * number of bits can have. This is how the huffman codes can vary in * length: each code with a value>limit[length] needs another bit. */ hufGroup=bd->groups+j; hufGroup->minLen = minLen; hufGroup->maxLen = maxLen; /* Note that minLen can't be smaller than 1, so we adjust the base and limit array pointers so we're not always wasting the first entry. We do this again when using them (during symbol decoding).*/ base=hufGroup->base-1; limit=hufGroup->limit-1; /* Calculate permute[]. Concurently, initialize temp[] and limit[]. */ pp=0; for(i=minLen;i<=maxLen;i++) { temp[i]=limit[i]=0; for(t=0;tpermute[pp++] = t; } /* Count symbols coded for at each bit length */ for (i=0;ilimit[length] comparison. */ limit[i]= (pp << (maxLen - i)) - 1; pp<<=1; base[i+1]=pp-(t+=temp[i]); } limit[maxLen+1] = INT_MAX; /* Sentinal value for reading next sym. */ limit[maxLen]=pp+temp[maxLen]-1; base[minLen]=0; } /* We've finished reading and digesting the block header. Now read this block's huffman coded symbols from the file and undo the huffman coding and run length encoding, saving the result into dbuf[dbufCount++]=uc */ /* Initialize symbol occurrence counters and symbol Move To Front table */ for(i=0;i<256;i++) { byteCount[i] = 0; mtfSymbol[i]=(unsigned char)i; } /* Loop through compressed symbols. */ runPos=dbufCount=symCount=selector=0; for(;;) { /* Determine which huffman coding group to use. */ if(!(symCount--)) { symCount=GROUP_SIZE-1; if(selector>=nSelectors) return RETVAL_DATA_ERROR; hufGroup=bd->groups+selectors[selector++]; base=hufGroup->base-1; limit=hufGroup->limit-1; } /* Read next huffman-coded symbol. */ /* Note: It is far cheaper to read maxLen bits and back up than it is to read minLen bits and then an additional bit at a time, testing as we go. Because there is a trailing last block (with file CRC), there is no danger of the overread causing an unexpected EOF for a valid compressed file. As a further optimization, we do the read inline (falling back to a call to get_bits if the buffer runs dry). 
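Backing up is cheap: the unused low-order bits are simply returned to the
bit buffer by bumping inbufBitCount.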
The following (up to got_huff_bits:) is equivalent to j=get_bits(bd,hufGroup->maxLen); */ while (bd->inbufBitCountmaxLen) { if(bd->inbufPos==bd->inbufCount) { j = get_bits(bd,hufGroup->maxLen); goto got_huff_bits; } bd->inbufBits=(bd->inbufBits<<8)|bd->inbuf[bd->inbufPos++]; bd->inbufBitCount+=8; }; bd->inbufBitCount-=hufGroup->maxLen; j = (bd->inbufBits>>bd->inbufBitCount)&((1<maxLen)-1); got_huff_bits: /* Figure how how many bits are in next symbol and unget extras */ i=hufGroup->minLen; while(j>limit[i]) ++i; bd->inbufBitCount += (hufGroup->maxLen - i); /* Huffman decode value to get nextSym (with bounds checking) */ if ((i > hufGroup->maxLen) || (((unsigned)(j=(j>>(hufGroup->maxLen-i))-base[i])) >= MAX_SYMBOLS)) return RETVAL_DATA_ERROR; nextSym = hufGroup->permute[j]; /* We have now decoded the symbol, which indicates either a new literal byte, or a repeated run of the most recent literal byte. First, check if nextSym indicates a repeated run, and if so loop collecting how many times to repeat the last literal. */ if (((unsigned)nextSym) <= SYMBOL_RUNB) { /* RUNA or RUNB */ /* If this is the start of a new run, zero out counter */ if(!runPos) { runPos = 1; t = 0; } /* Neat trick that saves 1 symbol: instead of or-ing 0 or 1 at each bit position, add 1 or 2 instead. For example, 1011 is 1<<0 + 1<<1 + 2<<2. 1010 is 2<<0 + 2<<1 + 1<<2. You can make any bit pattern that way using 1 less symbol than the basic or 0/1 method (except all bits 0, which would use no symbols, but a run of length 0 doesn't mean anything in this context). Thus space is saved. */ t += (runPos << nextSym); /* +runPos if RUNA; +2*runPos if RUNB */ runPos <<= 1; continue; } /* When we hit the first non-run symbol after a run, we now know how many times to repeat the last literal, so append that many copies to our buffer of decoded symbols (dbuf) now. (The last literal used is the one at the head of the mtfSymbol array.) */ if(runPos) { runPos=0; if(dbufCount+t>=dbufSize) return RETVAL_DATA_ERROR; uc = symToByte[mtfSymbol[0]]; byteCount[uc] += t; while(t--) dbuf[dbufCount++]=uc; } /* Is this the terminating symbol? */ if(nextSym>symTotal) break; /* At this point, nextSym indicates a new literal character. Subtract one to get the position in the MTF array at which this literal is currently to be found. (Note that the result can't be -1 or 0, because 0 and 1 are RUNA and RUNB. But another instance of the first symbol in the mtf array, position 0, would have been handled as part of a run above. Therefore 1 unused mtf position minus 2 non-literal nextSym values equals -1.) */ if(dbufCount>=dbufSize) return RETVAL_DATA_ERROR; i = nextSym - 1; uc = mtfSymbol[i]; /* Adjust the MTF array. Since we typically expect to move only a * small number of symbols, and are bound by 256 in any case, using * memmove here would typically be bigger and slower due to function * call overhead and other assorted setup costs. */ do { mtfSymbol[i] = mtfSymbol[i-1]; } while (--i); mtfSymbol[0] = uc; uc=symToByte[uc]; /* We have our literal byte. Save it into dbuf. */ byteCount[uc]++; dbuf[dbufCount++] = (unsigned int)uc; } /* At this point, we've read all the huffman-coded symbols (and repeated runs) for this block from the input stream, and decoded them into the intermediate buffer. There are dbufCount many decoded bytes in dbuf[]. Now undo the Burrows-Wheeler transform on dbuf. See http://dogma.net/markn/articles/bwt/bwt.htm */ /* Turn byteCount into cumulative occurrence counts of 0 to n-1. 
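(An exclusive prefix sum: if byteCount started as {5,0,2,...} it becomes
{0,5,5,...}, so byteCount[c] ends up holding the index of the first
occurrence of byte c in sorted order -- the counting-sort step of the
inverse Burrows-Wheeler transform.)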
*/ j=0; for(i=0;i<256;i++) { k=j+byteCount[i]; byteCount[i] = j; j=k; } /* Figure out what order dbuf would be in if we sorted it. */ for (i=0;i=dbufCount) return RETVAL_DATA_ERROR; bd->writePos=dbuf[origPtr]; bd->writeCurrent=(unsigned char)(bd->writePos&0xff); bd->writePos>>=8; bd->writeRunCountdown=5; } bd->writeCount=dbufCount; return RETVAL_OK; } /* Undo burrows-wheeler transform on intermediate buffer to produce output. If start_bunzip was initialized with out_fd=-1, then up to len bytes of data are written to outbuf. Return value is number of bytes written or error (all errors are negative numbers). If out_fd!=-1, outbuf and len are ignored, data is written to out_fd and return is RETVAL_OK or error. */ int read_bunzip(bunzip_data *bd, char *outbuf, int len) { const unsigned int *dbuf; int pos,current,previous,gotcount; /* If last read was short due to end of file, return last block now */ /* if(bd->writeCount<0) return bd->writeCount; */ /* james@bx.psu.edu: writeCount goes to -1 when the buffer is fully decoded, which results in this returning RETVAL_LAST_BLOCK, also equal to -1... Confusing, I'm returning 0 here to indicate no bytes written into the buffer */ if(bd->writeCount<0) return 0; gotcount = 0; dbuf=bd->dbuf; pos=bd->writePos; current=bd->writeCurrent; /* We will always have pending decoded data to write into the output buffer unless this is the very first call (in which case we haven't huffman-decoded a block into the intermediate buffer yet). */ if (bd->writeCopies) { /* Inside the loop, writeCopies means extra copies (beyond 1) */ --bd->writeCopies; /* Loop outputting bytes */ for(;;) { /* Write next byte into output buffer, updating CRC */ /* If the output buffer is full, snapshot state and return */ if(gotcount >= len) { bd->writePos=pos; bd->writeCurrent=current; bd->writeCopies++; return len; } outbuf[gotcount++] = current; bd->writeCRC=(((bd->writeCRC)<<8) ^bd->crc32Table[((bd->writeCRC)>>24)^current]); /* Loop now if we're outputting multiple copies of this byte */ if (bd->writeCopies) { --bd->writeCopies; continue; } decode_next_byte: if (!bd->writeCount--) break; /* Follow sequence vector to undo Burrows-Wheeler transform */ previous=current; pos=dbuf[pos]; current=pos&0xff; pos>>=8; /* After 3 consecutive copies of the same byte, the 4th is a repeat count. We count down from 4 instead * of counting up because testing for non-zero is faster */ if(--bd->writeRunCountdown) { if(current!=previous) bd->writeRunCountdown=4; } else { /* We have a repeated run, this byte indicates the count */ bd->writeCopies=current; current=previous; bd->writeRunCountdown=5; /* Sometimes there are just 3 bytes (run length 0) */ if(!bd->writeCopies) goto decode_next_byte; /* Subtract the 1 copy we'd output anyway to get extras */ --bd->writeCopies; } } /* Decompression of this block completed successfully */ bd->writeCRC=~bd->writeCRC; bd->totalCRC=((bd->totalCRC<<1) | (bd->totalCRC>>31)) ^ bd->writeCRC; /* If this block had a CRC error, force file level CRC error. */ if(bd->writeCRC!=bd->headerCRC) { // fprintf( stderr, "CRC ERROR\n" ); fflush( stderr ); bd->totalCRC=bd->headerCRC+1; return RETVAL_LAST_BLOCK; } /* james@bx.psu.edu -- rather than falling through we return here */ return gotcount; } goto decode_next_byte; } /** * Same as read_bunzip, but will stop if it encounters `stop_char`. 
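* Returns RETVAL_STOPCHAR (the stop character is included as the last byte
* written), RETVAL_BUFFER_FULL, or RETVAL_END_OF_BLOCK, storing the number
* of bytes produced through `gotcount_out`.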
*/ int read_bunzip_to_char(bunzip_data *bd, char *outbuf, int len, int* gotcount_out, char stopchar ) { const unsigned int *dbuf; int pos,current,previous,gotcount; /* If last read was short due to end of file, return last block now */ /* if(bd->writeCount<0) return bd->writeCount; */ /* james@bx.psu.edu: writeCount goes to -1 when the buffer is fully decoded, which results in this returning RETVAL_LAST_BLOCK, also equal to -1... Confusing, I'm returning 0 here to indicate no bytes written into the buffer */ if(bd->writeCount<0) return RETVAL_END_OF_BLOCK; gotcount = 0; dbuf=bd->dbuf; pos=bd->writePos; current=bd->writeCurrent; /* We will always have pending decoded data to write into the output buffer unless this is the very first call (in which case we haven't huffman-decoded a block into the intermediate buffer yet). */ if (bd->writeCopies) { /* Inside the loop, writeCopies means extra copies (beyond 1) */ --bd->writeCopies; /* Loop outputting bytes */ for(;;) { /* Write next byte into output buffer, updating CRC */ /* If the output buffer is full, snapshot state and return */ if(gotcount >= len) { bd->writePos=pos; bd->writeCurrent=current; bd->writeCopies++; *gotcount_out = gotcount; return RETVAL_BUFFER_FULL; } /* Also stop if we hit stop char (this could be faster) */ if( gotcount && outbuf[gotcount-1] == stopchar ) { bd->writePos=pos; bd->writeCurrent=current; bd->writeCopies++; *gotcount_out = gotcount; return RETVAL_STOPCHAR; } outbuf[gotcount++] = current; bd->writeCRC=(((bd->writeCRC)<<8) ^bd->crc32Table[((bd->writeCRC)>>24)^current]); /* Loop now if we're outputting multiple copies of this byte */ if (bd->writeCopies) { --bd->writeCopies; continue; } decode_next_byte: if (!bd->writeCount--) break; /* Follow sequence vector to undo Burrows-Wheeler transform */ previous=current; pos=dbuf[pos]; current=pos&0xff; pos>>=8; /* After 3 consecutive copies of the same byte, the 4th is a repeat count. We count down from 4 instead * of counting up because testing for non-zero is faster */ if(--bd->writeRunCountdown) { if(current!=previous) bd->writeRunCountdown=4; } else { /* We have a repeated run, this byte indicates the count */ bd->writeCopies=current; current=previous; bd->writeRunCountdown=5; /* Sometimes there are just 3 bytes (run length 0) */ if(!bd->writeCopies) goto decode_next_byte; /* Subtract the 1 copy we'd output anyway to get extras */ --bd->writeCopies; } } /* Decompression of this block completed successfully */ bd->writeCRC=~bd->writeCRC; bd->totalCRC=((bd->totalCRC<<1) | (bd->totalCRC>>31)) ^ bd->writeCRC; /* If this block had a CRC error, force file level CRC error. */ if(bd->writeCRC!=bd->headerCRC) { // fprintf( stderr, "CRC ERROR\n" ); fflush( stderr ); bd->totalCRC=bd->headerCRC+1; return RETVAL_LAST_BLOCK; } /* james@bx.psu.edu -- rather than falling through we return here */ *gotcount_out = gotcount; return RETVAL_END_OF_BLOCK; } goto decode_next_byte; } int init_block( bunzip_data *bd ) { int status; /* Refill the intermediate buffer by huffman-decoding next block of input */ /* (previous is just a convenient unused temp variable here) */ status=get_next_block(bd); if(status) { bd->writeCount=status; return status; } bd->writeCRC=0xffffffffUL; return RETVAL_OK; } /* Allocate the structure, read file header. If in_fd==-1, inbuf must contain a complete bunzip file (len bytes long). If in_fd!=-1, inbuf and len are ignored, and data is read from file handle into temporary buffer. 
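A minimal in-memory usage sketch (error handling omitted; `buf', `out' and
their lengths are the caller's):
  bunzip_data *bd;
  if (start_bunzip(&bd, -1, buf, buf_len) == RETVAL_OK
      && init_block(bd) == RETVAL_OK)
      n = read_bunzip(bd, out, out_len);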
*/ int start_bunzip(bunzip_data **bdp, int in_fd, char *inbuf, int len) { bunzip_data *bd; unsigned int i,j,c; const unsigned int BZh0=(((unsigned int)'B')<<24)+(((unsigned int)'Z')<<16) +(((unsigned int)'h')<<8)+(unsigned int)'0'; /* Figure out how much data to allocate */ i=sizeof(bunzip_data); if(in_fd!=-1) i+=IOBUF_SIZE; /* Allocate bunzip_data. Most fields initialize to zero. */ if(!(bd=*bdp=malloc(i))) return RETVAL_OUT_OF_MEMORY; memset(bd,0,sizeof(bunzip_data)); /* Setup input buffer */ if(-1==(bd->in_fd=in_fd)) { bd->inbuf=inbuf; bd->inbufCount=len; } else bd->inbuf=(unsigned char *)(bd+1); /* Init the CRC32 table (big endian) */ for(i=0;i<256;i++) { c=i<<24; for(j=8;j;j--) c=c&0x80000000 ? (c<<1)^0x04c11db7 : (c<<1); bd->crc32Table[i]=c; } /* Setup for I/O error handling via longjmp */ i=setjmp(bd->jmpbuf); if(i) return i; /* Ensure that file starts with "BZh['1'-'9']." */ i = get_bits(bd,32); if (((unsigned int)(i-BZh0-1)) >= 9) return RETVAL_NOT_BZIP_DATA; /* Fourth byte (ascii '1'-'9'), indicates block size in units of 100k of uncompressed data. Allocate intermediate buffer for block. */ bd->dbufSize=100000*(i-BZh0); if(!(bd->dbuf=malloc(bd->dbufSize * sizeof(int)))) return RETVAL_OUT_OF_MEMORY; return RETVAL_OK; } /* Example usage: decompress src_fd to dst_fd. (Stops at end of bzip data, not end of file.) */ extern int uncompressStream(int src_fd, int dst_fd) { char *outbuf; bunzip_data *bd; int i; if(!(outbuf=malloc(IOBUF_SIZE))) return RETVAL_OUT_OF_MEMORY; if(!(i=start_bunzip(&bd,src_fd,0,0))) { for(;;) { if (((i=init_block(bd)) < 0)) break; // fprintf( stderr, "init: %d\n", i ); for(;;) { if((i=read_bunzip(bd,outbuf,IOBUF_SIZE)) <= 0) break; // fprintf( stderr, "read: %d\n", i ); if(i!=write(dst_fd,outbuf,i)) { i=RETVAL_UNEXPECTED_OUTPUT_EOF; break; } } } } /* Check CRC and release memory */ if(i==RETVAL_LAST_BLOCK && bd->headerCRC==bd->totalCRC) i=RETVAL_OK; if(bd->dbuf) free(bd->dbuf); free(bd); free(outbuf); return i; } #ifdef MICRO_BUNZIP_MAIN static char * const bunzip_errors[]={NULL,"Bad file checksum","Not bzip data", "Unexpected input EOF","Unexpected output EOF","Data error", "Out of memory","Obsolete (pre 0.9.5) bzip format not supported."}; /* Dumb little test thing, decompress stdin to stdout */ int main(int argc, char *argv[]) { int i=uncompressStream(0,1); char c; if(i) fprintf(stderr,"%d: %s\n", i, bunzip_errors[-i]); else if(read(0,&c,1)) fprintf(stderr,"Trailing garbage ignored\n"); return -i; } #endifbx-python-0.8.13/src/bunzip/micro-bunzip.h000066400000000000000000000054541415666465100204460ustar00rootroot00000000000000#ifndef __MICRO_BUNZIP_H__ #define __MICRO_BUNZIP_H__ /* ---- Duplicated from micro-bzip.c -------------------------------------- */ #include #include #include #include #include #include /* Constants for huffman coding */ #define MAX_GROUPS 6 #define GROUP_SIZE 50 /* 64 would have been more efficient */ #define MAX_HUFCODE_BITS 20 /* Longest huffman code allowed */ #define MAX_SYMBOLS 258 /* 256 literals + RUNA + RUNB */ #define SYMBOL_RUNA 0 #define SYMBOL_RUNB 1 /* Status return values */ #define RETVAL_OK 0 #define RETVAL_LAST_BLOCK (-1) #define RETVAL_NOT_BZIP_DATA (-2) #define RETVAL_UNEXPECTED_INPUT_EOF (-3) #define RETVAL_UNEXPECTED_OUTPUT_EOF (-4) #define RETVAL_DATA_ERROR (-5) #define RETVAL_OUT_OF_MEMORY (-6) #define RETVAL_OBSOLETE_INPUT (-7) #define RETVAL_END_OF_BLOCK (-8) #define RETVAL_STOPCHAR (-9) #define RETVAL_BUFFER_FULL (-10) /* Other housekeeping constants */ #define IOBUF_SIZE 4096 /* This is what we know about each 
huffman coding group */ struct group_data { /* We have an extra slot at the end of limit[] for a sentinal value. */ int limit[MAX_HUFCODE_BITS+1],base[MAX_HUFCODE_BITS],permute[MAX_SYMBOLS]; int minLen, maxLen; }; /* Structure holding all the housekeeping data, including IO buffers and memory that persists between calls to bunzip */ typedef struct { /* State for interrupting output loop */ int writeCopies,writePos,writeRunCountdown,writeCount,writeCurrent; /* I/O tracking data (file handles, buffers, positions, etc.) */ int in_fd,out_fd,inbufCount,inbufPos /*,outbufPos*/; unsigned char *inbuf /*,*outbuf*/; unsigned int inbufBitCount, inbufBits; /* The CRC values stored in the block header and calculated from the data */ unsigned int crc32Table[256],headerCRC, totalCRC, writeCRC; /* Intermediate buffer and its size (in bytes) */ unsigned int *dbuf, dbufSize; /* These things are a bit too big to go on the stack */ unsigned char selectors[32768]; /* nSelectors=15 bits */ struct group_data groups[MAX_GROUPS]; /* huffman coding tables */ /* For I/O error handling */ jmp_buf jmpbuf; } bunzip_data; static char * const bunzip_errors[]={NULL,"Bad file checksum","Not bzip data", "Unexpected input EOF","Unexpected output EOF","Data error", "Out of memory","Obsolete (pre 0.9.5) bzip format not supported."}; /* ---- Forward declarations for micro-bzip.c ---------------------------- */ unsigned int get_bits(bunzip_data *bd, char bits_wanted); int get_next_block( bunzip_data *bd ); int read_bunzip(bunzip_data *bd, char *outbuf, int len); int start_bunzip(bunzip_data **bdp, int in_fd, char *inbuf, int len); int read_bunzip_to_char(bunzip_data *bd, char *outbuf, int len, int* gotcount_out, char stopchar ); #endifbx-python-0.8.13/src/cluster.c000066400000000000000000000176561415666465100162040ustar00rootroot00000000000000/* Kanwei Li, 2009 Inspired by previous ClusterTree This clustering algorithm uses a binary tree structure. Nodes correspond to non-overlapping intervals, where overlapping means that the distance between two intervals is less or equal to max_dist, which is the max separation. The tree self-balances using rotations based on the binomial sequence. Merges among nodes are performed whenever a node is changed/added that will cause other nodes to form a new cluster. 
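(The priority assigned in create_node is ceil(-log2(1 - u)) for uniform u,
i.e. the number of fair coin flips up to and including the first heads, a
geometric distribution; random priorities of this kind are exactly what a
treap needs to stay balanced in expectation.)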
*/ #include #include #include #include "cluster.h" #define ALLOC(pt) (malloc(sizeof(pt))) static int min(int a, int b) { if( a < b ) return a; else return b; } static int max(int a, int b) { if( a > b ) return a; else return b; } /* Create new tree with given max_dist (max distance between intervals to be considered a cluster), and min_intervals, the minimum number of intervals needed for a cluster to be considered significant */ clustertree* create_clustertree(int max_dist, int min_intervals) { clustertree *tree = ALLOC(clustertree); tree->max_dist = max_dist; tree->min_intervals = min_intervals; tree->root = NULL; return tree; } static interval* create_interval(int start, int end, int id) { interval *ival = ALLOC(interval); ival->start = start; ival->end = end; ival->id = id; ival->next = NULL; return ival; } static clusternode* create_node(int start, int end, int id) { clusternode *new_node = ALLOC(clusternode); new_node->start = start; new_node->end = end; new_node->interval_head = create_interval(start, end, id); new_node->interval_tail = new_node->interval_head; new_node->num_ivals = 1; new_node->left = NULL; new_node->right = NULL; double uniform = ((double)rand()) / (RAND_MAX); if (uniform == 1.0) uniform = 0; new_node->priority = (int)ceil( (-1.0 / log(.5)) * log( -1.0 / (uniform - 1))); return new_node; } static void recursively_free_intervals(interval *ival) { interval *next; if(ival) { next = ival->next; free(ival); recursively_free_intervals(next); } } static void recursively_free_nodes(clusternode *node) { if(node) { recursively_free_nodes(node->left); recursively_free_nodes(node->right); recursively_free_intervals(node->interval_head); free(node); } } void free_tree(clustertree *tree) { recursively_free_nodes(tree->root); free(tree); } void cluster_rotateright(clusternode **node) { clusternode* root = (*node)->left; (*node)->left = (*node)->left->right; root->right = (*node); *node = root; } void cluster_rotateleft(clusternode **node) { clusternode* root = (*node)->right; (*node)->right = (*node)->right->left; root->left = (*node); *node = root; } /* Go down the tree and merge nodes if necessary */ void cluster_fixup(clustertree *tree, clusternode **ln, clusternode **rn) { clusternode* local = *ln; clusternode* root = *rn; int maxstart = max(root->start, local->start); int maxend = max(local->end, root->end); int minstart = min(root->start, local->start); int minend = min(root->end, local->end); if( maxstart - minend <= tree->max_dist ) { /* Have to merge this node and children */ root->start = minstart; root->end = maxend; root->interval_tail->next = local->interval_head; root->interval_tail = local->interval_tail; root->num_ivals += local->num_ivals; if( local->right) cluster_fixup(tree, &(local->right), rn); if( local->left) cluster_fixup(tree, &(local->left), rn); if((local->right == NULL) && (local->left == NULL)) { free(local); *ln = NULL; } else if(local->right) { *ln = local->right; free(local); } else if (local->left) { *ln = local->left; free(local); } return; } // Even if we miss, we still have to check children if(local->left) { cluster_fixup(tree, &(local->left), rn); } if(local->right) { cluster_fixup(tree, &(local->right), rn); } } /* Pyrex "getregions" implements this. 
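The C version below just walks the treap in order, so clusters print sorted
by start position, each followed by its member intervals.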
Only used for C debugging */ void clustereach(clustertree *tree, clusternode *node) { interval* ival; if (node == NULL) { exit(1); /* Shouldn't happen */ } if (node->left != NULL) { clustereach(tree, node->left); } printf("Node: %d\t%d\n", node->start, node->end); ival = node->interval_head; while(ival) { printf("\tInterval %d: %d\t%d\n", ival->id, ival->start, ival->end); ival = ival->next; } if (node->right != NULL) { clustereach(tree, node->right); } } void clusteritr_recursive(clustertree *tree, clusternode *node, treeitr* *itr) { treeitr *newitr; if (node == NULL) { return; } if (node->right != NULL) { clusteritr_recursive(tree, node->right, itr); } if (node->num_ivals >= tree->min_intervals) { newitr = ALLOC(treeitr); newitr->next = *itr; newitr->node = node; *itr = newitr; } if (node->left != NULL) { clusteritr_recursive(tree, node->left, itr); } } /* Create an infix iterator */ treeitr* clusteritr(clustertree *tree) { treeitr *itr = NULL; clusteritr_recursive(tree, tree->root, &itr); if (itr != NULL) { return itr; } return NULL; } /* Free iterator (tail recursive) */ void freeclusteritr(treeitr *itr) { treeitr *next; if (itr == NULL) { return; } next = itr->next; free(itr); freeclusteritr(next); } /* Insert based on the start position of intervals */ clusternode* clusternode_insert(clustertree *tree, clusternode *node, int start, int end, int id) { int oldstart; int oldend; interval* ival; // printf("Inserting %d %d %d\n", start, end, id); if (node == NULL) { node = create_node(start, end, id); } else if ( (start - tree->max_dist) > node->end ) { /* We're to the right of this cluster */ node->right = clusternode_insert(tree, node->right, start, end, id); if (node->priority < node->right->priority) cluster_rotateleft(&node); } else if ( (end + tree->max_dist) < node->start) { /* We're to the left of this cluster */ node->left = clusternode_insert(tree, node->left, start, end, id); if (node->priority < node->left->priority) cluster_rotateright(&node); } else { /* We're in the range of this cluster */ /* Update the start and end to match to new values */ oldstart = node->start; oldend = node->end; node->start = min(start, node->start); node->end = max(end, node->end); ival = create_interval(start, end, id); ival->next = node->interval_head; /* Add this interval as the head of the interval list */ node->interval_head = ival; node->num_ivals += 1; if ( oldstart > node->start && node->left != NULL ) { /* New interval added to the start, and there's a left child */ cluster_fixup(tree, &(node->left), &node); } if ( oldend < node->end && node->right != NULL ) { /* New interval added to the end, and there's a right child */ cluster_fixup(tree, &(node->right), &node); } } return node; } int main() { // Simple test clustertree* tree = create_clustertree(0, 1); tree->root = clusternode_insert(tree, tree->root, 3, 4, 0); tree->root = clusternode_insert(tree, tree->root, 6, 7, 1); tree->root = clusternode_insert(tree, tree->root, 9, 10, 2); tree->root = clusternode_insert(tree, tree->root, 1, 2, 3); tree->root = clusternode_insert(tree, tree->root, 3, 8, 4); clustereach(tree, tree->root); return 0; } bx-python-0.8.13/src/cluster.h000066400000000000000000000016111415666465100161710ustar00rootroot00000000000000typedef struct struct_interval { int start; int end; int id; struct struct_interval *next; } interval; typedef struct struct_clusternode { int start; int end; int priority; struct struct_interval *interval_head; struct struct_interval *interval_tail; int num_ivals; struct struct_clusternode 
*left; struct struct_clusternode *right; } clusternode; typedef struct { int max_dist; int min_intervals; clusternode *root; } clustertree; typedef struct struct_treeitr { struct struct_treeitr *next; struct struct_clusternode *node; } treeitr; clusternode* clusternode_insert(clustertree *tree, clusternode *node, int start, int end, int id); clustertree* create_clustertree(int max_dist, int min_intervals); treeitr* clusteritr(clustertree *tree); void freeclusteritr(treeitr *itr); void free_tree(clustertree *tree); bx-python-0.8.13/src/kent/000077500000000000000000000000001415666465100153015ustar00rootroot00000000000000bx-python-0.8.13/src/kent/bits.c000066400000000000000000000141521415666465100164110ustar00rootroot00000000000000/* bits - handle operations on arrays of bits. * * This file is copyright 2002 Jim Kent, but license is hereby * granted for all use - public, private or commercial. */ #include "common.h" #include "bits.h" static char const rcsid[] = "$Id: bits.c,v 1.20 2008/03/25 16:32:31 angie Exp $"; static Bits oneBit[8] = { 0x80, 0x40, 0x20, 0x10, 0x8, 0x4, 0x2, 0x1}; static Bits leftMask[8] = {0xFF, 0x7F, 0x3F, 0x1F, 0xF, 0x7, 0x3, 0x1,}; static Bits rightMask[8] = {0x80, 0xC0, 0xE0, 0xF0, 0xF8, 0xFC, 0xFE, 0xFF,}; int bitsInByte[256]; static boolean inittedBitsInByte = FALSE; void bitsInByteInit(void) /* Initialize bitsInByte array. */ { int i; if (!inittedBitsInByte) { inittedBitsInByte = TRUE; for (i=0; i<256; ++i) { int count = 0; if (i&1) count = 1; if (i&2) ++count; if (i&4) ++count; if (i&8) ++count; if (i&0x10) ++count; if (i&0x20) ++count; if (i&0x40) ++count; if (i&0x80) ++count; bitsInByte[i] = count; } } } Bits *bitAlloc(int bitCount) /* Allocate bits. */ { int byteCount = ((bitCount+7)>>3); return needLargeZeroedMem(byteCount); } Bits *bitClone(Bits* orig, int bitCount) /* Clone bits. */ { int byteCount = ((bitCount+7)>>3); Bits* bits = needLargeZeroedMem(byteCount); if(orig!=NULL) memcpy(bits, orig, byteCount); return bits; } void bitFree(Bits **pB) /* Free bits. */ { freez(pB); } void bitSetOne(Bits *b, int bitIx) /* Set a single bit. */ { b[bitIx>>3] |= oneBit[bitIx&7]; } void bitClearOne(Bits *b, int bitIx) /* Clear a single bit. */ { b[bitIx>>3] &= ~oneBit[bitIx&7]; } void bitSetRange(Bits *b, int startIx, int bitCount) /* Set a range of bits. */ { int endIx = (startIx + bitCount - 1); int startByte = (startIx>>3); int endByte = (endIx>>3); int startBits = (startIx&7); int endBits = (endIx&7); int i; if (bitCount <= 0) return; if (startByte == endByte) { b[startByte] |= (leftMask[startBits] & rightMask[endBits]); return; } b[startByte] |= leftMask[startBits]; for (i = startByte+1; i>3] & oneBit[bitIx&7]) != 0; } int bitCountRange(Bits *b, int startIx, int bitCount) /* Count number of bits set in range. 
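Bit 0 is the most significant bit of a byte, so a range confined to one
byte is selected with leftMask[startBits] & rightMask[endBits]; e.g.
startBits=2, endBits=5 gives 0x3F & 0xFC = 0x3C, i.e. bits 2..5 inclusive.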
*/ { int endIx = (startIx + bitCount - 1); int startByte = (startIx>>3); int endByte = (endIx>>3); int startBits = (startIx&7); int endBits = (endIx&7); int i; int count = 0; if (bitCount <= 0) return 0; if (!inittedBitsInByte) bitsInByteInit(); if (startByte == endByte) return bitsInByte[b[startByte] & leftMask[startBits] & rightMask[endBits]]; count = bitsInByte[b[startByte] & leftMask[startBits]]; for (i = startByte+1; i>3); int iByte; /* scan initial byte */ while (((iBit & 7) != 0) && (iBit < bitCount)) { if (bitReadOne(b, iBit) == val) return iBit; iBit++; } /* scan byte at a time, if not already in last byte */ iByte = (iBit >> 3); if (iByte < endByte) { while ((iByte < endByte) && (b[iByte] == notByteVal)) iByte++; iBit = iByte << 3; } /* scan last byte */ while (iBit < bitCount) { if (bitReadOne(b, iBit) == val) return iBit; iBit++; } return bitCount; /* not found */ } int bitFindSet(Bits *b, int startIx, int bitCount) /* Find the index of the the next set bit. */ { return bitFind(b, startIx, TRUE, bitCount); } int bitFindClear(Bits *b, int startIx, int bitCount) /* Find the index of the the next clear bit. */ { return bitFind(b, startIx, FALSE, bitCount); } void bitClear(Bits *b, int bitCount) /* Clear many bits (possibly up to 7 beyond bitCount). */ { int byteCount = ((bitCount+7)>>3); zeroBytes(b, byteCount); } void bitClearRange(Bits *b, int startIx, int bitCount) /* Clear a range of bits. */ { int endIx = (startIx + bitCount - 1); int startByte = (startIx>>3); int endByte = (endIx>>3); int startBits = (startIx&7); int endBits = (endIx&7); int i; if (bitCount <= 0) return; if (startByte == endByte) { b[startByte] &= ~(leftMask[startBits] & rightMask[endBits]); return; } b[startByte] &= ~leftMask[startBits]; for (i = startByte+1; i>3); while (--byteCount >= 0) { *a = (*a & *b++); a++; } } void bitOr(Bits *a, Bits *b, int bitCount) /* Or two bitmaps. Put result in a. */ { int byteCount = ((bitCount+7)>>3); while (--byteCount >= 0) { *a = (*a | *b++); a++; } } void bitXor(Bits *a, Bits *b, int bitCount) { int byteCount = ((bitCount+7)>>3); while (--byteCount >= 0) { *a = (*a ^ *b++); a++; } } void bitNot(Bits *a, int bitCount) /* Flip all bits in a. */ { int byteCount = ((bitCount+7)>>3); while (--byteCount >= 0) { *a = ~*a; a++; } } void bitPrint(Bits *a, int startIx, int bitCount, FILE* out) /* Print part or all of bit map as a string of 0s and 1s. Mostly useful for * debugging */ { int i; for (i = startIx; i < bitCount; i++) { if (bitReadOne(a, i)) fputc('1', out); else fputc('0', out); } fputc('\n', out); } bx-python-0.8.13/src/kent/bits.h000066400000000000000000000034261415666465100164200ustar00rootroot00000000000000/* bits - handle operations on arrays of bits. * * This file is copyright 2002 Jim Kent, but license is hereby * granted for all use - public, private or commercial. */ #ifndef BITS_H #define BITS_H #include "common.h" typedef unsigned char Bits; Bits *bitAlloc(int bitCount); /* Allocate bits. */ Bits *bitClone(Bits* orig, int bitCount); /* Clone bits. */ void bitFree(Bits **pB); /* Free bits. */ void bitSetOne(Bits *b, int bitIx); /* Set a single bit. */ void bitClearOne(Bits *b, int bitIx); /* Clear a single bit. */ void bitSetRange(Bits *b, int startIx, int bitCount); /* Set a range of bits. */ boolean bitReadOne(Bits *b, int bitIx); /* Read a single bit. */ int bitCountRange(Bits *b, int startIx, int bitCount); /* Count number of bits set in range. */ int bitFindSet(Bits *b, int startIx, int bitCount); /* Find the index of the the next set bit. 
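Returns bitCount if no set bit exists at or after startIx.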
*/ int bitFindClear(Bits *b, int startIx, int bitCount); /* Find the index of the the next clear bit. */ void bitClear(Bits *b, int bitCount); /* Clear many bits (possibly up to 7 beyond bitCount). */ void bitClearRange(Bits *b, int startIx, int bitCount); /* Clear a range of bits. */ void bitAnd(Bits *a, Bits *b, int bitCount); /* And two bitmaps. Put result in a. */ void bitOr(Bits *a, Bits *b, int bitCount); /* Or two bitmaps. Put result in a. */ void bitXor(Bits *a, Bits *b, int bitCount); /* Xor two bitmaps. Put result in a. */ void bitNot(Bits *a, int bitCount); /* Flip all bits in a. */ void bitPrint(Bits *a, int startIx, int bitCount, FILE* out); /* Print part or all of bit map as a string of 0s and 1s. Mostly useful for * debugging */ extern int bitsInByte[256]; /* Lookup table for how many bits are set in a byte. */ void bitsInByteInit(void); /* Initialize bitsInByte array. */ #endif /* BITS_H */ bx-python-0.8.13/src/kent/common.c000066400000000000000000000027441415666465100167440ustar00rootroot00000000000000#include "common.h" void *needMem(size_t size) /* Need mem calls abort if the memory allocation fails. The memory * * is initialized to zero. */ { void *pt; if ((pt = malloc(size)) == NULL) { fprintf( stderr, "Out of memory needMem - request size %llu bytes\n", (unsigned long long)size); exit(1); } memset(pt, 0, size); return pt; } void freeMem(void *pt) /* Free memory will check for null before freeing. */ { if (pt != NULL) free(pt); } void *needLargeZeroedMem(size_t size) /* Request a large block of memory and zero it. */ { void *v; /*v = needLargeMem(size);*/ v = malloc(size); /* * If you do memset(NULL,0,size), there will be a segfault. * So check v for NULL */ if( v != NULL ) memset(v, 0, size); return v; } void freez(void *vpt) /* Pass address of pointer. Will free pointer and set it * * to NULL. */ { void **ppt = (void **)vpt; void *pt = *ppt; *ppt = NULL; freeMem(pt); } /* fill a specified area of memory with zeroes * If you do zeroBytes(NULL,count), there will be a segfault. * So check pt for NULL */ void zeroBytes(void *vpt, int count) { char *pt = (char*)vpt; if(pt != NULL ){ while (--count>=0) *pt++=0; } } bx-python-0.8.13/src/kent/common.h000066400000000000000000000011771415666465100167500ustar00rootroot00000000000000#ifndef __COMMON_H__ #define __COMMON_H__ #include #include #include #include #include /* Let's pretend C has a boolean type. */ #define TRUE 1 #define FALSE 0 #define boolean int #define bool char #define AllocVar(pt) (pt = needMem(sizeof(*pt))) /* Shortcut to allocating a single variable on the heap and * * assigning pointer to it. */ #define AllocArray(pt, size) (pt = needLargeZeroedMem(sizeof(*pt) * (size))) void *needMem(size_t size); void freeMem(void *pt); void *needLargeZeroedMem(size_t size); void freez(void *vpt); void zeroBytes(void *vpt, int count); #endif bx-python-0.8.13/src/npy_capsule_as_void_ptr.h000066400000000000000000000014511415666465100214250ustar00rootroot00000000000000/* * extracted from: * https://github.com/numpy/numpy/blob/v1.13.3/numpy/core/include/numpy/npy_3kcompat.h * licensed under BSD-3-Clause: * https://github.com/numpy/numpy/blob/v1.13.3/LICENSE.txt */ #ifndef _NPY_CAPSULE_AS_VOID_PTR_H_ #define _NPY_CAPSULE_AS_VOID_PTR_H_ #include /* * PyCObject functions adapted to PyCapsules. * * The main job here is to get rid of the improved error handling * of PyCapsules. It's a shame... 
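* (PyCapsule_GetPointer can raise where PyCObject_AsVoidPtr silently
* returned the pointer, so on Python 3 we clear the error and just hand
* back NULL.)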
*/ #if PY_VERSION_HEX >= 0x03000000 static /*NPY_INLINE*/ void * NpyCapsule_AsVoidPtr(PyObject *obj) { void *ret = PyCapsule_GetPointer(obj, NULL); if (ret == NULL) { PyErr_Clear(); } return ret; } #else static /*NPY_INLINE*/ void * NpyCapsule_AsVoidPtr(PyObject *ptr) { return PyCObject_AsVoidPtr(ptr); } #endif #endif bx-python-0.8.13/src/pwm_utils.c000066400000000000000000000031731415666465100165330ustar00rootroot00000000000000 #include #include #include int symbol_match( char, char); int pattern_match( char*, char*, int); int main(int argc, char** argv) { if (argc == 3) { int string_size = strlen(argv[1]); if (strlen(argv[2]) != string_size) { fprintf(stdout, "%s != %s\n", argv[1], argv[2]); return 1; } if ( pattern_match( argv[1], argv[2], string_size) ) fprintf(stdout, "%s == %s\n", argv[1], argv[2]); else fprintf(stdout, "%s != %s\n", argv[1], argv[2]); } return 0; } int pattern_match( char* string, char* pattern, int n){ int i = 0; while (i #include #include #include #include #include #include #include "bgzf.h" #include "khash.h" typedef struct { int size; uint8_t *block; int64_t end_offset; } cache_t; KHASH_MAP_INIT_INT64(cache, cache_t) #if defined(_WIN32) || defined(_MSC_VER) #define ftello(fp) ftell(fp) #define fseeko(fp, offset, whence) fseek(fp, offset, whence) #else extern off_t ftello(FILE *stream); extern int fseeko(FILE *stream, off_t offset, int whence); #endif typedef int8_t bgzf_byte_t; static const int DEFAULT_BLOCK_SIZE = 64 * 1024; static const int MAX_BLOCK_SIZE = 64 * 1024; static const int BLOCK_HEADER_LENGTH = 18; static const int BLOCK_FOOTER_LENGTH = 8; static const int GZIP_ID1 = 31; static const int GZIP_ID2 = 139; static const int CM_DEFLATE = 8; static const int FLG_FEXTRA = 4; static const int OS_UNKNOWN = 255; static const int BGZF_ID1 = 66; // 'B' static const int BGZF_ID2 = 67; // 'C' static const int BGZF_LEN = 2; static const int BGZF_XLEN = 6; // BGZF_LEN+4 static const int GZIP_WINDOW_BITS = -15; // no zlib header static const int Z_DEFAULT_MEM_LEVEL = 8; inline void packInt16(uint8_t* buffer, uint16_t value) { buffer[0] = value; buffer[1] = value >> 8; } inline int unpackInt16(const uint8_t* buffer) { return (buffer[0] | (buffer[1] << 8)); } inline void packInt32(uint8_t* buffer, uint32_t value) { buffer[0] = value; buffer[1] = value >> 8; buffer[2] = value >> 16; buffer[3] = value >> 24; } static inline int bgzf_min(int x, int y) { return (x < y) ? 
x : y; } static void report_error(BGZF* fp, const char* message) { fp->error = message; } static BGZF *bgzf_read_init() { BGZF *fp; fp = calloc(1, sizeof(BGZF)); fp->uncompressed_block_size = MAX_BLOCK_SIZE; fp->uncompressed_block = malloc(MAX_BLOCK_SIZE); fp->compressed_block_size = MAX_BLOCK_SIZE; fp->compressed_block = malloc(MAX_BLOCK_SIZE); fp->cache_size = 0; fp->cache = kh_init(cache); return fp; } static BGZF* open_read(int fd) { #ifdef _USE_KNETFILE knetFile *file = knet_dopen(fd, "r"); #else FILE* file = fdopen(fd, "r"); #endif BGZF* fp; if (file == 0) return 0; fp = bgzf_read_init(); fp->file_descriptor = fd; fp->open_mode = 'r'; #ifdef _USE_KNETFILE fp->x.fpr = file; #else fp->file = file; #endif return fp; } static BGZF* open_write(int fd, bool is_uncompressed) { FILE* file = fdopen(fd, "w"); BGZF* fp; if (file == 0) return 0; fp = malloc(sizeof(BGZF)); fp->file_descriptor = fd; fp->open_mode = 'w'; fp->owned_file = 0; fp->is_uncompressed = is_uncompressed; #ifdef _USE_KNETFILE fp->x.fpw = file; #else fp->file = file; #endif fp->uncompressed_block_size = DEFAULT_BLOCK_SIZE; fp->uncompressed_block = NULL; fp->compressed_block_size = MAX_BLOCK_SIZE; fp->compressed_block = malloc(MAX_BLOCK_SIZE); fp->block_address = 0; fp->block_offset = 0; fp->block_length = 0; fp->error = NULL; return fp; } #ifdef __SUNPRO_C BGZF* bgzf_open(const char* path, const char* mode) #else BGZF* bgzf_open(const char* __restrict path, const char* __restrict mode) #endif { BGZF* fp = NULL; if (mode[0] == 'r' || mode[0] == 'R') { /* The reading mode is preferred. */ #ifdef _USE_KNETFILE knetFile *file = knet_open(path, mode); if (file == 0) return 0; fp = bgzf_read_init(); fp->file_descriptor = -1; fp->open_mode = 'r'; fp->x.fpr = file; #else int fd, oflag = O_RDONLY; #ifdef _WIN32 oflag |= O_BINARY; #endif fd = open(path, oflag); if (fd == -1) return 0; fp = open_read(fd); #endif } else if (mode[0] == 'w' || mode[0] == 'W') { int fd, oflag = O_WRONLY | O_CREAT | O_TRUNC; #ifdef _WIN32 oflag |= O_BINARY; #endif fd = open(path, oflag, 0666); if (fd == -1) return 0; fp = open_write(fd, strstr(mode, "u")? 1 : 0); } if (fp != NULL) { fp->owned_file = 1; } return fp; } #ifdef __SUNPRO_C BGZF* bgzf_fdopen(int fd, const char * mode) #else BGZF* bgzf_fdopen(int fd, const char * __restrict mode) #endif { if (fd == -1) return 0; if (mode[0] == 'r' || mode[0] == 'R') { return open_read(fd); } else if (mode[0] == 'w' || mode[0] == 'W') { return open_write(fd, strstr(mode, "u")? 1 : 0); } else { return NULL; } } static int deflate_block(BGZF* fp, int block_length) { // Deflate the block in fp->uncompressed_block into fp->compressed_block. // Also adds an extra field that stores the compressed block length. bgzf_byte_t* buffer = fp->compressed_block; int buffer_size = fp->compressed_block_size; // Init gzip header buffer[0] = GZIP_ID1; buffer[1] = GZIP_ID2; buffer[2] = CM_DEFLATE; buffer[3] = FLG_FEXTRA; buffer[4] = 0; // mtime buffer[5] = 0; buffer[6] = 0; buffer[7] = 0; buffer[8] = 0; buffer[9] = OS_UNKNOWN; buffer[10] = BGZF_XLEN; buffer[11] = 0; buffer[12] = BGZF_ID1; buffer[13] = BGZF_ID2; buffer[14] = BGZF_LEN; buffer[15] = 0; buffer[16] = 0; // placeholder for block length buffer[17] = 0; // loop to retry for blocks that do not compress enough int input_length = block_length; int compressed_length = 0; while (1) { int compress_level = fp->is_uncompressed? 
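/* zlib level 0 stores the data in BGZF blocks without compressing it;
   selected by opening the file in "u" mode */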
0 : Z_DEFAULT_COMPRESSION; z_stream zs; zs.zalloc = NULL; zs.zfree = NULL; zs.next_in = fp->uncompressed_block; zs.avail_in = input_length; zs.next_out = (void*)&buffer[BLOCK_HEADER_LENGTH]; zs.avail_out = buffer_size - BLOCK_HEADER_LENGTH - BLOCK_FOOTER_LENGTH; int status = deflateInit2(&zs, compress_level, Z_DEFLATED, GZIP_WINDOW_BITS, Z_DEFAULT_MEM_LEVEL, Z_DEFAULT_STRATEGY); if (status != Z_OK) { report_error(fp, "deflate init failed"); return -1; } status = deflate(&zs, Z_FINISH); if (status != Z_STREAM_END) { deflateEnd(&zs); if (status == Z_OK) { // Not enough space in buffer. // Can happen in the rare case the input doesn't compress enough. // Reduce the amount of input until it fits. input_length -= 1024; if (input_length <= 0) { // should never happen report_error(fp, "input reduction failed"); return -1; } continue; } report_error(fp, "deflate failed"); return -1; } status = deflateEnd(&zs); if (status != Z_OK) { report_error(fp, "deflate end failed"); return -1; } compressed_length = zs.total_out; compressed_length += BLOCK_HEADER_LENGTH + BLOCK_FOOTER_LENGTH; if (compressed_length > MAX_BLOCK_SIZE) { // should never happen report_error(fp, "deflate overflow"); return -1; } break; } packInt16((uint8_t*)&buffer[16], compressed_length-1); uint32_t crc = crc32(0L, NULL, 0L); crc = crc32(crc, fp->uncompressed_block, input_length); packInt32((uint8_t*)&buffer[compressed_length-8], crc); packInt32((uint8_t*)&buffer[compressed_length-4], input_length); int remaining = block_length - input_length; if (remaining > 0) { if (remaining > input_length) { // should never happen (check so we can use memcpy) report_error(fp, "remainder too large"); return -1; } memcpy(fp->uncompressed_block, fp->uncompressed_block + input_length, remaining); } fp->block_offset = remaining; return compressed_length; } static int inflate_block(BGZF* fp, int block_length) { // Inflate the block in fp->compressed_block into fp->uncompressed_block z_stream zs; zs.zalloc = NULL; zs.zfree = NULL; zs.next_in = fp->compressed_block + 18; zs.avail_in = block_length - 16; zs.next_out = fp->uncompressed_block; zs.avail_out = fp->uncompressed_block_size; int status = inflateInit2(&zs, GZIP_WINDOW_BITS); if (status != Z_OK) { report_error(fp, "inflate init failed"); return -1; } status = inflate(&zs, Z_FINISH); if (status != Z_STREAM_END) { inflateEnd(&zs); report_error(fp, "inflate failed"); return -1; } status = inflateEnd(&zs); if (status != Z_OK) { report_error(fp, "inflate failed"); return -1; } return zs.total_out; } static int check_header(const bgzf_byte_t* header) { return (header[0] == GZIP_ID1 && header[1] == (bgzf_byte_t) GZIP_ID2 && header[2] == Z_DEFLATED && (header[3] & FLG_FEXTRA) != 0 && unpackInt16((uint8_t*)&header[10]) == BGZF_XLEN && header[12] == BGZF_ID1 && header[13] == BGZF_ID2 && unpackInt16((uint8_t*)&header[14]) == BGZF_LEN); } static void free_cache(BGZF *fp) { khint_t k; khash_t(cache) *h = (khash_t(cache)*)fp->cache; if (fp->open_mode != 'r') return; for (k = kh_begin(h); k < kh_end(h); ++k) if (kh_exist(h, k)) free(kh_val(h, k).block); kh_destroy(cache, h); } static int load_block_from_cache(BGZF *fp, int64_t block_address) { khint_t k; cache_t *p; khash_t(cache) *h = (khash_t(cache)*)fp->cache; k = kh_get(cache, h, block_address); if (k == kh_end(h)) return 0; p = &kh_val(h, k); if (fp->block_length != 0) fp->block_offset = 0; fp->block_address = block_address; fp->block_length = p->size; memcpy(fp->uncompressed_block, p->block, MAX_BLOCK_SIZE); #ifdef _USE_KNETFILE knet_seek(fp->x.fpr, 
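/* position the underlying stream just past the cached block */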
p->end_offset, SEEK_SET); #else fseeko(fp->file, p->end_offset, SEEK_SET); #endif return p->size; } static void cache_block(BGZF *fp, int size) { int ret; khint_t k; cache_t *p; khash_t(cache) *h = (khash_t(cache)*)fp->cache; if (MAX_BLOCK_SIZE >= fp->cache_size) return; if ((kh_size(h) + 1) * MAX_BLOCK_SIZE > fp->cache_size) { /* A better way would be to remove the oldest block in the * cache, but here we remove a random one for simplicity. This * should not have a big impact on performance. */ for (k = kh_begin(h); k < kh_end(h); ++k) if (kh_exist(h, k)) break; if (k < kh_end(h)) { free(kh_val(h, k).block); kh_del(cache, h, k); } } k = kh_put(cache, h, fp->block_address, &ret); if (ret == 0) return; // if this happens, a bug! p = &kh_val(h, k); p->size = fp->block_length; p->end_offset = fp->block_address + size; p->block = malloc(MAX_BLOCK_SIZE); memcpy(kh_val(h, k).block, fp->uncompressed_block, MAX_BLOCK_SIZE); } static int read_block(BGZF* fp) { bgzf_byte_t header[BLOCK_HEADER_LENGTH]; int size = 0; #ifdef _USE_KNETFILE int64_t block_address = knet_tell(fp->x.fpr); if (load_block_from_cache(fp, block_address)) return 0; int count = knet_read(fp->x.fpr, header, sizeof(header)); #else int64_t block_address = ftello(fp->file); if (load_block_from_cache(fp, block_address)) return 0; int count = fread(header, 1, sizeof(header), fp->file); #endif if (count == 0) { fp->block_length = 0; return 0; } size = count; if (count != sizeof(header)) { report_error(fp, "read failed"); return -1; } if (!check_header(header)) { report_error(fp, "invalid block header"); return -1; } int block_length = unpackInt16((uint8_t*)&header[16]) + 1; bgzf_byte_t* compressed_block = (bgzf_byte_t*) fp->compressed_block; memcpy(compressed_block, header, BLOCK_HEADER_LENGTH); int remaining = block_length - BLOCK_HEADER_LENGTH; #ifdef _USE_KNETFILE count = knet_read(fp->x.fpr, &compressed_block[BLOCK_HEADER_LENGTH], remaining); #else count = fread(&compressed_block[BLOCK_HEADER_LENGTH], 1, remaining, fp->file); #endif if (count != remaining) { report_error(fp, "read failed"); return -1; } size += count; count = inflate_block(fp, block_length); if (count < 0) { return -1; } if (fp->block_length != 0) { // Do not reset offset if this read follows a seek. 
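/* bgzf_seek sets block_length to 0 and leaves block_offset pointing at the
   target position inside the block about to be loaded, so the offset is
   only reset on sequential reads */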
fp->block_offset = 0; } fp->block_address = block_address; fp->block_length = count; cache_block(fp, size); return 0; } int bgzf_read(BGZF* fp, void* data, int length) { if (length <= 0) { return 0; } if (fp->open_mode != 'r') { report_error(fp, "file not open for reading"); return -1; } int bytes_read = 0; bgzf_byte_t* output = data; while (bytes_read < length) { int available = fp->block_length - fp->block_offset; if (available <= 0) { if (read_block(fp) != 0) { return -1; } available = fp->block_length - fp->block_offset; if (available <= 0) { break; } } int copy_length = bgzf_min(length-bytes_read, available); bgzf_byte_t* buffer = fp->uncompressed_block; memcpy(output, buffer + fp->block_offset, copy_length); fp->block_offset += copy_length; output += copy_length; bytes_read += copy_length; } if (fp->block_offset == fp->block_length) { #ifdef _USE_KNETFILE fp->block_address = knet_tell(fp->x.fpr); #else fp->block_address = ftello(fp->file); #endif fp->block_offset = 0; fp->block_length = 0; } return bytes_read; } static int flush_block(BGZF* fp) { while (fp->block_offset > 0) { int block_length = deflate_block(fp, fp->block_offset); if (block_length < 0) { return -1; } #ifdef _USE_KNETFILE int count = fwrite(fp->compressed_block, 1, block_length, fp->x.fpw); #else int count = fwrite(fp->compressed_block, 1, block_length, fp->file); #endif if (count != block_length) { report_error(fp, "write failed"); return -1; } fp->block_address += block_length; } return 0; } int bgzf_write(BGZF* fp, const void* data, int length) { if (fp->open_mode != 'w') { report_error(fp, "file not open for writing"); return -1; } if (fp->uncompressed_block == NULL) { fp->uncompressed_block = malloc(fp->uncompressed_block_size); } const bgzf_byte_t* input = data; int block_length = fp->uncompressed_block_size; int bytes_written = 0; while (bytes_written < length) { int copy_length = bgzf_min(block_length - fp->block_offset, length - bytes_written); bgzf_byte_t* buffer = fp->uncompressed_block; memcpy(buffer + fp->block_offset, input, copy_length); fp->block_offset += copy_length; input += copy_length; bytes_written += copy_length; if (fp->block_offset == block_length) { if (flush_block(fp) != 0) { break; } } } return bytes_written; } int bgzf_close(BGZF* fp) { if (fp->open_mode == 'w') { if (flush_block(fp) != 0) { return -1; } { // add an empty block int count, block_length = deflate_block(fp, 0); #ifdef _USE_KNETFILE count = fwrite(fp->compressed_block, 1, block_length, fp->x.fpw); #else count = fwrite(fp->compressed_block, 1, block_length, fp->file); #endif } #ifdef _USE_KNETFILE if (fflush(fp->x.fpw) != 0) { #else if (fflush(fp->file) != 0) { #endif report_error(fp, "flush failed"); return -1; } } if (fp->owned_file) { #ifdef _USE_KNETFILE int ret; if (fp->open_mode == 'w') ret = fclose(fp->x.fpw); else ret = knet_close(fp->x.fpr); if (ret != 0) return -1; #else if (fclose(fp->file) != 0) { return -1; } #endif } free(fp->uncompressed_block); free(fp->compressed_block); free_cache(fp); free(fp); return 0; } int64_t bgzf_tell(BGZF* fp) { return ((fp->block_address << 16) | (fp->block_offset & 0xFFFF)); } void bgzf_set_cache_size(BGZF *fp, int cache_size) { if (fp) fp->cache_size = cache_size; } int bgzf_check_EOF(BGZF *fp) { static uint8_t magic[28] = "\037\213\010\4\0\0\0\0\0\377\6\0\102\103\2\0\033\0\3\0\0\0\0\0\0\0\0\0"; uint8_t buf[28]; off_t offset; #ifdef _USE_KNETFILE offset = knet_tell(fp->x.fpr); if (knet_seek(fp->x.fpr, -28, SEEK_END) != 0) return -1; knet_read(fp->x.fpr, buf, 28); 
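/* Illustrative note (added commentary, not in the upstream samtools
 * source): the 28-byte magic checked below is a complete, empty BGZF
 * block - a gzip header carrying the "BC" extra subfield, an empty deflate
 * stream, a zero CRC32 and a zero uncompressed length. bgzf_close appends
 * the same empty block on write, so an intact file ends with exactly these
 * bytes. A caller can probe for truncation along these lines ("data.bgzf"
 * is a hypothetical name):
 *
 *     BGZF *fp = bgzf_open("data.bgzf", "r");
 *     if (fp && bgzf_check_EOF(fp) != 1)
 *         fprintf(stderr, "no BGZF EOF marker; file may be truncated\n");
 */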
knet_seek(fp->x.fpr, offset, SEEK_SET); #else offset = ftello(fp->file); if (fseeko(fp->file, -28, SEEK_END) != 0) return -1; fread(buf, 1, 28, fp->file); fseeko(fp->file, offset, SEEK_SET); #endif return (memcmp(magic, buf, 28) == 0)? 1 : 0; } int64_t bgzf_seek(BGZF* fp, int64_t pos, int where) { if (fp->open_mode != 'r') { report_error(fp, "file not open for read"); return -1; } if (where != SEEK_SET) { report_error(fp, "unimplemented seek option"); return -1; } int block_offset = pos & 0xFFFF; int64_t block_address = (pos >> 16) & 0xFFFFFFFFFFFFLL; #ifdef _USE_KNETFILE if (knet_seek(fp->x.fpr, block_address, SEEK_SET) != 0) { #else if (fseeko(fp->file, block_address, SEEK_SET) != 0) { #endif report_error(fp, "seek failed"); return -1; } fp->block_length = 0; // indicates current block is not loaded fp->block_address = block_address; fp->block_offset = block_offset; return 0; } bx-python-0.8.13/src/samtools/bgzf.h000066400000000000000000000077551415666465100173160ustar00rootroot00000000000000/* The MIT License Copyright (c) 2008 Broad Institute / Massachusetts Institute of Technology Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #ifndef __BGZF_H #define __BGZF_H #include <stdint.h> #include <stdio.h> #include <zlib.h> #include <stdbool.h> #ifdef _USE_KNETFILE #include "knetfile.h" #endif //typedef int8_t bool; typedef struct { int file_descriptor; char open_mode; // 'r' or 'w' bool owned_file, is_uncompressed; #ifdef _USE_KNETFILE union { knetFile *fpr; FILE *fpw; } x; #else FILE* file; #endif int uncompressed_block_size; int compressed_block_size; void* uncompressed_block; void* compressed_block; int64_t block_address; int block_length; int block_offset; int cache_size; const char* error; void *cache; // a pointer to a hash table } BGZF; #ifdef __cplusplus extern "C" { #endif /* * Open an existing file descriptor for reading or writing. * Mode must be either "r" or "w". * A subsequent bgzf_close will not close the file descriptor. * Returns null on error. */ #ifdef __SUNPRO_C BGZF* bgzf_fdopen(int fd, const char* mode); #else BGZF* bgzf_fdopen(int fd, const char* __restrict mode); #endif /* * Open the specified file for reading or writing. * Mode must be either "r" or "w". * Returns null on error. */ #ifdef __SUNPRO_C BGZF* bgzf_open(const char* path, const char* mode); #else BGZF* bgzf_open(const char* path, const char* __restrict mode); #endif /* * Close the BGZ file and free all associated resources. * Does not close the underlying file descriptor if created with bgzf_fdopen. * Returns zero on success, -1 on error.
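 *
 * A minimal read-side usage sketch (an illustrative addition, not upstream
 * documentation; "example.bgzf" is a placeholder name and error handling is
 * abbreviated):
 *
 *     BGZF* fp = bgzf_open("example.bgzf", "r");
 *     if (fp != NULL) {
 *         char buf[4096];
 *         int n;
 *         while ((n = bgzf_read(fp, buf, sizeof(buf))) > 0) {
 *             // process n bytes of decompressed data in buf
 *         }
 *         bgzf_close(fp);
 *     }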
*/ int bgzf_close(BGZF* fp); /* * Read up to length bytes from the file storing into data. * Returns the number of bytes actually read. * Returns zero on end of file. * Returns -1 on error. */ int bgzf_read(BGZF* fp, void* data, int length); /* * Write length bytes from data to the file. * Returns the number of bytes written. * Returns -1 on error. */ int bgzf_write(BGZF* fp, const void* data, int length); /* * Return a virtual file pointer to the current location in the file. * No interpretation of the value should be made, other than that a subsequent * call to bgzf_seek can be used to position the file at the same point. * Return value is non-negative on success. * Returns -1 on error. */ int64_t bgzf_tell(BGZF* fp); /* * Set the file to read from the location specified by pos, which must * be a value previously returned by bgzf_tell for this file (but not * necessarily one returned by this file handle). * The where argument must be SEEK_SET. * Seeking on a file opened for write is not supported. * Returns zero on success, -1 on error. */ int64_t bgzf_seek(BGZF* fp, int64_t pos, int where); /* * Set the cache size. Zero to disable. By default, caching is * disabled. The recommended cache size for frequent random access is * about 8M bytes. */ void bgzf_set_cache_size(BGZF *fp, int cache_size); int bgzf_check_EOF(BGZF *fp); #ifdef __cplusplus } #endif #endif bx-python-0.8.13/src/samtools/khash.h000066400000000000000000000402121415666465100174500ustar00rootroot00000000000000/* The MIT License Copyright (c) 2008 Genome Research Ltd (GRL). Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ /* Contact: Heng Li */ /* An example: #include "khash.h" KHASH_MAP_INIT_INT(32, char) int main() { int ret, is_missing; khiter_t k; khash_t(32) *h = kh_init(32); k = kh_put(32, h, 5, &ret); if (!ret) kh_del(32, h, k); kh_value(h, k) = 10; k = kh_get(32, h, 10); is_missing = (k == kh_end(h)); k = kh_get(32, h, 5); kh_del(32, h, k); for (k = kh_begin(h); k != kh_end(h); ++k) if (kh_exist(h, k)) kh_value(h, k) = 1; kh_destroy(32, h); return 0; } */ /* 2008-09-19 (0.2.3): * Corrected the example * Improved interfaces 2008-09-11 (0.2.2): * Improved speed a little in kh_put() 2008-09-10 (0.2.1): * Added kh_clear() * Fixed a compiling error 2008-09-02 (0.2.0): * Changed to token concatenation which increases flexibility. 2008-08-31 (0.1.2): * Fixed a bug in kh_get(), which has not been tested previously. 2008-08-31 (0.1.1): * Added destructor */ #ifndef __AC_KHASH_H #define __AC_KHASH_H /*! @header Generic hash table library.
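 @discussion Explanatory note on the internals (added commentary, not
 upstream text): every bucket carries two metadata bits packed into the
 flags array, an "empty" bit (value 2) and a "deleted" bit (value 1), so a
 single 32-bit word tracks 16 buckets. For bucket i the word is flags[i>>4]
 and its bit pair starts at bit (i&0xf)<<1; the memset(flags, 0xaa, ...)
 in kh_clear therefore marks every bucket empty, because 0xaa is the bit
 pattern 10101010. The __ac_isempty/__ac_isdel macros below read exactly
 these bits.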
@copyright Heng Li */ #define AC_VERSION_KHASH_H "0.2.2" #include <stdlib.h> #include <string.h> #include <stdint.h> typedef uint32_t khint_t; typedef khint_t khiter_t; #define __ac_HASH_PRIME_SIZE 32 static const uint32_t __ac_prime_list[__ac_HASH_PRIME_SIZE] = { 0ul, 3ul, 11ul, 23ul, 53ul, 97ul, 193ul, 389ul, 769ul, 1543ul, 3079ul, 6151ul, 12289ul, 24593ul, 49157ul, 98317ul, 196613ul, 393241ul, 786433ul, 1572869ul, 3145739ul, 6291469ul, 12582917ul, 25165843ul, 50331653ul, 100663319ul, 201326611ul, 402653189ul, 805306457ul, 1610612741ul, 3221225473ul, 4294967291ul }; #define __ac_isempty(flag, i) ((flag[i>>4]>>((i&0xfU)<<1))&2) #define __ac_isdel(flag, i) ((flag[i>>4]>>((i&0xfU)<<1))&1) #define __ac_iseither(flag, i) ((flag[i>>4]>>((i&0xfU)<<1))&3) #define __ac_set_isdel_false(flag, i) (flag[i>>4]&=~(1ul<<((i&0xfU)<<1))) #define __ac_set_isempty_false(flag, i) (flag[i>>4]&=~(2ul<<((i&0xfU)<<1))) #define __ac_set_isboth_false(flag, i) (flag[i>>4]&=~(3ul<<((i&0xfU)<<1))) #define __ac_set_isdel_true(flag, i) (flag[i>>4]|=1ul<<((i&0xfU)<<1)) static const double __ac_HASH_UPPER = 0.77; #define KHASH_INIT(name, khkey_t, khval_t, kh_is_map, __hash_func, __hash_equal) \ typedef struct { \ khint_t n_buckets, size, n_occupied, upper_bound; \ uint32_t *flags; \ khkey_t *keys; \ khval_t *vals; \ } kh_##name##_t; \ static inline kh_##name##_t *kh_init_##name() { \ return (kh_##name##_t*)calloc(1, sizeof(kh_##name##_t)); \ } \ static inline void kh_destroy_##name(kh_##name##_t *h) \ { \ if (h) { \ free(h->keys); free(h->flags); \ free(h->vals); \ free(h); \ } \ } \ static inline void kh_clear_##name(kh_##name##_t *h) \ { \ if (h && h->flags) { \ memset(h->flags, 0xaa, ((h->n_buckets>>4) + 1) * sizeof(uint32_t)); \ h->size = h->n_occupied = 0; \ } \ } \ static inline khint_t kh_get_##name(const kh_##name##_t *h, khkey_t key) \ { \ if (h->n_buckets) { \ khint_t inc, k, i, last; \ k = __hash_func(key); i = k % h->n_buckets; \ inc = 1 + k % (h->n_buckets - 1); last = i; \ while (!__ac_isempty(h->flags, i) && (__ac_isdel(h->flags, i) || !__hash_equal(h->keys[i], key))) { \ if (i + inc >= h->n_buckets) i = i + inc - h->n_buckets; \ else i += inc; \ if (i == last) return h->n_buckets; \ } \ return __ac_iseither(h->flags, i)?
h->n_buckets : i; \ } else return 0; \ } \ static inline void kh_resize_##name(kh_##name##_t *h, khint_t new_n_buckets) \ { \ uint32_t *new_flags = 0; \ khint_t j = 1; \ { \ khint_t t = __ac_HASH_PRIME_SIZE - 1; \ while (__ac_prime_list[t] > new_n_buckets) --t; \ new_n_buckets = __ac_prime_list[t+1]; \ if (h->size >= (khint_t)(new_n_buckets * __ac_HASH_UPPER + 0.5)) j = 0; \ else { \ new_flags = (uint32_t*)malloc(((new_n_buckets>>4) + 1) * sizeof(uint32_t)); \ memset(new_flags, 0xaa, ((new_n_buckets>>4) + 1) * sizeof(uint32_t)); \ if (h->n_buckets < new_n_buckets) { \ h->keys = (khkey_t*)realloc(h->keys, new_n_buckets * sizeof(khkey_t)); \ if (kh_is_map) \ h->vals = (khval_t*)realloc(h->vals, new_n_buckets * sizeof(khval_t)); \ } \ } \ } \ if (j) { \ for (j = 0; j != h->n_buckets; ++j) { \ if (__ac_iseither(h->flags, j) == 0) { \ khkey_t key = h->keys[j]; \ khval_t val; \ if (kh_is_map) val = h->vals[j]; \ __ac_set_isdel_true(h->flags, j); \ while (1) { \ khint_t inc, k, i; \ k = __hash_func(key); \ i = k % new_n_buckets; \ inc = 1 + k % (new_n_buckets - 1); \ while (!__ac_isempty(new_flags, i)) { \ if (i + inc >= new_n_buckets) i = i + inc - new_n_buckets; \ else i += inc; \ } \ __ac_set_isempty_false(new_flags, i); \ if (i < h->n_buckets && __ac_iseither(h->flags, i) == 0) { \ { khkey_t tmp = h->keys[i]; h->keys[i] = key; key = tmp; } \ if (kh_is_map) { khval_t tmp = h->vals[i]; h->vals[i] = val; val = tmp; } \ __ac_set_isdel_true(h->flags, i); \ } else { \ h->keys[i] = key; \ if (kh_is_map) h->vals[i] = val; \ break; \ } \ } \ } \ } \ if (h->n_buckets > new_n_buckets) { \ h->keys = (khkey_t*)realloc(h->keys, new_n_buckets * sizeof(khkey_t)); \ if (kh_is_map) \ h->vals = (khval_t*)realloc(h->vals, new_n_buckets * sizeof(khval_t)); \ } \ free(h->flags); \ h->flags = new_flags; \ h->n_buckets = new_n_buckets; \ h->n_occupied = h->size; \ h->upper_bound = (khint_t)(h->n_buckets * __ac_HASH_UPPER + 0.5); \ } \ } \ static inline khint_t kh_put_##name(kh_##name##_t *h, khkey_t key, int *ret) \ { \ khint_t x; \ if (h->n_occupied >= h->upper_bound) { \ if (h->n_buckets > (h->size<<1)) kh_resize_##name(h, h->n_buckets - 1); \ else kh_resize_##name(h, h->n_buckets + 1); \ } \ { \ khint_t inc, k, i, site, last; \ x = site = h->n_buckets; k = __hash_func(key); i = k % h->n_buckets; \ if (__ac_isempty(h->flags, i)) x = i; \ else { \ inc = 1 + k % (h->n_buckets - 1); last = i; \ while (!__ac_isempty(h->flags, i) && (__ac_isdel(h->flags, i) || !__hash_equal(h->keys[i], key))) { \ if (__ac_isdel(h->flags, i)) site = i; \ if (i + inc >= h->n_buckets) i = i + inc - h->n_buckets; \ else i += inc; \ if (i == last) { x = site; break; } \ } \ if (x == h->n_buckets) { \ if (__ac_isempty(h->flags, i) && site != h->n_buckets) x = site; \ else x = i; \ } \ } \ } \ if (__ac_isempty(h->flags, x)) { \ h->keys[x] = key; \ __ac_set_isboth_false(h->flags, x); \ ++h->size; ++h->n_occupied; \ *ret = 1; \ } else if (__ac_isdel(h->flags, x)) { \ h->keys[x] = key; \ __ac_set_isboth_false(h->flags, x); \ ++h->size; \ *ret = 2; \ } else *ret = 0; \ return x; \ } \ static inline void kh_del_##name(kh_##name##_t *h, khint_t x) \ { \ if (x != h->n_buckets && !__ac_iseither(h->flags, x)) { \ __ac_set_isdel_true(h->flags, x); \ --h->size; \ } \ } /* --- BEGIN OF HASH FUNCTIONS --- */ /*! @function @abstract Integer hash function @param key The integer [uint32_t] @return The hash value [khint_t] */ #define kh_int_hash_func(key) (uint32_t)(key) /*! 
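 @discussion Explanatory sketch of the collision handling used by
 kh_get/kh_put above (added commentary, not upstream text): lookups use
 double hashing with step inc = 1 + k % (n_buckets - 1). Because n_buckets
 is drawn from the primes in __ac_prime_list, the probe sequence visits
 every bucket exactly once before wrapping around. For example, with
 n_buckets = 11 and k = 25: i = 25 % 11 = 3 and inc = 1 + 25 % 10 = 6,
 giving the probe order 3, 9, 4, 10, 5, 0, 6, 1, 7, 2, 8. The "last"
 sentinel detects a completed cycle and ends the search.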
@function @abstract Integer comparison function */ #define kh_int_hash_equal(a, b) ((a) == (b)) /*! @function @abstract 64-bit integer hash function @param key The integer [uint64_t] @return The hash value [khint_t] */ #define kh_int64_hash_func(key) (uint32_t)((key)>>33^(key)^(key)<<11) /*! @function @abstract 64-bit integer comparison function */ #define kh_int64_hash_equal(a, b) ((a) == (b)) /*! @function @abstract const char* hash function @param s Pointer to a null terminated string @return The hash value */ static inline khint_t __ac_X31_hash_string(const char *s) { khint_t h = *s; if (h) for (++s ; *s; ++s) h = (h << 5) - h + *s; return h; } /*! @function @abstract Another interface to const char* hash function @param key Pointer to a null terminated string [const char*] @return The hash value [khint_t] */ #define kh_str_hash_func(key) __ac_X31_hash_string(key) /*! @function @abstract Const char* comparison function */ #define kh_str_hash_equal(a, b) (strcmp(a, b) == 0) /* --- END OF HASH FUNCTIONS --- */ /* Other necessary macros... */ /*! @abstract Type of the hash table. @param name Name of the hash table [symbol] */ #define khash_t(name) kh_##name##_t /*! @function @abstract Initiate a hash table. @param name Name of the hash table [symbol] @return Pointer to the hash table [khash_t(name)*] */ #define kh_init(name) kh_init_##name() /*! @function @abstract Destroy a hash table. @param name Name of the hash table [symbol] @param h Pointer to the hash table [khash_t(name)*] */ #define kh_destroy(name, h) kh_destroy_##name(h) /*! @function @abstract Reset a hash table without deallocating memory. @param name Name of the hash table [symbol] @param h Pointer to the hash table [khash_t(name)*] */ #define kh_clear(name, h) kh_clear_##name(h) /*! @function @abstract Resize a hash table. @param name Name of the hash table [symbol] @param h Pointer to the hash table [khash_t(name)*] @param s New size [khint_t] */ #define kh_resize(name, h, s) kh_resize_##name(h, s) /*! @function @abstract Insert a key to the hash table. @param name Name of the hash table [symbol] @param h Pointer to the hash table [khash_t(name)*] @param k Key [type of keys] @param r Extra return code: 0 if the key is present in the hash table; 1 if the bucket is empty (never used); 2 if the element in the bucket has been deleted [int*] @return Iterator to the inserted element [khint_t] */ #define kh_put(name, h, k, r) kh_put_##name(h, k, r) /*! @function @abstract Retrieve a key from the hash table. @param name Name of the hash table [symbol] @param h Pointer to the hash table [khash_t(name)*] @param k Key [type of keys] @return Iterator to the found element, or kh_end(h) if the element is absent [khint_t] */ #define kh_get(name, h, k) kh_get_##name(h, k) /*! @function @abstract Remove a key from the hash table. @param name Name of the hash table [symbol] @param h Pointer to the hash table [khash_t(name)*] @param k Iterator to the element to be deleted [khint_t] */ #define kh_del(name, h, k) kh_del_##name(h, k) /*! @function @abstract Test whether a bucket contains data. @param h Pointer to the hash table [khash_t(name)*] @param x Iterator to the bucket [khint_t] @return 1 if containing data; 0 otherwise [int] */ #define kh_exist(h, x) (!__ac_iseither((h)->flags, (x))) /*! @function @abstract Get key given an iterator @param h Pointer to the hash table [khash_t(name)*] @param x Iterator to the bucket [khint_t] @return Key [type of keys] */ #define kh_key(h, x) ((h)->keys[x]) /*!
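 @discussion A guarded-lookup sketch using the accessors defined here (an
 illustrative addition; the table name "m32" and key 42 are arbitrary):

     KHASH_MAP_INIT_INT(m32, int)
     ...
     khiter_t k = kh_get(m32, h, 42);
     if (k != kh_end(h)) {
         int v = kh_val(h, k);
         // use v
     }

 Always compare against kh_end(h) (or test kh_exist when iterating) before
 touching kh_key/kh_val; reading a missing or deleted bucket returns
 uninitialized memory.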
@function @abstract Get value given an iterator @param h Pointer to the hash table [khash_t(name)*] @param x Iterator to the bucket [khint_t] @return Value [type of values] @discussion For hash sets, calling this results in a segfault. */ #define kh_val(h, x) ((h)->vals[x]) /*! @function @abstract Alias of kh_val() */ #define kh_value(h, x) ((h)->vals[x]) /*! @function @abstract Get the start iterator @param h Pointer to the hash table [khash_t(name)*] @return The start iterator [khint_t] */ #define kh_begin(h) (khint_t)(0) /*! @function @abstract Get the end iterator @param h Pointer to the hash table [khash_t(name)*] @return The end iterator [khint_t] */ #define kh_end(h) ((h)->n_buckets) /*! @function @abstract Get the number of elements in the hash table @param h Pointer to the hash table [khash_t(name)*] @return Number of elements in the hash table [khint_t] */ #define kh_size(h) ((h)->size) /*! @function @abstract Get the number of buckets in the hash table @param h Pointer to the hash table [khash_t(name)*] @return Number of buckets in the hash table [khint_t] */ #define kh_n_buckets(h) ((h)->n_buckets) /* More convenient interfaces */ /*! @function @abstract Instantiate a hash set containing integer keys @param name Name of the hash table [symbol] */ #define KHASH_SET_INIT_INT(name) \ KHASH_INIT(name, uint32_t, char, 0, kh_int_hash_func, kh_int_hash_equal) /*! @function @abstract Instantiate a hash map containing integer keys @param name Name of the hash table [symbol] @param khval_t Type of values [type] */ #define KHASH_MAP_INIT_INT(name, khval_t) \ KHASH_INIT(name, uint32_t, khval_t, 1, kh_int_hash_func, kh_int_hash_equal) /*! @function @abstract Instantiate a hash set containing 64-bit integer keys @param name Name of the hash table [symbol] */ #define KHASH_SET_INIT_INT64(name) \ KHASH_INIT(name, uint64_t, char, 0, kh_int64_hash_func, kh_int64_hash_equal) /*! @function @abstract Instantiate a hash map containing 64-bit integer keys @param name Name of the hash table [symbol] @param khval_t Type of values [type] */ #define KHASH_MAP_INIT_INT64(name, khval_t) \ KHASH_INIT(name, uint64_t, khval_t, 1, kh_int64_hash_func, kh_int64_hash_equal) typedef const char *kh_cstr_t; /*! @function @abstract Instantiate a hash set containing const char* keys @param name Name of the hash table [symbol] */ #define KHASH_SET_INIT_STR(name) \ KHASH_INIT(name, kh_cstr_t, char, 0, kh_str_hash_func, kh_str_hash_equal) /*!
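 @discussion Usage sketch for the string-keyed variants (an added example in
 the spirit of the one at the top of this header; the table name "wc" is
 arbitrary):

     KHASH_MAP_INIT_STR(wc, int)
     ...
     int ret;
     khash_t(wc) *h = kh_init(wc);
     khiter_t k = kh_put(wc, h, "apple", &ret);
     if (ret) kh_val(h, k) = 0;
     ++kh_val(h, k);

 The table stores the const char* itself, not a copy, so every key must
 outlive the table (strdup keys and free them yourself if necessary).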
@function @abstract Instantiate a hash map containing const char* keys @param name Name of the hash table [symbol] @param khval_t Type of values [type] */ #define KHASH_MAP_INIT_STR(name, khval_t) \ KHASH_INIT(name, kh_cstr_t, khval_t, 1, kh_str_hash_func, kh_str_hash_equal) #endif /* __AC_KHASH_H */ bx-python-0.8.13/test_data/000077500000000000000000000000001415666465100155215ustar00rootroot00000000000000bx-python-0.8.13/test_data/bbi_tests/000077500000000000000000000000001415666465100174775ustar00rootroot00000000000000bx-python-0.8.13/test_data/bbi_tests/make_expectation.sh000077500000000000000000000006451415666465100233630ustar00rootroot00000000000000#!/bin/sh BW=$1 REGIONS=$2 cat $REGIONS | while read chr start end n; do \ echo $chr $start $end $n mean `bigWigSummary -type=mean $BW $chr $start $end $n`; echo $chr $start $end $n min `bigWigSummary -type=min $BW $chr $start $end $n`; echo $chr $start $end $n max `bigWigSummary -type=max $BW $chr $start $end $n`; echo $chr $start $end $n std `bigWigSummary -type=std $BW $chr $start $end $n`; done; bx-python-0.8.13/test_data/bbi_tests/test.bw000066400000000000000000002223461415666465100210210ustar00rootroot00000000000000&(rSkrP@=?P{  ' G~?hP@ (!D@xchr17 x-؍{ff;d-T(M޲lY2Ρ!{YeoJF?z|ssADDDY u.R.r\U\5\︎빁[=q;w̙ 0]#_?dԉ 0(8O$Otٳrgx9/"/w/+!'u:o&o9Nn-{?o>v=OS>8|\gDdc160.1>0!110i3LL: Ft:=30c3_әY٘3s2s32 `$͘&d o G_".KK-ge9g+z֕XUGUw5w쫩k6ee=yA`ݐؘ y7-ؒcbnvl;?agvaWvcw`Oboa_c@~\ =C88#8 hXs88S88_rgf9z.+~o.2. ~˕\\õ븞sw2 wq7?s^~Aa x'xx>óxx;e^U^u&o6.>!_|ĿOKbDTX8xLDL$LdLLTL4LtQuzf`Ff_&fffe6fr\<|7Xb4cXo KK-aYcyV`EJ*j|5Y..>!1c6e3![#~Vl6lvlNم]ٍك=ًه}ُ9 *qsrsGs~Q1q I)i/9399s9_q>k~Å\\¥\\okqs7r7s}+6nd.?Gğp/q?D'C<#~7H0sH~/89qc9C(0!D'Yp*Y38|p |s}_o|^p)Y_W[׻RjqZ^2؞z Mmnms_n7r|`0]ۭ?۷~}yyGyy'y+ϙE}I?/+k-}|?#w<')_tNJ wlG ޳@< KDL$LdKLTLme:/3Ll= gas!MؔamŧmlɏVN$NrtgYwùgWqa5 x-TuAFRzwAnAKJCT:g[nSg{aaaa #-=,,8S7\E\%K˹+W:n&nn6߂j}1];&0K>!7D39NS<3cD?3> +˷ 23c2c32ỌLDL?tLLLi阞>33 23?hR9+gJf:ίa C2/(^%KK7ퟲy=]?“ };y#6v&n9ڒ[{6lkN_Kv4]tWvcw{쫁^O+a_cA9C99"heK Eoѡ(uqm^tXQD'q=̧sgg9z\\C_K\e\\Ur5pYs7m>i`-m 0q;wwý<o`~Ɏdt]ؕ؝=ؓ؛_s@7!ao99c9κ.<}s<NDNdNTNtLp!q1g.2. *_k븞C&nn6߸;;.{xxGx/<<8ҧygyyyyWyy7[6.>>#>uS/-"BhX8x|2>0!110)19S0%SE{:50-1=30c gn2Ydٙ0's170/1?H`A~B,",b,,R,2,r<+"+2*:k&kuOYX ؘؐMؔ?g߇%[9Ƽ-۱=`~Ɏ®}ؗ؟@{9C99"hג}Fo99c9q<ɜ©A5n>3#,<.".K\e\\uҿW:n&nn6n]п|wsrOcqIiYK.KpE_ ZKk:o&oENv5C>c>S>sKJ>[23c2cGS72>w.323 23S2cj ?/-әgf L}fffe6fg~ü g#Y! 0(+KK ˲?, J*jGкa^XDk)^d}[50oFl&lf9[%[5۰-۱=`~Ɏ®}ؗ؟@7!ao9*20ZX_"C3'I)iwz13t|7%gOe>s8n~M56~zoox-܍ۊUF^d2 !++{e لQHʹVI6!~~{sqqqq G_qq/ԡbE1Vۗ|1_—k ./+b ^]ɿWj\i7Ağb|Ct=b"n3n͸~p|w.n܃{}}W<(?Q~?SNY~/E[_9W?xM~5a&w.s=7\|',>KScR?1>-O3b&ẙO蟌Dl<'y>a~- WE/K`Iy)|g͕2/0`YX+8?Y|0W>B\/%_/ \p^u'\Q}"7ϸIomwϝ_Nn^'G1z'_xOi<cu g<^>o~\WU~ ipܔ;ϿC|q`$b2yrL!<5\hAQgqQE,'Bu<<;ρ9C=υ1`] `A,/EEx(8?%xI,O&<7ay<aXe/+`EyVWUnk5EƗ`|뚫 5lF_csMMPp4-xKl%owmx[lb{`#wή.nCbw0דyޗq`>P>#Hq#>D>OƏ)|*N?sO92?y-xF̄Y0+>gGGh?qx>D> '?}? 
>q=B\d yZ—_op|9o]*jv.)k%z}qnğoq nopްWxA~G xB?t8x~1wysE7^_W]w'7p|}sI"'"u\ӧ"~#s- Ok.]$ 2<< ϊOȟqA׃l<'>|nnsy0/3?Sߧ+,/EŰ)K+ g|޳?1$籜V'J2VUy5sy Zx8je+{}yy7 }#:777[L[8_-y+sm;b;sa'»b7>`3ܿ佱~|0‡Hq#'$,?qSTs3>ψx`Yf8O>/Rx-dÆ ڠFZFR %%)TACs8׹qwew [X,+*y'+FXXu(QjQF~Zۗp}^_Jjk־Nn&nvUo۸G{;;wK^b O!sXGyǝ'y_y~N^t~I_W*y~ ܵ߳C>cOS>_:_ : ;ᡎcXmqu&'҉u& @2ə:ek'R ̘HmOô{:Cx==3۳x>gg @v9<:uB,)^Q{1F:bq{ d)f˲\xBy]Ϋ'?eu{ ִkں?g]c}6`C6@ ~|az8G[toG'>>IO>UO9>S⏜9<= ?zY/K4hMr/ԫjF:zfEVvvz=y?A}HQ}yɠ n_Y}yy=}5oms]xcO/Ko6aw"B{ SұuWu&d"&~L~R{2\}޵d*v/ 1=f32=΢?YǓy{NK?ckY "^PB,",b A[&Y2Q,,R,2,r, J**SVg gڬYX ؐy%X7aS6cs`K N7/tka[c{v`G~ɯ؉م_+;{'{7/?p 񛠫bƌp(5E`o98c88>hwI)>lg?詜9s88 rb.R.r\U\5\u\ M-[`4wpgzޭp/ψC<#x')i_۽/RDe?׭ky~CԷmܻz}y:'|js q??c2VdWc|&p02zȞI'e2{rSߵd*{jF qjt:!ҟuFO&33tV9tNbnyt^/2Cy y#u]T:BG2^%%}tiaY`+ʺ :?u gڬ=.볁n&)ٛn/ؚmؖ<=;ztt+vbg]5:僚'{{>kGl@/ߔ|`p}Go9cX=9wz"'')?p{tgG9zs'\b_eBԫjFznFnfn۸y|wW=!}XGy~\IiYsNE^eW: }ywP?c>q/kFaQ^BCԱqߞ 퉘؞D'e2&w2TL4tQQY9}fd&ff? HV{6fgdɭx:PX z|1#ibq{ ]XeXr, +Jlz?eu`MAj]Quu=֏ ol zf9[نm=O;>*/_N]_7=ދ}׿| az8Gؿ#(c9>鉜d{zs}&gG=_ǟ1Jx-؍PTMCB"$9GUF%{dQ.d9-df$TV|\}3޸Ec#q/A¸DJcI#c,?\{|\/Eb\K͗g}9_+qjzFܤo-Vc{;q6߃{q/1v !~x ;OSxZ?g9>1q``׼McזG‰͓L|p=$u\2Y') EL04<-^F5̌³z|9y.|syy>̏ b!p|y1׼(#163ZR?4/Li/"V2̫GǼY㮮Ooc64_&4{6Do-=Vyޖzu|tż~mރ^ۼ;|!7[p#q| ̿?q|tuy8q p!.R\f/WjkZQ7fom|߉ݸGߋ_~~;̏?OoY< x/ٻW񆽛w-m?xO`\( Bu? xbg0dY|n'E(yq/T^=28OP=Wd5_,j/f7s`N-ss=q 0a88)%h4>I1Y8?<9_/bJ=O:J3`F|)=2qLdc~ef7s9[+4n>Yl>Ĺ8\g1_—e3.WJ}_k|ף|n7f} ʷv}K߭W!~G7?'i~9Isy4|c"bɣz.BLy^|ŞFb:^L:K%Gl_o2=cyu]Q^^E68Kx\I]YyYː=t9]>*jZYuY#g6d#nusBԭtk_ۨޞ;OsԎ6Eػڻ؝=`O{//c9aGeFr},s}"OdN ~nMuoS3Lgl\|zalsh^-K2]Wr{ƾV룮ʰ:Aof{[۸;:N9>t9g^PݻJ*jyjyk밮?볁n&)?ق-يvl^'~` Owf]u7~ _=u/~þ>g B,l/u1g t.2:!{Xr<+~2j8x穩k{m]uY#g6GpsrBz)ٗ\UoVߏrsۨ7qs8x_pwm;;{OQW.g/y~XG17O9? 8]%ί{k ޴?{/7|h S阸1~\' u"&'s|yR&3S:QK|itZNgfYkי9d.f|@\YPba/E΋,a/K.cu˳V+=2*:k;o1؛c6`˸}6-N?џκnyw݃ۿ=ً_7Wc=P҃9CWο֣}cc82AOt;=IONu>MO L=geJx-Çᥥ;ifD[.AD@ Q9 Hw}r=y=.aaaaK*DтmHe]!y?'sEМ&V:_e_5\|^7pgIo[֠vpo]zޣrqs><4s?>u/ t_ ]kC-<4乁:{9tŻxaнD:1ؓd:NT:5xaN N3x]F{&fffe6ei #d.q: ۋ袺.z%{ti/˹W^Wp^Q3A ҕX~;kúznX7aS{3ݜ--u+Zmu;9;{螺{ e?AKPp=#8>F8>AOԓd=EOtz&g+~9s7g B싹K;_WrW;_u\oߠ7g&[w[^> A}H08?1q}'6h15pJ_~!ow}GxOOS8սinڍl\ [.׋b.Re 쫹k &[۸;z_+{!aGx> /G]rOW9~C-?yw?C?ҏO_~_~÷w:,*d4Fg.،翎~ߞ tb&aR&cr`J: }LoPgu&fgaV{6f:s32  {n]Tu ]G޿tiaY+2ydOX*zT_CZúz>3{#ݘMy36g d+[nuvd'vfпϺnAsW[{ҽ~|>C9p=Bԣ8>F8>AOԓ8>EOtΰԳWz\~|VB^/ҋPUQx-d%TBKBE`NP:D@PCIshnP:,߹ùyov [R)+*E mpRer󻠥g] |Wp%WquYõzszS6-AƜتq;wp.n>w~0c8OOS>ó<}ܿ{xٽ+5hW~yo[;ؽ{}O3oý|7 ʹ{ohz:fx;]g~l{uP'҉uɘ)KS158OtzAgd&χ3۳0+1;sy| zB ۋ袺#p.K,c='d:RGʺ k_fe멾nq ޘMM\`K{+~l^[މ]g]ͽa{~zB v>Da9½kh~oñqo'Iz2اr}gYz6r;_\-K̾\+JZ|}-\o&[[6;;ۥws{>!ۏ>}(t>Lz$ң}c>Fg 'dlizY9ޯz{P/ҋT/˽Wp%W9_?u\ oIov[vwzwzxa1 }Ri3Ee^U^uҷ}W; Sſ_kFa葡=c»bq=3A 2IDA'S:1ɼ䑡RL^jF==30#3ά0=:<:_dL~{{AEt믘.Ώ>pTd2,r}OtVW:BG2WYWaUcϡr^[uu=g6tn&l꼙nSM[[Ouk݆my{vwd'vvE=WCԽtoGtпCas}(0~q#z zz r9>QOғ9S9#C_gr3[y{}, _s})q9s{BԫjF?G:o]>q׃D zl1} }Rҧy~y+Wk>߶]xG|'Οg|n/_~#Jɿk[E翎cw|Bű3=NLcTO:ޅsve7^>/ p{ ԃCP~iƯ9#=z u>F<~|8CN&'79%*Q?LOz9k\-bJx-ÇEER@:$%%vgAPJJJi)iSZZ:NSE|{y:yfw]5***jaȿQOȿ/EEZσEvPiqĶ}VZ¥ez9 +&bӻk:[7p;non6owqaSһޫqwB')}?s真}%}Wx5b{9+]Y_M޲o;waS_=O_g`Kix:{ ОItR&'gϓRҩOôL^z¬0{9tNKfʼ~~. B0؋b븸DzKF ~^Һ.r V`EtY]oWz9fk8-]}ut4cX]".)Wހxܻ!&~O7u wK{+ln^;ݞx?tށwҝ=]tWvs;{#=݋Ou3=8CagOa>RңK̶X_qw{9>9:>=StΈؽr35gq6p.q>."..2.\\\õ\[۸;Nn}0{8b5w&qɜ')a%:{uQgbffaV^f6+n\<_y,Bf{Q]Y"& ^7_yi]eY.bpytEVr27Y՝`M弖|=ԱGc꺬g b'ޱˆl^c݄M{->[ ۲Pwu'YwaWvsޝ={c?ѽto)~o?qȤj voazG ~W;:x z{d=EO4N f,uoBRXa_i_W5z-{^6pb>ۤ7sm~w0̝wq}^~Ar>£<>_ݿ[ܻ?y?~~_0* 1=NDL$%tr˔:{uVczfpQgbffaVl:HP=#iG/} ڿP z"'q2p*yt=۝ɯ9lα8?mo"o} r{\Ww*s}-9_7Bmo6nwCC]z^ ?o=oyP1}{? 
?O3淿 9/袱묘.K_ץtie9Uu%VWUk&r^Kf{4cbXu^O볁koFƺ{Mnl鼕~_fe;bށىم]{b=r7?a_cgm C0=\Пԣ|>F/RX~|'z'szsg?k-Rs9Fx-kK(D^Y [EdF,;{ʪs-ɨ([ϫy={\ݢ׎Z?G%ImDK'&T/˹+ҫu.ks&n \dkxmmLlQxk7 9w+ =z> {GQ}L })}g{}?x_E^uX7{?{2nY;)|5/ny oבОȽLbOs?Yϻܽ:~A~*{j{eg;3y}Y9tNbnyt^c~k ۋ+.j^L,iSbi[hgVt^YoX^kZmѱ:躬gD\7ԍ7aS{36g d+\Vuvd'gޝ{x{z9>ˏ18AazOy?F?ң9cx=QOғN4_p}Y|q}^E\|^e\|k}}^õ\|ޠ7Mz_ٷp}D١vqz'wq=z/ٿ҇y~qL俛꿜/٣b#Ƌ '҉Ğ1urB/V4-NL×233<033{Y::py.yu>_k,΋udl^^o 2XtEVWf۬jaMbmXuXX߽lho]ߗ&)ߜ--ينmَ.];S`vv=u|wxك=ً?q>'lr;O>*Zq1rǻ?AOԓd=EOngYmyo_r;_e\n_Wr}5x:_g_ zެW­m}k~ðqiߥw=zoy 6A}?£- y!1e6e36g d+fe;gvd'v{|]ؕC`Oboa_~~Ϗ99E~s/yYr}1xn^z%Kh$ *QrS *.0.2 266:[:>1">B*BFg3EFJ;JNCDNQLx}{XwBB Q '*A92XȄ5VdBrhN'L9ͩl^E|]{>=^::'(h௧u(.1s@O=uNcV|s%no%t*+mΌfԛ|wBpkt0 W;;+̞'|Ә?'7e76Yy'Nr O=桃EVcҫM>`(>.岼V|v)~aVƥ$8VÕA)X|<0KGaFy1`N}g7}>`1̻YDIfU7pNlmi8؁O8XWN:O&zǿ V2Or,ɫox0[^Ae~3Bz+~|{l?^>i[+x3t+*\Q  KtY`)y%+gL,Ikh|3KЇ/Nh=hgd t0ݾV߾ [M43eC<5?``6{;vKkt0 -/; 쫮_۴gFc=[.`}3\Z7,yq}c#H͒ NjҕGF+a~צS`/J_vܡ&l0~J9f|~=U-2zo=ŢGܙ߷㋶N?oJeKg;0]Vi,G)9X> k ìHv73xGݥk4p<:I3t1ĶFk+e]_`KJt*uc|tLl3:* YS>`v?=&Yc3?\㏭`\MJ6郅ک{P^',m`.E?0E6gǍϑO;u)ߜOb旷O;!gm8t2.ߒŃة_֥R6:4[fɼ .[nJ?dn m 0{u9SG,_ޣH:X20ͷؿ ;KӰpUƗG!/`y86xI](GԢU0oI^͡T^?羔f_)aryxe~9suxqMb+$|^.<.``H,A>]郥4_\tW6]n2Ke2?%}?M;P:^;Jƃbz$r@/{Ƽn3[ĭttĩzVD`?yݖX>0sm<}[ rJlfRs>;3g*.8OTϢcB寞=B1";F{A .~Rg0}tgEkNroߨv.쒣z~,j|ƾ1X)v8_rXK@oЙыHx0 'uN`NDI=&pto3X(W3vy~E9rX?D'u[Gǥc{I`9\utV̼|L8]`e\R?ҧjXf.?,Zp=b|- srVUt0ogYSmqa_Fx0gu͌*<b#yk`ɜ_Pws`Ww__]c>i/aC_36`^YhqOV<)d Ư83ƃ;eamK_1+~}!_6СbXE] zP,v3,?X8;1$fn;BR\'hdDW7h']KD?=n1YQ>p `ϸ{m,N_;: t0o61}f3@jg POᯫs@ d> * Ω5=y`g~qc<պo4פD` + qla:Xu=V-̚o都gCO9(]]ikr6h(3?i"Οc\=۾_1[Wu [^/Fܸv-QŘD+⪞?ޓžۣE2/Qkջ:l?%Eo#Zҹl!j$??vU=>/o"J9b vC Tf`עjT#K/,<*S[&07כKZ,t~ol䆥$a`QLKaxKǥr 5xK[b`a̿l nKw+d;9K\>_`nYrCϵSNcUpky+2X- T/ˏLk-%c`Vn849\嫘Phj͋˅]Y1inP~]Eop7u b¿X,2ph?RR;z&mbPt|榋])t??r<+tgnΟlj6Kb#_[\v:?0^˸raiukM#X"~Ɣkro/>Þ'gi~k#;@z8z:X:×#ͣ+`YuK5X$_cNj=K`9ޏ<ҼwY`\gr:d~0__&`AtǍ2oyʧci`>={[B3vov7}d>%\D,lrtj"+yU.O^ֆDz -/qRpKX󢖛hP)lDʟ{TWKkpt\b `yd؉v]TϏ: t Q?'V̏9:YbH::L=?\?o?6]]ߙ /~ ׷(bI5o;X] ̔I/;p~x?.IHa@.~r||E V x4| 7,6i[Gl: '`ae+a~h?1kh[E%͇1࿄ִaϷw|x'ug Q=-tnWr:uOkq1$lpa,:XuOy,ƉRZX+GIMX:z;}$]b?4.17^C[vRH8=Vk:_,Ԍ7Xud N'`gYHE`\|<ﯧUP 6CoʼlͿ}+,?XLJ,Jq8לP_~+~ r++Nf%f]J.)sT?ɝ;y3zBU{?yǗ~0%7RSvVY}oj0aGC0/?ZŰ=’Yy}|s|D1ug't0?/ul&zMRoQv`,K=~;lyX_6`)^o?L-%ѧ1}|KGϯ v`uq cn/;<Ο+\d3g~fR|F": K?3 mߧ2'SU_,fؾ={'xΓG͆_4ΫE?rzD P?ق`%Wt݃[Mӆ`^r71Y|W0aa eA;?9C} in&l}XzV~0wh|+I֏'kwh?Vњ`!F>8xE2?1O 7zK?؊ΕgEt;ɥ8] jvEqm tCƌu{4(?7s]>@!_1 ouwlHkDiൊ7`KSҿa-_`GNY+M=|n :,[o;W,+_ufp3t2=Xz||5[)jƃ0!-R^3ڟ~e'i#_1o.ao|Al4\6~)Oba\_g|d+9*}KfzX ?̇,g܆ aJ t<שssE:8?8WX~s&7}8X-s;*)3!hLﳋK}. fך}\ .K`~5s3aMQ]Rt )&ޗb9~*pnδe2_ L'n1h^_l1L[_ڹ{W}7|ׄy]0̔X/ѭf(Ql3`.ICe>2wJoݛjX rX>.]byY|Ҕo1;*oJ ,+ߍ']_}e!ꗂVϫ`^rQ:@c>燐ߤ3S9`'7sg./6RP?akt?vM-vpϟ?\mc+ɼ ڿ>'x,_ :ob̵OHܥٹxZUigzǢIM ?b>znawYKse?= |&_g+^钋 6|\ɧuԵFydlpy'߁0O?L^}X&O%,|sD0,xvK.8;G_`ϸ;cG4:n:Sp+:)6[aF2J̆:XacH+ T߂ޫ5,{o,#N|) ϡhE3\♏ͬW9a>XZz|HR5(]&iA h$*Qr*Qkx}x]{PWD(-Ry)>(Q0QXL>d)"2hPHAE@nYEWkuV֭k{9 d3~'de21$Ujx2" L; =gQb뙥 L\Bqܺxk賉w$ܔ=6los3 5Psf]. L6"A[ 9ӻAx]O 5MxMː%Uo>+T|s |5sYӇ ӼRx9̹{#&G겒U=Ose0?tMWTGtɣ2 ܪg2 |upn>wR!m87m}J, tpn|zr8niPQ0W̟ t_E܌=~qtfpnv+Ty|Xa~D^1zPf̿ӳ0f?ǿ>k:i~6gQs-xdVUL/\屜rWUFӔ|nfkAs%0fEǭG5k M'wc卉b̪La9^zم+ש~̟YLU,/gf^0`ɳ{̟m't .;S0f*c/ll).}:{?Y0ffsOwlξ6gk?ץ-#6-ϬZj5P=*a_Ո|d?3Ɗ,g#Wa̺;qfTc nJ"xDo^oQ1ԧNթRg"?Y.ytf뷺K0f6؀Oppl<Ѥ5?32|HoA/֭] HnL7U-SM}íZ{i$?,=ws w'~Or_i.Y5>gf-OHx<ܟk0f:f?&ŭZ7onf'Vw1fbE2?7SNլg-cTfIߩ77L+=hT@n?:Nf 9;g&o"j'}.|}nJ"|i^[ҁ>7oN" i?MpWui'F&;&|MasSݗwѾҷ^m} ӓCQI3YesR^K6yE ] WLj)n9:2禧{{O)G~ wMȇ7fOסM.rKhwSDHM)d4|TkՔ`$h$ *R*RA~xc```X@+wƲO6akG830@䖚@ mg1T⽱"[j _pp˼9yR;$KX~ᓎyRgY`G?i$@RA};:h -ȳX"[,:3ѱ}Z3X Jh$*R*Rxc```Xv@CmՁ'݋z}e. 
A gϲ{ܹms {#h$*R?*RCxc```XU Ulǰ)Qvh$*z *z '&bx-python-0.8.13/test_data/bbi_tests/test.expectation000066400000000000000000001755151415666465100227410ustar00rootroot00000000000000chr1 10000 20000 10 mean -0.175576 -0.0540093 -0.0568922 -0.0365033 0.0361129 0.0064466 0.036949 0.0766383 0.0435181 0.0155475 chr1 10000 20000 10 min -2.525 -3.84387 -3.91 -3.735 -3.61157 -3.594 -3.666 -1.7313 -2.819 -3.099 chr1 10000 20000 10 max 0.0508425 0.289 0.289 0.184 0.184 0.184 0.184 0.184 0.184 0.184 chr1 10000 20000 10 std 0.700141 0.529165 0.546035 0.53675 0.441437 0.467338 0.435692 0.355528 0.410757 0.454092 chr1 10000 12000 10 mean n/a n/a n/a n/a -0.164968 -0.194169 -0.0533029 0.00390226 -0.00226906 -0.0365958 chr1 10000 12000 10 min n/a n/a n/a n/a -2.525 -2.45895 -2.45895 -2.525 -2.21678 -3.84387 chr1 10000 12000 10 max n/a n/a n/a n/a 0.0508425 0.0508425 0.0508425 0.271 0.271 0.11689 chr1 10000 12000 10 std n/a n/a n/a n/a 0.679479 0.724087 0.478575 0.410891 0.427236 0.511289 chr1 10000 20000 100 mean n/a n/a n/a n/a n/a n/a n/a n/a n/a -0.163546 -0.272118 -0.127111 -0.116169 0.0202627 -0.0243522 0.0349913 -0.0054408 -0.00101548 -0.0307148 -0.0374406 -0.0024669 -0.034042 -0.0343892 -0.0199075 -0.0406878 -0.0322236 -0.080479 -0.235991 -0.0897865 -0.041593 0.019942 -0.0608448 0.0144338 -0.0348468 0.0195101 -0.0555526 -0.0892809 0.011672 -0.173894 -0.0849444 0.0718796 0.0608432 0.0313183 0.0439315 -0.0242363 0.03288 0.0318241 0.122005 0.0879138 0.00102565 0.0334753 -0.00263047 -0.103023 -0.0275437 0.0958326 0.0147229 -0.0629772 0.00541821 0.062474 0.069069 -0.0389339 0.0206174 0.0278476 -0.0182009 0.00568736 0.000397305 0.13104 0.105984 -0.00662042 0.143469 0.111045 0.0322849 0.11452 0.0865186 0.055436 0.141388 0.103004 -0.00350213 0.053879 0.10582 0.0983312 -0.0351239 0.10085 0.110439 0.0451646 -0.00949231 0.0487825 0.042197 -0.00328549 -0.00851082 -0.0437525 0.0618735 -0.0315407 -0.00756448 -0.0523631 0.0432315 0.0228745 0.106009 0.0299314 0.0473805 chr1 10000 20000 100 min n/a n/a n/a n/a n/a n/a n/a n/a n/a -2.525 -2.45895 -2.45895 -2.45895 -2.45895 -2.34887 -2.525 -2.08469 -1.95259 -2.17275 -3.84387 -3.84387 -2.68667 -2.02541 -2.02541 -1.99235 -1.99235 -3.91 -2.25685 -1.99235 -1.99235 -1.45149 -3.735 -1.63664 -3.45728 -1.5132 -1.57492 -1.63664 -2.90183 -1.6675 -3.61157 -2.76106 -1.45214 -1.63063 -1.45214 -2.0471 -3.41551 -3.41551 -1.21416 -2.76106 -3.594 -2.82937 -3.173 -3.173 -1.42842 -1.45485 -1.45485 -1.82491 -1.45485 -1.50772 -1.50772 -1.78647 -2.11994 -3.666 -1.6349 -1.6349 -1.51364 -1.27112 -1.57427 -2.81718 -1.51364 -1.51364 -1.499 -1.37973 -1.4725 -1.499 -1.4725 -1.44599 -1.45924 -1.499 -1.499 -1.7313 -1.56578 -1.21109 -1.30568 -1.58943 -1.61307 -2.819 -1.7313 -1.80224 -2.74806 -2.74806 -1.57383 -1.59968 -2.37519 -3.099 -1.65138 -1.65138 -1.21192 -2.8405 -1.59968 chr1 10000 20000 100 max n/a n/a n/a n/a n/a n/a n/a n/a n/a 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.271 0.271 0.11689 0.11689 0.11689 0.11689 0.289 0.289 0.090622 0.090622 0.090622 0.090622 0.090622 0.090622 0.090622 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.0947559 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 chr1 10000 20000 100 std n/a n/a n/a n/a n/a n/a n/a n/a n/a 0.677135 0.822194 
0.62085 0.612407 0.231852 0.328151 0.491656 0.41591 0.426687 0.488369 0.496677 0.593145 0.601794 0.47278 0.437521 0.482739 0.4581 0.611283 0.727005 0.555172 0.508856 0.437795 0.651825 0.450452 0.5853 0.446921 0.533572 0.562739 0.507615 0.641553 0.631814 0.429699 0.385442 0.442343 0.39576 0.474238 0.416927 0.494794 0.257982 0.400201 0.551855 0.468543 0.482704 0.609168 0.474814 0.307805 0.433662 0.555056 0.445149 0.36969 0.360304 0.517681 0.426742 0.515125 0.511975 0.462954 0.466611 0.236385 0.315068 0.519473 0.201616 0.29184 0.43562 0.271206 0.321137 0.399426 0.208569 0.297388 0.471669 0.400127 0.304107 0.339124 0.487022 0.283911 0.270024 0.410538 0.474699 0.448231 0.408883 0.475978 0.480287 0.544524 0.380196 0.519048 0.412103 0.59075 0.415199 0.432567 0.261195 0.480639 0.404943 chr1 10000 12000 100 mean n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a 0.0508425 0.0508425 -0.0768488 -0.189129 -0.439008 -0.182524 -0.304712 -0.197935 -0.462124 -0.194633 -0.2882 -0.0570346 -0.0724456 -0.301409 0.0508425 -0.0702441 -0.182524 0.0508425 0.0508425 -0.55459 0.0508425 0.0508425 0.0508425 0.0508425 0.00460946 -0.0152047 -0.0152047 -0.095562 -0.0570346 0.0695559 0.127898 0.000206554 -0.013003 0.0167182 -0.0559341 0.0508425 0.0904709 0.101479 -0.155005 -0.0812521 0.0145168 0.0992772 0.10368 -0.155005 0.002408 -0.0911592 -0.0768488 0.013416 -0.00529754 -0.0702443 0.0013071 -0.0702442 0.0919354 -0.101144 -0.000301097 chr1 10000 12000 100 min n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a 0.0508425 0.0508425 -2.1067 -2.525 -2.525 -2.45895 -2.43694 -2.45895 -2.45895 -2.45895 -2.43694 -2.1067 -2.41492 -2.45895 0.0508425 -2.37089 -2.45895 0.0508425 0.0508425 -2.45895 0.0508425 0.0508425 0.0508425 0.0508425 -0.0152047 -0.0152047 -0.0152047 -2.34887 -2.1067 0.0508425 -1.79848 -1.64437 -2.525 -2.21678 -2.08469 0.0508425 0.0508425 0.094874 -1.90856 -1.90856 -1.73243 0.094874 0.094874 -1.88654 -1.95259 -2.17275 -1.84251 -1.73243 -1.95259 -1.75445 -1.90856 -1.75445 0.090622 -1.89316 -1.72784 chr1 10000 12000 100 max n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 -0.0152047 -0.0152047 0.0508425 0.0508425 0.271 0.271 0.271 0.271 0.271 0.0508425 0.0508425 0.11689 0.11689 0.11689 0.11689 0.11689 0.11689 0.11689 0.11689 0.11689 0.11689 0.11689 0.11689 0.11689 0.11689 0.11689 0.11689 0.11689 0.090622 0.090622 chr1 10000 12000 100 std n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a 0 0 0.478763 0.73063 0.99997 0.720557 0.86116 0.765755 1.01076 0.755768 0.830521 0.482441 0.551361 0.862571 0 0.541516 0.720557 0 0 1.07612 0 0 0 0 0.0310529 0 0 0.531375 0.482441 0.0585867 0.45454 0.581741 0.639311 0.533446 0.47752 0 0.0253547 0.0103511 0.643384 0.567666 0.411522 0.00903517 0.0110658 0.645679 0.46029 0.614762 0.556941 0.411077 0.458617 0.536119 0.449653 0.543163 
0.0058737 0.590632 0.40662 chr1 10000 20000 1000 mean n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a 0.0508425 0.0508425 0.0508425 0.0112142 -0.164912 -0.206742 -0.171516 -0.671274 -0.206741 -0.415891 0.0508425 -0.411489 -0.197936 0.0508425 -0.446713 -0.219951 -0.704298 -0.200137 -0.189129 -0.41369 -0.162711 0.0508425 -0.164912 -0.195734 0.0508425 -0.440108 -0.162711 0.0508425 0.0508425 0.0508425 -0.191331 -0.200137 -0.164912 0.0508425 0.0508425 0.0508425 0.0508425 -0.909043 -0.200137 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0244236 -0.0152047 -0.0152047 -0.0152047 -0.0152047 -0.0152047 -0.00199526 -0.189129 -0.164912 0.0508425 0.0508425 0.0882693 0.0310285 0.224767 0.136704 -0.136291 -0.237564 0.211558 -0.0174062 0.0508425 0.0508425 -0.162711 0.0508425 0.0508425 0.0772615 0.10368 0.0992772 0.10368 -0.257378 -0.0526318 0.10368 -0.266185 -0.0812515 0.110285 0.0992772 0.0992772 0.101479 0.105882 -0.248571 -0.061438 -0.103268 0.108084 0.10368 -0.285999 -0.0944612 -0.0592364 -0.0746468 0.101479 -0.109872 0.0992773 -0.0592366 -0.081252 -0.0988646 0.101479 -0.081252 -0.0592364 0.0932488 0.090622 -0.0945312 -0.107756 0.090622 -0.0912242 -0.511124 0.090622 -0.0912242 0.090622 -0.117675 0.090622 0.090622 0.222874 0.269162 -0.253234 0.0873158 -0.27307 -0.0284048 0.222874 0.0278022 -0.107756 -0.140819 -0.173882 0.0410276 0.0707842 -0.117675 -0.319359 0.090622 0.090622 -0.120981 0.090622 -0.0746932 -0.226782 0.090622 0.090622 0.090622 -0.0680802 -0.114368 0.090622 -0.0680802 -0.0680802 0.090622 -0.120981 0.090622 -0.296214 0.090622 0.090622 0.090622 -0.438387 -0.0912242 0.090622 0.090622 -0.117675 -0.0978372 0.090622 -0.117675 0.090622 -0.0680802 -0.107756 -0.0945312 0.090622 -0.0680802 -0.101143 -0.0680802 0.090622 0.090622 0.090622 -0.0945312 0.090622 -0.408629 0.090622 -0.0680802 -0.494593 0.090622 -0.319358 0.090622 -0.319358 0.090622 -0.259846 -0.233395 -0.114368 -0.319359 -0.686359 -0.107756 -0.474756 -0.28299 0.090622 0.0773968 -0.107756 0.090622 -0.107756 0.090622 -0.259846 -0.478062 0.090622 0.090622 0.090622 -0.0746932 -0.117675 -0.256539 -0.0383235 -0.0011127 0.0142796 -0.278874 0.180914 0.0081078 0.168571 0.0358806 0.0327948 0.0327948 -0.0937244 0.177828 -0.0937246 -0.118411 0.0420518 0.171657 -0.349848 0.0420524 -0.127668 0.171657 -0.121496 -0.349849 0.171657 0.0111938 -0.288131 -0.306647 0.0420518 0.0451376 0.0358806 0.168571 0.03588 0.0451376 0.029709 0.165485 -0.130754 -0.39305 0.171657 -0.189385 -0.278874 0.0420518 -0.149269 0.171657 0.03588 0.168571 0.0389658 0.0327948 0.177828 -0.275788 0.168571 0.0358806 0.165485 0.0050226 0.020451 -0.109154 0.171657 -0.368363 -0.152355 0.0482234 -0.0998962 -0.315903 0.174743 0.0266226 -0.106067 0.168571 0.0050226 0.171657 -0.118411 0.0451376 0.0358802 -0.130754 -0.263445 -0.235672 -0.093725 -0.115326 -0.346762 0.0266226 0.174743 0.168571 -0.11224 -0.0937244 -0.405394 0.174743 0.165485 0.171657 0.0019368 0.174743 -0.140012 -0.254188 -0.248016 -0.167784 -0.294303 -0.751005 0.0358806 -0.007321 -0.130754 -0.13384 -0.433165 0.171657 -0.084467 -0.121497 -0.0844674 0.168571 -0.11841 -0.204814 0.0144358 0.175076 0.0203854 0.0025374 0.175076 0.172101 0.169126 0.0322848 0.175076 -0.12538 0.005512 -0.238422 
-0.122404 0.169126 0.0084868 0.0174106 0.169126 0.160202 0.175076 0.169126 0.0174114 0.0114618 0.166151 0.0322848 0.17805 0.172101 -0.146203 -0.131329 0.169126 -0.0956314 -0.00638721 0.0471594 -0.0956314 0.172101 0.0263356 0.0293106 0.02931 0.169126 -0.101581 -0.0034124 0.10963 0.0947559 0.0947559 -0.0926567 -0.113481 0.0947559 0.0947559 -0.324691 -0.0986067 -0.0867067 0.0947559 0.0947559 0.0947559 0.0947559 0.175076 0.0174106 0.172101 -0.134303 -0.12538 0.0055116 0.0293106 -0.0986064 -0.18785 -0.11348 0.0441846 0.0114618 0.0412098 0.0412098 0.17805 0.169126 0.0471594 0.169126 0.0322852 0.172101 0.175076 0.0501342 0.172101 0.169126 0.17805 0.172101 0.0412092 0.0382344 0.172101 0.172101 0.169126 0.172101 0.172101 0.172101 -0.110506 -0.262221 0.160202 0.166151 0.169126 -0.128354 -0.49723 0.0501342 -0.101582 0.0263356 0.0352596 0.169126 0.166151 0.169126 -0.101529 0.162854 0.162854 0.0227586 0.0333316 0.162854 0.15228 -0.453037 0.0333312 -0.0406811 0.0227586 0.173427 0.16814 -0.11205 -0.11205 0.162854 -0.138483 -0.0961902 0.00689821 -0.466254 -0.11205 -0.262719 -0.0961908 -0.114694 0.16814 -0.38431 -0.178132 0.162854 0.157567 0.16814 0.0201152 0.162854 0.157567 0.025402 0.157567 0.0227586 -0.225713 -0.786094 0.0333318 0.157567 0.009542 0.0333318 0.146994 0.00425521 0.16814 0.15228 0.141707 0.16814 0.00954181 0.0227586 -0.246859 0.0227586 0.15228 -0.001031 0.00425521 -0.143769 0.0227586 0.162854 0.157567 0.0042556 -0.260075 -0.0036752 0.0148286 -0.138484 0.0359752 -0.00103139 0.0333312 -0.109407 -0.233642 0.173427 -0.249502 -0.00103139 0.0439048 -0.238929 0.157567 0.0439048 -0.231 0.178713 0.162854 0.162854 -0.164916 0.162854 0.157567 0.0333312 0.0386182 0.162854 0.0333318 0.0333312 0.0227586 0.0386178 0.0042548 0.0280446 0.0386182 0.16814 0.0439048 0.0095422 0.16814 0.0333316 0.0201152 0.16814 -0.154343 0.162854 0.162465 -0.0009215 0.0293935 -0.473835 -0.091866 -0.097929 0.026362 0.0627401 0.0930551 -0.128244 -0.103992 0.0112045 -0.134307 0.171874 -0.122181 0.177937 0.165811 0.0930551 0.0930551 0.153685 -0.349543 0.0081735 0.0354565 0.168843 0.168843 -0.131276 -0.14037 -0.164622 -0.155527 0.16278 0.0112045 0.16278 0.014236 -0.173717 -0.128244 0.171874 -0.0069845 0.171874 -0.258598 0.0112045 -0.152496 0.0293935 0.0324255 0.0233305 0.165811 0.159748 0.029394 0.00211 -0.113087 0.0384885 0.00514151 -0.107024 0.0445515 0.0051415 -0.134308 0.0293935 0.168843 0.171874 0.165811 0.159748 0.04152 0.177937 0.171874 0.180969 0.0293935 0.0354565 0.174906 0.171874 0.0112045 0.177937 0.047583 0.165811 0.171874 0.165811 0.165811 0.171874 -0.152496 -0.091866 0.174906 -0.152497 -0.234346 -0.125212 -0.094897 -0.0948975 0.171874 0.168843 0.171874 0.171874 0.174906 0.174906 0.180969 -0.0979295 0.168843 0.168843 0.177937 0.174906 -0.000921503 0.165811 0.174906 0.174906 0.165811 0.026362 0.174906 0.0312215 0.168098 -0.000202395 0.172073 0.176049 0.160146 0.0157002 0.172073 0.164122 -0.132723 -0.155251 0.0342532 -0.0956156 0.0276268 0.0157002 -0.0995912 0.0448544 0.164122 0.172073 0.164122 0.164122 0.164122 0.168098 0.0302776 0.172073 0.168098 0.156171 0.0329272 0.176049 0.0223264 0.0289524 -0.0929658 0.172073 0.168098 0.0064232 0.172073 -0.103567 0.034253 0.172073 -0.148625 0.172073 0.0461798 0.0077488 0.176049 0.160146 0.168098 0.164122 0.168098 0.172073 0.168098 0.0501554 0.176049 0.176049 0.0103988 0.160146 0.172073 0.160146 0.168098 0.168098 0.164122 0.172073 0.164122 -0.267892 -0.108869 -0.0916402 0.0382286 -0.000201996 0.164122 0.0369032 0.0395532 0.168098 0.0130492 -0.289096 0.002448 0.0369032 0.0156998 
0.172073 0.168098 -0.0956158 0.176049 0.168098 0.168098 -0.299697 -0.104892 0.0117244 0.168098 -0.0823646 0.176049 0.168098 0.168098 0.180024 0.0342532 0.176049 0.0117244 0.160146 0.164122 0.0103988 0.0183508 0.164122 0.164122 0.168098 0.168098 -0.0194568 0.169813 -0.135216 0.155625 0.165084 -0.109205 -0.0950181 -0.239256 -0.229798 -0.0973833 0.0397617 0.165084 0.169813 0.0350326 0.0350326 0.169813 0.0350326 0.0350327 0.0255746 0.165084 0.165084 0.155625 0.160355 0.169813 0.160355 0.165084 0.0397617 0.169813 0.0350327 0.165084 0.0303037 0.0066574 0.169813 0.174542 -0.137581 0.0350327 0.0208455 0.0137508 0.0397617 0.169813 -0.139945 0.0350327 0.169813 -0.118664 -0.139945 0.0208447 -0.00280059 0.0184806 0.169813 -0.267632 0.0255746 0.165084 -0.0997473 0.0113867 0.0066576 0.0208455 0.165084 0.0161158 0.160355 0.150896 0.155625 -0.139945 0.160354 -0.0051655 0.160355 0.0161156 -0.274725 0.165084 -0.27709 0.169813 0.160354 0.160354 0.103605 -0.0477275 -0.00516539 0.169813 -0.142311 0.165084 0.160355 -0.130487 -0.243986 -0.0335404 0.0184806 -0.251079 0.165084 -0.151768 0.169813 0.0255746 0.0303037 0.0350326 0.00192851 -0.106841 0.155625 -0.402411 0.165084 -0.125758 0.0350328 0.0397618 -0.385859 0.0161164 -0.156497 0.160354 -0.0154285 0.15815 -0.0021233 -0.0021233 0.147809 -0.0124635 0.0185574 0.15815 0.0366527 0.16849 0.0159726 -0.0951843 0.16849 0.16849 -0.0072934 -0.15464 -0.273552 -0.00470819 -0.121034 -0.172736 -0.113279 0.0288977 0.0030468 -0.0021225 -0.170151 0.0935237 0.054748 0.054748 0.054748 0.054748 0.054748 -0.400219 -0.0202177 -0.102939 -0.0021233 0.0082177 -0.15981 -0.146884 0.152979 0.15815 -0.149471 -0.167566 0.0237276 -0.0150484 0.15815 0.16849 0.15815 0.152979 -0.118449 -0.0202184 0.0418228 0.152979 0.0288977 -0.242531 0.0237276 0.17366 0.16332 -0.121034 -0.0124635 0.15815 0.15815 0.15815 0.0185575 0.142639 0.15815 0.15815 0.0263125 -0.0925992 0.16849 0.16332 0.152979 0.17366 0.15815 -0.00987849 0.16849 -0.0124636 -0.0124636 -0.146885 -0.13396 0.0288977 -0.115864 -0.314913 -0.0253886 0.0237276 0.0366527 0.16849 0.16332 0.16849 0.15815 0.16332 chr1 10000 20000 1000 min n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a 0.0508425 0.0508425 0.0508425 -0.0152047 -2.1067 -2.525 -2.1067 -2.525 -2.45895 -2.45895 0.0508425 -2.37089 -2.43694 0.0508425 -2.45895 -2.45895 -2.45895 -2.45895 -2.34887 -2.43694 -2.08469 0.0508425 -2.1067 -2.41492 0.0508425 -2.45895 -2.08469 0.0508425 0.0508425 0.0508425 -2.37089 -2.45895 -2.1067 0.0508425 0.0508425 0.0508425 0.0508425 -2.34887 -2.45895 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 -0.0152047 -0.0152047 -0.0152047 -0.0152047 -0.0152047 -0.0152047 -0.0152047 -2.34887 -2.1067 0.0508425 0.0508425 0.0508425 -1.79848 0.204953 -0.873819 -1.64437 -2.525 0.204953 -2.21678 0.0508425 0.0508425 -2.08469 0.0508425 0.0508425 0.0508425 0.094874 0.094874 0.094874 -1.90856 -1.51228 0.094874 -1.90856 -1.73243 0.094874 0.094874 0.094874 0.094874 0.094874 -1.88654 -1.55631 -1.95259 0.094874 0.094874 -2.17275 -1.84251 -1.55631 -1.73243 0.094874 -1.95259 0.0728583 -1.51228 -1.75445 -1.90856 0.094874 -1.75445 -1.55631 0.090622 0.090622 -1.76091 -1.89316 0.090622 -1.72784 -3.84387 0.090622 -1.72784 0.090622 -1.99235 0.090622 
0.090622 0.090622 0.222874 -2.7528 -1.59559 -1.69478 -2.68667 0.090622 -1.79397 -1.89316 -2.1246 -2.09154 0.0244961 0.0244961 -1.99235 -1.99235 0.090622 0.090622 -2.02541 0.090622 -1.56253 -1.4964 0.090622 0.090622 0.090622 -1.4964 -1.95928 0.090622 -1.4964 -1.4964 0.090622 -2.02541 0.090622 -1.95928 0.090622 0.090622 0.090622 -1.76091 -1.72784 0.090622 0.090622 -1.99235 -1.79397 0.090622 -1.99235 0.090622 -1.4964 -1.89316 -1.76091 0.090622 -1.4964 -1.82703 -1.4964 0.090622 0.090622 0.090622 -1.76091 0.090622 -1.72784 0.090622 -1.4964 -3.91 0.090622 -2.19072 0.090622 -2.19072 0.090622 -1.76091 -1.56253 -1.95928 -1.99235 -2.25685 -1.89316 -1.89316 -1.99235 0.090622 0.0244961 -1.89316 0.090622 -1.89316 0.090622 -1.76091 -1.92622 0.090622 0.090622 0.090622 -1.56253 -1.99235 -1.79397 -1.4964 -1.46334 -1.42063 -1.5132 0.153142 -1.45149 0.153142 -1.20462 -1.20462 -1.20462 -1.20462 0.153142 -1.20462 -1.42063 -1.11205 0.153142 -3.735 -1.17376 -2.80925 0.153142 -1.42063 -1.20462 0.153142 -1.42063 -1.42063 -1.63664 -1.11205 -1.11205 -1.20462 0.153142 -1.14291 -1.11205 -1.20462 0.0914252 -1.5132 -1.45149 0.153142 -3.45728 -1.5132 -1.11205 -1.45149 0.153142 -1.14291 0.153142 -1.14291 -1.20462 0.153142 -1.42063 0.153142 -1.20462 0.153142 -1.5132 -1.2972 -1.38977 0.153142 -1.23548 -1.45149 -1.11205 -1.23548 -1.57492 0.153142 -1.2972 -1.20462 0.153142 -1.5132 0.153142 -1.38977 -1.11205 -1.11205 -1.5132 -1.2972 -1.20462 -1.14291 -1.45149 -1.63664 -1.2972 0.153142 0.153142 -1.2972 -1.20462 -2.90183 0.153142 0.0914252 0.153142 -1.5132 0.153142 -1.45149 -1.42063 -1.2972 -1.6675 -1.42063 -1.45149 -1.20462 -1.57492 -1.5132 -1.42063 -1.45149 0.153142 -1.20462 -1.38977 -1.11205 0.153142 -1.38977 -3.61157 -1.39265 0.154252 -1.39265 -1.42239 0.154252 0.154252 0.154252 -1.21416 0.154252 -2.76106 -1.45214 -1.21416 -1.45214 0.154252 -1.45214 -1.39265 0.154252 0.154252 0.154252 0.154252 -1.27365 -1.42239 0.154252 -1.21416 0.154252 0.154252 -1.63063 -1.39265 0.154252 -1.21416 -1.60088 -1.12491 -1.21416 0.154252 -1.3034 -1.27365 -1.21416 0.154252 -1.21416 -1.45214 0.0947559 0.0947559 0.0947559 -1.77937 -1.98761 0.0947559 0.0947559 -2.0471 -1.83887 -1.71987 0.0947559 0.0947559 0.0947559 0.0947559 0.154252 -1.39265 0.154252 -1.45214 -1.39265 -1.51164 -1.27365 -1.18441 -3.41551 -1.27365 -1.12491 -1.42239 -1.12491 -1.12491 0.154252 0.154252 -1.12491 0.154252 -1.15466 0.154252 0.154252 -1.12491 0.154252 0.154252 0.154252 0.154252 -1.21416 -1.21416 0.154252 0.154252 0.154252 0.154252 0.154252 0.154252 -1.39265 -2.76106 0.154252 0.154252 0.154252 -1.42239 -3.594 -1.12491 -1.21416 -1.3034 -1.21416 0.154252 0.154252 0.154252 -1.21416 0.131134 0.131134 -1.21695 -1.11122 0.131134 0.131134 -2.82937 -1.16409 -1.87778 -1.21695 0.131134 0.131134 -1.21695 -1.21695 0.131134 -1.40198 -1.21695 -1.42842 -3.173 -1.21695 -1.45485 -1.16409 -1.45485 0.131134 -1.29625 -1.56058 0.131134 0.131134 0.131134 -1.29625 0.131134 0.131134 -1.13765 0.131134 -1.21695 -1.21695 -1.42842 -1.21695 0.131134 -1.29625 -1.21695 0.131134 -1.45485 0.131134 0.131134 0.131134 0.131134 -1.45485 -1.21695 -1.21695 -1.21695 0.131134 -1.40198 -1.45485 -1.40198 -1.21695 0.131134 0.131134 -1.40198 -1.45485 -1.58702 -1.29625 -1.50772 -1.13765 -1.45485 -1.16409 -1.40198 -1.82491 0.131134 -1.29625 -1.45485 -1.11122 -1.29625 0.131134 -1.11122 -1.16409 0.131134 0.131134 0.131134 -1.45485 0.131134 0.131134 -1.16409 -1.11122 0.131134 -1.21695 -1.16409 -1.21695 -1.16409 -1.50772 -1.16409 -1.11122 0.131134 -1.11122 -1.40198 0.131134 -1.11122 -1.29625 0.131134 -1.48128 0.131134 
0.104701 -1.57427 -1.21049 -1.78647 -1.21049 -1.21049 -1.27112 -0.0282047 0.0930551 -2.11994 -1.14986 -1.45301 -1.39238 0.153685 -1.39238 0.153685 0.0930551 0.0930551 0.0930551 0.0930551 -3.666 -1.48332 -1.21049 0.153685 0.153685 -1.51364 -1.48332 -1.51364 -1.45301 0.153685 -1.39238 0.153685 -1.39238 -1.6349 -1.39238 0.153685 -1.6349 0.153685 -1.30143 -1.39238 -1.60458 -1.21049 -1.11954 -1.21049 0.153685 0.153685 -1.18017 -1.39238 -1.21049 -1.11954 -1.45301 -1.21049 -1.11954 -1.51364 -1.51364 -1.27112 0.153685 0.153685 0.0930551 0.0930551 -1.11954 0.153685 0.153685 0.153685 -1.27112 -1.21049 0.153685 0.153685 -1.51364 0.153685 -1.11954 0.153685 0.153685 0.153685 0.153685 0.153685 -1.57427 -1.14986 0.153685 -1.51364 -1.21049 -2.81718 -1.18017 -1.21049 0.153685 0.153685 0.153685 0.153685 0.153685 0.153685 0.153685 -1.21049 0.153685 0.153685 0.153685 0.153685 -1.51364 0.153685 0.153685 0.153685 0.153685 -1.21049 0.153685 -1.11469 0.144244 -1.499 0.144244 0.144244 0.144244 -1.37973 0.144244 0.144244 -1.4725 -1.499 -1.1942 -1.1942 -1.30022 -1.37973 -1.1942 -1.1677 0.144244 0.144244 0.144244 0.144244 0.144244 0.144244 -1.1942 0.144244 0.144244 0.144244 -1.20746 0.144244 -1.1942 -1.12794 -1.1677 0.144244 0.144244 -1.4725 0.144244 -1.1942 -1.11469 0.144244 -1.4725 0.144244 -1.11469 -1.499 0.144244 0.0912362 0.144244 0.144244 0.144244 0.144244 0.144244 -1.11469 0.144244 0.144244 -1.4725 0.144244 0.144244 0.144244 0.144244 0.144244 0.144244 0.144244 0.144244 -1.44599 -1.40624 -1.1942 -1.11469 -1.45924 0.144244 -1.1677 -1.1412 0.144244 -1.20746 -1.45924 -1.43274 -1.1677 -1.41949 0.144244 0.144244 -1.1942 0.144244 0.144244 0.144244 -1.499 -1.1942 -1.499 0.144244 -1.1412 0.144244 0.144244 0.144244 0.144244 -1.1942 0.144244 -1.499 0.144244 0.144244 -1.4725 -1.39298 0.144244 0.144244 0.144244 0.144244 -1.7313 0.136709 -1.56578 0.136709 0.136709 -1.21109 -1.21109 -1.21109 -1.1638 -1.18745 -1.11651 0.136709 0.136709 -1.11651 -1.11651 0.136709 -1.11651 -1.1638 -1.21109 0.136709 0.136709 0.136709 0.136709 0.136709 0.136709 0.136709 -1.11651 0.136709 -1.1638 0.136709 -1.21109 -1.30568 0.136709 0.136709 -1.58943 -1.1638 -1.21109 -1.42391 -1.11651 0.136709 -1.4712 -1.1638 0.136709 -1.28203 -1.61307 -1.30568 -1.40026 -1.28203 0.136709 -1.4712 -1.21109 0.136709 -1.21109 -1.40026 -1.40026 -1.21109 0.136709 -1.40026 0.136709 0.136709 0.136709 -2.819 0.113063 -1.4712 0.136709 -1.30568 -1.49484 0.136709 -1.4712 0.136709 0.0894173 0.0894173 0.0894173 -1.7313 -1.51849 0.136709 -1.42391 0.136709 0.136709 -1.4712 -1.28203 -1.80224 -1.28203 -1.28203 0.136709 -1.77859 0.136709 -1.21109 -1.21109 -1.11651 -1.40026 -1.21109 0.136709 -2.74806 0.136709 -1.40026 -1.21109 -1.1638 -1.37661 -1.21109 -1.63672 0.113063 -1.57383 0.132299 -1.52213 -1.52213 0.132299 -1.52213 -1.13437 0.132299 -1.13437 0.132299 -1.28947 -1.13437 0.132299 0.132299 -1.52213 -1.59968 -1.57383 -1.59968 -1.44457 -1.52213 -1.21192 -1.21192 -1.52213 -1.41872 -1.52213 0.054748 0.054748 0.054748 0.054748 0.054748 0.054748 -2.37519 -1.41872 -1.21192 -1.52213 -1.41872 -3.099 -1.41872 0.132299 0.132299 -1.52213 -1.49628 -1.21192 -1.59968 0.132299 0.132299 0.132299 0.132299 -1.39287 -1.65138 -1.13437 0.132299 -1.21192 -1.28947 -1.21192 0.132299 0.132299 -1.39287 -1.52213 0.132299 0.132299 0.132299 -1.21192 0.132299 0.132299 0.132299 -1.13437 -1.16022 0.132299 0.132299 0.132299 0.132299 0.132299 -1.49628 0.132299 -1.47043 -1.47043 -1.47043 -2.8405 -1.21192 -1.28947 -1.52213 -1.59968 -1.21192 -1.13437 0.132299 0.132299 0.132299 0.132299 0.132299 chr1 10000 20000 1000 
max n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 -0.0152047 -0.0152047 -0.0152047 -0.0152047 -0.0152047 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.271 0.271 0.271 0.271 0.271 0.204953 0.271 0.271 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.11689 0.11689 0.11689 0.11689 0.11689 0.11689 0.11689 0.11689 0.11689 0.11689 0.11689 0.11689 0.11689 0.11689 0.11689 0.11689 0.11689 0.11689 0.11689 0.11689 0.11689 0.11689 0.11689 0.11689 0.11689 0.11689 0.11689 0.11689 0.11689 0.11689 0.11689 0.11689 0.11689 0.090622 0.090622 0.090622 0.090622 0.090622 0.090622 0.090622 0.090622 0.090622 0.090622 0.090622 0.090622 0.289 0.289 0.289 0.289 0.289 0.289 0.289 0.289 0.090622 0.090622 0.0575591 0.0575591 0.090622 0.090622 0.090622 0.090622 0.090622 0.090622 0.090622 0.090622 0.090622 0.090622 0.090622 0.090622 0.090622 0.090622 0.090622 0.090622 0.090622 0.090622 0.090622 0.090622 0.090622 0.090622 0.090622 0.090622 0.090622 0.090622 0.090622 0.090622 0.090622 0.090622 0.090622 0.090622 0.090622 0.090622 0.090622 0.090622 0.090622 0.090622 0.090622 0.090622 0.090622 0.090622 0.090622 0.090622 0.090622 0.090622 0.090622 0.090622 0.090622 0.090622 0.090622 0.090622 0.090622 0.090622 0.090622 0.090622 0.090622 0.090622 0.090622 0.090622 0.090622 0.090622 0.090622 0.090622 0.090622 0.090622 0.090622 0.090622 0.090622 0.090622 0.090622 0.090622 0.090622 0.090622 0.090622 0.090622 0.156748 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.153142 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.0947559 0.0947559 0.0947559 0.0947559 0.0947559 0.0947559 0.0947559 0.0947559 0.0947559 0.0947559 0.0947559 0.0947559 0.0947559 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 
0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.153685 0.0930551 0.0930551 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.0930551 0.0930551 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.113063 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.15815 0.054748 0.054748 0.054748 0.054748 0.054748 0.106449 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 0.184 chr1 10000 20000 1000 std n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a 0 0 0 0.0341066 0.682275 0.814553 0.680271 1.16809 0.791618 0.987457 0 0.95987 0.786706 0 1.04899 0.78732 1.186 0.793666 0.758856 0.982409 0.675315 0 0.682275 0.779743 0 1.03534 0.675315 0 
0 0 0.765819 0.793666 0.682275 0 0 0 0 1.23921 0.793666 0 0 0 0 0 0 0 0 0.0341066 0 0 0 0 0 0.0278479 0.758856 0.682275 0 0 0.0804237 0.643661 0.0319037 0.356424 0.738922 0.86623 0.0208859 0.773408 0 0 0.675315 0 0 0.0289852 0.011369 0.00928276 0.011369 0.766452 0.512973 0.011369 0.777281 0.580536 0.0106347 0.00928276 0.00928276 0.0106347 0.0116034 0.764636 0.525358 0.649869 0.011369 0.011369 0.844611 0.61427 0.526132 0.582578 0.0106347 0.647548 0.0202312 0.510653 0.588004 0.635947 0.0106347 0.588004 0.526132 0.00830667 0 0.585506 0.627327 0 0.575048 1.34156 0 0.575048 0 0.658694 0 0 0.0763557 0.0319419 0.991548 0.591952 0.85553 0.934539 0.0763557 0.642715 0.627327 0.697378 0.673997 0.0174257 0.0278811 0.658694 0.864457 0 0 0.669148 0 0.522773 0.669147 0 0 0 0.50186 0.648236 0 0.50186 0.50186 0 0.669148 0 0.817345 0 0 0 0.854822 0.575048 0 0 0.658694 0.59596 0 0.658694 0 0.50186 0.627327 0.585506 0 0.50186 0.606415 0.50186 0 0 0 0.585506 0 0.806334 0 0.50186 1.33368 0 0.871173 0 0.871173 0 0.74033 0.683266 0.648236 0.864457 1.01958 0.627327 0.911278 0.794133 0 0.0278811 0.627327 0 0.627327 0 0.74033 0.917037 0 0 0 0.522773 0.658694 0.735234 0.513027 0.514115 0.504386 0.752222 0.00975815 0.51308 0.0162636 0.43611 0.435054 0.435054 0.5617 0.0130109 0.569556 0.616789 0.4058 0.015935 1.25928 0.427386 0.942337 0.015935 0.622533 0.676549 0.015935 0.503325 0.740034 0.782616 0.4058 0.406854 0.43611 0.0162636 0.414468 0.406854 0.43397 0.039033 0.651491 0.736595 0.015935 1.14831 0.722374 0.4058 0.670382 0.015935 0.414468 0.0162636 0.415552 0.435054 0.0130109 0.707344 0.0162636 0.43611 0.015935 0.533647 0.463229 0.605439 0.015935 0.707343 0.63831 0.40788 0.566912 0.790196 0.0149058 0.465371 0.579171 0.0162636 0.533647 0.015935 0.622932 0.406854 0.403604 0.651491 0.68559 0.654797 0.545067 0.620082 0.835241 0.465371 0.0149058 0.0162636 0.577527 0.5617 1.04529 0.0149058 0.0298119 0.015935 0.532586 0.0149058 0.644053 0.695328 0.668359 0.72035 0.742489 0.780937 0.43611 0.551012 0.651491 0.623484 0.779439 0.015935 0.566399 0.628958 0.541767 0.0162636 0.615413 1.1971 0.49462 0.0143697 0.496644 0.500821 0.0143697 0.0153618 0.0156786 0.438205 0.0143697 0.926202 0.51238 0.653073 0.619579 0.0156786 0.513425 0.495643 0.0156786 0.0125429 0.0143697 0.0156786 0.453801 0.504021 0.0153618 0.438205 0.0125429 0.0153618 0.679873 0.634191 0.0156786 0.56655 0.560443 0.412009 0.56655 0.0153618 0.467432 0.458028 0.437161 0.0156786 0.578742 0.509117 0.0321316 0 0 0.592651 0.658502 0 0 0.884522 0.611466 0.573835 0 0 0 0 0.0143697 0.495643 0.0153618 0.649032 0.622455 0.533257 0.458028 0.57243 1.13418 0.596116 0.411017 0.504021 0.409999 0.409999 0.0125429 0.0156786 0.412009 0.0156786 0.417521 0.0153618 0.0143697 0.412974 0.0153618 0.0156786 0.0125429 0.0153618 0.441191 0.44022 0.0153618 0.0153618 0.0156786 0.0153618 0.0153618 0.0153618 0.608596 0.994177 0.0125429 0.0153618 0.0156786 0.607754 1.25534 0.412974 0.586506 0.467432 0.439225 0.0156786 0.0153618 0.0156786 0.566943 0.0272999 0.0272999 0.436381 0.403012 0.0272999 0.0272999 1.07049 0.421468 0.646185 0.436381 0.0222903 0.0255367 0.582833 0.555555 0.0272999 0.639001 0.563795 0.504935 1.13476 0.555555 0.711944 0.549571 0.621293 0.0255367 0.712576 0.688844 0.0272999 0.0278628 0.0255367 0.463194 0.0272999 0.0278628 0.409498 0.0278628 0.436381 0.648224 0.663939 0.439854 0.0278628 0.45956 0.439854 0.0255367 0.513283 0.0255367 0.0272999 0.0222903 0.0255367 0.515004 0.436381 0.622665 0.436381 0.0272999 0.492944 0.513283 0.63625 0.436381 0.0272999 0.0278628 0.494799 0.717954 
0.556765 0.461414 0.66799 0.413122 0.511495 0.421468 0.608719 0.772641 0.0222903 0.66337 0.511495 0.406464 0.658532 0.0278628 0.406464 0.632317 0.0167177 0.0272999 0.0272999 0.680311 0.0272999 0.0278628 0.421468 0.40478 0.0272999 0.439854 0.421468 0.436381 0.42316 0.531708 0.419696 0.40478 0.0255367 0.406464 0.496584 0.0255367 0.403012 0.463194 0.0255367 0.621011 0.0272999 0.0301101 0.553003 0.435911 0.855654 0.566141 0.562994 0.456137 0.0654878 0 0.699811 0.544404 0.514671 0.617191 0.0156546 0.623152 0.0127819 0.029287 0 0 0.0350047 1.24379 0.524252 0.438014 0.0159774 0.0159774 0.638649 0.653949 0.695302 0.676023 0.0146435 0.493399 0.0146435 0.494464 0.708619 0.619965 0.0156546 0.57217 0.0156546 0.692203 0.493399 0.682135 0.435911 0.404956 0.433703 0.0156546 0.0127819 0.425238 0.490066 0.578515 0.407168 0.512564 0.565916 0.40927 0.533837 0.637056 0.457177 0.0159774 0.0156546 0.029287 0.0372654 0.408233 0.0127819 0.0156546 0.00958644 0.457177 0.438014 0.0146435 0.0156546 0.53586 0.0127819 0.410279 0.0156546 0.0156546 0.0156546 0.0156546 0.0156546 0.680637 0.549854 0.0146435 0.701617 0.653286 0.945969 0.556379 0.564578 0.0156546 0.0159774 0.0156546 0.0156546 0.0146435 0.0146435 0.00958644 0.57074 0.0159774 0.0159774 0.0127819 0.0146435 0.531729 0.0156546 0.0146435 0.0146435 0.0156546 0.43482 0.0146435 0.403045 0.0205299 0.526994 0.019204 0.0167626 0.0205299 0.490662 0.019204 0.0209533 0.639597 0.684364 0.432041 0.579138 0.46685 0.490662 0.577138 0.426231 0.0209533 0.019204 0.0209533 0.0209533 0.0209533 0.0205299 0.430691 0.019204 0.0205299 0.019204 0.43623 0.0167626 0.427854 0.406922 0.552904 0.019204 0.0205299 0.519978 0.019204 0.554475 0.40418 0.019204 0.680655 0.019204 0.408223 0.529675 0.0167626 0.0341022 0.0205299 0.0209533 0.0205299 0.019204 0.0205299 0.409476 0.0167626 0.0167626 0.5213 0.0205299 0.019204 0.0205299 0.0205299 0.0205299 0.0209533 0.019204 0.0209533 0.703574 0.610948 0.560696 0.405576 0.513034 0.0209533 0.423669 0.415297 0.0205299 0.429025 0.754058 0.50466 0.423669 0.504545 0.019204 0.0205299 0.558661 0.0167626 0.0205299 0.0205299 0.762657 0.567476 0.530961 0.0205299 0.551244 0.0167626 0.0205299 0.0205299 0.012572 0.432041 0.0167626 0.530961 0.0205299 0.0209533 0.5213 0.496166 0.0209533 0.0209533 0.0205299 0.0205299 0.601772 0.0228437 0.665937 0.024421 0.024421 0.581145 0.576056 0.649712 0.623242 0.556492 0.406884 0.024421 0.0228437 0.405293 0.405293 0.0228437 0.405293 0.421816 0.435155 0.024421 0.024421 0.024421 0.0249245 0.0228437 0.0249245 0.024421 0.406884 0.0228437 0.421816 0.024421 0.436751 0.461647 0.0228437 0.0199396 0.660971 0.421816 0.433495 0.505525 0.406884 0.0228437 0.63644 0.421816 0.0228437 0.577135 0.656429 0.466626 0.491523 0.457557 0.0228437 0.699711 0.435155 0.024421 0.561663 0.496503 0.494899 0.433495 0.024421 0.498052 0.0249245 0.0228437 0.024421 0.941617 0.0294913 0.515648 0.0249245 0.465026 0.701018 0.024421 0.723697 0.0228437 0.03344 0.03344 0.0122106 0.592223 0.532247 0.0228437 0.6054 0.024421 0.0249245 0.641302 0.66847 0.621902 0.457557 0.679324 0.024421 0.700806 0.0228437 0.435155 0.436751 0.405293 0.493239 0.563987 0.024421 1.00319 0.024421 0.618058 0.438284 0.423345 0.728333 0.431772 0.675842 0.0273036 0.54807 0.0272488 0.534632 0.534632 0.024974 0.531064 0.405667 0.0272488 0.412177 0.024974 0.459405 0.548207 0.024974 0.024974 0.532879 0.678099 0.721762 0.560828 0.620504 0.711584 0.559395 0.43666 0.536323 0.498404 0.699402 0.0506124 0 0 0 0 0 0.97482 0.492809 0.564837 0.534632 0.501967 1.03301 0.643991 0.0266983 0.0272488 0.628313 0.700691 
0.434922 0.557375 0.0272488 0.024974 0.0272488 0.0266983 0.613192 0.573707 0.413831 0.0266983 0.43666 0.652861 0.434922 0.0217991 0.0266983 0.60573 0.531064 0.0272488 0.0272488 0.0272488 0.433108 0.0217991 0.0272488 0.0272488 0.408631 0.556841 0.024974 0.0266983 0.0266983 0.0217991 0.0272488 0.5229 0.024974 0.512857 0.512857 0.645375 0.951293 0.43666 0.57934 0.770908 0.553686 0.434922 0.412177 0.024974 0.0266983 0.024974 0.0272488 0.0266983 chr1 10000 12000 1000 mean n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0178189 -0.0152047 -0.0152047 0.0178189 0.0508425 0.0508425 0.0508425 -1.02793 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 -1.23708 0.0508425 0.0508425 0.0178189 0.0508425 0.0508425 -1.02793 -1.01692 0.0508425 0.0508425 -1.23708 -1.20405 0.0508425 -1.20405 0.0508425 0.0508425 0.0178189 -1.20405 0.0508425 0.0508425 -1.02793 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 -1.16002 0.0508425 0.0178189 0.0508425 -1.01692 0.0508425 -1.19305 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 -1.18204 0.0508425 -1.20405 -1.20405 0.0508425 0.0508425 0.0178189 -0.0152047 -0.0152047 -1.16002 0.0508425 -1.20405 -1.19305 0.0508425 -1.20405 0.0508425 0.0508425 0.0508425 -1.14901 0.0508425 0.0508425 0.0508425 0.0508425 -1.19305 0.0508425 0.0508425 0.0508425 -1.02793 0.0508425 -1.01692 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 -1.02793 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 -1.18204 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 -1.20405 -1.14901 0.0508425 0.0508425 0.0508425 
0.0508425 -1.01692 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 -1.16002 0.0508425 0.0508425 -1.20405 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 -1.02793 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 -1.14901 -2.34887 -1.14901 0.0508425 0.0508425 0.0508425 -1.20405 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 -0.0152047 -0.0152047 -0.0152047 -0.0152047 -0.0152047 -0.0152047 -0.0152047 -0.0152047 -0.0152047 -0.0152047 -0.0152047 -0.0152047 -0.0152047 -0.0152047 -0.0152047 -0.0152047 -0.0152047 -0.0152047 -0.0152047 -0.0152047 -0.0152047 -0.0152047 -0.0152047 -0.0152047 -0.0152047 -0.0152047 -0.0152047 -0.0152047 -0.0152047 -0.0152047 -0.0152047 0.0508425 0.0508425 0.0508425 0.0508425 -1.14901 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 -1.02793 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.237976 0.237976 0.237976 0.204953 -0.76374 0.237976 0.271 0.204953 0.204953 0.204953 0.237976 0.271 -0.334433 0.237976 0.237976 0.271 -0.719708 0.204953 0.204953 -0.609628 0.237976 -1.23708 0.0508425 -0.411488 0.204953 0.204953 0.204953 0.237976 0.204953 0.204953 0.204953 0.204953 0.237976 -1.00591 0.237976 0.237976 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 -1.01692 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0838662 0.094874 0.105882 0.105882 0.105882 0.105882 0.094874 0.105882 0.105882 0.094874 0.105882 0.094874 0.094874 0.11689 0.094874 0.094874 0.094874 0.11689 -0.895835 -0.697693 0.105882 0.094874 0.105882 0.11689 0.105882 0.094874 -0.697695 0.11689 0.105882 0.105882 0.105882 0.094874 0.105882 -0.906843 0.094874 0.105882 0.105882 -0.730718 0.094874 0.11689 0.11689 0.0728583 -0.80777 0.11689 0.105882 0.11689 0.11689 0.094874 0.105882 0.105882 0.094874 0.094874 0.094874 0.094874 0.094874 0.094874 0.094874 0.11689 0.105882 0.105882 0.094874 0.105882 0.094874 0.094874 0.11689 0.105882 0.105882 0.105882 0.11689 -0.686685 -0.884825 0.105882 0.105882 0.105882 0.094874 -0.71971 0.105882 0.105882 -0.91785 0.105882 0.094874 0.094874 0.105882 0.11689 0.11689 0.094874 0.094874 0.11689 0.094874 0.105882 0.105882 0.105882 0.105882 0.105882 0.11689 -0.730718 0.105882 -1.02793 0.094874 0.094874 0.094874 0.11689 -0.873818 0.094874 -0.71971 0.11689 0.105882 0.105882 0.11689 0.11689 0.105882 0.094874 -0.80777 0.094874 0.105882 0.11689 0.094874 0.094874 0.094874 0.105882 0.094874 -0.928858 0.0838661 0.0728583 0.0948741 0.094874 0.11689 0.11689 0.105882 0.105882 -0.697695 0.094874 0.094874 0.105882 0.094874 0.105882 0.11689 -0.829788 0.094874 0.105882 -0.906843 0.105882 0.105882 0.105882 0.094874 
0.105882 0.105882 0.094874 0.094874 0.11689 0.105882 0.105882 -0.829788 0.094874 0.105882 0.11689 -0.71971 0.105882 0.103756 0.090622 0.090622 0.090622 0.090622 0.090622 0.090622 0.090622 0.090622 0.090622 0.090622 0.090622 0.090622 0.090622 -0.835144 0.090622 0.090622 -0.901269 0.090622 0.090622 0.090622 0.090622 0.090622 0.090622 0.090622 0.090622 -0.818609 0.090622 0.090622 0.090622 chr1 10000 12000 1000 min n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 -0.0152047 -0.0152047 -0.0152047 -0.0152047 0.0508425 0.0508425 0.0508425 -2.1067 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 -2.525 0.0508425 0.0508425 -0.0152047 0.0508425 0.0508425 -2.1067 -2.08469 0.0508425 0.0508425 -2.525 -2.45895 0.0508425 -2.45895 0.0508425 0.0508425 -0.0152047 -2.45895 0.0508425 0.0508425 -2.1067 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 -2.37089 0.0508425 -0.0152047 0.0508425 -2.08469 0.0508425 -2.43694 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 -2.41492 0.0508425 -2.45895 -2.45895 0.0508425 0.0508425 -0.0152047 -0.0152047 -0.0152047 -2.37089 0.0508425 -2.45895 -2.43694 0.0508425 -2.45895 0.0508425 0.0508425 0.0508425 -2.34887 0.0508425 0.0508425 0.0508425 0.0508425 -2.43694 0.0508425 0.0508425 0.0508425 -2.1067 0.0508425 -2.08469 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 -2.1067 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 -2.41492 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 -2.45895 -2.34887 0.0508425 0.0508425 0.0508425 0.0508425 -2.08469 0.0508425 0.0508425 0.0508425 0.0508425 
0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 -2.37089 0.0508425 0.0508425 -2.45895 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 -2.1067 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 -2.34887 -2.34887 -2.34887 0.0508425 0.0508425 0.0508425 -2.45895 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 -0.0152047 -0.0152047 -0.0152047 -0.0152047 -0.0152047 -0.0152047 -0.0152047 -0.0152047 -0.0152047 -0.0152047 -0.0152047 -0.0152047 -0.0152047 -0.0152047 -0.0152047 -0.0152047 -0.0152047 -0.0152047 -0.0152047 -0.0152047 -0.0152047 -0.0152047 -0.0152047 -0.0152047 -0.0152047 -0.0152047 -0.0152047 -0.0152047 -0.0152047 -0.0152047 -0.0152047 0.0508425 0.0508425 0.0508425 0.0508425 -2.34887 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 -2.1067 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.204953 0.204953 0.204953 0.204953 -1.79848 0.204953 0.271 0.204953 0.204953 0.204953 0.204953 0.271 -0.873819 0.204953 0.204953 0.271 -1.64437 0.204953 0.204953 -1.42421 0.204953 -2.525 0.0508425 -0.873819 0.204953 0.204953 0.204953 0.204953 0.204953 0.204953 0.204953 0.204953 0.204953 -2.21678 0.204953 0.204953 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 -2.08469 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.094874 0.094874 0.094874 0.094874 0.094874 0.094874 0.094874 0.094874 0.094874 0.094874 0.094874 0.094874 0.11689 0.094874 0.094874 0.094874 0.11689 -1.90856 -1.49026 0.094874 0.094874 0.094874 0.11689 0.094874 0.094874 -1.51228 0.11689 0.094874 0.094874 0.094874 0.094874 0.094874 -1.90856 0.094874 0.094874 0.094874 -1.55631 0.094874 0.11689 0.11689 0.0508425 -1.73243 0.11689 0.094874 0.11689 0.11689 0.094874 0.094874 0.094874 0.094874 0.094874 0.094874 0.094874 0.094874 0.094874 0.094874 0.11689 0.094874 0.094874 0.094874 0.094874 0.094874 0.094874 0.11689 0.094874 0.094874 0.094874 0.11689 -1.49026 -1.88654 0.094874 0.094874 0.094874 0.094874 -1.55631 0.094874 0.094874 -1.95259 0.094874 0.094874 0.094874 0.094874 0.11689 0.11689 0.094874 0.094874 0.11689 0.094874 0.094874 0.094874 0.094874 0.094874 0.094874 0.11689 -1.55631 0.094874 -2.17275 0.094874 0.094874 0.094874 0.11689 -1.84251 0.094874 -1.55631 0.11689 0.094874 0.094874 0.11689 0.11689 0.094874 0.094874 -1.73243 0.094874 0.094874 0.11689 0.094874 0.094874 0.094874 0.094874 0.094874 -1.95259 0.0728583 0.0728583 0.0728583 0.094874 0.11689 0.11689 0.094874 0.094874 -1.51228 0.094874 0.094874 0.094874 0.094874 0.094874 0.11689 -1.75445 0.094874 0.094874 -1.90856 0.094874 0.094874 0.094874 0.094874 0.094874 0.094874 0.094874 0.094874 0.11689 0.094874 0.094874 -1.75445 0.094874 
0.094874 0.11689 -1.55631 0.094874 0.090622 0.090622 0.090622 0.090622 0.090622 0.090622 0.090622 0.090622 0.090622 0.090622 0.090622 0.090622 0.090622 0.090622 -1.76091 0.090622 0.090622 -1.89316 0.090622 0.090622 0.090622 0.090622 0.090622 0.090622 0.090622 0.090622 -1.72784 0.090622 0.090622 0.090622 chr1 10000 12000 1000 max n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 -0.0152047 -0.0152047 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 -0.0152047 -0.0152047 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 
0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 -2.34887 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 -0.0152047 -0.0152047 -0.0152047 -0.0152047 -0.0152047 -0.0152047 -0.0152047 -0.0152047 -0.0152047 -0.0152047 -0.0152047 -0.0152047 -0.0152047 -0.0152047 -0.0152047 -0.0152047 -0.0152047 -0.0152047 -0.0152047 -0.0152047 -0.0152047 -0.0152047 -0.0152047 -0.0152047 -0.0152047 -0.0152047 -0.0152047 -0.0152047 -0.0152047 -0.0152047 -0.0152047 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.271 0.271 0.271 0.204953 0.271 0.271 0.271 0.204953 0.204953 0.204953 0.271 0.271 0.204953 0.271 0.271 0.271 0.204953 0.204953 0.204953 0.204953 0.271 0.0508425 0.0508425 0.0508425 0.204953 0.204953 0.204953 0.271 0.204953 0.204953 0.204953 0.204953 0.271 0.204953 0.271 0.271 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.0508425 0.11689 0.094874 0.11689 0.11689 0.11689 0.11689 0.094874 0.11689 0.11689 0.094874 0.11689 0.094874 0.094874 0.11689 0.094874 0.094874 0.094874 0.11689 0.11689 0.094874 0.11689 0.094874 0.11689 0.11689 0.11689 0.094874 0.11689 0.11689 0.11689 0.11689 0.11689 0.094874 0.11689 0.094874 0.094874 0.11689 0.11689 0.094874 0.094874 0.11689 0.11689 0.094874 0.11689 0.11689 0.11689 0.11689 0.11689 0.094874 0.11689 0.11689 0.094874 0.094874 0.094874 0.094874 0.094874 0.094874 0.094874 0.11689 0.11689 0.11689 0.094874 0.11689 0.094874 0.094874 0.11689 0.11689 0.11689 0.11689 0.11689 0.11689 0.11689 0.11689 0.11689 0.11689 0.094874 0.11689 0.11689 0.11689 0.11689 0.11689 0.094874 0.094874 0.11689 0.11689 0.11689 0.094874 0.094874 0.11689 0.094874 0.11689 0.11689 0.11689 0.11689 0.11689 0.11689 0.094874 0.11689 0.11689 0.094874 0.094874 0.094874 0.11689 0.094874 0.094874 0.11689 0.11689 0.11689 0.11689 0.11689 0.11689 0.11689 0.094874 0.11689 0.094874 0.11689 0.11689 0.094874 0.094874 0.094874 0.11689 0.094874 0.094874 0.094874 0.0728583 0.11689 0.094874 0.11689 0.11689 0.11689 0.11689 0.11689 0.094874 0.094874 0.11689 0.094874 0.11689 0.11689 0.094874 0.094874 0.11689 0.094874 0.11689 0.11689 0.11689 0.094874 0.11689 0.11689 0.094874 0.094874 0.11689 0.11689 0.11689 0.094874 0.094874 0.11689 0.11689 0.11689 0.11689 0.11689 0.090622 0.090622 0.090622 0.090622 0.090622 0.090622 0.090622 0.090622 0.090622 0.090622 0.090622 0.090622 
0.090622 0.090622 0.090622 0.090622 0.090622 0.090622 0.090622 0.090622 0.090622 0.090622 0.090622 0.090622 0.090622 0.090622 0.090622 0.090622 0.090622 chr1 10000 12000 1000 std n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a 0 0 0 0 0 0 0 0 0 0 0 0 0.0467024 0 0 0.0467024 0 0 0 1.52561 0 0 0 0 0 1.8214 0 0 0.0467024 0 0 1.52561 1.51005 0 0 1.8214 1.77469 0 1.77469 0 0 0.0467024 1.77469 0 0 1.52561 0 0 0 0 0 0 1.71242 0 0.0467024 0 1.51005 0 1.75913 0 0 0 0 0 0 0 0 0 0 1.74356 0 1.77469 1.77469 0 0 0.0467024 0 0 1.71242 0 1.77469 1.75913 0 1.77469 0 0 0 1.69685 0 0 0 0 1.75913 0 0 0 1.52561 0 1.51005 0 0 0 0 0 0 0 0 0 1.52561 0 0 0 0 0 0 0 1.74356 0 0 0 0 0 1.77469 1.69685 0 0 0 0 1.51005 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1.71242 0 0 1.77469 0 0 0 0 0 0 0 1.52561 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1.69685 0 1.69685 0 0 0 1.77469 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1.69685 0 0 0 0 0 1.52561 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0.0467023 0.0467023 0.0467023 0 1.46334 0.0467023 0 0 0 0 0.0467023 0 0.762807 0.0467023 0.0467023 0 1.30767 0 0 1.15199 0.0467023 1.8214 0 0.653834 0 0 0 0.0467023 0 0 0 0 0.0467023 1.71242 0.0467023 0.0467023 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1.51005 0 0 0 0 0 0 0 0 0 0 0 0 0.0467026 0 0.0155677 0.0155677 0.0155677 0.0155677 0 0.0155677 0.0155677 0 0.0155677 0 0 0 0 0 0 0 1.43221 1.12086 0.0155677 0 0.0155677 0 0.0155677 0 1.152 0 0.0155677 0.0155677 0.0155677 0 0.0155677 1.41664 0 0.0155677 0.0155677 1.16756 0 0 0 0.031135 1.30767 0 0.0155677 0 0 0 0.0155677 0.0155677 0 0 0 0 0 0 0 0 0.0155677 0.0155677 0 0.0155677 0 0 0 0.0155677 0.0155677 0.0155677 0 1.13643 1.41664 0.0155677 0.0155677 
0.0155677 0 1.18313 0.0155677 0.0155677 1.46334 0.0155677 0 0 0.0155677 0 0 0 0 0 0 0.0155677 0.0155677 0.0155677 0.0155677 0.0155677 0 1.16756 0.0155677 1.61902 0 0 0 0 1.36994 0 1.18313 0 0.0155677 0.0155677 0 0 0.0155677 0 1.30767 0 0.0155677 0 0 0 0 0.0155677 0 1.44778 0.0155675 0 0.0311351 0 0 0 0.0155677 0.0155677 1.152 0 0 0.0155677 0 0.0155677 0 1.30767 0 0.0155677 1.41664 0.0155677 0.0155677 0.0155677 0 0.0155677 0.0155677 0 0 0 0.0155677 0.0155677 1.30767 0 0.0155677 0 1.18313 0.0155677 0.0185743 0 0 0 0 0 0 0 0 0 0 0 0 0 1.30923 0 0 1.40275 0 0 0 0 0 0 0 0 1.28585 0 0 0 bx-python-0.8.13/test_data/bbi_tests/test.regions000066400000000000000000000001761415666465100220520ustar00rootroot00000000000000chr1 10000 20000 10 chr1 10000 12000 10 chr1 10000 20000 100 chr1 10000 12000 100 chr1 10000 20000 1000 chr1 10000 12000 1000 bx-python-0.8.13/test_data/bbi_tests/test.wig000066400000000000000000004175071415666465100212040ustar00rootroot00000000000000variableStep chrom=chr1 span=1 10918 0.0508425 10919 0.0508425 10920 0.0508425 10921 0.0508425 10922 0.0508425 10923 0.0508425 10924 0.0508425 10925 0.0508425 10926 0.0508425 10927 0.0508425 10928 0.0508425 10929 0.0508425 10930 0.0508425 10931 0.0508425 10932 0.0508425 10933 0.0508425 10934 0.0508425 10935 0.0508425 10936 0.0508425 10937 0.0508425 10938 0.0508425 10939 0.0508425 10940 0.0508425 10941 0.0508425 10942 -0.0152047 10943 -0.0152047 10944 -0.0152047 10945 -0.0152047 10946 -0.0152047 10947 -0.0152047 10948 0.0508425 10949 0.0508425 10950 0.0508425 10951 0.0508425 10952 0.0508425 10953 0.0508425 10954 0.0508425 10955 0.0508425 10956 -2.1067 10957 0.0508425 10958 0.0508425 10959 0.0508425 10960 0.0508425 10961 0.0508425 10962 0.0508425 10963 0.0508425 10964 0.0508425 10965 0.0508425 10966 0.0508425 10967 0.0508425 10968 -2.525 10969 0.0508425 10970 0.0508425 10971 0.0508425 10972 0.0508425 10973 -0.0152047 10974 0.0508425 10975 0.0508425 10976 0.0508425 10977 0.0508425 10978 0.0508425 10979 -2.1067 10980 0.0508425 10981 -2.08469 10982 0.0508425 10983 0.0508425 10984 0.0508425 10985 0.0508425 10986 0.0508425 10987 -2.525 10988 0.0508425 10989 -2.45895 10990 0.0508425 10991 0.0508425 10992 0.0508425 10993 -2.45895 10994 0.0508425 10995 0.0508425 10996 0.0508425 10997 0.0508425 10998 0.0508425 10999 -0.0152047 11000 0.0508425 11001 0.0508425 11002 -2.45895 11003 0.0508425 11004 0.0508425 11005 0.0508425 11006 0.0508425 11007 0.0508425 11008 -2.1067 11009 0.0508425 11010 0.0508425 11011 0.0508425 11012 0.0508425 11013 0.0508425 11014 0.0508425 11015 0.0508425 11016 0.0508425 11017 0.0508425 11018 0.0508425 11019 0.0508425 11020 0.0508425 11021 -2.37089 11022 0.0508425 11023 0.0508425 11024 0.0508425 11025 0.0508425 11026 -0.0152047 11027 0.0508425 11028 0.0508425 11029 -2.08469 11030 0.0508425 11031 0.0508425 11032 0.0508425 11033 -2.43694 11034 0.0508425 11035 0.0508425 11036 0.0508425 11037 0.0508425 11038 0.0508425 11039 0.0508425 11040 0.0508425 11041 0.0508425 11042 0.0508425 11043 0.0508425 11044 0.0508425 11045 0.0508425 11046 0.0508425 11047 0.0508425 11048 0.0508425 11049 0.0508425 11050 0.0508425 11051 0.0508425 11052 0.0508425 11053 0.0508425 11054 0.0508425 11055 -2.41492 11056 0.0508425 11057 0.0508425 11058 0.0508425 11059 -2.45895 11060 0.0508425 11061 -2.45895 11062 0.0508425 11063 0.0508425 11064 0.0508425 11065 0.0508425 11066 0.0508425 11067 0.0508425 11068 -0.0152047 11069 -0.0152047 11070 -0.0152047 11071 -0.0152047 11072 -0.0152047 11073 0.0508425 11074 -2.37089 11075 0.0508425 11076 0.0508425 11077 0.0508425 
11078 -2.45895 11079 -2.43694 11080 0.0508425 11081 0.0508425 11082 0.0508425 11083 -2.45895 11084 0.0508425 11085 0.0508425 11086 0.0508425 11087 0.0508425 11088 0.0508425 11089 0.0508425 11090 0.0508425 11091 -2.34887 11092 0.0508425 11093 0.0508425 11094 0.0508425 11095 0.0508425 11096 0.0508425 11097 0.0508425 11098 0.0508425 11099 0.0508425 11100 0.0508425 11101 0.0508425 11102 -2.43694 11103 0.0508425 11104 0.0508425 11105 0.0508425 11106 0.0508425 11107 0.0508425 11108 0.0508425 11109 -2.1067 11110 0.0508425 11111 0.0508425 11112 0.0508425 11113 -2.08469 11114 0.0508425 11115 0.0508425 11116 0.0508425 11117 0.0508425 11118 0.0508425 11119 0.0508425 11120 0.0508425 11121 0.0508425 11122 0.0508425 11123 0.0508425 11124 0.0508425 11125 0.0508425 11126 0.0508425 11127 0.0508425 11128 0.0508425 11129 0.0508425 11130 0.0508425 11131 0.0508425 11132 0.0508425 11133 0.0508425 11134 -2.1067 11135 0.0508425 11136 0.0508425 11137 0.0508425 11138 0.0508425 11139 0.0508425 11140 0.0508425 11141 0.0508425 11142 0.0508425 11143 0.0508425 11144 0.0508425 11145 0.0508425 11146 0.0508425 11147 0.0508425 11148 0.0508425 11149 -2.41492 11150 0.0508425 11151 0.0508425 11152 0.0508425 11153 0.0508425 11154 0.0508425 11155 0.0508425 11156 0.0508425 11157 0.0508425 11158 0.0508425 11159 0.0508425 11160 0.0508425 11161 -2.45895 11162 0.0508425 11163 -2.34887 11164 0.0508425 11165 0.0508425 11166 0.0508425 11167 0.0508425 11168 0.0508425 11169 0.0508425 11170 0.0508425 11171 0.0508425 11172 0.0508425 11173 -2.08469 11174 0.0508425 11175 0.0508425 11176 0.0508425 11177 0.0508425 11178 0.0508425 11179 0.0508425 11180 0.0508425 11181 0.0508425 11182 0.0508425 11183 0.0508425 11184 0.0508425 11185 0.0508425 11186 0.0508425 11187 0.0508425 11188 0.0508425 11189 0.0508425 11190 0.0508425 11191 0.0508425 11192 0.0508425 11193 0.0508425 11194 0.0508425 11195 0.0508425 11196 0.0508425 11197 0.0508425 11198 0.0508425 11199 0.0508425 11200 0.0508425 11201 0.0508425 11202 0.0508425 11203 0.0508425 11204 0.0508425 11205 0.0508425 11206 0.0508425 11207 0.0508425 11208 0.0508425 11209 0.0508425 11210 0.0508425 11211 0.0508425 11212 0.0508425 11213 0.0508425 11214 0.0508425 11215 0.0508425 11216 0.0508425 11217 -2.37089 11218 0.0508425 11219 0.0508425 11220 0.0508425 11221 0.0508425 11222 0.0508425 11223 0.0508425 11224 -2.45895 11225 0.0508425 11226 0.0508425 11227 0.0508425 11228 0.0508425 11229 0.0508425 11230 0.0508425 11231 0.0508425 11232 0.0508425 11233 0.0508425 11234 0.0508425 11235 0.0508425 11236 0.0508425 11237 0.0508425 11238 0.0508425 11239 0.0508425 11240 -2.1067 11241 0.0508425 11242 0.0508425 11243 0.0508425 11244 0.0508425 11245 0.0508425 11246 0.0508425 11247 0.0508425 11248 0.0508425 11249 0.0508425 11250 0.0508425 11251 0.0508425 11252 0.0508425 11253 0.0508425 11254 0.0508425 11255 0.0508425 11256 0.0508425 11257 0.0508425 11258 0.0508425 11259 0.0508425 11260 0.0508425 11261 0.0508425 11262 0.0508425 11263 0.0508425 11264 0.0508425 11265 0.0508425 11266 0.0508425 11267 0.0508425 11268 0.0508425 11269 0.0508425 11270 0.0508425 11271 0.0508425 11272 0.0508425 11273 0.0508425 11274 0.0508425 11275 0.0508425 11276 0.0508425 11277 0.0508425 11278 0.0508425 11279 0.0508425 11280 0.0508425 11281 0.0508425 11282 0.0508425 11283 0.0508425 11284 0.0508425 11285 0.0508425 11286 -2.34887 11287 -2.34887 11288 -2.34887 11289 -2.34887 11290 0.0508425 11291 0.0508425 11292 0.0508425 11293 0.0508425 11294 0.0508425 11295 0.0508425 11296 0.0508425 11297 0.0508425 11298 -2.45895 11299 0.0508425 11300 0.0508425 11301 
0.0508425 11302 0.0508425 11303 0.0508425 11304 0.0508425 11305 0.0508425 11306 0.0508425 11307 0.0508425 11308 0.0508425 11309 0.0508425 11310 0.0508425 11311 0.0508425 11312 0.0508425 11313 0.0508425 11314 0.0508425 11315 0.0508425 11316 0.0508425 11317 0.0508425 11318 0.0508425 11319 0.0508425 11320 0.0508425 11321 0.0508425 11322 0.0508425 11323 0.0508425 11324 0.0508425 11325 0.0508425 11326 0.0508425 11327 0.0508425 11328 0.0508425 11329 0.0508425 11330 0.0508425 11331 0.0508425 11332 0.0508425 11333 0.0508425 11334 0.0508425 11335 0.0508425 11336 0.0508425 11337 0.0508425 11338 0.0508425 11339 0.0508425 11340 0.0508425 11341 0.0508425 11342 0.0508425 11343 0.0508425 11344 0.0508425 11345 0.0508425 11346 0.0508425 11347 0.0508425 11348 0.0508425 11349 0.0508425 11350 0.0508425 11351 0.0508425 11352 0.0508425 11353 0.0508425 11354 0.0508425 11355 0.0508425 11356 0.0508425 11357 0.0508425 11358 0.0508425 11359 0.0508425 11360 0.0508425 11361 0.0508425 11362 0.0508425 11363 0.0508425 11364 0.0508425 11365 0.0508425 11366 0.0508425 11367 0.0508425 11368 0.0508425 11369 0.0508425 11370 0.0508425 11371 0.0508425 11372 0.0508425 11373 0.0508425 11374 0.0508425 11375 0.0508425 11376 0.0508425 11377 0.0508425 11378 0.0508425 11379 0.0508425 11380 0.0508425 11381 0.0508425 11382 0.0508425 11383 0.0508425 11384 0.0508425 11385 0.0508425 11386 0.0508425 11387 -0.0152047 11388 -0.0152047 11389 -0.0152047 11390 -0.0152047 11391 -0.0152047 11392 -0.0152047 11393 -0.0152047 11394 -0.0152047 11395 -0.0152047 11396 -0.0152047 11397 -0.0152047 11398 -0.0152047 11399 -0.0152047 11400 -0.0152047 11401 -0.0152047 11402 -0.0152047 11403 -0.0152047 11404 -0.0152047 11405 -0.0152047 11406 -0.0152047 11407 -0.0152047 11408 -0.0152047 11409 -0.0152047 11410 -0.0152047 11411 -0.0152047 11412 -0.0152047 11413 -0.0152047 11414 -0.0152047 11415 -0.0152047 11416 -0.0152047 11417 -0.0152047 11418 -0.0152047 11419 -0.0152047 11420 -0.0152047 11421 -0.0152047 11422 -0.0152047 11423 -0.0152047 11424 -0.0152047 11425 -0.0152047 11426 -0.0152047 11427 -0.0152047 11428 -0.0152047 11429 -0.0152047 11430 -0.0152047 11431 -0.0152047 11432 -0.0152047 11433 -0.0152047 11434 -0.0152047 11435 -0.0152047 11436 -0.0152047 11437 -0.0152047 11438 -0.0152047 11439 -0.0152047 11440 -0.0152047 11441 -0.0152047 11442 -0.0152047 11443 -0.0152047 11444 -0.0152047 11445 -0.0152047 11446 -0.0152047 11447 -0.0152047 11448 -0.0152047 11449 0.0508425 11450 0.0508425 11451 0.0508425 11452 0.0508425 11453 0.0508425 11454 0.0508425 11455 0.0508425 11456 0.0508425 11457 -2.34887 11458 0.0508425 11459 0.0508425 11460 0.0508425 11461 0.0508425 11462 0.0508425 11463 0.0508425 11464 0.0508425 11465 0.0508425 11466 0.0508425 11467 0.0508425 11468 0.0508425 11469 -2.1067 11470 0.0508425 11471 0.0508425 11472 0.0508425 11473 0.0508425 11474 0.0508425 11475 0.0508425 11476 0.0508425 11477 0.0508425 11478 0.0508425 11479 0.0508425 11480 0.0508425 11481 0.0508425 11482 0.0508425 11483 0.0508425 11484 0.0508425 11485 0.0508425 11486 0.0508425 11487 0.0508425 11488 0.0508425 11489 0.0508425 11490 0.0508425 11491 0.0508425 11492 0.0508425 11493 0.0508425 11494 0.0508425 11495 0.0508425 11496 0.0508425 11497 0.0508425 11498 0.0508425 11499 0.204953 11500 0.271 11501 0.204953 11502 0.271 11503 0.204953 11504 0.271 11505 0.204953 11506 0.204953 11507 0.271 11508 -1.79848 11509 0.271 11510 0.204953 11511 0.271 11512 0.271 11513 0.204953 11514 0.204953 11515 0.204953 11516 0.204953 11517 0.204953 11518 0.204953 11519 0.204953 11520 0.271 11521 0.271 11522 0.271 
11523 -0.873819 11524 0.204953 11525 0.204953 11526 0.271 11527 0.271 11528 0.204953 11529 0.271 11530 0.271 11531 -1.64437 11532 0.204953 11533 0.204953 11534 0.204953 11535 0.204953 11536 0.204953 11537 0.204953 11538 -1.42421 11539 0.271 11540 0.204953 11541 0.0508425 11542 -2.525 11543 0.0508425 11544 0.0508425 11545 0.0508425 11546 -0.873819 11547 0.204953 11548 0.204953 11549 0.204953 11550 0.204953 11551 0.204953 11552 0.204953 11553 0.204953 11554 0.271 11555 0.204953 11556 0.204953 11557 0.204953 11558 0.204953 11559 0.204953 11560 0.204953 11561 0.204953 11562 0.204953 11563 0.271 11564 0.204953 11565 -2.21678 11566 0.204953 11567 0.204953 11568 0.271 11569 0.204953 11570 0.271 11571 0.0508425 11572 0.0508425 11573 0.0508425 11574 0.0508425 11575 0.0508425 11576 0.0508425 11577 0.0508425 11578 0.0508425 11579 0.0508425 11580 0.0508425 11581 0.0508425 11582 0.0508425 11583 0.0508425 11584 0.0508425 11585 0.0508425 11586 0.0508425 11587 0.0508425 11588 0.0508425 11589 0.0508425 11590 0.0508425 11591 0.0508425 11592 0.0508425 11593 0.0508425 11594 0.0508425 11595 0.0508425 11596 0.0508425 11597 0.0508425 11598 0.0508425 11599 -2.08469 11600 0.0508425 11601 0.0508425 11602 0.0508425 11603 0.0508425 11604 0.0508425 11605 0.0508425 11606 0.0508425 11607 0.0508425 11608 0.0508425 11609 0.0508425 11610 0.0508425 11611 0.0508425 11612 0.0508425 11613 0.0508425 11614 0.0508425 11615 0.0508425 11616 0.0508425 11617 0.0508425 11618 0.0508425 11619 0.0508425 11620 0.0508425 11621 0.0508425 11622 0.0508425 11623 0.0508425 11624 0.0508425 11625 0.0508425 11626 0.11689 11627 0.094874 11628 0.094874 11629 0.094874 11630 0.11689 11631 0.094874 11632 0.11689 11633 0.11689 11634 0.094874 11635 0.11689 11636 0.094874 11637 0.094874 11638 0.094874 11639 0.11689 11640 0.094874 11641 0.11689 11642 0.094874 11643 0.094874 11644 0.094874 11645 0.094874 11646 0.11689 11647 0.094874 11648 0.094874 11649 0.094874 11650 0.094874 11651 0.11689 11652 0.11689 11653 0.094874 11654 0.094874 11655 0.094874 11656 0.094874 11657 0.094874 11658 0.094874 11659 0.11689 11660 0.11689 11661 0.11689 11662 -1.90856 11663 -1.49026 11664 0.094874 11665 0.11689 11666 0.094874 11667 0.094874 11668 0.094874 11669 0.11689 11670 0.094874 11671 0.11689 11672 0.11689 11673 0.094874 11674 0.11689 11675 0.094874 11676 0.094874 11677 0.11689 11678 -1.51228 11679 0.11689 11680 0.11689 11681 0.094874 11682 0.11689 11683 0.11689 11684 0.094874 11685 0.11689 11686 0.094874 11687 0.094874 11688 0.094874 11689 0.11689 11690 0.094874 11691 0.094874 11692 -1.90856 11693 0.094874 11694 0.094874 11695 0.094874 11696 0.11689 11697 0.094874 11698 0.11689 11699 -1.55631 11700 0.094874 11701 0.094874 11702 0.094874 11703 0.11689 11704 0.11689 11705 0.11689 11706 0.11689 11707 0.094874 11708 0.0508425 11709 0.11689 11710 -1.73243 11711 0.11689 11712 0.11689 11713 0.11689 11714 0.094874 11715 0.11689 11716 0.11689 11717 0.11689 11718 0.11689 11719 0.094874 11720 0.094874 11721 0.11689 11722 0.094874 11723 0.11689 11724 0.094874 11725 0.094874 11726 0.094874 11727 0.094874 11728 0.094874 11729 0.094874 11730 0.094874 11731 0.094874 11732 0.094874 11733 0.094874 11734 0.094874 11735 0.094874 11736 0.094874 11737 0.094874 11738 0.094874 11739 0.11689 11740 0.11689 11741 0.094874 11742 0.11689 11743 0.094874 11744 0.11689 11745 0.094874 11746 0.094874 11747 0.11689 11748 0.094874 11749 0.094874 11750 0.094874 11751 0.094874 11752 0.094874 11753 0.11689 11754 0.11689 11755 0.094874 11756 0.11689 11757 0.11689 11758 0.094874 11759 0.094874 11760 0.11689 
11761 0.11689 11762 0.11689 11763 0.11689 11764 -1.49026 11765 0.11689 11766 -1.88654 11767 0.11689 11768 0.094874 11769 0.094874 11770 0.11689 11771 0.094874 11772 0.11689 11773 0.094874 11774 0.094874 11775 -1.55631 11776 0.11689 11777 0.094874 11778 0.11689 11779 0.094874 11780 0.11689 11781 -1.95259 11782 0.11689 11783 0.11689 11784 0.094874 11785 0.094874 11786 0.094874 11787 0.094874 11788 0.094874 11789 0.094874 11790 0.11689 11791 0.11689 11792 0.11689 11793 0.11689 11794 0.11689 11795 0.094874 11796 0.094874 11797 0.094874 11798 0.094874 11799 0.11689 11800 0.11689 11801 0.094874 11802 0.094874 11803 0.094874 11804 0.11689 11805 0.11689 11806 0.094874 11807 0.11689 11808 0.094874 11809 0.094874 11810 0.11689 11811 0.11689 11812 0.094874 11813 0.11689 11814 0.11689 11815 0.094874 11816 -1.55631 11817 0.11689 11818 0.094874 11819 -2.17275 11820 0.11689 11821 0.094874 11822 0.094874 11823 0.094874 11824 0.094874 11825 0.094874 11826 0.094874 11827 0.11689 11828 0.11689 11829 0.094874 11830 -1.84251 11831 0.094874 11832 0.094874 11833 -1.55631 11834 0.11689 11835 0.11689 11836 0.11689 11837 0.094874 11838 0.11689 11839 0.11689 11840 0.094874 11841 0.11689 11842 0.11689 11843 0.11689 11844 0.11689 11845 0.11689 11846 0.094874 11847 0.094874 11848 0.094874 11849 0.11689 11850 -1.73243 11851 0.094874 11852 0.094874 11853 0.11689 11854 0.094874 11855 0.11689 11856 0.11689 11857 0.094874 11858 0.094874 11859 0.094874 11860 0.094874 11861 0.094874 11862 0.094874 11863 0.11689 11864 0.094874 11865 0.094874 11866 0.094874 11867 0.094874 11868 -1.95259 11869 0.094874 11870 0.0728583 11871 0.0728583 11872 0.0728583 11873 0.0728583 11874 0.11689 11875 0.094874 11876 0.094874 11877 0.11689 11878 0.11689 11879 0.11689 11880 0.11689 11881 0.094874 11882 0.11689 11883 0.094874 11884 0.11689 11885 0.11689 11886 -1.51228 11887 0.094874 11888 0.094874 11889 0.094874 11890 0.094874 11891 0.11689 11892 0.094874 11893 0.094874 11894 0.094874 11895 0.11689 11896 0.094874 11897 0.11689 11898 0.11689 11899 0.094874 11900 -1.75445 11901 0.094874 11902 0.094874 11903 0.11689 11904 0.094874 11905 -1.90856 11906 0.094874 11907 0.11689 11908 0.094874 11909 0.11689 11910 0.094874 11911 0.094874 11912 0.11689 11913 0.094874 11914 0.094874 11915 0.11689 11916 0.094874 11917 0.11689 11918 0.094874 11919 0.094874 11920 0.094874 11921 0.094874 11922 0.094874 11923 0.11689 11924 0.11689 11925 0.094874 11926 0.11689 11927 0.094874 11928 0.11689 11929 0.094874 11930 -1.75445 11931 0.094874 11932 0.094874 11933 0.094874 11934 0.11689 11935 0.11689 11936 0.11689 11937 0.11689 11938 -1.55631 11939 0.11689 11940 0.094874 11941 0.11689 11942 0.090622 11943 0.090622 11944 0.090622 11945 0.090622 11946 0.090622 11947 0.090622 11948 0.090622 11949 0.090622 11950 0.090622 11951 0.090622 11952 0.090622 11953 0.090622 11954 0.090622 11955 0.090622 11956 0.090622 11957 0.090622 11958 0.090622 11959 0.090622 11960 0.090622 11961 0.090622 11962 0.090622 11963 0.090622 11964 0.090622 11965 0.090622 11966 0.090622 11967 0.090622 11968 0.090622 11969 0.090622 11970 -1.76091 11971 0.090622 11972 0.090622 11973 0.090622 11974 0.090622 11975 -1.89316 11976 0.090622 11977 0.090622 11978 0.090622 11979 0.090622 11980 0.090622 11981 0.090622 11982 0.090622 11983 0.090622 11984 0.090622 11985 0.090622 11986 0.090622 11987 0.090622 11988 0.090622 11989 0.090622 11990 0.090622 11991 0.090622 11992 0.090622 11993 -1.72784 11994 0.090622 11995 0.090622 11996 0.090622 11997 0.090622 11998 0.090622 11999 0.090622 12000 0.090622 12001 -1.99235 12002 
-3.84387 12003 0.090622 12004 0.090622 12005 0.090622 12006 0.090622 12007 0.090622 12008 0.090622 12009 0.090622 12010 0.090622 12011 0.090622 12012 0.090622 12013 0.090622 12014 0.090622 12015 0.090622 12016 0.090622 12017 0.090622 12018 0.090622 12019 0.090622 12020 0.090622 12021 -1.72784 12022 0.090622 12023 0.090622 12024 0.090622 12025 0.090622 12026 0.090622 12027 0.090622 12028 0.090622 12029 0.090622 12030 0.090622 12031 0.090622 12032 0.090622 12033 0.090622 12034 0.090622 12035 0.090622 12036 0.090622 12037 0.090622 12038 0.090622 12039 0.090622 12040 0.090622 12041 0.090622 12042 0.090622 12043 0.090622 12044 0.090622 12045 0.090622 12046 0.090622 12047 0.090622 12048 0.090622 12049 -1.99235 12050 0.090622 12051 0.090622 12052 0.090622 12053 0.090622 12054 0.090622 12055 0.090622 12056 0.090622 12057 0.090622 12058 0.090622 12059 0.090622 12060 0.090622 12061 0.090622 12062 0.090622 12063 0.090622 12064 0.090622 12065 0.090622 12066 0.090622 12067 0.090622 12068 0.090622 12069 0.090622 12070 0.090622 12071 0.090622 12072 0.090622 12073 0.222874 12074 0.222874 12075 0.289 12076 0.289 12077 0.222874 12078 0.289 12079 0.222874 12080 0.289 12081 0.289 12082 0.222874 12083 0.289 12084 0.222874 12085 0.289 12086 0.289 12087 0.222874 12088 0.289 12089 0.289 12090 0.289 12091 0.222874 12092 0.222874 12093 0.289 12094 0.289 12095 0.289 12096 -2.7528 12097 -0.835142 12098 0.289 12099 -0.835142 12100 0.289 12101 0.222874 12102 0.289 12103 0.289 12104 0.289 12105 0.222874 12106 -1.59559 12107 0.289 12108 0.289 12109 0.289 12110 0.289 12111 0.289 12112 0.222874 12113 -1.62865 12114 -1.69478 12115 0.289 12116 0.222874 12117 0.222874 12118 0.289 12119 0.222874 12120 -1.16577 12121 -2.68667 12122 0.222874 12123 0.222874 12124 0.289 12125 0.222874 12126 0.289 12127 0.289 12128 0.289 12129 0.289 12130 0.289 12131 0.090622 12132 0.090622 12133 0.289 12134 0.222874 12135 0.222874 12136 0.289 12137 0.222874 12138 0.289 12139 0.222874 12140 0.289 12141 0.289 12142 0.222874 12143 0.222874 12144 0.289 12145 0.222874 12146 0.222874 12147 0.222874 12148 0.289 12149 -1.79397 12150 0.090622 12151 -1.89316 12152 0.090622 12153 0.090622 12154 0.090622 12155 0.090622 12156 0.090622 12157 0.090622 12158 0.090622 12159 0.090622 12160 0.090622 12161 0.090622 12162 0.090622 12163 0.090622 12164 0.090622 12165 0.090622 12166 0.090622 12167 0.090622 12168 0.0244961 12169 -2.1246 12170 0.0575591 12171 0.0575591 12172 0.0575591 12173 0.0575591 12174 0.0244961 12175 0.0244961 12176 0.0244961 12177 0.0575591 12178 0.0244961 12179 -2.09154 12180 0.0244961 12181 0.0575591 12182 0.0575591 12183 0.0244961 12184 0.0575591 12185 0.0575591 12186 0.0244961 12187 0.0244961 12188 0.0244961 12189 0.0575591 12190 0.0244961 12191 0.0244961 12192 0.0575591 12193 0.0244961 12194 0.0575591 12195 0.090622 12196 0.090622 12197 0.090622 12198 0.090622 12199 0.090622 12200 0.090622 12201 -1.99235 12202 0.090622 12203 0.090622 12204 0.090622 12205 0.090622 12206 0.090622 12207 0.090622 12208 0.090622 12209 0.090622 12210 0.090622 12211 0.090622 12212 0.090622 12213 0.090622 12214 -1.99235 12215 0.090622 12216 0.090622 12217 0.090622 12218 -1.92622 12219 0.090622 12220 0.090622 12221 0.090622 12222 0.090622 12223 0.090622 12224 0.090622 12225 0.090622 12226 0.090622 12227 0.090622 12228 0.090622 12229 0.090622 12230 0.090622 12231 0.090622 12232 0.090622 12233 0.090622 12234 0.090622 12235 0.090622 12236 0.090622 12237 0.090622 12238 0.090622 12239 0.090622 12240 0.090622 12241 -2.02541 12242 0.090622 12243 0.090622 12244 0.090622 12245 
0.090622 12246 0.090622 12247 0.090622 12248 0.090622 12249 0.090622 12250 0.090622 12251 0.090622 12252 0.090622 12253 0.090622 12254 0.090622 12255 0.090622 12256 0.090622 12257 0.090622 12258 0.090622 12259 0.090622 12260 0.090622 12261 0.090622 12262 0.090622 12263 0.090622 12264 0.090622 12265 0.090622 12266 0.090622 12267 -1.56253 12268 0.090622 12269 0.090622 12270 0.090622 12271 0.090622 12272 -1.4964 12273 0.090622 12274 0.090622 12275 0.090622 12276 0.090622 12277 0.090622 12278 0.090622 12279 -1.4964 12280 0.090622 12281 0.090622 12282 0.090622 12283 0.090622 12284 0.090622 12285 0.090622 12286 0.090622 12287 0.090622 12288 0.090622 12289 0.090622 12290 0.090622 12291 0.090622 12292 0.090622 12293 0.090622 12294 0.090622 12295 0.090622 12296 0.090622 12297 0.090622 12298 0.090622 12299 0.090622 12300 0.090622 12301 0.090622 12302 0.090622 12303 0.090622 12304 0.090622 12305 0.090622 12306 0.090622 12307 0.090622 12308 0.090622 12309 0.090622 12310 0.090622 12311 0.090622 12312 0.090622 12313 0.090622 12314 0.090622 12315 0.090622 12316 0.090622 12317 0.090622 12318 0.090622 12319 -1.4964 12320 0.090622 12321 0.090622 12322 0.090622 12323 0.090622 12324 0.090622 12325 0.090622 12326 0.090622 12327 0.090622 12328 0.090622 12329 -1.95928 12330 0.090622 12331 0.090622 12332 0.090622 12333 0.090622 12334 0.090622 12335 0.090622 12336 0.090622 12337 0.090622 12338 0.090622 12339 0.090622 12340 0.090622 12341 0.090622 12342 0.090622 12343 0.090622 12344 0.090622 12345 0.090622 12346 0.090622 12347 0.090622 12348 -1.4964 12349 0.090622 12350 0.090622 12351 0.090622 12352 0.090622 12353 0.090622 12354 0.090622 12355 0.090622 12356 -1.4964 12357 0.090622 12358 0.090622 12359 0.090622 12360 0.090622 12361 0.090622 12362 0.090622 12363 0.090622 12364 0.090622 12365 0.090622 12366 0.090622 12367 0.090622 12368 0.090622 12369 0.090622 12370 0.090622 12371 0.090622 12372 0.090622 12373 0.090622 12374 0.090622 12375 0.090622 12376 0.090622 12377 0.090622 12378 0.090622 12379 -2.02541 12380 0.090622 12381 0.090622 12382 0.090622 12383 0.090622 12384 0.090622 12385 0.090622 12386 0.090622 12387 0.090622 12388 0.090622 12389 0.090622 12390 0.090622 12391 0.090622 12392 0.090622 12393 -1.72784 12394 0.090622 12395 0.090622 12396 0.090622 12397 0.090622 12398 0.090622 12399 -1.95928 12400 0.090622 12401 0.090622 12402 0.090622 12403 0.090622 12404 0.090622 12405 0.090622 12406 0.090622 12407 0.090622 12408 0.090622 12409 0.090622 12410 0.090622 12411 0.090622 12412 0.090622 12413 0.090622 12414 0.090622 12415 0.090622 12416 0.090622 12417 0.090622 12418 0.090622 12419 0.090622 12420 0.090622 12421 0.090622 12422 0.090622 12423 0.090622 12424 0.090622 12425 0.090622 12426 0.090622 12427 0.090622 12428 0.090622 12429 0.090622 12430 0.090622 12431 -1.4964 12432 -1.76091 12433 -1.76091 12434 0.090622 12435 0.090622 12436 0.090622 12437 0.090622 12438 0.090622 12439 0.090622 12440 0.090622 12441 0.090622 12442 0.090622 12443 0.090622 12444 0.090622 12445 0.090622 12446 -1.72784 12447 0.090622 12448 0.090622 12449 0.090622 12450 0.090622 12451 0.090622 12452 0.090622 12453 0.090622 12454 0.090622 12455 0.090622 12456 0.090622 12457 0.090622 12458 0.090622 12459 0.090622 12460 0.090622 12461 0.090622 12462 0.090622 12463 0.090622 12464 0.090622 12465 0.090622 12466 0.090622 12467 0.090622 12468 0.090622 12469 0.090622 12470 0.090622 12471 0.090622 12472 0.090622 12473 -1.99235 12474 0.090622 12475 0.090622 12476 0.090622 12477 0.090622 12478 0.090622 12479 0.090622 12480 0.090622 12481 0.090622 12482 
0.090622 12483 0.090622 12484 0.090622 12485 0.090622 12486 0.090622 12487 -1.79397 12488 0.090622 12489 0.090622 12490 0.090622 12491 0.090622 12492 0.090622 12493 0.090622 12494 0.090622 12495 0.090622 12496 0.090622 12497 0.090622 12498 0.090622 12499 0.090622 12500 0.090622 12501 0.090622 12502 0.090622 12503 0.090622 12504 0.090622 12505 0.090622 12506 -1.99235 12507 0.090622 12508 0.090622 12509 0.090622 12510 0.090622 12511 0.090622 12512 0.090622 12513 0.090622 12514 0.090622 12515 0.090622 12516 0.090622 12517 0.090622 12518 0.090622 12519 0.090622 12520 0.090622 12521 0.090622 12522 -1.4964 12523 0.090622 12524 0.090622 12525 0.090622 12526 0.090622 12527 0.090622 12528 0.090622 12529 0.090622 12530 0.090622 12531 0.090622 12532 0.090622 12533 0.090622 12534 -1.89316 12535 0.090622 12536 0.090622 12537 0.090622 12538 0.090622 12539 0.090622 12540 0.090622 12541 0.090622 12542 0.090622 12543 0.090622 12544 0.090622 12545 -1.76091 12546 0.090622 12547 0.090622 12548 0.090622 12549 0.090622 12550 0.090622 12551 0.090622 12552 0.090622 12553 0.090622 12554 0.090622 12555 0.090622 12556 0.090622 12557 0.090622 12558 0.090622 12559 0.090622 12560 0.090622 12561 0.090622 12562 0.090622 12563 0.090622 12564 0.090622 12565 0.090622 12566 0.090622 12567 0.090622 12568 -1.4964 12569 0.090622 12570 0.090622 12571 0.090622 12572 0.090622 12573 0.090622 12574 0.090622 12575 0.090622 12576 -1.82703 12577 0.090622 12578 0.090622 12579 0.090622 12580 0.090622 12581 -1.4964 12582 0.090622 12583 0.090622 12584 0.090622 12585 0.090622 12586 0.090622 12587 0.090622 12588 0.090622 12589 0.090622 12590 0.090622 12591 0.090622 12592 0.090622 12593 0.090622 12594 0.090622 12595 0.090622 12596 0.090622 12597 0.090622 12598 0.090622 12599 0.090622 12600 0.090622 12601 0.090622 12602 0.090622 12603 0.090622 12604 0.090622 12605 0.090622 12606 0.090622 12607 0.090622 12608 0.090622 12609 0.090622 12610 0.090622 12611 0.090622 12612 0.090622 12613 0.090622 12614 0.090622 12615 0.090622 12616 0.090622 12617 0.090622 12618 0.090622 12619 0.090622 12620 0.090622 12621 0.090622 12622 0.090622 12623 0.090622 12624 0.090622 12625 0.090622 12626 -1.76091 12627 0.090622 12628 0.090622 12629 0.090622 12630 0.090622 12631 0.090622 12632 0.090622 12633 0.090622 12634 0.090622 12635 0.090622 12636 0.090622 12637 0.090622 12638 0.090622 12639 0.090622 12640 0.090622 12641 0.090622 12642 -1.72784 12643 0.090622 12644 0.090622 12645 -1.4964 12646 -1.4964 12647 0.090622 12648 0.090622 12649 0.090622 12650 0.090622 12651 0.090622 12652 0.090622 12653 0.090622 12654 0.090622 12655 0.090622 12656 0.090622 12657 0.090622 12658 0.090622 12659 0.090622 12660 0.090622 12661 0.090622 12662 0.090622 12663 0.090622 12664 0.090622 12665 0.090622 12666 0.090622 12667 0.090622 12668 0.090622 12669 0.090622 12670 -1.4964 12671 0.090622 12672 -1.76091 12673 0.090622 12674 0.090622 12675 0.090622 12676 0.090622 12677 0.090622 12678 0.090622 12679 0.090622 12680 -3.91 12681 0.090622 12682 0.090622 12683 0.090622 12684 0.090622 12685 0.090622 12686 0.090622 12687 0.090622 12688 0.090622 12689 0.090622 12690 0.090622 12691 0.090622 12692 0.090622 12693 0.090622 12694 0.090622 12695 0.090622 12696 0.090622 12697 -2.19072 12698 -1.72784 12699 0.090622 12700 0.090622 12701 0.090622 12702 0.090622 12703 0.090622 12704 0.090622 12705 0.090622 12706 0.090622 12707 0.090622 12708 0.090622 12709 0.090622 12710 0.090622 12711 0.090622 12712 0.090622 12713 -1.72784 12714 0.090622 12715 0.090622 12716 0.090622 12717 0.090622 12718 0.090622 12719 0.090622 
12720 -2.19072 12721 0.090622 12722 0.090622 12723 0.090622 12724 0.090622 12725 0.090622 12726 0.090622 12727 0.090622 12728 0.090622 12729 0.090622 12730 0.090622 12731 0.090622 12732 0.090622 12733 0.090622 12734 0.090622 12735 0.090622 12736 0.090622 12737 -1.56253 12738 0.090622 12739 -1.76091 12740 0.090622 12741 -1.56253 12742 -1.4964 12743 0.090622 12744 0.090622 12745 0.090622 12746 0.090622 12747 0.090622 12748 0.090622 12749 0.090622 12750 0.090622 12751 0.090622 12752 0.090622 12753 0.090622 12754 0.090622 12755 0.090622 12756 0.090622 12757 0.090622 12758 -1.95928 12759 0.090622 12760 0.090622 12761 0.090622 12762 0.090622 12763 0.090622 12764 0.090622 12765 0.090622 12766 0.090622 12767 0.090622 12768 -1.92622 12769 0.090622 12770 -1.99235 12771 0.090622 12772 -1.76091 12773 0.090622 12774 -2.25685 12775 0.090622 12776 0.090622 12777 0.090622 12778 0.090622 12779 -1.89316 12780 -1.4964 12781 0.090622 12782 -1.89316 12783 0.090622 12784 0.090622 12785 0.090622 12786 0.090622 12787 0.090622 12788 0.090622 12789 0.090622 12790 0.090622 12791 0.090622 12792 0.090622 12793 0.090622 12794 0.090622 12795 -1.89316 12796 0.090622 12797 -1.72784 12798 0.090622 12799 -1.76091 12800 0.090622 12801 0.090622 12802 0.090622 12803 0.090622 12804 0.090622 12805 0.090622 12806 0.090622 12807 -1.99235 12808 0.090622 12809 -1.56253 12810 0.090622 12811 0.090622 12812 0.090622 12813 0.090622 12814 0.090622 12815 0.090622 12816 0.090622 12817 0.090622 12818 0.090622 12819 0.090622 12820 0.090622 12821 0.090622 12822 0.090622 12823 0.090622 12824 0.090622 12825 0.090622 12826 0.090622 12827 0.0244961 12828 0.0244961 12829 0.090622 12830 0.090622 12831 0.090622 12832 -1.89316 12833 0.090622 12834 0.090622 12835 0.090622 12836 0.090622 12837 0.090622 12838 0.090622 12839 0.090622 12840 0.090622 12841 0.090622 12842 0.090622 12843 0.090622 12844 0.090622 12845 0.090622 12846 0.090622 12847 0.090622 12848 0.090622 12849 0.090622 12850 0.090622 12851 0.090622 12852 0.090622 12853 0.090622 12854 0.090622 12855 0.090622 12856 0.090622 12857 0.090622 12858 0.090622 12859 0.090622 12860 -1.89316 12861 0.090622 12862 0.090622 12863 0.090622 12864 0.090622 12865 0.090622 12866 0.090622 12867 0.090622 12868 0.090622 12869 0.090622 12870 0.090622 12871 0.090622 12872 0.090622 12873 0.090622 12874 0.090622 12875 0.090622 12876 0.090622 12877 -1.56253 12878 -1.76091 12879 0.090622 12880 0.090622 12881 0.090622 12882 0.090622 12883 -1.72784 12884 0.090622 12885 0.090622 12886 0.090622 12887 -1.92622 12888 -1.76091 12889 0.090622 12890 0.090622 12891 0.090622 12892 0.090622 12893 0.090622 12894 0.090622 12895 0.090622 12896 0.090622 12897 0.090622 12898 0.090622 12899 0.090622 12900 0.090622 12901 0.090622 12902 0.090622 12903 0.090622 12904 0.090622 12905 0.090622 12906 0.090622 12907 0.090622 12908 0.090622 12909 0.090622 12910 0.090622 12911 0.090622 12912 0.090622 12913 0.090622 12914 0.090622 12915 0.090622 12916 0.090622 12917 0.090622 12918 0.090622 12919 0.090622 12920 0.090622 12921 0.090622 12922 0.090622 12923 0.090622 12924 0.090622 12925 0.090622 12926 -1.56253 12927 0.090622 12928 0.090622 12929 0.090622 12930 0.090622 12931 0.090622 12932 0.090622 12933 0.090622 12934 0.090622 12935 0.090622 12936 0.090622 12937 0.090622 12938 0.090622 12939 -1.99235 12940 0.090622 12941 0.090622 12942 0.090622 12943 -1.4964 12944 0.090622 12945 0.090622 12946 0.090622 12947 0.090622 12948 -1.79397 12949 0.090622 12950 0.090622 12951 0.090622 12952 0.090622 12953 0.090622 12954 -1.4964 12955 0.156748 12956 0.123685 
12957 0.156748 12958 0.123685 12959 0.123685 12960 0.156748 12961 0.156748 12962 0.156748 12963 0.156748 12964 0.123685 12965 -1.46334 12966 0.184 12967 0.153142 12968 0.184 12969 0.153142 12970 0.184 12971 0.184 12972 0.184 12973 0.153142 12974 0.184 12975 0.184 12976 0.153142 12977 0.184 12978 -1.42063 12979 0.184 12980 0.153142 12981 0.184 12982 0.184 12983 0.184 12984 0.184 12985 -1.45149 12986 -1.11205 12987 0.184 12988 0.184 12989 0.184 12990 -1.5132 12991 0.184 12992 0.184 12993 0.184 12994 0.184 12995 0.184 12996 0.184 12997 0.184 12998 0.184 12999 0.184 13000 0.153142 13001 0.153142 13002 0.184 13003 0.153142 13004 0.153142 13005 0.153142 13006 0.184 13007 0.184 13008 0.184 13009 -1.45149 13010 0.184 13011 0.153142 13012 0.184 13013 0.184 13014 0.153142 13015 0.184 13016 0.184 13017 0.153142 13018 0.184 13019 0.153142 13020 0.153142 13021 0.184 13022 0.184 13023 0.153142 13024 0.184 13025 -1.20462 13026 0.184 13027 0.184 13028 0.153142 13029 0.153142 13030 0.184 13031 0.153142 13032 0.153142 13033 0.184 13034 0.184 13035 0.184 13036 0.153142 13037 0.184 13038 -1.20462 13039 0.184 13040 0.153142 13041 0.153142 13042 0.184 13043 0.184 13044 -1.20462 13045 0.184 13046 0.153142 13047 0.184 13048 0.184 13049 0.153142 13050 0.153142 13051 0.153142 13052 -1.11205 13053 0.184 13054 0.184 13055 0.153142 13056 -1.20462 13057 0.184 13058 0.184 13059 0.184 13060 0.153142 13061 0.184 13062 0.184 13063 0.184 13064 0.153142 13065 0.184 13066 0.184 13067 0.184 13068 0.153142 13069 0.184 13070 0.184 13071 -1.20462 13072 0.184 13073 0.184 13074 0.153142 13075 0.184 13076 0.184 13077 0.184 13078 0.153142 13079 -1.14291 13080 0.184 13081 0.153142 13082 0.184 13083 0.153142 13084 0.184 13085 0.153142 13086 0.184 13087 -1.42063 13088 0.184 13089 0.184 13090 -1.14291 13091 0.184 13092 -1.11205 13093 0.153142 13094 0.184 13095 0.184 13096 0.153142 13097 0.184 13098 0.153142 13099 0.153142 13100 0.184 13101 0.184 13102 0.184 13103 0.184 13104 0.153142 13105 0.184 13106 0.153142 13107 0.153142 13108 0.184 13109 0.153142 13110 0.184 13111 0.184 13112 0.153142 13113 0.153142 13114 0.153142 13115 0.184 13116 -1.14291 13117 0.184 13118 -3.735 13119 0.184 13120 0.184 13121 -1.17376 13122 0.153142 13123 0.184 13124 0.184 13125 0.184 13126 0.153142 13127 0.184 13128 0.184 13129 0.184 13130 0.184 13131 0.153142 13132 0.184 13133 0.153142 13134 0.184 13135 0.153142 13136 0.184 13137 0.184 13138 -2.80925 13139 0.153142 13140 0.184 13141 0.184 13142 0.153142 13143 0.184 13144 0.153142 13145 0.184 13146 0.184 13147 0.184 13148 0.153142 13149 0.184 13150 0.153142 13151 0.184 13152 0.184 13153 0.153142 13154 0.153142 13155 0.184 13156 0.184 13157 -1.42063 13158 0.184 13159 0.153142 13160 -1.17376 13161 0.184 13162 0.184 13163 -1.11205 13164 -1.11205 13165 0.153142 13166 0.153142 13167 0.184 13168 -1.11205 13169 -1.20462 13170 0.184 13171 0.153142 13172 0.184 13173 0.153142 13174 0.153142 13175 0.184 13176 0.184 13177 0.184 13178 0.184 13179 0.153142 13180 0.184 13181 0.184 13182 0.184 13183 0.153142 13184 0.184 13185 0.184 13186 0.153142 13187 0.184 13188 0.153142 13189 -1.42063 13190 0.153142 13191 0.184 13192 -1.38977 13193 -1.26634 13194 0.153142 13195 0.153142 13196 0.153142 13197 0.184 13198 0.184 13199 0.184 13200 -1.42063 13201 -1.45149 13202 -1.63664 13203 0.184 13204 0.184 13205 0.184 13206 0.153142 13207 0.184 13208 -1.20462 13209 0.184 13210 0.153142 13211 0.153142 13212 0.184 13213 0.184 13214 0.184 13215 0.153142 13216 0.184 13217 -1.11205 13218 0.153142 13219 0.153142 13220 0.184 13221 0.184 13222 0.184 
13223 0.153142 13224 0.184 13225 0.184 13226 -1.11205 13227 0.184 13228 0.184 13229 0.153142 13230 0.153142 13231 0.184 13232 0.153142 13233 0.184 13234 0.184 13235 0.184 13236 0.153142 13237 0.184 13238 0.184 13239 -1.20462 13240 0.153142 13241 0.153142 13242 0.184 13243 0.184 13244 0.184 13245 0.153142 13246 0.153142 13247 0.153142 13248 0.184 13249 0.153142 13250 0.184 13251 0.184 13252 0.184 13253 0.153142 13254 0.184 13255 0.184 13256 0.153142 13257 0.153142 13258 0.153142 13259 -1.14291 13260 0.153142 13261 0.184 13262 -1.11205 13263 0.153142 13264 0.184 13265 0.184 13266 0.153142 13267 0.184 13268 0.184 13269 0.153142 13270 0.184 13271 0.184 13272 0.153142 13273 0.184 13274 0.153142 13275 0.184 13276 0.153142 13277 -1.20462 13278 0.153142 13279 0.184 13280 0.153142 13281 0.184 13282 0.184 13283 0.184 13284 0.184 13285 0.184 13286 0.0914252 13287 0.0914252 13288 0.184 13289 0.184 13290 0.184 13291 -1.20462 13292 0.184 13293 0.153142 13294 0.184 13295 0.184 13296 0.153142 13297 0.184 13298 0.184 13299 -1.5132 13300 0.184 13301 0.153142 13302 -1.45149 13303 0.184 13304 0.184 13305 0.153142 13306 0.184 13307 -1.20462 13308 -1.20462 13309 -1.11205 13310 0.184 13311 0.184 13312 0.184 13313 0.153142 13314 0.184 13315 0.184 13316 0.153142 13317 0.153142 13318 0.184 13319 0.153142 13320 0.184 13321 0.153142 13322 0.184 13323 0.184 13324 0.184 13325 0.184 13326 0.153142 13327 -3.45728 13328 0.184 13329 0.153142 13330 0.184 13331 -1.2972 13332 0.153142 13333 0.184 13334 -1.14291 13335 0.153142 13336 0.184 13337 0.153142 13338 0.153142 13339 -1.5132 13340 0.184 13341 0.184 13342 0.184 13343 0.153142 13344 -1.11205 13345 0.184 13346 0.153142 13347 0.153142 13348 0.153142 13349 0.184 13350 0.184 13351 0.153142 13352 0.153142 13353 0.184 13354 -1.45149 13355 0.184 13356 0.184 13357 0.184 13358 0.153142 13359 -1.38977 13360 0.153142 13361 0.184 13362 0.184 13363 0.153142 13364 0.184 13365 0.184 13366 0.153142 13367 0.184 13368 0.153142 13369 0.184 13370 0.153142 13371 0.184 13372 0.184 13373 0.153142 13374 0.153142 13375 0.184 13376 0.153142 13377 0.153142 13378 0.184 13379 0.153142 13380 -1.14291 13381 0.153142 13382 0.184 13383 0.184 13384 0.153142 13385 0.184 13386 0.184 13387 0.184 13388 0.153142 13389 0.153142 13390 0.153142 13391 0.153142 13392 0.184 13393 0.184 13394 0.153142 13395 0.153142 13396 0.153142 13397 0.184 13398 0.184 13399 0.184 13400 -1.14291 13401 0.153142 13402 0.184 13403 0.153142 13404 0.184 13405 -1.20462 13406 0.184 13407 0.153142 13408 0.184 13409 0.184 13410 0.153142 13411 0.184 13412 0.184 13413 0.153142 13414 0.184 13415 0.184 13416 0.184 13417 0.184 13418 0.184 13419 0.153142 13420 0.184 13421 0.153142 13422 0.153142 13423 -1.2972 13424 -1.17376 13425 0.184 13426 0.153142 13427 -1.42063 13428 0.153142 13429 0.153142 13430 0.184 13431 0.153142 13432 0.184 13433 0.153142 13434 0.184 13435 0.153142 13436 0.184 13437 0.184 13438 0.184 13439 0.153142 13440 0.153142 13441 -1.20462 13442 0.153142 13443 0.184 13444 0.153142 13445 0.184 13446 0.153142 13447 0.184 13448 0.184 13449 0.184 13450 0.184 13451 0.153142 13452 0.184 13453 0.153142 13454 0.153142 13455 0.153142 13456 0.184 13457 0.153142 13458 0.184 13459 0.184 13460 0.153142 13461 0.184 13462 0.153142 13463 0.153142 13464 0.184 13465 0.153142 13466 -1.5132 13467 0.184 13468 0.184 13469 0.184 13470 0.184 13471 0.153142 13472 -1.2972 13473 0.153142 13474 0.153142 13475 0.184 13476 0.184 13477 0.153142 13478 0.184 13479 0.153142 13480 0.184 13481 -1.38977 13482 0.184 13483 0.184 13484 0.184 13485 0.153142 13486 -1.11205 
13487 0.184 13488 0.153142 13489 0.184 13490 0.184 13491 0.153142 13492 0.184 13493 0.184 13494 0.153142 13495 0.184 13496 0.153142 13497 0.184 13498 0.184 13499 0.184 13500 0.153142 13501 -1.23548 13502 0.184 13503 -1.11205 13504 -1.23548 13505 0.184 13506 0.153142 13507 0.184 13508 -1.17376 13509 0.184 13510 0.184 13511 0.184 13512 0.0914252 13513 0.0914252 13514 -1.45149 13515 0.184 13516 0.184 13517 0.153142 13518 0.153142 13519 0.153142 13520 -1.26634 13521 0.184 13522 0.153142 13523 0.184 13524 0.153142 13525 0.184 13526 0.184 13527 0.184 13528 0.184 13529 -1.11205 13530 0.184 13531 0.153142 13532 0.184 13533 0.184 13534 0.153142 13535 0.184 13536 0.153142 13537 -1.23548 13538 -1.11205 13539 0.184 13540 0.153142 13541 0.184 13542 0.153142 13543 -1.20462 13544 -1.57492 13545 0.184 13546 0.153142 13547 0.184 13548 0.153142 13549 -1.57492 13550 0.184 13551 0.184 13552 0.184 13553 0.184 13554 0.184 13555 0.153142 13556 0.184 13557 0.184 13558 0.153142 13559 0.184 13560 0.153142 13561 0.184 13562 0.184 13563 0.153142 13564 0.184 13565 0.184 13566 0.184 13567 0.153142 13568 0.153142 13569 0.184 13570 -1.2972 13571 -1.20462 13572 0.184 13573 0.184 13574 0.153142 13575 0.153142 13576 0.184 13577 -1.20462 13578 0.153142 13579 0.153142 13580 0.184 13581 0.184 13582 0.153142 13583 0.184 13584 0.153142 13585 0.153142 13586 0.153142 13587 0.184 13588 0.184 13589 0.184 13590 0.153142 13591 0.184 13592 0.184 13593 0.153142 13594 0.184 13595 -1.5132 13596 0.184 13597 0.153142 13598 0.184 13599 0.153142 13600 0.184 13601 0.184 13602 0.153142 13603 0.184 13604 0.153142 13605 0.184 13606 0.184 13607 0.153142 13608 0.184 13609 0.153142 13610 0.184 13611 -1.38977 13612 0.184 13613 -1.20462 13614 0.153142 13615 0.184 13616 0.184 13617 0.184 13618 0.153142 13619 0.184 13620 0.184 13621 0.153142 13622 0.184 13623 0.184 13624 0.184 13625 0.153142 13626 0.184 13627 -1.11205 13628 0.184 13629 0.153142 13630 0.184 13631 0.153142 13632 -1.11205 13633 0.184 13634 0.184 13635 0.153142 13636 0.153142 13637 0.153142 13638 0.153142 13639 0.184 13640 0.153142 13641 0.184 13642 0.184 13643 0.184 13644 0.184 13645 -1.5132 13646 -1.20462 13647 0.153142 13648 0.184 13649 0.184 13650 0.153142 13651 0.184 13652 0.153142 13653 0.153142 13654 0.153142 13655 -1.20462 13656 0.184 13657 0.122283 13658 -1.26634 13659 0.184 13660 -1.2972 13661 0.184 13662 -1.20462 13663 0.153142 13664 -1.20462 13665 0.184 13666 0.184 13667 0.184 13668 -1.14291 13669 0.153142 13670 0.153142 13671 0.184 13672 0.153142 13673 0.153142 13674 0.153142 13675 0.153142 13676 0.184 13677 0.184 13678 0.153142 13679 -1.11205 13680 -1.14291 13681 0.184 13682 0.153142 13683 0.184 13684 -1.11205 13685 -1.45149 13686 0.184 13687 0.184 13688 0.184 13689 0.184 13690 0.153142 13691 0.153142 13692 0.184 13693 0.184 13694 -1.60578 13695 -1.63664 13696 0.184 13697 0.153142 13698 0.184 13699 -1.42063 13700 0.153142 13701 0.184 13702 0.153142 13703 0.153142 13704 0.184 13705 0.184 13706 0.184 13707 0.153142 13708 0.184 13709 0.184 13710 -1.2972 13711 0.184 13712 0.153142 13713 0.184 13714 0.184 13715 0.153142 13716 0.184 13717 0.184 13718 0.184 13719 0.184 13720 0.153142 13721 0.184 13722 0.153142 13723 0.184 13724 0.153142 13725 0.184 13726 0.184 13727 0.153142 13728 0.153142 13729 0.153142 13730 0.184 13731 -1.2972 13732 0.153142 13733 0.153142 13734 0.153142 13735 0.184 13736 0.153142 13737 -1.11205 13738 0.153142 13739 0.184 13740 0.153142 13741 0.153142 13742 0.184 13743 0.184 13744 0.153142 13745 0.153142 13746 0.184 13747 0.184 13748 -1.20462 13749 -1.11205 13750 
0.184 13751 -1.11205 13752 0.153142 13753 0.184 13754 0.184 13755 0.184 13756 -2.90183 13757 -1.26634 13758 0.153142 13759 0.184 13760 0.184 13761 0.184 13762 0.184 13763 0.184 13764 0.153142 13765 0.184 13766 0.184 13767 0.184 13768 0.153142 13769 0.153142 13770 0.184 13771 0.153142 13772 0.184 13773 0.153142 13774 0.184 13775 0.153142 13776 0.0914252 13777 0.184 13778 0.184 13779 0.184 13780 0.184 13781 0.153142 13782 0.184 13783 0.153142 13784 0.184 13785 0.184 13786 0.153142 13787 0.184 13788 0.184 13789 0.153142 13790 0.184 13791 0.184 13792 0.184 13793 0.153142 13794 0.184 13795 0.184 13796 0.184 13797 -1.5132 13798 0.153142 13799 0.153142 13800 0.153142 13801 0.184 13802 0.153142 13803 0.184 13804 0.184 13805 0.153142 13806 0.184 13807 0.184 13808 0.184 13809 0.184 13810 0.153142 13811 -1.26634 13812 0.184 13813 0.153142 13814 0.153142 13815 0.184 13816 0.153142 13817 -1.45149 13818 0.153142 13819 0.184 13820 0.153142 13821 -1.42063 13822 0.184 13823 -1.23548 13824 0.184 13825 0.153142 13826 0.184 13827 0.153142 13828 -1.11205 13829 0.184 13830 0.184 13831 0.153142 13832 0.184 13833 -1.20462 13834 0.153142 13835 -1.14291 13836 0.153142 13837 0.184 13838 -1.2972 13839 0.184 13840 0.153142 13841 -1.38977 13842 0.184 13843 0.153142 13844 0.184 13845 0.184 13846 0.153142 13847 -1.6675 13848 0.184 13849 0.153142 13850 0.184 13851 0.184 13852 0.184 13853 -1.2972 13854 0.153142 13855 0.153142 13856 0.153142 13857 0.184 13858 -1.42063 13859 -1.38977 13860 0.153142 13861 -1.20462 13862 -1.38977 13863 -1.38977 13864 0.153142 13865 0.153142 13866 -1.2972 13867 0.153142 13868 -1.38977 13869 -1.45149 13870 0.153142 13871 0.184 13872 0.153142 13873 -1.20462 13874 0.153142 13875 0.184 13876 0.184 13877 0.153142 13878 0.184 13879 0.184 13880 0.184 13881 0.184 13882 0.153142 13883 0.184 13884 0.184 13885 0.153142 13886 0.184 13887 0.153142 13888 0.153142 13889 -1.57492 13890 0.153142 13891 0.184 13892 0.153142 13893 0.184 13894 0.184 13895 0.184 13896 -1.5132 13897 0.184 13898 -1.20462 13899 0.153142 13900 0.184 13901 0.153142 13902 0.184 13903 0.153142 13904 0.184 13905 -1.42063 13906 0.153142 13907 -1.20462 13908 0.153142 13909 0.153142 13910 0.153142 13911 -1.20462 13912 -1.45149 13913 -1.38977 13914 0.153142 13915 0.184 13916 0.153142 13917 0.153142 13918 0.184 13919 0.184 13920 -1.2972 13921 0.184 13922 0.153142 13923 0.153142 13924 0.184 13925 0.184 13926 0.184 13927 0.184 13928 0.153142 13929 0.184 13930 0.153142 13931 0.184 13932 0.184 13933 0.184 13934 0.184 13935 -1.11205 13936 0.184 13937 0.184 13938 -1.20462 13939 0.184 13940 0.184 13941 0.184 13942 -1.38977 13943 -1.23548 13944 0.184 13945 0.184 13946 0.184 13947 0.184 13948 0.184 13949 0.153142 13950 0.153142 13951 0.184 13952 0.153142 13953 0.184 13954 0.153142 13955 0.184 13956 -1.11205 13957 0.153142 13958 -1.11205 13959 0.184 13960 0.184 13961 0.184 13962 0.184 13963 0.153142 13964 0.184 13965 0.184 13966 0.153142 13967 0.153142 13968 0.184 13969 0.153142 13970 0.153142 13971 0.184 13972 0.184 13973 -1.38977 13974 0.184 13975 0.153142 13976 0.184 13977 0.153142 13978 0.184 13979 0.153142 13980 -1.17376 13981 0.153142 13982 -3.61157 13983 0.184 13984 0.153142 13985 0.184 13986 0.184 13987 0.153142 13988 0.184 13989 0.184 13990 0.184 13991 -1.39265 13992 0.154252 13993 0.184 13994 0.184 13995 0.154252 13996 0.184 13997 0.184 13998 0.184 13999 0.154252 14000 0.154252 14001 0.154252 14002 0.184 14003 0.184 14004 0.154252 14005 0.184 14006 0.184 14007 0.184 14008 0.184 14009 0.184 14010 0.154252 14011 0.184 14012 0.154252 14013 0.184 
14014 0.184 14015 0.184 14016 -1.39265 14017 0.154252 14018 0.184 14019 0.184 14020 0.184 14021 0.154252 14022 0.184 14023 0.184 14024 0.154252 14025 0.154252 14026 0.154252 14027 0.154252 14028 -1.42239 14029 0.154252 14030 0.154252 14031 0.154252 14032 0.184 14033 0.154252 14034 0.184 14035 0.184 14036 0.184 14037 0.154252 14038 0.184 14039 0.184 14040 0.184 14041 0.154252 14042 0.154252 14043 0.184 14044 0.184 14045 0.154252 14046 0.184 14047 0.184 14048 0.184 14049 0.154252 14050 0.184 14051 0.154252 14052 0.154252 14053 0.184 14054 0.184 14055 0.154252 14056 0.184 14057 0.184 14058 0.154252 14059 0.154252 14060 0.184 14061 0.154252 14062 -1.21416 14063 0.184 14064 0.154252 14065 0.154252 14066 0.184 14067 0.184 14068 0.184 14069 0.154252 14070 0.184 14071 0.184 14072 0.154252 14073 0.184 14074 0.184 14075 0.184 14076 0.154252 14077 0.184 14078 0.184 14079 0.184 14080 0.154252 14081 0.184 14082 0.154252 14083 0.184 14084 0.154252 14085 0.154252 14086 0.184 14087 -2.76106 14088 0.154252 14089 0.184 14090 0.154252 14091 0.154252 14092 0.184 14093 0.154252 14094 0.184 14095 0.184 14096 -1.45214 14097 0.154252 14098 0.154252 14099 0.184 14100 0.154252 14101 -1.15466 14102 -1.18441 14103 0.154252 14104 0.154252 14105 0.184 14106 0.154252 14107 0.184 14108 0.154252 14109 -1.21416 14110 0.184 14111 0.154252 14112 0.154252 14113 0.184 14114 -1.45214 14115 0.154252 14116 0.184 14117 -1.12491 14118 0.184 14119 0.184 14120 0.154252 14121 0.154252 14122 0.184 14123 0.154252 14124 0.184 14125 0.184 14126 0.154252 14127 0.184 14128 0.184 14129 0.154252 14130 0.154252 14131 0.154252 14132 -1.45214 14133 0.184 14134 0.154252 14135 0.184 14136 0.184 14137 0.184 14138 0.154252 14139 0.154252 14140 0.184 14141 0.184 14142 0.184 14143 0.184 14144 0.184 14145 0.154252 14146 0.154252 14147 -1.39265 14148 0.154252 14149 0.184 14150 0.184 14151 0.184 14152 0.154252 14153 0.184 14154 0.154252 14155 0.184 14156 0.154252 14157 0.184 14158 0.184 14159 0.154252 14160 0.154252 14161 0.184 14162 0.154252 14163 0.184 14164 0.154252 14165 0.154252 14166 0.154252 14167 0.154252 14168 0.154252 14169 0.154252 14170 0.154252 14171 0.154252 14172 0.184 14173 0.154252 14174 0.184 14175 0.154252 14176 0.184 14177 0.184 14178 0.184 14179 0.184 14180 0.184 14181 0.154252 14182 0.184 14183 0.154252 14184 0.184 14185 0.154252 14186 0.184 14187 0.154252 14188 0.184 14189 0.154252 14190 0.184 14191 0.154252 14192 0.184 14193 0.154252 14194 0.154252 14195 0.154252 14196 -1.27365 14197 0.184 14198 0.154252 14199 0.154252 14200 0.154252 14201 0.154252 14202 0.154252 14203 -1.42239 14204 0.184 14205 0.154252 14206 0.184 14207 0.184 14208 0.184 14209 0.154252 14210 0.184 14211 0.154252 14212 0.184 14213 0.154252 14214 0.154252 14215 0.154252 14216 0.154252 14217 0.184 14218 0.184 14219 0.154252 14220 0.184 14221 0.184 14222 0.184 14223 0.154252 14224 -1.21416 14225 0.184 14226 0.154252 14227 0.184 14228 0.184 14229 0.154252 14230 0.154252 14231 0.184 14232 0.154252 14233 0.184 14234 0.184 14235 0.154252 14236 0.184 14237 0.184 14238 0.184 14239 0.184 14240 0.184 14241 0.154252 14242 0.184 14243 0.184 14244 0.154252 14245 0.184 14246 0.154252 14247 0.184 14248 0.154252 14249 0.184 14250 0.184 14251 -1.63063 14252 0.184 14253 -1.21416 14254 0.154252 14255 0.184 14256 0.154252 14257 0.184 14258 0.154252 14259 0.184 14260 0.184 14261 0.154252 14262 0.184 14263 0.184 14264 0.184 14265 0.154252 14266 0.184 14267 0.154252 14268 -1.27365 14269 0.154252 14270 -1.39265 14271 0.184 14272 0.184 14273 0.184 14274 0.154252 14275 0.184 14276 0.154252 
14277 0.184 14278 0.154252 14279 0.154252 14280 0.154252 14281 -1.12491 14282 -1.21416 14283 0.184 14284 0.184 14285 0.154252 14286 0.184 14287 0.154252 14288 0.184 14289 0.184 14290 0.154252 14291 0.184 14292 0.184 14293 0.184 14294 0.154252 14295 -1.60088 14296 0.184 14297 0.154252 14298 0.154252 14299 0.184 14300 0.154252 14301 0.184 14302 0.184 14303 0.184 14304 0.184 14305 0.184 14306 0.184 14307 0.154252 14308 -1.12491 14309 0.154252 14310 0.184 14311 0.184 14312 0.184 14313 0.154252 14314 0.154252 14315 0.184 14316 -1.21416 14317 -1.12491 14318 0.154252 14319 0.184 14320 0.184 14321 0.154252 14322 0.154252 14323 0.184 14324 0.154252 14325 0.184 14326 0.184 14327 0.184 14328 0.184 14329 0.184 14330 0.154252 14331 0.184 14332 0.184 14333 0.154252 14334 0.184 14335 0.184 14336 -1.3034 14337 0.154252 14338 0.184 14339 0.154252 14340 0.184 14341 0.154252 14342 0.184 14343 0.154252 14344 0.184 14345 0.154252 14346 0.184 14347 0.184 14348 0.184 14349 0.184 14350 -1.27365 14351 0.154252 14352 0.184 14353 0.154252 14354 0.184 14355 -1.21416 14356 0.154252 14357 0.154252 14358 0.154252 14359 0.184 14360 0.184 14361 0.154252 14362 0.154252 14363 0.184 14364 0.184 14365 0.154252 14366 0.184 14367 0.184 14368 0.154252 14369 0.184 14370 0.154252 14371 0.184 14372 0.184 14373 -1.21416 14374 0.154252 14375 0.184 14376 0.154252 14377 0.184 14378 -1.18441 14379 0.154252 14380 0.184 14381 0.184 14382 0.154252 14383 0.154252 14384 -1.45214 14385 0.154252 14386 0.154252 14387 0.154252 14388 0.154252 14389 0.154252 14390 0.154252 14391 0.154252 14392 0.184 14393 0.0947559 14394 0.0947559 14395 0.0947559 14396 0.0947559 14397 0.0947559 14398 0.0947559 14399 0.0947559 14400 0.0947559 14401 0.0947559 14402 0.0947559 14403 0.0947559 14404 0.0947559 14405 0.0947559 14406 0.0947559 14407 0.0947559 14408 0.0947559 14409 0.0947559 14410 0.0947559 14411 0.0947559 14412 0.0947559 14413 0.0947559 14414 0.0947559 14415 0.0947559 14416 0.0947559 14417 0.0947559 14418 0.0947559 14419 0.0947559 14420 0.0947559 14421 0.0947559 14422 0.0947559 14423 0.0947559 14424 0.0947559 14425 0.0947559 14426 0.0947559 14427 0.0947559 14428 -1.77937 14429 0.0947559 14430 0.0947559 14431 0.0947559 14432 0.0947559 14433 0.0947559 14434 0.0947559 14435 0.0947559 14436 -1.98761 14437 0.0947559 14438 0.0947559 14439 0.0947559 14440 0.0947559 14441 0.0947559 14442 0.0947559 14443 0.0947559 14444 0.0947559 14445 0.0947559 14446 0.0947559 14447 0.0947559 14448 0.0947559 14449 0.0947559 14450 0.0947559 14451 0.0947559 14452 0.0947559 14453 0.0947559 14454 0.0947559 14455 0.0947559 14456 0.0947559 14457 0.0947559 14458 0.0947559 14459 0.0947559 14460 0.0947559 14461 0.0947559 14462 0.0947559 14463 0.0947559 14464 0.0947559 14465 -2.0471 14466 0.0947559 14467 0.0947559 14468 0.0947559 14469 -1.95786 14470 0.0947559 14471 0.0947559 14472 0.0947559 14473 0.0947559 14474 0.0947559 14475 0.0947559 14476 0.0947559 14477 0.0947559 14478 0.0947559 14479 -1.83887 14480 0.0947559 14481 0.0947559 14482 0.0947559 14483 0.0947559 14484 0.0947559 14485 0.0947559 14486 -1.71987 14487 0.0947559 14488 0.0947559 14489 0.0947559 14490 0.0947559 14491 0.0947559 14492 0.0947559 14493 0.0947559 14494 0.0947559 14495 0.0947559 14496 0.0947559 14497 0.0947559 14498 0.0947559 14499 0.0947559 14500 0.0947559 14501 0.0947559 14502 0.0947559 14503 0.0947559 14504 0.0947559 14505 0.0947559 14506 0.0947559 14507 0.0947559 14508 0.0947559 14509 0.0947559 14510 0.0947559 14511 0.0947559 14512 0.0947559 14513 0.0947559 14514 0.0947559 14515 0.0947559 14516 0.0947559 14517 
0.0947559 14518 0.0947559 14519 0.0947559 14520 0.0947559 14521 0.0947559 14522 0.0947559 14523 0.0947559 14524 0.0947559 14525 0.0947559 14526 0.0947559 14527 0.0947559 14528 0.0947559 14529 0.0947559 14530 0.0947559 14531 0.184 14532 0.154252 14533 0.184 14534 0.154252 14535 0.184 14536 0.184 14537 0.184 14538 0.184 14539 0.154252 14540 0.184 14541 0.154252 14542 -1.39265 14543 0.184 14544 0.184 14545 0.184 14546 0.154252 14547 0.184 14548 0.184 14549 0.184 14550 0.154252 14551 0.154252 14552 0.184 14553 0.184 14554 0.184 14555 0.184 14556 0.154252 14557 0.184 14558 0.184 14559 0.154252 14560 0.154252 14561 0.184 14562 -1.45214 14563 0.154252 14564 0.184 14565 -1.27365 14566 0.154252 14567 0.184 14568 0.184 14569 0.154252 14570 0.184 14571 0.154252 14572 0.184 14573 0.184 14574 -1.39265 14575 0.184 14576 0.154252 14577 -1.21416 14578 0.154252 14579 0.184 14580 0.154252 14581 0.184 14582 0.184 14583 0.154252 14584 0.184 14585 0.184 14586 0.154252 14587 0.154252 14588 0.184 14589 0.184 14590 -1.51164 14591 0.154252 14592 0.184 14593 0.154252 14594 0.184 14595 0.184 14596 0.184 14597 -1.27365 14598 0.184 14599 0.154252 14600 0.184 14601 0.184 14602 0.184 14603 0.154252 14604 -1.18441 14605 0.184 14606 0.184 14607 0.154252 14608 0.154252 14609 0.184 14610 -1.18441 14611 0.154252 14612 0.184 14613 0.184 14614 0.154252 14615 0.184 14616 0.154252 14617 -3.41551 14618 0.154252 14619 0.184 14620 0.184 14621 -1.21416 14622 0.184 14623 -1.27365 14624 0.154252 14625 0.154252 14626 0.184 14627 0.154252 14628 0.184 14629 0.184 14630 0.154252 14631 0.184 14632 0.184 14633 0.184 14634 0.154252 14635 0.184 14636 0.154252 14637 -1.12491 14638 0.154252 14639 0.184 14640 0.184 14641 -1.42239 14642 0.154252 14643 0.184 14644 0.154252 14645 0.184 14646 0.154252 14647 0.184 14648 0.154252 14649 0.184 14650 0.184 14651 0.154252 14652 0.154252 14653 0.184 14654 -1.12491 14655 0.184 14656 0.184 14657 0.184 14658 0.184 14659 0.154252 14660 0.154252 14661 0.184 14662 0.154252 14663 0.184 14664 0.154252 14665 -1.12491 14666 0.184 14667 0.184 14668 0.154252 14669 0.184 14670 0.154252 14671 0.184 14672 0.184 14673 0.184 14674 0.184 14675 0.184 14676 0.184 14677 0.184 14678 0.154252 14679 0.154252 14680 0.184 14681 0.184 14682 0.154252 14683 0.184 14684 0.154252 14685 0.184 14686 0.154252 14687 0.154252 14688 0.184 14689 0.184 14690 0.154252 14691 0.184 14692 0.184 14693 0.184 14694 0.184 14695 0.184 14696 0.184 14697 0.154252 14698 0.154252 14699 -1.12491 14700 0.184 14701 0.154252 14702 0.154252 14703 0.154252 14704 0.184 14705 0.184 14706 0.184 14707 0.154252 14708 0.184 14709 0.154252 14710 0.184 14711 0.184 14712 0.154252 14713 0.184 14714 0.184 14715 0.154252 14716 0.184 14717 -1.15466 14718 0.124504 14719 0.154252 14720 0.154252 14721 0.154252 14722 0.184 14723 0.184 14724 0.154252 14725 0.184 14726 0.154252 14727 0.184 14728 0.184 14729 0.184 14730 0.154252 14731 0.184 14732 0.154252 14733 0.184 14734 0.184 14735 0.184 14736 0.154252 14737 0.184 14738 0.184 14739 0.154252 14740 0.184 14741 0.184 14742 0.184 14743 0.184 14744 0.154252 14745 0.184 14746 0.184 14747 -1.12491 14748 0.184 14749 0.184 14750 0.184 14751 0.154252 14752 0.184 14753 0.154252 14754 0.184 14755 0.184 14756 0.154252 14757 0.184 14758 0.184 14759 0.184 14760 0.154252 14761 0.154252 14762 0.184 14763 0.184 14764 0.154252 14765 0.184 14766 0.154252 14767 0.184 14768 0.154252 14769 0.184 14770 0.154252 14771 0.184 14772 0.154252 14773 0.184 14774 0.184 14775 0.184 14776 0.184 14777 0.184 14778 0.184 14779 0.184 14780 0.154252 14781 0.154252 
14782 0.154252 14783 0.184 14784 0.184 14785 0.184 14786 0.154252 14787 0.184 14788 0.184 14789 0.154252 14790 0.184 14791 0.184 14792 -1.21416 14793 0.184 14794 0.184 14795 0.184 14796 0.184 14797 0.184 14798 0.184 14799 0.154252 14800 0.184 14801 0.154252 14802 0.184 14803 0.184 14804 0.154252 14805 0.184 14806 0.184 14807 0.184 14808 0.184 14809 0.184 14810 -1.21416 14811 0.184 14812 0.184 14813 0.154252 14814 0.184 14815 0.184 14816 0.154252 14817 0.154252 14818 0.154252 14819 0.184 14820 0.184 14821 0.184 14822 0.154252 14823 0.184 14824 0.154252 14825 0.184 14826 0.154252 14827 0.154252 14828 0.184 14829 0.184 14830 0.184 14831 0.154252 14832 0.184 14833 0.184 14834 0.154252 14835 0.184 14836 0.184 14837 0.184 14838 0.154252 14839 0.154252 14840 0.154252 14841 0.154252 14842 0.184 14843 0.184 14844 0.184 14845 0.154252 14846 0.184 14847 0.154252 14848 0.184 14849 0.154252 14850 0.184 14851 0.154252 14852 0.184 14853 0.184 14854 0.184 14855 0.154252 14856 0.184 14857 0.184 14858 0.154252 14859 0.154252 14860 0.184 14861 0.184 14862 0.154252 14863 0.184 14864 0.184 14865 0.184 14866 0.154252 14867 0.154252 14868 0.154252 14869 0.184 14870 0.184 14871 0.184 14872 0.154252 14873 0.184 14874 -1.12491 14875 -1.39265 14876 0.154252 14877 0.184 14878 0.184 14879 0.184 14880 0.184 14881 0.184 14882 0.154252 14883 -1.3034 14884 0.184 14885 0.184 14886 0.184 14887 0.184 14888 0.184 14889 -2.76106 14890 0.184 14891 0.154252 14892 0.184 14893 0.154252 14894 0.184 14895 0.154252 14896 0.154252 14897 0.154252 14898 0.154252 14899 0.154252 14900 0.154252 14901 0.154252 14902 0.184 14903 0.154252 14904 0.184 14905 0.184 14906 0.154252 14907 0.154252 14908 0.184 14909 0.154252 14910 0.154252 14911 0.154252 14912 0.154252 14913 0.154252 14914 0.184 14915 0.184 14916 0.184 14917 0.154252 14918 0.184 14919 0.184 14920 0.154252 14921 -1.42239 14922 0.154252 14923 -1.12491 14924 0.154252 14925 0.154252 14926 0.154252 14927 0.154252 14928 0.154252 14929 0.184 14930 0.154252 14931 0.154252 14932 -3.594 14933 0.184 14934 -1.45214 14935 0.184 14936 0.184 14937 -1.18441 14938 0.184 14939 0.184 14940 0.184 14941 0.184 14942 0.184 14943 0.154252 14944 0.184 14945 0.184 14946 0.184 14947 -1.12491 14948 0.184 14949 0.184 14950 0.184 14951 0.184 14952 0.154252 14953 0.184 14954 -1.21416 14955 0.184 14956 0.184 14957 -1.21416 14958 0.184 14959 0.184 14960 0.154252 14961 0.184 14962 0.184 14963 -1.3034 14964 0.154252 14965 0.184 14966 0.184 14967 0.154252 14968 0.154252 14969 0.184 14970 0.184 14971 -1.21416 14972 0.154252 14973 0.154252 14974 0.184 14975 0.184 14976 0.184 14977 0.184 14978 0.184 14979 0.154252 14980 0.184 14981 0.154252 14982 0.154252 14983 0.184 14984 0.154252 14985 0.184 14986 0.184 14987 0.154252 14988 0.184 14989 0.184 14990 0.154252 14991 0.154252 14992 0.184 14993 0.154252 14994 0.154252 14995 0.184 14996 0.154252 14997 0.154252 14998 0.184 14999 0.154252 15000 0.184 15001 0.154252 15002 0.154252 15003 0.184 15004 0.184 15005 0.184 15006 0.154252 15007 0.184 15008 0.154252 15009 0.154252 15010 0.184 15011 0.154252 15012 -1.21416 15013 0.184 15014 0.131134 15015 -1.13765 15016 0.184 15017 0.184 15018 0.184 15019 0.131134 15020 0.184 15021 0.184 15022 0.131134 15023 0.131134 15024 0.184 15025 0.184 15026 0.184 15027 0.131134 15028 0.184 15029 0.184 15030 0.131134 15031 0.184 15032 0.184 15033 0.184 15034 0.131134 15035 0.184 15036 0.131134 15037 0.184 15038 0.131134 15039 0.184 15040 0.131134 15041 0.184 15042 0.184 15043 0.131134 15044 0.131134 15045 0.184 15046 0.184 15047 -1.21695 15048 0.184 
15049 0.131134 15050 0.131134 15051 0.131134 15052 0.184 15053 0.184 15054 0.184 15055 0.131134 15056 -1.11122 15057 0.131134 15058 0.184 15059 0.131134 15060 0.184 15061 0.131134 15062 0.184 15063 0.184 15064 0.184 15065 0.184 15066 0.131134 15067 0.184 15068 0.131134 15069 0.131134 15070 0.184 15071 0.131134 15072 0.184 15073 0.131134 15074 0.131134 15075 0.184 15076 0.131134 15077 0.184 15078 0.131134 15079 0.184 15080 0.131134 15081 0.184 15082 0.131134 15083 -1.45485 15084 0.184 15085 -2.82937 15086 0.184 15087 0.131134 15088 0.184 15089 0.184 15090 -1.42842 15091 0.184 15092 0.131134 15093 0.184 15094 -1.16409 15095 0.184 15096 0.131134 15097 0.184 15098 0.184 15099 0.184 15100 0.131134 15101 0.184 15102 0.131134 15103 0.184 15104 0.184 15105 0.184 15106 0.184 15107 0.131134 15108 -1.87778 15109 0.104701 15110 0.184 15111 0.184 15112 0.184 15113 -1.21695 15114 0.131134 15115 0.184 15116 0.131134 15117 0.184 15118 0.131134 15119 0.184 15120 0.131134 15121 0.184 15122 0.184 15123 0.184 15124 0.184 15125 0.131134 15126 0.184 15127 0.184 15128 0.184 15129 0.184 15130 0.131134 15131 0.184 15132 0.184 15133 0.131134 15134 0.131134 15135 0.184 15136 0.184 15137 0.184 15138 0.184 15139 0.131134 15140 0.184 15141 -1.21695 15142 0.184 15143 0.131134 15144 0.184 15145 -1.21695 15146 0.131134 15147 0.131134 15148 0.184 15149 0.184 15150 0.184 15151 0.131134 15152 0.184 15153 0.131134 15154 0.131134 15155 0.131134 15156 0.184 15157 0.184 15158 0.131134 15159 -1.11122 15160 -1.21695 15161 0.184 15162 0.131134 15163 0.184 15164 0.184 15165 0.184 15166 0.131134 15167 0.184 15168 0.131134 15169 0.184 15170 0.131134 15171 0.184 15172 -1.29625 15173 0.131134 15174 0.131134 15175 0.184 15176 0.184 15177 0.184 15178 -1.40198 15179 0.184 15180 0.131134 15181 0.184 15182 0.184 15183 0.184 15184 0.131134 15185 0.184 15186 0.184 15187 0.131134 15188 0.184 15189 -1.21695 15190 -1.11122 15191 0.184 15192 0.184 15193 0.184 15194 -1.42842 15195 0.184 15196 0.131134 15197 0.184 15198 0.131134 15199 0.131134 15200 0.184 15201 0.131134 15202 0.184 15203 0.131134 15204 -3.173 15205 0.131134 15206 0.184 15207 -1.11122 15208 0.184 15209 0.184 15210 -1.50772 15211 0.131134 15212 0.184 15213 0.131134 15214 -1.21695 15215 0.131134 15216 0.184 15217 0.131134 15218 0.184 15219 -1.11122 15220 0.131134 15221 0.184 15222 0.184 15223 -1.29625 15224 -1.45485 15225 -1.11122 15226 0.184 15227 0.131134 15228 0.184 15229 0.184 15230 0.184 15231 0.131134 15232 0.184 15233 0.184 15234 0.184 15235 0.131134 15236 0.184 15237 0.184 15238 0.131134 15239 -1.16409 15240 -1.11122 15241 0.184 15242 0.184 15243 0.184 15244 -1.45485 15245 0.184 15246 0.131134 15247 0.184 15248 0.184 15249 -1.11122 15250 0.184 15251 0.184 15252 0.131134 15253 0.184 15254 0.184 15255 0.184 15256 0.131134 15257 0.184 15258 0.131134 15259 0.184 15260 0.184 15261 0.131134 15262 0.184 15263 0.184 15264 0.131134 15265 0.184 15266 -1.11122 15267 -1.21695 15268 -1.29625 15269 -1.21695 15270 0.184 15271 -1.56058 15272 0.0782677 15273 0.104701 15274 -1.40198 15275 0.131134 15276 0.131134 15277 0.184 15278 0.184 15279 0.184 15280 0.184 15281 0.131134 15282 0.131134 15283 0.131134 15284 0.184 15285 0.131134 15286 0.184 15287 0.184 15288 0.184 15289 0.184 15290 0.184 15291 0.131134 15292 0.184 15293 0.131134 15294 0.184 15295 0.131134 15296 0.184 15297 0.184 15298 0.131134 15299 0.184 15300 0.131134 15301 0.184 15302 0.184 15303 0.184 15304 0.131134 15305 0.184 15306 0.184 15307 0.131134 15308 0.131134 15309 0.184 15310 0.184 15311 0.184 15312 0.131134 15313 0.184 15314 
-1.29625 15315 0.184 15316 0.184 15317 0.131134 15318 0.184 15319 0.131134 15320 0.184 15321 0.131134 15322 0.184 15323 0.131134 15324 0.184 15325 0.184 15326 0.131134 15327 0.131134 15328 0.184 15329 0.184 15330 0.184 15331 0.131134 15332 0.131134 15333 0.131134 15334 0.131134 15335 0.184 15336 0.184 15337 0.131134 15338 0.184 15339 0.184 15340 0.184 15341 0.184 15342 0.131134 15343 0.184 15344 -1.13765 15345 0.184 15346 0.131134 15347 0.131134 15348 0.184 15349 0.131134 15350 0.131134 15351 0.184 15352 0.184 15353 0.131134 15354 0.184 15355 0.131134 15356 0.131134 15357 0.131134 15358 0.131134 15359 0.184 15360 0.184 15361 0.131134 15362 -1.21695 15363 0.131134 15364 0.184 15365 0.131134 15366 0.184 15367 0.184 15368 0.184 15369 0.184 15370 0.131134 15371 0.184 15372 0.184 15373 0.184 15374 0.184 15375 -1.21695 15376 -1.16409 15377 0.131134 15378 0.184 15379 0.184 15380 -1.11122 15381 -1.16409 15382 0.184 15383 -1.21695 15384 0.184 15385 -1.11122 15386 0.131134 15387 -1.42842 15388 -1.11122 15389 -1.21695 15390 -1.11122 15391 0.184 15392 0.131134 15393 0.184 15394 0.184 15395 0.184 15396 -1.21695 15397 0.131134 15398 0.184 15399 0.184 15400 0.184 15401 0.131134 15402 0.131134 15403 0.184 15404 0.131134 15405 0.184 15406 0.131134 15407 0.184 15408 0.184 15409 0.131134 15410 0.184 15411 0.131134 15412 0.184 15413 0.131134 15414 0.184 15415 0.131134 15416 0.184 15417 -1.29625 15418 0.184 15419 0.131134 15420 0.131134 15421 0.184 15422 -1.21695 15423 0.184 15424 0.131134 15425 0.184 15426 0.184 15427 0.131134 15428 0.184 15429 0.184 15430 0.184 15431 0.131134 15432 0.131134 15433 0.184 15434 0.131134 15435 0.131134 15436 0.184 15437 0.131134 15438 0.184 15439 0.131134 15440 0.131134 15441 0.184 15442 0.184 15443 0.184 15444 0.131134 15445 0.184 15446 -1.45485 15447 0.131134 15448 0.184 15449 0.131134 15450 0.184 15451 0.184 15452 0.184 15453 0.184 15454 0.131134 15455 0.184 15456 0.131134 15457 0.184 15458 0.184 15459 0.131134 15460 0.184 15461 0.184 15462 0.131134 15463 0.184 15464 0.131134 15465 0.184 15466 0.131134 15467 0.184 15468 0.131134 15469 0.131134 15470 0.131134 15471 0.131134 15472 0.131134 15473 0.131134 15474 0.131134 15475 0.131134 15476 0.131134 15477 0.184 15478 0.131134 15479 0.184 15480 0.131134 15481 0.184 15482 0.184 15483 0.131134 15484 0.184 15485 0.184 15486 0.184 15487 0.131134 15488 0.184 15489 0.184 15490 0.131134 15491 0.184 15492 -1.45485 15493 0.184 15494 0.184 15495 0.184 15496 0.131134 15497 0.184 15498 0.131134 15499 0.184 15500 0.184 15501 0.131134 15502 0.131134 15503 0.131134 15504 0.184 15505 0.184 15506 0.184 15507 0.131134 15508 -1.21695 15509 0.184 15510 0.184 15511 0.131134 15512 0.184 15513 -1.21695 15514 -1.11122 15515 0.184 15516 0.131134 15517 0.184 15518 0.0782677 15519 0.0782677 15520 -1.11122 15521 0.184 15522 0.184 15523 0.131134 15524 0.184 15525 0.131134 15526 0.184 15527 -1.21695 15528 0.131134 15529 0.131134 15530 0.184 15531 0.131134 15532 0.131134 15533 0.184 15534 0.131134 15535 0.131134 15536 0.131134 15537 0.184 15538 0.131134 15539 0.184 15540 0.184 15541 0.131134 15542 0.184 15543 0.131134 15544 0.184 15545 0.131134 15546 0.184 15547 -1.40198 15548 0.131134 15549 0.131134 15550 0.184 15551 0.131134 15552 0.184 15553 0.184 15554 0.131134 15555 0.184 15556 0.184 15557 0.184 15558 0.131134 15559 0.184 15560 -1.45485 15561 0.184 15562 0.184 15563 0.131134 15564 0.131134 15565 -1.40198 15566 -1.29625 15567 0.131134 15568 0.184 15569 0.131134 15570 0.184 15571 0.184 15572 0.131134 15573 -1.21695 15574 0.131134 15575 0.184 15576 0.184 
15577 0.131134 15578 0.184 15579 0.184 15580 0.131134 15581 0.184 15582 0.131134 15583 0.131134 15584 0.131134 15585 0.184 15586 0.184 15587 0.184 15588 0.131134 15589 0.184 15590 0.184 15591 0.184 15592 0.131134 15593 0.131134 15594 0.131134 15595 0.184 15596 0.184 15597 0.131134 15598 0.184 15599 0.184 15600 0.131134 15601 0.131134 15602 0.184 15603 0.184 15604 0.131134 15605 0.184 15606 0.184 15607 0.184 15608 0.131134 15609 0.131134 15610 -1.40198 15611 -1.21695 15612 0.184 15613 0.184 15614 0.184 15615 -1.45485 15616 0.184 15617 0.184 15618 0.184 15619 -1.21695 15620 0.184 15621 0.184 15622 -1.58702 15623 0.184 15624 0.184 15625 0.184 15626 0.184 15627 0.131134 15628 0.184 15629 0.131134 15630 0.184 15631 0.184 15632 0.131134 15633 0.184 15634 0.184 15635 0.131134 15636 0.184 15637 0.131134 15638 0.184 15639 -1.29625 15640 0.131134 15641 0.184 15642 0.184 15643 0.184 15644 -1.50772 15645 0.184 15646 -1.29625 15647 0.184 15648 0.184 15649 0.131134 15650 0.184 15651 0.184 15652 0.184 15653 -1.13765 15654 0.131134 15655 0.131134 15656 0.184 15657 0.184 15658 0.131134 15659 0.184 15660 0.184 15661 -1.45485 15662 0.131134 15663 0.184 15664 0.131134 15665 0.184 15666 0.184 15667 0.184 15668 0.184 15669 0.131134 15670 0.131134 15671 0.184 15672 0.184 15673 -1.16409 15674 0.184 15675 0.131134 15676 0.131134 15677 0.184 15678 0.184 15679 0.131134 15680 0.184 15681 0.184 15682 0.184 15683 0.184 15684 0.184 15685 -1.11122 15686 -1.40198 15687 0.184 15688 0.184 15689 0.131134 15690 0.184 15691 -1.82491 15692 0.104701 15693 0.0782677 15694 -1.56058 15695 0.104701 15696 0.0782677 15697 0.184 15698 0.184 15699 0.131134 15700 0.184 15701 0.184 15702 0.184 15703 0.184 15704 0.184 15705 0.131134 15706 0.184 15707 0.184 15708 0.184 15709 0.184 15710 0.131134 15711 0.184 15712 -1.21695 15713 0.131134 15714 0.131134 15715 -1.11122 15716 0.184 15717 0.131134 15718 -1.29625 15719 0.184 15720 0.184 15721 -1.45485 15722 0.184 15723 0.131134 15724 0.184 15725 0.184 15726 0.131134 15727 0.184 15728 0.131134 15729 0.184 15730 0.131134 15731 -1.11122 15732 0.184 15733 0.184 15734 0.184 15735 0.131134 15736 0.184 15737 0.184 15738 0.131134 15739 0.184 15740 0.184 15741 -1.11122 15742 -1.16409 15743 0.131134 15744 0.184 15745 0.184 15746 0.131134 15747 0.184 15748 0.184 15749 0.184 15750 -1.29625 15751 0.131134 15752 0.131134 15753 0.184 15754 0.131134 15755 0.131134 15756 0.184 15757 0.184 15758 0.184 15759 0.184 15760 0.131134 15761 0.131134 15762 0.184 15763 -1.11122 15764 0.184 15765 0.184 15766 0.184 15767 0.131134 15768 0.184 15769 0.184 15770 0.184 15771 0.131134 15772 0.184 15773 -1.16409 15774 -1.11122 15775 0.184 15776 0.131134 15777 -1.16409 15778 0.184 15779 0.184 15780 0.131134 15781 0.184 15782 0.184 15783 0.131134 15784 0.184 15785 0.184 15786 0.184 15787 0.184 15788 0.184 15789 0.184 15790 0.184 15791 0.184 15792 0.131134 15793 0.184 15794 0.131134 15795 0.184 15796 0.184 15797 0.131134 15798 0.131134 15799 0.184 15800 0.184 15801 0.131134 15802 0.184 15803 0.184 15804 0.131134 15805 0.184 15806 0.184 15807 0.131134 15808 0.184 15809 0.184 15810 0.131134 15811 0.131134 15812 0.184 15813 0.131134 15814 0.184 15815 0.184 15816 0.131134 15817 -1.45485 15818 0.184 15819 0.131134 15820 -1.45485 15821 0.184 15822 0.131134 15823 0.131134 15824 0.184 15825 0.131134 15826 0.184 15827 0.184 15828 0.131134 15829 0.184 15830 0.184 15831 0.131134 15832 0.131134 15833 0.131134 15834 0.184 15835 0.184 15836 0.184 15837 0.131134 15838 0.184 15839 0.184 15840 0.131134 15841 0.131134 15842 0.184 15843 0.131134 15844 
-1.16409 15845 0.184 15846 0.184 15847 0.131134 15848 0.184 15849 0.184 15850 0.184 15851 0.184 15852 0.131134 15853 0.184 15854 -1.11122 15855 0.131134 15856 0.131134 15857 0.184 15858 0.184 15859 0.184 15860 0.184 15861 0.131134 15862 0.131134 15863 0.184 15864 0.184 15865 0.184 15866 0.184 15867 0.131134 15868 0.131134 15869 0.184 15870 0.184 15871 0.184 15872 0.184 15873 0.184 15874 0.184 15875 -1.21695 15876 0.184 15877 0.184 15878 0.131134 15879 0.131134 15880 0.184 15881 0.184 15882 0.184 15883 0.184 15884 0.184 15885 0.131134 15886 0.131134 15887 0.184 15888 0.184 15889 0.131134 15890 -1.16409 15891 0.131134 15892 0.184 15893 -1.21695 15894 0.131134 15895 0.184 15896 0.184 15897 0.131134 15898 0.184 15899 0.131134 15900 0.184 15901 0.131134 15902 0.184 15903 0.184 15904 0.184 15905 0.184 15906 -1.16409 15907 0.184 15908 0.184 15909 0.184 15910 0.131134 15911 0.184 15912 0.184 15913 -1.50772 15914 0.184 15915 0.131134 15916 0.184 15917 0.184 15918 0.184 15919 0.184 15920 0.131134 15921 0.184 15922 -1.16409 15923 0.131134 15924 0.184 15925 0.184 15926 0.131134 15927 0.184 15928 0.131134 15929 0.131134 15930 0.184 15931 0.184 15932 -1.11122 15933 0.131134 15934 0.184 15935 0.184 15936 0.184 15937 0.131134 15938 0.184 15939 0.184 15940 0.131134 15941 0.184 15942 0.184 15943 0.184 15944 0.131134 15945 0.184 15946 0.184 15947 0.131134 15948 0.184 15949 0.131134 15950 0.184 15951 0.184 15952 0.131134 15953 0.184 15954 0.184 15955 -1.11122 15956 0.184 15957 0.184 15958 0.184 15959 0.131134 15960 0.184 15961 0.184 15962 0.131134 15963 0.184 15964 0.184 15965 0.184 15966 0.184 15967 0.131134 15968 0.131134 15969 0.184 15970 -1.40198 15971 0.184 15972 0.184 15973 0.184 15974 0.184 15975 0.131134 15976 0.131134 15977 0.184 15978 0.184 15979 0.131134 15980 0.184 15981 -1.11122 15982 0.131134 15983 0.131134 15984 0.131134 15985 0.184 15986 0.184 15987 0.184 15988 0.131134 15989 0.184 15990 0.184 15991 0.131134 15992 0.184 15993 0.131134 15994 0.184 15995 0.184 15996 0.184 15997 0.131134 15998 0.184 15999 0.184 16000 -1.29625 16001 0.184 16002 0.131134 16003 0.184 16004 0.184 16005 0.184 16006 0.184 16007 0.131134 16008 0.131134 16009 0.184 16010 0.184 16011 0.131134 16012 0.131134 16013 0.131134 16014 -1.48128 16015 0.131134 16016 0.184 16017 0.131134 16018 -1.16409 16019 0.0782677 16020 0.184 16021 0.184 16022 0.131134 16023 0.131134 16024 0.184 16025 0.131134 16026 0.184 16027 0.184 16028 0.184 16029 0.131134 16030 0.184 16031 0.184 16032 0.131134 16033 0.184 16034 0.184 16035 0.184 16036 0.131134 16037 0.104701 16038 0.184 16039 0.184 16040 0.153685 16041 0.153685 16042 0.184 16043 -1.57427 16044 0.184 16045 0.153685 16046 0.184 16047 0.184 16048 0.153685 16049 0.184 16050 0.184 16051 0.184 16052 0.184 16053 0.153685 16054 0.184 16055 0.153685 16056 0.153685 16057 0.153685 16058 -1.21049 16059 0.184 16060 0.153685 16061 -1.30143 16062 -1.21049 16063 0.153685 16064 0.184 16065 0.184 16066 0.184 16067 0.184 16068 -1.78647 16069 0.184 16070 -1.51364 16071 0.184 16072 -1.11954 16073 0.153685 16074 0.184 16075 0.184 16076 0.184 16077 0.184 16078 0.153685 16079 0.184 16080 -1.21049 16081 0.184 16082 -1.11954 16083 -1.21049 16084 0.184 16085 0.153685 16086 0.184 16087 0.184 16088 0.153685 16089 0.153685 16090 0.153685 16091 -1.27112 16092 0.184 16093 0.153685 16094 0.184 16095 0.153685 16096 0.184 16097 0.153685 16098 0.184 16099 0.153685 16100 0.184 16101 0.153685 16102 0.0930551 16103 -0.0282047 16104 -0.0282047 16105 -0.0282047 16106 0.0930551 16107 0.0930551 16108 0.0930551 16109 0.0930551 16110 
0.0930551 16111 0.0930551 16112 0.0930551 16113 0.0930551 16114 0.0930551 16115 0.0930551 16116 0.0930551 16117 0.0930551 16118 0.0930551 16119 0.0930551 16120 0.0930551 16121 0.0930551 16122 0.0930551 16123 0.0930551 16124 0.0930551 16125 -2.11994 16126 0.0930551 16127 0.0930551 16128 0.0930551 16129 0.0930551 16130 0.0930551 16131 0.0930551 16132 0.0930551 16133 0.184 16134 0.184 16135 0.153685 16136 0.184 16137 0.153685 16138 0.184 16139 -1.14986 16140 -1.11954 16141 0.184 16142 -1.45301 16143 0.184 16144 0.184 16145 0.184 16146 0.153685 16147 0.153685 16148 0.184 16149 0.184 16150 0.153685 16151 0.184 16152 0.153685 16153 -1.21049 16154 0.153685 16155 0.184 16156 0.0930551 16157 0.153685 16158 0.184 16159 0.153685 16160 -1.39238 16161 0.153685 16162 0.184 16163 0.153685 16164 0.184 16165 0.184 16166 0.184 16167 0.153685 16168 0.184 16169 0.184 16170 0.153685 16171 -1.39238 16172 0.184 16173 0.184 16174 0.153685 16175 0.184 16176 0.153685 16177 0.184 16178 -1.21049 16179 0.153685 16180 0.184 16181 0.184 16182 0.184 16183 0.184 16184 0.184 16185 0.184 16186 0.153685 16187 0.184 16188 0.184 16189 0.184 16190 0.153685 16191 0.184 16192 0.184 16193 0.153685 16194 0.184 16195 0.153685 16196 0.184 16197 0.184 16198 0.153685 16199 0.184 16200 0.0930551 16201 0.0930551 16202 0.0930551 16203 0.0930551 16204 0.0930551 16205 0.0930551 16206 0.0930551 16207 0.0930551 16208 0.0930551 16209 0.0930551 16210 0.0930551 16211 0.0930551 16212 0.0930551 16213 0.0930551 16214 0.0930551 16215 0.0930551 16216 0.0930551 16217 0.0930551 16218 0.0930551 16219 0.0930551 16220 0.0930551 16221 0.0930551 16222 0.0930551 16223 0.184 16224 0.153685 16225 0.184 16226 0.153685 16227 0.184 16228 0.153685 16229 0.184 16230 0.153685 16231 0.184 16232 0.153685 16233 0.184 16234 -1.21049 16235 -3.666 16236 0.184 16237 0.184 16238 0.153685 16239 0.184 16240 0.153685 16241 0.184 16242 0.153685 16243 -1.48332 16244 0.184 16245 0.153685 16246 0.184 16247 0.184 16248 0.184 16249 0.153685 16250 0.184 16251 0.184 16252 0.153685 16253 0.184 16254 0.153685 16255 0.184 16256 0.184 16257 0.184 16258 0.184 16259 -1.21049 16260 0.153685 16261 0.153685 16262 0.153685 16263 0.184 16264 0.153685 16265 0.184 16266 0.184 16267 0.184 16268 0.153685 16269 0.184 16270 0.153685 16271 0.153685 16272 0.184 16273 0.153685 16274 0.184 16275 0.184 16276 0.153685 16277 0.153685 16278 0.184 16279 0.184 16280 0.153685 16281 0.153685 16282 0.184 16283 0.184 16284 0.184 16285 -1.51364 16286 0.153685 16287 0.153685 16288 -1.14986 16289 0.153685 16290 0.184 16291 0.153685 16292 0.184 16293 0.184 16294 0.184 16295 0.153685 16296 0.184 16297 0.153685 16298 -1.48332 16299 0.153685 16300 -1.27112 16301 0.153685 16302 -1.45301 16303 0.184 16304 0.153685 16305 0.184 16306 0.153685 16307 -1.51364 16308 0.153685 16309 0.153685 16310 0.184 16311 0.184 16312 0.153685 16313 0.184 16314 0.153685 16315 -1.42269 16316 0.153685 16317 -1.45301 16318 0.184 16319 0.153685 16320 0.153685 16321 0.153685 16322 0.153685 16323 0.153685 16324 0.153685 16325 0.184 16326 0.153685 16327 0.184 16328 0.153685 16329 0.153685 16330 0.184 16331 0.184 16332 0.153685 16333 0.184 16334 0.153685 16335 -1.39238 16336 0.184 16337 0.153685 16338 0.153685 16339 0.153685 16340 0.184 16341 0.153685 16342 0.153685 16343 0.153685 16344 0.153685 16345 0.153685 16346 0.153685 16347 0.184 16348 0.153685 16349 0.184 16350 0.184 16351 0.184 16352 0.184 16353 0.153685 16354 0.153685 16355 0.184 16356 0.153685 16357 0.153685 16358 -1.39238 16359 0.184 16360 0.184 16361 -1.6349 16362 0.153685 16363 0.153685 
16364 0.184 16365 0.184 16366 0.153685 16367 0.153685 16368 0.153685 16369 0.153685 16370 -1.39238 16371 0.153685 16372 0.153685 16373 0.153685 16374 0.184 16375 0.184 16376 0.153685 16377 0.153685 16378 -1.39238 16379 -1.21049 16380 0.184 16381 0.184 16382 0.184 16383 0.153685 16384 0.153685 16385 0.184 16386 0.184 16387 0.184 16388 0.153685 16389 0.184 16390 0.153685 16391 0.184 16392 0.184 16393 0.184 16394 0.153685 16395 0.184 16396 0.153685 16397 0.184 16398 0.153685 16399 -1.6349 16400 0.184 16401 0.153685 16402 0.153685 16403 0.184 16404 0.153685 16405 0.184 16406 0.184 16407 0.184 16408 0.153685 16409 0.184 16410 0.184 16411 0.184 16412 0.153685 16413 -1.27112 16414 -1.21049 16415 0.184 16416 0.184 16417 -1.30143 16418 0.184 16419 0.153685 16420 0.153685 16421 0.184 16422 0.153685 16423 -1.39238 16424 0.184 16425 0.153685 16426 0.184 16427 0.153685 16428 0.184 16429 0.153685 16430 0.153685 16431 -1.60458 16432 0.153685 16433 0.184 16434 0.184 16435 0.153685 16436 -1.27112 16437 0.153685 16438 0.184 16439 0.184 16440 0.153685 16441 0.184 16442 0.153685 16443 0.153685 16444 0.184 16445 0.153685 16446 0.184 16447 -1.21049 16448 0.153685 16449 0.153685 16450 0.184 16451 0.153685 16452 0.153685 16453 0.153685 16454 0.153685 16455 0.184 16456 -1.11954 16457 0.153685 16458 0.184 16459 0.153685 16460 0.153685 16461 0.153685 16462 0.153685 16463 0.153685 16464 0.153685 16465 0.153685 16466 0.153685 16467 0.184 16468 -1.21049 16469 0.153685 16470 0.184 16471 0.184 16472 0.153685 16473 0.153685 16474 0.184 16475 0.153685 16476 0.153685 16477 0.184 16478 0.153685 16479 0.153685 16480 0.184 16481 0.153685 16482 0.153685 16483 0.184 16484 0.153685 16485 0.153685 16486 0.153685 16487 0.153685 16488 0.153685 16489 0.153685 16490 0.184 16491 0.153685 16492 0.153685 16493 0.153685 16494 0.153685 16495 0.184 16496 0.184 16497 -1.18017 16498 0.153685 16499 0.153685 16500 0.184 16501 0.153685 16502 0.153685 16503 0.153685 16504 0.153685 16505 0.184 16506 0.153685 16507 -1.39238 16508 0.153685 16509 0.153685 16510 0.153685 16511 0.153685 16512 0.153685 16513 -1.21049 16514 0.184 16515 0.153685 16516 0.184 16517 -1.21049 16518 0.153685 16519 0.153685 16520 0.153685 16521 0.153685 16522 0.184 16523 0.153685 16524 0.184 16525 0.153685 16526 -1.11954 16527 0.184 16528 0.153685 16529 0.184 16530 0.153685 16531 -1.45301 16532 0.153685 16533 0.153685 16534 0.184 16535 0.153685 16536 0.153685 16537 0.153685 16538 0.184 16539 0.184 16540 0.184 16541 -1.21049 16542 0.184 16543 0.153685 16544 0.184 16545 0.153685 16546 0.153685 16547 0.153685 16548 0.153685 16549 0.153685 16550 -1.14986 16551 0.184 16552 0.153685 16553 0.184 16554 0.184 16555 -1.11954 16556 0.184 16557 0.153685 16558 0.184 16559 0.153685 16560 0.184 16561 0.184 16562 0.153685 16563 0.184 16564 -1.51364 16565 0.184 16566 0.153685 16567 0.184 16568 0.184 16569 0.153685 16570 0.184 16571 -1.51364 16572 0.184 16573 0.184 16574 0.153685 16575 0.184 16576 0.153685 16577 0.153685 16578 0.153685 16579 0.153685 16580 -1.14986 16581 0.153685 16582 0.184 16583 0.184 16584 0.153685 16585 0.184 16586 -1.27112 16587 0.184 16588 0.184 16589 0.153685 16590 0.184 16591 0.153685 16592 0.153685 16593 0.153685 16594 0.184 16595 0.153685 16596 0.184 16597 0.184 16598 0.153685 16599 0.184 16600 0.184 16601 0.153685 16602 0.184 16603 0.184 16604 0.184 16605 0.153685 16606 0.184 16607 0.184 16608 0.153685 16609 0.184 16610 0.153685 16611 0.153685 16612 0.184 16613 0.184 16614 0.153685 16615 0.184 16616 0.153685 16617 0.184 16618 0.184 16619 0.184 16620 0.0930551 16621 
0.0930551 16622 0.0930551 16623 0.184 16624 0.184 16625 0.153685 16626 0.184 16627 0.184 16628 0.153685 16629 0.184 16630 0.184 16631 0.153685 16632 0.184 16633 0.153685 16634 -1.11954 16635 0.184 16636 0.153685 16637 0.184 16638 0.184 16639 0.153685 16640 0.184 16641 0.184 16642 0.184 16643 0.153685 16644 0.184 16645 0.184 16646 0.184 16647 0.184 16648 0.184 16649 0.184 16650 0.153685 16651 0.153685 16652 0.184 16653 0.184 16654 0.184 16655 0.184 16656 0.153685 16657 0.153685 16658 0.184 16659 0.153685 16660 0.184 16661 0.184 16662 0.184 16663 0.184 16664 0.184 16665 0.184 16666 0.184 16667 0.184 16668 0.184 16669 0.184 16670 0.153685 16671 0.153685 16672 0.184 16673 0.153685 16674 -1.27112 16675 0.184 16676 0.184 16677 0.153685 16678 0.184 16679 0.184 16680 0.184 16681 0.153685 16682 0.184 16683 0.153685 16684 -1.21049 16685 0.184 16686 0.184 16687 0.184 16688 0.184 16689 0.153685 16690 0.184 16691 0.184 16692 0.153685 16693 0.184 16694 0.153685 16695 0.184 16696 0.184 16697 0.184 16698 0.184 16699 0.153685 16700 0.184 16701 0.184 16702 0.153685 16703 0.184 16704 0.184 16705 0.153685 16706 0.184 16707 0.153685 16708 0.184 16709 0.184 16710 0.153685 16711 0.184 16712 0.184 16713 0.153685 16714 0.184 16715 0.184 16716 0.184 16717 0.184 16718 0.184 16719 -1.51364 16720 0.184 16721 0.184 16722 0.153685 16723 0.184 16724 0.184 16725 0.153685 16726 0.184 16727 0.184 16728 0.184 16729 0.184 16730 0.184 16731 0.184 16732 0.184 16733 0.184 16734 0.153685 16735 0.184 16736 0.184 16737 -1.11954 16738 0.184 16739 0.184 16740 0.153685 16741 0.184 16742 0.184 16743 0.153685 16744 0.184 16745 0.153685 16746 0.153685 16747 0.153685 16748 0.184 16749 0.153685 16750 0.153685 16751 0.184 16752 0.184 16753 0.184 16754 0.184 16755 0.153685 16756 0.153685 16757 0.184 16758 0.153685 16759 0.153685 16760 0.184 16761 0.153685 16762 0.153685 16763 0.184 16764 0.184 16765 0.153685 16766 0.184 16767 0.153685 16768 0.184 16769 0.153685 16770 0.153685 16771 0.153685 16772 0.184 16773 0.153685 16774 0.184 16775 0.153685 16776 0.153685 16777 0.153685 16778 0.184 16779 0.153685 16780 0.184 16781 0.153685 16782 0.184 16783 0.184 16784 0.184 16785 0.184 16786 0.153685 16787 0.184 16788 0.153685 16789 0.184 16790 0.153685 16791 0.153685 16792 0.184 16793 0.184 16794 -1.30143 16795 -1.57427 16796 0.153685 16797 0.184 16798 0.153685 16799 0.153685 16800 0.184 16801 0.184 16802 0.153685 16803 -1.11954 16804 0.153685 16805 0.184 16806 0.153685 16807 0.184 16808 0.153685 16809 -1.14986 16810 0.184 16811 0.153685 16812 0.184 16813 0.184 16814 0.184 16815 0.153685 16816 0.184 16817 0.184 16818 0.153685 16819 0.184 16820 0.184 16821 0.184 16822 -1.51364 16823 -1.45301 16824 0.184 16825 0.184 16826 0.184 16827 0.184 16828 0.153685 16829 0.184 16830 0.184 16831 -1.21049 16832 -1.21049 16833 -1.11954 16834 0.184 16835 0.153685 16836 0.153685 16837 0.184 16838 0.184 16839 0.184 16840 0.153685 16841 0.184 16842 0.153685 16843 -2.81718 16844 0.184 16845 0.184 16846 0.184 16847 0.153685 16848 0.184 16849 0.184 16850 0.153685 16851 0.184 16852 0.153685 16853 0.184 16854 0.184 16855 0.153685 16856 -1.18017 16857 -1.11954 16858 0.184 16859 0.153685 16860 0.153685 16861 -1.21049 16862 0.153685 16863 0.184 16864 0.153685 16865 0.184 16866 0.184 16867 0.184 16868 0.153685 16869 0.184 16870 -1.11954 16871 0.184 16872 0.184 16873 0.153685 16874 0.153685 16875 0.184 16876 0.184 16877 0.153685 16878 0.184 16879 0.153685 16880 0.184 16881 0.153685 16882 0.184 16883 0.153685 16884 0.184 16885 0.184 16886 0.153685 16887 0.153685 16888 0.184 16889 
0.184 16890 0.153685 16891 0.184 16892 0.184 16893 0.184 16894 0.184 16895 0.153685 16896 0.184 16897 0.153685 16898 0.153685 16899 0.184 16900 0.153685 16901 0.184 16902 0.184 16903 0.153685 16904 0.153685 16905 0.153685 16906 0.184 16907 0.184 16908 0.153685 16909 0.184 16910 0.184 16911 0.184 16912 0.153685 16913 0.184 16914 0.184 16915 0.184 16916 0.184 16917 0.184 16918 0.153685 16919 0.184 16920 0.153685 16921 0.184 16922 0.184 16923 0.184 16924 0.184 16925 0.184 16926 0.184 16927 0.153685 16928 0.153685 16929 0.153685 16930 0.184 16931 0.184 16932 0.184 16933 0.184 16934 0.184 16935 0.184 16936 0.184 16937 0.184 16938 0.184 16939 0.184 16940 0.153685 16941 0.184 16942 0.184 16943 0.153685 16944 0.184 16945 -1.21049 16946 0.184 16947 0.184 16948 0.153685 16949 -1.14986 16950 0.153685 16951 0.184 16952 0.153685 16953 0.153685 16954 0.184 16955 0.153685 16956 0.153685 16957 0.184 16958 0.153685 16959 0.184 16960 0.184 16961 0.153685 16962 0.184 16963 0.184 16964 0.153685 16965 0.153685 16966 0.184 16967 0.184 16968 0.184 16969 0.153685 16970 0.153685 16971 0.153685 16972 0.184 16973 0.184 16974 0.184 16975 0.184 16976 0.184 16977 0.184 16978 0.184 16979 0.153685 16980 0.184 16981 0.184 16982 0.153685 16983 0.184 16984 0.153685 16985 0.184 16986 0.184 16987 0.184 16988 0.153685 16989 0.184 16990 0.184 16991 0.153685 16992 0.153685 16993 0.184 16994 0.184 16995 0.153685 16996 0.153685 16997 0.184 16998 0.184 16999 -1.51364 17000 0.153685 17001 0.184 17002 0.153685 17003 0.153685 17004 0.184 17005 0.153685 17006 0.153685 17007 0.153685 17008 0.184 17009 0.153685 17010 0.184 17011 0.153685 17012 0.184 17013 0.184 17014 0.184 17015 0.153685 17016 0.184 17017 0.184 17018 0.153685 17019 0.184 17020 0.184 17021 0.184 17022 0.184 17023 0.184 17024 0.153685 17025 0.184 17026 0.184 17027 0.153685 17028 0.184 17029 0.153685 17030 0.184 17031 0.184 17032 0.184 17033 0.153685 17034 0.184 17035 0.153685 17036 0.153685 17037 0.153685 17038 0.184 17039 0.153685 17040 0.153685 17041 0.184 17042 0.153685 17043 0.153685 17044 0.184 17045 0.153685 17046 0.153685 17047 -1.21049 17048 0.153685 17049 0.184 17050 0.153685 17051 0.184 17052 0.184 17053 0.184 17054 0.153685 17055 0.184 17056 0.184 17057 0.153685 17058 0.184 17059 0.184 17060 0.153685 17061 0.153685 17062 0.184 17063 0.144244 17064 0.144244 17065 0.144244 17066 0.144244 17067 0.184 17068 0.184 17069 -1.11469 17070 0.144244 17071 0.184 17072 0.184 17073 0.184 17074 0.144244 17075 0.184 17076 0.144244 17077 0.144244 17078 0.184 17079 0.144244 17080 0.184 17081 0.144244 17082 0.184 17083 -1.499 17084 0.184 17085 0.184 17086 0.144244 17087 0.184 17088 0.144244 17089 0.184 17090 0.144244 17091 0.184 17092 0.184 17093 0.184 17094 0.144244 17095 0.184 17096 0.184 17097 0.144244 17098 0.184 17099 0.184 17100 0.144244 17101 0.184 17102 0.184 17103 0.184 17104 0.184 17105 0.184 17106 0.144244 17107 0.184 17108 0.184 17109 0.184 17110 0.144244 17111 0.144244 17112 0.184 17113 0.144244 17114 0.144244 17115 0.184 17116 0.144244 17117 0.184 17118 0.184 17119 0.144244 17120 0.144244 17121 0.184 17122 0.144244 17123 0.184 17124 0.144244 17125 0.184 17126 0.184 17127 0.144244 17128 0.184 17129 0.184 17130 -1.37973 17131 0.184 17132 0.184 17133 0.184 17134 0.144244 17135 0.144244 17136 0.184 17137 0.184 17138 0.184 17139 0.144244 17140 0.184 17141 0.184 17142 0.144244 17143 0.144244 17144 0.144244 17145 0.144244 17146 0.144244 17147 0.184 17148 0.184 17149 0.184 17150 0.184 17151 0.184 17152 -1.4725 17153 0.184 17154 0.144244 17155 0.144244 17156 -1.20746 17157 
0.144244 17158 0.184 17159 0.184 17160 0.184 17161 0.144244 17162 -1.40624 17163 0.184 17164 -1.499 17165 0.184 17166 0.144244 17167 0.184 17168 0.184 17169 0.144244 17170 0.184 17171 0.144244 17172 0.184 17173 0.184 17174 0.144244 17175 0.184 17176 0.184 17177 -1.1942 17178 0.184 17179 0.144244 17180 0.184 17181 0.184 17182 0.184 17183 0.144244 17184 -1.1942 17185 0.184 17186 0.184 17187 0.184 17188 0.184 17189 0.184 17190 -1.1942 17191 -1.30022 17192 0.184 17193 0.184 17194 0.184 17195 0.184 17196 0.144244 17197 0.184 17198 0.144244 17199 0.184 17200 0.184 17201 0.144244 17202 0.184 17203 0.184 17204 0.144244 17205 0.184 17206 0.144244 17207 0.184 17208 -1.37973 17209 0.184 17210 0.184 17211 0.144244 17212 0.184 17213 0.184 17214 -1.1942 17215 0.184 17216 0.144244 17217 0.184 17218 -1.1942 17219 0.184 17220 0.184 17221 0.184 17222 -1.1677 17223 0.184 17224 0.184 17225 0.184 17226 0.184 17227 0.144244 17228 0.184 17229 0.184 17230 0.184 17231 0.144244 17232 0.184 17233 0.184 17234 0.144244 17235 0.184 17236 0.184 17237 0.144244 17238 0.184 17239 0.144244 17240 0.144244 17241 0.184 17242 0.184 17243 0.144244 17244 0.184 17245 0.184 17246 0.144244 17247 0.184 17248 0.184 17249 0.144244 17250 0.184 17251 0.144244 17252 0.184 17253 0.144244 17254 0.184 17255 0.144244 17256 0.144244 17257 0.184 17258 0.184 17259 0.144244 17260 0.184 17261 0.144244 17262 0.144244 17263 0.184 17264 0.184 17265 0.144244 17266 0.184 17267 0.144244 17268 0.144244 17269 0.184 17270 0.184 17271 0.184 17272 0.184 17273 0.184 17274 0.184 17275 0.144244 17276 0.144244 17277 0.184 17278 0.144244 17279 0.144244 17280 0.144244 17281 0.184 17282 0.144244 17283 0.184 17284 0.184 17285 0.144244 17286 0.144244 17287 0.184 17288 0.144244 17289 0.184 17290 0.184 17291 0.144244 17292 0.184 17293 0.144244 17294 0.184 17295 0.144244 17296 0.184 17297 0.144244 17298 0.184 17299 -1.1942 17300 0.184 17301 0.184 17302 0.184 17303 0.184 17304 0.184 17305 0.184 17306 0.144244 17307 0.184 17308 0.184 17309 0.144244 17310 0.144244 17311 0.184 17312 0.144244 17313 0.184 17314 0.144244 17315 0.144244 17316 0.184 17317 0.184 17318 0.184 17319 0.144244 17320 0.184 17321 0.144244 17322 0.144244 17323 0.144244 17324 0.184 17325 0.144244 17326 0.144244 17327 0.144244 17328 0.184 17329 0.144244 17330 0.184 17331 0.184 17332 0.144244 17333 0.184 17334 0.184 17335 0.144244 17336 0.184 17337 0.184 17338 0.184 17339 0.144244 17340 -1.20746 17341 0.184 17342 0.184 17343 0.184 17344 0.184 17345 0.144244 17346 0.184 17347 0.184 17348 0.144244 17349 0.184 17350 0.184 17351 0.144244 17352 0.144244 17353 0.184 17354 0.144244 17355 0.184 17356 -1.1942 17357 0.144244 17358 0.144244 17359 0.184 17360 0.144244 17361 0.144244 17362 0.184 17363 0.144244 17364 0.144244 17365 -1.12794 17366 0.144244 17367 0.144244 17368 0.184 17369 0.184 17370 0.144244 17371 0.144244 17372 0.184 17373 0.144244 17374 0.184 17375 -1.1677 17376 0.184 17377 0.184 17378 -1.11469 17379 0.184 17380 0.144244 17381 0.184 17382 0.144244 17383 0.144244 17384 0.184 17385 0.184 17386 0.184 17387 0.184 17388 0.184 17389 0.144244 17390 0.184 17391 0.144244 17392 0.184 17393 0.184 17394 0.184 17395 0.144244 17396 0.144244 17397 0.184 17398 0.184 17399 0.144244 17400 0.184 17401 0.144244 17402 0.184 17403 0.144244 17404 0.184 17405 0.184 17406 -1.4725 17407 0.184 17408 0.184 17409 0.144244 17410 0.184 17411 0.144244 17412 0.184 17413 0.184 17414 0.144244 17415 0.184 17416 0.144244 17417 0.184 17418 0.184 17419 0.184 17420 0.184 17421 0.144244 17422 0.144244 17423 0.184 17424 0.144244 17425 0.144244 
17426 0.184 17427 0.184 17428 0.144244 17429 -1.11469 17430 -1.1942 17431 -1.11469 17432 0.184 17433 0.184 17434 0.144244 17435 0.184 17436 0.144244 17437 0.144244 17438 0.184 17439 0.144244 17440 0.144244 17441 0.184 17442 0.144244 17443 0.184 17444 0.184 17445 0.184 17446 0.184 17447 0.184 17448 0.144244 17449 0.184 17450 0.144244 17451 0.184 17452 -1.4725 17453 0.184 17454 0.144244 17455 0.184 17456 0.184 17457 0.144244 17458 0.184 17459 -1.40624 17460 0.184 17461 0.144244 17462 0.184 17463 0.184 17464 0.184 17465 0.184 17466 0.144244 17467 0.184 17468 0.184 17469 0.184 17470 0.144244 17471 0.184 17472 0.184 17473 0.184 17474 0.184 17475 0.144244 17476 0.184 17477 0.184 17478 0.184 17479 -1.11469 17480 0.144244 17481 0.184 17482 0.184 17483 0.184 17484 -1.499 17485 0.144244 17486 0.184 17487 0.184 17488 0.184 17489 0.144244 17490 0.184 17491 0.184 17492 0.184 17493 0.184 17494 0.144244 17495 0.184 17496 0.144244 17497 0.184 17498 0.184 17499 0.184 17500 0.184 17501 0.11774 17502 0.0912362 17503 0.184 17504 0.184 17505 0.184 17506 0.144244 17507 0.184 17508 0.184 17509 0.144244 17510 0.184 17511 0.184 17512 0.184 17513 0.184 17514 0.144244 17515 0.144244 17516 0.144244 17517 0.184 17518 0.144244 17519 0.184 17520 0.184 17521 0.144244 17522 0.184 17523 0.144244 17524 0.144244 17525 0.184 17526 0.184 17527 0.144244 17528 0.184 17529 0.144244 17530 0.184 17531 0.144244 17532 0.184 17533 0.144244 17534 0.184 17535 0.144244 17536 0.144244 17537 0.184 17538 0.184 17539 0.184 17540 0.184 17541 0.144244 17542 0.184 17543 0.184 17544 0.184 17545 0.144244 17546 0.184 17547 0.184 17548 0.184 17549 0.184 17550 0.144244 17551 0.184 17552 0.144244 17553 0.144244 17554 0.144244 17555 0.184 17556 0.184 17557 0.184 17558 0.144244 17559 0.184 17560 0.184 17561 0.184 17562 -1.11469 17563 0.184 17564 0.184 17565 0.184 17566 0.184 17567 0.184 17568 0.144244 17569 0.184 17570 0.184 17571 0.184 17572 0.184 17573 0.184 17574 0.184 17575 0.184 17576 0.144244 17577 0.184 17578 0.144244 17579 0.184 17580 0.184 17581 0.184 17582 0.184 17583 0.144244 17584 0.184 17585 0.184 17586 0.184 17587 0.184 17588 0.184 17589 0.184 17590 0.144244 17591 0.184 17592 0.184 17593 0.184 17594 -1.4725 17595 0.144244 17596 0.184 17597 0.144244 17598 0.184 17599 0.184 17600 0.184 17601 0.184 17602 0.144244 17603 0.144244 17604 0.144244 17605 0.184 17606 0.144244 17607 0.184 17608 0.184 17609 0.144244 17610 0.144244 17611 0.184 17612 0.144244 17613 0.184 17614 0.184 17615 0.184 17616 0.144244 17617 0.184 17618 0.184 17619 0.144244 17620 0.184 17621 0.144244 17622 0.144244 17623 0.184 17624 0.144244 17625 0.144244 17626 0.184 17627 0.144244 17628 0.184 17629 0.184 17630 0.144244 17631 0.184 17632 0.184 17633 0.144244 17634 0.144244 17635 0.184 17636 0.184 17637 0.144244 17638 0.184 17639 0.184 17640 0.144244 17641 0.184 17642 0.144244 17643 0.184 17644 0.144244 17645 0.184 17646 0.144244 17647 0.184 17648 0.184 17649 0.144244 17650 0.184 17651 0.144244 17652 0.144244 17653 0.184 17654 0.144244 17655 0.144244 17656 0.184 17657 0.184 17658 0.144244 17659 0.184 17660 0.184 17661 0.184 17662 0.144244 17663 0.144244 17664 0.184 17665 0.144244 17666 0.184 17667 0.184 17668 0.184 17669 0.184 17670 0.184 17671 0.144244 17672 0.144244 17673 0.184 17674 0.144244 17675 0.184 17676 0.184 17677 0.184 17678 0.144244 17679 0.184 17680 0.144244 17681 0.144244 17682 0.184 17683 -1.44599 17684 0.184 17685 0.184 17686 -1.1942 17687 0.144244 17688 0.184 17689 -1.20746 17690 0.144244 17691 0.184 17692 0.184 17693 0.144244 17694 0.184 17695 -1.11469 17696 
0.184 17697 0.184 17698 0.184 17699 -1.40624 17700 0.184 17701 0.184 17702 -1.11469 17703 0.184 17704 0.184 17705 0.144244 17706 0.144244 17707 0.184 17708 0.184 17709 -1.1942 17710 0.184 17711 0.184 17712 0.144244 17713 0.184 17714 0.144244 17715 0.184 17716 -1.11469 17717 0.184 17718 0.144244 17719 0.184 17720 0.144244 17721 0.184 17722 0.144244 17723 0.184 17724 0.184 17725 0.144244 17726 0.144244 17727 0.144244 17728 0.184 17729 0.144244 17730 -1.45924 17731 0.184 17732 0.144244 17733 0.144244 17734 0.184 17735 0.144244 17736 0.184 17737 0.184 17738 0.144244 17739 0.144244 17740 0.184 17741 0.144244 17742 0.184 17743 0.184 17744 0.144244 17745 0.184 17746 -1.1677 17747 0.184 17748 0.144244 17749 0.184 17750 0.184 17751 0.184 17752 0.184 17753 0.184 17754 0.144244 17755 0.184 17756 -1.1412 17757 0.144244 17758 0.144244 17759 0.184 17760 0.184 17761 0.184 17762 0.184 17763 0.144244 17764 0.144244 17765 0.184 17766 0.184 17767 0.184 17768 0.144244 17769 0.144244 17770 0.184 17771 0.184 17772 0.144244 17773 -1.20746 17774 0.144244 17775 0.144244 17776 0.144244 17777 0.144244 17778 0.144244 17779 0.144244 17780 0.144244 17781 -1.45924 17782 -1.27372 17783 0.144244 17784 0.184 17785 0.184 17786 -1.40624 17787 0.184 17788 0.184 17789 0.184 17790 0.184 17791 0.184 17792 0.144244 17793 -1.43274 17794 0.144244 17795 0.144244 17796 0.184 17797 0.144244 17798 0.184 17799 0.184 17800 0.144244 17801 0.184 17802 0.184 17803 0.184 17804 0.144244 17805 -1.1677 17806 0.184 17807 0.184 17808 0.184 17809 0.144244 17810 0.144244 17811 0.184 17812 0.184 17813 0.184 17814 -1.41949 17815 0.184 17816 0.144244 17817 0.184 17818 0.144244 17819 0.184 17820 0.184 17821 0.184 17822 0.144244 17823 0.184 17824 0.184 17825 0.144244 17826 0.184 17827 0.144244 17828 0.184 17829 0.184 17830 0.184 17831 0.184 17832 0.184 17833 0.144244 17834 0.184 17835 0.144244 17836 0.184 17837 0.144244 17838 0.144244 17839 0.184 17840 0.184 17841 0.184 17842 0.144244 17843 0.144244 17844 -1.1942 17845 -1.11469 17846 0.184 17847 0.144244 17848 0.184 17849 0.184 17850 0.184 17851 0.184 17852 0.184 17853 0.184 17854 0.184 17855 0.184 17856 0.184 17857 0.144244 17858 0.184 17859 0.184 17860 0.144244 17861 0.184 17862 0.184 17863 0.184 17864 0.184 17865 0.144244 17866 0.184 17867 0.144244 17868 0.144244 17869 0.144244 17870 0.184 17871 0.184 17872 0.144244 17873 0.184 17874 0.144244 17875 0.184 17876 0.184 17877 0.144244 17878 0.184 17879 0.184 17880 0.144244 17881 0.184 17882 0.184 17883 0.144244 17884 0.184 17885 0.144244 17886 0.184 17887 -1.499 17888 -1.27372 17889 -1.43274 17890 0.184 17891 -1.1942 17892 0.184 17893 0.184 17894 0.184 17895 0.144244 17896 0.144244 17897 0.144244 17898 -1.1677 17899 0.144244 17900 0.184 17901 0.184 17902 0.184 17903 -1.499 17904 0.184 17905 0.184 17906 0.184 17907 0.184 17908 0.144244 17909 0.184 17910 0.184 17911 0.144244 17912 0.184 17913 0.144244 17914 0.184 17915 0.184 17916 0.144244 17917 0.184 17918 0.184 17919 0.144244 17920 0.184 17921 0.184 17922 0.184 17923 0.184 17924 -1.11469 17925 0.184 17926 0.184 17927 0.184 17928 0.144244 17929 0.184 17930 -1.1412 17931 0.144244 17932 0.184 17933 0.184 17934 0.144244 17935 0.184 17936 0.184 17937 0.184 17938 0.184 17939 0.184 17940 0.184 17941 0.184 17942 0.184 17943 0.144244 17944 0.184 17945 0.184 17946 0.144244 17947 0.144244 17948 0.184 17949 0.184 17950 0.144244 17951 0.184 17952 0.144244 17953 0.184 17954 0.184 17955 0.144244 17956 0.144244 17957 0.184 17958 0.184 17959 0.144244 17960 0.184 17961 0.144244 17962 0.184 17963 0.184 17964 0.184 17965 
0.184 17966 0.184 17967 0.184 17968 0.184 17969 0.184 17970 0.184 17971 0.184 17972 0.144244 17973 0.184 17974 0.144244 17975 0.184 17976 0.144244 17977 0.184 17978 0.184 17979 -1.1942 17980 0.184 17981 0.184 17982 0.184 17983 0.184 17984 0.144244 17985 0.184 17986 0.184 17987 0.184 17988 0.144244 17989 0.184 17990 0.184 17991 0.144244 17992 0.184 17993 0.184 17994 0.184 17995 0.184 17996 0.184 17997 0.184 17998 0.184 17999 -1.499 18000 0.184 18001 0.144244 18002 0.184 18003 0.144244 18004 0.144244 18005 0.184 18006 0.144244 18007 0.144244 18008 0.184 18009 0.184 18010 0.144244 18011 0.184 18012 0.184 18013 0.184 18014 0.144244 18015 0.144244 18016 0.144244 18017 0.144244 18018 0.144244 18019 0.184 18020 0.184 18021 0.144244 18022 0.184 18023 0.184 18024 0.144244 18025 0.184 18026 0.184 18027 0.184 18028 0.184 18029 0.184 18030 -1.4725 18031 0.184 18032 -1.39298 18033 0.184 18034 0.184 18035 0.144244 18036 0.184 18037 0.184 18038 0.144244 18039 0.184 18040 0.184 18041 0.184 18042 0.144244 18043 0.144244 18044 0.184 18045 0.144244 18046 0.144244 18047 0.184 18048 0.184 18049 0.184 18050 0.144244 18051 0.184 18052 0.144244 18053 0.184 18054 0.184 18055 0.144244 18056 0.184 18057 0.144244 18058 0.144244 18059 0.184 18060 0.144244 18061 0.184 18062 0.184 18063 0.144244 18064 0.184 18065 0.144244 18066 0.184 18067 0.184 18068 0.144244 18069 0.184 18070 0.144244 18071 0.184 18072 0.144244 18073 0.184 18074 0.184 18075 0.184 18076 0.144244 18077 0.184 18078 0.144244 18079 0.144244 18080 0.184 18081 0.144244 18082 0.184 18083 0.144244 18084 0.184 18085 0.144244 18086 0.184 18087 0.184 18088 0.184 18089 0.184 18090 -1.7313 18091 0.184 18092 0.184 18093 0.184 18094 0.184 18095 0.136709 18096 0.136709 18097 0.184 18098 0.136709 18099 0.184 18100 0.184 18101 0.184 18102 0.136709 18103 0.184 18104 -1.56578 18105 0.184 18106 -1.21109 18107 0.184 18108 0.184 18109 0.184 18110 0.184 18111 0.136709 18112 0.136709 18113 0.184 18114 0.184 18115 0.136709 18116 0.136709 18117 0.136709 18118 0.184 18119 0.184 18120 0.136709 18121 0.136709 18122 0.184 18123 0.184 18124 0.184 18125 0.136709 18126 0.184 18127 0.184 18128 0.136709 18129 0.184 18130 0.136709 18131 0.184 18132 0.184 18133 0.184 18134 0.136709 18135 0.184 18136 0.184 18137 -1.21109 18138 -1.21109 18139 0.136709 18140 0.136709 18141 0.184 18142 0.184 18143 -1.1638 18144 -1.21109 18145 0.184 18146 0.184 18147 0.184 18148 0.136709 18149 0.184 18150 0.184 18151 0.184 18152 0.184 18153 -1.11651 18154 -1.21109 18155 0.184 18156 0.136709 18157 -1.21109 18158 0.184 18159 0.136709 18160 0.136709 18161 0.136709 18162 0.184 18163 -1.11651 18164 -1.1638 18165 -1.11651 18166 0.136709 18167 0.136709 18168 0.184 18169 0.184 18170 0.136709 18171 0.136709 18172 -1.18745 18173 0.136709 18174 0.184 18175 0.184 18176 0.136709 18177 0.184 18178 0.184 18179 -1.11651 18180 0.184 18181 0.136709 18182 0.184 18183 0.184 18184 0.136709 18185 -1.11651 18186 0.184 18187 0.184 18188 0.184 18189 0.136709 18190 0.184 18191 0.136709 18192 0.184 18193 0.136709 18194 0.184 18195 0.136709 18196 0.184 18197 0.184 18198 0.184 18199 0.184 18200 0.136709 18201 0.184 18202 0.184 18203 0.184 18204 0.136709 18205 0.184 18206 0.136709 18207 0.184 18208 0.136709 18209 0.184 18210 0.184 18211 -1.11651 18212 0.136709 18213 0.184 18214 0.184 18215 0.184 18216 0.184 18217 0.184 18218 0.136709 18219 0.136709 18220 0.136709 18221 0.136709 18222 0.184 18223 0.184 18224 0.136709 18225 0.184 18226 0.184 18227 -1.11651 18228 0.136709 18229 0.184 18230 0.136709 18231 0.184 18232 0.184 18233 0.184 18234 
0.184 18235 0.136709 18236 0.184 18237 0.136709 18238 0.136709 18239 0.184 18240 0.184 18241 0.184 18242 0.136709 18243 -1.11651 18244 0.136709 18245 0.184 18246 0.184 18247 0.136709 18248 0.184 18249 0.136709 18250 0.184 18251 0.136709 18252 0.184 18253 0.184 18254 0.184 18255 0.184 18256 -1.1638 18257 0.136709 18258 0.136709 18259 0.184 18260 0.184 18261 0.184 18262 0.184 18263 -1.21109 18264 0.184 18265 0.136709 18266 0.136709 18267 0.184 18268 0.184 18269 0.136709 18270 0.136709 18271 0.184 18272 0.136709 18273 0.136709 18274 0.184 18275 0.184 18276 0.184 18277 0.184 18278 0.136709 18279 0.136709 18280 0.184 18281 0.136709 18282 0.136709 18283 0.184 18284 0.184 18285 0.136709 18286 0.184 18287 0.184 18288 0.184 18289 0.184 18290 0.136709 18291 0.136709 18292 0.184 18293 0.136709 18294 0.136709 18295 0.184 18296 0.136709 18297 0.136709 18298 0.184 18299 0.136709 18300 0.184 18301 0.136709 18302 0.136709 18303 0.136709 18304 0.184 18305 0.136709 18306 0.136709 18307 0.184 18308 0.184 18309 0.184 18310 0.184 18311 0.136709 18312 0.184 18313 0.184 18314 0.184 18315 0.184 18316 0.184 18317 0.136709 18318 0.136709 18319 0.184 18320 0.184 18321 0.136709 18322 0.184 18323 0.136709 18324 0.184 18325 0.184 18326 0.136709 18327 0.136709 18328 0.184 18329 0.136709 18330 0.184 18331 0.136709 18332 0.184 18333 0.184 18334 0.136709 18335 0.136709 18336 0.136709 18337 0.184 18338 0.184 18339 0.184 18340 0.184 18341 0.136709 18342 0.184 18343 0.184 18344 0.136709 18345 0.184 18346 0.184 18347 0.136709 18348 0.184 18349 0.184 18350 -1.11651 18351 0.136709 18352 0.184 18353 0.184 18354 0.184 18355 0.184 18356 0.184 18357 0.136709 18358 0.184 18359 0.136709 18360 0.184 18361 0.184 18362 0.136709 18363 0.184 18364 0.136709 18365 0.136709 18366 0.184 18367 0.184 18368 -1.1638 18369 0.184 18370 0.184 18371 0.136709 18372 0.184 18373 0.184 18374 0.184 18375 0.136709 18376 0.184 18377 0.136709 18378 0.136709 18379 0.184 18380 0.184 18381 0.136709 18382 0.184 18383 0.184 18384 0.184 18385 0.136709 18386 -1.21109 18387 0.184 18388 0.184 18389 0.136709 18390 0.184 18391 0.136709 18392 0.136709 18393 -1.30568 18394 0.136709 18395 0.136709 18396 0.184 18397 0.136709 18398 0.184 18399 0.136709 18400 0.184 18401 0.136709 18402 0.184 18403 0.184 18404 0.184 18405 0.136709 18406 0.184 18407 0.184 18408 0.184 18409 0.184 18410 0.136709 18411 0.184 18412 0.184 18413 0.136709 18414 0.184 18415 0.184 18416 0.184 18417 0.184 18418 0.136709 18419 0.184 18420 0.184 18421 0.184 18422 -1.1638 18423 0.184 18424 -1.58943 18425 0.184 18426 0.184 18427 0.136709 18428 0.184 18429 0.136709 18430 0.184 18431 0.184 18432 0.136709 18433 0.184 18434 0.184 18435 0.136709 18436 0.184 18437 0.184 18438 0.136709 18439 -1.1638 18440 0.184 18441 0.136709 18442 0.136709 18443 0.136709 18444 0.184 18445 0.136709 18446 0.184 18447 0.184 18448 0.136709 18449 0.184 18450 -1.21109 18451 -1.42391 18452 0.184 18453 0.136709 18454 0.184 18455 0.184 18456 0.184 18457 0.136709 18458 0.184 18459 0.184 18460 0.184 18461 0.184 18462 0.136709 18463 0.184 18464 0.184 18465 0.184 18466 0.184 18467 -1.11651 18468 0.136709 18469 0.184 18470 0.136709 18471 0.184 18472 0.184 18473 0.184 18474 0.136709 18475 0.136709 18476 0.184 18477 0.184 18478 0.184 18479 0.184 18480 0.136709 18481 0.184 18482 -1.21109 18483 -1.4712 18484 0.136709 18485 0.136709 18486 0.184 18487 0.136709 18488 0.184 18489 0.136709 18490 0.184 18491 0.184 18492 0.184 18493 0.136709 18494 0.184 18495 0.136709 18496 0.184 18497 0.184 18498 0.136709 18499 -1.1638 18500 0.184 18501 0.136709 18502 
0.136709 18503 0.184 18504 0.184 18505 0.184 18506 0.136709 18507 0.184 18508 0.184 18509 0.184 18510 0.184 18511 -1.14016 18512 -1.28203 18513 0.136709 18514 0.136709 18515 0.184 18516 0.136709 18517 0.136709 18518 0.184 18519 0.184 18520 0.136709 18521 -1.11651 18522 0.184 18523 0.184 18524 0.136709 18525 0.136709 18526 0.184 18527 0.184 18528 0.136709 18529 -1.61307 18530 0.184 18531 -1.30568 18532 0.184 18533 0.184 18534 0.184 18535 0.184 18536 0.136709 18537 0.184 18538 0.136709 18539 0.136709 18540 0.184 18541 0.136709 18542 0.136709 18543 0.184 18544 0.184 18545 -1.40026 18546 0.136709 18547 0.136709 18548 0.136709 18549 0.136709 18550 0.184 18551 0.136709 18552 0.184 18553 0.136709 18554 0.184 18555 0.184 18556 0.136709 18557 0.184 18558 0.184 18559 0.136709 18560 -1.28203 18561 0.184 18562 0.184 18563 0.136709 18564 0.184 18565 0.184 18566 0.136709 18567 0.184 18568 0.184 18569 0.136709 18570 0.184 18571 -1.21109 18572 0.136709 18573 0.184 18574 0.136709 18575 -1.14016 18576 0.184 18577 0.184 18578 0.136709 18579 -1.4712 18580 0.184 18581 0.136709 18582 0.184 18583 0.136709 18584 0.184 18585 0.136709 18586 0.136709 18587 0.184 18588 0.184 18589 0.184 18590 -1.21109 18591 0.184 18592 0.184 18593 0.136709 18594 0.184 18595 0.136709 18596 0.184 18597 0.136709 18598 0.184 18599 0.136709 18600 0.184 18601 -1.21109 18602 0.136709 18603 0.184 18604 0.184 18605 0.184 18606 0.136709 18607 0.184 18608 0.136709 18609 0.184 18610 -1.11651 18611 0.184 18612 0.184 18613 0.184 18614 0.136709 18615 -1.40026 18616 0.136709 18617 0.184 18618 0.184 18619 0.136709 18620 0.184 18621 -1.40026 18622 0.184 18623 0.136709 18624 0.184 18625 0.184 18626 0.136709 18627 0.184 18628 0.136709 18629 0.136709 18630 0.184 18631 -1.21109 18632 0.136709 18633 0.184 18634 0.184 18635 0.136709 18636 0.136709 18637 0.136709 18638 0.184 18639 0.136709 18640 0.184 18641 0.136709 18642 0.136709 18643 0.184 18644 0.184 18645 0.136709 18646 0.184 18647 0.136709 18648 0.184 18649 0.184 18650 0.184 18651 0.136709 18652 0.184 18653 0.184 18654 0.136709 18655 0.184 18656 0.184 18657 0.184 18658 0.184 18659 -1.40026 18660 0.184 18661 0.136709 18662 0.184 18663 0.136709 18664 0.184 18665 0.136709 18666 0.184 18667 0.184 18668 0.136709 18669 0.136709 18670 0.184 18671 0.184 18672 0.136709 18673 0.136709 18674 0.136709 18675 0.136709 18676 0.136709 18677 0.136709 18678 0.184 18679 0.136709 18680 0.184 18681 0.136709 18682 0.136709 18683 0.184 18684 0.184 18685 0.136709 18686 0.184 18687 0.136709 18688 0.184 18689 0.136709 18690 0.136709 18691 0.184 18692 0.184 18693 0.184 18694 0.136709 18695 0.136709 18696 -2.819 18697 0.136709 18698 0.184 18699 0.136709 18700 0.136709 18701 0.184 18702 0.184 18703 0.184 18704 0.160354 18705 0.160354 18706 0.113063 18707 0.113063 18708 0.184 18709 0.184 18710 0.136709 18711 -1.4712 18712 0.136709 18713 0.136709 18714 0.184 18715 0.136709 18716 0.184 18717 0.136709 18718 0.136709 18719 0.184 18720 0.184 18721 0.184 18722 0.184 18723 0.136709 18724 0.184 18725 0.136709 18726 0.136709 18727 0.184 18728 0.136709 18729 0.136709 18730 0.184 18731 0.136709 18732 0.184 18733 0.184 18734 0.136709 18735 0.184 18736 -1.30568 18737 0.136709 18738 0.184 18739 0.184 18740 0.136709 18741 0.184 18742 0.136709 18743 -1.21109 18744 0.136709 18745 0.136709 18746 -1.49484 18747 0.184 18748 0.136709 18749 -1.14016 18750 0.184 18751 0.184 18752 0.184 18753 0.136709 18754 0.184 18755 0.136709 18756 0.184 18757 0.184 18758 0.136709 18759 0.184 18760 0.136709 18761 0.184 18762 0.184 18763 -1.21109 18764 0.184 18765 0.184 
18766 0.136709 18767 0.136709 18768 -1.4712 18769 -1.28203 18770 0.184 18771 0.136709 18772 0.184 18773 0.136709 18774 0.184 18775 0.184 18776 0.184 18777 0.184 18778 0.136709 18779 0.184 18780 0.184 18781 0.184 18782 0.136709 18783 0.184 18784 0.0894173 18785 0.184 18786 0.136709 18787 0.184 18788 0.136709 18789 0.184 18790 0.184 18791 0.184 18792 0.136709 18793 0.136709 18794 0.184 18795 0.184 18796 0.184 18797 0.136709 18798 0.184 18799 0.184 18800 0.0894173 18801 0.113063 18802 0.113063 18803 0.0894173 18804 0.113063 18805 0.0894173 18806 0.113063 18807 0.0894173 18808 0.113063 18809 0.0894173 18810 0.113063 18811 -1.7313 18812 0.0894173 18813 0.113063 18814 0.136709 18815 0.184 18816 0.184 18817 0.136709 18818 0.136709 18819 0.136709 18820 0.136709 18821 0.136709 18822 0.136709 18823 0.184 18824 0.136709 18825 0.184 18826 0.136709 18827 0.184 18828 -1.51849 18829 0.184 18830 0.184 18831 0.184 18832 0.136709 18833 0.136709 18834 0.184 18835 0.136709 18836 0.184 18837 0.184 18838 0.184 18839 0.184 18840 0.184 18841 -1.42391 18842 0.184 18843 0.184 18844 0.136709 18845 0.184 18846 0.0894173 18847 0.0894173 18848 0.136709 18849 -1.14016 18850 0.136709 18851 0.184 18852 0.136709 18853 0.136709 18854 0.184 18855 0.184 18856 0.184 18857 0.184 18858 0.136709 18859 0.184 18860 0.136709 18861 0.184 18862 0.136709 18863 0.184 18864 0.136709 18865 0.184 18866 0.184 18867 0.136709 18868 0.136709 18869 0.136709 18870 0.184 18871 0.184 18872 0.184 18873 -1.4712 18874 0.184 18875 0.136709 18876 0.184 18877 0.136709 18878 0.184 18879 0.184 18880 -1.21109 18881 -1.14016 18882 0.136709 18883 0.184 18884 0.184 18885 0.136709 18886 0.184 18887 -1.28203 18888 -1.21109 18889 0.184 18890 0.184 18891 0.136709 18892 0.184 18893 -1.80224 18894 0.184 18895 0.136709 18896 0.184 18897 0.136709 18898 0.184 18899 0.184 18900 0.136709 18901 -1.28203 18902 0.136709 18903 0.136709 18904 0.184 18905 0.184 18906 0.136709 18907 0.184 18908 0.184 18909 0.136709 18910 0.184 18911 0.136709 18912 0.184 18913 0.184 18914 -1.21109 18915 -1.28203 18916 0.184 18917 0.136709 18918 -1.21109 18919 0.184 18920 0.184 18921 0.136709 18922 0.136709 18923 0.184 18924 0.184 18925 0.136709 18926 0.184 18927 0.184 18928 0.184 18929 0.136709 18930 0.184 18931 0.184 18932 0.136709 18933 -1.11651 18934 0.184 18935 -1.77859 18936 0.184 18937 0.184 18938 0.184 18939 0.136709 18940 0.184 18941 0.184 18942 0.184 18943 0.184 18944 0.184 18945 0.184 18946 0.136709 18947 0.184 18948 0.136709 18949 0.184 18950 0.136709 18951 0.136709 18952 0.136709 18953 -1.21109 18954 0.184 18955 0.136709 18956 0.184 18957 0.184 18958 0.136709 18959 0.184 18960 0.184 18961 0.184 18962 0.136709 18963 0.184 18964 0.184 18965 0.136709 18966 0.184 18967 0.136709 18968 -1.21109 18969 0.184 18970 0.184 18971 0.184 18972 0.184 18973 0.136709 18974 0.184 18975 0.136709 18976 0.136709 18977 0.184 18978 0.136709 18979 -1.11651 18980 0.184 18981 0.184 18982 -1.40026 18983 0.136709 18984 0.184 18985 0.136709 18986 0.184 18987 0.184 18988 0.136709 18989 0.136709 18990 0.136709 18991 0.184 18992 -1.21109 18993 0.136709 18994 -1.14016 18995 0.136709 18996 0.184 18997 0.136709 18998 0.184 18999 0.184 19000 0.136709 19001 0.136709 19002 0.184 19003 0.184 19004 0.136709 19005 0.136709 19006 0.184 19007 0.136709 19008 0.184 19009 0.136709 19010 0.136709 19011 0.184 19012 0.184 19013 0.136709 19014 -1.21109 19015 0.184 19016 -2.74806 19017 -1.21109 19018 0.184 19019 0.136709 19020 0.136709 19021 0.136709 19022 0.136709 19023 0.184 19024 0.184 19025 0.184 19026 0.184 19027 0.136709 19028 
0.184 19029 0.136709 19030 0.184 19031 0.136709 19032 0.136709 19033 -1.18745 19034 0.184 19035 0.184 19036 0.136709 19037 0.184 19038 -1.40026 19039 0.184 19040 0.184 19041 0.184 19042 0.136709 19043 0.184 19044 0.184 19045 0.184 19046 0.136709 19047 0.184 19048 0.184 19049 -1.21109 19050 0.184 19051 -1.1638 19052 0.136709 19053 0.184 19054 0.184 19055 0.184 19056 0.184 19057 0.184 19058 0.136709 19059 0.184 19060 0.184 19061 0.184 19062 0.184 19063 0.184 19064 -1.21109 19065 -1.21109 19066 -1.11651 19067 0.136709 19068 0.184 19069 -1.37661 19070 0.184 19071 0.136709 19072 0.184 19073 0.136709 19074 0.136709 19075 0.184 19076 0.184 19077 0.136709 19078 0.136709 19079 0.136709 19080 -1.21109 19081 0.136709 19082 0.184 19083 0.184 19084 0.184 19085 0.136709 19086 0.184 19087 0.136709 19088 -1.21109 19089 -1.63672 19090 0.136709 19091 0.184 19092 0.184 19093 0.184 19094 0.160354 19095 0.113063 19096 0.184 19097 0.136709 19098 0.184 19099 0.136709 19100 0.136709 19101 0.184 19102 0.184 19103 0.136709 19104 0.136709 19105 0.136709 19106 0.136709 19107 0.184 19108 0.184 19109 0.136709 19110 -1.57383 19111 0.184 19112 0.132299 19113 0.132299 19114 0.184 19115 0.132299 19116 0.132299 19117 0.132299 19118 0.184 19119 0.184 19120 0.184 19121 0.184 19122 -1.52213 19123 0.184 19124 0.132299 19125 0.184 19126 0.184 19127 0.132299 19128 0.132299 19129 0.184 19130 0.184 19131 0.184 19132 0.184 19133 -1.52213 19134 0.184 19135 0.132299 19136 0.184 19137 0.132299 19138 0.184 19139 0.132299 19140 0.184 19141 0.132299 19142 0.184 19143 0.184 19144 0.132299 19145 0.132299 19146 0.132299 19147 0.184 19148 0.132299 19149 0.132299 19150 0.132299 19151 0.132299 19152 0.184 19153 0.132299 19154 0.184 19155 0.132299 19156 0.184 19157 0.132299 19158 0.132299 19159 0.184 19160 -1.52213 19161 0.184 19162 0.132299 19163 -1.13437 19164 0.132299 19165 0.184 19166 0.132299 19167 0.132299 19168 0.132299 19169 0.15815 19170 0.132299 19171 0.184 19172 0.132299 19173 0.132299 19174 0.184 19175 0.132299 19176 0.132299 19177 0.184 19178 0.184 19179 0.132299 19180 0.184 19181 0.184 19182 0.132299 19183 0.184 19184 0.184 19185 0.184 19186 0.132299 19187 0.184 19188 0.132299 19189 -1.13437 19190 0.184 19191 0.184 19192 0.184 19193 0.184 19194 0.184 19195 0.132299 19196 0.184 19197 0.132299 19198 0.132299 19199 0.184 19200 0.184 19201 0.132299 19202 0.184 19203 0.184 19204 -1.28947 19205 0.184 19206 0.132299 19207 0.132299 19208 0.184 19209 0.184 19210 0.132299 19211 0.184 19212 0.184 19213 0.132299 19214 0.132299 19215 -1.13437 19216 0.132299 19217 -1.13437 19218 0.184 19219 0.184 19220 0.184 19221 0.132299 19222 0.184 19223 0.184 19224 0.184 19225 0.184 19226 0.132299 19227 0.132299 19228 0.184 19229 0.184 19230 0.184 19231 0.184 19232 0.132299 19233 0.184 19234 0.132299 19235 0.184 19236 0.184 19237 0.184 19238 0.184 19239 0.184 19240 0.132299 19241 0.184 19242 0.132299 19243 0.184 19244 0.132299 19245 0.184 19246 0.184 19247 0.132299 19248 0.184 19249 -1.52213 19250 0.132299 19251 -1.59968 19252 -1.26362 19253 0.132299 19254 0.184 19255 0.132299 19256 0.184 19257 0.184 19258 0.132299 19259 0.184 19260 0.184 19261 0.184 19262 0.132299 19263 0.184 19264 -1.57383 19265 0.184 19266 0.184 19267 0.132299 19268 -1.13437 19269 -1.21192 19270 0.184 19271 0.132299 19272 0.184 19273 0.184 19274 -1.59968 19275 0.184 19276 0.184 19277 0.184 19278 0.132299 19279 0.184 19280 0.184 19281 -1.44457 19282 -1.13437 19283 0.184 19284 0.184 19285 0.132299 19286 0.184 19287 0.184 19288 0.184 19289 0.132299 19290 0.184 19291 0.132299 19292 0.184 
19293 0.132299 19294 0.184 19295 -1.52213 19296 0.132299 19297 0.184 19298 0.184 19299 -1.52213 19300 0.184 19301 0.132299 19302 0.184 19303 0.184 19304 -1.13437 19305 0.132299 19306 0.132299 19307 0.184 19308 0.132299 19309 -1.21192 19310 0.132299 19311 0.132299 19312 0.184 19313 0.184 19314 0.184 19315 0.132299 19316 0.184 19317 0.132299 19318 0.184 19319 0.184 19320 -1.21192 19321 0.184 19322 0.184 19323 -1.52213 19324 0.132299 19325 0.184 19326 0.132299 19327 0.184 19328 0.184 19329 0.184 19330 0.184 19331 0.132299 19332 0.132299 19333 0.184 19334 -1.41872 19335 0.132299 19336 0.184 19337 0.132299 19338 0.184 19339 0.184 19340 0.132299 19341 0.132299 19342 -1.47043 19343 0.184 19344 0.184 19345 -1.52213 19346 0.132299 19347 0.184 19348 0.184 19349 0.132299 19350 0.15815 19351 0.15815 19352 0.132299 19353 0.15815 19354 0.15815 19355 0.054748 19356 0.054748 19357 0.054748 19358 0.054748 19359 0.054748 19360 0.054748 19361 0.054748 19362 0.054748 19363 0.054748 19364 0.054748 19365 0.054748 19366 0.054748 19367 0.054748 19368 0.054748 19369 0.054748 19370 0.054748 19371 0.054748 19372 0.054748 19373 0.054748 19374 0.054748 19375 0.054748 19376 0.054748 19377 0.054748 19378 0.054748 19379 0.054748 19380 0.054748 19381 0.054748 19382 0.054748 19383 0.054748 19384 0.054748 19385 0.054748 19386 0.054748 19387 0.054748 19388 0.054748 19389 0.054748 19390 0.054748 19391 0.054748 19392 0.054748 19393 0.054748 19394 0.054748 19395 0.054748 19396 0.054748 19397 0.054748 19398 0.054748 19399 0.054748 19400 0.054748 19401 0.054748 19402 0.054748 19403 0.054748 19404 0.054748 19405 0.054748 19406 0.054748 19407 0.054748 19408 0.054748 19409 0.054748 19410 0.054748 19411 0.054748 19412 0.054748 19413 0.054748 19414 0.054748 19415 -2.37519 19416 0.054748 19417 0.054748 19418 0.054748 19419 -2.11669 19420 0.106449 19421 0.106449 19422 0.0805984 19423 0.106449 19424 0.106449 19425 -1.41872 19426 0.132299 19427 0.184 19428 0.132299 19429 0.184 19430 0.184 19431 0.184 19432 -1.21192 19433 0.132299 19434 0.184 19435 0.184 19436 0.132299 19437 -1.13437 19438 0.184 19439 0.132299 19440 0.184 19441 0.184 19442 0.184 19443 0.184 19444 -1.52213 19445 0.132299 19446 0.184 19447 0.184 19448 0.132299 19449 0.184 19450 0.132299 19451 0.184 19452 0.132299 19453 0.184 19454 0.132299 19455 0.184 19456 0.184 19457 0.184 19458 0.184 19459 0.132299 19460 -1.41872 19461 0.184 19462 0.184 19463 0.184 19464 0.132299 19465 0.132299 19466 -3.099 19467 0.184 19468 0.184 19469 0.132299 19470 0.184 19471 0.184 19472 0.184 19473 -1.41872 19474 0.132299 19475 -1.31532 19476 0.132299 19477 0.184 19478 0.132299 19479 0.184 19480 0.132299 19481 0.132299 19482 0.132299 19483 0.132299 19484 0.184 19485 0.184 19486 0.132299 19487 0.132299 19488 0.184 19489 0.184 19490 0.132299 19491 0.132299 19492 0.132299 19493 0.184 19494 0.132299 19495 0.132299 19496 0.132299 19497 0.184 19498 0.184 19499 0.184 19500 0.184 19501 0.132299 19502 0.132299 19503 0.184 19504 0.132299 19505 0.132299 19506 0.132299 19507 -1.52213 19508 -1.13437 19509 0.184 19510 0.132299 19511 0.184 19512 0.184 19513 0.184 19514 -1.49628 19515 0.132299 19516 -1.49628 19517 0.132299 19518 0.132299 19519 0.184 19520 0.184 19521 0.184 19522 0.132299 19523 0.132299 19524 0.132299 19525 0.184 19526 0.184 19527 0.184 19528 0.132299 19529 0.184 19530 -1.21192 19531 0.132299 19532 0.184 19533 0.184 19534 0.132299 19535 0.184 19536 0.184 19537 0.132299 19538 0.132299 19539 0.184 19540 -1.59968 19541 0.132299 19542 0.132299 19543 0.132299 19544 0.184 19545 0.132299 19546 0.132299 
19547 0.184 19548 0.184 19549 0.184 19550 0.184 19551 0.132299 19552 0.132299 19553 0.184 19554 0.184 19555 0.184 19556 0.184 19557 0.184 19558 0.184 19559 0.184 19560 0.132299 19561 0.184 19562 0.132299 19563 0.184 19564 0.132299 19565 0.184 19566 0.184 19567 0.184 19568 0.132299 19569 0.132299 19570 0.132299 19571 0.132299 19572 0.132299 19573 0.184 19574 0.132299 19575 0.184 19576 0.132299 19577 0.132299 19578 0.132299 19579 0.184 19580 0.184 19581 0.132299 19582 0.184 19583 -1.16022 19584 -1.39287 19585 0.184 19586 0.132299 19587 0.184 19588 0.184 19589 0.184 19590 0.184 19591 0.184 19592 0.132299 19593 0.184 19594 0.184 19595 0.184 19596 0.184 19597 0.132299 19598 0.132299 19599 0.132299 19600 -1.65138 19601 0.184 19602 -1.13437 19603 0.184 19604 0.132299 19605 0.184 19606 0.132299 19607 0.184 19608 0.184 19609 0.184 19610 0.184 19611 0.132299 19612 0.184 19613 0.132299 19614 0.132299 19615 0.132299 19616 0.132299 19617 0.184 19618 0.132299 19619 0.184 19620 0.184 19621 0.132299 19622 0.184 19623 0.184 19624 0.184 19625 0.132299 19626 0.184 19627 0.184 19628 0.132299 19629 -1.21192 19630 0.184 19631 0.132299 19632 0.184 19633 0.132299 19634 0.184 19635 0.184 19636 0.132299 19637 -1.13437 19638 -1.13437 19639 0.184 19640 -1.28947 19641 0.184 19642 0.184 19643 0.132299 19644 -1.21192 19645 0.132299 19646 0.184 19647 0.184 19648 0.184 19649 0.132299 19650 0.132299 19651 0.184 19652 0.184 19653 0.184 19654 0.132299 19655 0.184 19656 0.184 19657 0.184 19658 0.184 19659 0.132299 19660 0.184 19661 0.184 19662 0.184 19663 0.132299 19664 0.184 19665 0.132299 19666 0.132299 19667 0.184 19668 0.184 19669 0.132299 19670 0.184 19671 0.184 19672 0.132299 19673 -1.13437 19674 0.184 19675 0.184 19676 0.132299 19677 0.184 19678 -1.39287 19679 0.184 19680 0.132299 19681 0.132299 19682 0.184 19683 0.184 19684 -1.52213 19685 0.184 19686 0.184 19687 0.132299 19688 0.132299 19689 0.132299 19690 0.132299 19691 0.184 19692 0.132299 19693 0.184 19694 0.132299 19695 0.132299 19696 0.184 19697 0.184 19698 0.132299 19699 0.132299 19700 0.184 19701 0.132299 19702 0.184 19703 0.132299 19704 0.184 19705 0.132299 19706 0.184 19707 0.132299 19708 0.184 19709 0.184 19710 0.132299 19711 0.132299 19712 0.132299 19713 0.184 19714 0.132299 19715 0.132299 19716 0.184 19717 0.184 19718 0.132299 19719 0.184 19720 0.184 19721 0.184 19722 0.184 19723 0.132299 19724 0.184 19725 0.132299 19726 0.132299 19727 -1.21192 19728 0.132299 19729 0.184 19730 0.132299 19731 0.184 19732 0.132299 19733 0.132299 19734 0.132299 19735 0.184 19736 0.132299 19737 0.132299 19738 0.132299 19739 0.132299 19740 0.132299 19741 0.132299 19742 0.184 19743 0.132299 19744 0.132299 19745 0.184 19746 0.132299 19747 0.184 19748 0.184 19749 0.132299 19750 0.184 19751 0.184 19752 0.132299 19753 0.132299 19754 0.184 19755 0.132299 19756 0.184 19757 0.184 19758 0.132299 19759 0.132299 19760 0.184 19761 0.184 19762 0.132299 19763 0.184 19764 0.132299 19765 0.184 19766 0.132299 19767 0.184 19768 -1.13437 19769 0.132299 19770 0.132299 19771 0.184 19772 -1.13437 19773 0.184 19774 0.184 19775 0.184 19776 -1.16022 19777 0.184 19778 0.184 19779 0.0805984 19780 0.184 19781 0.184 19782 0.132299 19783 0.184 19784 0.184 19785 0.184 19786 0.132299 19787 0.184 19788 0.184 19789 0.184 19790 0.132299 19791 0.184 19792 0.132299 19793 0.184 19794 0.184 19795 0.132299 19796 0.184 19797 0.184 19798 0.132299 19799 0.132299 19800 0.184 19801 0.132299 19802 0.132299 19803 0.132299 19804 0.132299 19805 0.184 19806 0.132299 19807 0.184 19808 0.184 19809 0.184 19810 0.132299 19811 
0.184 19812 0.184 19813 0.184 19814 0.184 19815 0.132299 19816 0.184 19817 0.132299 19818 0.184 19819 0.184 19820 0.184 19821 0.184 19822 0.132299 19823 0.132299 19824 0.184 19825 0.132299 19826 0.132299 19827 0.184 19828 0.132299 19829 0.184 19830 0.184 19831 0.132299 19832 0.184 19833 0.132299 19834 -1.49628 19835 0.132299 19836 0.132299 19837 0.184 19838 0.184 19839 0.184 19840 0.132299 19841 0.184 19842 0.184 19843 0.132299 19844 0.184 19845 0.184 19846 0.132299 19847 0.184 19848 0.184 19849 0.132299 19850 0.184 19851 0.132299 19852 0.132299 19853 0.184 19854 0.184 19855 0.132299 19856 0.184 19857 0.132299 19858 -1.47043 19859 0.132299 19860 0.132299 19861 0.132299 19862 0.132299 19863 0.132299 19864 0.132299 19865 -1.47043 19866 0.132299 19867 0.184 19868 0.184 19869 0.184 19870 0.132299 19871 0.132299 19872 0.184 19873 0.132299 19874 0.132299 19875 -1.26362 19876 0.184 19877 0.184 19878 0.132299 19879 -1.47043 19880 0.184 19881 0.184 19882 0.184 19883 0.132299 19884 0.184 19885 0.184 19886 0.184 19887 0.132299 19888 0.184 19889 -2.8405 19890 0.132299 19891 0.132299 19892 0.184 19893 0.132299 19894 0.184 19895 0.184 19896 0.184 19897 -1.21192 19898 0.132299 19899 0.184 19900 0.184 19901 0.184 19902 0.132299 19903 0.184 19904 -1.13437 19905 0.132299 19906 0.184 19907 0.132299 19908 0.184 19909 0.132299 19910 -1.28947 19911 0.184 19912 0.132299 19913 0.184 19914 -1.26362 19915 0.132299 19916 0.132299 19917 0.184 19918 -1.52213 19919 0.184 19920 -1.49628 19921 0.132299 19922 0.184 19923 -1.59968 19924 0.132299 19925 0.184 19926 0.184 19927 0.132299 19928 0.132299 19929 0.132299 19930 0.132299 19931 0.184 19932 0.184 19933 0.132299 19934 0.184 19935 0.132299 19936 0.184 19937 0.184 19938 0.132299 19939 -1.21192 19940 0.132299 19941 0.184 19942 0.184 19943 0.132299 19944 0.184 19945 -1.13437 19946 0.132299 19947 0.184 19948 0.184 19949 0.132299 19950 0.184 19951 0.132299 19952 0.184 19953 0.184 19954 0.184 19955 0.184 19956 0.184 19957 0.132299 19958 0.184 19959 0.132299 19960 0.184 19961 0.184 19962 0.184 19963 0.132299 19964 0.184 19965 0.184 19966 0.184 19967 0.132299 19968 0.132299 19969 0.184 19970 0.132299 19971 0.184 19972 0.132299 19973 0.184 19974 0.132299 19975 0.184 19976 0.132299 19977 0.184 19978 0.184 19979 0.184 19980 0.184 19981 0.132299 19982 0.184 19983 0.184 19984 0.184 19985 0.132299 19986 0.184 19987 0.132299 19988 0.132299 19989 0.132299 19990 0.184 19991 0.184 19992 0.184 19993 0.132299 19994 0.184 19995 0.184 19996 0.132299 19997 0.184 19998 0.132299 19999 0.184 20000 0.132299 20001 0.184 20002 0.184 20003 0.132299 20004 0.184 20005 0.184 20006 0.132299 20007 0.184 20008 0.184 20009 0.132299 20010 0.184 20011 0.184 20012 0.184 20013 0.132299 20014 0.184 20015 0.132299 20016 -1.21192 20017 0.132299 20018 0.184 20019 0.184 20020 0.132299 20021 0.184 20022 0.132299 20023 0.132299 20024 0.184 20025 0.184 20026 0.184 20027 0.184 20028 0.132299 20029 0.184 20030 0.132299 20031 0.132299 20032 0.184 20033 0.132299 20034 0.184 20035 0.132299 20036 0.184 20037 0.132299 20038 0.132299 20039 0.132299 20040 0.184 20041 0.132299 20042 0.132299 20043 0.184 20044 0.184 20045 -1.52213 20046 0.132299 20047 0.132299 20048 0.184 20049 0.132299 20050 0.184 20051 0.184 20052 0.132299 20053 0.132299 20054 0.184 20055 0.184 20056 -1.21192 20057 0.184 20058 0.132299 20059 0.132299 20060 0.184 20061 0.132299 20062 0.132299 20063 0.132299 20064 0.184 20065 0.132299 20066 0.184 20067 0.184 20068 0.184 20069 0.184 20070 0.132299 20071 0.184 20072 -1.13437 20073 0.132299 20074 0.132299 20075 
0.132299 20076 0.132299 20077 0.132299 20078 0.132299 20079 0.132299 20080 0.184 20081 0.132299 20082 0.184 20083 -1.28947 20084 0.132299 20085 0.132299 20086 0.132299 20087 0.184 20088 0.184 20089 0.132299 20090 0.132299 20091 0.132299 20092 0.132299 20093 0.132299 20094 0.132299 20095 0.132299 20096 -1.41872 20097 0.132299 20098 0.184 20099 0.132299 20100 0.184 20101 0.132299 20102 0.184 20103 0.132299 20104 -1.52213 20105 0.132299 20106 -1.21192 20107 0.132299 20108 0.184 20109 0.132299 20110 0.132299 20111 0.132299 20112 0.184 20113 0.184 20114 0.132299 20115 0.184 20116 0.132299 20117 0.132299 20118 0.132299 20119 0.184 20120 0.132299 20121 0.132299 20122 0.132299 20123 0.132299 20124 0.15815 20125 0.15815 20126 0.15815 20127 -1.62553 20128 0.15815 20129 0.15815 20130 0.15815 20131 0.15815 20132 0.132299 20133 0.15815 20134 0.130488 20135 0.157244 20136 0.130488 20137 0.157244 20138 0.157244 20139 0.157244 20140 0.130488 20141 0.157244 20142 0.184 20143 0.184 20144 0.184 20145 0.184 20146 0.184 20147 0.130488 20148 0.184 20149 0.184 20150 0.130488 20151 0.184 20152 -1.44811 20153 0.184 20154 0.184 20155 0.130488 20156 -1.28757 20157 0.184 20158 0.130488 20159 0.130488 20160 -1.42135 20161 0.130488 20162 0.184 20163 -1.3946 20164 0.130488 20165 0.184 20166 0.130488 20167 0.184 20168 0.184 20169 -1.20731 20170 0.130488 20171 0.130488 20172 0.130488 20173 0.184 20174 0.130488 20175 0.184 20176 0.130488 20177 -1.20731 20178 0.130488 20179 0.184 20180 0.130488 20181 0.130488 20182 0.130488 20183 0.184 20184 0.130488 20185 0.184 20186 0.184 20187 -1.20731 20188 0.130488 20189 0.130488 20190 0.130488 20191 -1.42135 20192 -3.08022 20193 0.130488 20194 0.184 20195 0.184 20196 -1.1538 20197 0.130488 20198 0.184 20199 0.184 20200 0.130488 20201 0.184 20202 -1.20731 20203 -1.12704 20204 0.184 20205 0.184 20206 0.184 20207 0.184 20208 0.184 20209 0.184 20210 0.184 20211 0.130488 20212 -1.42135 20213 0.130488 20214 0.130488 20215 0.184 20216 -3.13373 20217 0.130488 20218 0.184 20219 0.184 20220 -1.3946 20221 0.184 20222 0.184 20223 0.130488 20224 0.184 20225 0.130488 20226 0.184 20227 -1.42135 20228 -1.12704 20229 0.184 20230 0.130488 20231 0.130488 20232 0.184 20233 -1.20731 20234 -1.12704 20235 -1.26082 20236 0.130488 20237 0.184 20238 0.130488 20239 0.184 20240 0.130488 20241 -1.26082 20242 0.184 20243 0.130488 20244 0.184 20245 0.184 20246 0.130488 20247 0.130488 20248 0.130488 20249 0.130488 20250 -1.42135 20251 0.184 20252 0.130488 20253 -1.20731 20254 0.184 20255 0.184 20256 0.184 20257 0.130488 20258 0.130488 20259 0.184 20260 0.130488 20261 0.130488 20262 0.130488 20263 0.184 20264 0.130488 20265 0.130488 20266 0.130488 20267 0.130488 20268 0.130488 20269 0.184 20270 -1.42135 20271 0.130488 20272 0.184 20273 -1.50162 20274 0.130488 20275 0.184 20276 0.130488 20277 0.184 20278 0.130488 20279 0.130488 20280 0.184 20281 0.130488 20282 0.130488 20283 -1.12704 20284 0.184 20285 0.130488 20286 0.184 20287 0.184 20288 0.130488 20289 0.130488 20290 0.130488 20291 0.184 20292 0.184 20293 0.184 20294 0.184 20295 0.184 20296 0.130488 20297 0.130488 20298 0.184 20299 0.184 20300 0.130488 20301 0.184 20302 0.184 20303 0.130488 20304 0.184 20305 0.130488 20306 0.184 20307 0.130488 20308 0.130488 20309 0.184 20310 0.130488 20311 -1.3946 20312 0.130488 20313 -1.20731 20314 0.130488 20315 0.184 20316 0.184 20317 0.130488 20318 0.130488 20319 0.130488 20320 0.130488 20321 0.130488 20322 0.130488 20323 0.184 20324 0.130488 20325 0.184 20326 0.130488 20327 0.184 20328 0.184 20329 0.184 20330 0.130488 20331 
0.130488 20332 0.130488 20333 0.130488 20334 0.184 20335 0.130488 20336 0.130488 20337 0.184 20338 0.184 20339 0.130488 20340 -1.12704 20341 -1.20731 20342 0.184 20343 0.184 20344 0.184 20345 0.184 20346 0.184 20347 0.130488 20348 0.130488 20349 0.130488 20350 0.184 20351 0.184 20352 0.184 20353 0.184 20354 0.130488 20355 0.184 20356 0.130488 20357 0.184 20358 0.130488 20359 0.130488 20360 0.184 20361 -1.3946 20362 0.130488 20363 0.184 20364 0.130488 20365 0.130488 20366 0.130488 20367 0.184 20368 0.184 20369 0.130488 20370 0.184 20371 0.130488 20372 0.184 20373 0.130488 20374 0.130488 20375 0.130488 20376 0.184 20377 0.184 20378 0.130488 20379 0.184 20380 0.184 20381 0.184 20382 0.130488 20383 0.130488 20384 0.184 20385 0.184 20386 0.130488 20387 0.184 20388 0.184 20389 0.184 20390 0.130488 20391 0.184 20392 0.184 20393 0.184 20394 0.130488 20395 -1.20731 20396 0.184 20397 0.130488 20398 0.184 20399 0.184 20400 0.184 20401 0.130488 20402 0.184 20403 0.130488 20404 0.184 20405 0.130488 20406 0.184 20407 0.184 20408 0.130488 20409 0.130488 20410 0.184 20411 0.184 20412 -1.42135 20413 0.184 20414 0.130488 20415 0.130488 20416 0.184 20417 0.130488 20418 0.130488 20419 0.130488 20420 0.184 20421 0.130488 20422 0.130488 20423 0.184 20424 0.130488 20425 0.184 20426 0.184 20427 -1.20731 20428 0.184 20429 0.184 20430 0.130488 20431 0.130488 20432 0.130488 20433 0.184 20434 0.130488 20435 0.130488 20436 0.130488 20437 0.130488 20438 0.184 20439 0.130488 20440 0.184 20441 0.184 20442 0.184 20443 0.184 20444 0.184 20445 0.184 20446 0.130488 20447 0.184 20448 0.157244 20449 0.130488 20450 0.157244 20451 0.130488 20452 -1.52838 20453 0.130488 20454 0.157244 20455 -1.60865 20456 0.157244 20457 0.157244 20458 0.130488 20459 0.130488 20460 0.157244 20461 0.157244 20462 0.157244 20463 0.130488 20464 0.130488 20465 0.130488 20466 -1.52838 20467 0.157244 20468 0.130488 20469 0.157244 20470 0.157244 20471 0.130488 20472 -1.26082 20473 0.130488 20474 0.157244 20475 0.130488 20476 0.130488 20477 0.157244 20478 0.130488 20479 0.130488 20480 0.157244 20481 0.157244 20482 0.130488 20483 0.130488 20484 0.130488 20485 0.157244 20486 0.130488 20487 0.130488 20488 0.157244 20489 0.157244 20490 0.157244 20491 0.130488 20492 0.157244 20493 0.130488 20494 0.157244 20495 0.130488 20496 -1.18055 20497 0.130488 20498 0.130488 20499 0.130488 20500 0.130488 20501 0.130488 20502 0.130488 20503 0.130488 20504 0.130488 20505 0.130488 20506 0.130488 20507 0.157244 20508 0.157244 20509 0.157244 20510 0.157244 20511 0.130488 20512 0.157244 20513 0.130488 20514 0.157244 20515 -1.36784 20516 0.157244 20517 0.130488 20518 0.130488 20519 0.157244 20520 0.130488 20521 0.157244 20522 0.130488 20523 0.157244 20524 0.157244 20525 0.130488 20526 0.130488 20527 0.130488 20528 0.157244 20529 0.130488 20530 0.157244 20531 0.157244 20532 0.130488 20533 0.130488 20534 0.157244 20535 0.130488 20536 -1.26082 20537 0.157244 20538 0.130488 20539 0.130488 20540 0.130488 20541 0.130488 20542 0.130488 20543 0.130488 20544 0.130488 20545 0.157244 20546 0.130488 20547 0.130488 20548 0.157244 20549 0.157244 20550 0.130488 20551 0.130488 20552 0.157244 20553 0.130488 20554 0.130488 20555 0.130488 20556 0.130488 20557 0.130488 20558 0.130488 20559 0.157244 20560 0.130488 20561 0.130488 20562 0.157244 20563 0.157244 20564 0.157244 20565 0.157244 20566 0.130488 20567 0.130488 20568 0.157244 20569 0.130488 20570 0.130488 20571 -1.26082 20572 0.130488 20573 0.130488 20574 0.157244 20575 0.157244 20576 0.130488 20577 0.130488 20578 0.130488 20579 0.130488 20580 
0.130488 20581 0.130488 20582 0.130488 20583 0.0769764 20584 0.0769764 20585 0.0769764 20586 0.103732 20587 0.130488 20588 0.130488 20589 0.157244 20590 -1.52838 20591 0.157244 20592 -1.26082 20593 0.130488 20594 0.130488 20595 0.130488 20596 0.130488 20597 0.130488 20598 0.130488 20599 0.130488 20600 -1.52838 20601 0.130488 20602 0.157244 20603 0.130488 20604 0.130488 20605 0.157244 20606 0.157244 20607 0.130488 20608 0.157244 20609 0.157244 20610 0.130488 20611 0.157244 20612 0.157244 20613 0.157244 20614 0.130488 20615 0.130488 20616 0.157244 20617 0.130488 20618 0.130488 20619 0.157244 20620 -3.214 20621 -1.52838 20622 0.130488 20623 0.130488 20624 0.130488 20625 0.157244 20626 0.130488 20627 0.157244 20628 0.130488 20629 0.157244 20630 0.130488 20631 0.157244 20632 0.130488 20633 0.130488 20634 0.157244 20635 0.130488 20636 0.157244 20637 0.157244 20638 0.130488 20639 0.157244 20640 0.130488 20641 0.130488 20642 0.157244 20643 0.130488 20644 0.130488 20645 0.130488 20646 0.157244 20647 0.130488 20648 0.157244 20649 0.157244 20650 0.130488 20651 0.157244 20652 0.157244 20653 0.130488 20654 0.130488 20655 -1.26082 20656 0.157244 20657 0.130488 20658 0.130488 20659 0.157244 20660 0.130488 20661 0.157244 20662 0.130488 20663 0.130488 20664 0.130488 20665 0.130488 20666 0.157244 20667 0.130488 20668 -1.26082 20669 0.130488 20670 0.130488 20671 0.130488 20672 0.157244 20673 0.157244 20674 0.130488 20675 0.157244 20676 0.130488 20677 0.157244 20678 0.130488 20679 0.130488 20680 0.157244 20681 0.157244 20682 -1.18055 20683 0.157244 20684 0.157244 20685 0.130488 20686 0.130488 20687 -1.58189 20688 0.130488 20689 0.130488 20690 0.157244 20691 0.157244 20692 0.130488 20693 0.130488 20694 0.157244 20695 0.130488 20696 0.130488 20697 0.130488 20698 0.130488 20699 0.130488 20700 -1.18055 20701 -1.52838 20702 0.157244 20703 0.157244 20704 0.130488 20705 0.130488 20706 0.130488 20707 0.130488 20708 0.130488 20709 0.130488 20710 0.130488 20711 0.157244 20712 0.157244 20713 0.157244 20714 0.130488 20715 0.130488 20716 0.130488 20717 0.157244 20718 -1.58189 20719 0.157244 20720 0.130488 20721 0.130488 20722 0.157244 20723 0.130488 20724 0.130488 20725 0.130488 20726 0.157244 20727 0.157244 20728 0.157244 20729 0.157244 20730 0.130488 20731 0.157244 20732 0.130488 20733 0.157244 20734 0.130488 20735 0.157244 20736 0.157244 20737 0.130488 20738 0.157244 20739 0.157244 20740 0.157244 20741 0.157244 20742 0.157244 20743 -1.52838 20744 0.157244 20745 0.157244 20746 0.157244 20747 0.130488 20748 0.157244 20749 0.130488 20750 0.130488 20751 0.157244 20752 -1.26082 20753 0.157244 20754 -1.71567 20755 0.157244 20756 0.157244 20757 0.130488 20758 0.130488 20759 0.157244 20760 0.130488 20761 0.130488 20762 0.157244 20763 0.157244 20764 -2.00998 20765 0.130488 20766 0.157244 20767 0.130488 20768 0.130488 20769 0.130488 20770 -1.3946 20771 0.130488 20772 0.130488 20773 0.130488 20774 0.157244 20775 0.130488 20776 0.157244 20777 0.157244 20778 0.130488 20779 0.157244 20780 0.157244 20781 0.157244 20782 0.130488 20783 0.157244 20784 0.157244 20785 -1.36784 20786 0.130488 20787 0.157244 20788 0.157244 20789 0.130488 20790 0.157244 20791 0.130488 20792 0.157244 20793 0.130488 20794 0.157244 20795 0.130488 20796 0.130488 20797 -1.18055 20798 0.157244 20799 0.130488 20800 0.157244 20801 -1.6354 20802 0.157244 20803 0.130488 20804 0.157244 20805 0.157244 20806 0.130488 20807 0.157244 20808 0.130488 20809 0.157244 20810 0.130488 20811 0.130488 20812 0.130488 20813 0.130488 20814 0.157244 20815 0.130488 20816 0.157244 20817 
0.130488 20818 0.157244 20819 -0.00329134 20820 -0.00329134 20821 -0.00329134 20822 -0.00329134 20823 -0.00329134 20824 -0.00329134 20825 -0.00329134 20826 -0.00329134 20827 -0.00329134 20828 -0.00329134 20829 -0.00329134 20830 -0.00329134 20831 -0.00329134 20832 -0.00329134 20833 -0.00329134 20834 -0.00329134 20835 -0.00329134 20836 -0.00329134 20837 -0.00329134 20838 -0.00329134 20839 -0.00329134 20840 -0.00329134 20841 -0.00329134 20842 -0.00329134 20843 -0.00329134 20844 -0.00329134 20845 -0.00329134 20846 -0.00329134 20847 -0.00329134 20848 -0.00329134 20849 -0.00329134 20850 -0.00329134 20851 -0.00329134 20852 -0.00329134 20853 -0.00329134 20854 -0.00329134 20855 -0.00329134 20856 -0.00329134 20857 -0.00329134 20858 -0.00329134 20859 -0.00329134 20860 -0.00329134 20861 -0.00329134 20862 -0.00329134 20863 -0.00329134 20864 0.130488 20865 0.130488 20866 0.103732 20867 0.130488 20868 -1.8762 20869 0.130488 20870 0.130488 20871 0.157244 20872 0.130488 20873 0.157244 20874 0.130488 20875 0.130488 20876 0.157244 20877 0.157244 20878 0.157244 20879 0.157244 20880 0.130488 20881 0.157244 20882 0.157244 20883 0.130488 20884 0.157244 20885 0.130488 20886 0.157244 20887 0.130488 20888 0.157244 20889 0.130488 20890 0.157244 20891 0.157244 20892 0.130488 20893 0.157244 20894 0.130488 20895 0.157244 20896 0.130488 20897 0.157244 20898 0.157244 20899 -1.18055 20900 -1.36784 20901 0.157244 20902 0.157244 20903 0.130488 20904 0.157244 20905 0.157244 20906 0.130488 20907 0.157244 20908 0.157244 bx-python-0.8.13/test_data/bgzf_tests/000077500000000000000000000000001415666465100176735ustar00rootroot00000000000000bx-python-0.8.13/test_data/bgzf_tests/test.txt000066400000000000000000000022461415666465100214170ustar00rootroot00000000000000begin 644 - M7#MX8@E/CF1L8D+5C*2$BF]//54$09RCLJZ!&JQ)#I+^_(W,9?1?NTO#^7_5 M=B4]A-]ARI+$]?`#D`"!W>Q+8`)3.&NIL9L%"L3?QQ8=S:'7\)WU0FGB?FUG M&HZ4;<4I"QU=CH^U8O\_F2HU@FOBLYO2KV\1)TQJYS$*8 M!*O^1\K$YK[XNZF"Q-8,MMW-GPOYD'JER=B6!WYA,WDV`3KR;/<%#K0OQ,L* MRZ(LM#K2EUG94WH3JL&\^M5^]67$?@FO_FS?VBY+FYJ)I"WX[G)L=X4JF0'Q MGDU)V]'E0ZALRY->4@7N)U:8N;1".+;WT#'=QE!A>AUOCJ]F/R8_?L8E(_->_"9P M6;T>A/<7A!"0@N6'V,AA+GHCC\Z7J3/(<=D*FGCY%HJH+UNC+M6HPVEOYQ'W M4.IUE2K=X0N.I\AX^O27ME>,7.*>NG-T#7Q#S_++=4[EI9U3TW$S,F6 M7K)_\I87UF*,J9D'X%*UD!IM$)3.W"/#R.`#83^Z17H5QX&/C",?%R!ACEI[ MGY17MK$GDIA-J9$?TJJXZ'1,)*Y^3KIO(Z4G4]OB8];N^0L<8_/\C&WPT=1W M7OW`L6QX5A75IO&],.2XNF?+[#2\*QYX^IU<0_)@UUXA#IICLH[%MWZ0+P\I M^$`D]&A0A:83 MV!W(_NBD&.P;W9#;23CB4HT>HBF34._SJD>BD[-..6VR6\$IZ`6[VN)0S/%KCA'ULX(SF^GPMEQ$:X0-OF`^BF\=GI45X\P3) ML8N62\>P6JQG,I!BIX5<6^*#K/;RF"\1=Y:`T-W`3^U<>F"P=KK<$S.3`&O1 MWEFNXI+`BWG,"+K,J3D-)M4MED^K4!D6@O?_70,69A8Z_27\C8&Y`;\2-GIC MZ?WM7&9N#"2`XA-^S,]W%6*#![A=_(DM(=&W^^5K8M$YB"7!Q623%6#J!WK: bx-python-0.8.13/test_data/bgzf_tests/test.txt.gz000066400000000000000000000017371415666465100220420ustar00rootroot00000000000000BC F=Oq)ƦW6ʹ t)/'8:GIz%RŠ{ĪT6Ih"[p:krqeWy1[BONFB4y^͕{C{mGDӻ{j=Sud'!y*,J͇Ua&?kag]YsjοS,QHjYҺspjykK΅ Cr+I>eoI|!90g $2; ҸgKP/dU !rؑWI *6 u!0$* GAz7!ܒazѱVr; 5 1_q wiqc|ygo/#zHE'jNr)5J5Qze*8!M 4kU1%]|M= H!?ǹ7^FZ+|Ӆc"HHd+bDK~bJcك,3k9n[Z]rXFXwmVYt;թ>%Օmi#̫燎0* W:txҦ s_b|ȟB]Vo c04(Øk{|Ew4]<>YWEchi8~>+1NyߡdtɣFIC='Y`uSߧ ViWYtw%sn*<;a߿l޷og}= =cm2k9hߋ*޷5xgd_]?t~ l?5.f{*O {sUgAW?Tϗ?__|wݴ,ku>J؟?|v><>voyf /lc\^{fCE宏Iq^l}رj)vV޿C:;zڗ~?sGvW_/_ܯY?JùZqh k8joٞPY2gm){02]x>.ӮW 2}.瞃 sg]&gzz^|~lgz΄߷?Jfms۷|3cO{|e[ }" ]^rU[ԢGOuu9P}Ra?Menz["U\|WÊܫ;hĹ_o) ovNz?$Q}μ(PK>šz+j[M?|S"p._Gr=)v)OS^,[ڶrW_w߮\ϯ9pxOn`p&uSg;5'5_>+l.Y i2픙=6 jI+~;:m0;Vs7o ~qNO/w^d.JZ6zm1^Kλ|yb ~miSz[8X)W_?_n.vVw+2y+g\qpp*e+ZNXl*wJdx 
g "NW20J%jix naÈ˛`LfeZtg&d!>Y5`.YdUީe4x dSVGF|hTh)ҸMꅝM雷ɱk= #ﰍ8B;T@ԖԐhRzMi \#V%ƟR=1[9'#OxӯoggƠ4ԡs2*b/1h{7ߺ3+!={e}%>+`hշy,0P7,'xОԁMEf,|x=e/I&a#36 )(O_{ w2Gå|ܕ)eT m js+cgfr]_CI[΂y2\*Oaᣅ0ٸ)8١eI0xpQ xV%~wTЙR OPX~QuBė^U^ǖg!9/="Wu6:9*U1OR.~2+g^<Je Wv).v' ݤ>!3Z U$>V}+KDP/݊lys`~:g|qwуc>\{L6ZG0 _u%^ah hX\fl &)`,xvҙ*щmO!fOʡaxծ3{q)FC07a@^}ě"7^V¶gTr- k'r5qB2wU3۳hf Fkס_掫Ka^A \.TKuDž%{H\Lr5Y:4$@/L^bk/c[I9OH}ժͮϢ@8&WtAMŷ)Wz>$1홴+q66vO @ Bfݯscmw[*偬YϦ޳P+bo)]|IL֚;N}tGW|y\Ol)bGi{P8-V~שph/<2tـ1كtkL}:j#N]U'?l%kv]JűM\LUĮ%ҾFKUTO+zMZ؉fx JJEWȼ9Gs 8TĈF=|3Q=_ۺ?IAO8MFI]tPݵ5-|1+VA1iL֨k'w?uW}XvM3*BwlW<^şxotnT8xzK~]KU}ϖT0kdO_P=+ǘW~qGzlQB!,/:n-OUWׯ]+.s'iZr[zKa}0&+^شVeRɑ F'"Ez/ U F֏\c O\r=f{Z"q5ATbN38@]Tu\d741~jwU@El?51¤h1z:*й27eoyRu!'X$JD޶|Fc~%z]aAK'[to30")>;Nz.ی)V_0_iwUuNZ ԧRہ 5JZ=8ݖZtش՜@|hlCo@Y̚H鳨r05wAx 7J[<99GWZ4.4v0Jp~z}ǜA}|UC}*ף^:ե}d]xF9=M=C"!%yZݏ|s>VHvOb]iBD/,qs"ljf{Lm ׸:NVDG9ckP3("lcjf@s,:G*[{IZ[K d@H9EB)pY{@*zÂP!P]Gr(:P bαj,6Gu {aa[ V/OQc*#˫K`γo4UBwɯq ܞxt;־B_Ĩ0J$p'sXQT.`uiV(| ͓=nX/Y6zhlmۊ3Ҡ ^8ٝ]5ҕnu0~UE<`!xD;R ƸYÅ,eN;Ywi2:S1Rnun`Eo#YJ;8M[9VEZEx1a0=Yb+G,޴)\ BX?ַ:Z&gl ve㹟(B}NமMk-M!͜/37B+ +E\n3-&ge$SŎ= =3+JrVːH۬Z"\"r:+SWO$U+"=7 #1N]`ٶiC@ZcD4^@y+TZ8JQGQ>sK|^qGKv0 tIn@+%&`F98+a,T[πcIP* 5|RA t\fmAݶbc*"̧UPAOI\Da(IdX䡾Xu}cBao q!O=y.]I'*% {a%ɨ,CKc1r T`,:eJ?4gLL G_p,1gKSWg ;#*ힿdy% /}xe#vȚXZosP`?Srrj[=lEqk;A[l;Tx63 p-ykcd-f@ij;Ӟ)!^3fp; UZLLn!n}_]d&@+uC~$qB8Qn !2^LFmg96~c:h(}dTKňH QId\v{k$BkBtqQ.M3yؤFԯ'Rgd`0ҡd #Ҡt-P*%xڀƘ/l [0JQ ,ڳtd8)Ŵ;WoV3xj燰){ dx 1-!AzFy@Yz&R:44x0\O͙X݇Ɯp{@Ӎ5_T:;VY/m00TSԄG=l,R1ā8yo(7W5x5F'C#vkITl`{%6>59dVT V$.hJu<~[@OU%븵8S~A?æm? 58Od ;Df^<kϰuAq|JR, C=4b_x_ިqb@V)|ً4 q: . ju%%V~>+JPknyPgm޳l.Xb Iv<{JԤOP˝,[d]k9B̍tIxf4/E68ӵP/P|UWc硌LM0]hGX 4!ޑF'țPRrO'O.w9Mr^\AaC|⇰)>*=#U.2ypofs) I̳:R"YGG CPRv3|m7B%IPYdQu:qKklrKK`H7'5q;PB~_ߤ^Å׸@O\#2.;?}鋶}>x~M.S rpi|F#|hHfSJ_q:gy+]NzA@cOݻ:רU資OgZ S6o<&&o] aՙ0;AU8;w4~}⏮W FiFށ fUˀq,cc #sA@,~j2/g"d#܄5%X, ky'8+-v3o4C,dgwIS%w0,&et!22qzozBKXoʽSo{@%V( t&% Epe5k&-nXoR|c~~#:et 9}.(9\旰H5r]G- BRZ@ȀۿHtXn'YXҔ^*ʼnȖbOĭ;QR %Zn >طgmsiJ]yR>G/ Q#.c7,5VF-23^ ytMi2f җ|&FUhT+}PpSݭ 틞Ŝ*+AP[Z(bv$oj2ĭU\SO'8&Ƽa{ݫ7̡)BNRp۲y^v@UYIn-:-/U 4&AXKQ6#FhX }铄! SVXSKn쟶4Έ9{GĽBqc5N3¸G.L,'ؓNE6dsyKxʮ6Y^QW[dQ6q0 q91+V}iK94 q#gEtn`ۀ48מ:w KDݵ}^Ö2(;x'jw.)?R,őWϢ^yZB]kdJľ߳[I>:~s >}gU7%qX*=Y}&bLWw-xr>9:*-QHe^tj#FCywщw:Y-r|wDW4c 5zXE@2¹_75*mwN14aŪow45/MЁ:r(K8w f9!]s#WOƕGsnSc(`lNhN!GrB%{~AI4Tfcb_-Pߚ"Etj53Zcrɞ S/rvbj@6޿Eh`˂Z`5ôI>RbA 9>vNp9ϵ 4"&q ٷQ ŵF?y$;}@po5S9l5O~E)w<+²C迒OM 듓\cx2@hFί$4&Ri3p yqO(d̠o i#Hܛf_Yti÷ "/j v_o( Y?5!mZ)rR(; sZ2Ȕ dMiFS1]̍"cQ!D[Wm(㞖X 8 Xד0jph`+d^#gY\"ܹNw57k6b0̄t|Bvzď ġv9_GY76mَtU)S2#0c e %4}v^ ݈8t "}M|*K| d2*DVrBI'hԩ~KhA5 ֓|cE|px:#\]hY7E3?$RZֆM}tW|zVJcP(ʕy}+} m'[P7`,|b}<䍰o]K􈃋`&-%[؆DeMbv>*~Y `BP yE%|߈Ϩ_ ?09_,%NFNO_8we"41kj)ڻv \9 >bKzÁ,dCV<J;$Q1"P_h[j3Mu5ưuD&ގMaƿFd9Ჱō _6Bpp{\s3p3m=ψ֞{##A/+Y$0P:il^,ێi+q('!U XlV- Q2A<XJh(?6zίf#[r[lC`E䁟JI`qαH=$ogLN̸jդU0CU0d=ֲ&*HCˉG^#&ܵ,ɱߟ'˵}ޑ-o1_J%.Xn}N{x>it!'ʎo5V&RR(d\cP>v5\18ߨ99rd' M"=tQd=4j?S=F}:,ƶxaօgI%j&xS=D*KJq0N8mo] <,,mefb^>SN:5Sck4z. \VBR ȁC_ƄPtΑOo5)DnՏ}ebV`&#J9FJ8ef\vj)G-z)Yyז/Epûa.r}> 9ݷg(*:& =S̒ydva(-gV /$74$R3 5|Ƨ,֦MTKֽ8%Ԩ!.^k?֙qF=Szy?cۼ?9pԆ6ZoE쌦 ݉.†lkī`!ǫ*{ Ej"ʰڝ $4y/K300CDKhsـZ3oǬ+1 7ؿ"l jagB7:SM;s, "x7^0%Qɛd`>.Rp- .àA/3"IgѢi.=ƙb.#E|3]W,au &h;_es#Q(Z/J5$W S*Dt2,ChC8d~lqoavyƽ]\Ĵi./U5`1Tmm-Y~ugaЇҰ3Gё5<6XY;[GbtjqcE\A˜add%+Ua\aH5:Fߓ+o7u=>ʙ$ESKK|Ea?Khp `J}dZDtKGc@zyI&|eȂ(ql|ZH#V-q^|A a/oŗOݦ&1ͿZFQ{`b7ėsqKM,;?!.GAcƎ&]vXUh̩k Dwªj Ks7|d贛iY=լ޲k2QX%af0y ᱯs],.yM "i@I}a6\o=g筼cw:ƯwЗf >{>V_f.=&ƍX@H*#ݏ. 
.:l) ,f{PPs*iE*͞SC*S$ QZ@ꭥ*$ (_#uي{'>B/$oCd`4jdJ==ا6:,Dۂt+VZ|^+?E(%д-Q<( `6o;PE`VƷs4co'''ێߑc"tfW_uc^|&?+ &mL檏+Шj'Pqy9FtwG;iM*ň7 8g$a@vLN %{h{4~涨p wu$y2'۱0 f1 n( >=G K Y T!1 %z |48P,%5jg0zv‚y_QMbcMew72$8];ۖMr]ƒXBn\`kO-?Sp2f:|ry%g[,JDQո3!$xhJDjqd̬dW Ilλ r@w+h}dkσpH,^^ꌯfÓ2|V o=~W#]&O3 .Ԫs\V칮`ȼA+)[hyAuO~X A@iwi,_tE_?gWC\RmHs!L,MmQ5fE\++Σ[\L0J,ԤDZТO4K<^M),sDF< nz>7V:b,<3wn p% 0%BD$^uX@C`SM%*'8VՈM#5 =]:w-WƓ'I/;17iO{ϔُ8멣T-%MK)v[quy"\l?b葡eqKn|A?(+,>A@tTn JFfr E2WԮw{|ώ9ZνV@dlŪ0s]>JJ;k##FKu{ɵT(-*OrS2E-Qki[c_yC"Poa=6DW^>+aϠ }b_*GH=ҏ%"shfu-^PaH<=B0PNʌ?u+k8z;~{&|)2ΎvzkmDw,Ud|"ŰR0\,R퉝 ?I.gz~<{;vt6ʷxkCjEɊ`*p_73e$>^L`@*J3Ӥ7`8yNEkaQ@|)i` M=Ipt d9GQ}L'Oq}&Xuv%7'%WASo޷>S59I|' #<Ƃ/o{{[.;|vg}K5,G63,kPj*׽!aZV`t ozypJnMbO7T1XnasR5!/-N^8C;lQ6zDu_y̥];\%z$k nj,Osj`OOf `ֺm vx*XE+X2~I6$[7 셥Pr[Jgx'4N%j,kW/&z[Ѿų'YV.O,Jyy˟UG<`k1Ul۳x0}B~65qr5 WKmTs^ރ)Eʖ{ʯf C1%m4ik u¯vBM8PD&tnlg. *_ˑRżP`yYr}UfS1<7ϯyX@PZD;I܈.O &D)%oe,n6ͺ o:MP;XcD =zˌ|Yj:c ƧՃH43TdDc0=.6+9GJ/?pMyWy+DH4U2K KC$#uoBpHw4\k<#N*8Տ~(Ss9O|$\ J>p%e#+ Ooq"2LP0R1;Wm=mnEPvj<]ⶇ̠wweKdͮ,WyI ToQ) JbADikIsM_CMl%bXX&lj; NP Q-H/("=&,Z`hMDQ{,PcW&k-;<]3)8H5{I-Gcn;.%N!ƴNSTmy]e)f%ʳ=O?ؠ=oފpr>)\l%:ʯN_`9rUϋCzdo`n EI/f&RGL@׾rrVI.>r#CjVe"ִ: ܂o%.M}A匑kLMFdKiK,~Tq.zE%@ ŷ y'?i{^& K ŤGh\QJI g?ѹ)Rbң$ ĉe5lT#-8Z[*pځQ;L$YPe;Jhf<"r7>Z[FgUÞ(xzW!ISO%,NQݕ5Z!f6vd{*g 1I {WGeA =PMQLnMVr94? p; FgӯҬFAPZ;ib>ia04"Tο`ϤgZW? U^L&2sB!=xiF%KZ(/+;-lӑ 9'WHݫ`HSKE ڶmqD}+niCmJʽLa k&ٖ2X匠j ~O;ΧNfweσ ㊎WD ^;cbTN }~Bmk.L͞VpJk:*|#FHce,F]1I7UGA~,x 4؝lGlՌ<~(L2iDpQ;WM2u?{3rP)/既yU Αe~FsC73(Upk1w%EMM=UQDن'o,GLk =vnl 5X̶$+6P"7;۳WqtW߁|CRHhpB>iJ,Y}V!4&_SP>/( %TN 'n lˏ[T2כa ~(Ofbt'C8=Cn"4@ I MI nc?S7onjJE2qg\L%GR 3w9XV =^4 a#a-rL,Fȧڅq$y^M5[fw0Q/=v36UՒF^2ءܹ"-O{)9 q… 9ӏa-d0Gڞ*^j|>xcYD {a,Vc0;$Ns+u.& Zm%6` ^h~xO2Ip_ 8}#ʪ;>G:\̂UNm'SϰI2ưpA=sEyu{3'UuL ΕnN'QG.#ɧ Ouk ZK’д$ӂj:[ގbĹ`fq|fH'W=U@薨!7~.wɤ)O8ʮ1#L,Ϙh)b1]{(ouVe}FJ;kG5W> &5V̫S%($u_j(|*j~j3 $a0 O71;nO&>-0Sy~/+ؗAGEf\U싁kƅLTNOxC8M imٲٸV$oc2T)֞0(W vȤYsU)/%XwwxJk;EW쳮U,_VrѢ|y㍋H3uF@rEt =bT#"#BVpL8Ҫbc^ [Sk+ϵ]-0#^ P6IW.Iцe5&WK1O~bKq ,f&Q=*B^dx|\ 1>0Gًo Ҩp iNڠhKGu^ԊL'f$U4<<&*85o%)ǒ;IX#rNc$aTh%ְa%@G{=VXuwGĘ)f8\B2})I-6vy!0B;L$!:Zmq)v)$= I?ZDs:bì8^ f!.k&lBC=`d:ƈgQh1{I0R-uRs #3HZP(5Iړ9έ-,E\xI=iIIx8WfhoixI|\G#u?)OBP(D\δlU'O|L6h*S5ILjzgZk3뛓J =^w?- Pۏ(*0k@TȂ.J~bQd׎[6+=vg9Mk8%#UƑ[ rKc]GrFf =vFu.&gwe]-_FM,?}fhMfc0$!4AC%})ޑ~(ZmlR-;&AVaAWD2f]2d<ól!'1fW⟪lJ #DdUGиu I8ϳ <{ojO pHq^:!*QxH5OR"#KH>Yty"&k PsZn-fհ>>vFsHECgoB+IF~h p 55N_23άb+1z,L}a1XH:y~=ZC;ݰX6n N;&blPfD#(znwKӚ?OidcHʙ,g}飦A G~]tui^Ƌb]23%6JGf> Jnxi T89FMc+Z` jF[zUh=CG=M&6^1rI{:} fon'N-w7羲.cqeUrPuʅ-f;'$atqpANXtU2NW"Yx%F88]"&Hu^؃yT~ATQcޅ,CIL/֟pFi&rVn]( 44\=aH' Ȕ&"[v3 *3Cb&Κ\ yDU#X`졃ĦQuԳ^>CxHB^ma|2!%f?yjU5B'[szfr$~74lb$: H]"MOϹL2bL3qaq+=0O=AF k2Y?z8t?Y3[sls3D0 p5~*_8@iWQ/Ā"DQb[:#|Ċd h#JOHBv jFbzCpg+!kYZ3 B6Cr~GsyD|73}ؒN:b.2H w/AWv-2zv }G,DNqEh=.~|*m{x@ SDWn؝K(+Ik5q-" 5ga(:<!Y6@6ّAhLߨOp)Cdj3* OF 9?M+ y(S(iH w禅Š4֙K Mv3G@$S4 (d{[("7Ӥj0v)\"1hy<|Pمe =Դ'XP5&a%*Uҧkjbu} |>Bh]W%]\oO-eBhgɼ:cP%Hk%p< 3r\=,@a f"G3(>Mo~6LJ^(y#B  Esn :"˾ -=1m:YDˈVh( ]F qY u4nůﻚ9&~\B4g;vH\nsxIXWvUF6\87l7ѫ(P['0G Թ#M7`}7HIT1(d3ꑰk9L9Ly e:u@;8ЪM0Y"rP B0'1DUTxŸw5nWIg49cT*d-j-5H\ˢثlJcGL:Ę!>v* 83i0Ҍfit,4#~iǃWK}.͉vf, vkX:ioM\c!GS8zJ'L%*WZ Z:`lč5?/I|=X|P<p7GksJO(BD1f噘X$vy:֒=ZdΠ=5?uڌ) W>c^a` yԕރHe♝;u ]B dP^5ݘ:Msg\$fi[''35W3eS7␴/ `Ba%[P+Q0A%g`oF?# |뎛"4ob2 s-Kcۺ!0:?=bSj)-Ӎ3X1:|?B4`4OtFzޓpD3M84McJaί=$jjS.O\бTGEo$+7@o ܎-BHchҽ_901A0#HtBZ3VHC/J!p"`k3S#o4կE{KT6N!xmn!16^Q>@UCW F0Ca0OV`GK!8C-թ].5W >MIȷ{}P`2ZOUVۢ^V.ϭ>^lF 2EUY >Bκ 0*׃ maXfc=B #sJ\%&#"ëaVV$#Ģow=E!x]g#SM*lxT(c!*(qnG*kg dL-ZāӘSVeq(|wubsP.x31З,ۯ I}t*$_Q][;` VX{Ca /9m .ӌR8BIލ~D \n ϗACÌ/iT2*N HcwX6 
3[&X|gx?:4kh1X,m$E^<贾[Xz@h~(W!F{,`Xx,2\5|25&jYIٞ|=) zzG!'D핓#38_jN*"T6?ڟ4ge}:'ٴEUݸJ6SĐ0^Li``Ǘ:?šŸ>LUe-tģ(/J+}F ӟ-TO)TF:\\Ǔ'ԕ'ZS$PZƅtO֝y=~u95^J0E bmf26%81r-: kI.|=5*7Ĵjv}u݂[(С57PWa8ÁGP"#vh K:9Z_/^f<I5qrRl{Y(Dz(;yWjoq>{sGYMG9Z\;o]:شz(zR>+4K՟Y1s&6X@~[pf{SS#Xأ=%_~DHl,IzNk>UM7Eɲ!'_QtUe~"uZr&K?I?1 4e}IZA*BU b1\8w*Nθ<Ŀ򌟱o1гi *ĪdQh@78s> "VQFn~=MAF5_Č`X}ZۣPxZqQ7x@Al1K-nA_8P{,=Y;׫`"gq Ce-jo%}$)G]i6x{"V$ M!c'Q$Jʟ6=?ۛ rPu|~Oہti8 sJղspKe)=▘{ c"`lmDL"+ eHYkgN5, [nYoɍQ6f">,އ-ҋ58'֝_'{@lVhd)Us{%>{J\7c(JnYjL,TnvKM ɝ!O8. N1)0љG# ړ9wgRT&zNxrZWuQ9)7 ƜR: X6sH"K @.1rj7Jj|#!4Orϕt:eؽ>eÎ7!Ó=GQMr%P͉YP_%Tnqe[Ԭ(jV){ Ra5>ƒa<ЬA}J#}cs=ٔD?Ԙݠ& $][Z$1N>Dmݶv*s疳OFZ NMXm&+R8羃c~dA %#T&g֨,jƶEY'cHPj)#g+4Tq[~S+I4gRuA8 1iٴH K"*J9A2\4$3 19J|[h䢔Q3j+ j<(I7Z"+9ZfWn~>Jo[Or͵F_ B;DNKlY>ZkKw-oa0:t*F:DHv4QyM`b჏{4ۓ'}l[^#4`|ZKAA㠧U;gҶg1b<8I` 7  4zhaktmrJ'ςHyZᲸhƼE:_#q<1=]S׺?%5rH6gEE[ ,G9Ӭd(~zНХ_'`%Ӹw7WDṢb[4Puw-1Fك*~LfĄ*B$Q2y88sve(~9>Yt~*aOphh1\JLoI5Ec}G>TJYAantn I=眼5 p0H1 @:B/MV@,W2\;m#^]YX?0;ة\[5E&I,NxӾ1jOO]гآ)bUj{E K͜Lu؂!l|nz&hc*rmqΫt(zn:0=)aΈ|Z#F;HB(U#|ġa; 0P''ae&F|6IUVJqt {ʖ?J"A;[a0.a!ryB8x֕T.fEOSN֕1m](KgPrurPXDž'n['u,|vJR,igb1Ꜫ;cR31^DB#IEF''iSأd oXp)bDۓQ$b6k% z>|o1ߢL_1xi :m1j37Łi9'>3&΋UyTt3ٖBWQO̹a$^XI''OhX5-r-[d.񒬪fU&XuF yh2kQ-MNLk XzZp3`Km#?8>w>{c%L(z-xe]b)MC:oaW6.x(,q^M )Yu{کŬoGЌ(鍹Suzq{ΥoMtݴS5K{WKd&GW8e>X4#w|榨I.(adzv|bn9u`t~q,: umx%7rUXlŅvQXK;8&H}Vؿ%f.TƔDƙ"C݌qi@4(1#fUia9ep,a5S{_'R2%/ntHCJX$}l1LOԄ>\X ;[opضudԄ- ml:5qYWiYMmT(SmMxd٪ 6ܬsOI_o},1نee&4OFF;|1PhcCLZƊ#cȺojE?M9m LLql_sHhF)jB%Hobc0iٞ3].23fmɠy,N!{MK9(D2%(FO8wS :oؑ=ÖncRBjU3 qֺdE0y2{i2Hdxná-.٘mV\M!aCkt1y$h e͟q+\ dgFzԢ+GYfg _la vX:r׼UO[|p*#ǽ5F)Eƈ|eЦ.jz}2߃a/I$ȩ6cZvo8dzphZECb9TsG%ku0IqROy&U4 >lĨ9(aIS7]&2 G U\+,*V Pp^پHDϰBܘ4.>4c Lܷk/>n kwGњ#"aKpO,c VNh]! ;tNIFӾΛppGdU`yKkHXK Ő0u@/l ^7T!E~8XX-Ble3m YB@xy~if@t'5&Tcdt`d|7^lyQG'oU>b Z#ZBOywnyxv.WOSIĻI sJm[[|۪aXw֍{OcChO3O~O=//1ƀ߹X.kX-"UΤcD52:m9}`RĒ+;Lk♟@,)uuwFplrhǚGQ⹾r9/w%={L-;ec3h*{\xc՜_O 1t# -֣_= 9 yr3=V -94㋀n/s6>NBoz@o/` iO30^XBRR[a˖6Btd_vn`/V7{>].N^h.)S-a@R1KB6_vCqõ |ߨV\AhO` +ϮN)ٖ=\aܬ[K̖_`k&>K9LتE?K?d?&!_(MbBHuB3n8dš7kV )CK3G5rcI8J\~gU!y2t512Y-$j,*]4PkP9Pߤ֭(R՚UAȼJ;溒v '(>".dEbTQvwM}uifeS9TkT3'ul 2D`c"B֠@l^..!./b-.W/|aTpV޶{`'}7.\zGT爡ۗ<ÁT w[R1@-5R6pY=0=c,Ą9vЧ:yvr7OpQȕctߧ 59>rņaIlZ,u-)3G*%t[N_k #RIJK{'>"X-bS ;Q\!ewӊ0Zĵɉ+6ƺzok ,E U+'d'ه(BN 8]stk0Ev-̣",׫!<0-w˅`g:խsľ2%Q@`ON^[IBqr2ʣ G{6TQ QΥ Vu s %':ELȟO\wQSI?c&CXfM5/?A28q/n -"]>^v?nh|A͢ӞR?x~(9Zj!-;UPo#|K]߁G|gv8b$CBC V#4shhF^㱷uqLwtD5ԷxDUYqjmc)uP.l5\w- `PԚӽrnG nogOzҶ7-?=mfD QxC(~Rރ~.]AIٺp v6-uXkO\2Gl,2TS-6F}wiLcX]HO`~RdrX$l$l;pCw_% vj%}vH=xyQӂۄ놊($>%*,1ckfh+洽yU,iOܞ>_ucߛzd{f-q=Եݫ(s] [DNQL%@`Imudh"qB88JE:ԚlaSK(_ qGX/0>vIaH2y'j@糉#*U0꬚Kb* }O3ը XVQ$Z^{EK@/EHb%٬?踏H8s@d]d,^1 D9ӒY!_^MPW~+SYJFzҼ~&d oIZ%JO4IĹ[Ҭ #bAItHޚjxa)n{;ReӆFb"E&^OzlmLAdA,m>b]|R>Nh7Re?pǣh#E@g 857PN~؁3\.Xݣg.);v+|TܴTr =˟R1=D3w WM}T[x͍,W&rw<Zip K!|4ĭ>֖B5-0IY\H$[=bVe[m8<A3!,ŠaTYUugF,!9l& pPM(k),8zŤoDm]`,K⩽Ϯl~I%UQXuR媨6TK/ƈ  3qw9wqQ^eV4JWL-JA:x_ot)F825]= S$ lGӪBxҪdH2f}uPeua t5D=b`m:}&i*V!gxv;OM2=Oc]MsH1m2!)w:쯵+'!;cJHy#kv֙HCě,VE)X.& Vfg9cDߊ`{sT1mՠr/%xOR-*:BHnkozq3ow7 @y~s2GF_)`N hJϜjbS5d8qzl5KMH rk;iCiSwv9Fv8W$FX';\EPt=U;?oO2"+Ec4B!XT$q ׊=2#4HѲL}U D)nE9qnL5E͘wL<;w*<ӕM-,?{TS3NR}Rgu/m;L=^'￟n^yXs%ovFԒSHs&DNhjg~]2;S\$Nוj ɕHz}3<]irDA͌7edJ"eRtu+ݥZDbʫ+mq[h`*HCGXO*,yԆKͬa+kx,RV ,8h|SA {b)JW Êgď -U8I~aY(G $4+R|)^ 輷qh+.qEGG2kcn6Oezoc8v}J6Bݷq[5ٹV@]|o,|C5M;(aRAYΊqQތp3vsP* ¶D$n}46HG r|%ߜvk6V׼˂Uel tkxAa'(ǩiaEaJK8 +Ըڎ>}f:3d!Ž_=d$i+iUkQyz Q[EW䙲05|&nǙqi#|1obn1o#lYiֽE7S nԍWSf 0!ѤߣD辐5 w$ vF,ۚ{"fI߱I,'"5œ*N7FUՔ!ldsbqC.Pfج6ψݾ}XEQD\LVbnDچ"ӸvE2DC7mt+rxhHC1Ri ,P/.Xߋ>#E}ǶzY@:Y&>]$UܛIe7iAȦ/E`*/? 
^ j=GVuxuDl<.=թqH)} ӷ1:8).ULuW "0| |iA`AA/L9I\pIz سw -irs1[wtcPoi$nq|$<5[G]Ba hٹa%c %WQ%WS3&[c!D{MXS؞TsMN*|]tQR(3V2wRU:6eM3mȅ|Cdcn3AG(i͝V8:$b0+B$\_/fx)VV,tX:|H~aXaOL, ']48W:#zAx=G[vB FIz W@w>$!NH:q1kt ^EGUD0~dcWoƣi_^No?^9 ݒ8-OD fz W\y,O/zDy:RfL;}?JJ| 4tlyC6)p~#'lx 0^?'5O3 pD2llږ#chD6ar7Ꙙ.cs9r ([}Ž|LLjNsQEW, /PmxV=p ʮ}Xԣډ1$A qǴVZ׽ HnlF-]9x&!AIʇ٫3!yr.;7o<[wl}PO)c 9=Bq4U?ח!237~xm܇.+V4`~5<#+6f?p˒&v ERXWOO#K;Pq,ܭu$ 4PN"=gAnAYMpJc3 # k^p3Ç`'do:*<|RN&ry;q,7he5?nTbcnVAтu/b* Tzhe՛]M3U}07'Dpx>_W?a`Ur~& փ[jE$c ZWWҠ`G ~)c~V? jц)k䯋ߩVF$~")N^$C[g\Oj%g}bY$ su*)HDs,-}k&'Dsٔ;L9>=z_,|(R(t{ΆJML/Gf#dR4.5cb?iV~y{뒬0yhX?|F~b,ZΑIkxh,NgxEEJgh# !U(Ԥqy#(#z}‚ٜ?- Pߋ-R@ K{aщ\Jn ?GI%C'oP0EGglq~*bT`w}U |]](p- jK)-M# %?=_&5U24~\u*Y*̒.S:NZ"!c_FA jlQ9cne]U/<7ΧD8Y"L+Lv?yG2?m3S0_Q/e=ߝ"zj*jj/Nuٻ\s#€\$%) 6v[!?ĝfXj{~ T=."kX>DSTP%VZOv+塛4Z0[nxDӶE"±<٠{. X. UJBj_z7ʝGèY]:#nC-X?|d,Ȼǚ[joIbaS:f2_R'8i:zZh]TQ!o'l?fJ>Ѯ ݚ7Ze.p X5;Y~?ƜdB[~/6F,s)&7#=9bBٲDx>p9'/o2e4xyW'*/ Jֆ yDcQU\M}ف&nY|u+?&*[3R#3.wQ8y]_C9(!'y!2 WUwG 2qN`^<,nMK`rlGT4?&[X-_oᢵYܷ]JQM'ĴD/7c`mn]u7\#I=֍*QUpx'yRZaA-_i3!p."~fĬ" RxƁ\g55dfok4(J&y0+g5!re^y#]"z:j-:*G:moW'B;Әpی>gL_ЎZDNr]+{~C7yLKu>eZQ:MAS17Ī^xIpŹ8Z:UEKd91W&;U~,ܢ?vƧ'!$g'b% [`bk׌ hy㘮4nz71Q,| OB}. ir>@N,L>g6$ jəf:  ^.nr}p}ѰQXMq+^ CZ{2[PИjO NRbcP|?̚-3&Cm\ cg|N5 ZLm#qiM}Rv%l~2"Hbsf Iх0 .`0 hc_[=Bb?.џ9҆MnJW 3$J|u1E3UyMCA}d~|s+Y<\4T'S){I("w*(+3d5>$6qJw5AځkcLn'5ǚ\2[ٝX+OV}y|cEqfyrM4S`RX@qU O<`:DabU+uU7!!X.PȈ)c:<%/ݰ9b➭Z0A$1Xv⪫G›ipgTrr?tcSۜ,fjۂ22X3j M8A5aHf>/|:qy@iBKl\C'ĪߙBkR6;3Γm&#Ղu.FhHI"npg8lKQkQ cIliF̢ۢ.TJuEpn+}CݫuCZ i?@4X6KaqF5 o-TDmNxu\3bİrS}p>7'n#brPSs# tSx|UT?<^#yw"ԤͨG?[?|UAC]·>bb2y|ߕ#s\bqkٲ]pp(.)l5IzӬ<||Z{JYa|?Ҿ1ȅjrjw*-7h02'6A{ү_g[VbXNHƔK*}t}ĴsNC;v'4:B$ Vm"ϑ,TNartcvZ3lTfrт3 o[RUbB+IrdeGyFG!JkD]5JFi F4o8UM8:5s5fM?Mme;|i#Lzu8n6} \qp К($'ZG6-(1Zbq֩zg=Ft.I-U~KUlym3c2ԸxKD{/{(-;)6ZI8Ͷa׈X}FUmZ%KjD4bA$1eJ㡂$BU:țҢW`ˏh!'B$Dcmg$+rf>kk~x N>,tUaC[_ Qqbk*(S:neőKhBJzGoiOgBb4hܔP6E'/ī/coAX'Z(>%-FPT_DG/bɯOe{7x=; ={Y=qm`5 z.L4UHevk1,3,O}n+Kb35W.lvbZ9ΘE t7.w^2'gd(NځL z{¢lϡ\z9cMWYbJ3KVs2Ȼ [*Dm|=!rb&>qjvsp9U - 4F&[I6ZLppCj:lG]JOq)@p/Lr;ٸX'kKC!̣dck,f=i!i:hA@َ;uC!P}]ǯ-ʹ&おEa=yeWZvp۬_>Kl)yZ)b{J0 `dgNkwb@Uf̴[G~EnZY E+$l39;i+\| {KGl=(o5_s ?.[4㿜1ؑ*{"|`q ]Aj(#z GNL8eK NG@xF^qЯ'+Ι=Dðl!ԡjcP <؋w<ۄp:zxVje31YeٺBsy(b\;2{\0&YEqSCH4y:l9ʼrp`ΙyL,uD@vrʻCt5șOQUMpN:͓{m4AX'cVQR6ŋrv sVN(3vp׏&J(~ðkjt((KAK"& t/*szlQ֖%rq x>;*9J584{iΞŮz_,ݯ;w)ӓp d?<d)Hvh48F?K+Sj1Bv?Vyn(+@*,؋.[}]'2'r[*gi[ǒ sy!] X*7Jw [n"' ^m&-<",욕U{xua礼gq=A$0$ڛ2]IޘpY#C50mFˀ*~Ʃ}#2JGmHV-wRaSpm_֯7UlUMa5wȰt XƟ6J\dIt/N 营dN/@ҭ7te?h?9y++wŽËvF=M^6G<0a‹va[r oF;8W="jmU3 sazڬ0-'t^ΚC_ H> p)}@czE,DmsOl9FΈE->SIk:GAK J xa`_ҏy3u-)^1wK%p4V"5y`G &\`Ё _j7=p JOVv6AsJG B8U-86G=aJ QysҒ*a(1s b8CjWbFQoNq*,H^[(5f xF:)xH|0ܺT Fsxb* W n#a #&϶Obo6'ӎ΄e J&<,{ o SE,dm_uC}3ޏm$V'q-=ՒDe0t`j@൰aA-4V]u*wxb;:=5*2S<&5-g! ֢DO$w(b $L+sgLET"NϮ#UTh*24I+5Uy| .{A?6 |`4:f⇜x%,-Ȫyg`.D hH}nXj!)pnƼ5A;@V3MJN!h$7WxTmHVY'7xfby {$!3Y,!шFp1BÄ VMjHe_Om7?侞l?U^DXϓDS8ʳM87l4z;Yif*tCf?~R%ܓԋRi $iOu(M9|Z^NVl ?ҸG`\yeÝ; Aq4~|f.*7d*1Grq{:,2D8_{ ccIKS@h.]b#mZ)^ό '+=g<׽~S`=3:Ja[̃ 1԰9k3H'W3f⏂ο 0DΒ 1\' ~_쎋e~͞,(-G7ʞVS392·zPv0ІKv.ʊ*á;/zlg:gSȔs^KQc{V`cC%DkwVYt5=R(2]Ԣz7~Fq(XdY4l.w1kc03m9Α<\ X.[U~37BHiJtP3c!b9QRqKs&|eƔ7/krN2bD2=XѮcl㑨 t,b93Wls1z~;.ݚބ8 erLm)Q.uBIn!g)R5֌$l4q N9Zjj4OPFTNNAwbLyǣz4s4.c8b0Wdq (c\5Ic3$Me2G,<3%71l"E3:"9Wbż G9GXM`Q9eS3o6CgõGP;(!\Z> ڢ۩ d)u~]Ud)&VF]y)J7 zq_ʸ/w㾰H6#mu:#OҦyf*(P/E82Y|g!OJx;"|j8|:l[S̰:ZW f >ՎHx6BrLYYA !˾oR:?r{biLAs7l5IA;\ֹͅ&* 'Q cd20>TxAʠ@oZ}8{C#p+& TߢdIQ0+zŜ|;kKPrb4ZٺۜcMI԰QQubio[+Z=64zY[eP*(W)á<4beo溼.SzEG""15z3rTĠ);B07;س=;^`.чH}q&@*Ԁi]"? 
uYWgʹ{(XѨ3H ;fEy#BNcکu`pM55bKt,Yehc*0$"grY{zY]f>.ݱ~f)18VY[Aͣi"ȩ0NoCGyƅa8#_U8A݇Wy2崱!s̪:u{oR?Xpz!8T6eLӾܦf?t}=`!a>ÕOm.fV|$}Y"3"_;V9E0RgTp[nP0g_x-S=v˳<iKk-Iq|jjl&13`a;PƂahoxTPx Jص$ [Q^SGN4OkTuxM[~'68au.b"a~x '[nj=zs"y-bh/*aD,;"2ߊ3G0bŚ#Ex![2bb:[ݝ kc(zHagg$''xl6<[-!-= Su^c>2;"t\^:(`|ƷHƠHKxg`xM&nk$j·[Gr>eKC1R¶z<=ڐI#.Ak})_%LldՄ|'%;+՛KZ-7c]+b°5XRԢ K'ȪCp:z-VB=:_:! mLOZ{;vbW̕(j{&BX-@=!+y$ FU &R*?8^2"D nELGVZ$=#Z!*a K¬'˯X;Ռ8h.Ub El[rC^nP}$@\{䍜7G'555ōgQaBϯ$m=jS/joh-#bRJ!Q/%+ʎQLVԦo3xB$# V$C,$MX^m4 HZ3>.P4VB)9tndL_>͟p C5RTZY3|vR̐#}0d#x |"pX3kvN|`xY?*%~{B0ǥXgu}p2$oGMƫ΢T1l96T=8?jm!-q3c'ix4|.i;.tBtMUSv>#O XS ycO,>io ~5Nɸ] |;OUC;O?NGNL6ie&^eLX8,&EHtZé:xQx0u 9l-&kTMNPM[ cסBH`0E;{'\w~_E;#dDȗ*߈&vx`dR۟Ui rԕ˯_5l&$[XMfYp{0!I `@@2| ŭw?ѱg--w[KSacmYuӷZl[&]|-C9Tp+GZe\[&8s鏐OUWZGf's~%c XxO+3oaiٝIp(%!&K83%4vgeVzg*^d_~=.?F b\*>\QR%rw\`MŃ6g)f1JAKR](z:CQ$\ш3_ͮ \?9Nr94=EE Ƣߊ돀~Fmy4X2z4"řy;WVVCe3.rY]Zq [f5~sdaXbwP;U=ZNBTsa5hz:u.9j:/All} iFTwQʥݓeق__y0r } ÜDBTn ލ󕡹H9b7?^ upG!tPc{)xza~mWV6CԶp;u]=u~gqJjJuiU\P9BX7^SMRCN3y̦1ZeKE1S7oiǨaˀ6~hoEWqX QI )[Lز,ZtU?TUO0^*jd_k^!]>ځI Z+dlabpj~A}Q6 P4XzΛPW+և(FyterG "{ׂ'ӓ"aqms R$NhƓ_ C2V =6alČ52߈ۙSӨMD@:7stYUWU[쯇 Q |pIMpAHwXO]VBt ~|R暐OGUL%q&E؝5 (A)ϷDzhC<j|N^!D\-5@PL`w2eڲ>2Xu}i ^?CFLSY 4^.h9= ݹ_iPl9)l۔Ĩ$TooOv~ uK΋Zu1ۿB$5RQ:oQ.Հ2`=Qmź @хpa_g8sqR귧+5% =4徕)Wq"U<[8ۛ]wxüQT|l1}0RBx} JAQ!f 1S& %IQrC̸>ؑ hGȅ1ۃQ$W>-'-C^Ʀ/] ÿaVvԶZo4%ߓQ'<8]Qͨ]XmUQOQEЇ4bCLIW"#{"K{psjR)nr5g|gʔ^q TPepJ)c[,}V=K@Q=_^bS-r=͂+$*t1Sq>¬e@{$BsjKpXQ:J#T -Y5cOU^˝}'ב! Jxim2fyYv<`ayz'WXV5`([r=Zl9χAwR;!^(Z$*А\Dr$L|Q2Gr%u8Gzo5i/bݦb9x Odz 5kh)IM{@ 'N#'Pܔk DTdB珏pn=aUO Jߊ[];u? j V)ģIQmL1BhjeMG%gdn|F1.B30MOj'U^ML)~S:'ފ-ƺK,)u5ʣ'S8T0sL#v J,'Rؿ[_Yd*YӮ+c1Vr , inV]ׂ>Y6Bը֮ 7ư5ekbDAeB36wl"=vX wʯhYI"iGQsv P{n6{WpLx|zFuz%hs .Y"SdEIP9zɜPemT͡Yډz>=LTF~PW>h$Ve9˟,;M֪a`GxvL=2=Z*l?osygYc~-Hdxw5| w?CiF!g6mm:XI|VC\3ΛǶ ,b.AE'x1 AX Uywo@7$r9ΏRZ k|ҁ%@k{ Ѯ8im8B]I]c5c@f*Į { EF8Cdb)NN Yt{$Ə|%}Ĵ'u ܈=)iyНj^,}A$ )غaKY#u3OxCJ-S\s' Ud%OeP➎&(@1&#/LC E%\MȬǤTZ-BgV!z|w-D.xT:xo]6!pA3j.N*pm,+g1E&ѧ)Fyg"rFX^AO\II ѻJW-gUm#H3YOr~IcS=C9A)}g# (z8eqSS 29_.x 15[2uf:W-'p#V5cm?"t+›WN _=1Wk(^Ŝ,͊o*.N+ޘ9o_kD`|#36qKg.0r F.Տ}V3s|:? dhj\cȲjB\4v^; 'pFl`zmgylS .ӥ]w*qstcoCkcj;0M;KXU*uմyw99~88̌Z,:ƨaADzךyyhW⋫%ŔDS% l+Y;#Mj$5.;A.u?E勰RcTz9M12@EQO N9}P:G缥gIнO1.Kg0yqYdVܞuge 6LC8_GP/{$JDJP=g-le6. B)D|z5o*MAwХ7;#gu/~Kvotxy@(9: ̰Օp.oNFV@=-5p> _ԨãTѲcF hKJ:b2JZ|9=3CUB}-{Q1jV_Q`O~.Ms3z9XaPԀ,,ӴbQ$Q;_ވ#h5`RiiUTq/Baxu;-伱zl=?O~=xn= '.أ?Χ*S,YZ*\*Qsy*pD·Z_5#Qf:>D-=vVpN蝃zx w̩;6E[ȿHp]X}C,1-P.y5UkaYhvs`^{v`"$_[)K 4Yz߾X@Qws qGՙ!g &G42?{$(Xd|X!B>VmJhB~lR*6J_1^h9E YY._]֕YEaGź}Kg\3`ֶ#{GS6s`SX)kE|^P}BQ-4Ky6''=`:b9)9衢 7!e3S3:$`=Ex43Rmau5Z|G;'j AMyg!o+Ե Z4I)AwGC8:\3|ww'MM~;4>. ;2܅c| f0 Ѷx):JySmc쉕8q  \I =uP(Ղ̹ct/~%F5l>{v޷BE; w+xUzLv1"jp淨$q@?h!Rl> UJ􆺜| uw?#^N1p:Ud~5a#s;0UYJu!]0aKN* { emꯆ#e>Z% wN_ SbY]S 6E Yt&.Ņ8(L(żG$b4j\ѝ)JFv{rĝąI l/mr~ rTֆ\TzvQ*1g>ExA$o"m n[St!-Y`7+uaWOv/y MH+ȣR[;YF[:h Qbf7c4r+.zP`_/3x]Rz?2X: Zg2r)u_/Hvn^Qb%tk7>}+J/wchEY $Jm^\I),nL41Q$?OvjN[tB_L`Y+M?Q!]X]!by@x$M#X? 
-C DB[o{w/No+<\4;O&SSӊ{33*1,}u U  =1Fpt*{&LJGAE55ެ)-SO}Ui7>r c>]0NB -RV39IM%0Me2NچtRȾk&fA-\~c<'X0 rf:d9>ڣDtjYb{?ϰ:n~FGjHK/]G 󫝦2GFu`zG~Rzk\}C9̎8.: 1'7I^wZFi8wrG] ?$+)O G1t0}WTjOc(@ smZ]ْ Xj  U;m;9T@/><:v1;_A3+;L `C5/0WlÙ ИM8N_tw@Smڌ@LtD($SѕJ̟)&$]dRV\K^X,8d/7asbJ .ϖKut2Su=P\QKn*TE wHEfH3kK)xt qwEB3폯S{pE.ԂߒÎFs8'|=v;iDU7>gNCWKdfH)+ 7Uv\cfˑ]9u@uy4giGArFg Imh^5 i(** 8 ߗɑ3'r|!6jٳhrM*ى=8ʯTL z>GJMs< b!}+fgj[NQ$%: WVWZ@ĉK-iѕ;+gV(B*_W ,jw!=`#l]"$f1\& 57Fө-ήfjIv囝laqe%&,R ;ՙ+o>=@43.jRi9ݵ)O!tZ;q3>FWkA@evգ7qJ=cC/4@}Fa{CF;iYцѽJˆͧip>JDA [l NENx`{4g![;(y0ZW ɪr`8 [WcEja Bb  mx𘯘?Wgr2T ׉v I|=KPϵ<0'ӕH{-U( &*y gL0_ćJ>9Z7$|k7\2$VzmX 8y{҈p[;zFb^jB#NW;QUvBi#dUh,cևAë!TY|dv{%Fч$$!-ՕؘfG7p1@a _;2vM=qΑU~OШ0=pBS(!*rd/S}߱{-}C7}eg=Zc]H%TwP9_ϴ!cKBe{Ka^N)+*`ɕe!̼LjI z^dU 1]|zĄjRZv=CB+mx}.T4{ϫva(!'pz*Y7ҁv}.biQ4֖&(V18YR˵b= e0BS{A* 28_y XSJxk.Sݤi& #؄YJM+e#m9ƿ%㳳;-<ӕS-'.B@$uk|xZlZ3dM@A3̀m:ѹ4|C9 (HkMPl2 &bĀ) <Ë:aM⨀aQU9 &pè&ۡJjQMbDAJ^X2w)6#]xQC SD lAJhp¾rE?fO{O!3,-%o%!֛k(6L6 4%,R"<=?oTT}k asn7:>Hbo#1!Bχ:u\0Bd)9M@[HSD gd[9G%;8J} zˁ"ўE̎X.Y#s'2sl:?t(^Cw;<¡(dӊadxl-S>YB%^(>cS1^ ^_n1\H%U3@Z3&PF9!MeNjyXgGl+~FD>8QONBߍ.sq=ŁKgs*􍱏4jKB=p }1%vS9鶥:thDhʶ7xj2کYMK[ྛqO^-BuFr.iPx:Т_m%0OSF<"nw¤tJ8Bk{f5PܬIl=;b` -Dam9.0 BفQd4$Mؙ<Ւs4hQ''j;++腂WؐNXi XU.}̧bc-&jHZfƹcih/7e0.xxx@uq^"pe)rRyH_1=N^E>˨?O%+0%+? K=wvYIxބ=dF_{>OEdۢoPgȼҚp5KEsH.Æt;u6ֿnGD2p9HsR '"]Ѝ j)DsR=sLњG^ҳ{W^S7Q#+a(nVdJ]ſUWuN [S盛&[>ldYy+'Ug-ȉhU [ԭyºw&*G&+ ArP$9wj̼6nqf=%l*;̍qWԠrɠAR%N3 !*M;u\2U"f[d4cf&L=B{tD2ɥFWLTGAL3F{ >߾/ZN/T0`๐?ꘅnIz`%tԖi6?1D]S"D>h=1W9ժ$|{δo޸mW!ˢ3[6s;%όQEF6v߉]#p8}]k %Jh~B[pS 55Y8+Ūכ<Q~4lo%YpLw7FYKmAr0)#+5 ?gcclh="?.WŨ!Л/Vf^m!Rv,ݍHUP.d'=Un4,vf)^ɴ-Gc`|TYDz5%m(>o *vD5Х~^yDɖ1+0tG_FeH:L.g V@V{'/݄H0y}i\53k Z{gfJFKO3jjNoS~y 1P^b"=tW긫ʳ| r4Xc_X_+r!Y16SQڻd@qA{Ԁ.>ɋ D4 /#%obOlC)~y)d VGT{?-3IWaЇ tx;Ϭt*7[xmܷdX1ݻ?"T@"+ U6lwsF@5mq3ם6;nmV37 H%ަ}AէQySa  ]5C FK[3EL-ڮ\<ȸWan|ȡsxKυt~T -%곪lN4.Q`p٘l?!c}Ǭ1y_nX՘@ }AN.1&lXwvzN"ӨjC8Ÿ[ g3DR&ni4 2\ UC{,ӍmlT":Si]ɻAq6rd&tCCs-cHFtQ좵B]3H Zvg*ߙ7t#߯*iYh1W ߋއyʷV";1^)ƌ悐5s'8wvz##p'YMŪ0PA!k~y&:HS%1k# #]l!w5}1̺(Zmr;ӵ*j^D%ƢK@Rw4EƤ1OzErq[0k\KbOU0oc] ZtIdH:i__ԢtiF|N~4U犎;:C.,N_w+,E'\xVWlF4.U<$ʰ($5 秚ԾTGߺ2C:,D>咯L] ^2yGȾzM $ȱQ5QG:KH @|@ 3UNf]-;暍jN:I=1I"&n?2ad1NeJ$WHʺ/X`c҆L_yw`!k'cO;#c<.p7paQW1~!vb@ (0V&mO)O3cv аErgTarAw.Xۿߓ!ᬆ=M_!biCVBj\} Yp<ά,S4DmefY /freN o2{8(XQ2Gi񄲌_ ""&f?l XD5p 96_JZ@ֆd;PT\50yhL8H I\j#B$ߐMg5]A} |P`,`0Ik"M@m<Ĝs-ivEcT`DR.}2Bj+TJjEBqWK\r2jDזޑr ~8j%{Ems01vu^jOz# PNCuD'#SJbI  ^n@7rB?ץ HiG}_F:Dg8ktRneTH|V83GVMq 9.>!A n*4 ɡC1z~;0񝏴K<V )˄Qe.Zl"ɨ(;;jpn#ߥIvr*SxĽc.x?*fqdN1T8>qò`;"Z tTjmiqWCb 4#6ssHE_p[%Y ~ BF=Ԛn ޢ[UveqPf;C՞؞:Pj?XFL- Bh2,Ecj s9sFTY %`ǠҲT4n0Ƣ((%c?c8ĭ`nܝ XyHhZ|fg*W.kaT Y՜znenyz=MLwEeC3(ڽwF3FFɯfrd3]qk{a*> V#T2# )=K?D!ކRjtKQJ-`+݊8K[Jwe]g35$/yި㿦W4MmpO@ 쿘d3P @/h@3-6Bjgb"푖ݞ-}`#HN!:TQأJ[98 ݣv1%8ЬI ed5U-nCYkzۢw];2Bu3*bqoј<'%Hfr3'58'ƎL59Kme-Pm >g!\Ls?TihK?f47c\X-]DgRyȩ#k=f3mRL$;K-l~ŦkÒR'lHQʄ;N(Ϸ$;ܑt R f ܥ: ,@Ry=pqRZ2|-. {tͩp\`lq1uGa4#0 %c[Fv 3 P|ٶA޹#x0LOYV|3_V<926_J0e١éz ?`20SVnK1pد VKl' O-o+vm |<\;3" ؿ) ,x =Lki9@rxunT`g9dl3CTSYm(a6U Y+4{r13D5s9u{^հ]$S܌X15eIѷH @.,(:J&R+SѵrJC235Kcdmf.+ X.Jj, z'bg>Il/6Ǥ}ð]7+ԙ EWTLނ2 茅fHHrj-wQ"X"bW6PMX`b*&&}f[' ME*&#w{.dͥEYۓ5#x}b&XTC[ &T؉JkOAcuAϘJoAA0rLow@s xE=#&ِ Yx0 )^T_"b%OU<O~ o1$coDgdLJNi.jNΨ Xdv!Y,l4mܝ7l\dDAD]5Ř$;w45KEʅSMCiY-a/5/ȊH֥#Z`Kyj0P|G߰coM"~]GO:1*uGzK!tm=jo":PSAzcx_'[•|}gȁh>I-}3 _}:+΃Zi73Ӹ>Bd P 3s0J[S1^rÑfu>۱?{ňQT \m+=꒪6Jߥ[Z}j ͒GV,u 9]$ewzQ$&x XZ{al :˵ тXI|_F(d d Q B$Dowor"']lN2kxb2ygʪęfpNp(GĻ2wA;LaЏz@5])Tݲ$~}z T Ec;EQ.$钸 H5fIhCon)-9]'knȼ ixSIN]Pa#ަ+1}7LE3'[ץ4YG5(I?/ C#o>)_ԩ2ntX^-_|Ƹ%! 
-f0pLHhZGvj'9vE5m$o[We14ao8RZ^'<zqm Yfe@Up+ӊ31 M>v̢uV8*5ϠlX,,>%a׬A岿rޕIQJ4ݲTV =+ï^b֬4}iBm!3b6s=3Hn\d'U=1LI?OeJڨ0+߳XӒWC|_P<:h^["D3t&?"#0PVc,_ӑ2" `6TLS;?*֛!zDrB۲8J oq;-YTa> K"gHy#zXڴ޾ƞ]O5SZ45ƿcê͢mt<"t_{vi&[1aaVY^ k)P~&FofI`4s^\:ͭ/b?ۺكZU$v/ ~\u049BtU"I8O.M<mOG t%Ʃ+3Mlƅ d.ih(bHN(ב 2-STa]ǁ\T{BeDwBjQoӤS3q" $#5%4)1_mMnBWA7򵗸OQ8R(8AېZIXk pEtsnh <.PvZ,[Ҧy4^sKZׅDb4T2S0߅5˩O+ov7ULehjav<@pI42Ej}=rjāɮ]}w4(\׳Gh3vƵߏ~Sų\UF|=]L&+>gR$o?  r>ƷOQ`SK0<>" ,WΨ-ꞼB=O.@ߙ ʑf+ Œ?\e K^1F`o@)4le9g+/':Ƹ_M9, KiL*]V[yܐ+qs=*/EN;@9XaߠX1A2Qh1/_9 u䊲oU{Ъtu(qBo9jx̓=f)mjv3kg1Hͧ\ԙh,5@/tߢ7 tD\خְ9W:*x~ z|OAD.9Že=p&ŘnG3E8 fkGӞWkI# B=Foǟr[i7GٰskVgï1q9#jxDOQF)@Y:Y6)5/[mRx9P ֖2k.U\^7Uhe9' "WCXbjc}S;hE5ٸ/DwX[ =г)F&yW;ù{q0W_W)ƴ o®$^}k>Uwad QHBs-Fco)C)+ME%89ht>ko_` 89WF# Q(O+&́u$#MU8Ec%n%(GCHk83I11r\p ʩbdXXgN(TwBa6t_ԧXɝC#ΧҸ11QnOSU=ƵDPP#̧\V{jP>KS=+)=-X/ FsσDGxQu6j)hFʯD qA[H9h#V)y)0X}P8 MwcQé?ٻ&2u0I\_k5]`t6)g>%WO}$d>nyL0um҃8XOӤbOk+ <5S-$"ʗ13FjʈrШHr&\n{[u[y " }ˣH/`wZdN #Y +#сՄ.oe3_)Q`"3HZqE={VGѕ.z!hBd8I :fGoߐSy j^!+5;wyp:75-2k㝚[җ ReVYp[PLD# _zL~&Hlѳt08z`*cZaLY#!W&^^u.jw\f)'(pk_8rCGC_[ ⍏^̰΁g]L di萜򀋌J&b&:ğhʅyMal`&VVd;op۪(lh_8/ߡ?q9Ԥ yyShۈb4LbS0BaKEHOESdUΟ ,GXGCv;͗'஄C}X<32}}50S ݗ>~8Wb 'dQҸW"*q>I+US'@q\6$MO?~$[iPw.kqY]ŋTU"\cC󨥤Y1V%7eHJ o^A,1W'*Dv?^jg,j. I>~߅q鰾 p`o˹1yZ*  0.;2bd/FۿՌ*:FнTWSÄuS2Hp.%Ł5cAdC.̈w[fZ"2ɣqn T鳯g"2evLMW|މe¬cL""GcS"_R{'(12ꣶŰ]`Q=Lx;.#Obf\:h8ӑ"Ϥx:e'fĥy q =_'}p9yҪJ]&&!9}@'rmq TW :MX$.]RY]-Ѿdiz?򎎜ߋ4- ?Az3.Q/eDTIʊoB{HWψ[Q G6}Aic3i1y^:Raa,Yԃhnl V#r-.'[ǩ<;wA#RaOw9KxconR-]ԛI/lm^]G~ q~ocyH}(ZURE0v&)v_`z[_Z{R MGŎ媜q `v`DAr]IC4YL޹Mlif'(dYrʳ %C_ZKܷo/s8mX;%]K\%k,͖ICZ&  G8x|d&sKV/xZΥ|a-A(:'Bq6뒸Cjj<ڂZ_$h;$ tCoNLZtH x_óbig~\baȐIR\V>|G%;-6# .VEϣ6gbOGt$z#za3>"g׊nu4K*G H1e5* !pd@|W@ )CSyGRU!Pb&M-4I9@/F],+&?jJ(xך{{$Б^ ~Ru˼u^( zCays.t#[y;=wlDD7U+ B;!tPepxmkpb=x~++@/X<aT߭͆Vc^n?` 涇.<K?8!$= Y_K1kr Kv_I1Z S:~}G۷"his뻍 )7V=t (+sf+KITZHzTا+]Äta)U)jJ^]--u+ryP=p lwOoKxLG=sT X=j8, c1ڪJb '.R5-]iس]˳<|,GxP ?ы٬T8]P1&iIXb|2ǰ h+1q~|;kx E<]@Uxk[2Ѐ ~qhe都k?IJ=-Wyѱ|C65MrzޡZUjY"|+lZiM*K$~]RҍZh-IC+9)ϏbRgVwRJ&t1灾Z* +ˢ3ڝ`ggT͉5Z]F|`k%sR;kA]p[~K'%_PiR⧘P %e:Ҝ.@7=:'t˭0WORեjO_1j΅UXL5cѶ\_ő_Ͽ&cӘ@u籠!N~+!Kg1Kۢ5 ._g]O:PNR\Z9e9s;U{ᙗ9[WL "|X[<;cvcWXh8c_lQs`TÚO-{^3CF\Q(lIx0J4x1-՘oaufH% LВ/mpE^$J-_dt29 k}NJ?UW, 8O t ]Ѐ>A}EuAӶY:z/o4%z)do+Jg=ktZ/=L<='%\Ny MLy@7m5G!1Ǘ!*g7`XpK hyǺߍZW]BY')4a"),WrtN@FB0qaS +~@ưKPIXZI9 Lj+BE+5FO{hOjI zWOzB NW`XbWgl._KL)KKɲg+O=%$F-G0nH$qi*[QXus\ّcހ%Ɩ¥LsC2`^;wnfdj^9m<L|-rƠKE50Q1{# 4tQ:NvCOF% rrSad^!`m!:HЭ<TlfhŚ IM9$i#g=6 B-=B%* 3(Ai\je1chC?L&?3j2xXEk*2lxnEtY+ Yq߆,`!Yҳ#O]@AJ52L:X($~b^^FTS3 tG<|Uc(`+|S-{g},7g 2Y12ϳ-пaf?3Q=zkqcSFc"nkop!N3׀et*pEșfH _r"22F_ rcGy5#R(:O-L ؾ*V+,&̒RVg (t|#|:.겕,H9+N0ڧ) ╡/kA&]; hƽp+~l-B`9jG@#a*E'UZݕ懏^nGL5B&O= O9 @O|(B6:60BW?p-D8pg#)uZ\bb> hSZᰫkScsJ2o/k% d0{ƢXw)jpi11QoqEG%MKp;D#9߿XK,'o<mafOOHV); +~O LSc'I9X=?)Kae/80dߺݷ"&/ܦc@|Ph`pNc<ahZELAFhi̤+ݯ~\Ճ`iCs!ׄsj<|Gk<O0Skڠ/v% e@NsgWë`10ՠ;tVUlCG\KW:h AMbEP2P[ioF\1|1jOߥi9C$&„VqKQ-=BhZcH7Ƈrwue.%(ChG%i{*0CYk= P1-z H3*c:[z,,i<"X;;wv|$ ,SYM%bx<=I@P8?ygCT]H<*k) Gξy_Hv)36֥F}.+H}< .ƇG"K俗8:8,a,uvQ5cnü $gVrHT'"8ѽ~J}ګYrW d]JKA'$o,$I|eq̦6 Γ*q}R[ Qٗ} >{Zh`_قAJU:~LK\gوڧFWfRE)|2eȯ7xtї_E+Pݓr&%Q"0ϞX+Cˑ}|##:9|r# ZBKJ>,-ĴV1\wg}bP4O`@foNfh:N *DXC>쑼=G?ND$&],\}jIbz=W7EgD=#uAۄ}cڿYـ3 _fh|yӁ3.qFR@PfZ_zê:TBϽe'?ĪkѠPg:`©Uʃ$l4vg׌}kxt60|Jc _D'Qc0kX% QЖWJ[+Vuϸ._Qn\;"s}\V }Ym.!~s h,ީrDEJ ؓ)&(kCԆI$%҃U9IPF"+_i0gXYO% y_)R&_Kو; ߢjFh0,t.P3PՇʟý5joJVyHAY_X,n#Ljٗ֜%TZ| oٞ>X‡|1`r! 
S7ߢΏHy{Z]Ra|[h3mE.l;|uP }!&AL(t" z`bS)ԒWWf;WOhGJ{XoC֓s?+Y.-g廖WC3f,Ad.ݳ)N>xr%\&PwX=gZgNHW;7ijZ'' e,!~ f,y>iK R(~L!\c<4DF@x]8.w{,ΨLkw=dܝ:JO"B?gy)WnV `ںa tz & |K*,Ǎ0c:$~PAfK~wOŃ ͘uM7$a18t ϒ<Eٰ ]mYQW41fLin Zljni@kFFI,=&&[%B@ 1 Ix/;c\½6SR*`e3IgPr7ZAtj9#-.P)ky]f/uxLhCrt̘)?U2_?U|j<%P _Etin#*"8RC0Ӂn vљf!kފ7^=8?3[A[j5n5v 8SVp3eGHy(ĝ), I+4oKQẍcT'U-e&yI8,ԣgcQ*Je]ly+=#[Iƻ5 WbZ:"G?o5G.|ijc+ێ/۲*3FL! 4ߟ%GP2dCEfxMcXL)Wqyeթj!)H:Y $D(yEimg"Iى `DdNrj"]Rw4[զOv35?xXZosE#T)oxs[$BiU]V5jNuLU>_W-q\;Qy:7E&ėO#\eLK݈>T"ST(ȰCb57MV^s+IGΡNx:nbV:JJ~Z)悠N)zyL3 %f;;r{CCџe={(M;DF=bb%GC;֯By#dCq\,Z?ݘhsR ILwr'R$3J.,t@ed,0f |-:\}͟df`LAxh`u 1'\Z"X 5}ǜTAҍ-zdVd"dŸ_]d^~N">gՈR.n{)'.x|[JF)b7Otv[@^bI ų]4XFCjS}2B]tT5IaR""lK FI~*ٵEDI3ooS&Tz-j[#\v@iK-Q^9in`i21BYA+?%mu۟j*b(Ɏ]OȽԣQbW_3h1]YbR])<.G}**UuP1+PFԐd~=1x"aNISW0%`Eb'qSzi^2OQ~d@1ӕX©&i_\hx[@+hHı%tЂp#}r=7dK &t5UDxhZ1kyo`)MB @AS4f*Sq%vMm&IAOAYZCTDyA= ZHBO!XjFFxYyOzC<|{֘d憋!KynKd%T6\Vw̨Вe50fOtRo^evwҢR(Bmt I {lME!O%,՟l)pH5 eF tZ줄'<v6S@j̞`8)u9S ɬL"MǍ[KH%2iWug=Xe38(bZw2KV(l(ںEailn8[;5ܘgޏR rpˤ*e?Z,boٷc;DXyZ^ @Z%-+v_:%e SͧG@e(⒙EdPy| .q5oxR ,<7&u--f `*ά U˫- ?J֊>X1&uJ9mm;\Jy,8/"UF𛼞Zq3R7ObcIj7t}s1K[{(>.+s*qFzR ;5_JB?S@.>\==d `qo)"A8CۋX AASū H-6uL]lf>83]q#[X\-Qhҁ*? Yֈ yzc؂wb92"9-^MH=D ꛽n5"<-pg~b9KmTcѲ۫0rcsߞ]]Qiu'(lX2r}W'z *j:2ަЙH%@,iӣڑ״er*u9"KXG"1{o^HX+ehMIa|зj9.="zR\%` P8fISUL?t 6.gj+~w$&0-8?vw5XT7١i D<Ā6A#x\4g1X&"r}[-'L6@?Ka>3 D5:,a Y+v̥ q?}͇fFh5xi-H[5MFb|B1 ;+Va4EIhٽ)ݳ P'OSz`1ŗtWQ/%MGNqRDn,I1͗!ZxE}{?,S1,9ÕxS̬Ă6U_0\Im%ܰ[>՘9<4Q$_]SYDa5o[U|FQפ%e* fL4hHlܧGpp[} ]ƽzW b 5b.ނy/]|:c26'(.+*XDLtުmDR/R}V"-Đӑ7ak^SUO]C?d?$P{D_! g=*9G&G.WT2{L؊Y-||ud+|O69&k2|Bʭ;n\\x$}ch"*I P&;..F&d ef kXI'a`Y8C=OayKvqsZ͍6Mcai7wa>44zV/\, 1ڱ[d0Zvq*5|QmX{l1p5E?1cW\i.;hH䱫d<'jİ𞜎 ZEdˉ"t%vcj'oMR4cy[9$ۘJcD]7r#^d5º+_#kC./WAq)~AӤ(`2h4Έ3ОXeD6ev(S wCPVbl_#T3I|[YqĻl*L9R/LO'J2bd'˓Q:zљ@ pcU:@&O0oB.)>@ܸ8ŴMnr{ՖBFSTpqtEK^yQz<{5&>"n[94 6Z1UL?CkZ Tv*-4dėެtv |KkͺJ|ZհV,)_5vi6Z/Y8_Q>?FB p BJ& V@Cd<<{ M*yކMc:۞cު}VT9ie,Cc9%e=A2_?2C3c噬9{HHL_j1,p1+=DAR#psdÓ&FĪV()c*<7ToX. X.r,uOϚbq\jژEx.&[9LKL:\hgcUzۿw,Dh#(ſ8 M <5tZ,RF˙ap|]rՍ_qBL43nR.Qiow'Ǥv"&:R+Ot~׶CaH&~ƀPUb=C16 b{I5\!OpZ[D_-ʼ@A9JMD&Wc!͚[O,slJy1/p_ohx4K,՘aٿ"hzebxR cds_movDqKn 8k$P'gIJ>٘dᏮC¥$VpЫZs3oU:'K*%ضwƫ]PיTY*pjyqqf7@3Tn>sv*q_l4b󻎳NhRAto㸩bY▵-nUIyh9{AѻnDr 8S\pŽ l=n`%}3-ꚻ\F6,p4fxHJiz"~y' IqL]qVcvu5h;9,f+<a,?<֫lX[h ;VdVur[DɫB{H K*Y0B\Y;i3o+;uv_Ƽ\ | i[\FUFgȴSXn ֝M˗R`; ѱy4φbQ՟h=G'D,[(civ=^!TvWR4]iMUq!"Ʋsr[[(b I@-1=5W[_AHd efSI};!)G<(6&K:n 詵AA`VIY`ÉIMp(CxRjsԟg?بn̘4dv X 8jp d&\9ag-=5@HIz,=eQb9 f$q*D\ص9(RaΠo¨#f|;Bpe{?QwK*dFع/hߗӜN((?Ÿp*#hWeڼT5E2H~[RR =r*ZUT/ONS9GQIL|iPӋs,~+Hq娥K 5mDhFЄ%C9~e!#yF~ž.@*7Eٵ=X#aC\…)M4өJD@lζ#wV`j[2/GgMXε8i}SBXdgܚy"WG9=|2(8J B#;.KXdwr:#0|9cy ?V%jH'uf,3w~=v':ĕ,"Jܦ75,$3DTc'IX%hOYlFF:mг `z~Kj1΃w/y{:*ďOt=FAҬiaq6ȌLqWru2>>Ũ\dZM rKaϬ^zbKNҎDJ7(ڄХvS~UxKK]#6nnpwƂ5VX:X:{Db\"]"r$mK"߷U-ffc*V`"kM4nlpG'H>q. 6 $&h_ZxWX՚@ BPJCHHCIhZmO5s4 ޻xrB &f4oF1[^4gFl+C@fsZ*R&,UKGhZ Uuऋ:5]McÇ@ z>Fzo AP$Pzķİ(ɷiLA,EL-U/4MA/Ci݊NSS/2u1i ,_kD{ؿOQ}6³.ieƞ rf:=It[(htP5k +ͨɟnטg{O+G\XlTD2r˽u?znxʆybCաy c[LwA =n]k{[ *_jIyR xpS]P_n!Qry<=w%R'Ȏ4d &o,\YbWSENB(';QBg9g4AR n~\(ꬽ}ǡGR2b'ǖۅ yG!ƩXFFq<}$gi`IM.WY`)C4]KGǢdSo N z Y&Ο* "hQHZ*;RK;?BNsyQd"LahmGuԠa|?\ۈ<%Dl_KJX }0FvI-nwG/63.-nKJ .N 60dd8tb#104Nx"p{ƮeuW8uTd/Hۣ2ж:XF0"y^ע̹?pbnٔtNc{}-@mVt.f CM_d?ad& ^khQ%bsfXҋ8 [RTͮ%s^6 "5ũ^65YL8iS"zƟ`lGJoYrniܐF Bgb_M{A3UxJrOT^!nu^ae"d/;9Ů^R`fTny =@.E@71=d gCW59;-E_aݹ\ǪF}bmH*ju70;/:aGҩȊLdѬ#nz(w]\̣wh7 A B;pwgDbG)S7%ya 0)W~guޘtN)L|,j}YAaW$n,rMIؐ2hަQmBW/`4*(F--VWWI"P|tkJcYȚR,Z*o`$^v0 6O ybRi)jEWL;¡lqa𡴧sg13t0dT yebÁIx3g֬Q?>6NhQ!YɟsmA &f%1SSnq΅(&`ianpvr~T+!}!=*`qf=[a>"^v`}wb(/2Qvuk$(wrE bj02 p,?*Kѧ #5^ʥR~c A&n9׏CC%:yZ#iAYLIO#,l%B=u0+?xh&FhB! 
8U}+# uj+Vr[2MYܧǿ {u%VfKLGs;q7eJ|ETv f.KHѰH5g5CLj' c^4aޭg#'d_1ڮX5~'k'`/ɦ杬 f<ܚh_,`-^FU6hPfeS6~77") 6[R1 Ye֐l@lx[>+kZ63?Dyqf6x$:siFrޒ.Rx$7EuV_3bBO.֋X3yѺ-ξGPo>WF;׽<[,pĊJ*Cw@J=,R: y~1fi=QsW3'=%@Eю~h@w5pX6*w-]S~֎Mpi\lxhy\j }w^Gg欞1"v*nw`4p7R(cmF+a`G޲P>ܫ=tg5f$d?Yah+bvOP]:GD|×5\lfE‡mT1\=/)2M(Nܴ# c} *"b]Nl|#+i:xS #UxN ^[aikeO&Oq:9)5 KbKE) JeYTn(lzuڱ&}*T*ǧ{KwDUD9(\S(d:߸G*-ř0;S=%Xݮ9 DѴkC3(e3d5h0HCPgU03j8Hts[6;LBpQUԁe-yOQvw7"#|H6kB}p|}YWK~8&9v=Jػ̫6|_s`.=qD},Z<ϗb璚poDrMPCcIS5+WHROdpx~E5A\e L)=m _Ë Y\X5kOZ*zV!>,BeM_erʉ_&Qv4X?2] -D={3yw&WoS~z؝9a=Asg Oyiu{2'g%+E9"|dwx*1_%HW(߼qytgYǬ&ǍҟXx76x]IO\Nld3FL=y&T| ?:,,(KVfXF^!H[u_㕸dC惐#0Nڸʳ^ՇbSy!]6=$ѱ| g1T(mRZ`:ߜƜ3-?nvrIjX _Bk*5a̻Mу)_Qga’œ*cCLbxcYv9o)qA—c~Z1V|@!w&шc . >@8!Uxs8>jQD s/x:Aj_jMj:]Cޢo4Rv3l)Vxb`>Bc MuZi.E'g\Q ʔh֥oi[ Em*+.σʴOʮs8Z !KҼ]&K|r E CVSY?`lx=+i  ٖޔNk8jh#(Uҳs?_KH|3.=ŦgE|+zbpz Zy8Sg c3CrhwZ3+d21ɳ޽:”y᝞ą.kBH}w[S[+>8.`1!)p#f:PB={1T=}i6tg~,ajOAdeP{r|w,C\rط8=B_IۛKCGe^eXDW@?HSΕ3mHC4ќ삚<ʽtH5ɗ5?Q{}O|mEرU9# rxni: 2\9 R[Y3M{@I5z:,§gQqw?rR -ʼn%<{=PJ) m9x5ܹƎxψ2vѝW^`%sgQ ) `lfʎ9oE*9:|2d0sSn&XԨV -{D,6$vj~;H|:@j;iw`Ҧn4:. `j)1oVnJI$mR8͢,n5Och8dāl"%cB>?og8 i*X or|daVlL1Elۓ#vBHPe!)GJEGq>+&}7 }8:]JƷ6ˇmJۧ79_u*P:0} D'ZxΗy HAyT~N XRMm+gU!#%صh0rja)DϨǜ-|JPGNϚsaX^hrgnS%6Og Ѫe? ;sCz3wuWX=}QޥRb9d҉wC%;d1ANlFo5Z<20o\AϏݮi(+4 ͎v\ȪnbQwS|z6 mɄH$#&۫QЍ|l[M bjFtxn9hd"&b9bnPIu2{zi-ѽ·k4AUnUb+^0{d];{^8ֿǨBC;εhr}N (fڒmOF2_Y"`Hwv(u,& ηX^xY0 s8咩kQ ?vsu5p';F4>pp(.(%H}4Hj1SY^50mCEz_O+1瓳kyߕk%Q7'\wLYS}&KɾHwbV%jjug,K)A OvE4 L1ڃE#ǿOn>ܙr;a5۰EL02./3}tcdq|TPWw=}tU@-,Kn;z1H(,8RkO ҩ&Ye|=)i6!vE4#m3#7v [iRB9MLcBH]v\qyIȖ۔J(9[T ]e"mqd j\;]_h[s+!#<VJ[ğJSc)\GRUJ[ȲfX:@=Vۂi^_>G>qWg~|Qn3fd$bN*e[.|^'hYkVZC1}^ZʆْitQ)Wm)U?Z%dͱi o>K\%h{0+{,5^+#3bV2$?~rrZ$2i=$U j{" rݠ AP߰";rmYx{#$ͪ6F>ahRgH8yݵ= [S5"zѺ(2*OW4\'t!N %#1xox.NJaHyCJEkF+z>]G{fC~ MnWHٞwP?jNe/*H, |P铰9 YtoPQ?]DS`Y ?,YI^~ߖs &eL2'`6%q&B !?y*'ht<߯cp{ʝ!3c29GŢϣ !#9t,l`"~5X'\GX{9$"]8Ȩໃ%T OUGG`ʲljR!«^&=(2G^HO\qG{扶9$̓IF'I<+օF&)[ ,CR5|G< ]c4%"ڎEqGLG&a pY"!Sjݲ>l:շoW߸R?$֖]O!4I^|KZG=YKܮ6G,[+!+B8 ?C o!ҹ"hnZR|+-…EV8/!N{J xȭ~EM1چ})L{Q4w\4y& vpE"N@~# 6}.Y"/#_ 9Mx`+iE27|;k{t !$![Iz jD4N™r ZW|lꙗtWc1YCB$lR߁JQf=AV»!4tZ(ҩ=A)A ,Gp*yt4UV6ڳioBqDV~E:WY(rf\Yve4%5M#rMfڄ VG1vKԶ%,86R ѨMXv=xm8֕Hlhbbx%޶t@"Y/\ ]=뼒fh,n= ✑?Kh/b6>ܚb~EooK;jˣ{{Lb>?#X '-s)-gYn[v s=pU(QpF`EMAtF7nVxpUV;Կ'(񣋚'@oC_,sgM̃W̵e[j;J)]yw h"倥 "$p41!#$ޭ2B!Կ{Gd&)9bz$Zz2?+.قv'-n$W:+*JIeh3U<vt,-تs3~d&?Y%~$NxΞ:rVX`_Dҍ4'=C8)L=-zEl`qkX8 m BJ+oZw-9tHU(|n,-廓](B[97OPjV> gNoK ڗx(h LHM 'ψڰ-M!_a7,bغºu%P7hwNlҙڃ 0gkfs>o/"M"ں%YIF5bO ObR-ž`/}+k.Y0Ǒ8uqC" c^oEgrpRWwYd|xp>?<Dװp Mz`ni]Y368PD@Qt@fyj{FȂR)244*5WlLVn׿w"S8rqe9K\NF&(\<[?&zxZ۠5_bޝٟܨƼ~kx 8OG-zЃ;"]-%ֲezl_|>fKW 8S?!ö'ΤetmPW{B;u56~!B U9e:DOBn\Fdu.h9Z`R7W={ =7[~_&0/~wA@7[eKu;O]-un_9jG](<1%#l{qc?i3/;{q*owC#<9]d6Zp"͉-SQabeaVFR}t|~|c VS'$G %z2w@uarF$$dmP- Y}5|ERL)!7U`s2A"QNYT-0#ngH#1 qi%9p2KgyCejޕcRG7^giA+&+EngOA9?`2=i2x.퇞&@/%HKB T~ã{wO ^6G5<__-ZNN0?d]xMI=#56FRQKMdQۗ\"DfTdeX<㔜򷺓Oj$s1#@4iU# ݶ0%[2cg" fM [ X l2Z]qSV{rЀI೐B1 08󛪑GoÍ :Ҧ3;qD1 "ATĴ ),5U G;\5&+uyu!N/[ͣć*DUx)_AޞS]oigѸf}] *'Ũ|L5{duܸbzOP+QonVHU<#ݽ|G6gkEiY˷HG +=<2+U]t2@ /Z܊ưࣇeE[ =I8_Ε#rΣRoyJ0R cDo-@s"A4&:7LȤe5U/PrHUh b+HJ!zu@aTR|< izR G!W˄jFIJgǙ$br$#+_:gI\1U5ċh6VzB8CGḑB::'<*jYwNgdz $V]Xg7&FM=-&TC}.O q!ZiHjgi]꿡^H'+HE"νsWׯ^$ V=/G) qɸFLW)Y/Fdd?s#Mƺn=LX\'kמn EF",?8L WHM$ԣNvK {$$uTD%Ξ[-T,=#$«,BJ1o9KIx{{;Pi9k&S&Ok#vts]hՑ3k\'_IOЂFP,l[6dI  3x,[iGMp,E7}ڔ͹6KM3G=AE8Jꦷg,⻰]) }]O hkv#Y[5f7E +ga%Z0{7ogG`MzBS^l*!C1gLA󶒃WR;? gCZJw]ld%j JW2EI:'dZ?iAyJ,ST*-jj:JEsq^ZfEXdbKww̬E;|"E]A j4 uAďraRWE^BȏpL25|4lT{D ϞHE?U<ΚfKy<WF~m5I`ab+L5HpܷʵԼzqCG 8 ho?yQASrҳd`6KuOt:=j?qήf@~Ŧd*L^:bA\RUEEc<;?{-Vo\t5d}X/]*[\Ũ|WD!濖Xtr89x"BwpCIO/?[6 ]w?撛~䕴GwZ$^K. 
&#;ߓ~O>:IxpV@jq31Yzrb%6I=` _KSݲIdk.?YvLEBG,ˌ3{ Y OaivǍ{o΅ %=1_6cR4u.@U"staLRP%peCuMSr`;ap̩guMP"^t"p%y5.x\ m6qC{H;eW$.⼄Uf#0sbڢ:3 >>3>pmUtMR_dq{x0v(m{'ܒI8)=Ҹ!2/uw5o A0RK-"6~X/3 J֘?t[|pYSͪ MÇ6]&D; @4]:e=#eS].ƶh?k$=>1' y_9VXU5v=*TꍺoHr}L:ަ$FȋK$rGB2( ù]47T᧯@oȖ.n b3pOe~B0 R58ETu6 +ۈaX)>;̩iL? ,t(^K0~R~4;%-n5BJWr?L5 UtU%syKrɬ\1T acFGJ[d8٠ၵzYPE$c@v $@no#BU՘[<|E;HJ~LD+=vgɀܽ՘['As;Qi s2!^(% @5E/HߢVy\/B,+V]ǚIB9 z]SH6CPrV$Ց$G#^0Dw@J~{5ip;"FSt9-k"pmMkCi5]/Z ;]SV ԛ O+_NVB<|!<=!5{ٟ};o v^0}Ҹ1rH+aWvYqfĊ?A(k%QvMM4Y I$),4dQǪE~E.T˭(#S{s1vsp$ro ?ghY+\F3ٜW<)!vgrCQSKHρ.{5[ b8%`Nn'@˟+3bTpb.&vN izJ^0-^JeAi}o|tq q6I-L%#1EFӌ΅gD*/䪺]n{~S|mӃ~V$>'fRlgdpEߟM`GUcؾDil?*A.)B/;REx?K2KOʌ]$FWDZYO17Д-0)`jٞȯ3k/)5НIo?z8J S-v},V )ɵ;0YKaE9WG؞WbFlm+OG-{Hw2!'д0| hpXGnqZИ;k(_Fy,G xw.W=_[GVꭉjܩ|Kd2_ #@hgW"MJ^#rW b@/. O߁]iQ1&{Y[gsC}?O۰ {r|fX/IpLǽv@ j6fd%24S PH̝35Gy=MDx_{rNw4j5һơ'GڃSĥʬ \p{;S3l הΗeAo$ )YSz2y}.vҤ %M|vDa@h>9䱌N \gfT<{xxax/ K&TיDV<-?"9r]nUTI.HKF[LN0WuJ_&{h>?_g*rsLcE_KKVDG!6"a4ewg'0!Aڮa)S!8aCV㵚|Ek=i/iW<&&NDȌY& lɾhZ]<"C `( \w(`"XaPdɞ|Sr3\,( % oV#cS KGL:}ftE`, RL{ $љWTs8=$^b΃\J R!4BeJ464#%*FA"vf6/yß8@y)Tbc~qnH*2^j d( v؅Ɖؘv0d,XzHl1=b(@ߣQ?/(Eܙk "3 t zkPL4a%ė{UH`&du=YAW~Yf͝*_=i8I@N1.*H#5'g[Y ))Et]EK|/oUc֜9sflNR_NqQŸVFCyVP4!ayC+o\|kHz"kr(~oiٳ&yyS?k02AU%/Z :IOR^/:S#HlW6\r~޸u) עMy{S0 cilLm=ah;vV3zǦs\˾ؓUC#˷*gx~^/iDgsVuZ.PUf?p #z$`:J6v=-b<$8~_t[U Ѕ.tVe䍧P,jƍ1 RtѰF*BY1RBl֢69-CVpZ7F_ڄ! łQB ۃ żYɩ.;.v\ckD{3Z8f Y$V[¼:¶21]1j)qvX'"\h,OY˵jhW5dۯ^{C=~S=Ʃ=/MZY=XGSU1B[-3mԧJߟ{'i7޳58U=2CmddQ9ťYIYP*# cux_#__yKG޽o7eO_Ed SbnzH3q@TQj+>K1ZaV\!+ ?[ʂZ<=6#EfC5:N}GGh8hCTK51SԋxJk '=mK5n{+u`ͼ=$p='Yʣ91-ur)gYѱ x0fjכAYp>uo涴ٟ؅fpÐ*xu3.%."`O3LAa׵43[, {p]M&OeVCK0~0Gui&LT-zxdR^՟{zTnԳ'p,p^Q>9Mxcj:nSI JK8W\AۤmYe5쐾'֡*9֠߅ctt]~ѹ1H@.5,q/<3LZMT2sQVRp*"^T3$[8,H% ||X#G{wﴚ. }+[Ғvкza}qu̦{@]-\cLx@,mv]ʈxwٞXRuyfuU]Op(xHQ &8Ov] h]U'Y, 2-TW6a;]D"~Ґ5U:v /3tT(^x'(_xڢ>@{NP 7 cФWo=V#4TgUHa Q^pk%*EΔ{ ߱y""ŀ}Kz ʁ95ZC@l5VP3#&RWFЇW.: H4~% cSN{AVCsGyfj e-AGϗΪ"}y&Fg,)+cY88n|+!li]mTQfeyFDiqDgST[c1u%s7'IV}##1u}|3HCgll ?^<ا&+N)RDxn S U㡚@"uOèQZ2Pq#*ʒNnc#r=3cDsȀYM+d<yǵr6nFJTTfМ"H9[4 g-S@Y f)('.F2!R.X\sqLVN?癱:۷~:v|?hţ/@P>ư No{Em$T9_ Xsq[Z'd,\ H<c]ywFJFT~ݧ~-ⶕֿ{?;Yde* p0uۓ(l` QVniԡ8[2s<:HmHw-AyYGj)_̞5ٙhc`M1oV1$zeaX.h a,LVx[\AHOZ~᪋YE9Yjj^l-֊UMVP?iȩ1&t{>Ts'L/jG%r&):'ڴ/u<@"74g?-̦L=vjP,EsBa/.-rx$Ml A䗥-{`rIFZNZhHĖPk\?]ܸq91p325?D(ZBBG@](?8UY'-Mě|EI@>P,fgԇfwIk_+B[]F_ΙT{eT2)sd2f\$Wʋ:Gfl J(2y;Ś]҆irFnjIm,%J{d7Wm'8R9\{PqH ")1h6.1LNwc=iz v +fr#Cɐ!ȉѸNp{^ >:k¢ d#H!H*,Dor8}傢;5$*$K:mOpHw6wÈOr?mӵrM:+o2'zS% e,a [M,Ch)zPQOOԡ+<5ҒhF1JzWܚTP3xCq遆4TT=iM,'MCQ')*,J/E#]KQE+FX;Y<3Sl[LLWe"L y*`YI/\Ε $93mg=*l<)yPP=4bTdŲ"f/fF0;2O@%hUE7`_p1[CX^.rc.ZoJTmZFɬ pY,PXUKiVѦ8X 7`a;r[P9q%9+fTvd0l>[vyPD!;H\e!Cz~նd]L)`dѽw/o G&"U_ #zl2_"Ќ̓9E`[߸G(8LD)*Ĥu5X2Kt|,|ԕ=@R0Җ֒R/ЗLXtP7Z~8Bx WϬvRTc3CP>t&%E yZ| +ar TLxJݍa5 ɲ(ilsk9xIoXz,RD êK\uņ3~3uPquفYMEPK?fF!d]>OC#8wb#xXa<xiZrdX23Ӭ>T Pf(Dl[Y\ɉ9rM; Ք+zf'V5hvtì*)1jt_f)>q!z:-(y;4RX$x+|e>x;R0(&OZ{6.{12mտVs2Eurr_& b^鶛 JC #:G qYЌJ^lļ5&S=9a%, VJ4$%VЃ5pLծWל?`oOк&' _-0ѷt%{ w  {1ډ7td^}&"hn1ml2e#\J1'hG5GTD8(wE&1QU@}3pK&xavd+¦Ϙ$MGo5VTwq u;?w`T nTDJV)#LflaB*:8Cf*%mv+[U{3Wm72]o.ÆÜy|g>4R-UKˮ%GЩ+ǝxiYms@Pz[o2j [%QEM!FzcH(P& =jdEDqu޾(a,1)Coj0=; :_䛮fйkQ˄skѵE3!YbcOL@|$73g4eFȫ N;^j~[& h/-7ҸP*Y(.kډ}_{։X`$HThFk|Eps 3bXʗJ("Fv6#3n24L^[xMPxVl5[- h>~[v)c6򈵴!+Bq=p EY {@đ3㗴g햂hrXliFI.ZƖXBqD3,:}S<\t_ʿ,UkcXQQZV42١'{yjiDEaow睓Ȓ>dnkUR42 y\yk$Sqw?QogtW2pLXʲAGq Qh1*=Ι XLf@Wlcu&0'tIO-wЉ$`}+?PB\6~$|)90–y:[5V<12f^nHsq(8A`ۗS4|{BfJIEjQatg 3R?8-#Feȁz'!+*99qe=B#^Wғ芀X2<>>[ʪq߹U"Z L'ZQ~c7*`oM3m3Մc5=DgJZ.x1Ca<f$ٙcEY,US ?.̈9uH`9xy3xWqkjߠ3=P$t쏶J(.rS$]N\>{uf߾hY^_2p '*{BV#HRlMkk1zfú.&mѿYì1DV̑oq&ʦ}2^mJzKᗕF;e@8|r$#T>I/ i"/j{f&ӥ60.TexKzC3Q?$"PVVd7)a"<M~uO9:q240ś 
z}"#z^ElɼVܸO1hx޹!gl절-XDf>؎gG~hf.qͻM}fgXʦCv6@zlGRz~z,cŪ?qP' c2xaVFp1;34̱ώ*X!. 3o{p~fiKY bib-M-)|OdoR3G&IJ]A|]] q [ɫy{C4 .uC?|{he*il,7;r8 lع(|e^ { U(m);,9ѿw$S+<7wd= :L`aZS7Uq 'a̓aLL|x1sTvƜ+$SH2fE ~C{ۑH hy@=F}3%w U/WB\"ʑɑf?1 ӈ=S!֑/btmX˕L>#,Z̵!2-anMT:&z»jWWʉz`^ 0%x\К5xX8k{ XR1JXo'$R "$]XW crrA&P>6hPVK+_/j' '$šI0..'V-: c+-q {-’`+iٿOK[{rGXSJgyq揋fuD|~Y ds+u m+uъzm [ښL 络Sr98ʹ`keV )N@Y<7ٚ$݁-Hw~q\;.WqhO,yGc_;x쾌d:$\֨J..2c6yg'#5A5vKa Ң1jMGUً =sb$dOvQ&2H v7^BP蜩qXHc2Ei(#aiv=kYhY@7+DQ %#W\={ O M|M>";ܳS?uw9e?> fLLI4וGZdT{Kt?G~sڜ^-Dc} @+ /jo+lrޞa w)w"h<;tP:M}k(W9: -'E%}T/)]1ث_L2s%X9(3ݻ^a[!笝|mYN"l^g(!C|%*I[{ۭ[Gn#FJpѤLqBR<_{V~\3:j[ שǢR! Tpk5wx5\ǹ8LH E5FDvh1w~ t;Ǭ8+=& |"BYJiƺԞcα%EGՊ⻦zsފ4ODC1]^$ E靄{99cY kfw^_!u&^n:E fd ߷^u=ɡxM)r犿O^rAL5WjlQ~U̕$xFX9 d1lb-)GK _ T=6M_gh1axҏETד27I,GM8dc K=L}O=E4%P?Txaos^,TRT,7;CqF\bs1%f5?ɧu= vD_TiqC}*BuD#@AUvJpʷ!¿[TW`F()$\?1T~R i$;W zN+;zVآ)Yu7ͥ>>t}ZT/Tr"lyj{Yca5|#~eOٌi͘=6=3&j\K"֑:iC ˏ/5= nBt__KNDIlNf"={`sy3%Fl` H3"HXWimoYۃ۬[9ߜJp[r[i6K!4cͱ#k)6=zoqT( y8YlnZ,?Jƍkfzu ֏dP,[īݼOtp?̈́؟+=/Nw&;[,z$PuEvjU4 S;J F+A#s7vs ]"/c)*٠Hvi52߽pM?=]acR޶+Ęl,aq|Mga%' Ms#ҹ},~欤S`'x-t+YP-SCN_fmDWDA}K3?J9Հ[2%&~2`ִ}Z8'YxY"tVȹGseLl\Ltrm j%ZW{7 kp}go2ی bnFFSk0؆=*TXD69Gl]Ulylt^d?%;[{w7v!q:-ͻ{ *Mb,ߐRWbs\皡T+ahꧾ)՗g4 1G^}[T;,jd!NV^CU|36b˄'YEw3'l–^@3g9QQHn2Ըx2TGj`?AK FFG᤬|k`0vSri#J=[J,4o=[yU;0R&5N f[=u1f~c|}{F="cZ55bKđGbfjH0Mn|_"<8X6~cq!4ᐜR8`8mUb"IW.q#VuW΄9[} o*)TqeG&W)imfK- 끖7MzC$l^߳_af[.k6AR+LaPO w 14,<H1mj:{*ЇWa%8~^U}:z؆RS_!v4c(\ooM~㶞<߷x<<v$ I[^'O7=`5wdI4_I +C蕦ҫQ_mv7Ԝϝ+R⺬ ꘏ ~<-n䴞/ P_kR0[ ؉,0PP&6"ǫb9/oh LaaXNjȷZ}&Ihi :}{u*k\v^|cLsTpO[Y\O=ˍ(eFH9NXa jz~u'6q߷ 6[`WgmX / ?.C"]W#+# sp-Kuah&(WєVDE̸̮gT pSwG(S<ˍ3ʖ>yV0UreGEG0^y=iOF˩lF U9V9?YJ P.aCk pЩ CZR圻LMQG9擏69%xZ-! `eRO] 2nŸO}/{ijT/;5ʝd*11=p?Z:KKwjg%!in8ѡOÖ́(VOCb}TꤌvIl:",˹:8Úۈ<02#||UG0ŨG-sDn*q(-g%.+${.oKEwlbh ~2,.G-ߣy8)l̑~ 2?TZ0Ø}rTh'(o1, U !O0O`]KDG >%]𬆌.KmG|RD"BҪicX~`|Gqמ%uʯ^^3 !CzT4IsQSG&Kh4F̐4qRy#SE^N7%_xȑEF!h Վþĭ 37 p GԱ !…H* BFobq>QNypiK9wEf;+'ɼ[$֚ohGC"L=0pOK)dfJn])[OdK q VHj^uݰ+qڕfR2B뼲niA y7_6hdW)'h IHHULi2biZ%~!!K͌gAX s޷(+ ?3c77nhu"F.ے\= m 6b?5toA9%Vo~mꡄ5*<ύIyz@3>|O%51IBĿbU3.&YὅgyT\]Khy6Gl,s^swR}^RPg8_`sH!CD)^Gr,9b]Ag=NtA[}ߨI*Мkڃ&< !{z5\Ucť܉2m]}-2W1?uJS=e |@ݲ"\2TVԱo6^4=\4X(}WӰ͕=ڂ>OPXg]Վ!`$[YfXKw~ŢTئrFlLԢTxC)U&v@Wi ZOᒜ(x䢲C N&SDcŒK"LVC܀ִ1ֻ%I#9﵊XQol$ f9HwWeFz>CMڅaP8mLѭ83Fjϡ<DND:#\CiGy_~{(5ٱ[O~MqOY8hXN2) r>X9CF8ɋfԸҴ,fyRLAΦͩ夳Y >*Aόq:GY'X 0%==>m%[ &+VLR#2j)H"|jnJM$4 h2=N6cHuco>1H}o(d!.Á5p /p 2¤{ T7?dQ|rW޽1vU>7KcebLoAplFV*0ӑvmxrvW!^ ^j9xOnQ֛@Fy1~fo1,-PK;gm.Qi#_>rpy6Z2G:foᘸ@audF\gx0kqo22Iwv96nҲjɴ z$>3MFCLM?gP !Lyʯ r-ݍ;[!"桚'ysLNXz,H#V( C`CIUy6–VfM*$Ab*up( Wi4 :GWAˋd#-)X[[ӗ:fk 0|!KZQ-Իf!ֶp)Je< _m.~ 0 a )q3]ƬfJeCV_Rw('PTY~/!{Lm܇42* 03)cWB0W)?x=,?p7'"C>Ɉªe$=cΪw諼GkMMM?ɾPZke׌!="@y:"TC3VX{[Łf9O;WY؜sfaf7xk+)]>b4wXRA辥%HË4T|C}Kdc,>61TUvdyA|UB%d͝={z^4`ܝ fu|8PBO leZ[ ս0+R'Fn4S݊~[̰;ŅƨLqDޒ-څPĩ\)u*HbAUZ0''7֔xK1aUh7GaGπYl!w-Wֳ`V w8A*#K03SeAGƞFJ:ݳBca%` g@Wrɦ! ||^UlI;I`؇SȚW1pa61WZ%̓f/@ko4`ϓ _e]h<3rm;W7H*,s'U;:hᵒjPvu딫~nl;?ϳ\"EyVG(%C}W>(`՜=~F{֚t?کWq&*zFKjojC9^HJ2YuH-Y Jx6EXmTM9&5is,9OWCN`H#< $~R#oW&; {kt`)@/e@TFkН*8J!Afb@ϻI uBM)x*"{q6d^("IKYY59^ߙ'Ozq/&?]FCa$8UX5f0 -tAB'd Zh*E1êaZ 2;~EoKEsACdžRڅ-U~ [" Ft{6.d\߳S9T?a625ޑЩ3@WO*v5px2 k~8g-b&.'Odki%N.`?"#sҎzGH;,"ռeAS}!:AK8C>;[R\F$er u;xjʁݸ$ԗJPF`)cX')7mr)fMbr[!tgI}#G3欄YsqS"Lk$[E]4,S2` P{<~O ] 6j'Y{Tګ;'=C첢}C0xL(K&'ؔ,e)}߭)m^b b h )E`S؂e\(O\X4x Ᏺ2}7.w$)_*JTq4bI:|<‘q&ݛ<Μ\}sO$KOɜ% yZ:k*zןV.s}%e}};Hh{q0lu-C$Too:m{zJSuP k0< MIZXa:,вdbhLWHGE֓$ݰ#]>ٹTZBeo:"DǑ{ˣJ v n/nlL\$0K鵴|E#y¸gsmbq)+O5W# Zz{?悇0W|?Ɩ!FW zs VQ==,V;0)#yJzU#N֗XP;+'(!BaŖri뒭}?]y4_&[ֈM$ݽ*ه?:Ts4&r1sðk-UX2#y4e*팢 (±Ȧ4{zM21j]g)U1(Y \*R'hIm`m3MZ22l0 )v'S1uN . 
Vhg(K' vPm_bS nW퓅\(a&]4=mStޡKiX6 I#B;BMqp$gObyUXgz ?JLx$r.]V"aE]0-?^/t MĈ 0z<זځdT-ٹ["HIz^p\i$Ay9qw$A MJQ嚐QԌ пS+;C+?mfz^hdV}m|rr̅v2I) xаSO8ߙgf`tB֓H􎤃&t~l+MUPބ'7Rcd5].a&X٣:ˉUHe42B-8_G/'L\SJt'ӥuUiAQ_. /XVkY^<ߛPr=R(4Wɸg ,/5jD[.ГQYS2*.4WY!a<aux?]ZK > 2RH-8Bl5̩-X!ٲm* D1 =1mslŌ&.>oѮnOƜ)1h@wL/lt~.\(Ͳ# NYSZaͧI50Kd@+H^5P/& $<gV[1z] oaI:\JM|}^g_gEb&܆n8AۂuEXvKg4fp cľXPP+T63oS(X/֭DRtzL̲ Oa𵓯Vxx_>οOEnlGOCQ49p+J CԒZYڵ/[z͠ҍK!ϪDxnI5LMoSIccPasXZ/YxpoIq?UItdDw>w}38hRB[gw|VS&^aN /GS=KhVH>^Kqi%^dFQ*҆Źϸy̲H;iAe+`jXF@JҞSOaAt=9 1w;伮WȕjqSDl]&C0dRKS?x .ry1+!dk̢jQuQE'=S-%]̴kkفU[zh+S#&{|1 JpU'SN:9 tjąݿSa/`BKFH9YYb5y{*Wc*r ָE];B s:옹B:2ɝ@3(\* P_˳2Ag[S:-ipn&l 1ϔP*ɠݲcvENJ/D M+<p<%pZ,JP[-t΍!A%NML4|A#=E-抝BT%|]vTMƸ20~3H ]lKm)ا2Aίj۬9% 뷀Q;X>i˽U 1 0 nfT`뮦R$/x;Uے;ч2fz}y5+5ۜy%d{Kz zY|1xJ}JαXyVנyzX1nWŽ"[f Kl*j{SBQuNUTF?Ժs5\U,R^ŹqՖ# !CWT#Ϻf:_ ĭ,&@DgQr7LCdJ'\ܟ{Qc/Q*SpSJ1] o0Sm)^xؚi]a,y$ -6Q_v/sK ){IٜӐg25M,B.!=^]^s,mđdmC8u]a;-8nEIzzƒ@G;~R*">yڎz)V0}޿8̖M2ٜIqWV 1i9S)J/7:a_ ]fFlhxE{T5 $4Y&:q< zmu}d[#d̰V)r)T߈ &sۙHްXJʣ5 a@!j;%@r 1[3֠y6Qx &9Kws^]gFy\@Ki Q8\s_ %ad\\uoSNЕ#l4~?Ź7%*e/ t4.4M5ECjN j6ꑦޕ&/>)~8&P2Uj+ynr9iV&;cc5'c#{I1F_hp]al365RDUbpU]8Mۗ{^vf= lM. I1(F@{>t5&w2 cP>hvX#$: ݽ.LX{L&iS>$1J5%_Sǩb8w_] 3 ga%fJܗ/ӵgRwtBHH!mC֫"!iK''hV;E8n#[E_K+RreᷛC"pq uEmiaJAɥ1|\H].Wk(^uQ qX3T{ia@<&u%m8Ş"*]f=YYD#q/AA6\pJrȄN*əwU`-@)FmE/|E2j Z*.V1gM`, z"]2D-J 7Cu{()pn4>뿞[cɠK |8w?T )-b&j_vӈ``=oB%HxLփn^O6\CE !)Je.C~`iX휾|!YN\B' ܞ0}«<%̉Ic8~{ 'ϡ%m.,j*ȹ;Y,~|=u9rYp4H[#v 30ٗ^Z|)Ge{ UފXx >J<ʟ|PPX#kG R YM,ؐF]9G;h7&ce8: ZMCd,( F^T#FZ* OޕCq\a[hRk`7w-wbۧt+շb,t>kRX圯  '`PKVI^o p-Į/S|-Gv޷Ir9LaUyS=y3<-/C~ⷹj_Ԗ 7}P-%!KSg6bFiȝ%"BqKZF.w$؍\ ہ'sP8i`' 'T 35^BHұ,ꠙ}Z)Ԅ8Cś ֓j h Kتd3WSm9Ui"f( D`a%j_aFQTy!h%Wavհ.UI>OD_`s} # =w^<5\!"G" r?((kveO8<@J-j c43w1` 7+F>AQlOOC?揿{M۷?h%4&MI"U6e<$9pu1>ӑxUW!'Wv9N(#t^9P/N9]SRV{S~۪wWoUr$ӟ%.! %/-ٗolX+z0kEmw䇖ASQa UxSڌ6&kre!cr@[m:{X_'r[^qDbrq\{6"{p>r׺pѵbKZW>`CO[ܟa*%<`ؕY(чGt!9;Pljw NIEL[Y Pp7yθ^T\ 5S  p ^e҈3CCi-?ij]bTxd5b(juVm6E=WOUΪ8y\|jK2"p[/SNʳ6In=sd}v.j-kmKRO =n*waba`83W8[-)I Jh(:NDZ.V&}*G9Gax+;_c%!B/sV>#j,g`G[[2ml{Wl<_{Fi*2r]Ϧ zro-: 7-t8Neӧ_4E 6`åzUQi=+/(73H6El'x{|ϱRpb9eZ1Tpw{%9FbniDLN?KqcW7kRp'L^wv#Թs>R" ? 
fUDڄ˨ 4;hc^n6W/-[5Lj#sHŁ5rkx?z{` j -6t8&ߍGNn]\)=de#{ @is68k,服H E@XTyPج`<96LO*J8<"LqUĀS<G@FEnG2l_{ֶJ~=bкW;_{\R:91ErRPZő &JUWlDXoy.0+"xک}~&jĕK"u1jc4 _=B\rK+*]Qha|ȰtN-UUE$ /ӷ5<%9)ڴLi<ͯ`CS5z_dOra|ĢKK.2HPbCKPѵx\ËpkWPu?8JkW<#a٪5o!܄vtHtxX~Bcex]3[~Fxn{-t:r4N7b<3j:4ޠϖ5Vm,# OИ(ޮShK38,c: rgqpO*4JfIl(uL[t i'T+26+Kfk8c" ڼ1P;Ս,cb|H5|z\c24l5ESh-|0O//[>iqԘ6|ќحK {=Ќ1#|ZF$yf3ԇșQߗ㷞a̕}8ԛg/A,xǒcHe1rfFw.N{ cȇYEؔln#NTIR`%gmPբwCV.Qn/x6A yRY:3‚BF#~ hϷ5?k3P||aRmԱy++KPRt\C!X^H\CyrEF`24>3ߝߘI_\ {F{foO wn*qQTHcԔupT_UXBW$:%T)V**snoӄa2\1;4&~sX"Y~;e}pP2.Z?YjSu* |('7s RAz>#rQ6BrWK{ya/#Ɍߩε/ᱝ+b q8z~h,_>bk.,q*R*@ޝx"{wHCmҤw6|hيzD-^)C2$>D#'ӘbxfӘ >=!4{!Kn(Y Y*?erM /u^L%t 'kAH GeL˲^oxgcJ>|{ vwL͂pXp TgWM<דT^hוn1Gƅ 8W&" h5W8E+^˘CKZ x;$1к]nj w*@DhZ-5wMnOpPD  p^x=y)CLb$x=q&+!N6 z~{-.!g ai"V'pFؘLފaAŖ k]?(+.gD!fr4f+EGJMo\kp9;lY Q}ܶZP9f:.T]dS{Mn9>&z@ro g?Z>k'y7WĹn:9IGNf"޸!:UBI@f$N-H`9UQ`[s#?hKԘ`~oS,^^<ImZb}xiSeQ e:n=h멑r1bxՑ]&%6[V;;b虈 !::5.I]ưu?c$0Gnq VL Q٪[#AwW t8"(G_K16KP&ǖeC3M?Y(=݌iQ`ۛNll%o.Aޑnc'6"lXjk`0ܵ+$B Nuf5݂gL+` oLA@t2rޖ^pnFm#ksj+Jc.sNnKIҀSl{ ؿ~?,tϸe~J>Zk9X\y?H6Җz}Zu[W;VdQDg)@^_V!q?s M+>4q%e"4Ɠ %#]rX,o[2 dƯg=LCZ1>Gt?5ʸq= cP!e]D,ƖBrmѠ3'.e\3y(X}T{jȵYn?%0vQݔƝ^_í ɦ/O=RQ 8f[h݀2,VprQAHPQ:jH5=*73.Eϙ.ž?b;f(\} nV_l4(P*6u?tSC}#ZLv)%^Nd4cU8$wUd(%#vߖ'#zTGoǠE~0XY.h PiM@Ԩ*/kkVSGkZ=ж,BPM-gp2sg;ܫf5<IOUENI`.,I&L(Dp/+?-0(?Ǧc2w@uբlÐ.lr͑z*fFaPG-=m: GV#/hOx+XNyO?$ N&HLrl&WG&g2fqOm*3ZF4CQrG@(۲Qi{E[#Xg}ŷ!y1.!}-[ ?Q73S:#K $ίĵ `I.4'91ZJ@a_ r 퐇:4ƺ47q[p>9[2#DZHf[>u e<99̫ÎÀ]|N+9*h[1Y3C-.aIYEoĞ 4H;vz/藧<֧OdСF<& H BCFeKV&\pULh JeysmLUzS^o ln\D|XP"J[kCx E5ΔX<\jĠ(p^& rz uec<ʧԷ0ꟄT[EKMX7:P-LOvf&21'YV r?ܔ, ·d(2%}8YoZ[pP䚄J cxcޞaWvvm!o)A_w9椲pO#Mcw]Kߠdy2t$S8rm㞙W#r_FӘLEK̈rə HVD!Nm3LhѼK̈]rȄ]z\=%8O{iL$%${;B_s7|z'='MhKkIRt,V5o|F~1&_1B  ^lrO~C6OwGz*@HVc 5#WS 7%wF d NK4JɔF*nf@C՗ Hg][Bd,{nG*b/@[Ht(hj &YrE6V,_L]TQ[b^8c@";ْ%ǿ ªv4<OKZB!3).U uj:(lȉv1od25;z\("A*eC_x5mUOpw,J~q^򼓊BcfwzI֎iSPnԓ&XEvn*(<\Cj&SQ[dRU:c2"]7n3~1.EC7wj֚*)Y& 'F x@.y+6f-Ѓq'zyx?yr~m-y;ꖒ K^O~G<|Gmu榅sBpOBq9%3jaK$fЋ}w~ -߻BGߝB0 v@;RY۾ gPHX.G@&2;b sQ;8S cO@/Vi5Zr VlPD`sHiBm1$q8l+;iym(9]:P2 ߣ@\^v]1;51ʚ `Ij*'xi8;T!ݰmF{  (rX*zdgz\Ҭi2`𑶤G CVBWRFJ,{̝VMWRS]Z!"QeCL,6d#"1ey\/XLVSD(҃fC [T2 "㍯>/ -j}4]\mZe+ݔ Z0Pr}ſGW$eYڛu8L s<6+Y7f@-xXM'VzjDӈ$@?>ϝ9?"p¼"Ʃ|JZZqH a%T1uz9@3u,Ҵ-fEՒ$<Ɋצ"BR[ k̬~ٵ xݿcϥz$r~VB'*ALTƚûv{S.Mry7Uh.|!U;sڂwiא#.dg3nNp\Yo1f\r(ZiqV%AfDO)Ř3a 5Sw%KԸ`~|O& ;d!Kf*gP(xzmy]r&5N]j3l^7-|hx(un&\?!hCe%]ؠC IzMev}h:JZ zVjsP·GL#nHywU[Fv-4[1 wdz(fD-ciѠIcU-OI)mBwijo$+q @30JR3DbA,>J_PpUV o=6[D1 >uGIC8Wh;jHJꈓH ziݥ v2Z8 kK0QU2mIu4tCUo$k #=!dWyբF.|;c2=5?iD ,KMۑ}7t;*wo sU\o2Պ9/$l=u= zC*=R%\ӛ*r*cAoH1p)olD]Cʗ*=28%,ىtRի!e=.,xsQ7qs]I1:Xj.^_.&'% rX:kG;(s*m\y6|{xؖT#ɺ5گhFv={$x`<CZ 8(lӘ4ؖ\$ɉM9G}t{=?{l ]`[K_ϸvFG2M u8>{v5H`zUj;/hD̘tOv߼t _x+j樦2‰ie=:鿋Ȟ9ҢDWg'mMCi2!Ș6B1¿r; 6S67+&~FlϢ]{',CY3Y8QǑX((cM(Bh~N`xv7ۢ)9xh/߄ Cn =D1mxM<_襋R 9 'ESK!}ewo1!ӕTYV u\[, h`&ۋEA)&^R) N MˇqgOfӗg[.F]L&Ǒ$E 0Kxr%)Lh_KYf]؟$7f-K."$bNPƈZXu~.cB-Rx Bl.ϱ~bO>3A=l#ڕ[ Zb<3=8p,pXv#H }s8ޕ%3 '2(K51D5W# U gZM AFEr+2ӕ)i8?[̆f9U[Xu]HbW~#++ض8k M`Q?DWgKf}]2*ɵǐnWK1X;1gcJ `d;A|uoG;rL,.E'ydߺ67EҹVE-]*B 󡔖 )`ԃa6<87^1tÚ^ ɃnkJ]hBCd8SHd߀SёK(Hy zO}g&CZ1>O:w6FE˛ћG5K\qJ+8k.~2JE2qzJbpv'oujM{cSm`ImO*ЮȤSBU=``rsəVWrZy%2p}&m =>XW[ 'jD(1 )B8_D6bi#e5ڇ=ɴ , n,_|K2* UJ\R/y$Q9^|ڸOC˻r`<1R7)B TGotMnKʎߪt]E!yɖm3v(:V*&2\/KU'aLܙg IӍ+Nܻ4%R0-KVTIasw]D-*Wwcn`>|bxڪ23^c ࠌxEܥC7e;P S]A^nƠ5UmSسތr v;3oxz0m]DY\#R6FyX)_칌 HnJ}p v2CN7y{u =UЀI e /G4 GPpՐQCٯ0il5=o vIS&~3)`yE?s) =h&2y wִVDMZWiАAb-g^̌*pHlUZ+AgVHmE"Ҁ֓.EKc&R%1HJUy(/r׹l^uE sop}M0V!˚ƳDҨ=BzSRLXDcY-4ĔJ$gs3%73Gux =C2in⦿ԦvV<š Ƅw-`X )(ޭmPybS$ bhsQiL ;YBJ\hۛ5!eH¡PXsj1i]Pmo -3ģLLG&\bzJGltՁ$y1Lvw9kl!%)X})=m_[9O{_ycB7N` jBGVnfwdK9-o;0]eo^tz5.7*5JbU jw 
xwz&7݁v3W$s/{31$Wg_"o'tb-*;nP(]9ev!hDiۅ}ŖGW#4teհ 5 &M grZ rSg.`/ Qc?~J@x."f>Qfoe>SEC]! &Ԟ9s+Zi=j 2~B]p tsVnꦙk|=-U㶚?a$cŠ{hOƙ2e:Lb@]IXƨXasۆj j]NvL:G&QS/b _‰t7%X'0Pxޣ _)aʱ 7 ̟fj!]ʡ[* dpx{6m+1I=,N{D3epoףx .b挐βaJdpx)R]cԕ=im۝{TRh cIVp%uFqr4xq[x5'wy0{ #cHtRbNveGȲ> Spށ4`굼>9~TM3a,r[0G[7Mpu(lb JMęɰa:/3S.NK!:m2:zZZ{ȴQwl{fOkwK^̀"ؖ\wi[ˉ9n^b̔UFݭڮ~_L:_}f͗.iEyL,)Գ*Θݺ&vUa( k]q*UG6[WD@3z5'% yJ4g;2.G |}JexՙXBZA1YmQ#T!.ViVۺҕZ5,| a eybWWc܂Z Nmi$u$t\*VګcTv!$(h\LWOE7ޱ$'|!AUd * 5sy\2kiݤ s DyK= Uʘ7ӿ`z- Pt Jkɇi:Ooy4F`TxJ\m,z^GPbQ&ɭS$^S։,gQ%>?^­dA!f!)0jk7)$HaHYx?3d/lUT ;_FZ.Hx5O[ۨ}[$H1<80;!qT1X0JK ԈOG)\նI heu7+.LL&3Bx@2Eo[χp2䪔T<.҆Ok GYfc0Z /q0+q^k VcYlhuzV@K\+c7v~[=GKٿT@ZAfrWeb!ͷ~ozy{/= 0eKWz  k>ݯzP  *bYcRPS/U1E, \R/et>w`K:,#N?(ljTîM,Phkdx{pUйs^#"a\dޛ8#$8]ZQpJ@WX81U,""'铺`DV-lj !laYkPb|\jBXOïS=F2˝ع8#Ay`M5-#{-EO1""K͝2AzkfH{p>2mSfrfI le-8*-Xg?C]3]oevzEc|/VޖXa1%w_*29|"sJE Tީy!n<1&ԛn k9t=57/Sűdy(v`l3f'1DAL+kȠ@40c`s*~Y&|'e{)Q;-GjnʼuE`յ ³K3T km,E& {5c<)!j8[]DcmؠCiRo){̒~U])[;KIkuK森H+<5/K+O!qځYlG3Cf=*c$s?Vs}G[S<8o\I>21!LZmc?ǩlMFlg]BwKnh. fU+FCV[8.T\j.ྋ["݀,qr9*@(ڻ\*gEG<|,>eA}u5 _C:Vl*(kCy n= ,!SI(xNIWi.-I"62?5C4 Y)C)Y5]ඃZ"rCv| UX*,(ViKO}483(E&U%`Uh_&Qvud~kw;/SV'թ625,\KO\MЦUJ h)>8-Bl@LGfe.Ϊޠ+sXQߙpZuf?84oC >;=*]gtP#jýj'y <`q|9'*- K^l 8Z]sĀ8;T Uy†5 >qݯ:p`: e8gH\װ?t2.l# 5@FZw02]Bc, %"Dj2@* `+1]m_~v{<7c~ Od!C FsÞ_OfA=I)OΙ%D~(i>KʭАOcVj.tJ;lG0u'AsQ7[y|:E$tTQR m*1.U9 X`HԷ$2]P/(4Q=U^8ew=ߘt}8fpe.8:Ǎp{fflŇ7gvǧs U VuD̴8ч͞t ÀkCo[cjW}\+=7_SΠ$1əiTEɓEٮJtVXf]VP HM ` H`K#+M 4ACcc$%ξ={ߕ/^::1@6dcX~/;1x9o)!",CghY@.P8RZE 7e3S$5V$H)?l?EHw_9L ZfAg-']*OضK⫩Le@A߾UQ WƀoPn?jedm2fNjKLJ#,1r1@i9Nvr7~L]ܔ/[Q*Joxdp\Ufg4 I2D޼v!dLUަuGXҿ aW .QW~HM-8џAUc֒p -!o((QkvLjH5_Uw44-Q_ӉLSA66e;JJ$K*p2=X3#h\{6 ~YmgbUsq{Fվǟ7b;.I8(,*]wU2`YH]^&cF@SgyY\]^m(6D;>/B^3M-'RR7%36yqJʟm^s'6.@4"z|J SG07g.  ^BA\㲁x$d񠋶ʫsvQ=rYqYbhat`M&Q=o)ƘxXlH|'vI|ZFCK=2j|f̓Gҁ,HЊ6<&-}?Ԓb^SՔ 羻ENG?3u4Y e/yD$o%0y(wq$$"Ȝ MUe~p}ڳ+dOĸ/rŠFme]DLzk vݻS¢y8mrNj P#ǫd4*jf9-9-s!DN(.Ktʟg$.RcbZ! Q^c8q(b˱/%Y8 m]7 qP%qM,֫ ",vԈ1Qlq\$YpylfOmK]}:na`BZ`7hOiNKh{jImqChSW.US{ۏ^`w8YN>p!ز46$c~kK=qsT~ i.YLZXOYz}ۇ K<,sayT5X9`jhYl"օhJY37*BPq+ 르7Ȍ|ge]'cI`k5F }J(0ȯ;7EzńŅ]1x;:bī'0@Z<.\qkNws)@/,. cn~?r[TsC.Q,D客)H ?l'5f-ȑZ^-ԙ^  k<93w+cDabҡ"P y˞m͗*F ΄^<܎Zv$=vW%4L23ˆ~j T< 'LUyɖbE1CQ> l. Xa})BD*N:SfWs=.w:R${0H hskۻ's!>1:QC,GRWv~m}]'D:׉s| /%؂)j2T s&r( &JrKM{Gk쬊4Q+0!5߸h˳5.]0j~9w_BjCqkLDsfNvЖ ?l=2DiQ1" uQڭ3]8RJ9?¿v[z9r֌@"\Ju F߈ק'eiYY j(]ߌ%a+(VJX=OQƥȷ0K4$eNY$U˧-uf#CAD|xCO? {XgDq~z-ab!9֭'*2`k|ӎ_gX4nʙqe5E1I q<},kFŘ*G+nBH[*xÏL>b#7]|c۳}L>Z\$ Mb}ej<qSj T)Tf';XdA9P43@/(30sjb+D~E2U)Ē^ MȖ F|*K`}wE]ɿ)_dzua>xB=SS^5ޤ<1;ܷjI1p/DR]v>ձ:2 ZZ3d[c R*f㏊Z2ȵK)nnU~cU/[? ++٦SzSj/g b isXKnw1gemA 6cZ\0e2 ([n&zhjȬHӻ'{Gh[JfkbXvj7x÷,v&WY”VsLX}=2$讙ѷʨyE@M]YA2%P7}@Qø} 7r9pnJ7[)b&βJ09e1GADi{:m6gG4.m=xy t6w3oG1̀+˜)GuIp.[ wI¹X%pyQh$P>8^.GNT;# L kF?Vm !n-&QLdC-)DTk XbI4b 9wN{=x5w`:̕G¯R5iXUx7.Sm&&;qXi˸Sְ p>i3oa|4Z=٪Ӆ 6A{I~gAzӒܒ[oy R5X1.,qj2pm]Y.GHx{>n2Y #^.a'[kqȍN\mp+Єvnl?!%CRI3fЅ U K0b{WoCǑ_K`߈rwT?K!xVlѵQJfwn=LTF& UG̾ W[RE\V2kH( =1ɘ0{tC B*Sh^"Ào4YE47>WN|ȎMoXY|C+]f2mC3cY ]%Hdiv*4h=5}Fcq`':Yxf"LݡIY -oiCJSSI#Xʘ=ܖA]PUJ-vìpƧORQNQ o-'5W|w ?h$W<Ī5h;"cYF {Ćſ Ku }iUr )C<K;/V kp Cv<= ت/4,)m'&PyairAO "<֢zo;<5ubxuXv L~1|#etyUrOhc#swYee@3'P#YhwieiJE إS^潅]iBdY0!ʥa2Sl1S4À,OT7NiNpSLa$(V"ޏMZ!N~Q;Qpެ֜><+qDPj ʿD-}vxj r`h +$yp ?¢QCH-bŀ ^8Y Y:zj^GlDuJ9 ؔo\=NrI~KfjBowy՘q'HЂw]>~zgӈeV䶇+ꁇ$Q\wqT}蒩s5DnNJp`W;6o.bOdikd_,;P4+&4bJ$Ţ\EB9' =$5ɼAw7ujT93\s˜@quM #g=`HUIQ{-~]bt+ѹ'ijX1KghX0#bwC>Xh[wS$xdu|\%LOP{yBL=H6]H.ſ;tAL-!, S}"l;QNE9XL]i$lyU&]d"CH,kmJ^!BҙJh+*䖔ZA 'IbbظBPdS' z33IBc+b$O=! 
)@^,UWb,N!і$ahG8 ޘ&SEF;T@tˢn7q'@ypQ"۩/DeX bow1Sm-5"##jU;ؽo-~V,`C\{ KzXW"C:Z.Yرʔkt`ө] BɝPN}޺X ےVJj[$x*w:=Vb#ʅ8~͆69"l\ \+O|+6k %5cNh+:I^05]V}88&/e \g9!AuFyZ62~5h~BHqcZ#Opkz pߛԭv޳+{YΎӵ3IO:D:!3zr<-Ἥ5w&070BL=q3S/ *k9OX hDaW=m(1\TxW\I?6w}Áx;Z<ƕ+; !l%Pe)>ș^ fhupM*=Tv8?ChL}~Aд3G;0c"bPskrl'Sy( EDzÓ?MBS/fUuzYlZb OÇx{j0D_TPl'ٖ&Q~AǍkȬiCmLѡ /pa_=)= kMB g;Nxpu{1G貙uKd1! {=FQPc\=tn;hM9=2:t=-i1;zjrwk2k_YE xT[$/zЎs>nmu(s%lO#Vſ ó^zj:|<ō8߱ 16R$cg&fk3ŁLJm5;_ ) ySrXRhՈ}E7՟[ŒW)V_J/OVayUɆk{VijtBt?z2#4 0 "@"،U̓!Q ߆)0pJ }( -WbǏ`3@'JԹ\PX}|qRh9Тou9DPۚ;S(2K0NxC14Zo/ώvBRw6eX?]:> Xl8ܛf6 t,F6.7Z{a.f=߼ԝ'Zඝ֙?m`ab]:>pϬ^[#˂oEGpXb3{ o8"rgVg9T s}UlAv)H{kD͊k.{kZh>]&OXC\X~j цrx!.G3u7w&x .S~܇\fNcOMpl?%Ѱ| #èMrjzªw(>6]W#ID`š>ŵIT̄dp쐯#SGjrxr7uKb9>rsP둮[U:a4m>t vQoKy`Ghn RO8|ld6\"/O]kޛBmDy~ɘdU .\zT Q=DwQ(LEM "p4V;=wW3* :gؚ}V&+.Sk%Yɤ{R;- PIK5]1ajx7,^XgSi<hۻ-F{ccri#\2z|#`vSmaŷMZdʢC^~g$rm)GPMz<®;>R}R^) >vG6zћ>ə;g)XCQ*B[is&Yz<*V} %шh*h}P}hz+Vyv8qR'~g/5ƧjrGhNMNOV?K،d=HοO 3~C5TJor-L( ݻEwh- |9hWXy0=ʶX %w=XKW5J- V`[F;^S`B7MdISb *gRX> WssݦoF/%!kYY΂%9U~5H\jB zR7Q@> =vOQ~Dz#;ve >6EA^3t>w nM]=! b:/:K}]5eĝp\ᖖd -m(yiA@Jݥ &=>Zpł?4 4# sH?:P ƕNI:Pp^?ԒCG N~<גobsӦ>s3H|4Mx=S"Pct5_V{d؎LΒ VmnqB<HTSp%f١UyJ4pJzkI~MR @"H Kvc2LcE4vL>3 |cp]᯸&X}a Hԅ)Ҝs%?mSC<:M"|\?L6l)zTr&(. %L1Ty ˗f ej0nu 6ung5RG Ӿ#FFлX#d9 O%Ժ.֘W$u$T`?\Jڌ @6UK-Vje0b4 aZ[p[(M91+jUF0^ă@º_g܇k㊀4 ?B: Qu Y[L={v:4kR|deR L.;<>OS(P#yonSG)!ɜF?NgvXLFEI$UlvfH]e#_ikͬ;Loȟzn *K?/TVҍأZ*,K GW 2q;vݺ?F6=gvY ;du弆#-X=CF$LO}\IƥIaKH)`b"z!Ngck?[¦l:w?#i8c>dww+<[ 5zn o]Z oLJfh,2k^8HtLvon=2KmpiݍQC/E)cs>S5IɨnBSfMx0 fb нk(5lh&!9dabؤ`VCAܭ#0ax;H4N ojd>T2eW>$FKuOi%Fh,E>?AHHLFV!bY7M~mnۊHP@DPQ9ˢ+Sӣt+|ձqM\;66˼ҕYN " NDt6Ya\0}H/$9 aW)ҍ6mlŞ۵]~r셺ȁm׮YDP`h{͒٪ιR}]uPS1P BqSAhw\>e x-4G|2P¤1rܸmmXcHgOiOne"DJ™6F0=eS7r_sSS}UeUILtgjAQ+j&D42 m Oqb&JՕJ>v$UM9Y)2ANϞ=pGbzƔB)0zKf9 JB7nEWۃX0ծ"S.wuPR+N6ܗbq_K $gL%Œ(Lf&C4b]1u[7UE\$iCHq Hp::'EqW\HЯtH$-+X7Nt[WǯB)ԥg[])QA.jHecp׮p<#8aJ  aaa0RYQ Nz}>|yn0#nbA ư9d j˖퍇kֶ=?Tu*-v@Oܴ/\_A/S댳wwau"28]L$;bd^:F K"v`q?O x_xڠ%{Y7ĴaLϹRL` s~LckՆ~ *"l >`eU/h#ǨԗF7v1{:v1>d"WΛ)7eWs'~gB ޙo:K1FZOB'x}nun䈥oSM4+#Ew.%*X%aK!۶͸ye~jׯb@u%5+RPq3@WrÍB / V= /xu{=(`ّ:H&=dLt__"V8 O߽ej˿-d-0&H8JٞE^lj T{DbT^eE{pG20I{, ^j<$: 7Djo/,gH3h_5^ K 5gnds[b̆Nȗ(]['>k,kΠe3;DLn8^"C2vkso;4Š"Gr61  PvBZg|Q3 K(!uNH|ݧ[TXEyeRu SdB'3I+a-qY N,֩Ĵ hC Pϛ _֜TpU|W'@}ڎ&GrX\hz aKՋ. 