photutils-0.4/0000755000214200020070000000000013175654702015605 5ustar lbradleySTSCI\science00000000000000photutils-0.4/ah_bootstrap.py0000644000214200020070000010434413175634532020651 0ustar lbradleySTSCI\science00000000000000""" This bootstrap module contains code for ensuring that the astropy_helpers package will be importable by the time the setup.py script runs. It also includes some workarounds to ensure that a recent-enough version of setuptools is being used for the installation. This module should be the first thing imported in the setup.py of distributions that make use of the utilities in astropy_helpers. If the distribution ships with its own copy of astropy_helpers, this module will first attempt to import from the shipped copy. However, it will also check PyPI to see if there are any bug-fix releases on top of the current version that may be useful to get past platform-specific bugs that have been fixed. When running setup.py, use the ``--offline`` command-line option to disable the auto-upgrade checks. When this module is imported or otherwise executed it automatically calls a main function that attempts to read the project's setup.cfg file, which it checks for a configuration section called ``[ah_bootstrap]`` the presences of that section, and options therein, determine the next step taken: If it contains an option called ``auto_use`` with a value of ``True``, it will automatically call the main function of this module called `use_astropy_helpers` (see that function's docstring for full details). Otherwise no further action is taken (however, ``ah_bootstrap.use_astropy_helpers`` may be called manually from within the setup.py script). Additional options in the ``[ah_boostrap]`` section of setup.cfg have the same names as the arguments to `use_astropy_helpers`, and can be used to configure the bootstrap script when ``auto_use = True``. See https://github.com/astropy/astropy-helpers for more details, and for the latest version of this module. """ import contextlib import errno import imp import io import locale import os import re import subprocess as sp import sys try: from ConfigParser import ConfigParser, RawConfigParser except ImportError: from configparser import ConfigParser, RawConfigParser if sys.version_info[0] < 3: _str_types = (str, unicode) _text_type = unicode PY3 = False else: _str_types = (str, bytes) _text_type = str PY3 = True # What follows are several import statements meant to deal with install-time # issues with either missing or misbehaving pacakges (including making sure # setuptools itself is installed): # Some pre-setuptools checks to ensure that either distribute or setuptools >= # 0.7 is used (over pre-distribute setuptools) if it is available on the path; # otherwise the latest setuptools will be downloaded and bootstrapped with # ``ez_setup.py``. 
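# Illustrative sketch (refers back to the module docstring above, which
# describes a ``[ah_bootstrap]`` section in setup.cfg whose option names
# mirror the arguments of `use_astropy_helpers`).  A minimal section that
# enables the automatic bootstrap might look like the following; the ``path``
# value is only an assumed typical layout for a project that vendors
# astropy_helpers as a submodule:
#
#     [ah_bootstrap]
#     auto_use = True
#     path = astropy_helpers
#
# Any of the other recognized options (``download_if_needed``, ``index_url``,
# ``use_git``, ``offline``, ``auto_upgrade``) can be set in the same way.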
This used to be included in a separate file called # setuptools_bootstrap.py; but it was combined into ah_bootstrap.py try: import pkg_resources _setuptools_req = pkg_resources.Requirement.parse('setuptools>=0.7') # This may raise a DistributionNotFound in which case no version of # setuptools or distribute is properly installed _setuptools = pkg_resources.get_distribution('setuptools') if _setuptools not in _setuptools_req: # Older version of setuptools; check if we have distribute; again if # this results in DistributionNotFound we want to give up _distribute = pkg_resources.get_distribution('distribute') if _setuptools != _distribute: # It's possible on some pathological systems to have an old version # of setuptools and distribute on sys.path simultaneously; make # sure distribute is the one that's used sys.path.insert(1, _distribute.location) _distribute.activate() imp.reload(pkg_resources) except: # There are several types of exceptions that can occur here; if all else # fails bootstrap and use the bootstrapped version from ez_setup import use_setuptools use_setuptools() # typing as a dependency for 1.6.1+ Sphinx causes issues when imported after # initializing submodule with ah_boostrap.py # See discussion and references in # https://github.com/astropy/astropy-helpers/issues/302 try: import typing # noqa except ImportError: pass # Note: The following import is required as a workaround to # https://github.com/astropy/astropy-helpers/issues/89; if we don't import this # module now, it will get cleaned up after `run_setup` is called, but that will # later cause the TemporaryDirectory class defined in it to stop working when # used later on by setuptools try: import setuptools.py31compat # noqa except ImportError: pass # matplotlib can cause problems if it is imported from within a call of # run_setup(), because in some circumstances it will try to write to the user's # home directory, resulting in a SandboxViolation. See # https://github.com/matplotlib/matplotlib/pull/4165 # Making sure matplotlib, if it is available, is imported early in the setup # process can mitigate this (note importing matplotlib.pyplot has the same # issue) try: import matplotlib matplotlib.use('Agg') import matplotlib.pyplot except: # Ignore if this fails for *any* reason* pass # End compatibility imports... # In case it didn't successfully import before the ez_setup checks import pkg_resources from setuptools import Distribution from setuptools.package_index import PackageIndex from setuptools.sandbox import run_setup from distutils import log from distutils.debug import DEBUG # TODO: Maybe enable checking for a specific version of astropy_helpers? DIST_NAME = 'astropy-helpers' PACKAGE_NAME = 'astropy_helpers' # Defaults for other options DOWNLOAD_IF_NEEDED = True INDEX_URL = 'https://pypi.python.org/simple' USE_GIT = True OFFLINE = False AUTO_UPGRADE = True # A list of all the configuration options and their required types CFG_OPTIONS = [ ('auto_use', bool), ('path', str), ('download_if_needed', bool), ('index_url', str), ('use_git', bool), ('offline', bool), ('auto_upgrade', bool) ] class _Bootstrapper(object): """ Bootstrapper implementation. See ``use_astropy_helpers`` for parameter documentation. 
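    For orientation, the two entry points described in the module docstring
    look roughly like this in a project's ``setup.py`` (an illustrative
    sketch, not taken from any particular project; the ``path`` value is an
    assumed typical layout):

        import ah_bootstrap  # with ``auto_use = True`` in setup.cfg this
                             # already runs the bootstrapper via
                             # _Bootstrapper.main() at import time

        # or, without ``auto_use``, configure and run it explicitly:
        ah_bootstrap.use_astropy_helpers(path='astropy_helpers')

    See ``use_astropy_helpers`` for the full parameter list.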
""" def __init__(self, path=None, index_url=None, use_git=None, offline=None, download_if_needed=None, auto_upgrade=None): if path is None: path = PACKAGE_NAME if not (isinstance(path, _str_types) or path is False): raise TypeError('path must be a string or False') if PY3 and not isinstance(path, _text_type): fs_encoding = sys.getfilesystemencoding() path = path.decode(fs_encoding) # path to unicode self.path = path # Set other option attributes, using defaults where necessary self.index_url = index_url if index_url is not None else INDEX_URL self.offline = offline if offline is not None else OFFLINE # If offline=True, override download and auto-upgrade if self.offline: download_if_needed = False auto_upgrade = False self.download = (download_if_needed if download_if_needed is not None else DOWNLOAD_IF_NEEDED) self.auto_upgrade = (auto_upgrade if auto_upgrade is not None else AUTO_UPGRADE) # If this is a release then the .git directory will not exist so we # should not use git. git_dir_exists = os.path.exists(os.path.join(os.path.dirname(__file__), '.git')) if use_git is None and not git_dir_exists: use_git = False self.use_git = use_git if use_git is not None else USE_GIT # Declared as False by default--later we check if astropy-helpers can be # upgraded from PyPI, but only if not using a source distribution (as in # the case of import from a git submodule) self.is_submodule = False @classmethod def main(cls, argv=None): if argv is None: argv = sys.argv config = cls.parse_config() config.update(cls.parse_command_line(argv)) auto_use = config.pop('auto_use', False) bootstrapper = cls(**config) if auto_use: # Run the bootstrapper, otherwise the setup.py is using the old # use_astropy_helpers() interface, in which case it will run the # bootstrapper manually after reconfiguring it. bootstrapper.run() return bootstrapper @classmethod def parse_config(cls): if not os.path.exists('setup.cfg'): return {} cfg = ConfigParser() try: cfg.read('setup.cfg') except Exception as e: if DEBUG: raise log.error( "Error reading setup.cfg: {0!r}\n{1} will not be " "automatically bootstrapped and package installation may fail." "\n{2}".format(e, PACKAGE_NAME, _err_help_msg)) return {} if not cfg.has_section('ah_bootstrap'): return {} config = {} for option, type_ in CFG_OPTIONS: if not cfg.has_option('ah_bootstrap', option): continue if type_ is bool: value = cfg.getboolean('ah_bootstrap', option) else: value = cfg.get('ah_bootstrap', option) config[option] = value return config @classmethod def parse_command_line(cls, argv=None): if argv is None: argv = sys.argv config = {} # For now we just pop recognized ah_bootstrap options out of the # arg list. This is imperfect; in the unlikely case that a setup.py # custom command or even custom Distribution class defines an argument # of the same name then we will break that. However there's a catch22 # here that we can't just do full argument parsing right here, because # we don't yet know *how* to parse all possible command-line arguments. 
if '--no-git' in argv: config['use_git'] = False argv.remove('--no-git') if '--offline' in argv: config['offline'] = True argv.remove('--offline') return config def run(self): strategies = ['local_directory', 'local_file', 'index'] dist = None # First, remove any previously imported versions of astropy_helpers; # this is necessary for nested installs where one package's installer # is installing another package via setuptools.sandbox.run_setup, as in # the case of setup_requires for key in list(sys.modules): try: if key == PACKAGE_NAME or key.startswith(PACKAGE_NAME + '.'): del sys.modules[key] except AttributeError: # Sometimes mysterious non-string things can turn up in # sys.modules continue # Check to see if the path is a submodule self.is_submodule = self._check_submodule() for strategy in strategies: method = getattr(self, 'get_{0}_dist'.format(strategy)) dist = method() if dist is not None: break else: raise _AHBootstrapSystemExit( "No source found for the {0!r} package; {0} must be " "available and importable as a prerequisite to building " "or installing this package.".format(PACKAGE_NAME)) # This is a bit hacky, but if astropy_helpers was loaded from a # directory/submodule its Distribution object gets a "precedence" of # "DEVELOP_DIST". However, in other cases it gets a precedence of # "EGG_DIST". However, when activing the distribution it will only be # placed early on sys.path if it is treated as an EGG_DIST, so always # do that dist = dist.clone(precedence=pkg_resources.EGG_DIST) # Otherwise we found a version of astropy-helpers, so we're done # Just active the found distribution on sys.path--if we did a # download this usually happens automatically but it doesn't hurt to # do it again # Note: Adding the dist to the global working set also activates it # (makes it importable on sys.path) by default. try: pkg_resources.working_set.add(dist, replace=True) except TypeError: # Some (much) older versions of setuptools do not have the # replace=True option here. These versions are old enough that all # bets may be off anyways, but it's easy enough to work around just # in case... if dist.key in pkg_resources.working_set.by_key: del pkg_resources.working_set.by_key[dist.key] pkg_resources.working_set.add(dist) @property def config(self): """ A `dict` containing the options this `_Bootstrapper` was configured with. """ return dict((optname, getattr(self, optname)) for optname, _ in CFG_OPTIONS if hasattr(self, optname)) def get_local_directory_dist(self): """ Handle importing a vendored package from a subdirectory of the source distribution. """ if not os.path.isdir(self.path): return log.info('Attempting to import astropy_helpers from {0} {1!r}'.format( 'submodule' if self.is_submodule else 'directory', self.path)) dist = self._directory_import() if dist is None: log.warn( 'The requested path {0!r} for importing {1} does not ' 'exist, or does not contain a copy of the {1} ' 'package.'.format(self.path, PACKAGE_NAME)) elif self.auto_upgrade and not self.is_submodule: # A version of astropy-helpers was found on the available path, but # check to see if a bugfix release is available on PyPI upgrade = self._do_upgrade(dist) if upgrade is not None: dist = upgrade return dist def get_local_file_dist(self): """ Handle importing from a source archive; this also uses setup_requires but points easy_install directly to the source archive. 
""" if not os.path.isfile(self.path): return log.info('Attempting to unpack and import astropy_helpers from ' '{0!r}'.format(self.path)) try: dist = self._do_download(find_links=[self.path]) except Exception as e: if DEBUG: raise log.warn( 'Failed to import {0} from the specified archive {1!r}: ' '{2}'.format(PACKAGE_NAME, self.path, str(e))) dist = None if dist is not None and self.auto_upgrade: # A version of astropy-helpers was found on the available path, but # check to see if a bugfix release is available on PyPI upgrade = self._do_upgrade(dist) if upgrade is not None: dist = upgrade return dist def get_index_dist(self): if not self.download: log.warn('Downloading {0!r} disabled.'.format(DIST_NAME)) return None log.warn( "Downloading {0!r}; run setup.py with the --offline option to " "force offline installation.".format(DIST_NAME)) try: dist = self._do_download() except Exception as e: if DEBUG: raise log.warn( 'Failed to download and/or install {0!r} from {1!r}:\n' '{2}'.format(DIST_NAME, self.index_url, str(e))) dist = None # No need to run auto-upgrade here since we've already presumably # gotten the most up-to-date version from the package index return dist def _directory_import(self): """ Import astropy_helpers from the given path, which will be added to sys.path. Must return True if the import succeeded, and False otherwise. """ # Return True on success, False on failure but download is allowed, and # otherwise raise SystemExit path = os.path.abspath(self.path) # Use an empty WorkingSet rather than the man # pkg_resources.working_set, since on older versions of setuptools this # will invoke a VersionConflict when trying to install an upgrade ws = pkg_resources.WorkingSet([]) ws.add_entry(path) dist = ws.by_key.get(DIST_NAME) if dist is None: # We didn't find an egg-info/dist-info in the given path, but if a # setup.py exists we can generate it setup_py = os.path.join(path, 'setup.py') if os.path.isfile(setup_py): with _silence(): run_setup(os.path.join(path, 'setup.py'), ['egg_info']) for dist in pkg_resources.find_distributions(path, True): # There should be only one... 
return dist return dist def _do_download(self, version='', find_links=None): if find_links: allow_hosts = '' index_url = None else: allow_hosts = None index_url = self.index_url # Annoyingly, setuptools will not handle other arguments to # Distribution (such as options) before handling setup_requires, so it # is not straightforward to programmatically augment the arguments which # are passed to easy_install class _Distribution(Distribution): def get_option_dict(self, command_name): opts = Distribution.get_option_dict(self, command_name) if command_name == 'easy_install': if find_links is not None: opts['find_links'] = ('setup script', find_links) if index_url is not None: opts['index_url'] = ('setup script', index_url) if allow_hosts is not None: opts['allow_hosts'] = ('setup script', allow_hosts) return opts if version: req = '{0}=={1}'.format(DIST_NAME, version) else: req = DIST_NAME attrs = {'setup_requires': [req]} try: if DEBUG: _Distribution(attrs=attrs) else: with _silence(): _Distribution(attrs=attrs) # If the setup_requires succeeded it will have added the new dist to # the main working_set return pkg_resources.working_set.by_key.get(DIST_NAME) except Exception as e: if DEBUG: raise msg = 'Error retrieving {0} from {1}:\n{2}' if find_links: source = find_links[0] elif index_url != INDEX_URL: source = index_url else: source = 'PyPI' raise Exception(msg.format(DIST_NAME, source, repr(e))) def _do_upgrade(self, dist): # Build up a requirement for a higher bugfix release but a lower minor # release (so API compatibility is guaranteed) next_version = _next_version(dist.parsed_version) req = pkg_resources.Requirement.parse( '{0}>{1},<{2}'.format(DIST_NAME, dist.version, next_version)) package_index = PackageIndex(index_url=self.index_url) upgrade = package_index.obtain(req) if upgrade is not None: return self._do_download(version=upgrade.version) def _check_submodule(self): """ Check if the given path is a git submodule. See the docstrings for ``_check_submodule_using_git`` and ``_check_submodule_no_git`` for further details. """ if (self.path is None or (os.path.exists(self.path) and not os.path.isdir(self.path))): return False if self.use_git: return self._check_submodule_using_git() else: return self._check_submodule_no_git() def _check_submodule_using_git(self): """ Check if the given path is a git submodule. If so, attempt to initialize and/or update the submodule if needed. This function makes calls to the ``git`` command in subprocesses. The ``_check_submodule_no_git`` option uses pure Python to check if the given path looks like a git submodule, but it cannot perform updates. """ cmd = ['git', 'submodule', 'status', '--', self.path] try: log.info('Running `{0}`; use the --no-git option to disable git ' 'commands'.format(' '.join(cmd))) returncode, stdout, stderr = run_cmd(cmd) except _CommandNotFound: # The git command simply wasn't found; this is most likely the # case on user systems that don't have git and are simply # trying to install the package from PyPI or a source # distribution. Silently ignore this case and simply don't try # to use submodules return False stderr = stderr.strip() if returncode != 0 and stderr: # Unfortunately the return code alone cannot be relied on, as # earlier versions of git returned 0 even if the requested submodule # does not exist # This is a warning that occurs in perl (from running git submodule) # which only occurs with a malformatted locale setting which can # happen sometimes on OSX. 
See again # https://github.com/astropy/astropy/issues/2749 perl_warning = ('perl: warning: Falling back to the standard locale ' '("C").') if not stderr.strip().endswith(perl_warning): # Some other unknown error condition occurred log.warn('git submodule command failed ' 'unexpectedly:\n{0}'.format(stderr)) return False # Output of `git submodule status` is as follows: # # 1: Status indicator: '-' for submodule is uninitialized, '+' if # submodule is initialized but is not at the commit currently indicated # in .gitmodules (and thus needs to be updated), or 'U' if the # submodule is in an unstable state (i.e. has merge conflicts) # # 2. SHA-1 hash of the current commit of the submodule (we don't really # need this information but it's useful for checking that the output is # correct) # # 3. The output of `git describe` for the submodule's current commit # hash (this includes for example what branches the commit is on) but # only if the submodule is initialized. We ignore this information for # now _git_submodule_status_re = re.compile( '^(?P[+-U ])(?P[0-9a-f]{40}) ' '(?P\S+)( .*)?$') # The stdout should only contain one line--the status of the # requested submodule m = _git_submodule_status_re.match(stdout) if m: # Yes, the path *is* a git submodule self._update_submodule(m.group('submodule'), m.group('status')) return True else: log.warn( 'Unexpected output from `git submodule status`:\n{0}\n' 'Will attempt import from {1!r} regardless.'.format( stdout, self.path)) return False def _check_submodule_no_git(self): """ Like ``_check_submodule_using_git``, but simply parses the .gitmodules file to determine if the supplied path is a git submodule, and does not exec any subprocesses. This can only determine if a path is a submodule--it does not perform updates, etc. This function may need to be updated if the format of the .gitmodules file is changed between git versions. """ gitmodules_path = os.path.abspath('.gitmodules') if not os.path.isfile(gitmodules_path): return False # This is a minimal reader for gitconfig-style files. It handles a few of # the quirks that make gitconfig files incompatible with ConfigParser-style # files, but does not support the full gitconfig syntax (just enough # needed to read a .gitmodules file). 
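        # For reference, the kind of .gitmodules entry this reader is meant
        # to handle looks like the following (an illustrative example; the
        # URL is simply the upstream astropy-helpers repository mentioned in
        # the module docstring):
        #
        #     [submodule "astropy_helpers"]
        #         path = astropy_helpers
        #         url = https://github.com/astropy/astropy-helpers.git
        #
        # The indented option lines are why leading whitespace is stripped
        # before the content is handed to RawConfigParser below.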
gitmodules_fileobj = io.StringIO() # Must use io.open for cross-Python-compatible behavior wrt unicode with io.open(gitmodules_path) as f: for line in f: # gitconfig files are more flexible with leading whitespace; just # go ahead and remove it line = line.lstrip() # comments can start with either # or ; if line and line[0] in (':', ';'): continue gitmodules_fileobj.write(line) gitmodules_fileobj.seek(0) cfg = RawConfigParser() try: cfg.readfp(gitmodules_fileobj) except Exception as exc: log.warn('Malformatted .gitmodules file: {0}\n' '{1} cannot be assumed to be a git submodule.'.format( exc, self.path)) return False for section in cfg.sections(): if not cfg.has_option(section, 'path'): continue submodule_path = cfg.get(section, 'path').rstrip(os.sep) if submodule_path == self.path.rstrip(os.sep): return True return False def _update_submodule(self, submodule, status): if status == ' ': # The submodule is up to date; no action necessary return elif status == '-': if self.offline: raise _AHBootstrapSystemExit( "Cannot initialize the {0} submodule in --offline mode; " "this requires being able to clone the submodule from an " "online repository.".format(submodule)) cmd = ['update', '--init'] action = 'Initializing' elif status == '+': cmd = ['update'] action = 'Updating' if self.offline: cmd.append('--no-fetch') elif status == 'U': raise _AHBootstrapSystemExit( 'Error: Submodule {0} contains unresolved merge conflicts. ' 'Please complete or abandon any changes in the submodule so that ' 'it is in a usable state, then try again.'.format(submodule)) else: log.warn('Unknown status {0!r} for git submodule {1!r}. Will ' 'attempt to use the submodule as-is, but try to ensure ' 'that the submodule is in a clean state and contains no ' 'conflicts or errors.\n{2}'.format(status, submodule, _err_help_msg)) return err_msg = None cmd = ['git', 'submodule'] + cmd + ['--', submodule] log.warn('{0} {1} submodule with: `{2}`'.format( action, submodule, ' '.join(cmd))) try: log.info('Running `{0}`; use the --no-git option to disable git ' 'commands'.format(' '.join(cmd))) returncode, stdout, stderr = run_cmd(cmd) except OSError as e: err_msg = str(e) else: if returncode != 0: err_msg = stderr if err_msg is not None: log.warn('An unexpected error occurred updating the git submodule ' '{0!r}:\n{1}\n{2}'.format(submodule, err_msg, _err_help_msg)) class _CommandNotFound(OSError): """ An exception raised when a command run with run_cmd is not found on the system. """ def run_cmd(cmd): """ Run a command in a subprocess, given as a list of command-line arguments. Returns a ``(returncode, stdout, stderr)`` tuple. """ try: p = sp.Popen(cmd, stdout=sp.PIPE, stderr=sp.PIPE) # XXX: May block if either stdout or stderr fill their buffers; # however for the commands this is currently used for that is # unlikely (they should have very brief output) stdout, stderr = p.communicate() except OSError as e: if DEBUG: raise if e.errno == errno.ENOENT: msg = 'Command not found: `{0}`'.format(' '.join(cmd)) raise _CommandNotFound(msg, cmd) else: raise _AHBootstrapSystemExit( 'An unexpected error occurred when running the ' '`{0}` command:\n{1}'.format(' '.join(cmd), str(e))) # Can fail of the default locale is not configured properly. See # https://github.com/astropy/astropy/issues/2749. For the purposes under # consideration 'latin1' is an acceptable fallback. 
try: stdio_encoding = locale.getdefaultlocale()[1] or 'latin1' except ValueError: # Due to an OSX oddity locale.getdefaultlocale() can also crash # depending on the user's locale/language settings. See: # http://bugs.python.org/issue18378 stdio_encoding = 'latin1' # Unlikely to fail at this point but even then let's be flexible if not isinstance(stdout, _text_type): stdout = stdout.decode(stdio_encoding, 'replace') if not isinstance(stderr, _text_type): stderr = stderr.decode(stdio_encoding, 'replace') return (p.returncode, stdout, stderr) def _next_version(version): """ Given a parsed version from pkg_resources.parse_version, returns a new version string with the next minor version. Examples ======== >>> _next_version(pkg_resources.parse_version('1.2.3')) '1.3.0' """ if hasattr(version, 'base_version'): # New version parsing from setuptools >= 8.0 if version.base_version: parts = version.base_version.split('.') else: parts = [] else: parts = [] for part in version: if part.startswith('*'): break parts.append(part) parts = [int(p) for p in parts] if len(parts) < 3: parts += [0] * (3 - len(parts)) major, minor, micro = parts[:3] return '{0}.{1}.{2}'.format(major, minor + 1, 0) class _DummyFile(object): """A noop writeable object.""" errors = '' # Required for Python 3.x encoding = 'utf-8' def write(self, s): pass def flush(self): pass @contextlib.contextmanager def _silence(): """A context manager that silences sys.stdout and sys.stderr.""" old_stdout = sys.stdout old_stderr = sys.stderr sys.stdout = _DummyFile() sys.stderr = _DummyFile() exception_occurred = False try: yield except: exception_occurred = True # Go ahead and clean up so that exception handling can work normally sys.stdout = old_stdout sys.stderr = old_stderr raise if not exception_occurred: sys.stdout = old_stdout sys.stderr = old_stderr _err_help_msg = """ If the problem persists consider installing astropy_helpers manually using pip (`pip install astropy_helpers`) or by manually downloading the source archive, extracting it, and installing by running `python setup.py install` from the root of the extracted source code. """ class _AHBootstrapSystemExit(SystemExit): def __init__(self, *args): if not args: msg = 'An unknown problem occurred bootstrapping astropy_helpers.' else: msg = args[0] msg += '\n' + _err_help_msg super(_AHBootstrapSystemExit, self).__init__(msg, *args[1:]) BOOTSTRAPPER = _Bootstrapper.main() def use_astropy_helpers(**kwargs): """ Ensure that the `astropy_helpers` module is available and is importable. This supports automatic submodule initialization if astropy_helpers is included in a project as a git submodule, or will download it from PyPI if necessary. Parameters ---------- path : str or None, optional A filesystem path relative to the root of the project's source code that should be added to `sys.path` so that `astropy_helpers` can be imported from that path. If the path is a git submodule it will automatically be initialized and/or updated. The path may also be to a ``.tar.gz`` archive of the astropy_helpers source distribution. In this case the archive is automatically unpacked and made temporarily available on `sys.path` as a ``.egg`` archive. If `None` skip straight to downloading. download_if_needed : bool, optional If the provided filesystem path is not found an attempt will be made to download astropy_helpers from PyPI. It will then be made temporarily available on `sys.path` as a ``.egg`` archive (using the ``setup_requires`` feature of setuptools. 
If the ``--offline`` option is given at the command line the value of this argument is overridden to `False`. index_url : str, optional If provided, use a different URL for the Python package index than the main PyPI server. use_git : bool, optional If `False` no git commands will be used--this effectively disables support for git submodules. If the ``--no-git`` option is given at the command line the value of this argument is overridden to `False`. auto_upgrade : bool, optional By default, when installing a package from a non-development source distribution ah_boostrap will try to automatically check for patch releases to astropy-helpers on PyPI and use the patched version over any bundled versions. Setting this to `False` will disable that functionality. If the ``--offline`` option is given at the command line the value of this argument is overridden to `False`. offline : bool, optional If `False` disable all actions that require an internet connection, including downloading packages from the package index and fetching updates to any git submodule. Defaults to `True`. """ global BOOTSTRAPPER config = BOOTSTRAPPER.config config.update(**kwargs) # Create a new bootstrapper with the updated configuration and run it BOOTSTRAPPER = _Bootstrapper(**config) BOOTSTRAPPER.run() photutils-0.4/astropy_helpers/0000755000214200020070000000000013175654702021030 5ustar lbradleySTSCI\science00000000000000photutils-0.4/astropy_helpers/ah_bootstrap.py0000644000214200020070000010434413175633272024074 0ustar lbradleySTSCI\science00000000000000""" This bootstrap module contains code for ensuring that the astropy_helpers package will be importable by the time the setup.py script runs. It also includes some workarounds to ensure that a recent-enough version of setuptools is being used for the installation. This module should be the first thing imported in the setup.py of distributions that make use of the utilities in astropy_helpers. If the distribution ships with its own copy of astropy_helpers, this module will first attempt to import from the shipped copy. However, it will also check PyPI to see if there are any bug-fix releases on top of the current version that may be useful to get past platform-specific bugs that have been fixed. When running setup.py, use the ``--offline`` command-line option to disable the auto-upgrade checks. When this module is imported or otherwise executed it automatically calls a main function that attempts to read the project's setup.cfg file, which it checks for a configuration section called ``[ah_bootstrap]`` the presences of that section, and options therein, determine the next step taken: If it contains an option called ``auto_use`` with a value of ``True``, it will automatically call the main function of this module called `use_astropy_helpers` (see that function's docstring for full details). Otherwise no further action is taken (however, ``ah_bootstrap.use_astropy_helpers`` may be called manually from within the setup.py script). Additional options in the ``[ah_boostrap]`` section of setup.cfg have the same names as the arguments to `use_astropy_helpers`, and can be used to configure the bootstrap script when ``auto_use = True``. See https://github.com/astropy/astropy-helpers for more details, and for the latest version of this module. 
""" import contextlib import errno import imp import io import locale import os import re import subprocess as sp import sys try: from ConfigParser import ConfigParser, RawConfigParser except ImportError: from configparser import ConfigParser, RawConfigParser if sys.version_info[0] < 3: _str_types = (str, unicode) _text_type = unicode PY3 = False else: _str_types = (str, bytes) _text_type = str PY3 = True # What follows are several import statements meant to deal with install-time # issues with either missing or misbehaving pacakges (including making sure # setuptools itself is installed): # Some pre-setuptools checks to ensure that either distribute or setuptools >= # 0.7 is used (over pre-distribute setuptools) if it is available on the path; # otherwise the latest setuptools will be downloaded and bootstrapped with # ``ez_setup.py``. This used to be included in a separate file called # setuptools_bootstrap.py; but it was combined into ah_bootstrap.py try: import pkg_resources _setuptools_req = pkg_resources.Requirement.parse('setuptools>=0.7') # This may raise a DistributionNotFound in which case no version of # setuptools or distribute is properly installed _setuptools = pkg_resources.get_distribution('setuptools') if _setuptools not in _setuptools_req: # Older version of setuptools; check if we have distribute; again if # this results in DistributionNotFound we want to give up _distribute = pkg_resources.get_distribution('distribute') if _setuptools != _distribute: # It's possible on some pathological systems to have an old version # of setuptools and distribute on sys.path simultaneously; make # sure distribute is the one that's used sys.path.insert(1, _distribute.location) _distribute.activate() imp.reload(pkg_resources) except: # There are several types of exceptions that can occur here; if all else # fails bootstrap and use the bootstrapped version from ez_setup import use_setuptools use_setuptools() # typing as a dependency for 1.6.1+ Sphinx causes issues when imported after # initializing submodule with ah_boostrap.py # See discussion and references in # https://github.com/astropy/astropy-helpers/issues/302 try: import typing # noqa except ImportError: pass # Note: The following import is required as a workaround to # https://github.com/astropy/astropy-helpers/issues/89; if we don't import this # module now, it will get cleaned up after `run_setup` is called, but that will # later cause the TemporaryDirectory class defined in it to stop working when # used later on by setuptools try: import setuptools.py31compat # noqa except ImportError: pass # matplotlib can cause problems if it is imported from within a call of # run_setup(), because in some circumstances it will try to write to the user's # home directory, resulting in a SandboxViolation. See # https://github.com/matplotlib/matplotlib/pull/4165 # Making sure matplotlib, if it is available, is imported early in the setup # process can mitigate this (note importing matplotlib.pyplot has the same # issue) try: import matplotlib matplotlib.use('Agg') import matplotlib.pyplot except: # Ignore if this fails for *any* reason* pass # End compatibility imports... # In case it didn't successfully import before the ez_setup checks import pkg_resources from setuptools import Distribution from setuptools.package_index import PackageIndex from setuptools.sandbox import run_setup from distutils import log from distutils.debug import DEBUG # TODO: Maybe enable checking for a specific version of astropy_helpers? 
DIST_NAME = 'astropy-helpers' PACKAGE_NAME = 'astropy_helpers' # Defaults for other options DOWNLOAD_IF_NEEDED = True INDEX_URL = 'https://pypi.python.org/simple' USE_GIT = True OFFLINE = False AUTO_UPGRADE = True # A list of all the configuration options and their required types CFG_OPTIONS = [ ('auto_use', bool), ('path', str), ('download_if_needed', bool), ('index_url', str), ('use_git', bool), ('offline', bool), ('auto_upgrade', bool) ] class _Bootstrapper(object): """ Bootstrapper implementation. See ``use_astropy_helpers`` for parameter documentation. """ def __init__(self, path=None, index_url=None, use_git=None, offline=None, download_if_needed=None, auto_upgrade=None): if path is None: path = PACKAGE_NAME if not (isinstance(path, _str_types) or path is False): raise TypeError('path must be a string or False') if PY3 and not isinstance(path, _text_type): fs_encoding = sys.getfilesystemencoding() path = path.decode(fs_encoding) # path to unicode self.path = path # Set other option attributes, using defaults where necessary self.index_url = index_url if index_url is not None else INDEX_URL self.offline = offline if offline is not None else OFFLINE # If offline=True, override download and auto-upgrade if self.offline: download_if_needed = False auto_upgrade = False self.download = (download_if_needed if download_if_needed is not None else DOWNLOAD_IF_NEEDED) self.auto_upgrade = (auto_upgrade if auto_upgrade is not None else AUTO_UPGRADE) # If this is a release then the .git directory will not exist so we # should not use git. git_dir_exists = os.path.exists(os.path.join(os.path.dirname(__file__), '.git')) if use_git is None and not git_dir_exists: use_git = False self.use_git = use_git if use_git is not None else USE_GIT # Declared as False by default--later we check if astropy-helpers can be # upgraded from PyPI, but only if not using a source distribution (as in # the case of import from a git submodule) self.is_submodule = False @classmethod def main(cls, argv=None): if argv is None: argv = sys.argv config = cls.parse_config() config.update(cls.parse_command_line(argv)) auto_use = config.pop('auto_use', False) bootstrapper = cls(**config) if auto_use: # Run the bootstrapper, otherwise the setup.py is using the old # use_astropy_helpers() interface, in which case it will run the # bootstrapper manually after reconfiguring it. bootstrapper.run() return bootstrapper @classmethod def parse_config(cls): if not os.path.exists('setup.cfg'): return {} cfg = ConfigParser() try: cfg.read('setup.cfg') except Exception as e: if DEBUG: raise log.error( "Error reading setup.cfg: {0!r}\n{1} will not be " "automatically bootstrapped and package installation may fail." "\n{2}".format(e, PACKAGE_NAME, _err_help_msg)) return {} if not cfg.has_section('ah_bootstrap'): return {} config = {} for option, type_ in CFG_OPTIONS: if not cfg.has_option('ah_bootstrap', option): continue if type_ is bool: value = cfg.getboolean('ah_bootstrap', option) else: value = cfg.get('ah_bootstrap', option) config[option] = value return config @classmethod def parse_command_line(cls, argv=None): if argv is None: argv = sys.argv config = {} # For now we just pop recognized ah_bootstrap options out of the # arg list. This is imperfect; in the unlikely case that a setup.py # custom command or even custom Distribution class defines an argument # of the same name then we will break that. 
However there's a catch22 # here that we can't just do full argument parsing right here, because # we don't yet know *how* to parse all possible command-line arguments. if '--no-git' in argv: config['use_git'] = False argv.remove('--no-git') if '--offline' in argv: config['offline'] = True argv.remove('--offline') return config def run(self): strategies = ['local_directory', 'local_file', 'index'] dist = None # First, remove any previously imported versions of astropy_helpers; # this is necessary for nested installs where one package's installer # is installing another package via setuptools.sandbox.run_setup, as in # the case of setup_requires for key in list(sys.modules): try: if key == PACKAGE_NAME or key.startswith(PACKAGE_NAME + '.'): del sys.modules[key] except AttributeError: # Sometimes mysterious non-string things can turn up in # sys.modules continue # Check to see if the path is a submodule self.is_submodule = self._check_submodule() for strategy in strategies: method = getattr(self, 'get_{0}_dist'.format(strategy)) dist = method() if dist is not None: break else: raise _AHBootstrapSystemExit( "No source found for the {0!r} package; {0} must be " "available and importable as a prerequisite to building " "or installing this package.".format(PACKAGE_NAME)) # This is a bit hacky, but if astropy_helpers was loaded from a # directory/submodule its Distribution object gets a "precedence" of # "DEVELOP_DIST". However, in other cases it gets a precedence of # "EGG_DIST". However, when activing the distribution it will only be # placed early on sys.path if it is treated as an EGG_DIST, so always # do that dist = dist.clone(precedence=pkg_resources.EGG_DIST) # Otherwise we found a version of astropy-helpers, so we're done # Just active the found distribution on sys.path--if we did a # download this usually happens automatically but it doesn't hurt to # do it again # Note: Adding the dist to the global working set also activates it # (makes it importable on sys.path) by default. try: pkg_resources.working_set.add(dist, replace=True) except TypeError: # Some (much) older versions of setuptools do not have the # replace=True option here. These versions are old enough that all # bets may be off anyways, but it's easy enough to work around just # in case... if dist.key in pkg_resources.working_set.by_key: del pkg_resources.working_set.by_key[dist.key] pkg_resources.working_set.add(dist) @property def config(self): """ A `dict` containing the options this `_Bootstrapper` was configured with. """ return dict((optname, getattr(self, optname)) for optname, _ in CFG_OPTIONS if hasattr(self, optname)) def get_local_directory_dist(self): """ Handle importing a vendored package from a subdirectory of the source distribution. 
""" if not os.path.isdir(self.path): return log.info('Attempting to import astropy_helpers from {0} {1!r}'.format( 'submodule' if self.is_submodule else 'directory', self.path)) dist = self._directory_import() if dist is None: log.warn( 'The requested path {0!r} for importing {1} does not ' 'exist, or does not contain a copy of the {1} ' 'package.'.format(self.path, PACKAGE_NAME)) elif self.auto_upgrade and not self.is_submodule: # A version of astropy-helpers was found on the available path, but # check to see if a bugfix release is available on PyPI upgrade = self._do_upgrade(dist) if upgrade is not None: dist = upgrade return dist def get_local_file_dist(self): """ Handle importing from a source archive; this also uses setup_requires but points easy_install directly to the source archive. """ if not os.path.isfile(self.path): return log.info('Attempting to unpack and import astropy_helpers from ' '{0!r}'.format(self.path)) try: dist = self._do_download(find_links=[self.path]) except Exception as e: if DEBUG: raise log.warn( 'Failed to import {0} from the specified archive {1!r}: ' '{2}'.format(PACKAGE_NAME, self.path, str(e))) dist = None if dist is not None and self.auto_upgrade: # A version of astropy-helpers was found on the available path, but # check to see if a bugfix release is available on PyPI upgrade = self._do_upgrade(dist) if upgrade is not None: dist = upgrade return dist def get_index_dist(self): if not self.download: log.warn('Downloading {0!r} disabled.'.format(DIST_NAME)) return None log.warn( "Downloading {0!r}; run setup.py with the --offline option to " "force offline installation.".format(DIST_NAME)) try: dist = self._do_download() except Exception as e: if DEBUG: raise log.warn( 'Failed to download and/or install {0!r} from {1!r}:\n' '{2}'.format(DIST_NAME, self.index_url, str(e))) dist = None # No need to run auto-upgrade here since we've already presumably # gotten the most up-to-date version from the package index return dist def _directory_import(self): """ Import astropy_helpers from the given path, which will be added to sys.path. Must return True if the import succeeded, and False otherwise. """ # Return True on success, False on failure but download is allowed, and # otherwise raise SystemExit path = os.path.abspath(self.path) # Use an empty WorkingSet rather than the man # pkg_resources.working_set, since on older versions of setuptools this # will invoke a VersionConflict when trying to install an upgrade ws = pkg_resources.WorkingSet([]) ws.add_entry(path) dist = ws.by_key.get(DIST_NAME) if dist is None: # We didn't find an egg-info/dist-info in the given path, but if a # setup.py exists we can generate it setup_py = os.path.join(path, 'setup.py') if os.path.isfile(setup_py): with _silence(): run_setup(os.path.join(path, 'setup.py'), ['egg_info']) for dist in pkg_resources.find_distributions(path, True): # There should be only one... 
return dist return dist def _do_download(self, version='', find_links=None): if find_links: allow_hosts = '' index_url = None else: allow_hosts = None index_url = self.index_url # Annoyingly, setuptools will not handle other arguments to # Distribution (such as options) before handling setup_requires, so it # is not straightforward to programmatically augment the arguments which # are passed to easy_install class _Distribution(Distribution): def get_option_dict(self, command_name): opts = Distribution.get_option_dict(self, command_name) if command_name == 'easy_install': if find_links is not None: opts['find_links'] = ('setup script', find_links) if index_url is not None: opts['index_url'] = ('setup script', index_url) if allow_hosts is not None: opts['allow_hosts'] = ('setup script', allow_hosts) return opts if version: req = '{0}=={1}'.format(DIST_NAME, version) else: req = DIST_NAME attrs = {'setup_requires': [req]} try: if DEBUG: _Distribution(attrs=attrs) else: with _silence(): _Distribution(attrs=attrs) # If the setup_requires succeeded it will have added the new dist to # the main working_set return pkg_resources.working_set.by_key.get(DIST_NAME) except Exception as e: if DEBUG: raise msg = 'Error retrieving {0} from {1}:\n{2}' if find_links: source = find_links[0] elif index_url != INDEX_URL: source = index_url else: source = 'PyPI' raise Exception(msg.format(DIST_NAME, source, repr(e))) def _do_upgrade(self, dist): # Build up a requirement for a higher bugfix release but a lower minor # release (so API compatibility is guaranteed) next_version = _next_version(dist.parsed_version) req = pkg_resources.Requirement.parse( '{0}>{1},<{2}'.format(DIST_NAME, dist.version, next_version)) package_index = PackageIndex(index_url=self.index_url) upgrade = package_index.obtain(req) if upgrade is not None: return self._do_download(version=upgrade.version) def _check_submodule(self): """ Check if the given path is a git submodule. See the docstrings for ``_check_submodule_using_git`` and ``_check_submodule_no_git`` for further details. """ if (self.path is None or (os.path.exists(self.path) and not os.path.isdir(self.path))): return False if self.use_git: return self._check_submodule_using_git() else: return self._check_submodule_no_git() def _check_submodule_using_git(self): """ Check if the given path is a git submodule. If so, attempt to initialize and/or update the submodule if needed. This function makes calls to the ``git`` command in subprocesses. The ``_check_submodule_no_git`` option uses pure Python to check if the given path looks like a git submodule, but it cannot perform updates. """ cmd = ['git', 'submodule', 'status', '--', self.path] try: log.info('Running `{0}`; use the --no-git option to disable git ' 'commands'.format(' '.join(cmd))) returncode, stdout, stderr = run_cmd(cmd) except _CommandNotFound: # The git command simply wasn't found; this is most likely the # case on user systems that don't have git and are simply # trying to install the package from PyPI or a source # distribution. Silently ignore this case and simply don't try # to use submodules return False stderr = stderr.strip() if returncode != 0 and stderr: # Unfortunately the return code alone cannot be relied on, as # earlier versions of git returned 0 even if the requested submodule # does not exist # This is a warning that occurs in perl (from running git submodule) # which only occurs with a malformatted locale setting which can # happen sometimes on OSX. 
See again # https://github.com/astropy/astropy/issues/2749 perl_warning = ('perl: warning: Falling back to the standard locale ' '("C").') if not stderr.strip().endswith(perl_warning): # Some other unknown error condition occurred log.warn('git submodule command failed ' 'unexpectedly:\n{0}'.format(stderr)) return False # Output of `git submodule status` is as follows: # # 1: Status indicator: '-' for submodule is uninitialized, '+' if # submodule is initialized but is not at the commit currently indicated # in .gitmodules (and thus needs to be updated), or 'U' if the # submodule is in an unstable state (i.e. has merge conflicts) # # 2. SHA-1 hash of the current commit of the submodule (we don't really # need this information but it's useful for checking that the output is # correct) # # 3. The output of `git describe` for the submodule's current commit # hash (this includes for example what branches the commit is on) but # only if the submodule is initialized. We ignore this information for # now _git_submodule_status_re = re.compile( '^(?P[+-U ])(?P[0-9a-f]{40}) ' '(?P\S+)( .*)?$') # The stdout should only contain one line--the status of the # requested submodule m = _git_submodule_status_re.match(stdout) if m: # Yes, the path *is* a git submodule self._update_submodule(m.group('submodule'), m.group('status')) return True else: log.warn( 'Unexpected output from `git submodule status`:\n{0}\n' 'Will attempt import from {1!r} regardless.'.format( stdout, self.path)) return False def _check_submodule_no_git(self): """ Like ``_check_submodule_using_git``, but simply parses the .gitmodules file to determine if the supplied path is a git submodule, and does not exec any subprocesses. This can only determine if a path is a submodule--it does not perform updates, etc. This function may need to be updated if the format of the .gitmodules file is changed between git versions. """ gitmodules_path = os.path.abspath('.gitmodules') if not os.path.isfile(gitmodules_path): return False # This is a minimal reader for gitconfig-style files. It handles a few of # the quirks that make gitconfig files incompatible with ConfigParser-style # files, but does not support the full gitconfig syntax (just enough # needed to read a .gitmodules file). 
gitmodules_fileobj = io.StringIO() # Must use io.open for cross-Python-compatible behavior wrt unicode with io.open(gitmodules_path) as f: for line in f: # gitconfig files are more flexible with leading whitespace; just # go ahead and remove it line = line.lstrip() # comments can start with either # or ; if line and line[0] in (':', ';'): continue gitmodules_fileobj.write(line) gitmodules_fileobj.seek(0) cfg = RawConfigParser() try: cfg.readfp(gitmodules_fileobj) except Exception as exc: log.warn('Malformatted .gitmodules file: {0}\n' '{1} cannot be assumed to be a git submodule.'.format( exc, self.path)) return False for section in cfg.sections(): if not cfg.has_option(section, 'path'): continue submodule_path = cfg.get(section, 'path').rstrip(os.sep) if submodule_path == self.path.rstrip(os.sep): return True return False def _update_submodule(self, submodule, status): if status == ' ': # The submodule is up to date; no action necessary return elif status == '-': if self.offline: raise _AHBootstrapSystemExit( "Cannot initialize the {0} submodule in --offline mode; " "this requires being able to clone the submodule from an " "online repository.".format(submodule)) cmd = ['update', '--init'] action = 'Initializing' elif status == '+': cmd = ['update'] action = 'Updating' if self.offline: cmd.append('--no-fetch') elif status == 'U': raise _AHBootstrapSystemExit( 'Error: Submodule {0} contains unresolved merge conflicts. ' 'Please complete or abandon any changes in the submodule so that ' 'it is in a usable state, then try again.'.format(submodule)) else: log.warn('Unknown status {0!r} for git submodule {1!r}. Will ' 'attempt to use the submodule as-is, but try to ensure ' 'that the submodule is in a clean state and contains no ' 'conflicts or errors.\n{2}'.format(status, submodule, _err_help_msg)) return err_msg = None cmd = ['git', 'submodule'] + cmd + ['--', submodule] log.warn('{0} {1} submodule with: `{2}`'.format( action, submodule, ' '.join(cmd))) try: log.info('Running `{0}`; use the --no-git option to disable git ' 'commands'.format(' '.join(cmd))) returncode, stdout, stderr = run_cmd(cmd) except OSError as e: err_msg = str(e) else: if returncode != 0: err_msg = stderr if err_msg is not None: log.warn('An unexpected error occurred updating the git submodule ' '{0!r}:\n{1}\n{2}'.format(submodule, err_msg, _err_help_msg)) class _CommandNotFound(OSError): """ An exception raised when a command run with run_cmd is not found on the system. """ def run_cmd(cmd): """ Run a command in a subprocess, given as a list of command-line arguments. Returns a ``(returncode, stdout, stderr)`` tuple. """ try: p = sp.Popen(cmd, stdout=sp.PIPE, stderr=sp.PIPE) # XXX: May block if either stdout or stderr fill their buffers; # however for the commands this is currently used for that is # unlikely (they should have very brief output) stdout, stderr = p.communicate() except OSError as e: if DEBUG: raise if e.errno == errno.ENOENT: msg = 'Command not found: `{0}`'.format(' '.join(cmd)) raise _CommandNotFound(msg, cmd) else: raise _AHBootstrapSystemExit( 'An unexpected error occurred when running the ' '`{0}` command:\n{1}'.format(' '.join(cmd), str(e))) # Can fail of the default locale is not configured properly. See # https://github.com/astropy/astropy/issues/2749. For the purposes under # consideration 'latin1' is an acceptable fallback. 
try: stdio_encoding = locale.getdefaultlocale()[1] or 'latin1' except ValueError: # Due to an OSX oddity locale.getdefaultlocale() can also crash # depending on the user's locale/language settings. See: # http://bugs.python.org/issue18378 stdio_encoding = 'latin1' # Unlikely to fail at this point but even then let's be flexible if not isinstance(stdout, _text_type): stdout = stdout.decode(stdio_encoding, 'replace') if not isinstance(stderr, _text_type): stderr = stderr.decode(stdio_encoding, 'replace') return (p.returncode, stdout, stderr) def _next_version(version): """ Given a parsed version from pkg_resources.parse_version, returns a new version string with the next minor version. Examples ======== >>> _next_version(pkg_resources.parse_version('1.2.3')) '1.3.0' """ if hasattr(version, 'base_version'): # New version parsing from setuptools >= 8.0 if version.base_version: parts = version.base_version.split('.') else: parts = [] else: parts = [] for part in version: if part.startswith('*'): break parts.append(part) parts = [int(p) for p in parts] if len(parts) < 3: parts += [0] * (3 - len(parts)) major, minor, micro = parts[:3] return '{0}.{1}.{2}'.format(major, minor + 1, 0) class _DummyFile(object): """A noop writeable object.""" errors = '' # Required for Python 3.x encoding = 'utf-8' def write(self, s): pass def flush(self): pass @contextlib.contextmanager def _silence(): """A context manager that silences sys.stdout and sys.stderr.""" old_stdout = sys.stdout old_stderr = sys.stderr sys.stdout = _DummyFile() sys.stderr = _DummyFile() exception_occurred = False try: yield except: exception_occurred = True # Go ahead and clean up so that exception handling can work normally sys.stdout = old_stdout sys.stderr = old_stderr raise if not exception_occurred: sys.stdout = old_stdout sys.stderr = old_stderr _err_help_msg = """ If the problem persists consider installing astropy_helpers manually using pip (`pip install astropy_helpers`) or by manually downloading the source archive, extracting it, and installing by running `python setup.py install` from the root of the extracted source code. """ class _AHBootstrapSystemExit(SystemExit): def __init__(self, *args): if not args: msg = 'An unknown problem occurred bootstrapping astropy_helpers.' else: msg = args[0] msg += '\n' + _err_help_msg super(_AHBootstrapSystemExit, self).__init__(msg, *args[1:]) BOOTSTRAPPER = _Bootstrapper.main() def use_astropy_helpers(**kwargs): """ Ensure that the `astropy_helpers` module is available and is importable. This supports automatic submodule initialization if astropy_helpers is included in a project as a git submodule, or will download it from PyPI if necessary. Parameters ---------- path : str or None, optional A filesystem path relative to the root of the project's source code that should be added to `sys.path` so that `astropy_helpers` can be imported from that path. If the path is a git submodule it will automatically be initialized and/or updated. The path may also be to a ``.tar.gz`` archive of the astropy_helpers source distribution. In this case the archive is automatically unpacked and made temporarily available on `sys.path` as a ``.egg`` archive. If `None` skip straight to downloading. download_if_needed : bool, optional If the provided filesystem path is not found an attempt will be made to download astropy_helpers from PyPI. It will then be made temporarily available on `sys.path` as a ``.egg`` archive (using the ``setup_requires`` feature of setuptools. 
If the ``--offline`` option is given at the command line the value of this argument is overridden to `False`. index_url : str, optional If provided, use a different URL for the Python package index than the main PyPI server. use_git : bool, optional If `False` no git commands will be used--this effectively disables support for git submodules. If the ``--no-git`` option is given at the command line the value of this argument is overridden to `False`. auto_upgrade : bool, optional By default, when installing a package from a non-development source distribution ah_boostrap will try to automatically check for patch releases to astropy-helpers on PyPI and use the patched version over any bundled versions. Setting this to `False` will disable that functionality. If the ``--offline`` option is given at the command line the value of this argument is overridden to `False`. offline : bool, optional If `False` disable all actions that require an internet connection, including downloading packages from the package index and fetching updates to any git submodule. Defaults to `True`. """ global BOOTSTRAPPER config = BOOTSTRAPPER.config config.update(**kwargs) # Create a new bootstrapper with the updated configuration and run it BOOTSTRAPPER = _Bootstrapper(**config) BOOTSTRAPPER.run() photutils-0.4/astropy_helpers/astropy_helpers/0000755000214200020070000000000013175654702024253 5ustar lbradleySTSCI\science00000000000000photutils-0.4/astropy_helpers/astropy_helpers/__init__.py0000644000214200020070000000345412632702120026353 0ustar lbradleySTSCI\science00000000000000try: from .version import version as __version__ from .version import githash as __githash__ except ImportError: __version__ = '' __githash__ = '' # If we've made it as far as importing astropy_helpers, we don't need # ah_bootstrap in sys.modules anymore. Getting rid of it is actually necessary # if the package we're installing has a setup_requires of another package that # uses astropy_helpers (and possibly a different version at that) # See https://github.com/astropy/astropy/issues/3541 import sys if 'ah_bootstrap' in sys.modules: del sys.modules['ah_bootstrap'] # Note, this is repeated from ah_bootstrap.py, but is here too in case this # astropy-helpers was upgraded to from an older version that did not have this # check in its ah_bootstrap. # matplotlib can cause problems if it is imported from within a call of # run_setup(), because in some circumstances it will try to write to the user's # home directory, resulting in a SandboxViolation. 
See # https://github.com/matplotlib/matplotlib/pull/4165 # Making sure matplotlib, if it is available, is imported early in the setup # process can mitigate this (note importing matplotlib.pyplot has the same # issue) try: import matplotlib matplotlib.use('Agg') import matplotlib.pyplot except: # Ignore if this fails for *any* reason* pass import os # Ensure that all module-level code in astropy or other packages know that # we're in setup mode: if ('__main__' in sys.modules and hasattr(sys.modules['__main__'], '__file__')): filename = os.path.basename(sys.modules['__main__'].__file__) if filename.rstrip('co') == 'setup.py': if sys.version_info[0] >= 3: import builtins else: import __builtin__ as builtins builtins._ASTROPY_SETUP_ = True del filename photutils-0.4/astropy_helpers/astropy_helpers/commands/0000755000214200020070000000000013175654702026054 5ustar lbradleySTSCI\science00000000000000photutils-0.4/astropy_helpers/astropy_helpers/commands/__init__.py0000644000214200020070000000000012477406127030153 0ustar lbradleySTSCI\science00000000000000photutils-0.4/astropy_helpers/astropy_helpers/commands/_dummy.py0000644000214200020070000000557412632702254027724 0ustar lbradleySTSCI\science00000000000000""" Provides a base class for a 'dummy' setup.py command that has no functionality (probably due to a missing requirement). This dummy command can raise an exception when it is run, explaining to the user what dependencies must be met to use this command. The reason this is at all tricky is that we want the command to be able to provide this message even when the user passes arguments to the command. If we don't know ahead of time what arguments the command can take, this is difficult, because distutils does not allow unknown arguments to be passed to a setup.py command. This hacks around that restriction to provide a useful error message even when a user passes arguments to the dummy implementation of a command. Use this like: try: from some_dependency import SetupCommand except ImportError: from ._dummy import _DummyCommand class SetupCommand(_DummyCommand): description = \ 'Implementation of SetupCommand from some_dependency; ' 'some_dependency must be installed to run this command' # This is the message that will be raised when a user tries to # run this command--define it as a class attribute. error_msg = \ "The 'setup_command' command requires the some_dependency " "package to be installed and importable." """ import sys from setuptools import Command from distutils.errors import DistutilsArgError from textwrap import dedent class _DummyCommandMeta(type): """ Causes an exception to be raised on accessing attributes of a command class so that if ``./setup.py command_name`` is run with additional command-line options we can provide a useful error message instead of the default that tells users the options are unrecognized. 
""" def __init__(cls, name, bases, members): if bases == (Command, object): # This is the _DummyCommand base class, presumably return if not hasattr(cls, 'description'): raise TypeError( "_DummyCommand subclass must have a 'description' " "attribute.") if not hasattr(cls, 'error_msg'): raise TypeError( "_DummyCommand subclass must have an 'error_msg' " "attribute.") def __getattribute__(cls, attr): if attr in ('description', 'error_msg'): # Allow cls.description to work so that `./setup.py # --help-commands` still works return super(_DummyCommandMeta, cls).__getattribute__(attr) raise DistutilsArgError(cls.error_msg) if sys.version_info[0] < 3: exec(dedent(""" class _DummyCommand(Command, object): __metaclass__ = _DummyCommandMeta """)) else: exec(dedent(""" class _DummyCommand(Command, object, metaclass=_DummyCommandMeta): pass """)) photutils-0.4/astropy_helpers/astropy_helpers/commands/_test_compat.py0000644000214200020070000002671213175633272031116 0ustar lbradleySTSCI\science00000000000000""" Old implementation of ``./setup.py test`` command. This has been moved to astropy.tests as of Astropy v1.1.0, but a copy of the implementation is kept here for backwards compatibility. """ from __future__ import absolute_import, unicode_literals import inspect import os import shutil import subprocess import sys import tempfile from setuptools import Command from ..compat import _fix_user_options PY3 = sys.version_info[0] == 3 class AstropyTest(Command, object): description = 'Run the tests for this package' user_options = [ ('package=', 'P', "The name of a specific package to test, e.g. 'io.fits' or 'utils'. " "If nothing is specified, all default tests are run."), ('test-path=', 't', 'Specify a test location by path. If a relative path to a .py file, ' 'it is relative to the built package, so e.g., a leading "astropy/" ' 'is necessary. If a relative path to a .rst file, it is relative to ' 'the directory *below* the --docs-path directory, so a leading ' '"docs/" is usually necessary. May also be an absolute path.'), ('verbose-results', 'V', 'Turn on verbose output from pytest.'), ('plugins=', 'p', 'Plugins to enable when running pytest.'), ('pastebin=', 'b', "Enable pytest pastebin output. Either 'all' or 'failed'."), ('args=', 'a', 'Additional arguments to be passed to pytest.'), ('remote-data', 'R', 'Run tests that download remote data.'), ('pep8', '8', 'Enable PEP8 checking and disable regular tests. ' 'Requires the pytest-pep8 plugin.'), ('pdb', 'd', 'Start the interactive Python debugger on errors.'), ('coverage', 'c', 'Create a coverage report. Requires the coverage package.'), ('open-files', 'o', 'Fail if any tests leave files open. Requires the ' 'psutil package.'), ('parallel=', 'j', 'Run the tests in parallel on the specified number of ' 'CPUs. If negative, all the cores on the machine will be ' 'used. Requires the pytest-xdist plugin.'), ('docs-path=', None, 'The path to the documentation .rst files. If not provided, and ' 'the current directory contains a directory called "docs", that ' 'will be used.'), ('skip-docs', None, "Don't test the documentation .rst files."), ('repeat=', None, 'How many times to repeat each test (can be used to check for ' 'sporadic failures).'), ('temp-root=', None, 'The root directory in which to create the temporary testing files. ' 'If unspecified the system default is used (e.g. 
/tmp) as explained ' 'in the documentation for tempfile.mkstemp.') ] user_options = _fix_user_options(user_options) package_name = '' def initialize_options(self): self.package = None self.test_path = None self.verbose_results = False self.plugins = None self.pastebin = None self.args = None self.remote_data = False self.pep8 = False self.pdb = False self.coverage = False self.open_files = False self.parallel = 0 self.docs_path = None self.skip_docs = False self.repeat = None self.temp_root = None def finalize_options(self): # Normally we would validate the options here, but that's handled in # run_tests pass # Most of the test runner arguments have the same name as attributes on # this command class, with one exception (for now) _test_runner_arg_attr_map = { 'verbose': 'verbose_results' } def generate_testing_command(self): """ Build a Python script to run the tests. """ cmd_pre = '' # Commands to run before the test function cmd_post = '' # Commands to run after the test function if self.coverage: pre, post = self._generate_coverage_commands() cmd_pre += pre cmd_post += post def get_attr(arg): attr = self._test_runner_arg_attr_map.get(arg, arg) return getattr(self, attr) test_args = filter(lambda arg: hasattr(self, arg), self._get_test_runner_args()) test_args = ', '.join('{0}={1!r}'.format(arg, get_attr(arg)) for arg in test_args) if PY3: set_flag = "import builtins; builtins._ASTROPY_TEST_ = True" else: set_flag = "import __builtin__; __builtin__._ASTROPY_TEST_ = True" cmd = ('{cmd_pre}{0}; import {1.package_name}, sys; result = ' '{1.package_name}.test({test_args}); {cmd_post}' 'sys.exit(result)') return cmd.format(set_flag, self, cmd_pre=cmd_pre, cmd_post=cmd_post, test_args=test_args) def _validate_required_deps(self): """ This method checks that any required modules are installed before running the tests. """ try: import astropy # noqa except ImportError: raise ImportError( "The 'test' command requires the astropy package to be " "installed and importable.") def run(self): """ Run the tests! """ # Ensure there is a doc path if self.docs_path is None: if os.path.exists('docs'): self.docs_path = os.path.abspath('docs') # Build a testing install of the package self._build_temp_install() # Ensure all required packages are installed self._validate_required_deps() # Run everything in a try: finally: so that the tmp dir gets deleted. try: # Construct this modules testing command cmd = self.generate_testing_command() # Run the tests in a subprocess--this is necessary since # new extension modules may have appeared, and this is the # easiest way to set up a new environment # On Python 3.x prior to 3.3, the creation of .pyc files # is not atomic. py.test jumps through some hoops to make # this work by parsing import statements and carefully # importing files atomically. However, it can't detect # when __import__ is used, so its carefulness still fails. # The solution here (admittedly a bit of a hack), is to # turn off the generation of .pyc files altogether by # passing the `-B` switch to `python`. This does mean # that each core will have to compile .py file to bytecode # itself, rather than getting lucky and borrowing the work # already done by another core. Compilation is an # insignificant fraction of total testing time, though, so # it's probably not worth worrying about. 
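# As a rough sketch (the package name ``mypkg`` and the exact keyword
# arguments are illustrative only), the command string produced by
# generate_testing_command() above is a one-liner along the lines of:
#
#     import builtins; builtins._ASTROPY_TEST_ = True; \
#     import mypkg, sys; \
#     result = mypkg.test(package=None, test_path=None, verbose=False); \
#     sys.exit(result)
#
# so the subprocess below simply executes the built package's own test()
# entry point with the options collected by this command.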
retcode = subprocess.call([sys.executable, '-B', '-c', cmd], cwd=self.testing_path, close_fds=False) finally: # Remove temporary directory shutil.rmtree(self.tmp_dir) raise SystemExit(retcode) def _build_temp_install(self): """ Build the package and copy the build to a temporary directory for the purposes of testing this avoids creating pyc and __pycache__ directories inside the build directory """ self.reinitialize_command('build', inplace=True) self.run_command('build') build_cmd = self.get_finalized_command('build') new_path = os.path.abspath(build_cmd.build_lib) # On OSX the default path for temp files is under /var, but in most # cases on OSX /var is actually a symlink to /private/var; ensure we # dereference that link, because py.test is very sensitive to relative # paths... tmp_dir = tempfile.mkdtemp(prefix=self.package_name + '-test-', dir=self.temp_root) self.tmp_dir = os.path.realpath(tmp_dir) self.testing_path = os.path.join(self.tmp_dir, os.path.basename(new_path)) shutil.copytree(new_path, self.testing_path) new_docs_path = os.path.join(self.tmp_dir, os.path.basename(self.docs_path)) shutil.copytree(self.docs_path, new_docs_path) self.docs_path = new_docs_path shutil.copy('setup.cfg', self.tmp_dir) def _generate_coverage_commands(self): """ This method creates the post and pre commands if coverage is to be generated """ if self.parallel != 0: raise ValueError( "--coverage can not be used with --parallel") try: import coverage # noqa except ImportError: raise ImportError( "--coverage requires that the coverage package is " "installed.") # Don't use get_pkg_data_filename here, because it # requires importing astropy.config and thus screwing # up coverage results for those packages. coveragerc = os.path.join( self.testing_path, self.package_name, 'tests', 'coveragerc') # We create a coveragerc that is specific to the version # of Python we're running, so that we can mark branches # as being specifically for Python 2 or Python 3 with open(coveragerc, 'r') as fd: coveragerc_content = fd.read() if PY3: ignore_python_version = '2' else: ignore_python_version = '3' coveragerc_content = coveragerc_content.replace( "{ignore_python_version}", ignore_python_version).replace( "{packagename}", self.package_name) tmp_coveragerc = os.path.join(self.tmp_dir, 'coveragerc') with open(tmp_coveragerc, 'wb') as tmp: tmp.write(coveragerc_content.encode('utf-8')) cmd_pre = ( 'import coverage; ' 'cov = coverage.coverage(data_file="{0}", config_file="{1}"); ' 'cov.start();'.format( os.path.abspath(".coverage"), tmp_coveragerc)) cmd_post = ( 'cov.stop(); ' 'from astropy.tests.helper import _save_coverage; ' '_save_coverage(cov, result, "{0}", "{1}");'.format( os.path.abspath('.'), self.testing_path)) return cmd_pre, cmd_post def _get_test_runner_args(self): """ A hack to determine what arguments are supported by the package's test() function. In the future there should be a more straightforward API to determine this (really it should be determined by the ``TestRunner`` class for whatever version of Astropy is in use). 
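For example, if a (hypothetical) package defines
``def test(package=None, test_path=None, verbose=False)``, this method
returns ``['package', 'test_path', 'verbose']``.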
""" if PY3: import builtins builtins._ASTROPY_TEST_ = True else: import __builtin__ __builtin__._ASTROPY_TEST_ = True try: pkg = __import__(self.package_name) if not hasattr(pkg, 'test'): raise ImportError( 'package {0} does not have a {0}.test() function as ' 'required by the Astropy test runner'.format(self.package_name)) argspec = inspect.getargspec(pkg.test) return argspec.args finally: if PY3: del builtins._ASTROPY_TEST_ else: del __builtin__._ASTROPY_TEST_ photutils-0.4/astropy_helpers/astropy_helpers/commands/build_ext.py0000644000214200020070000004636413175633272030421 0ustar lbradleySTSCI\science00000000000000import errno import os import re import shlex import shutil import subprocess import sys import textwrap from distutils import log, ccompiler, sysconfig from distutils.core import Extension from distutils.ccompiler import get_default_compiler from setuptools.command.build_ext import build_ext as SetuptoolsBuildExt from ..utils import get_numpy_include_path, invalidate_caches, classproperty from ..version_helpers import get_pkg_version_module def should_build_with_cython(package, release=None): """Returns the previously used Cython version (or 'unknown' if not previously built) if Cython should be used to build extension modules from pyx files. If the ``release`` parameter is not specified an attempt is made to determine the release flag from `astropy.version`. """ try: version_module = __import__(package + '.cython_version', fromlist=['release', 'cython_version']) except ImportError: version_module = None if release is None and version_module is not None: try: release = version_module.release except AttributeError: pass try: cython_version = version_module.cython_version except AttributeError: cython_version = 'unknown' # Only build with Cython if, of course, Cython is installed, we're in a # development version (i.e. not release) or the Cython-generated source # files haven't been created yet (cython_version == 'unknown'). The latter # case can happen even when release is True if checking out a release tag # from the repository have_cython = False try: import Cython # noqa have_cython = True except ImportError: pass if have_cython and (not release or cython_version == 'unknown'): return cython_version else: return False _compiler_versions = {} def get_compiler_version(compiler): if compiler in _compiler_versions: return _compiler_versions[compiler] # Different flags to try to get the compiler version # TODO: It might be worth making this configurable to support # arbitrary odd compilers; though all bets may be off in such # cases anyway flags = ['--version', '--Version', '-version', '-Version', '-v', '-V'] def try_get_version(flag): process = subprocess.Popen( shlex.split(compiler, posix=('win' not in sys.platform)) + [flag], stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout, stderr = process.communicate() if process.returncode != 0: return 'unknown' output = stdout.strip().decode('latin-1') # Safest bet if not output: # Some compilers return their version info on stderr output = stderr.strip().decode('latin-1') if not output: output = 'unknown' return output for flag in flags: version = try_get_version(flag) if version != 'unknown': break # Cache results to speed up future calls _compiler_versions[compiler] = version return version # TODO: I think this can be reworked without having to create the class # programmatically. 
def generate_build_ext_command(packagename, release): """ Creates a custom 'build_ext' command that allows for manipulating some of the C extension options at build time. We use a function to build the class since the base class for build_ext may be different depending on certain build-time parameters (for example, we may use Cython's build_ext instead of the default version in distutils). Uses the default distutils.command.build_ext by default. """ class build_ext(SetuptoolsBuildExt, object): package_name = packagename is_release = release _user_options = SetuptoolsBuildExt.user_options[:] _boolean_options = SetuptoolsBuildExt.boolean_options[:] _help_options = SetuptoolsBuildExt.help_options[:] force_rebuild = False _broken_compiler_mapping = [ ('i686-apple-darwin[0-9]*-llvm-gcc-4.2', 'clang') ] # Warning: Spaghetti code ahead. # During setup.py, the setup_helpers module needs the ability to add # items to a command's user_options list. At this stage we don't know # whether or not we can build with Cython, and so don't know for sure # what base class will be used for build_ext; nevertheless we want to # be able to provide a list to add options into. # # Later, once setup() has been called we should have all build # dependencies included via setup_requires available. distutils needs # to be able to access the user_options as a *class* attribute before # the class has been initialized, but we do need to be able to # enumerate the options for the correct base class at that point @classproperty def user_options(cls): from distutils import core if core._setup_distribution is None: # We haven't gotten into setup() yet, and the Distribution has # not yet been initialized return cls._user_options return cls._final_class.user_options @classproperty def boolean_options(cls): # Similar to user_options above from distutils import core if core._setup_distribution is None: # We haven't gotten into setup() yet, and the Distribution has # not yet been initialized return cls._boolean_options return cls._final_class.boolean_options @classproperty def help_options(cls): # Similar to user_options above from distutils import core if core._setup_distribution is None: # We haven't gotten into setup() yet, and the Distribution has # not yet been initialized return cls._help_options return cls._final_class.help_options @classproperty(lazy=True) def _final_class(cls): """ Late determination of what the build_ext base class should be, depending on whether or not Cython is available. 
""" uses_cython = should_build_with_cython(cls.package_name, cls.is_release) if uses_cython: # We need to decide late on whether or not to use Cython's # build_ext (since Cython may not be available earlier in the # setup.py if it was brought in via setup_requires) try: from Cython.Distutils.old_build_ext import old_build_ext as base_cls except ImportError: from Cython.Distutils import build_ext as base_cls else: base_cls = SetuptoolsBuildExt # Create and return an instance of a new class based on this class # using one of the above possible base classes def merge_options(attr): base = getattr(base_cls, attr) ours = getattr(cls, '_' + attr) all_base = set(opt[0] for opt in base) return base + [opt for opt in ours if opt[0] not in all_base] boolean_options = (base_cls.boolean_options + [opt for opt in cls._boolean_options if opt not in base_cls.boolean_options]) members = dict(cls.__dict__) members.update({ 'user_options': merge_options('user_options'), 'help_options': merge_options('help_options'), 'boolean_options': boolean_options, 'uses_cython': uses_cython, }) # Update the base class for the original build_ext command build_ext.__bases__ = (base_cls, object) # Create a new class for the existing class, but now with the # appropriate base class depending on whether or not to use Cython. # Ensure that object is one of the bases to make a new-style class. return type(cls.__name__, (build_ext,), members) def __new__(cls, *args, **kwargs): # By the time the command is actually instantialized, the # Distribution instance for the build has been instantiated, which # means setup_requires has been processed--now we can determine # what base class we can use for the actual build, and return an # instance of a build_ext command that uses that base class (right # now the options being Cython.Distutils.build_ext, or the stock # setuptools build_ext) new_cls = super(build_ext, cls._final_class).__new__( cls._final_class) # Since the new cls is not a subclass of the original cls, we must # manually call its __init__ new_cls.__init__(*args, **kwargs) return new_cls def finalize_options(self): # Add a copy of the _compiler.so module as well, but only if there # are in fact C modules to compile (otherwise there's no reason to # include a record of the compiler used) # Note, self.extensions may not be set yet, but # self.distribution.ext_modules is where any extension modules # passed to setup() can be found self._adjust_compiler() extensions = self.distribution.ext_modules if extensions: build_py = self.get_finalized_command('build_py') package_dir = build_py.get_package_dir(packagename) src_path = os.path.relpath( os.path.join(os.path.dirname(__file__), 'src')) shutil.copy(os.path.join(src_path, 'compiler.c'), os.path.join(package_dir, '_compiler.c')) ext = Extension(self.package_name + '._compiler', [os.path.join(package_dir, '_compiler.c')]) extensions.insert(0, ext) super(build_ext, self).finalize_options() # Generate if self.uses_cython: try: from Cython import __version__ as cython_version except ImportError: # This shouldn't happen if we made it this far cython_version = None if (cython_version is not None and cython_version != self.uses_cython): self.force_rebuild = True # Update the used cython version self.uses_cython = cython_version # Regardless of the value of the '--force' option, force a rebuild # if the debug flag changed from the last build if self.force_rebuild: self.force = True def run(self): # For extensions that require 'numpy' in their include dirs, # replace 'numpy' with the 
actual paths np_include = get_numpy_include_path() for extension in self.extensions: if 'numpy' in extension.include_dirs: idx = extension.include_dirs.index('numpy') extension.include_dirs.insert(idx, np_include) extension.include_dirs.remove('numpy') self._check_cython_sources(extension) super(build_ext, self).run() # Update cython_version.py if building with Cython try: cython_version = get_pkg_version_module( packagename, fromlist=['cython_version'])[0] except (AttributeError, ImportError): cython_version = 'unknown' if self.uses_cython and self.uses_cython != cython_version: build_py = self.get_finalized_command('build_py') package_dir = build_py.get_package_dir(packagename) cython_py = os.path.join(package_dir, 'cython_version.py') with open(cython_py, 'w') as f: f.write('# Generated file; do not modify\n') f.write('cython_version = {0!r}\n'.format(self.uses_cython)) if os.path.isdir(self.build_lib): # The build/lib directory may not exist if the build_py # command was not previously run, which may sometimes be # the case self.copy_file(cython_py, os.path.join(self.build_lib, cython_py), preserve_mode=False) invalidate_caches() def _adjust_compiler(self): """ This function detects broken compilers and switches to another. If the environment variable CC is explicitly set, or a compiler is specified on the commandline, no override is performed -- the purpose here is to only override a default compiler. The specific compilers with problems are: * The default compiler in XCode-4.2, llvm-gcc-4.2, segfaults when compiling wcslib. The set of broken compilers can be updated by changing the compiler_mapping variable. It is a list of 2-tuples where the first in the pair is a regular expression matching the version of the broken compiler, and the second is the compiler to change to. """ if 'CC' in os.environ: # Check that CC is not set to llvm-gcc-4.2 c_compiler = os.environ['CC'] try: version = get_compiler_version(c_compiler) except OSError: msg = textwrap.dedent( """ The C compiler set by the CC environment variable: {compiler:s} cannot be found or executed. """.format(compiler=c_compiler)) log.warn(msg) sys.exit(1) for broken, fixed in self._broken_compiler_mapping: if re.match(broken, version): msg = textwrap.dedent( """Compiler specified by CC environment variable ({compiler:s}:{version:s}) will fail to compile {pkg:s}. Please set CC={fixed:s} and try again. You can do this, for example, by running: CC={fixed:s} python setup.py where is the command you ran. """.format(compiler=c_compiler, version=version, pkg=self.package_name, fixed=fixed)) log.warn(msg) sys.exit(1) # If C compiler is set via CC, and isn't broken, we are good to go. We # should definitely not try accessing the compiler specified by # ``sysconfig.get_config_var('CC')`` lower down, because this may fail # if the compiler used to compile Python is missing (and maybe this is # why the user is setting CC). For example, the official Python 2.7.3 # MacOS X binary was compiled with gcc-4.2, which is no longer available # in XCode 4. return if self.compiler is not None: # At this point, self.compiler will be set only if a compiler # was specified in the command-line or via setup.cfg, in which # case we don't do anything return compiler_type = ccompiler.get_default_compiler() if compiler_type == 'unix': # We have to get the compiler this way, as this is the one that is # used if os.environ['CC'] is not set. It is actually read in from # the Python Makefile. 
Note that this is not necessarily the same # compiler as returned by ccompiler.new_compiler() c_compiler = sysconfig.get_config_var('CC') try: version = get_compiler_version(c_compiler) except OSError: msg = textwrap.dedent( """ The C compiler used to compile Python {compiler:s}, and which is normally used to compile C extensions, is not available. You can explicitly specify which compiler to use by setting the CC environment variable, for example: CC=gcc python setup.py or if you are using MacOS X, you can try: CC=clang python setup.py """.format(compiler=c_compiler)) log.warn(msg) sys.exit(1) for broken, fixed in self._broken_compiler_mapping: if re.match(broken, version): os.environ['CC'] = fixed break def _check_cython_sources(self, extension): """ Where relevant, make sure that the .c files associated with .pyx modules are present (if building without Cython installed). """ # Determine the compiler we'll be using if self.compiler is None: compiler = get_default_compiler() else: compiler = self.compiler # Replace .pyx with C-equivalents, unless c files are missing for jdx, src in enumerate(extension.sources): base, ext = os.path.splitext(src) pyxfn = base + '.pyx' cfn = base + '.c' cppfn = base + '.cpp' if not os.path.isfile(pyxfn): continue if self.uses_cython: extension.sources[jdx] = pyxfn else: if os.path.isfile(cfn): extension.sources[jdx] = cfn elif os.path.isfile(cppfn): extension.sources[jdx] = cppfn else: msg = ( 'Could not find C/C++ file {0}.(c/cpp) for Cython ' 'file {1} when building extension {2}. Cython ' 'must be installed to build from a git ' 'checkout.'.format(base, pyxfn, extension.name)) raise IOError(errno.ENOENT, msg, cfn) # Current versions of Cython use deprecated Numpy API features # the use of which produces a few warnings when compiling. # These additional flags should squelch those warnings. # TODO: Feel free to remove this if/when a Cython update # removes use of the deprecated Numpy API if compiler == 'unix': extension.extra_compile_args.extend([ '-Wp,-w', '-Wno-unused-function']) return build_ext photutils-0.4/astropy_helpers/astropy_helpers/commands/build_py.py0000644000214200020070000000265612477406127030246 0ustar lbradleySTSCI\science00000000000000from setuptools.command.build_py import build_py as SetuptoolsBuildPy from ..utils import _get_platlib_dir class AstropyBuildPy(SetuptoolsBuildPy): user_options = SetuptoolsBuildPy.user_options[:] boolean_options = SetuptoolsBuildPy.boolean_options[:] def finalize_options(self): # Update build_lib settings from the build command to always put # build files in platform-specific subdirectories of build/, even # for projects with only pure-Python source (this is desirable # specifically for support of multiple Python version). 
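# In practice the platlib dir is a platform- and Python-version-specific
# subdirectory of the build tree (for example something like
# build/lib.linux-x86_64-3.6); the same path is assigned to build_purelib,
# build_lib and this command's build_lib below so that pure-Python and
# compiled code end up in one place.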
build_cmd = self.get_finalized_command('build') platlib_dir = _get_platlib_dir(build_cmd) build_cmd.build_purelib = platlib_dir build_cmd.build_lib = platlib_dir self.build_lib = platlib_dir SetuptoolsBuildPy.finalize_options(self) def run_2to3(self, files, doctests=False): # Filter the files to exclude things that shouldn't be 2to3'd skip_2to3 = self.distribution.skip_2to3 filtered_files = [] for filename in files: for package in skip_2to3: if filename[len(self.build_lib) + 1:].startswith(package): break else: filtered_files.append(filename) SetuptoolsBuildPy.run_2to3(self, filtered_files, doctests) def run(self): # first run the normal build_py SetuptoolsBuildPy.run(self) photutils-0.4/astropy_helpers/astropy_helpers/commands/build_sphinx.py0000644000214200020070000002427713175633272031131 0ustar lbradleySTSCI\science00000000000000from __future__ import print_function import inspect import os import pkgutil import re import shutil import subprocess import sys import textwrap import warnings from distutils import log from distutils.cmd import DistutilsOptionError import sphinx from sphinx.setup_command import BuildDoc as SphinxBuildDoc from ..utils import minversion, AstropyDeprecationWarning PY3 = sys.version_info[0] >= 3 class AstropyBuildDocs(SphinxBuildDoc): """ A version of the ``build_docs`` command that uses the version of Astropy that is built by the setup ``build`` command, rather than whatever is installed on the system. To build docs against the installed version, run ``make html`` in the ``astropy/docs`` directory. This also automatically creates the docs/_static directories--this is needed because GitHub won't create the _static dir because it has no tracked files. """ description = 'Build Sphinx documentation for Astropy environment' user_options = SphinxBuildDoc.user_options[:] user_options.append( ('warnings-returncode', 'w', 'Parses the sphinx output and sets the return code to 1 if there ' 'are any warnings. 
Note that this will cause the sphinx log to ' 'only update when it completes, rather than continuously as is ' 'normally the case.')) user_options.append( ('clean-docs', 'l', 'Completely clean previous builds, including ' 'automodapi-generated files before building new ones')) user_options.append( ('no-intersphinx', 'n', 'Skip intersphinx, even if conf.py says to use it')) user_options.append( ('open-docs-in-browser', 'o', 'Open the docs in a browser (using the webbrowser module) if the ' 'build finishes successfully.')) boolean_options = SphinxBuildDoc.boolean_options[:] boolean_options.append('warnings-returncode') boolean_options.append('clean-docs') boolean_options.append('no-intersphinx') boolean_options.append('open-docs-in-browser') _self_iden_rex = re.compile(r"self\.([^\d\W][\w]+)", re.UNICODE) def initialize_options(self): SphinxBuildDoc.initialize_options(self) self.clean_docs = False self.no_intersphinx = False self.open_docs_in_browser = False self.warnings_returncode = False def finalize_options(self): SphinxBuildDoc.finalize_options(self) # Clear out previous sphinx builds, if requested if self.clean_docs: dirstorm = [os.path.join(self.source_dir, 'api'), os.path.join(self.source_dir, 'generated')] if self.build_dir is None: dirstorm.append('docs/_build') else: dirstorm.append(self.build_dir) for d in dirstorm: if os.path.isdir(d): log.info('Cleaning directory ' + d) shutil.rmtree(d) else: log.info('Not cleaning directory ' + d + ' because ' 'not present or not a directory') def run(self): # TODO: Break this method up into a few more subroutines and # document them better import webbrowser if PY3: from urllib.request import pathname2url else: from urllib import pathname2url # This is used at the very end of `run` to decide if sys.exit should # be called. If it's None, it won't be. retcode = None # If possible, create the _static dir if self.build_dir is not None: # the _static dir should be in the same place as the _build dir # for Astropy basedir, subdir = os.path.split(self.build_dir) if subdir == '': # the path has a trailing /... basedir, subdir = os.path.split(basedir) staticdir = os.path.join(basedir, '_static') if os.path.isfile(staticdir): raise DistutilsOptionError( 'Attempted to build_docs in a location where' + staticdir + 'is a file. Must be a directory.') self.mkpath(staticdir) # Now make sure Astropy is built and determine where it was built build_cmd = self.reinitialize_command('build') build_cmd.inplace = 0 self.run_command('build') build_cmd = self.get_finalized_command('build') build_cmd_path = os.path.abspath(build_cmd.build_lib) ah_importer = pkgutil.get_importer('astropy_helpers') ah_path = os.path.abspath(ah_importer.path) # Now generate the source for and spawn a new process that runs the # command. 
This is needed to get the correct imports for the built # version runlines, runlineno = inspect.getsourcelines(SphinxBuildDoc.run) subproccode = textwrap.dedent(""" from sphinx.setup_command import * os.chdir({srcdir!r}) sys.path.insert(0, {build_cmd_path!r}) sys.path.insert(0, {ah_path!r}) """).format(build_cmd_path=build_cmd_path, ah_path=ah_path, srcdir=self.source_dir) # runlines[1:] removes 'def run(self)' on the first line subproccode += textwrap.dedent(''.join(runlines[1:])) # All "self.foo" in the subprocess code needs to be replaced by the # values taken from the current self in *this* process subproccode = self._self_iden_rex.split(subproccode) for i in range(1, len(subproccode), 2): iden = subproccode[i] val = getattr(self, iden) if iden.endswith('_dir'): # Directories should be absolute, because the `chdir` call # in the new process moves to a different directory subproccode[i] = repr(os.path.abspath(val)) else: subproccode[i] = repr(val) subproccode = ''.join(subproccode) optcode = textwrap.dedent(""" class Namespace(object): pass self = Namespace() self.pdb = {pdb!r} self.verbosity = {verbosity!r} self.traceback = {traceback!r} """).format(pdb=getattr(self, 'pdb', False), verbosity=getattr(self, 'verbosity', 0), traceback=getattr(self, 'traceback', False)) subproccode = optcode + subproccode # This is a quick gross hack, but it ensures that the code grabbed from # SphinxBuildDoc.run will work in Python 2 if it uses the print # function if minversion(sphinx, '1.3'): subproccode = 'from __future__ import print_function' + subproccode if self.no_intersphinx: # the confoverrides variable in sphinx.setup_command.BuildDoc can # be used to override the conf.py ... but this could well break # if future versions of sphinx change the internals of BuildDoc, # so remain vigilant! subproccode = subproccode.replace( 'confoverrides = {}', 'confoverrides = {\'intersphinx_mapping\':{}}') log.debug('Starting subprocess of {0} with python code:\n{1}\n' '[CODE END])'.format(sys.executable, subproccode)) # To return the number of warnings, we need to capture stdout. This # prevents a continuous updating at the terminal, but there's no # apparent way around this. if self.warnings_returncode: proc = subprocess.Popen([sys.executable, '-c', subproccode], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) retcode = 1 with proc.stdout: for line in iter(proc.stdout.readline, b''): line = line.strip(b'\r\n') print(line.decode('utf-8')) if 'build succeeded.' == line.decode('utf-8'): retcode = 0 # Poll to set proc.retcode proc.wait() if retcode != 0: if os.environ.get('TRAVIS', None) == 'true': # this means we are in the travis build, so customize # the message appropriately. msg = ('The build_docs travis build FAILED ' 'because sphinx issued documentation ' 'warnings (scroll up to see the warnings).') else: # standard failure message msg = ('build_docs returning a non-zero exit ' 'code because sphinx issued documentation ' 'warnings.') log.warn(msg) else: proc = subprocess.Popen([sys.executable], stdin=subprocess.PIPE) proc.communicate(subproccode.encode('utf-8')) if proc.returncode == 0: if self.open_docs_in_browser: if self.builder == 'html': absdir = os.path.abspath(self.builder_target_dir) index_path = os.path.join(absdir, 'index.html') fileurl = 'file://' + pathname2url(index_path) webbrowser.open(fileurl) else: log.warn('open-docs-in-browser option was given, but ' 'the builder is not html! 
Ignoring.') else: log.warn('Sphinx Documentation subprocess failed with return ' 'code ' + str(proc.returncode)) retcode = proc.returncode if retcode is not None: # this is potentially dangerous in that there might be something # after the call to `setup` in `setup.py`, and exiting here will # prevent that from running. But there's no other apparent way # to signal what the return code should be. sys.exit(retcode) class AstropyBuildSphinx(AstropyBuildDocs): # pragma: no cover description = 'deprecated alias to the build_docs command' def run(self): warnings.warn( 'The "build_sphinx" command is now deprecated. Use' '"build_docs" instead.', AstropyDeprecationWarning) AstropyBuildDocs.run(self) photutils-0.4/astropy_helpers/astropy_helpers/commands/install.py0000644000214200020070000000074612477406127030103 0ustar lbradleySTSCI\science00000000000000from setuptools.command.install import install as SetuptoolsInstall from ..utils import _get_platlib_dir class AstropyInstall(SetuptoolsInstall): user_options = SetuptoolsInstall.user_options[:] boolean_options = SetuptoolsInstall.boolean_options[:] def finalize_options(self): build_cmd = self.get_finalized_command('build') platlib_dir = _get_platlib_dir(build_cmd) self.build_lib = platlib_dir SetuptoolsInstall.finalize_options(self) photutils-0.4/astropy_helpers/astropy_helpers/commands/install_lib.py0000644000214200020070000000100012477406127030711 0ustar lbradleySTSCI\science00000000000000from setuptools.command.install_lib import install_lib as SetuptoolsInstallLib from ..utils import _get_platlib_dir class AstropyInstallLib(SetuptoolsInstallLib): user_options = SetuptoolsInstallLib.user_options[:] boolean_options = SetuptoolsInstallLib.boolean_options[:] def finalize_options(self): build_cmd = self.get_finalized_command('build') platlib_dir = _get_platlib_dir(build_cmd) self.build_dir = platlib_dir SetuptoolsInstallLib.finalize_options(self) photutils-0.4/astropy_helpers/astropy_helpers/commands/register.py0000644000214200020070000000454712477406127030264 0ustar lbradleySTSCI\science00000000000000from setuptools.command.register import register as SetuptoolsRegister class AstropyRegister(SetuptoolsRegister): """Extends the built in 'register' command to support a ``--hidden`` option to make the registered version hidden on PyPI by default. The result of this is that when a version is registered as "hidden" it can still be downloaded from PyPI, but it does not show up in the list of actively supported versions under http://pypi.python.org/pypi/astropy, and is not set as the most recent version. Although this can always be set through the web interface it may be more convenient to be able to specify via the 'register' command. Hidden may also be considered a safer default when running the 'register' command, though this command uses distutils' normal behavior if the ``--hidden`` option is omitted. 
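For example, a maintainer could register a release as hidden with::

    python setup.py register --hidden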
""" user_options = SetuptoolsRegister.user_options + [ ('hidden', None, 'mark this release as hidden on PyPI by default') ] boolean_options = SetuptoolsRegister.boolean_options + ['hidden'] def initialize_options(self): SetuptoolsRegister.initialize_options(self) self.hidden = False def build_post_data(self, action): data = SetuptoolsRegister.build_post_data(self, action) if action == 'submit' and self.hidden: data['_pypi_hidden'] = '1' return data def _set_config(self): # The original register command is buggy--if you use .pypirc with a # server-login section *at all* the repository you specify with the -r # option will be overwritten with either the repository in .pypirc or # with the default, # If you do not have a .pypirc using the -r option will just crash. # Way to go distutils # If we don't set self.repository back to a default value _set_config # can crash if there was a user-supplied value for this option; don't # worry, we'll get the real value back afterwards self.repository = 'pypi' SetuptoolsRegister._set_config(self) options = self.distribution.get_option_dict('register') if 'repository' in options: source, value = options['repository'] # Really anything that came from setup.cfg or the command line # should override whatever was in .pypirc self.repository = value photutils-0.4/astropy_helpers/astropy_helpers/commands/setup_package.py0000644000214200020070000000017012732006043031222 0ustar lbradleySTSCI\science00000000000000from os.path import join def get_package_data(): return {'astropy_helpers.commands': [join('src', 'compiler.c')]} photutils-0.4/astropy_helpers/astropy_helpers/commands/src/0000755000214200020070000000000013175654702026643 5ustar lbradleySTSCI\science00000000000000photutils-0.4/astropy_helpers/astropy_helpers/commands/src/compiler.c0000644000214200020070000000573112477406127030627 0ustar lbradleySTSCI\science00000000000000#include /*************************************************************************** * Macros for determining the compiler version. * * These are borrowed from boost, and majorly abridged to include only * the compilers we care about. ***************************************************************************/ #ifndef PY3K #if PY_MAJOR_VERSION >= 3 #define PY3K 1 #else #define PY3K 0 #endif #endif #define STRINGIZE(X) DO_STRINGIZE(X) #define DO_STRINGIZE(X) #X #if defined __clang__ /* Clang C++ emulates GCC, so it has to appear early. 
*/ # define COMPILER "Clang version " __clang_version__ #elif defined(__INTEL_COMPILER) || defined(__ICL) || defined(__ICC) || defined(__ECC) /* Intel */ # if defined(__INTEL_COMPILER) # define INTEL_VERSION __INTEL_COMPILER # elif defined(__ICL) # define INTEL_VERSION __ICL # elif defined(__ICC) # define INTEL_VERSION __ICC # elif defined(__ECC) # define INTEL_VERSION __ECC # endif # define COMPILER "Intel C compiler version " STRINGIZE(INTEL_VERSION) #elif defined(__GNUC__) /* gcc */ # define COMPILER "GCC version " __VERSION__ #elif defined(__SUNPRO_CC) /* Sun Workshop Compiler */ # define COMPILER "Sun compiler version " STRINGIZE(__SUNPRO_CC) #elif defined(_MSC_VER) /* Microsoft Visual C/C++ Must be last since other compilers define _MSC_VER for compatibility as well */ # if _MSC_VER < 1200 # define COMPILER_VERSION 5.0 # elif _MSC_VER < 1300 # define COMPILER_VERSION 6.0 # elif _MSC_VER == 1300 # define COMPILER_VERSION 7.0 # elif _MSC_VER == 1310 # define COMPILER_VERSION 7.1 # elif _MSC_VER == 1400 # define COMPILER_VERSION 8.0 # elif _MSC_VER == 1500 # define COMPILER_VERSION 9.0 # elif _MSC_VER == 1600 # define COMPILER_VERSION 10.0 # else # define COMPILER_VERSION _MSC_VER # endif # define COMPILER "Microsoft Visual C++ version " STRINGIZE(COMPILER_VERSION) #else /* Fallback */ # define COMPILER "Unknown compiler" #endif /*************************************************************************** * Module-level ***************************************************************************/ struct module_state { /* The Sun compiler can't handle empty structs */ #if defined(__SUNPRO_C) || defined(_MSC_VER) int _dummy; #endif }; #if PY3K static struct PyModuleDef moduledef = { PyModuleDef_HEAD_INIT, "_compiler", NULL, sizeof(struct module_state), NULL, NULL, NULL, NULL, NULL }; #define INITERROR return NULL PyMODINIT_FUNC PyInit__compiler(void) #else #define INITERROR return PyMODINIT_FUNC init_compiler(void) #endif { PyObject* m; #if PY3K m = PyModule_Create(&moduledef); #else m = Py_InitModule3("_compiler", NULL, NULL); #endif if (m == NULL) INITERROR; PyModule_AddStringConstant(m, "compiler", COMPILER); #if PY3K return m; #endif } photutils-0.4/astropy_helpers/astropy_helpers/commands/test.py0000644000214200020070000000252413175633272027407 0ustar lbradleySTSCI\science00000000000000""" Different implementations of the ``./setup.py test`` command depending on what's locally available. If Astropy v1.1.0.dev or later is available it should be possible to import AstropyTest from ``astropy.tests.command``. If ``astropy`` can be imported but not ``astropy.tests.command`` (i.e. an older version of Astropy), we can use the backwards-compat implementation of the command. If Astropy can't be imported at all then there is a skeleton implementation that allows users to at least discover the ``./setup.py test`` command and learn that they need Astropy to run it. 
""" # Previously these except statements caught only ImportErrors, but there are # some other obscure exceptional conditions that can occur when importing # astropy.tests (at least on older versions) that can cause these imports to # fail try: import astropy # noqa try: from astropy.tests.command import AstropyTest except Exception: from ._test_compat import AstropyTest except Exception: # No astropy at all--provide the dummy implementation from ._dummy import _DummyCommand class AstropyTest(_DummyCommand): command_name = 'test' description = 'Run the tests for this package' error_msg = ( "The 'test' command requires the astropy package to be " "installed and importable.") photutils-0.4/astropy_helpers/astropy_helpers/compat/0000755000214200020070000000000013175654702025536 5ustar lbradleySTSCI\science00000000000000photutils-0.4/astropy_helpers/astropy_helpers/compat/__init__.py0000644000214200020070000000056012346164025027641 0ustar lbradleySTSCI\science00000000000000def _fix_user_options(options): """ This is for Python 2.x and 3.x compatibility. distutils expects Command options to all be byte strings on Python 2 and Unicode strings on Python 3. """ def to_str_or_none(x): if x is None: return None return str(x) return [tuple(to_str_or_none(x) for x in y) for y in options] photutils-0.4/astropy_helpers/astropy_helpers/distutils_helpers.py0000644000214200020070000001736213175633272030403 0ustar lbradleySTSCI\science00000000000000""" This module contains various utilities for introspecting the distutils module and the setup process. Some of these utilities require the `astropy_helpers.setup_helpers.register_commands` function to be called first, as it will affect introspection of setuptools command-line arguments. Other utilities in this module do not have that restriction. """ import os import sys from distutils import ccompiler, log from distutils.dist import Distribution from distutils.errors import DistutilsError from .utils import silence # This function, and any functions that call it, require the setup in # `astropy_helpers.setup_helpers.register_commands` to be run first. def get_dummy_distribution(): """ Returns a distutils Distribution object used to instrument the setup environment before calling the actual setup() function. """ from .setup_helpers import _module_state if _module_state['registered_commands'] is None: raise RuntimeError( 'astropy_helpers.setup_helpers.register_commands() must be ' 'called before using ' 'astropy_helpers.setup_helpers.get_dummy_distribution()') # Pre-parse the Distutils command-line options and config files to if # the option is set. dist = Distribution({'script_name': os.path.basename(sys.argv[0]), 'script_args': sys.argv[1:]}) dist.cmdclass.update(_module_state['registered_commands']) with silence(): try: dist.parse_config_files() dist.parse_command_line() except (DistutilsError, AttributeError, SystemExit): # Let distutils handle DistutilsErrors itself AttributeErrors can # get raise for ./setup.py --help SystemExit can be raised if a # display option was used, for example pass return dist def get_distutils_option(option, commands): """ Returns the value of the given distutils option. Parameters ---------- option : str The name of the option commands : list of str The list of commands on which this option is available Returns ------- val : str or None the value of the given distutils option. If the option is not set, returns None. 
""" dist = get_dummy_distribution() for cmd in commands: cmd_opts = dist.command_options.get(cmd) if cmd_opts is not None and option in cmd_opts: return cmd_opts[option][1] else: return None def get_distutils_build_option(option): """ Returns the value of the given distutils build option. Parameters ---------- option : str The name of the option Returns ------- val : str or None The value of the given distutils build option. If the option is not set, returns None. """ return get_distutils_option(option, ['build', 'build_ext', 'build_clib']) def get_distutils_install_option(option): """ Returns the value of the given distutils install option. Parameters ---------- option : str The name of the option Returns ------- val : str or None The value of the given distutils build option. If the option is not set, returns None. """ return get_distutils_option(option, ['install']) def get_distutils_build_or_install_option(option): """ Returns the value of the given distutils build or install option. Parameters ---------- option : str The name of the option Returns ------- val : str or None The value of the given distutils build or install option. If the option is not set, returns None. """ return get_distutils_option(option, ['build', 'build_ext', 'build_clib', 'install']) def get_compiler_option(): """ Determines the compiler that will be used to build extension modules. Returns ------- compiler : str The compiler option specified for the build, build_ext, or build_clib command; or the default compiler for the platform if none was specified. """ compiler = get_distutils_build_option('compiler') if compiler is None: return ccompiler.get_default_compiler() return compiler def add_command_option(command, name, doc, is_bool=False): """ Add a custom option to a setup command. Issues a warning if the option already exists on that command. Parameters ---------- command : str The name of the command as given on the command line name : str The name of the build option doc : str A short description of the option, for the `--help` message is_bool : bool, optional When `True`, the option is a boolean option and doesn't require an associated value. 
""" dist = get_dummy_distribution() cmdcls = dist.get_command_class(command) if (hasattr(cmdcls, '_astropy_helpers_options') and name in cmdcls._astropy_helpers_options): return attr = name.replace('-', '_') if hasattr(cmdcls, attr): raise RuntimeError( '{0!r} already has a {1!r} class attribute, barring {2!r} from ' 'being usable as a custom option name.'.format(cmdcls, attr, name)) for idx, cmd in enumerate(cmdcls.user_options): if cmd[0] == name: log.warn('Overriding existing {0!r} option ' '{1!r}'.format(command, name)) del cmdcls.user_options[idx] if name in cmdcls.boolean_options: cmdcls.boolean_options.remove(name) break cmdcls.user_options.append((name, None, doc)) if is_bool: cmdcls.boolean_options.append(name) # Distutils' command parsing requires that a command object have an # attribute with the same name as the option (with '-' replaced with '_') # in order for that option to be recognized as valid setattr(cmdcls, attr, None) # This caches the options added through add_command_option so that if it is # run multiple times in the same interpreter repeated adds are ignored # (this way we can still raise a RuntimeError if a custom option overrides # a built-in option) if not hasattr(cmdcls, '_astropy_helpers_options'): cmdcls._astropy_helpers_options = set([name]) else: cmdcls._astropy_helpers_options.add(name) def get_distutils_display_options(): """ Returns a set of all the distutils display options in their long and short forms. These are the setup.py arguments such as --name or --version which print the project's metadata and then exit. Returns ------- opts : set The long and short form display option arguments, including the - or -- """ short_display_opts = set('-' + o[1] for o in Distribution.display_options if o[1]) long_display_opts = set('--' + o[0] for o in Distribution.display_options) # Include -h and --help which are not explicitly listed in # Distribution.display_options (as they are handled by optparse) short_display_opts.add('-h') long_display_opts.add('--help') # This isn't the greatest approach to hardcode these commands. # However, there doesn't seem to be a good way to determine # whether build *will be* run as part of the command at this # phase. display_commands = set([ 'clean', 'register', 'setopt', 'saveopts', 'egg_info', 'alias']) return short_display_opts.union(long_display_opts.union(display_commands)) def is_distutils_display_option(): """ Returns True if sys.argv contains any of the distutils display options such as --version or --name. """ display_options = get_distutils_display_options() return bool(set(sys.argv[1:]).intersection(display_options)) photutils-0.4/astropy_helpers/astropy_helpers/extern/0000755000214200020070000000000013175654702025560 5ustar lbradleySTSCI\science00000000000000photutils-0.4/astropy_helpers/astropy_helpers/extern/__init__.py0000644000214200020070000000111513175633272027666 0ustar lbradleySTSCI\science00000000000000# The ``astropy_helpers.extern`` sub-module includes modules developed elsewhere # that are bundled here for convenience. At the moment, this consists of the # following two sphinx extensions: # # * `numpydoc `_, a Sphinx extension # developed as part of the Numpy project. This is used to parse docstrings # in Numpy format # # * `sphinx-automodapi `_, a Sphinx # developed as part of the Astropy project. This used to be developed directly # in ``astropy-helpers`` but is now a standalone package. 
photutils-0.4/astropy_helpers/astropy_helpers/extern/automodapi/0000755000214200020070000000000013175654702027722 5ustar lbradleySTSCI\science00000000000000photutils-0.4/astropy_helpers/astropy_helpers/extern/automodapi/__init__.py0000644000214200020070000000002413175633272032026 0ustar lbradleySTSCI\science00000000000000__version__ = '0.6' photutils-0.4/astropy_helpers/astropy_helpers/extern/automodapi/autodoc_enhancements.py0000644000214200020070000001230413175633272034461 0ustar lbradleySTSCI\science00000000000000""" Miscellaneous enhancements to help autodoc along. """ import inspect import sys import types import sphinx from distutils.version import LooseVersion from sphinx.ext.autodoc import AttributeDocumenter, ModuleDocumenter from sphinx.util.inspect import isdescriptor if sys.version_info[0] == 3: class_types = (type,) else: class_types = (type, types.ClassType) SPHINX_LT_15 = (LooseVersion(sphinx.__version__) < LooseVersion('1.5')) MethodDescriptorType = type(type.__subclasses__) # See # https://github.com/astropy/astropy-helpers/issues/116#issuecomment-71254836 # for further background on this. def type_object_attrgetter(obj, attr, *defargs): """ This implements an improved attrgetter for type objects (i.e. classes) that can handle class attributes that are implemented as properties on a metaclass. Normally `getattr` on a class with a `property` (say, "foo"), would return the `property` object itself. However, if the class has a metaclass which *also* defines a `property` named "foo", ``getattr(cls, 'foo')`` will find the "foo" property on the metaclass and resolve it. For the purposes of autodoc we just want to document the "foo" property defined on the class, not on the metaclass. For example:: >>> class Meta(type): ... @property ... def foo(cls): ... return 'foo' ... >>> class MyClass(metaclass=Meta): ... @property ... def foo(self): ... \"\"\"Docstring for MyClass.foo property.\"\"\" ... return 'myfoo' ... >>> getattr(MyClass, 'foo') 'foo' >>> type_object_attrgetter(MyClass, 'foo') >>> type_object_attrgetter(MyClass, 'foo').__doc__ 'Docstring for MyClass.foo property.' The last line of the example shows the desired behavior for the purposes of autodoc. """ for base in obj.__mro__: if attr in base.__dict__: if isinstance(base.__dict__[attr], property): # Note, this should only be used for properties--for any other # type of descriptor (classmethod, for example) this can mess # up existing expectations of what getattr(cls, ...) 
returns return base.__dict__[attr] break return getattr(obj, attr, *defargs) if SPHINX_LT_15: # Provided to work around a bug in Sphinx # See https://github.com/sphinx-doc/sphinx/pull/1843 class AttributeDocumenter(AttributeDocumenter): @classmethod def can_document_member(cls, member, membername, isattr, parent): non_attr_types = cls.method_types + class_types + \ (MethodDescriptorType,) isdatadesc = isdescriptor(member) and not \ isinstance(member, non_attr_types) and not \ type(member).__name__ == "instancemethod" # That last condition addresses an obscure case of C-defined # methods using a deprecated type in Python 3, that is not # otherwise exported anywhere by Python return isdatadesc or (not isinstance(parent, ModuleDocumenter) and not inspect.isroutine(member) and not isinstance(member, class_types)) def setup(app): # Must have the autodoc extension set up first so we can override it app.setup_extension('sphinx.ext.autodoc') # Need to import this too since it re-registers all the documenter types # =_= import sphinx.ext.autosummary.generate app.add_autodoc_attrgetter(type, type_object_attrgetter) if sphinx.version_info < (1, 4, 2): # this is a really ugly hack to supress a warning that sphinx 1.4 # generates when overriding an existing directive (which is *desired* # behavior here). As of sphinx v1.4.2, this has been fixed: # https://github.com/sphinx-doc/sphinx/issues/2451 # But we leave it in for 1.4.0/1.4.1 . But if the "needs_sphinx" is # eventually updated to >= 1.4.2, this should be removed entirely (in # favor of the line in the "else" clause) _oldwarn = app._warning _oldwarncount = app._warncount try: try: # *this* is in a try/finally because we don't want to force six as # a real dependency. In sphinx 1.4, six is a prerequisite, so # there's no issue. But in older sphinxes this may not be true... # but the inderlying warning is absent anyway so we let it slide. from six import StringIO app._warning = StringIO() except ImportError: pass app.add_autodocumenter(AttributeDocumenter) finally: app._warning = _oldwarn app._warncount = _oldwarncount else: suppress_warnigns_orig = app.config.suppress_warnings[:] if 'app.add_directive' not in app.config.suppress_warnings: app.config.suppress_warnings.append('app.add_directive') try: app.add_autodocumenter(AttributeDocumenter) finally: app.config.suppress_warnings = suppress_warnigns_orig photutils-0.4/astropy_helpers/astropy_helpers/extern/automodapi/automodapi.py0000644000214200020070000003665113175633272032450 0ustar lbradleySTSCI\science00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst """ This directive takes a single argument that must be a module or package. It will produce a block of documentation that includes the docstring for the package, an :ref:`automodsumm` directive, and an :ref:`automod-diagram` if there are any classes in the module. If only the main docstring of the module/package is desired in the documentation, use `automodule`_ instead of `automodapi`_. It accepts the following options: * ``:include-all-objects:`` If present, include not just functions and classes, but all objects. This includes variables, for which a possible docstring after the variable definition will be shown. * ``:no-inheritance-diagram:`` If present, the inheritance diagram will not be shown even if the module/package has classes. * ``:skip: str`` This option results in the specified object being skipped, that is the object will *not* be included in the generated documentation. 
This option may appear any number of times to skip multiple objects. * ``:no-main-docstr:`` If present, the docstring for the module/package will not be generated. The function and class tables will still be used, however. * ``:headings: str`` Specifies the characters (in one string) used as the heading levels used for the generated section. This must have at least 2 characters (any after 2 will be ignored). This also *must* match the rest of the documentation on this page for sphinx to be happy. Defaults to "-^", which matches the convention used for Python's documentation, assuming the automodapi call is inside a top-level section (which usually uses '='). * ``:no-heading:`` If specified do not create a top level heading for the section. That is, do not create a title heading with text like "packagename Package". The actual docstring for the package/module will still be shown, though, unless ``:no-main-docstr:`` is given. * ``:allowed-package-names: str`` Specifies the packages that functions/classes documented here are allowed to be from, as comma-separated list of package names. If not given, only objects that are actually in a subpackage of the package currently being documented are included. * ``:inherited-members:`` / ``:no-inherited-members:`` The global sphinx configuration option ``automodsumm_inherited_members`` decides if members that a class inherits from a base class are included in the generated documentation. The option ``:inherited-members:`` or ``:no-inherited-members:`` allows the user to overrride the global setting. This extension also adds three sphinx configuration options: * ``automodapi_toctreedirnm`` This must be a string that specifies the name of the directory the automodsumm generated documentation ends up in. This directory path should be relative to the documentation root (e.g., same place as ``index.rst``). Defaults to ``'api'``. * ``automodapi_writereprocessed`` Should be a bool, and if `True`, will cause `automodapi`_ to write files with any `automodapi`_ sections replaced with the content Sphinx processes after `automodapi`_ has run. The output files are not actually used by sphinx, so this option is only for figuring out the cause of sphinx warnings or other debugging. Defaults to `False`. * ``automodsumm_inherited_members`` Should be a bool and if ``True`` members that a class inherits from a base class are included in the generated documentation. Defaults to ``False``. .. _automodule: http://sphinx-doc.org/latest/ext/autodoc.html?highlight=automodule#directive-automodule """ # Implementation note: # The 'automodapi' directive is not actually implemented as a docutils # directive. Instead, this extension searches for the 'automodapi' text in # all sphinx documents, and replaces it where necessary from a template built # into this extension. This is necessary because automodsumm (and autosummary) # use the "builder-inited" event, which comes before the directives are # actually built. import inspect import io import os import re import sys from .utils import find_mod_objs if sys.version_info[0] == 3: text_type = str else: text_type = unicode automod_templ_modheader = """ {modname} {pkgormod} {modhds}{pkgormodhds} {automoduleline} """ automod_templ_classes = """ Classes {clshds} .. automodsumm:: {modname} :classes-only: {clsfuncoptions} """ automod_templ_funcs = """ Functions {funchds} .. automodsumm:: {modname} :functions-only: {clsfuncoptions} """ automod_templ_vars = """ Variables {otherhds} .. 
automodsumm:: {modname} :variables-only: {clsfuncoptions} """ automod_templ_inh = """ Class Inheritance Diagram {clsinhsechds} .. automod-diagram:: {modname} :private-bases: :parts: 1 {allowedpkgnms} {skip} """ _automodapirex = re.compile(r'^(?:\.\.\s+automodapi::\s*)([A-Za-z0-9_.]+)' r'\s*$((?:\n\s+:[a-zA-Z_\-]+:.*$)*)', flags=re.MULTILINE) # the last group of the above regex is intended to go into finall with the below _automodapiargsrex = re.compile(r':([a-zA-Z_\-]+):(.*)$', flags=re.MULTILINE) def automodapi_replace(sourcestr, app, dotoctree=True, docname=None, warnings=True): """ Replaces `sourcestr`'s entries of ".. automdapi::" with the automodapi template form based on provided options. This is used with the sphinx event 'source-read' to replace `automodapi`_ entries before sphinx actually processes them, as automodsumm needs the code to be present to generate stub documentation. Parameters ---------- sourcestr : str The string with sphinx source to be checked for automodapi replacement. app : `sphinx.application.Application` The sphinx application. dotoctree : bool If `True`, a ":toctree:" option will be added in the ".. automodsumm::" sections of the template, pointing to the appropriate "generated" directory based on the Astropy convention (e.g. in ``docs/api``) docname : str The name of the file for this `sourcestr` (if known - if not, it can be `None`). If not provided and `dotoctree` is `True`, the generated files may end up in the wrong place. warnings : bool If `False`, all warnings that would normally be issued are silenced. Returns ------- newstr :str The string with automodapi entries replaced with the correct sphinx markup. """ spl = _automodapirex.split(sourcestr) if len(spl) > 1: # automodsumm is in this document # Use app.srcdir because api folder should be inside source folder not # at folder where sphinx is run. if dotoctree: toctreestr = ':toctree: ' api_dir = os.path.join(app.srcdir, app.config.automodapi_toctreedirnm) if docname is None: doc_path = '.' 
else: doc_path = os.path.join(app.srcdir, docname) toctreestr += os.path.relpath(api_dir, os.path.dirname(doc_path)) else: toctreestr = '' newstrs = [spl[0]] for grp in range(len(spl) // 3): modnm = spl[grp * 3 + 1] # find where this is in the document for warnings if docname is None: location = None else: location = (docname, spl[0].count('\n')) # initialize default options toskip = [] inhdiag = maindocstr = top_head = True hds = '-^' allowedpkgnms = [] allowothers = False # look for actual options unknownops = [] inherited_members = None for opname, args in _automodapiargsrex.findall(spl[grp * 3 + 2]): if opname == 'skip': toskip.append(args.strip()) elif opname == 'no-inheritance-diagram': inhdiag = False elif opname == 'no-main-docstr': maindocstr = False elif opname == 'headings': hds = args elif opname == 'no-heading': top_head = False elif opname == 'allowed-package-names': allowedpkgnms.append(args.strip()) elif opname == 'inherited-members': inherited_members = True elif opname == 'no-inherited-members': inherited_members = False elif opname == 'include-all-objects': allowothers = True else: unknownops.append(opname) # join all the allowedpkgnms if len(allowedpkgnms) == 0: allowedpkgnms = '' onlylocals = True else: allowedpkgnms = ':allowed-package-names: ' + ','.join(allowedpkgnms) onlylocals = allowedpkgnms # get the two heading chars if len(hds) < 2: msg = 'Not enough headings (got {0}, need 2), using default -^' if warnings: app.warn(msg.format(len(hds)), location) hds = '-^' h1, h2 = hds.lstrip()[:2] # tell sphinx that the remaining args are invalid. if len(unknownops) > 0 and app is not None: opsstrs = ','.join(unknownops) msg = 'Found additional options ' + opsstrs + ' in automodapi.' if warnings: app.warn(msg, location) ispkg, hascls, hasfuncs, hasother = _mod_info( modnm, toskip, onlylocals=onlylocals) # add automodule directive only if no-main-docstr isn't present if maindocstr: automodline = '.. automodule:: {modname}'.format(modname=modnm) else: automodline = '' if top_head: newstrs.append(automod_templ_modheader.format( modname=modnm, modhds=h1 * len(modnm), pkgormod='Package' if ispkg else 'Module', pkgormodhds=h1 * (8 if ispkg else 7), automoduleline=automodline)) # noqa else: newstrs.append(automod_templ_modheader.format( modname='', modhds='', pkgormod='', pkgormodhds='', automoduleline=automodline)) # construct the options for the class/function sections # start out indented at 4 spaces, but need to keep the indentation. clsfuncoptions = [] if toctreestr: clsfuncoptions.append(toctreestr) if toskip: clsfuncoptions.append(':skip: ' + ','.join(toskip)) if allowedpkgnms: clsfuncoptions.append(allowedpkgnms) if hascls: # This makes no sense unless there are classes. 
if inherited_members is True: clsfuncoptions.append(':inherited-members:') if inherited_members is False: clsfuncoptions.append(':no-inherited-members:') clsfuncoptionstr = '\n '.join(clsfuncoptions) if hasfuncs: newstrs.append(automod_templ_funcs.format( modname=modnm, funchds=h2 * 9, clsfuncoptions=clsfuncoptionstr)) if hascls: newstrs.append(automod_templ_classes.format( modname=modnm, clshds=h2 * 7, clsfuncoptions=clsfuncoptionstr)) if allowothers and hasother: newstrs.append(automod_templ_vars.format( modname=modnm, otherhds=h2 * 9, clsfuncoptions=clsfuncoptionstr)) if inhdiag and hascls: # add inheritance diagram if any classes are in the module if toskip: clsskip = ':skip: ' + ','.join(toskip) else: clsskip = '' diagram_entry = automod_templ_inh.format( modname=modnm, clsinhsechds=h2 * 25, allowedpkgnms=allowedpkgnms, skip=clsskip) diagram_entry = diagram_entry.replace(' \n', '') newstrs.append(diagram_entry) newstrs.append(spl[grp * 3 + 3]) newsourcestr = ''.join(newstrs) if app.config.automodapi_writereprocessed: # sometimes they are unicode, sometimes not, depending on how # sphinx has processed things if isinstance(newsourcestr, text_type): ustr = newsourcestr else: ustr = newsourcestr.decode(app.config.source_encoding) if docname is None: with io.open(os.path.join(app.srcdir, 'unknown.automodapi'), 'a', encoding='utf8') as f: f.write(u'\n**NEW DOC**\n\n') f.write(ustr) else: env = app.builder.env # Determine the filename associated with this doc (specifically # the extension) filename = docname + os.path.splitext(env.doc2path(docname))[1] filename += '.automodapi' with io.open(os.path.join(app.srcdir, filename), 'w', encoding='utf8') as f: f.write(ustr) return newsourcestr else: return sourcestr def _mod_info(modname, toskip=[], onlylocals=True): """ Determines if a module is a module or a package and whether or not it has classes or functions. """ hascls = hasfunc = hasother = False for localnm, fqnm, obj in zip(*find_mod_objs(modname, onlylocals=onlylocals)): if localnm not in toskip: hascls = hascls or inspect.isclass(obj) hasfunc = hasfunc or inspect.isroutine(obj) hasother = hasother or (not inspect.isclass(obj) and not inspect.isroutine(obj)) if hascls and hasfunc and hasother: break # find_mod_objs has already imported modname # TODO: There is probably a cleaner way to do this, though this is pretty # reliable for all Python versions for most cases that we care about. pkg = sys.modules[modname] ispkg = (hasattr(pkg, '__file__') and isinstance(pkg.__file__, str) and os.path.split(pkg.__file__)[1].startswith('__init__.py')) return ispkg, hascls, hasfunc, hasother def process_automodapi(app, docname, source): source[0] = automodapi_replace(source[0], app, True, docname) def setup(app): app.setup_extension('sphinx.ext.autosummary') # Note: we use __name__ here instead of just writing the module name in # case this extension is bundled into another package from . import automodsumm app.setup_extension(automodsumm.__name__) app.connect('source-read', process_automodapi) app.add_config_value('automodapi_toctreedirnm', 'api', True) app.add_config_value('automodapi_writereprocessed', False, True) photutils-0.4/astropy_helpers/astropy_helpers/extern/automodapi/automodsumm.py0000644000214200020070000006265213175633272032660 0ustar lbradleySTSCI\science00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst """ This directive will produce an "autosummary"-style table for public attributes of a specified module. 
See the `sphinx.ext.autosummary`_ extension for details on this process. The main difference from the `autosummary`_ directive is that `autosummary`_ requires manually inputting all attributes that appear in the table, while this captures the entries automatically. This directive requires a single argument that must be a module or package. It also accepts any options supported by the `autosummary`_ directive- see `sphinx.ext.autosummary`_ for details. It also accepts some additional options: * ``:classes-only:`` If present, the autosummary table will only contain entries for classes. This cannot be used at the same time with ``:functions-only:`` or ``:variables-only:``. * ``:functions-only:`` If present, the autosummary table will only contain entries for functions. This cannot be used at the same time with ``:classes-only:`` or ``:variables-only:``. * ``:variables-only:`` If present, the autosummary table will only contain entries for variables (everything except functions and classes). This cannot be used at the same time with ``:classes-only:`` or ``:functions-only:``. * ``:skip: obj1, [obj2, obj3, ...]`` If present, specifies that the listed objects should be skipped and not have their documentation generated, nor be included in the summary table. * ``:allowed-package-names: pkgormod1, [pkgormod2, pkgormod3, ...]`` Specifies the packages that functions/classes documented here are allowed to be from, as comma-separated list of package names. If not given, only objects that are actually in a subpackage of the package currently being documented are included. * ``:inherited-members:`` or ``:no-inherited-members:`` The global sphinx configuration option ``automodsumm_inherited_members`` decides if members that a class inherits from a base class are included in the generated documentation. The flags ``:inherited-members:`` or ``:no-inherited-members:`` allows overrriding this global setting. This extension also adds two sphinx configuration options: * ``automodsumm_writereprocessed`` Should be a bool, and if ``True``, will cause `automodsumm`_ to write files with any ``automodsumm`` sections replaced with the content Sphinx processes after ``automodsumm`` has run. The output files are not actually used by sphinx, so this option is only for figuring out the cause of sphinx warnings or other debugging. Defaults to ``False``. * ``automodsumm_inherited_members`` Should be a bool and if ``True``, will cause `automodsumm`_ to document class members that are inherited from a base class. This value can be overriden for any particular automodsumm directive by including the ``:inherited-members:`` or ``:no-inherited-members:`` options. Defaults to ``False``. .. _sphinx.ext.autosummary: http://sphinx-doc.org/latest/ext/autosummary.html .. _autosummary: http://sphinx-doc.org/latest/ext/autosummary.html#directive-autosummary .. _automod-diagram: automod-diagram directive ========================= This directive will produce an inheritance diagram like that of the `sphinx.ext.inheritance_diagram`_ extension. This directive requires a single argument that must be a module or package. It accepts no options. .. note:: Like 'inheritance-diagram', 'automod-diagram' requires `graphviz `_ to generate the inheritance diagram. .. 
_sphinx.ext.inheritance_diagram: http://sphinx-doc.org/latest/ext/inheritance.html """ import inspect import os import re import io from sphinx.ext.autosummary import Autosummary from sphinx.ext.inheritance_diagram import InheritanceDiagram from docutils.parsers.rst.directives import flag from .utils import find_mod_objs, cleanup_whitespace def _str_list_converter(argument): """ A directive option conversion function that converts the option into a list of strings. Used for 'skip' option. """ if argument is None: return [] else: return [s.strip() for s in argument.split(',')] class Automodsumm(Autosummary): required_arguments = 1 optional_arguments = 0 final_argument_whitespace = False has_content = False option_spec = dict(Autosummary.option_spec) option_spec['functions-only'] = flag option_spec['classes-only'] = flag option_spec['variables-only'] = flag option_spec['skip'] = _str_list_converter option_spec['allowed-package-names'] = _str_list_converter option_spec['inherited-members'] = flag option_spec['no-inherited-members'] = flag def run(self): env = self.state.document.settings.env modname = self.arguments[0] self.warnings = [] nodelist = [] try: localnames, fqns, objs = find_mod_objs(modname) except ImportError: self.warnings = [] self.warn("Couldn't import module " + modname) return self.warnings try: # set self.content to trick the autosummary internals. # Be sure to respect functions-only and classes-only. funconly = 'functions-only' in self.options clsonly = 'classes-only' in self.options varonly = 'variables-only' in self.options if [clsonly, funconly, varonly].count(True) > 1: self.warning('more than one of functions-only, classes-only, ' 'or variables-only defined. Ignoring.') clsonly = funconly = varonly = False skipnames = [] if 'skip' in self.options: option_skipnames = set(self.options['skip']) for lnm in localnames: if lnm in option_skipnames: option_skipnames.remove(lnm) skipnames.append(lnm) if len(option_skipnames) > 0: self.warn('Tried to skip objects {objs} in module {mod}, ' 'but they were not present. Ignoring.' .format(objs=option_skipnames, mod=modname)) if funconly: cont = [] for nm, obj in zip(localnames, objs): if nm not in skipnames and inspect.isroutine(obj): cont.append(nm) elif clsonly: cont = [] for nm, obj in zip(localnames, objs): if nm not in skipnames and inspect.isclass(obj): cont.append(nm) elif varonly: cont = [] for nm, obj in zip(localnames, objs): if nm not in skipnames and not (inspect.isclass(obj) or inspect.isroutine(obj)): cont.append(nm) else: cont = [nm for nm in localnames if nm not in skipnames] self.content = cont # for some reason, even though ``currentmodule`` is substituted in, # sphinx doesn't necessarily recognize this fact. 
So we just force # it internally, and that seems to fix things env.temp_data['py:module'] = modname env.ref_context['py:module'] = modname # can't use super because Sphinx/docutils has trouble return # super(Autosummary,self).run() nodelist.extend(Autosummary.run(self)) return self.warnings + nodelist finally: # has_content = False for the Automodsumm self.content = [] def get_items(self, names): self.genopt['imported-members'] = True return Autosummary.get_items(self, names) # <-------------------automod-diagram stuff-----------------------------------> class Automoddiagram(InheritanceDiagram): option_spec = dict(InheritanceDiagram.option_spec) option_spec['allowed-package-names'] = _str_list_converter option_spec['skip'] = _str_list_converter def run(self): try: ols = self.options.get('allowed-package-names', []) ols = True if len(ols) == 0 else ols # if none are given, assume only local nms, objs = find_mod_objs(self.arguments[0], onlylocals=ols)[1:] except ImportError: self.warnings = [] self.warn("Couldn't import module " + self.arguments[0]) return self.warnings # Check if some classes should be skipped skip = self.options.get('skip', []) clsnms = [] for n, o in zip(nms, objs): if n.split('.')[-1] in skip: continue if inspect.isclass(o): clsnms.append(n) oldargs = self.arguments try: if len(clsnms) > 0: self.arguments = [' '.join(clsnms)] return InheritanceDiagram.run(self) finally: self.arguments = oldargs # <---------------------automodsumm generation stuff--------------------------> def process_automodsumm_generation(app): env = app.builder.env filestosearch = [] for docname in env.found_docs: filename = env.doc2path(docname) if os.path.isfile(filename): filestosearch.append(docname + os.path.splitext(filename)[1]) liness = [] for sfn in filestosearch: lines = automodsumm_to_autosummary_lines(sfn, app) liness.append(lines) if app.config.automodsumm_writereprocessed: if lines: # empty list means no automodsumm entry is in the file outfn = os.path.join(app.srcdir, sfn) + '.automodsumm' with open(outfn, 'w') as f: for l in lines: f.write(l) f.write('\n') for sfn, lines in zip(filestosearch, liness): suffix = os.path.splitext(sfn)[1] if len(lines) > 0: generate_automodsumm_docs( lines, sfn, builder=app.builder, warn=app.warn, info=app.info, suffix=suffix, base_path=app.srcdir, inherited_members=app.config.automodsumm_inherited_members) # _automodsummrex = re.compile(r'^(\s*)\.\. automodsumm::\s*([A-Za-z0-9_.]+)\s*' # r'\n\1(\s*)(\S|$)', re.MULTILINE) _lineendrex = r'(?:\n|$)' _hdrex = r'^\n?(\s*)\.\. automodsumm::\s*(\S+)\s*' + _lineendrex _oprex1 = r'(?:\1(\s+)\S.*' + _lineendrex + ')' _oprex2 = r'(?:\1\4\S.*' + _lineendrex + ')' _automodsummrex = re.compile(_hdrex + '(' + _oprex1 + '?' + _oprex2 + '*)', re.MULTILINE) def automodsumm_to_autosummary_lines(fn, app): """ Generates lines from a file with an "automodsumm" entry suitable for feeding into "autosummary". Searches the provided file for `automodsumm` directives and returns a list of lines specifying the `autosummary` commands for the modules requested. This does *not* return the whole file contents - just an autosummary section in place of any :automodsumm: entries. Note that any options given for `automodsumm` are also included in the generated `autosummary` section. Parameters ---------- fn : str The name of the file to search for `automodsumm` entries. 
app : sphinx.application.Application The sphinx Application object Return ------ lines : list of str Lines for all `automodsumm` entries with the entries replaced by `autosummary` and the module's members added. """ fullfn = os.path.join(app.builder.env.srcdir, fn) with io.open(fullfn, encoding='utf8') as fr: # Note: we use __name__ here instead of just writing the module name in # case this extension is bundled into another package from . import automodapi try: extensions = app.extensions except AttributeError: # Sphinx <1.6 extensions = app._extensions if automodapi.__name__ in extensions: # Must do the automodapi on the source to get the automodsumm # that might be in there docname = os.path.splitext(fn)[0] filestr = automodapi.automodapi_replace(fr.read(), app, True, docname, False) else: filestr = fr.read() spl = _automodsummrex.split(filestr) # 0th entry is the stuff before the first automodsumm line indent1s = spl[1::5] mods = spl[2::5] opssecs = spl[3::5] indent2s = spl[4::5] remainders = spl[5::5] # only grab automodsumm sections and convert them to autosummary with the # entries for all the public objects newlines = [] # loop over all automodsumms in this document for i, (i1, i2, modnm, ops, rem) in enumerate(zip(indent1s, indent2s, mods, opssecs, remainders)): allindent = i1 + (' ' if i2 is None else i2) # filter out functions-only, classes-only, and ariables-only # options if present. oplines = ops.split('\n') toskip = [] allowedpkgnms = [] funcsonly = clssonly = varsonly = False for i, ln in reversed(list(enumerate(oplines))): if ':functions-only:' in ln: funcsonly = True del oplines[i] if ':classes-only:' in ln: clssonly = True del oplines[i] if ':variables-only:' in ln: varsonly = True del oplines[i] if ':skip:' in ln: toskip.extend(_str_list_converter(ln.replace(':skip:', ''))) del oplines[i] if ':allowed-package-names:' in ln: allowedpkgnms.extend(_str_list_converter(ln.replace(':allowed-package-names:', ''))) del oplines[i] if [funcsonly, clssonly, varsonly].count(True) > 1: msg = ('Defined more than one of functions-only, classes-only, ' 'and variables-only. Skipping this directive.') lnnum = sum([spl[j].count('\n') for j in range(i * 5 + 1)]) app.warn('[automodsumm]' + msg, (fn, lnnum)) continue # Use the currentmodule directive so we can just put the local names # in the autosummary table. Note that this doesn't always seem to # actually "take" in Sphinx's eyes, so in `Automodsumm.run`, we have to # force it internally, as well. newlines.extend([i1 + '.. currentmodule:: ' + modnm, '', '.. autosummary::']) newlines.extend(oplines) ols = True if len(allowedpkgnms) == 0 else allowedpkgnms for nm, fqn, obj in zip(*find_mod_objs(modnm, onlylocals=ols)): if nm in toskip: continue if funcsonly and not inspect.isroutine(obj): continue if clssonly and not inspect.isclass(obj): continue if varsonly and (inspect.isclass(obj) or inspect.isroutine(obj)): continue newlines.append(allindent + nm) # add one newline at the end of the autosummary block newlines.append('') return newlines def generate_automodsumm_docs(lines, srcfn, suffix='.rst', warn=None, info=None, base_path=None, builder=None, template_dir=None, inherited_members=False): """ This function is adapted from `sphinx.ext.autosummary.generate.generate_autosummmary_docs` to generate source for the automodsumm directives that should be autosummarized. Unlike generate_autosummary_docs, this function is called one file at a time. 
""" from sphinx.jinja2glue import BuiltinTemplateLoader from sphinx.ext.autosummary import import_by_name, get_documenter from sphinx.ext.autosummary.generate import (_simple_info, _simple_warn) from sphinx.util.osutil import ensuredir from sphinx.util.inspect import safe_getattr from jinja2 import FileSystemLoader, TemplateNotFound from jinja2.sandbox import SandboxedEnvironment from .utils import find_autosummary_in_lines_for_automodsumm as find_autosummary_in_lines if info is None: info = _simple_info if warn is None: warn = _simple_warn # info('[automodsumm] generating automodsumm for: ' + srcfn) # Create our own templating environment - here we use Astropy's # templates rather than the default autosummary templates, in order to # allow docstrings to be shown for methods. template_dirs = [os.path.join(os.path.dirname(__file__), 'templates'), os.path.join(base_path, '_templates')] if builder is not None: # allow the user to override the templates template_loader = BuiltinTemplateLoader() template_loader.init(builder, dirs=template_dirs) else: if template_dir: template_dirs.insert(0, template_dir) template_loader = FileSystemLoader(template_dirs) template_env = SandboxedEnvironment(loader=template_loader) # read # items = find_autosummary_in_files(sources) items = find_autosummary_in_lines(lines, filename=srcfn) if len(items) > 0: msg = '[automodsumm] {1}: found {0} automodsumm entries to generate' info(msg.format(len(items), srcfn)) # gennms = [item[0] for item in items] # if len(gennms) > 20: # gennms = gennms[:10] + ['...'] + gennms[-10:] # info('[automodsumm] generating autosummary for: ' + ', '.join(gennms)) # remove possible duplicates items = list(set(items)) # keep track of new files new_files = [] # write for name, path, template_name, inherited_mem in sorted(items): if path is None: # The corresponding autosummary:: directive did not have # a :toctree: option continue path = os.path.abspath(os.path.join(base_path, path)) ensuredir(path) try: import_by_name_values = import_by_name(name) except ImportError as e: warn('[automodsumm] failed to import %r: %s' % (name, e)) continue # if block to accommodate Sphinx's v1.2.2 and v1.2.3 respectively if len(import_by_name_values) == 3: name, obj, parent = import_by_name_values elif len(import_by_name_values) == 4: name, obj, parent, module_name = import_by_name_values fn = os.path.join(path, name + suffix) # skip it if it exists if os.path.isfile(fn): continue new_files.append(fn) f = open(fn, 'w') try: doc = get_documenter(obj, parent) if template_name is not None: template = template_env.get_template(template_name) else: tmplstr = 'autosummary_core/%s.rst' try: template = template_env.get_template(tmplstr % doc.objtype) except TemplateNotFound: template = template_env.get_template(tmplstr % 'base') def get_members_mod(obj, typ, include_public=[]): """ typ = None -> all """ items = [] for name in dir(obj): try: documenter = get_documenter(safe_getattr(obj, name), obj) except AttributeError: continue if typ is None or documenter.objtype == typ: items.append(name) public = [x for x in items if x in include_public or not x.startswith('_')] return public, items def get_members_class(obj, typ, include_public=[], include_base=False): """ typ = None -> all include_base -> include attrs that are from a base class """ items = [] # using dir gets all of the attributes, including the elements # from the base class, otherwise use __slots__ or __dict__ if include_base: names = dir(obj) else: if hasattr(obj, '__slots__'): names = tuple(getattr(obj, 
'__slots__')) else: names = getattr(obj, '__dict__').keys() for name in names: try: documenter = get_documenter(safe_getattr(obj, name), obj) except AttributeError: continue if typ is None or documenter.objtype == typ: items.append(name) public = [x for x in items if x in include_public or not x.startswith('_')] return public, items ns = {} if doc.objtype == 'module': ns['members'] = get_members_mod(obj, None) ns['functions'], ns['all_functions'] = \ get_members_mod(obj, 'function') ns['classes'], ns['all_classes'] = \ get_members_mod(obj, 'class') ns['exceptions'], ns['all_exceptions'] = \ get_members_mod(obj, 'exception') elif doc.objtype == 'class': if inherited_mem is not None: # option set in this specifc directive include_base = inherited_mem else: # use default value include_base = inherited_members api_class_methods = ['__init__', '__call__'] ns['members'] = get_members_class(obj, None, include_base=include_base) ns['methods'], ns['all_methods'] = \ get_members_class(obj, 'method', api_class_methods, include_base=include_base) ns['attributes'], ns['all_attributes'] = \ get_members_class(obj, 'attribute', include_base=include_base) ns['methods'].sort() ns['attributes'].sort() parts = name.split('.') if doc.objtype in ('method', 'attribute'): mod_name = '.'.join(parts[:-2]) cls_name = parts[-2] obj_name = '.'.join(parts[-2:]) ns['class'] = cls_name else: mod_name, obj_name = '.'.join(parts[:-1]), parts[-1] ns['fullname'] = name ns['module'] = mod_name ns['objname'] = obj_name ns['name'] = parts[-1] ns['objtype'] = doc.objtype ns['underline'] = len(obj_name) * '=' # We now check whether a file for reference footnotes exists for # the module being documented. We first check if the # current module is a file or a directory, as this will give a # different path for the reference file. For example, if # documenting astropy.wcs then the reference file is at # ../wcs/references.txt, while if we are documenting # astropy.config.logging_helper (which is at # astropy/config/logging_helper.py) then the reference file is set # to ../config/references.txt if '.' in mod_name: mod_name_dir = mod_name.replace('.', '/').split('/', 1)[1] else: mod_name_dir = mod_name if not os.path.isdir(os.path.join(base_path, mod_name_dir)) \ and os.path.isdir(os.path.join(base_path, mod_name_dir.rsplit('/', 1)[0])): mod_name_dir = mod_name_dir.rsplit('/', 1)[0] # We then have to check whether it exists, and if so, we pass it # to the template. if os.path.exists(os.path.join(base_path, mod_name_dir, 'references.txt')): # An important subtlety here is that the path we pass in has # to be relative to the file being generated, so we have to # figure out the right number of '..'s ndirsback = path.replace(base_path, '').count('/') ref_file_rel_segments = ['..'] * ndirsback ref_file_rel_segments.append(mod_name_dir) ref_file_rel_segments.append('references.txt') ns['referencefile'] = os.path.join(*ref_file_rel_segments) rendered = template.render(**ns) f.write(cleanup_whitespace(rendered)) finally: f.close() def setup(app): # need autodoc fixes # Note: we use __name__ here instead of just writing the module name in # case this extension is bundled into another package from . 
import autodoc_enhancements app.setup_extension(autodoc_enhancements.__name__) # need inheritance-diagram for automod-diagram app.setup_extension('sphinx.ext.inheritance_diagram') app.add_directive('automod-diagram', Automoddiagram) app.add_directive('automodsumm', Automodsumm) app.connect('builder-inited', process_automodsumm_generation) app.add_config_value('automodsumm_writereprocessed', False, True) app.add_config_value('automodsumm_inherited_members', False, 'env') photutils-0.4/astropy_helpers/astropy_helpers/extern/automodapi/smart_resolver.py0000644000214200020070000000717713175633272033356 0ustar lbradleySTSCI\science00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst """ The classes in the astropy docs are documented by their API location, which is not necessarily where they are defined in the source. This causes a problem when certain automated features of the doc build, such as the inheritance diagrams or the `Bases` list of a class reference a class by its canonical location rather than its "user" location. In the `autodoc-process-docstring` event, a mapping from the actual name to the API name is maintained. Later, in the `missing-reference` event, unresolved references are looked up in this dictionary and corrected if possible. """ from docutils.nodes import literal, reference def process_docstring(app, what, name, obj, options, lines): if isinstance(obj, type): env = app.env if not hasattr(env, 'class_name_mapping'): env.class_name_mapping = {} mapping = env.class_name_mapping mapping[obj.__module__ + '.' + obj.__name__] = name def missing_reference_handler(app, env, node, contnode): if not hasattr(env, 'class_name_mapping'): env.class_name_mapping = {} mapping = env.class_name_mapping reftype = node['reftype'] reftarget = node['reftarget'] if reftype in ('obj', 'class', 'exc', 'meth'): reftarget = node['reftarget'] suffix = '' if reftarget not in mapping: if '.' in reftarget: front, suffix = reftarget.rsplit('.', 1) else: suffix = reftarget if suffix.startswith('_') and not suffix.startswith('__'): # If this is a reference to a hidden class or method, # we can't link to it, but we don't want to have a # nitpick warning. return node[0].deepcopy() if reftype in ('obj', 'meth') and '.' in reftarget: if front in mapping: reftarget = front suffix = '.' + suffix if (reftype in ('class', ) and '.' in reftarget and reftarget not in mapping): if '.' in front: reftarget, _ = front.rsplit('.', 1) suffix = '.' 
+ suffix reftarget = reftarget + suffix prefix = reftarget.rsplit('.')[0] inventory = env.intersphinx_named_inventory if (reftarget not in mapping and prefix in inventory): if reftarget in inventory[prefix]['py:class']: newtarget = inventory[prefix]['py:class'][reftarget][2] if not node['refexplicit'] and \ '~' not in node.rawsource: contnode = literal(text=reftarget) newnode = reference('', '', internal=True) newnode['reftitle'] = reftarget newnode['refuri'] = newtarget newnode.append(contnode) return newnode if reftarget in mapping: newtarget = mapping[reftarget] + suffix if not node['refexplicit'] and '~' not in node.rawsource: contnode = literal(text=newtarget) newnode = env.domains['py'].resolve_xref( env, node['refdoc'], app.builder, 'class', newtarget, node, contnode) if newnode is not None: newnode['reftitle'] = reftarget return newnode def setup(app): app.connect('autodoc-process-docstring', process_docstring) app.connect('missing-reference', missing_reference_handler) photutils-0.4/astropy_helpers/astropy_helpers/extern/automodapi/utils.py0000644000214200020070000001574413175633272031446 0ustar lbradleySTSCI\science00000000000000import inspect import sys import re import os from warnings import warn from sphinx.ext.autosummary.generate import find_autosummary_in_docstring if sys.version_info[0] >= 3: def iteritems(dictionary): return dictionary.items() else: def iteritems(dictionary): return dictionary.iteritems() # We use \n instead of os.linesep because even on Windows, the generated files # use \n as the newline character. SPACE_NEWLINE = ' \n' SINGLE_NEWLINE = '\n' DOUBLE_NEWLINE = '\n\n' TRIPLE_NEWLINE = '\n\n\n' def cleanup_whitespace(text): """ Make sure there are never more than two consecutive newlines, and that there are no trailing whitespaces. """ # Get rid of overall leading/trailing whitespace text = text.strip() + '\n' # Get rid of trailing whitespace on each line while SPACE_NEWLINE in text: text = text.replace(SPACE_NEWLINE, SINGLE_NEWLINE) # Avoid too many consecutive newlines while TRIPLE_NEWLINE in text: text = text.replace(TRIPLE_NEWLINE, DOUBLE_NEWLINE) return text def find_mod_objs(modname, onlylocals=False): """ Returns all the public attributes of a module referenced by name. .. note:: The returned list *not* include subpackages or modules of `modname`,nor does it include private attributes (those that beginwith '_' or are not in `__all__`). Parameters ---------- modname : str The name of the module to search. onlylocals : bool If True, only attributes that are either members of `modname` OR one of its modules or subpackages will be included. Returns ------- localnames : list of str A list of the names of the attributes as they are named in the module `modname` . fqnames : list of str A list of the full qualified names of the attributes (e.g., ``astropy.utils.misc.find_mod_objs``). For attributes that are simple variables, this is based on the local name, but for functions or classes it can be different if they are actually defined elsewhere and just referenced in `modname`. 
objs : list of objects A list of the actual attributes themselves (in the same order as the other arguments) """ __import__(modname) mod = sys.modules[modname] if hasattr(mod, '__all__'): pkgitems = [(k, mod.__dict__[k]) for k in mod.__all__] else: pkgitems = [(k, mod.__dict__[k]) for k in dir(mod) if k[0] != '_'] # filter out modules and pull the names and objs out ismodule = inspect.ismodule localnames = [k for k, v in pkgitems if not ismodule(v)] objs = [v for k, v in pkgitems if not ismodule(v)] # fully qualified names can be determined from the object's module fqnames = [] for obj, lnm in zip(objs, localnames): if hasattr(obj, '__module__') and hasattr(obj, '__name__'): fqnames.append(obj.__module__ + '.' + obj.__name__) else: fqnames.append(modname + '.' + lnm) if onlylocals: valids = [fqn.startswith(modname) for fqn in fqnames] localnames = [e for i, e in enumerate(localnames) if valids[i]] fqnames = [e for i, e in enumerate(fqnames) if valids[i]] objs = [e for i, e in enumerate(objs) if valids[i]] return localnames, fqnames, objs def find_autosummary_in_lines_for_automodsumm(lines, module=None, filename=None): """Find out what items appear in autosummary:: directives in the given lines. Returns a list of (name, toctree, template, inherited_members) where *name* is a name of an object and *toctree* the :toctree: path of the corresponding autosummary directive (relative to the root of the file name), *template* the value of the :template: option, and *inherited_members* is the value of the :inherited-members: option. *toctree*, *template*, and *inherited_members* are ``None`` if the directive does not have the corresponding options set. .. note:: This is a slightly modified version of ``sphinx.ext.autosummary.generate.find_autosummary_in_lines`` which recognizes the ``inherited-members`` option. 
""" autosummary_re = re.compile(r'^(\s*)\.\.\s+autosummary::\s*') automodule_re = re.compile( r'^\s*\.\.\s+automodule::\s*([A-Za-z0-9_.]+)\s*$') module_re = re.compile( r'^\s*\.\.\s+(current)?module::\s*([a-zA-Z0-9_.]+)\s*$') autosummary_item_re = re.compile(r'^\s+(~?[_a-zA-Z][a-zA-Z0-9_.]*)\s*.*?') toctree_arg_re = re.compile(r'^\s+:toctree:\s*(.*?)\s*$') template_arg_re = re.compile(r'^\s+:template:\s*(.*?)\s*$') inherited_members_arg_re = re.compile(r'^\s+:inherited-members:\s*$') no_inherited_members_arg_re = re.compile(r'^\s+:no-inherited-members:\s*$') documented = [] toctree = None template = None inherited_members = None current_module = module in_autosummary = False base_indent = "" for line in lines: if in_autosummary: m = toctree_arg_re.match(line) if m: toctree = m.group(1) if filename: toctree = os.path.join(os.path.dirname(filename), toctree) continue m = template_arg_re.match(line) if m: template = m.group(1).strip() continue m = inherited_members_arg_re.match(line) if m: inherited_members = True continue m = no_inherited_members_arg_re.match(line) if m: inherited_members = False continue if line.strip().startswith(':'): warn(line) continue # skip options m = autosummary_item_re.match(line) if m: name = m.group(1).strip() if name.startswith('~'): name = name[1:] if current_module and \ not name.startswith(current_module + '.'): name = "%s.%s" % (current_module, name) documented.append((name, toctree, template, inherited_members)) continue if not line.strip() or line.startswith(base_indent + " "): continue in_autosummary = False m = autosummary_re.match(line) if m: in_autosummary = True base_indent = m.group(1) toctree = None template = None inherited_members = None continue m = automodule_re.search(line) if m: current_module = m.group(1).strip() # recurse into the automodule docstring documented.extend(find_autosummary_in_docstring( current_module, filename=filename)) continue m = module_re.match(line) if m: current_module = m.group(2) continue return documented photutils-0.4/astropy_helpers/astropy_helpers/extern/numpydoc/0000755000214200020070000000000013175654702027416 5ustar lbradleySTSCI\science00000000000000photutils-0.4/astropy_helpers/astropy_helpers/extern/numpydoc/__init__.py0000644000214200020070000000016513175633272031530 0ustar lbradleySTSCI\science00000000000000from __future__ import division, absolute_import, print_function __version__ = '0.7.0' from .numpydoc import setup photutils-0.4/astropy_helpers/astropy_helpers/extern/numpydoc/docscrape.py0000644000214200020070000004452113175633272031740 0ustar lbradleySTSCI\science00000000000000"""Extract reference documentation from the NumPy source tree. """ from __future__ import division, absolute_import, print_function import inspect import textwrap import re import pydoc from warnings import warn import collections import copy import sys class Reader(object): """A line-based string reader. """ def __init__(self, data): """ Parameters ---------- data : str String with lines separated by '\n'. 
""" if isinstance(data, list): self._str = data else: self._str = data.split('\n') # store string as list of lines self.reset() def __getitem__(self, n): return self._str[n] def reset(self): self._l = 0 # current line nr def read(self): if not self.eof(): out = self[self._l] self._l += 1 return out else: return '' def seek_next_non_empty_line(self): for l in self[self._l:]: if l.strip(): break else: self._l += 1 def eof(self): return self._l >= len(self._str) def read_to_condition(self, condition_func): start = self._l for line in self[start:]: if condition_func(line): return self[start:self._l] self._l += 1 if self.eof(): return self[start:self._l+1] return [] def read_to_next_empty_line(self): self.seek_next_non_empty_line() def is_empty(line): return not line.strip() return self.read_to_condition(is_empty) def read_to_next_unindented_line(self): def is_unindented(line): return (line.strip() and (len(line.lstrip()) == len(line))) return self.read_to_condition(is_unindented) def peek(self, n=0): if self._l + n < len(self._str): return self[self._l + n] else: return '' def is_empty(self): return not ''.join(self._str).strip() class ParseError(Exception): def __str__(self): message = self.args[0] if hasattr(self, 'docstring'): message = "%s in %r" % (message, self.docstring) return message class NumpyDocString(collections.Mapping): sections = { 'Signature': '', 'Summary': [''], 'Extended Summary': [], 'Parameters': [], 'Returns': [], 'Yields': [], 'Raises': [], 'Warns': [], 'Other Parameters': [], 'Attributes': [], 'Methods': [], 'See Also': [], 'Notes': [], 'Warnings': [], 'References': '', 'Examples': '', 'index': {} } def __init__(self, docstring, config={}): orig_docstring = docstring docstring = textwrap.dedent(docstring).split('\n') self._doc = Reader(docstring) self._parsed_data = copy.deepcopy(self.sections) try: self._parse() except ParseError as e: e.docstring = orig_docstring raise def __getitem__(self, key): return self._parsed_data[key] def __setitem__(self, key, val): if key not in self._parsed_data: warn("Unknown section %s" % key) else: self._parsed_data[key] = val def __iter__(self): return iter(self._parsed_data) def __len__(self): return len(self._parsed_data) def _is_at_section(self): self._doc.seek_next_non_empty_line() if self._doc.eof(): return False l1 = self._doc.peek().strip() # e.g. Parameters if l1.startswith('.. 
index::'): return True l2 = self._doc.peek(1).strip() # ---------- or ========== return l2.startswith('-'*len(l1)) or l2.startswith('='*len(l1)) def _strip(self, doc): i = 0 j = 0 for i, line in enumerate(doc): if line.strip(): break for j, line in enumerate(doc[::-1]): if line.strip(): break return doc[i:len(doc)-j] def _read_to_next_section(self): section = self._doc.read_to_next_empty_line() while not self._is_at_section() and not self._doc.eof(): if not self._doc.peek(-1).strip(): # previous line was empty section += [''] section += self._doc.read_to_next_empty_line() return section def _read_sections(self): while not self._doc.eof(): data = self._read_to_next_section() name = data[0].strip() if name.startswith('..'): # index section yield name, data[1:] elif len(data) < 2: yield StopIteration else: yield name, self._strip(data[2:]) def _parse_param_list(self, content): r = Reader(content) params = [] while not r.eof(): header = r.read().strip() if ' : ' in header: arg_name, arg_type = header.split(' : ')[:2] else: arg_name, arg_type = header, '' desc = r.read_to_next_unindented_line() desc = dedent_lines(desc) params.append((arg_name, arg_type, desc)) return params _name_rgx = re.compile(r"^\s*(:(?P\w+):`(?P[a-zA-Z0-9_.-]+)`|" r" (?P[a-zA-Z0-9_.-]+))\s*", re.X) def _parse_see_also(self, content): """ func_name : Descriptive text continued text another_func_name : Descriptive text func_name1, func_name2, :meth:`func_name`, func_name3 """ items = [] def parse_item_name(text): """Match ':role:`name`' or 'name'""" m = self._name_rgx.match(text) if m: g = m.groups() if g[1] is None: return g[3], None else: return g[2], g[1] raise ParseError("%s is not a item name" % text) def push_item(name, rest): if not name: return name, role = parse_item_name(name) items.append((name, list(rest), role)) del rest[:] current_func = None rest = [] for line in content: if not line.strip(): continue m = self._name_rgx.match(line) if m and line[m.end():].strip().startswith(':'): push_item(current_func, rest) current_func, line = line[:m.end()], line[m.end():] rest = [line.split(':', 1)[1].strip()] if not rest[0]: rest = [] elif not line.startswith(' '): push_item(current_func, rest) current_func = None if ',' in line: for func in line.split(','): if func.strip(): push_item(func, []) elif line.strip(): current_func = line elif current_func is not None: rest.append(line.strip()) push_item(current_func, rest) return items def _parse_index(self, section, content): """ .. 
index: default :refguide: something, else, and more """ def strip_each_in(lst): return [s.strip() for s in lst] out = {} section = section.split('::') if len(section) > 1: out['default'] = strip_each_in(section[1].split(','))[0] for line in content: line = line.split(':') if len(line) > 2: out[line[1]] = strip_each_in(line[2].split(',')) return out def _parse_summary(self): """Grab signature (if given) and summary""" if self._is_at_section(): return # If several signatures present, take the last one while True: summary = self._doc.read_to_next_empty_line() summary_str = " ".join([s.strip() for s in summary]).strip() if re.compile('^([\w., ]+=)?\s*[\w\.]+\(.*\)$').match(summary_str): self['Signature'] = summary_str if not self._is_at_section(): continue break if summary is not None: self['Summary'] = summary if not self._is_at_section(): self['Extended Summary'] = self._read_to_next_section() def _parse(self): self._doc.reset() self._parse_summary() sections = list(self._read_sections()) section_names = set([section for section, content in sections]) has_returns = 'Returns' in section_names has_yields = 'Yields' in section_names # We could do more tests, but we are not. Arbitrarily. if has_returns and has_yields: msg = 'Docstring contains both a Returns and Yields section.' raise ValueError(msg) for (section, content) in sections: if not section.startswith('..'): section = (s.capitalize() for s in section.split(' ')) section = ' '.join(section) if self.get(section): if hasattr(self, '_obj'): # we know where the docs came from: try: filename = inspect.getsourcefile(self._obj) except TypeError: filename = None msg = ("The section %s appears twice in " "the docstring of %s in %s." % (section, self._obj, filename)) raise ValueError(msg) else: msg = ("The section %s appears twice" % section) raise ValueError(msg) if section in ('Parameters', 'Returns', 'Yields', 'Raises', 'Warns', 'Other Parameters', 'Attributes', 'Methods'): self[section] = self._parse_param_list(content) elif section.startswith('.. 
index::'): self['index'] = self._parse_index(section, content) elif section == 'See Also': self['See Also'] = self._parse_see_also(content) else: self[section] = content # string conversion routines def _str_header(self, name, symbol='-'): return [name, len(name)*symbol] def _str_indent(self, doc, indent=4): out = [] for line in doc: out += [' '*indent + line] return out def _str_signature(self): if self['Signature']: return [self['Signature'].replace('*', '\*')] + [''] else: return [''] def _str_summary(self): if self['Summary']: return self['Summary'] + [''] else: return [] def _str_extended_summary(self): if self['Extended Summary']: return self['Extended Summary'] + [''] else: return [] def _str_param_list(self, name): out = [] if self[name]: out += self._str_header(name) for param, param_type, desc in self[name]: if param_type: out += ['%s : %s' % (param, param_type)] else: out += [param] out += self._str_indent(desc) out += [''] return out def _str_section(self, name): out = [] if self[name]: out += self._str_header(name) out += self[name] out += [''] return out def _str_see_also(self, func_role): if not self['See Also']: return [] out = [] out += self._str_header("See Also") last_had_desc = True for func, desc, role in self['See Also']: if role: link = ':%s:`%s`' % (role, func) elif func_role: link = ':%s:`%s`' % (func_role, func) else: link = "`%s`_" % func if desc or last_had_desc: out += [''] out += [link] else: out[-1] += ", %s" % link if desc: out += self._str_indent([' '.join(desc)]) last_had_desc = True else: last_had_desc = False out += [''] return out def _str_index(self): idx = self['index'] out = [] out += ['.. index:: %s' % idx.get('default', '')] for section, references in idx.items(): if section == 'default': continue out += [' :%s: %s' % (section, ', '.join(references))] return out def __str__(self, func_role=''): out = [] out += self._str_signature() out += self._str_summary() out += self._str_extended_summary() for param_list in ('Parameters', 'Returns', 'Yields', 'Other Parameters', 'Raises', 'Warns'): out += self._str_param_list(param_list) out += self._str_section('Warnings') out += self._str_see_also(func_role) for s in ('Notes', 'References', 'Examples'): out += self._str_section(s) for param_list in ('Attributes', 'Methods'): out += self._str_param_list(param_list) out += self._str_index() return '\n'.join(out) def indent(str, indent=4): indent_str = ' '*indent if str is None: return indent_str lines = str.split('\n') return '\n'.join(indent_str + l for l in lines) def dedent_lines(lines): """Deindent a list of lines maximally""" return textwrap.dedent("\n".join(lines)).split("\n") def header(text, style='-'): return text + '\n' + style*len(text) + '\n' class FunctionDoc(NumpyDocString): def __init__(self, func, role='func', doc=None, config={}): self._f = func self._role = role # e.g. 
"func" or "meth" if doc is None: if func is None: raise ValueError("No function or docstring given") doc = inspect.getdoc(func) or '' NumpyDocString.__init__(self, doc) if not self['Signature'] and func is not None: func, func_name = self.get_func() try: try: signature = str(inspect.signature(func)) except (AttributeError, ValueError): # try to read signature, backward compat for older Python if sys.version_info[0] >= 3: argspec = inspect.getfullargspec(func) else: argspec = inspect.getargspec(func) signature = inspect.formatargspec(*argspec) signature = '%s%s' % (func_name, signature.replace('*', '\*')) except TypeError: signature = '%s()' % func_name self['Signature'] = signature def get_func(self): func_name = getattr(self._f, '__name__', self.__class__.__name__) if inspect.isclass(self._f): func = getattr(self._f, '__call__', self._f.__init__) else: func = self._f return func, func_name def __str__(self): out = '' func, func_name = self.get_func() signature = self['Signature'].replace('*', '\*') roles = {'func': 'function', 'meth': 'method'} if self._role: if self._role not in roles: print("Warning: invalid role %s" % self._role) out += '.. %s:: %s\n \n\n' % (roles.get(self._role, ''), func_name) out += super(FunctionDoc, self).__str__(func_role=self._role) return out class ClassDoc(NumpyDocString): extra_public_methods = ['__call__'] def __init__(self, cls, doc=None, modulename='', func_doc=FunctionDoc, config={}): if not inspect.isclass(cls) and cls is not None: raise ValueError("Expected a class or None, but got %r" % cls) self._cls = cls self.show_inherited_members = config.get( 'show_inherited_class_members', True) if modulename and not modulename.endswith('.'): modulename += '.' self._mod = modulename if doc is None: if cls is None: raise ValueError("No class or documentation string given") doc = pydoc.getdoc(cls) NumpyDocString.__init__(self, doc) if config.get('show_class_members', True): def splitlines_x(s): if not s: return [] else: return s.splitlines() for field, items in [('Methods', self.methods), ('Attributes', self.properties)]: if not self[field]: doc_list = [] for name in sorted(items): try: doc_item = pydoc.getdoc(getattr(self._cls, name)) doc_list.append((name, '', splitlines_x(doc_item))) except AttributeError: pass # method doesn't exist self[field] = doc_list @property def methods(self): if self._cls is None: return [] return [name for name, func in inspect.getmembers(self._cls) if ((not name.startswith('_') or name in self.extra_public_methods) and isinstance(func, collections.Callable) and self._is_show_member(name))] @property def properties(self): if self._cls is None: return [] return [name for name, func in inspect.getmembers(self._cls) if (not name.startswith('_') and (func is None or isinstance(func, property) or inspect.isgetsetdescriptor(func)) and self._is_show_member(name))] def _is_show_member(self, name): if self.show_inherited_members: return True # show all class members if name not in self._cls.__dict__: return False # class member is inherited, we do not show it return True photutils-0.4/astropy_helpers/astropy_helpers/extern/numpydoc/docscrape_sphinx.py0000644000214200020070000002510613175633272033327 0ustar lbradleySTSCI\science00000000000000from __future__ import division, absolute_import, print_function import sys import re import inspect import textwrap import pydoc import collections import os from jinja2 import FileSystemLoader from jinja2.sandbox import SandboxedEnvironment import sphinx from sphinx.jinja2glue import 
BuiltinTemplateLoader from .docscrape import NumpyDocString, FunctionDoc, ClassDoc if sys.version_info[0] >= 3: sixu = lambda s: s else: sixu = lambda s: unicode(s, 'unicode_escape') class SphinxDocString(NumpyDocString): def __init__(self, docstring, config={}): NumpyDocString.__init__(self, docstring, config=config) self.load_config(config) def load_config(self, config): self.use_plots = config.get('use_plots', False) self.class_members_toctree = config.get('class_members_toctree', True) self.template = config.get('template', None) if self.template is None: template_dirs = [os.path.join(os.path.dirname(__file__), 'templates')] template_loader = FileSystemLoader(template_dirs) template_env = SandboxedEnvironment(loader=template_loader) self.template = template_env.get_template('numpydoc_docstring.rst') # string conversion routines def _str_header(self, name, symbol='`'): return ['.. rubric:: ' + name, ''] def _str_field_list(self, name): return [':' + name + ':'] def _str_indent(self, doc, indent=4): out = [] for line in doc: out += [' '*indent + line] return out def _str_signature(self): return [''] if self['Signature']: return ['``%s``' % self['Signature']] + [''] else: return [''] def _str_summary(self): return self['Summary'] + [''] def _str_extended_summary(self): return self['Extended Summary'] + [''] def _str_returns(self, name='Returns'): out = [] if self[name]: out += self._str_field_list(name) out += [''] for param, param_type, desc in self[name]: if param_type: out += self._str_indent(['**%s** : %s' % (param.strip(), param_type)]) else: out += self._str_indent([param.strip()]) if desc: out += [''] out += self._str_indent(desc, 8) out += [''] return out def _str_param_list(self, name): out = [] if self[name]: out += self._str_field_list(name) out += [''] for param, param_type, desc in self[name]: if param_type: out += self._str_indent(['**%s** : %s' % (param.strip(), param_type)]) else: out += self._str_indent(['**%s**' % param.strip()]) if desc: out += [''] out += self._str_indent(desc, 8) out += [''] return out @property def _obj(self): if hasattr(self, '_cls'): return self._cls elif hasattr(self, '_f'): return self._f return None def _str_member_list(self, name): """ Generate a member listing, autosummary:: table where possible, and a table where not. """ out = [] if self[name]: out += ['.. rubric:: %s' % name, ''] prefix = getattr(self, '_name', '') if prefix: prefix = '~%s.' % prefix autosum = [] others = [] for param, param_type, desc in self[name]: param = param.strip() # Check if the referenced member can have a docstring or not param_obj = getattr(self._obj, param, None) if not (callable(param_obj) or isinstance(param_obj, property) or inspect.isgetsetdescriptor(param_obj)): param_obj = None if param_obj and (pydoc.getdoc(param_obj) or not desc): # Referenced object has a docstring autosum += [" %s%s" % (prefix, param)] else: others.append((param, param_type, desc)) if autosum: out += ['.. 
autosummary::'] if self.class_members_toctree: out += [' :toctree:'] out += [''] + autosum if others: maxlen_0 = max(3, max([len(x[0]) + 4 for x in others])) hdr = sixu("=") * maxlen_0 + sixu(" ") + sixu("=") * 10 fmt = sixu('%%%ds %%s ') % (maxlen_0,) out += ['', '', hdr] for param, param_type, desc in others: desc = sixu(" ").join(x.strip() for x in desc).strip() if param_type: desc = "(%s) %s" % (param_type, desc) out += [fmt % ("**" + param.strip() + "**", desc)] out += [hdr] out += [''] return out def _str_section(self, name): out = [] if self[name]: out += self._str_header(name) out += [''] content = textwrap.dedent("\n".join(self[name])).split("\n") out += content out += [''] return out def _str_see_also(self, func_role): out = [] if self['See Also']: see_also = super(SphinxDocString, self)._str_see_also(func_role) out = ['.. seealso::', ''] out += self._str_indent(see_also[2:]) return out def _str_warnings(self): out = [] if self['Warnings']: out = ['.. warning::', ''] out += self._str_indent(self['Warnings']) return out def _str_index(self): idx = self['index'] out = [] if len(idx) == 0: return out out += ['.. index:: %s' % idx.get('default', '')] for section, references in idx.items(): if section == 'default': continue elif section == 'refguide': out += [' single: %s' % (', '.join(references))] else: out += [' %s: %s' % (section, ','.join(references))] return out def _str_references(self): out = [] if self['References']: out += self._str_header('References') if isinstance(self['References'], str): self['References'] = [self['References']] out.extend(self['References']) out += [''] # Latex collects all references to a separate bibliography, # so we need to insert links to it if sphinx.__version__ >= "0.6": out += ['.. only:: latex', ''] else: out += ['.. latexonly::', ''] items = [] for line in self['References']: m = re.match(r'.. \[([a-z0-9._-]+)\]', line, re.I) if m: items.append(m.group(1)) out += [' ' + ", ".join(["[%s]_" % item for item in items]), ''] return out def _str_examples(self): examples_str = "\n".join(self['Examples']) if (self.use_plots and 'import matplotlib' in examples_str and 'plot::' not in examples_str): out = [] out += self._str_header('Examples') out += ['.. 
plot::', ''] out += self._str_indent(self['Examples']) out += [''] return out else: return self._str_section('Examples') def __str__(self, indent=0, func_role="obj"): ns = { 'signature': self._str_signature(), 'index': self._str_index(), 'summary': self._str_summary(), 'extended_summary': self._str_extended_summary(), 'parameters': self._str_param_list('Parameters'), 'returns': self._str_returns('Returns'), 'yields': self._str_returns('Yields'), 'other_parameters': self._str_param_list('Other Parameters'), 'raises': self._str_param_list('Raises'), 'warns': self._str_param_list('Warns'), 'warnings': self._str_warnings(), 'see_also': self._str_see_also(func_role), 'notes': self._str_section('Notes'), 'references': self._str_references(), 'examples': self._str_examples(), 'attributes': self._str_member_list('Attributes'), 'methods': self._str_member_list('Methods'), } ns = dict((k, '\n'.join(v)) for k, v in ns.items()) rendered = self.template.render(**ns) return '\n'.join(self._str_indent(rendered.split('\n'), indent)) class SphinxFunctionDoc(SphinxDocString, FunctionDoc): def __init__(self, obj, doc=None, config={}): self.load_config(config) FunctionDoc.__init__(self, obj, doc=doc, config=config) class SphinxClassDoc(SphinxDocString, ClassDoc): def __init__(self, obj, doc=None, func_doc=None, config={}): self.load_config(config) ClassDoc.__init__(self, obj, doc=doc, func_doc=None, config=config) class SphinxObjDoc(SphinxDocString): def __init__(self, obj, doc=None, config={}): self._f = obj self.load_config(config) SphinxDocString.__init__(self, doc, config=config) def get_doc_object(obj, what=None, doc=None, config={}, builder=None): if what is None: if inspect.isclass(obj): what = 'class' elif inspect.ismodule(obj): what = 'module' elif isinstance(obj, collections.Callable): what = 'function' else: what = 'object' template_dirs = [os.path.join(os.path.dirname(__file__), 'templates')] if builder is not None: template_loader = BuiltinTemplateLoader() template_loader.init(builder, dirs=template_dirs) else: template_loader = FileSystemLoader(template_dirs) template_env = SandboxedEnvironment(loader=template_loader) config['template'] = template_env.get_template('numpydoc_docstring.rst') if what == 'class': return SphinxClassDoc(obj, func_doc=SphinxFunctionDoc, doc=doc, config=config) elif what in ('function', 'method'): return SphinxFunctionDoc(obj, doc=doc, config=config) else: if doc is None: doc = pydoc.getdoc(obj) return SphinxObjDoc(obj, doc, config=config) photutils-0.4/astropy_helpers/astropy_helpers/extern/numpydoc/numpydoc.py0000644000214200020070000002253313175633272031632 0ustar lbradleySTSCI\science00000000000000""" ======== numpydoc ======== Sphinx extension that handles docstrings in the Numpy standard format. [1] It will: - Convert Parameters etc. sections to field lists. - Convert See Also section to a See also entry. - Renumber references. - Extract the signature from the docstring, if it can't be determined otherwise. .. 
[1] https://github.com/numpy/numpy/blob/master/doc/HOWTO_DOCUMENT.rst.txt """ from __future__ import division, absolute_import, print_function import sys import re import pydoc import sphinx import inspect import collections if sphinx.__version__ < '1.0.1': raise RuntimeError("Sphinx 1.0.1 or newer is required") from .docscrape_sphinx import get_doc_object, SphinxDocString if sys.version_info[0] >= 3: sixu = lambda s: s else: sixu = lambda s: unicode(s, 'unicode_escape') def rename_references(app, what, name, obj, options, lines, reference_offset=[0]): # replace reference numbers so that there are no duplicates references = [] for line in lines: line = line.strip() m = re.match(sixu('^.. \\[(%s)\\]') % app.config.numpydoc_citation_re, line, re.I) if m: references.append(m.group(1)) if references: for i, line in enumerate(lines): for r in references: if re.match(sixu('^\\d+$'), r): new_r = sixu("R%d") % (reference_offset[0] + int(r)) else: new_r = sixu("%s%d") % (r, reference_offset[0]) lines[i] = lines[i].replace(sixu('[%s]_') % r, sixu('[%s]_') % new_r) lines[i] = lines[i].replace(sixu('.. [%s]') % r, sixu('.. [%s]') % new_r) reference_offset[0] += len(references) def mangle_docstrings(app, what, name, obj, options, lines): cfg = {'use_plots': app.config.numpydoc_use_plots, 'show_class_members': app.config.numpydoc_show_class_members, 'show_inherited_class_members': app.config.numpydoc_show_inherited_class_members, 'class_members_toctree': app.config.numpydoc_class_members_toctree} u_NL = sixu('\n') if what == 'module': # Strip top title pattern = '^\\s*[#*=]{4,}\\n[a-z0-9 -]+\\n[#*=]{4,}\\s*' title_re = re.compile(sixu(pattern), re.I | re.S) lines[:] = title_re.sub(sixu(''), u_NL.join(lines)).split(u_NL) else: doc = get_doc_object(obj, what, u_NL.join(lines), config=cfg, builder=app.builder) if sys.version_info[0] >= 3: doc = str(doc) else: doc = unicode(doc) lines[:] = doc.split(u_NL) if (app.config.numpydoc_edit_link and hasattr(obj, '__name__') and obj.__name__): if hasattr(obj, '__module__'): v = dict(full_name=sixu("%s.%s") % (obj.__module__, obj.__name__)) else: v = dict(full_name=obj.__name__) lines += [sixu(''), sixu('.. 
htmlonly::'), sixu('')] lines += [sixu(' %s') % x for x in (app.config.numpydoc_edit_link % v).split("\n")] # call function to replace reference numbers so that there are no # duplicates rename_references(app, what, name, obj, options, lines) def mangle_signature(app, what, name, obj, options, sig, retann): # Do not try to inspect classes that don't define `__init__` if (inspect.isclass(obj) and (not hasattr(obj, '__init__') or 'initializes x; see ' in pydoc.getdoc(obj.__init__))): return '', '' if not (isinstance(obj, collections.Callable) or hasattr(obj, '__argspec_is_invalid_')): return if not hasattr(obj, '__doc__'): return doc = SphinxDocString(pydoc.getdoc(obj)) sig = doc['Signature'] or getattr(obj, '__text_signature__', None) if sig: sig = re.sub(sixu("^[^(]*"), sixu(""), sig) return sig, sixu('') def setup(app, get_doc_object_=get_doc_object): if not hasattr(app, 'add_config_value'): return # probably called by nose, better bail out global get_doc_object get_doc_object = get_doc_object_ app.connect('autodoc-process-docstring', mangle_docstrings) app.connect('autodoc-process-signature', mangle_signature) app.add_config_value('numpydoc_edit_link', None, False) app.add_config_value('numpydoc_use_plots', None, False) app.add_config_value('numpydoc_show_class_members', True, True) app.add_config_value('numpydoc_show_inherited_class_members', True, True) app.add_config_value('numpydoc_class_members_toctree', True, True) app.add_config_value('numpydoc_citation_re', '[a-z0-9_.-]+', True) # Extra mangling domains app.add_domain(NumpyPythonDomain) app.add_domain(NumpyCDomain) metadata = {'parallel_read_safe': True} return metadata # ------------------------------------------------------------------------------ # Docstring-mangling domains # ------------------------------------------------------------------------------ from docutils.statemachine import ViewList from sphinx.domains.c import CDomain from sphinx.domains.python import PythonDomain class ManglingDomainBase(object): directive_mangling_map = {} def __init__(self, *a, **kw): super(ManglingDomainBase, self).__init__(*a, **kw) self.wrap_mangling_directives() def wrap_mangling_directives(self): for name, objtype in list(self.directive_mangling_map.items()): self.directives[name] = wrap_mangling_directive( self.directives[name], objtype) class NumpyPythonDomain(ManglingDomainBase, PythonDomain): name = 'np' directive_mangling_map = { 'function': 'function', 'class': 'class', 'exception': 'class', 'method': 'function', 'classmethod': 'function', 'staticmethod': 'function', 'attribute': 'attribute', } indices = [] class NumpyCDomain(ManglingDomainBase, CDomain): name = 'np-c' directive_mangling_map = { 'function': 'function', 'member': 'attribute', 'macro': 'function', 'type': 'class', 'var': 'object', } def match_items(lines, content_old): """Create items for mangled lines. This function tries to match the lines in ``lines`` with the items (source file references and line numbers) in ``content_old``. The ``mangle_docstrings`` function changes the actual docstrings, but doesn't keep track of where each line came from. The manging does many operations on the original lines, which are hard to track afterwards. Many of the line changes come from deleting or inserting blank lines. This function tries to match lines by ignoring blank lines. All other changes (such as inserting figures or changes in the references) are completely ignored, so the generated line numbers will be off if ``mangle_docstrings`` does anything non-trivial. 
This is a best-effort function and the real fix would be to make ``mangle_docstrings`` actually keep track of the ``items`` together with the ``lines``. Examples -------- >>> lines = ['', 'A', '', 'B', ' ', '', 'C', 'D'] >>> lines_old = ['a', '', '', 'b', '', 'c'] >>> items_old = [('file1.py', 0), ('file1.py', 1), ('file1.py', 2), ... ('file2.py', 0), ('file2.py', 1), ('file2.py', 2)] >>> content_old = ViewList(lines_old, items=items_old) >>> match_items(lines, content_old) # doctest: +NORMALIZE_WHITESPACE [('file1.py', 0), ('file1.py', 0), ('file2.py', 0), ('file2.py', 0), ('file2.py', 2), ('file2.py', 2), ('file2.py', 2), ('file2.py', 2)] >>> # first 2 ``lines`` are matched to 'a', second 2 to 'b', rest to 'c' >>> # actual content is completely ignored. Notes ----- The algorithm tries to match any line in ``lines`` with one in ``lines_old``. It skips over all empty lines in ``lines_old`` and assigns this line number to all lines in ``lines``, unless a non-empty line is found in ``lines`` in which case it goes to the next line in ``lines_old``. """ items_new = [] lines_old = content_old.data items_old = content_old.items j = 0 for i, line in enumerate(lines): # go to next non-empty line in old: # line.strip() checks whether the string is all whitespace while j < len(lines_old) - 1 and not lines_old[j].strip(): j += 1 items_new.append(items_old[j]) if line.strip() and j < len(lines_old) - 1: j += 1 assert(len(items_new) == len(lines)) return items_new def wrap_mangling_directive(base_directive, objtype): class directive(base_directive): def run(self): env = self.state.document.settings.env name = None if self.arguments: m = re.match(r'^(.*\s+)?(.*?)(\(.*)?', self.arguments[0]) name = m.group(2).strip() if not name: name = self.arguments[0] lines = list(self.content) mangle_docstrings(env.app, objtype, name, None, None, lines) if self.content: items = match_items(lines, self.content) self.content = ViewList(lines, items=items, parent=self.content.parent) return base_directive.run(self) return directive photutils-0.4/astropy_helpers/astropy_helpers/extern/setup_package.py0000644000214200020070000000027513175633272030750 0ustar lbradleySTSCI\science00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst def get_package_data(): return {'astropy_helpers.extern': ['automodapi/templates/*/*.rst', 'numpydoc/templates/*.rst']} photutils-0.4/astropy_helpers/astropy_helpers/git_helpers.py0000644000214200020070000001450013175633272027131 0ustar lbradleySTSCI\science00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst """ Utilities for retrieving revision information from a project's git repository. """ # Do not remove the following comment; it is used by # astropy_helpers.version_helpers to determine the beginning of the code in # this module # BEGIN import locale import os import subprocess import warnings def _decode_stdio(stream): try: stdio_encoding = locale.getdefaultlocale()[1] or 'utf-8' except ValueError: stdio_encoding = 'utf-8' try: text = stream.decode(stdio_encoding) except UnicodeDecodeError: # Final fallback text = stream.decode('latin1') return text def update_git_devstr(version, path=None): """ Updates the git revision string if and only if the path is being imported directly from a git working copy. This ensures that the revision number in the version string is accurate. 
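    As a hedged illustration (the version string and checkout path below are
    hypothetical, and the result depends on the state of the repository)::

        from astropy_helpers.git_helpers import update_git_devstr

        # Inside a git working copy the '.dev' suffix is re-derived from the
        # current commit count, e.g. '0.4.dev123' -> '0.4.dev130'; outside a
        # repository, or for a release version, the input is returned as-is.
        version = update_git_devstr('0.4.dev123', path='/path/to/checkout')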
""" try: # Quick way to determine if we're in git or not - returns '' if not devstr = get_git_devstr(sha=True, show_warning=False, path=path) except OSError: return version if not devstr: # Probably not in git so just pass silently return version if 'dev' in version: # update to the current git revision version_base = version.split('.dev', 1)[0] devstr = get_git_devstr(sha=False, show_warning=False, path=path) return version_base + '.dev' + devstr else: # otherwise it's already the true/release version return version def get_git_devstr(sha=False, show_warning=True, path=None): """ Determines the number of revisions in this repository. Parameters ---------- sha : bool If True, the full SHA1 hash will be returned. Otherwise, the total count of commits in the repository will be used as a "revision number". show_warning : bool If True, issue a warning if git returns an error code, otherwise errors pass silently. path : str or None If a string, specifies the directory to look in to find the git repository. If `None`, the current working directory is used, and must be the root of the git repository. If given a filename it uses the directory containing that file. Returns ------- devversion : str Either a string with the revision number (if `sha` is False), the SHA1 hash of the current commit (if `sha` is True), or an empty string if git version info could not be identified. """ if path is None: path = os.getcwd() if not os.path.isdir(path): path = os.path.abspath(os.path.dirname(path)) if sha: # Faster for getting just the hash of HEAD cmd = ['rev-parse', 'HEAD'] else: cmd = ['rev-list', '--count', 'HEAD'] def run_git(cmd): try: p = subprocess.Popen(['git'] + cmd, cwd=path, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE) stdout, stderr = p.communicate() except OSError as e: if show_warning: warnings.warn('Error running git: ' + str(e)) return (None, b'', b'') if p.returncode == 128: if show_warning: warnings.warn('No git repository present at {0!r}! Using ' 'default dev version.'.format(path)) return (p.returncode, b'', b'') if p.returncode == 129: if show_warning: warnings.warn('Your git looks old (does it support {0}?); ' 'consider upgrading to v1.7.2 or ' 'later.'.format(cmd[0])) return (p.returncode, stdout, stderr) elif p.returncode != 0: if show_warning: warnings.warn('Git failed while determining revision ' 'count: {0}'.format(_decode_stdio(stderr))) return (p.returncode, stdout, stderr) return p.returncode, stdout, stderr returncode, stdout, stderr = run_git(cmd) if not sha and returncode == 128: # git returns 128 if the command is not run from within a git # repository tree. In this case, a warning is produced above but we # return the default dev version of '0'. 
return '0' elif not sha and returncode == 129: # git returns 129 if a command option failed to parse; in # particular this could happen in git versions older than 1.7.2 # where the --count option is not supported # Also use --abbrev-commit and --abbrev=0 to display the minimum # number of characters needed per-commit (rather than the full hash) cmd = ['rev-list', '--abbrev-commit', '--abbrev=0', 'HEAD'] returncode, stdout, stderr = run_git(cmd) # Fall back on the old method of getting all revisions and counting # the lines if returncode == 0: return str(stdout.count(b'\n')) else: return '' elif sha: return _decode_stdio(stdout)[:40] else: return _decode_stdio(stdout).strip() # This function is tested but it is only ever executed within a subprocess when # creating a fake package, so it doesn't get picked up by coverage metrics. def _get_repo_path(pathname, levels=None): # pragma: no cover """ Given a file or directory name, determine the root of the git repository this path is under. If given, this won't look any higher than ``levels`` (that is, if ``levels=0`` then the given path must be the root of the git repository and is returned if so. Returns `None` if the given path could not be determined to belong to a git repo. """ if os.path.isfile(pathname): current_dir = os.path.abspath(os.path.dirname(pathname)) elif os.path.isdir(pathname): current_dir = os.path.abspath(pathname) else: return None current_level = 0 while levels is None or current_level <= levels: if os.path.exists(os.path.join(current_dir, '.git')): return current_dir current_level += 1 if current_dir == os.path.dirname(current_dir): break current_dir = os.path.dirname(current_dir) return None photutils-0.4/astropy_helpers/astropy_helpers/openmp_helpers.py0000644000214200020070000000605313175633272027650 0ustar lbradleySTSCI\science00000000000000# This module defines functions that can be used to check whether OpenMP is # available and if so what flags to use. To use this, import the # add_openmp_flags_if_available function in a setup_package.py file where you # are defining your extensions: # # from astropy_helpers.openmp_helpers import add_openmp_flags_if_available # # then call it with a single extension as the only argument: # # add_openmp_flags_if_available(extension) # # this will add the OpenMP flags if available. from __future__ import absolute_import, print_function import os import sys import glob import tempfile import subprocess from distutils import log from distutils.ccompiler import new_compiler from distutils.sysconfig import customize_compiler from distutils.errors import CompileError, LinkError from .setup_helpers import get_compiler_option __all__ = ['add_openmp_flags_if_available'] CCODE = """ #include #include int main(void) { #pragma omp parallel printf("nthreads=%d\\n", omp_get_num_threads()); return 0; } """ def add_openmp_flags_if_available(extension): """ Add OpenMP compilation flags, if available (if not a warning will be printed to the console and no flags will be added) Returns `True` if the flags were added, `False` otherwise. 
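    For illustration, a minimal, hypothetical ``setup_package.py`` that opts a
    single extension in to OpenMP might look like this (the package and file
    names are placeholders)::

        from distutils.core import Extension
        from astropy_helpers.openmp_helpers import add_openmp_flags_if_available

        def get_extensions():
            ext = Extension('mypackage.fast_ops', ['mypackage/fast_ops.c'])
            # Adds the OpenMP compile/link flags when the test program builds
            # and runs correctly; otherwise a warning is printed and the
            # extension is left untouched.
            add_openmp_flags_if_available(ext)
            return [ext]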
""" ccompiler = new_compiler() customize_compiler(ccompiler) tmp_dir = tempfile.mkdtemp() start_dir = os.path.abspath('.') if get_compiler_option() == 'msvc': compile_flag = '-openmp' link_flag = '' else: compile_flag = '-fopenmp' link_flag = '-fopenmp' try: os.chdir(tmp_dir) with open('test_openmp.c', 'w') as f: f.write(CCODE) os.mkdir('objects') # Compile, link, and run test program ccompiler.compile(['test_openmp.c'], output_dir='objects', extra_postargs=[compile_flag]) ccompiler.link_executable(glob.glob(os.path.join('objects', '*')), 'test_openmp', extra_postargs=[link_flag]) output = subprocess.check_output('./test_openmp').decode(sys.stdout.encoding or 'utf-8').splitlines() if 'nthreads=' in output[0]: nthreads = int(output[0].strip().split('=')[1]) if len(output) == nthreads: using_openmp = True else: log.warn("Unexpected number of lines from output of test OpenMP " "program (output was {0})".format(output)) using_openmp = False else: log.warn("Unexpected output from test OpenMP " "program (output was {0})".format(output)) using_openmp = False except (CompileError, LinkError): using_openmp = False finally: os.chdir(start_dir) if using_openmp: log.info("Compiling Cython extension with OpenMP support") extension.extra_compile_args.append(compile_flag) extension.extra_link_args.append(link_flag) else: log.warn("Cannot compile Cython extension with OpenMP, reverting to non-parallel code") return using_openmp photutils-0.4/astropy_helpers/astropy_helpers/setup_helpers.py0000644000214200020070000006635513175633272027525 0ustar lbradleySTSCI\science00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst """ This module contains a number of utilities for use during setup/build/packaging that are useful to astropy as a whole. """ from __future__ import absolute_import, print_function import collections import os import re import subprocess import sys import traceback import warnings from distutils import log from distutils.dist import Distribution from distutils.errors import DistutilsOptionError, DistutilsModuleError from distutils.core import Extension from distutils.core import Command from distutils.command.sdist import sdist as DistutilsSdist from setuptools import find_packages as _find_packages from .distutils_helpers import (add_command_option, get_compiler_option, get_dummy_distribution, get_distutils_build_option, get_distutils_build_or_install_option) from .version_helpers import get_pkg_version_module from .utils import (walk_skip_hidden, import_file, extends_doc, resolve_name, AstropyDeprecationWarning) from .commands.build_ext import generate_build_ext_command from .commands.build_py import AstropyBuildPy from .commands.install import AstropyInstall from .commands.install_lib import AstropyInstallLib from .commands.register import AstropyRegister from .commands.test import AstropyTest # These imports are not used in this module, but are included for backwards # compat with older versions of this module from .utils import get_numpy_include_path, write_if_different # noqa from .commands.build_ext import should_build_with_cython, get_compiler_version # noqa _module_state = {'registered_commands': None, 'have_sphinx': False, 'package_cache': None, 'exclude_packages': set(), 'excludes_too_late': False} try: import sphinx # noqa _module_state['have_sphinx'] = True except ValueError as e: # This can occur deep in the bowels of Sphinx's imports by way of docutils # and an occurrence of this bug: http://bugs.python.org/issue18378 # In this case sphinx is 
effectively unusable if 'unknown locale' in e.args[0]: log.warn( "Possible misconfiguration of one of the environment variables " "LC_ALL, LC_CTYPES, LANG, or LANGUAGE. For an example of how to " "configure your system's language environment on OSX see " "http://blog.remibergsma.com/2012/07/10/" "setting-locales-correctly-on-mac-osx-terminal-application/") except ImportError: pass except SyntaxError: # occurs if markupsafe is recent version, which doesn't support Python 3.2 pass PY3 = sys.version_info[0] >= 3 # This adds a new keyword to the setup() function Distribution.skip_2to3 = [] def adjust_compiler(package): """ This function detects broken compilers and switches to another. If the environment variable CC is explicitly set, or a compiler is specified on the commandline, no override is performed -- the purpose here is to only override a default compiler. The specific compilers with problems are: * The default compiler in XCode-4.2, llvm-gcc-4.2, segfaults when compiling wcslib. The set of broken compilers can be updated by changing the compiler_mapping variable. It is a list of 2-tuples where the first in the pair is a regular expression matching the version of the broken compiler, and the second is the compiler to change to. """ warnings.warn( 'Direct use of the adjust_compiler function in setup.py is ' 'deprecated and can be removed from your setup.py. This ' 'functionality is now incorporated directly into the build_ext ' 'command.', AstropyDeprecationWarning) def get_debug_option(packagename): """ Determines if the build is in debug mode. Returns ------- debug : bool True if the current build was started with the debug option, False otherwise. """ try: current_debug = get_pkg_version_module(packagename, fromlist=['debug'])[0] except (ImportError, AttributeError): current_debug = None # Only modify the debug flag if one of the build commands was explicitly # run (i.e. not as a sub-command of something else) dist = get_dummy_distribution() if any(cmd in dist.commands for cmd in ['build', 'build_ext']): debug = bool(get_distutils_build_option('debug')) else: debug = bool(current_debug) if current_debug is not None and current_debug != debug: build_ext_cmd = dist.get_command_class('build_ext') build_ext_cmd.force_rebuild = True return debug def add_exclude_packages(excludes): if _module_state['excludes_too_late']: raise RuntimeError( "add_package_excludes must be called before all other setup helper " "functions in order to properly handle excluded packages") _module_state['exclude_packages'].add(set(excludes)) def register_commands(package, version, release, srcdir='.'): if _module_state['registered_commands'] is not None: return _module_state['registered_commands'] if _module_state['have_sphinx']: try: from .commands.build_sphinx import (AstropyBuildSphinx, AstropyBuildDocs) except ImportError: AstropyBuildSphinx = AstropyBuildDocs = FakeBuildSphinx else: AstropyBuildSphinx = AstropyBuildDocs = FakeBuildSphinx _module_state['registered_commands'] = registered_commands = { 'test': generate_test_command(package), # Use distutils' sdist because it respects package_data. 
# setuptools/distributes sdist requires duplication of information in # MANIFEST.in 'sdist': DistutilsSdist, # The exact form of the build_ext command depends on whether or not # we're building a release version 'build_ext': generate_build_ext_command(package, release), # We have a custom build_py to generate the default configuration file 'build_py': AstropyBuildPy, # Since install can (in some circumstances) be run without # first building, we also need to override install and # install_lib. See #2223 'install': AstropyInstall, 'install_lib': AstropyInstallLib, 'register': AstropyRegister, 'build_sphinx': AstropyBuildSphinx, 'build_docs': AstropyBuildDocs } # Need to override the __name__ here so that the commandline options are # presented as being related to the "build" command, for example; normally # this wouldn't be necessary since commands also have a command_name # attribute, but there is a bug in distutils' help display code that it # uses __name__ instead of command_name. Yay distutils! for name, cls in registered_commands.items(): cls.__name__ = name # Add a few custom options; more of these can be added by specific packages # later for option in [ ('use-system-libraries', "Use system libraries whenever possible", True)]: add_command_option('build', *option) add_command_option('install', *option) add_command_hooks(registered_commands, srcdir=srcdir) return registered_commands def add_command_hooks(commands, srcdir='.'): """ Look through setup_package.py modules for functions with names like ``pre__hook`` and ``post__hook`` where ```` is the name of a ``setup.py`` command (e.g. build_ext). If either hook is present this adds a wrapped version of that command to the passed in ``commands`` `dict`. ``commands`` may be pre-populated with other custom distutils command classes that should be wrapped if there are hooks for them (e.g. `AstropyBuildPy`). """ hook_re = re.compile(r'^(pre|post)_(.+)_hook$') # Distutils commands have a method of the same name, but it is not a # *classmethod* (which probably didn't exist when distutils was first # written) def get_command_name(cmdcls): if hasattr(cmdcls, 'command_name'): return cmdcls.command_name else: return cmdcls.__name__ packages = filter_packages(find_packages(srcdir)) dist = get_dummy_distribution() hooks = collections.defaultdict(dict) for setuppkg in iter_setup_packages(srcdir, packages): for name, obj in vars(setuppkg).items(): match = hook_re.match(name) if not match: continue hook_type = match.group(1) cmd_name = match.group(2) if hook_type not in hooks[cmd_name]: hooks[cmd_name][hook_type] = [] hooks[cmd_name][hook_type].append((setuppkg.__name__, obj)) for cmd_name, cmd_hooks in hooks.items(): commands[cmd_name] = generate_hooked_command( cmd_name, dist.get_command_class(cmd_name), cmd_hooks) def generate_hooked_command(cmd_name, cmd_cls, hooks): """ Returns a generated subclass of ``cmd_cls`` that runs the pre- and post-command hooks for that command before and after the ``cmd_cls.run`` method. """ def run(self, orig_run=cmd_cls.run): self.run_command_hooks('pre_hooks') orig_run(self) self.run_command_hooks('post_hooks') return type(cmd_name, (cmd_cls, object), {'run': run, 'run_command_hooks': run_command_hooks, 'pre_hooks': hooks.get('pre', []), 'post_hooks': hooks.get('post', [])}) def run_command_hooks(cmd_obj, hook_kind): """Run hooks registered for that command and phase. *cmd_obj* is a finalized command object; *hook_kind* is either 'pre_hook' or 'post_hook'. 
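    As a hedged illustration of the hook machinery (the function and package
    names below are hypothetical): a subpackage's ``setup_package.py`` may
    define ::

        def post_build_ext_hook(cmd_obj):
            # Called after the wrapped build_ext command has run;
            # ``cmd_obj`` is the finalized command instance.
            print('extensions built into', cmd_obj.build_lib)

    and ``add_command_hooks`` wraps the ``build_ext`` command class so that
    this function is invoked by ``run_command_hooks`` once the command's
    ``run()`` completes.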
""" hooks = getattr(cmd_obj, hook_kind, None) if not hooks: return for modname, hook in hooks: if isinstance(hook, str): try: hook_obj = resolve_name(hook) except ImportError as exc: raise DistutilsModuleError( 'cannot find hook {0}: {1}'.format(hook, exc)) else: hook_obj = hook if not callable(hook_obj): raise DistutilsOptionError('hook {0!r} is not callable' % hook) log.info('running {0} from {1} for {2} command'.format( hook_kind.rstrip('s'), modname, cmd_obj.get_command_name())) try: hook_obj(cmd_obj) except Exception: log.error('{0} command hook {1} raised an exception: %s\n'.format( hook_obj.__name__, cmd_obj.get_command_name())) log.error(traceback.format_exc()) sys.exit(1) def generate_test_command(package_name): """ Creates a custom 'test' command for the given package which sets the command's ``package_name`` class attribute to the name of the package being tested. """ return type(package_name.title() + 'Test', (AstropyTest,), {'package_name': package_name}) def update_package_files(srcdir, extensions, package_data, packagenames, package_dirs): """ This function is deprecated and maintained for backward compatibility with affiliated packages. Affiliated packages should update their setup.py to use `get_package_info` instead. """ info = get_package_info(srcdir) extensions.extend(info['ext_modules']) package_data.update(info['package_data']) packagenames = list(set(packagenames + info['packages'])) package_dirs.update(info['package_dir']) def get_package_info(srcdir='.', exclude=()): """ Collates all of the information for building all subpackages and returns a dictionary of keyword arguments that can be passed directly to `distutils.setup`. The purpose of this function is to allow subpackages to update the arguments to the package's ``setup()`` function in its setup.py script, rather than having to specify all extensions/package data directly in the ``setup.py``. See Astropy's own ``setup.py`` for example usage and the Astropy development docs for more details. This function obtains that information by iterating through all packages in ``srcdir`` and locating a ``setup_package.py`` module. This module can contain the following functions: ``get_extensions()``, ``get_package_data()``, ``get_build_options()``, ``get_external_libraries()``, and ``requires_2to3()``. Each of those functions take no arguments. - ``get_extensions`` returns a list of `distutils.extension.Extension` objects. - ``get_package_data()`` returns a dict formatted as required by the ``package_data`` argument to ``setup()``. - ``get_build_options()`` returns a list of tuples describing the extra build options to add. - ``get_external_libraries()`` returns a list of libraries that can optionally be built using external dependencies. - ``get_entry_points()`` returns a dict formatted as required by the ``entry_points`` argument to ``setup()``. - ``requires_2to3()`` should return `True` when the source code requires `2to3` processing to run on Python 3.x. If ``requires_2to3()`` is missing, it is assumed to return `True`. """ ext_modules = [] packages = [] package_data = {} package_dir = {} skip_2to3 = [] if exclude: warnings.warn( "Use of the exclude parameter is no longer supported since it does " "not work as expected. Use add_exclude_packages instead. 
Note that " "it must be called prior to any other calls from setup helpers.", AstropyDeprecationWarning) # Use the find_packages tool to locate all packages and modules packages = filter_packages(find_packages(srcdir, exclude=exclude)) # Update package_dir if the package lies in a subdirectory if srcdir != '.': package_dir[''] = srcdir # For each of the setup_package.py modules, extract any # information that is needed to install them. The build options # are extracted first, so that their values will be available in # subsequent calls to `get_extensions`, etc. for setuppkg in iter_setup_packages(srcdir, packages): if hasattr(setuppkg, 'get_build_options'): options = setuppkg.get_build_options() for option in options: add_command_option('build', *option) if hasattr(setuppkg, 'get_external_libraries'): libraries = setuppkg.get_external_libraries() for library in libraries: add_external_library(library) if hasattr(setuppkg, 'requires_2to3'): requires_2to3 = setuppkg.requires_2to3() else: requires_2to3 = True if not requires_2to3: skip_2to3.append( os.path.dirname(setuppkg.__file__)) for setuppkg in iter_setup_packages(srcdir, packages): # get_extensions must include any Cython extensions by their .pyx # filename. if hasattr(setuppkg, 'get_extensions'): ext_modules.extend(setuppkg.get_extensions()) if hasattr(setuppkg, 'get_package_data'): package_data.update(setuppkg.get_package_data()) # Locate any .pyx files not already specified, and add their extensions in. # The default include dirs include numpy to facilitate numerical work. ext_modules.extend(get_cython_extensions(srcdir, packages, ext_modules, ['numpy'])) # Now remove extensions that have the special name 'skip_cython', as they # exist Only to indicate that the cython extensions shouldn't be built for i, ext in reversed(list(enumerate(ext_modules))): if ext.name == 'skip_cython': del ext_modules[i] # On Microsoft compilers, we need to pass the '/MANIFEST' # commandline argument. This was the default on MSVC 9.0, but is # now required on MSVC 10.0, but it doesn't seem to hurt to add # it unconditionally. if get_compiler_option() == 'msvc': for ext in ext_modules: ext.extra_link_args.append('/MANIFEST') return { 'ext_modules': ext_modules, 'packages': packages, 'package_dir': package_dir, 'package_data': package_data, 'skip_2to3': skip_2to3 } def iter_setup_packages(srcdir, packages): """ A generator that finds and imports all of the ``setup_package.py`` modules in the source packages. Returns ------- modgen : generator A generator that yields (modname, mod), where `mod` is the module and `modname` is the module name for the ``setup_package.py`` modules. """ for packagename in packages: package_parts = packagename.split('.') package_path = os.path.join(srcdir, *package_parts) setup_package = os.path.relpath( os.path.join(package_path, 'setup_package.py')) if os.path.isfile(setup_package): module = import_file(setup_package, name=packagename + '.setup_package') yield module def iter_pyx_files(package_dir, package_name): """ A generator that yields Cython source files (ending in '.pyx') in the source packages. Returns ------- pyxgen : generator A generator that yields (extmod, fullfn) where `extmod` is the full name of the module that the .pyx file would live in based on the source directory structure, and `fullfn` is the path to the .pyx file. 
""" for dirpath, dirnames, filenames in walk_skip_hidden(package_dir): for fn in filenames: if fn.endswith('.pyx'): fullfn = os.path.relpath(os.path.join(dirpath, fn)) # Package must match file name extmod = '.'.join([package_name, fn[:-4]]) yield (extmod, fullfn) break # Don't recurse into subdirectories def get_cython_extensions(srcdir, packages, prevextensions=tuple(), extincludedirs=None): """ Looks for Cython files and generates Extensions if needed. Parameters ---------- srcdir : str Path to the root of the source directory to search. prevextensions : list of `~distutils.core.Extension` objects The extensions that are already defined. Any .pyx files already here will be ignored. extincludedirs : list of str or None Directories to include as the `include_dirs` argument to the generated `~distutils.core.Extension` objects. Returns ------- exts : list of `~distutils.core.Extension` objects The new extensions that are needed to compile all .pyx files (does not include any already in `prevextensions`). """ # Vanilla setuptools and old versions of distribute include Cython files # as .c files in the sources, not .pyx, so we cannot simply look for # existing .pyx sources in the previous sources, but we should also check # for .c files with the same remaining filename. So we look for .pyx and # .c files, and we strip the extension. prevsourcepaths = [] ext_modules = [] for ext in prevextensions: for s in ext.sources: if s.endswith(('.pyx', '.c', '.cpp')): sourcepath = os.path.realpath(os.path.splitext(s)[0]) prevsourcepaths.append(sourcepath) for package_name in packages: package_parts = package_name.split('.') package_path = os.path.join(srcdir, *package_parts) for extmod, pyxfn in iter_pyx_files(package_path, package_name): sourcepath = os.path.realpath(os.path.splitext(pyxfn)[0]) if sourcepath not in prevsourcepaths: ext_modules.append(Extension(extmod, [pyxfn], include_dirs=extincludedirs)) return ext_modules class DistutilsExtensionArgs(collections.defaultdict): """ A special dictionary whose default values are the empty list. This is useful for building up a set of arguments for `distutils.Extension` without worrying whether the entry is already present. """ def __init__(self, *args, **kwargs): def default_factory(): return [] super(DistutilsExtensionArgs, self).__init__( default_factory, *args, **kwargs) def update(self, other): for key, val in other.items(): self[key].extend(val) def pkg_config(packages, default_libraries, executable='pkg-config'): """ Uses pkg-config to update a set of distutils Extension arguments to include the flags necessary to link against the given packages. If the pkg-config lookup fails, default_libraries is applied to libraries. Parameters ---------- packages : list of str A list of pkg-config packages to look up. default_libraries : list of str A list of library names to use if the pkg-config lookup fails. Returns ------- config : dict A dictionary containing keyword arguments to `distutils.Extension`. 
These entries include: - ``include_dirs``: A list of include directories - ``library_dirs``: A list of library directories - ``libraries``: A list of libraries - ``define_macros``: A list of macro defines - ``undef_macros``: A list of macros to undefine - ``extra_compile_args``: A list of extra arguments to pass to the compiler """ flag_map = {'-I': 'include_dirs', '-L': 'library_dirs', '-l': 'libraries', '-D': 'define_macros', '-U': 'undef_macros'} command = "{0} --libs --cflags {1}".format(executable, ' '.join(packages)), result = DistutilsExtensionArgs() try: pipe = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE) output = pipe.communicate()[0].strip() except subprocess.CalledProcessError as e: lines = [ ("{0} failed. This may cause the build to fail below." .format(executable)), " command: {0}".format(e.cmd), " returncode: {0}".format(e.returncode), " output: {0}".format(e.output) ] log.warn('\n'.join(lines)) result['libraries'].extend(default_libraries) else: if pipe.returncode != 0: lines = [ "pkg-config could not lookup up package(s) {0}.".format( ", ".join(packages)), "This may cause the build to fail below." ] log.warn('\n'.join(lines)) result['libraries'].extend(default_libraries) else: for token in output.split(): # It's not clear what encoding the output of # pkg-config will come to us in. It will probably be # some combination of pure ASCII (for the compiler # flags) and the filesystem encoding (for any argument # that includes directories or filenames), but this is # just conjecture, as the pkg-config documentation # doesn't seem to address it. arg = token[:2].decode('ascii') value = token[2:].decode(sys.getfilesystemencoding()) if arg in flag_map: if arg == '-D': value = tuple(value.split('=', 1)) result[flag_map[arg]].append(value) else: result['extra_compile_args'].append(value) return result def add_external_library(library): """ Add a build option for selecting the internal or system copy of a library. Parameters ---------- library : str The name of the library. If the library is `foo`, the build option will be called `--use-system-foo`. """ for command in ['build', 'build_ext', 'install']: add_command_option(command, str('use-system-' + library), 'Use the system {0} library'.format(library), is_bool=True) def use_system_library(library): """ Returns `True` if the build configuration indicates that the given library should use the system copy of the library rather than the internal one. For the given library `foo`, this will be `True` if `--use-system-foo` or `--use-system-libraries` was provided at the commandline or in `setup.cfg`. Parameters ---------- library : str The name of the library Returns ------- use_system : bool `True` if the build should use the system copy of the library. """ return ( get_distutils_build_or_install_option('use_system_{0}'.format(library)) or get_distutils_build_or_install_option('use_system_libraries')) @extends_doc(_find_packages) def find_packages(where='.', exclude=(), invalidate_cache=False): """ This version of ``find_packages`` caches previous results to speed up subsequent calls. Use ``invalide_cache=True`` to ignore cached results from previous ``find_packages`` calls, and repeat the package search. """ if exclude: warnings.warn( "Use of the exclude parameter is no longer supported since it does " "not work as expected. Use add_exclude_packages instead. 
Note that " "it must be called prior to any other calls from setup helpers.", AstropyDeprecationWarning) # Calling add_exclude_packages after this point will have no effect _module_state['excludes_too_late'] = True if not invalidate_cache and _module_state['package_cache'] is not None: return _module_state['package_cache'] packages = _find_packages( where=where, exclude=list(_module_state['exclude_packages'])) _module_state['package_cache'] = packages return packages def filter_packages(packagenames): """ Removes some packages from the package list that shouldn't be installed on the current version of Python. """ if PY3: exclude = '_py2' else: exclude = '_py3' return [x for x in packagenames if not x.endswith(exclude)] class FakeBuildSphinx(Command): """ A dummy build_sphinx command that is called if Sphinx is not installed and displays a relevant error message """ # user options inherited from sphinx.setup_command.BuildDoc user_options = [ ('fresh-env', 'E', ''), ('all-files', 'a', ''), ('source-dir=', 's', ''), ('build-dir=', None, ''), ('config-dir=', 'c', ''), ('builder=', 'b', ''), ('project=', None, ''), ('version=', None, ''), ('release=', None, ''), ('today=', None, ''), ('link-index', 'i', '')] # user options appended in astropy.setup_helpers.AstropyBuildSphinx user_options.append(('warnings-returncode', 'w', '')) user_options.append(('clean-docs', 'l', '')) user_options.append(('no-intersphinx', 'n', '')) user_options.append(('open-docs-in-browser', 'o', '')) def initialize_options(self): try: raise RuntimeError("Sphinx and its dependencies must be installed " "for build_docs.") except: log.error('error: Sphinx and its dependencies must be installed ' 'for build_docs.') sys.exit(1) photutils-0.4/astropy_helpers/astropy_helpers/sphinx/0000755000214200020070000000000013175654702025564 5ustar lbradleySTSCI\science00000000000000photutils-0.4/astropy_helpers/astropy_helpers/sphinx/__init__.py0000644000214200020070000000066513175633272027703 0ustar lbradleySTSCI\science00000000000000""" This package contains utilities and extensions for the Astropy sphinx documentation. In particular, the `astropy.sphinx.conf` should be imported by the sphinx ``conf.py`` file for affiliated packages that wish to make use of the Astropy documentation format. Note that some sphinx extensions which are bundled as-is (numpydoc and sphinx-automodapi) are included in astropy_helpers.extern rather than astropy_helpers.sphinx.ext. """ photutils-0.4/astropy_helpers/astropy_helpers/sphinx/conf.py0000644000214200020070000002721713175633272027073 0ustar lbradleySTSCI\science00000000000000# -*- coding: utf-8 -*- # Licensed under a 3-clause BSD style license - see LICENSE.rst # # Astropy shared Sphinx settings. These settings are shared between # astropy itself and affiliated packages. # # Note that not all possible configuration values are present in this file. # # All configuration values have a default; values that are commented out # serve to show the default. import os import sys import warnings from os import path import sphinx from distutils.version import LooseVersion # -- General configuration ---------------------------------------------------- # The version check in Sphinx itself can only compare the major and # minor parts of the version number, not the micro. 
To do a more # specific version check, call check_sphinx_version("x.y.z.") from # your project's conf.py needs_sphinx = '1.3' on_rtd = os.environ.get('READTHEDOCS', None) == 'True' def check_sphinx_version(expected_version): sphinx_version = LooseVersion(sphinx.__version__) expected_version = LooseVersion(expected_version) if sphinx_version < expected_version: raise RuntimeError( "At least Sphinx version {0} is required to build this " "documentation. Found {1}.".format( expected_version, sphinx_version)) # Configuration for intersphinx: refer to the Python standard library. intersphinx_mapping = { 'python': ('https://docs.python.org/3/', (None, 'http://data.astropy.org/intersphinx/python3.inv')), 'pythonloc': ('http://docs.python.org/', path.abspath(path.join(path.dirname(__file__), 'local/python3_local_links.inv'))), 'numpy': ('https://docs.scipy.org/doc/numpy/', (None, 'http://data.astropy.org/intersphinx/numpy.inv')), 'scipy': ('https://docs.scipy.org/doc/scipy/reference/', (None, 'http://data.astropy.org/intersphinx/scipy.inv')), 'matplotlib': ('http://matplotlib.org/', (None, 'http://data.astropy.org/intersphinx/matplotlib.inv')), 'astropy': ('http://docs.astropy.org/en/stable/', None), 'h5py': ('http://docs.h5py.org/en/latest/', None)} if sys.version_info[0] == 2: intersphinx_mapping['python'] = ( 'https://docs.python.org/2/', (None, 'http://data.astropy.org/intersphinx/python2.inv')) intersphinx_mapping['pythonloc'] = ( 'http://docs.python.org/', path.abspath(path.join(path.dirname(__file__), 'local/python2_local_links.inv'))) # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = ['_build'] # Add any paths that contain templates here, relative to this directory. # templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # The reST default role (used for this markup: `text`) to use for all # documents. Set to the "smart" one. default_role = 'obj' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = None # This is added to the end of RST files - a good place to put substitutions to # be used globally. rst_epilog = """ .. _Astropy: http://astropy.org """ # A list of warning types to suppress arbitrary warning messages. We mean to # override directives in astropy_helpers.sphinx.ext.autodoc_enhancements, # thus need to ignore those warning. This can be removed once the patch gets # released in upstream Sphinx (https://github.com/sphinx-doc/sphinx/pull/1843). # Suppress the warnings requires Sphinx v1.4.2 suppress_warnings = ['app.add_directive', ] # -- Project information ------------------------------------------------------ # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. 
#pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # -- Settings for extensions and extension options ---------------------------- # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ 'sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'sphinx.ext.todo', 'sphinx.ext.coverage', 'sphinx.ext.inheritance_diagram', 'sphinx.ext.viewcode', 'astropy_helpers.extern.numpydoc', 'astropy_helpers.extern.automodapi.automodapi', 'astropy_helpers.extern.automodapi.smart_resolver', 'astropy_helpers.sphinx.ext.tocdepthfix', 'astropy_helpers.sphinx.ext.doctest', 'astropy_helpers.sphinx.ext.changelog_links'] if not on_rtd and LooseVersion(sphinx.__version__) < LooseVersion('1.4'): extensions.append('sphinx.ext.pngmath') else: extensions.append('sphinx.ext.mathjax') try: import matplotlib.sphinxext.plot_directive extensions += [matplotlib.sphinxext.plot_directive.__name__] # AttributeError is checked here in case matplotlib is installed but # Sphinx isn't. Note that this module is imported by the config file # generator, even if we're not building the docs. except (ImportError, AttributeError): warnings.warn( "matplotlib's plot_directive could not be imported. " + "Inline plots will not be included in the output") # Don't show summaries of the members in each class along with the # class' docstring numpydoc_show_class_members = False autosummary_generate = True automodapi_toctreedirnm = 'api' # Class documentation should contain *both* the class docstring and # the __init__ docstring autoclass_content = "both" # Render inheritance diagrams in SVG graphviz_output_format = "svg" graphviz_dot_args = [ '-Nfontsize=10', '-Nfontname=Helvetica Neue, Helvetica, Arial, sans-serif', '-Efontsize=10', '-Efontname=Helvetica Neue, Helvetica, Arial, sans-serif', '-Gfontsize=10', '-Gfontname=Helvetica Neue, Helvetica, Arial, sans-serif' ] # -- Options for HTML output ------------------------------------------------- # Add any paths that contain custom themes here, relative to this directory. html_theme_path = [path.abspath(path.join(path.dirname(__file__), 'themes'))] # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'bootstrap-astropy' # Custom sidebar templates, maps document names to template names. html_sidebars = { '**': ['localtoc.html'], 'search': [], 'genindex': [], 'py-modindex': [], } # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. # included in the bootstrap-astropy theme html_favicon = path.join(html_theme_path[0], html_theme, 'static', 'astropy_logo.ico') # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. html_last_updated_fmt = '%d %b %Y' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. #html_theme_options = {} # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. 
#html_use_smartypants = True # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. #html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # -- Options for LaTeX output ------------------------------------------------ # The paper size ('letter' or 'a4'). #latex_paper_size = 'letter' # The font size ('10pt', '11pt' or '12pt'). #latex_font_size = '10pt' # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. latex_toplevel_sectioning = 'part' # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. #latex_show_urls = False latex_elements = {} # Additional stuff for the LaTeX preamble. latex_elements['preamble'] = r""" % Use a more modern-looking monospace font \usepackage{inconsolata} % The enumitem package provides unlimited nesting of lists and enums. % Sphinx may use this in the future, in which case this can be removed. % See https://bitbucket.org/birkenfeld/sphinx/issue/777/latex-output-too-deeply-nested \usepackage{enumitem} \setlistdepth{15} % In the parameters section, place a newline after the Parameters % header. (This is stolen directly from Numpy's conf.py, since it % affects Numpy-style docstrings). \usepackage{expdlist} \let\latexdescription=\description \def\description{\latexdescription{}{} \breaklabel} % Support the superscript Unicode numbers used by the "unicode" units % formatter \DeclareUnicodeCharacter{2070}{\ensuremath{^0}} \DeclareUnicodeCharacter{00B9}{\ensuremath{^1}} \DeclareUnicodeCharacter{00B2}{\ensuremath{^2}} \DeclareUnicodeCharacter{00B3}{\ensuremath{^3}} \DeclareUnicodeCharacter{2074}{\ensuremath{^4}} \DeclareUnicodeCharacter{2075}{\ensuremath{^5}} \DeclareUnicodeCharacter{2076}{\ensuremath{^6}} \DeclareUnicodeCharacter{2077}{\ensuremath{^7}} \DeclareUnicodeCharacter{2078}{\ensuremath{^8}} \DeclareUnicodeCharacter{2079}{\ensuremath{^9}} \DeclareUnicodeCharacter{207B}{\ensuremath{^-}} \DeclareUnicodeCharacter{00B0}{\ensuremath{^{\circ}}} \DeclareUnicodeCharacter{2032}{\ensuremath{^{\prime}}} \DeclareUnicodeCharacter{2033}{\ensuremath{^{\prime\prime}}} % Make the "warning" and "notes" sections use a sans-serif font to % make them stand out more. \renewenvironment{notice}[2]{ \def\py@noticetype{#1} \csname py@noticestart@#1\endcsname \textsf{\textbf{#2}} }{\csname py@noticeend@\py@noticetype\endcsname} """ # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_domain_indices = True # The name of an image file (relative to this directory) to place at the top of # the title page. 
#latex_logo = None # -- Options for the linkcheck builder ---------------------------------------- # A timeout value, in seconds, for the linkcheck builder linkcheck_timeout = 60 photutils-0.4/astropy_helpers/astropy_helpers/sphinx/ext/0000755000214200020070000000000013175654702026364 5ustar lbradleySTSCI\science00000000000000photutils-0.4/astropy_helpers/astropy_helpers/sphinx/ext/__init__.py0000644000214200020070000000010213175633272030465 0ustar lbradleySTSCI\science00000000000000from __future__ import division, absolute_import, print_function photutils-0.4/astropy_helpers/astropy_helpers/sphinx/ext/changelog_links.py0000644000214200020070000000554313175633272032073 0ustar lbradleySTSCI\science00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst """ This sphinx extension makes the issue numbers in the changelog into links to GitHub issues. """ from __future__ import print_function import re from docutils.nodes import Text, reference BLOCK_PATTERN = re.compile('\[#.+\]', flags=re.DOTALL) ISSUE_PATTERN = re.compile('#[0-9]+') def process_changelog_links(app, doctree, docname): for rex in app.changelog_links_rexes: if rex.match(docname): break else: # if the doc doesn't match any of the changelog regexes, don't process return app.info('[changelog_links] Adding changelog links to "{0}"'.format(docname)) for item in doctree.traverse(): if not isinstance(item, Text): continue # We build a new list of items to replace the current item. If # a link is found, we need to use a 'reference' item. children = [] # First cycle through blocks of issues (delimited by []) then # iterate inside each one to find the individual issues. prev_block_end = 0 for block in BLOCK_PATTERN.finditer(item): block_start, block_end = block.start(), block.end() children.append(Text(item[prev_block_end:block_start])) block = item[block_start:block_end] prev_end = 0 for m in ISSUE_PATTERN.finditer(block): start, end = m.start(), m.end() children.append(Text(block[prev_end:start])) issue_number = block[start:end] refuri = app.config.github_issues_url + issue_number[1:] children.append(reference(text=issue_number, name=issue_number, refuri=refuri)) prev_end = end prev_block_end = block_end # If no issues were found, this adds the whole item, # otherwise it adds the remaining text. children.append(Text(block[prev_end:block_end])) # If no blocks were found, this adds the whole item, otherwise # it adds the remaining text. children.append(Text(item[prev_block_end:])) # Replace item by the new list of items we have generated, # which may contain links. item.parent.replace(item, children) def setup_patterns_rexes(app): app.changelog_links_rexes = [re.compile(pat) for pat in app.config.changelog_links_docpattern] def setup(app): app.connect('doctree-resolved', process_changelog_links) app.connect('builder-inited', setup_patterns_rexes) app.add_config_value('github_issues_url', None, True) app.add_config_value('changelog_links_docpattern', ['.*changelog.*', 'whatsnew/.*'], True) return {'parallel_read_safe': True, 'parallel_write_safe': True} photutils-0.4/astropy_helpers/astropy_helpers/sphinx/ext/doctest.py0000644000214200020070000000364113175633272030406 0ustar lbradleySTSCI\science00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst """ This is a set of three directives that allow us to insert metadata about doctests into the .rst files so the testing framework knows which tests to skip. 
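For example, a documentation page might mark one example as always skipped, or as requiring an optional dependency, roughly like this (an illustrative sketch; the directive names are the ones registered in ``setup()`` below, but this snippet is not taken from the package's own docs)::

    .. doctest-skip::

        >>> import some_optional_package

    .. doctest-requires:: scipy

        >>> import scipy
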
This is quite different from the doctest extension in Sphinx itself, which actually does something. For astropy, all of the testing is centrally managed from py.test and Sphinx is not used for running tests. """ import re from docutils.nodes import literal_block from docutils.parsers.rst import Directive class DoctestSkipDirective(Directive): has_content = True def run(self): # Check if there is any valid argument, and skip it. Currently only # 'win32' is supported in astropy.tests.pytest_plugins. if re.match('win32', self.content[0]): self.content = self.content[2:] code = '\n'.join(self.content) return [literal_block(code, code)] class DoctestOmitDirective(Directive): has_content = True def run(self): # Simply do not add any content when this directive is encountered return [] class DoctestRequiresDirective(DoctestSkipDirective): # This is silly, but we really support an unbounded number of # optional arguments optional_arguments = 64 def setup(app): app.add_directive('doctest-requires', DoctestRequiresDirective) app.add_directive('doctest-skip', DoctestSkipDirective) app.add_directive('doctest-skip-all', DoctestSkipDirective) app.add_directive('doctest', DoctestSkipDirective) # Code blocks that use this directive will not appear in the generated # documentation. This is intended to hide boilerplate code that is only # useful for testing documentation using doctest, but does not actually # belong in the documentation itself. app.add_directive('testsetup', DoctestOmitDirective) return {'parallel_read_safe': True, 'parallel_write_safe': True} photutils-0.4/astropy_helpers/astropy_helpers/sphinx/ext/edit_on_github.py0000644000214200020070000001346413175633272031730 0ustar lbradleySTSCI\science00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst """ This extension makes it easy to edit documentation on github. It adds links associated with each docstring that go to the corresponding view source page on Github. From there, the user can push the "Edit" button, edit the docstring, and submit a pull request. It has the following configuration options (to be set in the project's ``conf.py``): * ``edit_on_github_project`` The name of the github project, in the form "username/projectname". * ``edit_on_github_branch`` The name of the branch to edit. If this is a released version, this should be a git tag referring to that version. For a dev version, it often makes sense for it to be "master". It may also be a git hash. * ``edit_on_github_source_root`` The location within the source tree of the root of the Python package. Defaults to "lib". * ``edit_on_github_doc_root`` The location within the source tree of the root of the documentation source. Defaults to "doc", but it may make sense to set it to "doc/source" if the project uses a separate source directory. * ``edit_on_github_docstring_message`` The phrase displayed in the links to edit a docstring. Defaults to "[edit on github]". * ``edit_on_github_page_message`` The phrase displayed in the links to edit a RST page. Defaults to "[edit this page on github]". * ``edit_on_github_help_message`` The phrase displayed as a tooltip on the edit links. Defaults to "Push the Edit button on the next page" * ``edit_on_github_skip_regex`` When the path to the .rst file matches this regular expression, no "edit this page on github" link will be added. Defaults to ``"_.*"``. 
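A minimal configuration in a project's ``conf.py`` might therefore look roughly like the following (the project, branch, and directory values are placeholders, not defaults required by this extension)::

    edit_on_github_project = 'your-org/your-project'
    edit_on_github_branch = 'v0.4.x'
    edit_on_github_source_root = ''
    edit_on_github_doc_root = 'docs'
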
""" import inspect import os import re import sys from docutils import nodes from sphinx import addnodes def import_object(modname, name): """ Import the object given by *modname* and *name* and return it. If not found, or the import fails, returns None. """ try: __import__(modname) mod = sys.modules[modname] obj = mod for part in name.split('.'): obj = getattr(obj, part) return obj except: return None def get_url_base(app): return 'http://github.com/%s/tree/%s/' % ( app.config.edit_on_github_project, app.config.edit_on_github_branch) def doctree_read(app, doctree): # Get the configuration parameters if app.config.edit_on_github_project == 'REQUIRED': raise ValueError( "The edit_on_github_project configuration variable must be " "provided in the conf.py") source_root = app.config.edit_on_github_source_root url = get_url_base(app) docstring_message = app.config.edit_on_github_docstring_message # Handle the docstring-editing links for objnode in doctree.traverse(addnodes.desc): if objnode.get('domain') != 'py': continue names = set() for signode in objnode: if not isinstance(signode, addnodes.desc_signature): continue modname = signode.get('module') if not modname: continue fullname = signode.get('fullname') if fullname in names: # only one link per name, please continue names.add(fullname) obj = import_object(modname, fullname) anchor = None if obj is not None: try: lines, lineno = inspect.getsourcelines(obj) except: pass else: anchor = '#L%d' % lineno if anchor: real_modname = inspect.getmodule(obj).__name__ path = '%s%s%s.py%s' % ( url, source_root, real_modname.replace('.', '/'), anchor) onlynode = addnodes.only(expr='html') onlynode += nodes.reference( reftitle=app.config.edit_on_github_help_message, refuri=path) onlynode[0] += nodes.inline( '', '', nodes.raw('', ' ', format='html'), nodes.Text(docstring_message), classes=['edit-on-github', 'viewcode-link']) signode += onlynode def html_page_context(app, pagename, templatename, context, doctree): if (templatename == 'page.html' and not re.match(app.config.edit_on_github_skip_regex, pagename)): doc_root = app.config.edit_on_github_doc_root if doc_root != '' and not doc_root.endswith('/'): doc_root += '/' doc_path = os.path.relpath(doctree.get('source'), app.builder.srcdir) url = get_url_base(app) page_message = app.config.edit_on_github_page_message context['edit_on_github'] = url + doc_root + doc_path context['edit_on_github_page_message'] = page_message def setup(app): app.add_config_value('edit_on_github_project', 'REQUIRED', True) app.add_config_value('edit_on_github_branch', 'master', True) app.add_config_value('edit_on_github_source_root', 'lib', True) app.add_config_value('edit_on_github_doc_root', 'doc', True) app.add_config_value('edit_on_github_docstring_message', '[edit on github]', True) app.add_config_value('edit_on_github_page_message', 'Edit This Page on Github', True) app.add_config_value('edit_on_github_help_message', 'Push the Edit button on the next page', True) app.add_config_value('edit_on_github_skip_regex', '_.*', True) app.connect('doctree-read', doctree_read) app.connect('html-page-context', html_page_context) return {'parallel_read_safe': True, 'parallel_write_safe': True} photutils-0.4/astropy_helpers/astropy_helpers/sphinx/ext/tests/0000755000214200020070000000000013175654702027526 5ustar lbradleySTSCI\science00000000000000photutils-0.4/astropy_helpers/astropy_helpers/sphinx/ext/tests/__init__.py0000644000214200020070000000000013175633272031624 0ustar 
lbradleySTSCI\science00000000000000photutils-0.4/astropy_helpers/astropy_helpers/sphinx/ext/tocdepthfix.py0000644000214200020070000000137013175633272031257 0ustar lbradleySTSCI\science00000000000000from sphinx import addnodes def fix_toc_entries(app, doctree): # Get the docname; I don't know why this isn't just passed in to the # callback # This seems a bit unreliable as it's undocumented, but it's not "private" # either: docname = app.builder.env.temp_data['docname'] if app.builder.env.metadata[docname].get('tocdepth', 0) != 0: # We need to reprocess any TOC nodes in the doctree and make sure all # the files listed in any TOCs are noted for treenode in doctree.traverse(addnodes.toctree): app.builder.env.note_toctree(docname, treenode) def setup(app): app.connect('doctree-read', fix_toc_entries) return {'parallel_read_safe': True, 'parallel_write_safe': True} photutils-0.4/astropy_helpers/astropy_helpers/sphinx/local/0000755000214200020070000000000013175654702026656 5ustar lbradleySTSCI\science00000000000000photutils-0.4/astropy_helpers/astropy_helpers/sphinx/local/python2_local_links.inv0000644000214200020070000000106213062313555033341 0ustar lbradleySTSCI\science00000000000000# Sphinx inventory version 2 # Project: Python # Version: 2.7 and 3.5 # The remainder of this file should be compressed using zlib. [binary zlib-compressed inventory payload omitted] photutils-0.4/astropy_helpers/astropy_helpers/sphinx/local/python2_local_links.txt0000644000214200020070000000314613062313555033371 0ustar lbradleySTSCI\science00000000000000# Sphinx inventory version 2 # Project: Python # Version: 2.7 and 3.5 # The remainder of this file should be compressed using zlib.
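# Each record below follows the Sphinx version-2 inventory layout:
#   <name> <domain>:<role> <priority> <relative URI> <display name>
# A display name of "-" means "use the object name itself"; the relative URI
# is joined to the intersphinx base URL configured for these local links
# (here, presumably the docs.python.org root, given the "2/..." and "3/..." prefixes).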
# python2 IndexError py:exception -1 2/library/exceptions.html#IndexError - IOError py:exception -1 2/library/exceptions.html#IOError - KeyError py:exception -1 2/library/exceptions.html#KeyError - ValueError py:exception -1 2/library/exceptions.html#ValueError - TypeError py:exception -1 2/library/exceptions.html#TypeError - # python3 only TimeoutError py:exception -1 3/library/exceptions.html#TimeoutError - bytes py:function -1 3/library/functions.html#bytes - urllib.request.urlopen py:function -1 3/library/urllib.request.html#urllib.request.urlopen - concurrent.futures.Future py:class -1 3/library/concurrent.futures.html#concurrent.futures.Future - concurrent.futures.ThreadPoolExecutor py:class -1 3/library/concurrent.futures.html#concurrent.futures.ThreadPoolExecutor - queue.Queue py:class -1 3/library/queue.html#queue.Queue - print() py:function -1 3/library/functions.html#print - # python3 only collections.abc collections.Generator py:class -1 3/library/collections.abc.html#collections.abc.Generator - collections.ByteString py:class -1 3/library/collections.abc.html#collections.abc.ByteString - collections.Awaitable py:class -1 3/library/collections.abc.html#collections.abc.Awaitable - collections.Coroutine py:class -1 3/library/collections.abc.html#collections.abc.Coroutine - collections.AsyncIterable py:class -1 3/library/collections.abc.html#collections.abc.AsyncIterable - collections.AsyncIterator py:class -1 3/library/collections.abc.html#collections.abc.AsyncIterator - photutils-0.4/astropy_helpers/astropy_helpers/sphinx/local/python3_local_links.inv0000644000214200020070000000122213062313555033340 0ustar lbradleySTSCI\science00000000000000# Sphinx inventory version 2 # Project: Python # Version: 3.5 # The remainder of this file should be compressed using zlib. [binary zlib-compressed inventory payload omitted] photutils-0.4/astropy_helpers/astropy_helpers/sphinx/local/python3_local_links.txt0000644000214200020070000000540413062313555033371 0ustar lbradleySTSCI\science00000000000000# Sphinx inventory version 2 # Project: Python # Version: 2.7 and 3.5 # The remainder of this file should be compressed using zlib.
# python2 only links cPickle py:module -1 2/library/pickle.html#module-cPickle - unicode py:function -1 2/library/functions.html#unicode - bsddb py:module -1 2/library/bsddb.html#module-bsddb - dict.has_key py:method -1 2/library/stdtypes.html#dict.has_key - dict.iteritems py:method -1 2/library/stdtypes.html#dict.iteritems - dict.iterkeys py:method -1 2/library/stdtypes.html#dict.iterkeys - dict.itervalues py:method -1 2/library/stdtypes.html#dict.itervalues - urllib2.urlopen py:function -1 2/library/urllib2.html#urllib2.urlopen - # python3 print() py:function -1 3/library/functions.html#print - # python3 collections.abc collections.Container py:class -1 3/library/collections.abc.html#collections.abc.Container - collections.Hashable py:class -1 3/library/collections.abc.html#collections.abc.Hashable - collections.Sized py:class -1 3/library/collections.abc.html#collections.abc.Sized - collections.Callable py:class -1 3/library/collections.abc.html#collections.abc.Callable - collections.Iterable py:class -1 3/library/collections.abc.html#collections.abc.Iterable - collections.Iterator py:class -1 3/library/collections.abc.html#collections.abc.Iterator - collections.Generator py:class -1 3/library/collections.abc.html#collections.abc.Generator - collections.Sequence py:class -1 3/library/collections.abc.html#collections.abc.Sequence - collections.MutableSequence py:class -1 3/library/collections.abc.html#collections.abc.MutableSequence - collections.ByteString py:class -1 3/library/collections.abc.html#collections.abc.ByteString - collections.Set py:class -1 3/library/collections.abc.html#collections.abc.Set - collections.MutableSet py:class -1 3/library/collections.abc.html#collections.abc.MutableSet - collections.Mapping py:class -1 3/library/collections.abc.html#collections.abc.Mapping - collections.MutableMapping py:class -1 3/library/collections.abc.html#collections.abc.MutableMapping - collections.MappingView py:class -1 3/library/collections.abc.html#collections.abc.MappingView - collections.ItemsView py:class -1 3/library/collections.abc.html#collections.abc.ItemsView - collections.KeysView py:class -1 3/library/collections.abc.html#collections.abc.KeysView - collections.ValuesView py:class -1 3/library/collections.abc.html#collections.abc.ValuesView - collections.Awaitable py:class -1 3/library/collections.abc.html#collections.abc.Awaitable - collections.Coroutine py:class -1 3/library/collections.abc.html#collections.abc.Coroutine - collections.AsyncIterable py:class -1 3/library/collections.abc.html#collections.abc.AsyncIterable - collections.AsyncIterator py:class -1 3/library/collections.abc.html#collections.abc.AsyncIterator - photutils-0.4/astropy_helpers/astropy_helpers/sphinx/setup_package.py0000644000214200020070000000044413175633272030752 0ustar lbradleySTSCI\science00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst def get_package_data(): # Install the theme files return { 'astropy_helpers.sphinx': [ 'local/*.inv', 'themes/bootstrap-astropy/*.*', 'themes/bootstrap-astropy/static/*.*']} photutils-0.4/astropy_helpers/astropy_helpers/sphinx/themes/0000755000214200020070000000000013175654702027051 5ustar lbradleySTSCI\science00000000000000photutils-0.4/astropy_helpers/astropy_helpers/sphinx/themes/bootstrap-astropy/0000755000214200020070000000000013175654702032565 5ustar lbradleySTSCI\science00000000000000photutils-0.4/astropy_helpers/astropy_helpers/sphinx/themes/bootstrap-astropy/globaltoc.html0000644000214200020070000000011112346164025035403 
0ustar lbradleySTSCI\science00000000000000

Table of Contents

{{ toctree(maxdepth=-1, titles_only=true) }} photutils-0.4/astropy_helpers/astropy_helpers/sphinx/themes/bootstrap-astropy/layout.html0000644000214200020070000000655112605531164034770 0ustar lbradleySTSCI\science00000000000000{% extends "basic/layout.html" %} {# Collapsible sidebar script from default/layout.html in Sphinx #} {% set script_files = script_files + ['_static/sidebar.js'] %} {# Add the google webfonts needed for the logo #} {% block extrahead %} {% if not embedded %}{% endif %} {% endblock %} {% block header %}
{{ theme_logotext1 }}{{ theme_logotext2 }}{{ theme_logotext3 }}
  • Index
  • Modules
  • {% block sidebarsearch %} {% include "searchbox.html" %} {% endblock %}
{% endblock %} {% block relbar1 %} {% endblock %} {# Silence the bottom relbar. #} {% block relbar2 %}{% endblock %} {%- block footer %}

{%- if edit_on_github %} {{ edit_on_github_page_message }}   {%- endif %} {%- if show_source and has_source and sourcename %} {{ _('Page Source') }} {%- endif %}   Back to Top

{%- if show_copyright %} {%- if hasdoc('copyright') %} {% trans path=pathto('copyright'), copyright=copyright|e %}© Copyright {{ copyright }}.{% endtrans %}
{%- else %} {% trans copyright=copyright|e %}© Copyright {{ copyright }}.{% endtrans %}
{%- endif %} {%- endif %} {%- if show_sphinx %} {% trans sphinx_version=sphinx_version|e %}Created using Sphinx {{ sphinx_version }}.{% endtrans %}   {%- endif %} {%- if last_updated %} {% trans last_updated=last_updated|e %}Last built {{ last_updated }}.{% endtrans %}
{%- endif %}

{%- endblock %} photutils-0.4/astropy_helpers/astropy_helpers/sphinx/themes/bootstrap-astropy/localtoc.html0000644000214200020070000000004212346164025035240 0ustar lbradleySTSCI\science00000000000000

Page Contents

{{ toc }} photutils-0.4/astropy_helpers/astropy_helpers/sphinx/themes/bootstrap-astropy/searchbox.html0000644000214200020070000000042012346164025035416 0ustar lbradleySTSCI\science00000000000000{%- if pagename != "search" %}
{%- endif %} photutils-0.4/astropy_helpers/astropy_helpers/sphinx/themes/bootstrap-astropy/static/0000755000214200020070000000000013175654702034054 5ustar lbradleySTSCI\science00000000000000././@LongLink0000000000000000000000000000015100000000000011212 Lustar 00000000000000photutils-0.4/astropy_helpers/astropy_helpers/sphinx/themes/bootstrap-astropy/static/astropy_linkout.svgphotutils-0.4/astropy_helpers/astropy_helpers/sphinx/themes/bootstrap-astropy/static/astropy_linkout0000644000214200020070000001212112605531164037233 0ustar lbradleySTSCI\science00000000000000 [SVG markup omitted] ././@LongLink0000000000000000000000000000015400000000000011215 Lustar 00000000000000photutils-0.4/astropy_helpers/astropy_helpers/sphinx/themes/bootstrap-astropy/static/astropy_linkout_20.pngphotutils-0.4/astropy_helpers/astropy_helpers/sphinx/themes/bootstrap-astropy/static/astropy_linkout0000644000214200020070000000327512346164025037245 0ustar lbradleySTSCI\science00000000000000 [binary PNG image data omitted] ././@LongLink0000000000000000000000000000014600000000000011216 Lustar 00000000000000photutils-0.4/astropy_helpers/astropy_helpers/sphinx/themes/bootstrap-astropy/static/astropy_logo.icophotutils-0.4/astropy_helpers/astropy_helpers/sphinx/themes/bootstrap-astropy/static/astropy_logo.ic0000644000214200020070000010033412605531164037104 0ustar lbradleySTSCI\science00000000000000 [binary ICO image data omitted] ././@LongLink0000000000000000000000000000014600000000000011216 Lustar 00000000000000photutils-0.4/astropy_helpers/astropy_helpers/sphinx/themes/bootstrap-astropy/static/astropy_logo.svgphotutils-0.4/astropy_helpers/astropy_helpers/sphinx/themes/bootstrap-astropy/static/astropy_logo.sv0000644000214200020070000001103212605531164037145 0ustar lbradleySTSCI\science00000000000000 [SVG markup omitted] ././@LongLink0000000000000000000000000000015100000000000011212 Lustar 00000000000000photutils-0.4/astropy_helpers/astropy_helpers/sphinx/themes/bootstrap-astropy/static/astropy_logo_32.pngphotutils-0.4/astropy_helpers/astropy_helpers/sphinx/themes/bootstrap-astropy/static/astropy_logo_320000644000214200020070000000353412346164025037022 0ustar lbradleySTSCI\science00000000000000 [binary PNG image data omitted]
å4âÑçÝEBÛY8{Z5˜öðîFô÷A¬¦¤ƒÐK]àä?‘úÓð»upíjèLñ©,ñ<«÷…" ^?aReÁ ÀAO/¬YŽØü–±áHKCî}K7ÿÙ¼V='N†´ èhß@$.:4Á}žr½säFp"jÊw^ùÆqo?%Š…føä$¢äâþ2HÍ€÷€°O6àƒžËà75E)iנس\o™FÌ„ë*õj¬þ”î{YU†¬¢üI´¿…ܹ㠦!bò¦¦Qà©Ð[Ç¢&âX¾¶Æ])àWHTÿ]º í…ŸAÖ­Ê`Їu×W ëâXq;¤dÍúgõÚ± "20¼Ö¯Ð·k·að:µobÝ3¹u‹2pÄ!}rô¸nÒ,TjÝäN$9Là¿¡k“{rÀâAMP*a¦Öri.©išÜ[ï—ËÊÎ h“Ш™ì÷¼¨7O$éç0 Ë•Lg§$3ó3Çãÿ¼ G®ÿ.Á½8<ßÇIEND®B`‚././@LongLink0000000000000000000000000000015300000000000011214 Lustar 00000000000000photutils-0.4/astropy_helpers/astropy_helpers/sphinx/themes/bootstrap-astropy/static/bootstrap-astropy.cssphotutils-0.4/astropy_helpers/astropy_helpers/sphinx/themes/bootstrap-astropy/static/bootstrap-astro0000644000214200020070000002744312676274407037161 0ustar lbradleySTSCI\science00000000000000/*! * Bootstrap v1.4.0 * * Copyright 2011 Twitter, Inc * Licensed under the Apache License v2.0 * http://www.apache.org/licenses/LICENSE-2.0 * * Heavily modified by Kyle Barbary for the AstroPy Project for use with Sphinx. */ @import url("basic.css"); body { background-color: #ffffff; margin: 0; font-family: "Helvetica Neue", Helvetica, Arial, sans-serif; font-size: 13px; font-weight: normal; line-height: 18px; color: #404040; } /* Hyperlinks ----------------------------------------------------------------*/ a { color: #0069d6; text-decoration: none; line-height: inherit; font-weight: inherit; } a:hover { color: #00438a; text-decoration: underline; } /* Typography ----------------------------------------------------------------*/ h1,h2,h3,h4,h5,h6 { color: #404040; margin: 0.7em 0 0 0; line-height: 1.5em; } h1 { font-size: 24px; margin: 0; } h2 { font-size: 21px; line-height: 1.2em; margin: 1em 0 0.5em 0; border-bottom: 1px solid #404040; } h3 { font-size: 18px; } h4 { font-size: 16px; } h5 { font-size: 14px; } h6 { font-size: 13px; text-transform: uppercase; } p { font-size: 13px; font-weight: normal; line-height: 18px; margin-top: 0px; margin-bottom: 9px; } ul, ol { margin-left: 0; padding: 0 0 0 25px; } ul ul, ul ol, ol ol, ol ul { margin-bottom: 0; } ul { list-style: disc; } ol { list-style: decimal; } li { line-height: 18px; color: #404040; } ul.unstyled { list-style: none; margin-left: 0; } dl { margin-bottom: 18px; } dl dt, dl dd { line-height: 18px; } dl dd { margin-left: 9px; } hr { margin: 20px 0 19px; border: 0; border-bottom: 1px solid #eee; } strong { font-style: inherit; font-weight: bold; } em { font-style: italic; font-weight: inherit; line-height: inherit; } .muted { color: #bfbfbf; } address { display: block; line-height: 18px; margin-bottom: 18px; } code, pre { padding: 0 3px 2px; font-family: monospace; -webkit-border-radius: 3px; -moz-border-radius: 3px; border-radius: 3px; } tt { font-family: monospace; } code { padding: 1px 3px; } pre { display: block; padding: 8.5px; margin: 0 0 18px; line-height: 18px; border: 1px solid #ddd; border: 1px solid rgba(0, 0, 0, 0.12); -webkit-border-radius: 3px; -moz-border-radius: 3px; border-radius: 3px; white-space: pre; word-wrap: break-word; } img { margin: 9px 0; } /* format inline code with a rounded box */ tt, code { margin: 0 2px; padding: 0 5px; border: 1px solid #ddd; border: 1px solid rgba(0, 0, 0, 0.12); border-radius: 3px; } code.xref, a code { margin: 0; padding: 0 1px 0 1px; background-color: none; border: none; } /* all code has same box background color, even in headers */ h1 tt, h2 tt, h3 tt, h4 tt, h5 tt, h6 tt, h1 code, h2 code, h3 code, h4 code, h5 code, h6 code, pre, code, tt { background-color: #f8f8f8; } /* override box for links & other sphinx-specifc stuff */ tt.xref, a tt, tt.descname, 
tt.descclassname { padding: 0 1px 0 1px; border: none; } /* override box for related bar at the top of the page */ .related tt { border: none; padding: 0 1px 0 1px; background-color: transparent; font-weight: bold; } th { background-color: #dddddd; } .viewcode-back { font-family: sans-serif; } div.viewcode-block:target { background-color: #f4debf; border-top: 1px solid #ac9; border-bottom: 1px solid #ac9; } table.docutils { border-spacing: 5px; border-collapse: separate; } /* Topbar --------------------------------------------------------------------*/ div.topbar { height: 40px; position: absolute; top: 0; left: 0; right: 0; z-index: 10000; padding: 0px 10px; background-color: #222; background-color: #222222; background-repeat: repeat-x; background-image: -khtml-gradient(linear, left top, left bottom, from(#333333), to(#222222)); background-image: -moz-linear-gradient(top, #333333, #222222); background-image: -ms-linear-gradient(top, #333333, #222222); background-image: -webkit-gradient(linear, left top, left bottom, color-stop(0%, #333333), color-stop(100%, #222222)); background-image: -webkit-linear-gradient(top, #333333, #222222); background-image: -o-linear-gradient(top, #333333, #222222); background-image: linear-gradient(top, #333333, #222222); filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#333333', endColorstr='#222222', GradientType=0); overflow: auto; } div.topbar a.brand { font-family: 'Source Sans Pro', sans-serif; font-size: 26px; color: #ffffff; font-weight: 600; text-decoration: none; float: left; display: block; height: 32px; padding: 8px 12px 0px 45px; margin-left: -10px; background: transparent url("astropy_logo_32.png") no-repeat 10px 4px; background-image: url("astropy_logo.svg"), none; background-size: 32px 32px; } #logotext1 { } #logotext2 { font-weight:200; color: #ff5000; } #logotext3 { font-weight:200; } div.topbar .brand:hover, div.topbar ul li a.homelink:hover { background-color: #333; background-color: rgba(255, 255, 255, 0.05); } div.topbar ul { font-size: 110%; list-style: none; margin: 0; padding: 0 0 0 10px; float: right; color: #bfbfbf; text-align: center; text-decoration: none; height: 100%; } div.topbar ul li { float: left; display: inline; height: 30px; margin: 5px; padding: 0px; } div.topbar ul li a { color: #bfbfbf; text-decoration: none; padding: 5px; display: block; height: auto; text-align: center; vertical-align: middle; border-radius: 4px; } div.topbar ul li a:hover { color: #ffffff; text-decoration: none; } div.topbar ul li a.homelink { width: 112px; display: block; height: 20px; padding: 5px 0px; background: transparent url("astropy_linkout_20.png") no-repeat 10px 5px; background-image: url("astropy_linkout.svg"), none; background-size: 91px 20px; } div.topbar form { text-align: left; margin: 0 0 0 5px; position: relative; filter: alpha(opacity=100); -khtml-opacity: 1; -moz-opacity: 1; opacity: 1; } div.topbar input { background-color: #444; background-color: rgba(255, 255, 255, 0.3); font-family: "Helvetica Neue", Helvetica, Arial, sans-serif; font-size: normal; font-weight: 13px; line-height: 1; padding: 4px 9px; color: #ffffff; color: rgba(255, 255, 255, 0.75); border: 1px solid #111; -webkit-border-radius: 4px; -moz-border-radius: 4px; border-radius: 4px; -webkit-box-shadow: inset 0 1px 2px rgba(0, 0, 0, 0.1), 0 1px 0px rgba(255, 255, 255, 0.25); -moz-box-shadow: inset 0 1px 2px rgba(0, 0, 0, 0.1), 0 1px 0px rgba(255, 255, 255, 0.25); box-shadow: inset 0 1px 2px rgba(0, 0, 0, 0.1), 0 1px 0px rgba(255, 255, 255, 0.25); 
-webkit-transition: none; -moz-transition: none; -ms-transition: none; -o-transition: none; transition: none; } div.topbar input:-moz-placeholder { color: #e6e6e6; } div.topbar input::-webkit-input-placeholder { color: #e6e6e6; } div.topbar input:hover { background-color: #bfbfbf; background-color: rgba(255, 255, 255, 0.5); color: #ffffff; } div.topbar input:focus, div.topbar input.focused { outline: 0; background-color: #ffffff; color: #404040; text-shadow: 0 1px 0 #ffffff; border: 0; padding: 5px 10px; -webkit-box-shadow: 0 0 3px rgba(0, 0, 0, 0.15); -moz-box-shadow: 0 0 3px rgba(0, 0, 0, 0.15); box-shadow: 0 0 3px rgba(0, 0, 0, 0.15); } /* Relation bar (breadcrumbs, prev, next) ------------------------------------*/ div.related { height: 21px; width: auto; margin: 0 10px; position: absolute; top: 42px; clear: both; left: 0; right: 0; z-index: 10000; font-size: 100%; vertical-align: middle; background-color: #fff; border-bottom: 1px solid #bbb; } div.related ul { padding: 0; margin: 0; } /* Footer --------------------------------------------------------------------*/ footer { display: block; margin: 10px 10px 0px; padding: 10px 0 0 0; border-top: 1px solid #bbb; } .pull-right { float: right; width: 30em; text-align: right; } /* Sphinx sidebar ------------------------------------------------------------*/ div.sphinxsidebar { font-size: inherit; border-radius: 3px; background-color: #eee; border: 1px solid #bbb; word-wrap: break-word; /* overflow-wrap is the canonical name for word-wrap in the CSS3 text draft. We include it here mainly for future-proofing. */ overflow-wrap: break-word; } div.sphinxsidebarwrapper { padding: 0px 0px 0px 5px; } div.sphinxsidebar h3 { font-family: 'Trebuchet MS', sans-serif; font-size: 1.4em; font-weight: normal; margin: 5px 0px 0px 5px; padding: 0; line-height: 1.6em; } div.sphinxsidebar h4 { font-family: 'Trebuchet MS', sans-serif; font-size: 1.3em; font-weight: normal; margin: 5px 0 0 0; padding: 0; } div.sphinxsidebar p { } div.sphinxsidebar p.topless { margin: 5px 10px 10px 10px; } div.sphinxsidebar ul { margin: 0px 0px 0px 5px; padding: 0; } div.sphinxsidebar ul ul { margin-left: 15px; list-style-type: disc; } /* If showing the global TOC (toctree), color the current page differently */ div.sphinxsidebar a.current { color: #404040; } div.sphinxsidebar a.current:hover { color: #404040; } /* document, documentwrapper, body, bodywrapper ----------------------------- */ div.document { margin-top: 72px; margin-left: 10px; margin-right: 10px; } div.documentwrapper { float: left; width: 100%; } div.body { background-color: #ffffff; padding: 0 0 0px 20px; } div.bodywrapper { margin: 0 0 0 230px; max-width: 55em; } /* Header links ------------------------------------------------------------- */ a.headerlink { font-size: 0.8em; padding: 0 4px 0 4px; text-decoration: none; } a.headerlink:hover { background-color: #0069d6; color: white; text-docoration: none; } /* Admonitions and warnings ------------------------------------------------- */ /* Shared by admonitions and warnings */ div.admonition, div.warning { padding: 0px; border-radius: 3px; -moz-border-radius: 3px; -webkit-border-radius: 3px; } div.admonition p, div.warning p { margin: 0.5em 1em 0.5em 1em; padding: 0; } div.admonition pre, div.warning pre { margin: 0.4em 1em 0.4em 1em; } div.admonition p.admonition-title, div.warning p.admonition-title { margin: 0; padding: 0.1em 0 0.1em 0.5em; color: white; font-weight: bold; font-size: 1.1em; } div.admonition ul, div.admonition ol, div.warning ul, div.warning 
ol { margin: 0.1em 0.5em 0.5em 3em; padding: 0; } /* Admonitions only */ div.admonition { border: 1px solid #609060; background-color: #e9ffe9; } div.admonition p.admonition-title { background-color: #70A070; } /* Warnings only */ div.warning { border: 1px solid #900000; background-color: #ffe9e9; } div.warning p.admonition-title { background-color: #b04040; } /* Figures ------------------------------------------------------------------ */ .figure.align-center { clear: none; } /* This is a div for containing multiple figures side-by-side, for use with * .. container:: figures */ div.figures { border: 1px solid #CCCCCC; background-color: #F8F8F8; margin: 1em; text-align: center; } div.figures .figure { clear: none; float: none; display: inline-block; border: none; margin-left: 0.5em; margin-right: 0.5em; } .field-list th { white-space: nowrap; } table.field-list { border-spacing: 0px; margin-left: 1px; border-left: 5px solid rgb(238, 238, 238) !important; } table.field-list th.field-name { display: inline-block; padding: 1px 8px 1px 5px; white-space: nowrap; background-color: rgb(238, 238, 238); border-radius: 0 3px 3px 0; -webkit-border-radius: 0 3px 3px 0; } photutils-0.4/astropy_helpers/astropy_helpers/sphinx/themes/bootstrap-astropy/static/copybutton.js0000644000214200020070000000532113064026647036617 0ustar lbradleySTSCI\science00000000000000$(document).ready(function() { /* Add a [>>>] button on the top-right corner of code samples to hide * the >>> and ... prompts and the output and thus make the code * copyable. */ var div = $('.highlight-python .highlight,' + '.highlight-python3 .highlight,' + '.highlight-default .highlight') var pre = div.find('pre'); // get the styles from the current theme pre.parent().parent().css('position', 'relative'); var hide_text = 'Hide the prompts and output'; var show_text = 'Show the prompts and output'; var border_width = pre.css('border-top-width'); var border_style = pre.css('border-top-style'); var border_color = pre.css('border-top-color'); var button_styles = { 'cursor':'pointer', 'position': 'absolute', 'top': '0', 'right': '0', 'border-color': border_color, 'border-style': border_style, 'border-width': border_width, 'color': border_color, 'text-size': '75%', 'font-family': 'monospace', 'padding-left': '0.2em', 'padding-right': '0.2em', 'border-radius': '0 3px 0 0' } // create and add the button to all the code blocks that contain >>> div.each(function(index) { var jthis = $(this); if (jthis.find('.gp').length > 0) { var button = $('>>>'); button.css(button_styles) button.attr('title', hide_text); button.data('hidden', 'false'); jthis.prepend(button); } // tracebacks (.gt) contain bare text elements that need to be // wrapped in a span to work with .nextUntil() (see later) jthis.find('pre:has(.gt)').contents().filter(function() { return ((this.nodeType == 3) && (this.data.trim().length > 0)); }).wrap(''); }); // define the behavior of the button when it's clicked $('.copybutton').click(function(e){ e.preventDefault(); var button = $(this); if (button.data('hidden') === 'false') { // hide the code output button.parent().find('.go, .gp, .gt').hide(); button.next('pre').find('.gt').nextUntil('.gp, .go').css('visibility', 'hidden'); button.css('text-decoration', 'line-through'); button.attr('title', show_text); button.data('hidden', 'true'); } else { // show the code output button.parent().find('.go, .gp, .gt').show(); button.next('pre').find('.gt').nextUntil('.gp, .go').css('visibility', 'visible'); button.css('text-decoration', 'none'); 
button.attr('title', hide_text); button.data('hidden', 'false'); } }); }); photutils-0.4/astropy_helpers/astropy_helpers/sphinx/themes/bootstrap-astropy/static/sidebar.js0000644000214200020070000001155312346164025036021 0ustar lbradleySTSCI\science00000000000000/* * sidebar.js * ~~~~~~~~~~ * * This script makes the Sphinx sidebar collapsible. * * .sphinxsidebar contains .sphinxsidebarwrapper. This script adds * in .sphixsidebar, after .sphinxsidebarwrapper, the #sidebarbutton * used to collapse and expand the sidebar. * * When the sidebar is collapsed the .sphinxsidebarwrapper is hidden * and the width of the sidebar and the margin-left of the document * are decreased. When the sidebar is expanded the opposite happens. * This script saves a per-browser/per-session cookie used to * remember the position of the sidebar among the pages. * Once the browser is closed the cookie is deleted and the position * reset to the default (expanded). * * :copyright: Copyright 2007-2011 by the Sphinx team, see AUTHORS. * :license: BSD, see LICENSE for details. * */ $(function() { // global elements used by the functions. // the 'sidebarbutton' element is defined as global after its // creation, in the add_sidebar_button function var bodywrapper = $('.bodywrapper'); var sidebar = $('.sphinxsidebar'); var sidebarwrapper = $('.sphinxsidebarwrapper'); // for some reason, the document has no sidebar; do not run into errors if (!sidebar.length) return; // original margin-left of the bodywrapper and width of the sidebar // with the sidebar expanded var bw_margin_expanded = bodywrapper.css('margin-left'); var ssb_width_expanded = sidebar.width(); // margin-left of the bodywrapper and width of the sidebar // with the sidebar collapsed var bw_margin_collapsed = 12; var ssb_width_collapsed = 12; // custom colors var dark_color = '#404040'; var light_color = '#505050'; function sidebar_is_collapsed() { return sidebarwrapper.is(':not(:visible)'); } function toggle_sidebar() { if (sidebar_is_collapsed()) expand_sidebar(); else collapse_sidebar(); } function collapse_sidebar() { sidebarwrapper.hide(); sidebar.css('width', ssb_width_collapsed); bodywrapper.css('margin-left', bw_margin_collapsed); sidebarbutton.css({ 'margin-left': '-1px', 'height': bodywrapper.height(), 'border-radius': '3px' }); sidebarbutton.find('span').text('»'); sidebarbutton.attr('title', _('Expand sidebar')); document.cookie = 'sidebar=collapsed'; } function expand_sidebar() { bodywrapper.css('margin-left', bw_margin_expanded); sidebar.css('width', ssb_width_expanded); sidebarwrapper.show(); sidebarbutton.css({ 'margin-left': ssb_width_expanded - 12, 'height': bodywrapper.height(), 'border-radius': '0px 3px 3px 0px' }); sidebarbutton.find('span').text('«'); sidebarbutton.attr('title', _('Collapse sidebar')); document.cookie = 'sidebar=expanded'; } function add_sidebar_button() { sidebarwrapper.css({ 'float': 'left', 'margin-right': '0', 'width': ssb_width_expanded - 18 }); // create the button sidebar.append('
<div id="sidebarbutton"><span>&laquo;</span></div>
'); var sidebarbutton = $('#sidebarbutton'); // find the height of the viewport to center the '<<' in the page var viewport_height; if (window.innerHeight) viewport_height = window.innerHeight; else viewport_height = $(window).height(); var sidebar_offset = sidebar.offset().top; var sidebar_height = Math.max(bodywrapper.height(), sidebar.height()); sidebarbutton.find('span').css({ 'font-family': '"Lucida Grande",Arial,sans-serif', 'display': 'block', 'top': Math.min(viewport_height/2, sidebar_height/2 + sidebar_offset) - 10, 'width': 12, 'position': 'fixed', 'text-align': 'center' }); sidebarbutton.click(toggle_sidebar); sidebarbutton.attr('title', _('Collapse sidebar')); sidebarbutton.css({ 'color': '#FFFFFF', 'background-color': light_color, 'border': '1px solid ' + light_color, 'border-radius': '0px 3px 3px 0px', 'font-size': '1.2em', 'cursor': 'pointer', 'height': sidebar_height, 'padding-top': '1px', 'margin': '-1px', 'margin-left': ssb_width_expanded - 12 }); sidebarbutton.hover( function () { $(this).css('background-color', dark_color); }, function () { $(this).css('background-color', light_color); } ); } function set_position_from_cookie() { if (!document.cookie) return; var items = document.cookie.split(';'); for(var k=0; k= (3, 3): from importlib import invalidate_caches else: def invalidate_caches(): return None # Python 2/3 compatibility if sys.version_info[0] < 3: string_types = (str, unicode) # noqa else: string_types = (str,) # Note: The following Warning subclasses are simply copies of the Warnings in # Astropy of the same names. class AstropyWarning(Warning): """ The base warning class from which all Astropy warnings should inherit. Any warning inheriting from this class is handled by the Astropy logger. """ class AstropyDeprecationWarning(AstropyWarning): """ A warning class to indicate a deprecated feature. """ class AstropyPendingDeprecationWarning(PendingDeprecationWarning, AstropyWarning): """ A warning class to indicate a soon-to-be deprecated feature. """ def _get_platlib_dir(cmd): """ Given a build command, return the name of the appropriate platform-specific build subdirectory directory (e.g. build/lib.linux-x86_64-2.7) """ plat_specifier = '.{0}-{1}'.format(cmd.plat_name, sys.version[0:3]) return os.path.join(cmd.build_base, 'lib' + plat_specifier) def get_numpy_include_path(): """ Gets the path to the numpy headers. """ # We need to go through this nonsense in case setuptools # downloaded and installed Numpy for us as part of the build or # install, since Numpy may still think it's in "setup mode", when # in fact we're ready to use it to build astropy now. 
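    # Background note (added for clarity; not in the upstream module, and
    # hedged accordingly): while numpy's own setup.py runs it sets
    # ``builtins.__NUMPY_SETUP__ = True``, and in that "setup mode" a plain
    # ``import numpy`` only performs a minimal import in which helpers such
    # as ``numpy.get_include()`` may not be available.  Deleting the flag and
    # reloading numpy, as done below, restores the fully imported package.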
if sys.version_info[0] >= 3: import builtins if hasattr(builtins, '__NUMPY_SETUP__'): del builtins.__NUMPY_SETUP__ import imp import numpy imp.reload(numpy) else: import __builtin__ if hasattr(__builtin__, '__NUMPY_SETUP__'): del __builtin__.__NUMPY_SETUP__ import numpy reload(numpy) try: numpy_include = numpy.get_include() except AttributeError: numpy_include = numpy.get_numpy_include() return numpy_include class _DummyFile(object): """A noop writeable object.""" errors = '' # Required for Python 3.x def write(self, s): pass def flush(self): pass @contextlib.contextmanager def silence(): """A context manager that silences sys.stdout and sys.stderr.""" old_stdout = sys.stdout old_stderr = sys.stderr sys.stdout = _DummyFile() sys.stderr = _DummyFile() exception_occurred = False try: yield except: exception_occurred = True # Go ahead and clean up so that exception handling can work normally sys.stdout = old_stdout sys.stderr = old_stderr raise if not exception_occurred: sys.stdout = old_stdout sys.stderr = old_stderr if sys.platform == 'win32': import ctypes def _has_hidden_attribute(filepath): """ Returns True if the given filepath has the hidden attribute on MS-Windows. Based on a post here: http://stackoverflow.com/questions/284115/cross-platform-hidden-file-detection """ if isinstance(filepath, bytes): filepath = filepath.decode(sys.getfilesystemencoding()) try: attrs = ctypes.windll.kernel32.GetFileAttributesW(filepath) assert attrs != -1 result = bool(attrs & 2) except (AttributeError, AssertionError): result = False return result else: def _has_hidden_attribute(filepath): return False def is_path_hidden(filepath): """ Determines if a given file or directory is hidden. Parameters ---------- filepath : str The path to a file or directory Returns ------- hidden : bool Returns `True` if the file is hidden """ name = os.path.basename(os.path.abspath(filepath)) if isinstance(name, bytes): is_dotted = name.startswith(b'.') else: is_dotted = name.startswith('.') return is_dotted or _has_hidden_attribute(filepath) def walk_skip_hidden(top, onerror=None, followlinks=False): """ A wrapper for `os.walk` that skips hidden files and directories. This function does not have the parameter `topdown` from `os.walk`: the directories must always be recursed top-down when using this function. See also -------- os.walk : For a description of the parameters """ for root, dirs, files in os.walk( top, topdown=True, onerror=onerror, followlinks=followlinks): # These lists must be updated in-place so os.walk will skip # hidden directories dirs[:] = [d for d in dirs if not is_path_hidden(d)] files[:] = [f for f in files if not is_path_hidden(f)] yield root, dirs, files def write_if_different(filename, data): """Write `data` to `filename`, if the content of the file is different. Parameters ---------- filename : str The file name to be written to. data : bytes The data to be written to `filename`. """ assert isinstance(data, bytes) if os.path.exists(filename): with open(filename, 'rb') as fd: original_data = fd.read() else: original_data = None if original_data != data: with open(filename, 'wb') as fd: fd.write(data) def import_file(filename, name=None): """ Imports a module from a single file as if it doesn't belong to a particular package. The returned module will have the optional ``name`` if given, or else a name generated from the filename. 
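    Examples
    --------
    Illustrative (hypothetical) use; the file path and module name below are
    placeholders, not files shipped with this package::

        >>> mod = import_file('somepkg/version.py', name='somepkg_version')  # doctest: +SKIP
        >>> mod.__name__  # doctest: +SKIP
        'somepkg_version'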
""" # Specifying a traditional dot-separated fully qualified name here # results in a number of "Parent module 'astropy' not found while # handling absolute import" warnings. Using the same name, the # namespaces of the modules get merged together. So, this # generates an underscore-separated name which is more likely to # be unique, and it doesn't really matter because the name isn't # used directly here anyway. mode = 'U' if sys.version_info[0] < 3 else 'r' if name is None: basename = os.path.splitext(filename)[0] name = '_'.join(os.path.relpath(basename).split(os.sep)[1:]) if import_machinery: loader = import_machinery.SourceFileLoader(name, filename) mod = loader.load_module() else: with open(filename, mode) as fd: mod = imp.load_module(name, fd, filename, ('.py', mode, 1)) return mod def resolve_name(name): """Resolve a name like ``module.object`` to an object and return it. Raise `ImportError` if the module or name is not found. """ parts = name.split('.') cursor = len(parts) - 1 module_name = parts[:cursor] attr_name = parts[-1] while cursor > 0: try: ret = __import__('.'.join(module_name), fromlist=[attr_name]) break except ImportError: if cursor == 0: raise cursor -= 1 module_name = parts[:cursor] attr_name = parts[cursor] ret = '' for part in parts[cursor:]: try: ret = getattr(ret, part) except AttributeError: raise ImportError(name) return ret if sys.version_info[0] >= 3: def iteritems(dictionary): return dictionary.items() else: def iteritems(dictionary): return dictionary.iteritems() def extends_doc(extended_func): """ A function decorator for use when wrapping an existing function but adding additional functionality. This copies the docstring from the original function, and appends to it (along with a newline) the docstring of the wrapper function. Example ------- >>> def foo(): ... '''Hello.''' ... >>> @extends_doc(foo) ... def bar(): ... '''Goodbye.''' ... >>> print(bar.__doc__) Hello. Goodbye. """ def decorator(func): if not (extended_func.__doc__ is None or func.__doc__ is None): func.__doc__ = '\n\n'.join([extended_func.__doc__.rstrip('\n'), func.__doc__.lstrip('\n')]) return func return decorator # Duplicated from astropy.utils.decorators.deprecated # When fixing issues in this function fix them in astropy first, then # port the fixes over to astropy-helpers def deprecated(since, message='', name='', alternative='', pending=False, obj_type=None): """ Used to mark a function or class as deprecated. To mark an attribute as deprecated, use `deprecated_attribute`. Parameters ------------ since : str The release at which this API became deprecated. This is required. message : str, optional Override the default deprecation message. The format specifier ``func`` may be used for the name of the function, and ``alternative`` may be used in the deprecation message to insert the name of an alternative to the deprecated function. ``obj_type`` may be used to insert a friendly name for the type of object being deprecated. name : str, optional The name of the deprecated function or class; if not provided the name is automatically determined from the passed in function or class, though this is useful in the case of renamed functions, where the new function is just assigned to the name of the deprecated function. For example:: def new_function(): ... oldFunction = new_function alternative : str, optional An alternative function or class name that the user may use in place of the deprecated object. The deprecation warning will tell the user about this alternative if provided. 
pending : bool, optional If True, uses a AstropyPendingDeprecationWarning instead of a AstropyDeprecationWarning. obj_type : str, optional The type of this object, if the automatically determined one needs to be overridden. """ method_types = (classmethod, staticmethod, types.MethodType) def deprecate_doc(old_doc, message): """ Returns a given docstring with a deprecation message prepended to it. """ if not old_doc: old_doc = '' old_doc = textwrap.dedent(old_doc).strip('\n') new_doc = (('\n.. deprecated:: %(since)s' '\n %(message)s\n\n' % {'since': since, 'message': message.strip()}) + old_doc) if not old_doc: # This is to prevent a spurious 'unexpected unindent' warning from # docutils when the original docstring was blank. new_doc += r'\ ' return new_doc def get_function(func): """ Given a function or classmethod (or other function wrapper type), get the function object. """ if isinstance(func, method_types): func = func.__func__ return func def deprecate_function(func, message): """ Returns a wrapped function that displays an ``AstropyDeprecationWarning`` when it is called. """ if isinstance(func, method_types): func_wrapper = type(func) else: func_wrapper = lambda f: f func = get_function(func) def deprecated_func(*args, **kwargs): if pending: category = AstropyPendingDeprecationWarning else: category = AstropyDeprecationWarning warnings.warn(message, category, stacklevel=2) return func(*args, **kwargs) # If this is an extension function, we can't call # functools.wraps on it, but we normally don't care. # This crazy way to get the type of a wrapper descriptor is # straight out of the Python 3.3 inspect module docs. if type(func) != type(str.__dict__['__add__']): deprecated_func = functools.wraps(func)(deprecated_func) deprecated_func.__doc__ = deprecate_doc( deprecated_func.__doc__, message) return func_wrapper(deprecated_func) def deprecate_class(cls, message): """ Returns a wrapper class with the docstrings updated and an __init__ function that will raise an ``AstropyDeprectationWarning`` warning when called. """ # Creates a new class with the same name and bases as the # original class, but updates the dictionary with a new # docstring and a wrapped __init__ method. __module__ needs # to be manually copied over, since otherwise it will be set # to *this* module (astropy.utils.misc). # This approach seems to make Sphinx happy (the new class # looks enough like the original class), and works with # extension classes (which functools.wraps does not, since # it tries to modify the original class). # We need to add a custom pickler or you'll get # Can't pickle : it's not found as ... # errors. Picklability is required for any class that is # documented by Sphinx. 
members = cls.__dict__.copy() members.update({ '__doc__': deprecate_doc(cls.__doc__, message), '__init__': deprecate_function(get_function(cls.__init__), message), }) return type(cls.__name__, cls.__bases__, members) def deprecate(obj, message=message, name=name, alternative=alternative, pending=pending): if obj_type is None: if isinstance(obj, type): obj_type_name = 'class' elif inspect.isfunction(obj): obj_type_name = 'function' elif inspect.ismethod(obj) or isinstance(obj, method_types): obj_type_name = 'method' else: obj_type_name = 'object' else: obj_type_name = obj_type if not name: name = get_function(obj).__name__ altmessage = '' if not message or type(message) == type(deprecate): if pending: message = ('The %(func)s %(obj_type)s will be deprecated in a ' 'future version.') else: message = ('The %(func)s %(obj_type)s is deprecated and may ' 'be removed in a future version.') if alternative: altmessage = '\n Use %s instead.' % alternative message = ((message % { 'func': name, 'name': name, 'alternative': alternative, 'obj_type': obj_type_name}) + altmessage) if isinstance(obj, type): return deprecate_class(obj, message) else: return deprecate_function(obj, message) if type(message) == type(deprecate): return deprecate(message) return deprecate def deprecated_attribute(name, since, message=None, alternative=None, pending=False): """ Used to mark a public attribute as deprecated. This creates a property that will warn when the given attribute name is accessed. To prevent the warning (i.e. for internal code), use the private name for the attribute by prepending an underscore (i.e. ``self._name``). Parameters ---------- name : str The name of the deprecated attribute. since : str The release at which this API became deprecated. This is required. message : str, optional Override the default deprecation message. The format specifier ``name`` may be used for the name of the attribute, and ``alternative`` may be used in the deprecation message to insert the name of an alternative to the deprecated function. alternative : str, optional An alternative attribute that the user may use in place of the deprecated attribute. The deprecation warning will tell the user about this alternative if provided. pending : bool, optional If True, uses a AstropyPendingDeprecationWarning instead of a AstropyDeprecationWarning. Examples -------- :: class MyClass: # Mark the old_name as deprecated old_name = misc.deprecated_attribute('old_name', '0.1') def method(self): self._old_name = 42 """ private_name = '_' + name @deprecated(since, name=name, obj_type='attribute') def get(self): return getattr(self, private_name) @deprecated(since, name=name, obj_type='attribute') def set(self, val): setattr(self, private_name, val) @deprecated(since, name=name, obj_type='attribute') def delete(self): delattr(self, private_name) return property(get, set, delete) def minversion(module, version, inclusive=True, version_path='__version__'): """ Returns `True` if the specified Python module satisfies a minimum version requirement, and `False` if not. By default this uses `pkg_resources.parse_version` to do the version comparison if available. Otherwise it falls back on `distutils.version.LooseVersion`. Parameters ---------- module : module or `str` An imported module of which to check the version, or the name of that module (in which case an import of that module is attempted-- if this fails `False` is returned). version : `str` The version as a string that this module must have at a minimum (e.g. ``'0.12'``). 
inclusive : `bool` The specified version meets the requirement inclusively (i.e. ``>=``) as opposed to strictly greater than (default: `True`). version_path : `str` A dotted attribute path to follow in the module for the version. Defaults to just ``'__version__'``, which should work for most Python modules. Examples -------- >>> import astropy >>> minversion(astropy, '0.4.4') True """ if isinstance(module, types.ModuleType): module_name = module.__name__ elif isinstance(module, string_types): module_name = module try: module = resolve_name(module_name) except ImportError: return False else: raise ValueError('module argument must be an actual imported ' 'module, or the import name of the module; ' 'got {0!r}'.format(module)) if '.' not in version_path: have_version = getattr(module, version_path) else: have_version = resolve_name('.'.join([module.__name__, version_path])) try: from pkg_resources import parse_version except ImportError: from distutils.version import LooseVersion as parse_version if inclusive: return parse_version(have_version) >= parse_version(version) else: return parse_version(have_version) > parse_version(version) # Copy of the classproperty decorator from astropy.utils.decorators class classproperty(property): """ Similar to `property`, but allows class-level properties. That is, a property whose getter is like a `classmethod`. The wrapped method may explicitly use the `classmethod` decorator (which must become before this decorator), or the `classmethod` may be omitted (it is implicit through use of this decorator). .. note:: classproperty only works for *read-only* properties. It does not currently allow writeable/deleteable properties, due to subtleties of how Python descriptors work. In order to implement such properties on a class a metaclass for that class must be implemented. Parameters ---------- fget : callable The function that computes the value of this property (in particular, the function when this is used as a decorator) a la `property`. doc : str, optional The docstring for the property--by default inherited from the getter function. lazy : bool, optional If True, caches the value returned by the first call to the getter function, so that it is only called once (used for lazy evaluation of an attribute). This is analogous to `lazyproperty`. The ``lazy`` argument can also be used when `classproperty` is used as a decorator (see the third example below). When used in the decorator syntax this *must* be passed in as a keyword argument. Examples -------- :: >>> class Foo(object): ... _bar_internal = 1 ... @classproperty ... def bar(cls): ... return cls._bar_internal + 1 ... >>> Foo.bar 2 >>> foo_instance = Foo() >>> foo_instance.bar 2 >>> foo_instance._bar_internal = 2 >>> foo_instance.bar # Ignores instance attributes 2 As previously noted, a `classproperty` is limited to implementing read-only attributes:: >>> class Foo(object): ... _bar_internal = 1 ... @classproperty ... def bar(cls): ... return cls._bar_internal ... @bar.setter ... def bar(cls, value): ... cls._bar_internal = value ... Traceback (most recent call last): ... NotImplementedError: classproperty can only be read-only; use a metaclass to implement modifiable class-level properties When the ``lazy`` option is used, the getter is only called once:: >>> class Foo(object): ... @classproperty(lazy=True) ... def bar(cls): ... print("Performing complicated calculation") ... return 1 ... 
>>> Foo.bar Performing complicated calculation 1 >>> Foo.bar 1 If a subclass inherits a lazy `classproperty` the property is still re-evaluated for the subclass:: >>> class FooSub(Foo): ... pass ... >>> FooSub.bar Performing complicated calculation 1 >>> FooSub.bar 1 """ def __new__(cls, fget=None, doc=None, lazy=False): if fget is None: # Being used as a decorator--return a wrapper that implements # decorator syntax def wrapper(func): return cls(func, lazy=lazy) return wrapper return super(classproperty, cls).__new__(cls) def __init__(self, fget, doc=None, lazy=False): self._lazy = lazy if lazy: self._cache = {} fget = self._wrap_fget(fget) super(classproperty, self).__init__(fget=fget, doc=doc) # There is a buglet in Python where self.__doc__ doesn't # get set properly on instances of property subclasses if # the doc argument was used rather than taking the docstring # from fget if doc is not None: self.__doc__ = doc def __get__(self, obj, objtype=None): if self._lazy and objtype in self._cache: return self._cache[objtype] if objtype is not None: # The base property.__get__ will just return self here; # instead we pass objtype through to the original wrapped # function (which takes the class as its sole argument) val = self.fget.__wrapped__(objtype) else: val = super(classproperty, self).__get__(obj, objtype=objtype) if self._lazy: if objtype is None: objtype = obj.__class__ self._cache[objtype] = val return val def getter(self, fget): return super(classproperty, self).getter(self._wrap_fget(fget)) def setter(self, fset): raise NotImplementedError( "classproperty can only be read-only; use a metaclass to " "implement modifiable class-level properties") def deleter(self, fdel): raise NotImplementedError( "classproperty can only be read-only; use a metaclass to " "implement modifiable class-level properties") @staticmethod def _wrap_fget(orig_fget): if isinstance(orig_fget, classmethod): orig_fget = orig_fget.__func__ # Using stock functools.wraps instead of the fancier version # found later in this module, which is overkill for this purpose @functools.wraps(orig_fget) def fget(obj): return orig_fget(obj.__class__) # Set the __wrapped__ attribute manually for support on Python 2 fget.__wrapped__ = orig_fget return fget def find_data_files(package, pattern): """ Include files matching ``pattern`` inside ``package``. Parameters ---------- package : str The package inside which to look for data files pattern : str Pattern (glob-style) to match for the data files (e.g. ``*.dat``). This supports the Python 3.5 ``**``recursive syntax. For example, ``**/*.fits`` matches all files ending with ``.fits`` recursively. Only one instance of ``**`` can be included in the pattern. 
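    Examples
    --------
    Hypothetical package layouts, shown only to illustrate the call signature
    and the return value (a flat list of matching paths)::

        >>> find_data_files('mypackage', 'data/*.dat')  # doctest: +SKIP
        ['mypackage/data/coords.dat', 'mypackage/data/table.dat']
        >>> find_data_files('mypackage', '**/*.fits')   # doctest: +SKIP
        ['mypackage/data/image.fits', 'mypackage/data/sub/image2.fits']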
""" if sys.version_info[:2] >= (3, 5): return glob.glob(os.path.join(package, pattern), recursive=True) else: if '**' in pattern: start, end = pattern.split('**') if end.startswith(('/', os.sep)): end = end[1:] matches = glob.glob(os.path.join(package, start, end)) for root, dirs, files in os.walk(os.path.join(package, start)): for dirname in dirs: matches += glob.glob(os.path.join(root, dirname, end)) return matches else: return glob.glob(os.path.join(package, pattern)) photutils-0.4/astropy_helpers/astropy_helpers/version.py0000644000214200020070000000102613175652706026313 0ustar lbradleySTSCI\science00000000000000# Autogenerated by Astropy-affiliated package astropy_helpers's setup.py on 2017-10-30 16:39:34 from __future__ import unicode_literals import datetime version = "2.0.2" githash = "d23a53f46dd1c3703e5eee63dca3f53bd18a4e8b" major = 2 minor = 0 bugfix = 2 release = True timestamp = datetime.datetime(2017, 10, 30, 16, 39, 34) debug = False try: from ._compiler import compiler except ImportError: compiler = "unknown" try: from .cython_version import cython_version except ImportError: cython_version = "unknown" photutils-0.4/astropy_helpers/astropy_helpers/version_helpers.py0000644000214200020070000002303313175633272030034 0ustar lbradleySTSCI\science00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst """ Utilities for generating the version string for Astropy (or an affiliated package) and the version.py module, which contains version info for the package. Within the generated astropy.version module, the `major`, `minor`, and `bugfix` variables hold the respective parts of the version number (bugfix is '0' if absent). The `release` variable is True if this is a release, and False if this is a development version of astropy. For the actual version string, use:: from astropy.version import version or:: from astropy import __version__ """ from __future__ import division import datetime import imp import os import pkgutil import sys import time from distutils import log import pkg_resources from . import git_helpers from .distutils_helpers import is_distutils_display_option from .utils import invalidate_caches PY3 = sys.version_info[0] == 3 def _version_split(version): """ Split a version string into major, minor, and bugfix numbers. If any of those numbers are missing the default is zero. Any pre/post release modifiers are ignored. Examples ======== >>> _version_split('1.2.3') (1, 2, 3) >>> _version_split('1.2') (1, 2, 0) >>> _version_split('1.2rc1') (1, 2, 0) >>> _version_split('1') (1, 0, 0) >>> _version_split('') (0, 0, 0) """ parsed_version = pkg_resources.parse_version(version) if hasattr(parsed_version, 'base_version'): # New version parsing for setuptools >= 8.0 if parsed_version.base_version: parts = [int(part) for part in parsed_version.base_version.split('.')] else: parts = [] else: parts = [] for part in parsed_version: if part.startswith('*'): # Ignore any .dev, a, b, rc, etc. break parts.append(int(part)) if len(parts) < 3: parts += [0] * (3 - len(parts)) # In principle a version could have more parts (like 1.2.3.4) but we only # support .. return tuple(parts[:3]) # This is used by setup.py to create a new version.py - see that file for # details. Note that the imports have to be absolute, since this is also used # by affiliated packages. 
_FROZEN_VERSION_PY_TEMPLATE = """ # Autogenerated by {packagetitle}'s setup.py on {timestamp!s} from __future__ import unicode_literals import datetime {header} major = {major} minor = {minor} bugfix = {bugfix} release = {rel} timestamp = {timestamp!r} debug = {debug} try: from ._compiler import compiler except ImportError: compiler = "unknown" try: from .cython_version import cython_version except ImportError: cython_version = "unknown" """[1:] _FROZEN_VERSION_PY_WITH_GIT_HEADER = """ {git_helpers} _packagename = "{packagename}" _last_generated_version = "{verstr}" _last_githash = "{githash}" # Determine where the source code for this module # lives. If __file__ is not a filesystem path then # it is assumed not to live in a git repo at all. if _get_repo_path(__file__, levels=len(_packagename.split('.'))): version = update_git_devstr(_last_generated_version, path=__file__) githash = get_git_devstr(sha=True, show_warning=False, path=__file__) or _last_githash else: # The file does not appear to live in a git repo so don't bother # invoking git version = _last_generated_version githash = _last_githash """[1:] _FROZEN_VERSION_PY_STATIC_HEADER = """ version = "{verstr}" githash = "{githash}" """[1:] def _get_version_py_str(packagename, version, githash, release, debug, uses_git=True): epoch = int(os.environ.get('SOURCE_DATE_EPOCH', time.time())) timestamp = datetime.datetime.utcfromtimestamp(epoch) major, minor, bugfix = _version_split(version) if packagename.lower() == 'astropy': packagetitle = 'Astropy' else: packagetitle = 'Astropy-affiliated package ' + packagename header = '' if uses_git: header = _generate_git_header(packagename, version, githash) elif not githash: # _generate_git_header will already generate a new git has for us, but # for creating a new version.py for a release (even if uses_git=False) # we still need to get the githash to include in the version.py # See https://github.com/astropy/astropy-helpers/issues/141 githash = git_helpers.get_git_devstr(sha=True, show_warning=True) if not header: # If _generate_git_header fails it returns an empty string header = _FROZEN_VERSION_PY_STATIC_HEADER.format(verstr=version, githash=githash) return _FROZEN_VERSION_PY_TEMPLATE.format(packagetitle=packagetitle, timestamp=timestamp, header=header, major=major, minor=minor, bugfix=bugfix, rel=release, debug=debug) def _generate_git_header(packagename, version, githash): """ Generates a header to the version.py module that includes utilities for probing the git repository for updates (to the current git hash, etc.) These utilities should only be available in development versions, and not in release builds. If this fails for any reason an empty string is returned. """ loader = pkgutil.get_loader(git_helpers) source = loader.get_source(git_helpers.__name__) or '' source_lines = source.splitlines() if not source_lines: log.warn('Cannot get source code for astropy_helpers.git_helpers; ' 'git support disabled.') return '' idx = 0 for idx, line in enumerate(source_lines): if line.startswith('# BEGIN'): break git_helpers_py = '\n'.join(source_lines[idx + 1:]) if PY3: verstr = version else: # In Python 2 don't pass in a unicode string; otherwise verstr will # be represented with u'' syntax which breaks on Python 3.x with x # < 3. 
This is only an issue when developing on multiple Python # versions at once verstr = version.encode('utf8') new_githash = git_helpers.get_git_devstr(sha=True, show_warning=False) if new_githash: githash = new_githash return _FROZEN_VERSION_PY_WITH_GIT_HEADER.format( git_helpers=git_helpers_py, packagename=packagename, verstr=verstr, githash=githash) def generate_version_py(packagename, version, release=None, debug=None, uses_git=True, srcdir='.'): """Regenerate the version.py module if necessary.""" try: version_module = get_pkg_version_module(packagename) try: last_generated_version = version_module._last_generated_version except AttributeError: last_generated_version = version_module.version try: last_githash = version_module._last_githash except AttributeError: last_githash = version_module.githash current_release = version_module.release current_debug = version_module.debug except ImportError: version_module = None last_generated_version = None last_githash = None current_release = None current_debug = None if release is None: # Keep whatever the current value is, if it exists release = bool(current_release) if debug is None: # Likewise, keep whatever the current value is, if it exists debug = bool(current_debug) version_py = os.path.join(srcdir, packagename, 'version.py') if (last_generated_version != version or current_release != release or current_debug != debug): if '-q' not in sys.argv and '--quiet' not in sys.argv: log.set_threshold(log.INFO) if is_distutils_display_option(): # Always silence unnecessary log messages when display options are # being used log.set_threshold(log.WARN) log.info('Freezing version number to {0}'.format(version_py)) with open(version_py, 'w') as f: # This overwrites the actual version.py f.write(_get_version_py_str(packagename, version, last_githash, release, debug, uses_git=uses_git)) invalidate_caches() if version_module: imp.reload(version_module) def get_pkg_version_module(packagename, fromlist=None): """Returns the package's .version module generated by `astropy_helpers.version_helpers.generate_version_py`. Raises an ImportError if the version module is not found. If ``fromlist`` is an iterable, return a tuple of the members of the version module corresponding to the member names given in ``fromlist``. Raises an `AttributeError` if any of these module members are not found. """ if not fromlist: # Due to a historical quirk of Python's import implementation, # __import__ will not return submodules of a package if 'fromlist' is # empty. 
# TODO: For Python 3.1 and up it may be preferable to use importlib # instead of the __import__ builtin return __import__(packagename + '.version', fromlist=['']) else: mod = __import__(packagename + '.version', fromlist=fromlist) return tuple(getattr(mod, member) for member in fromlist) photutils-0.4/astropy_helpers/astropy_helpers.egg-info/0000755000214200020070000000000013175654702025745 5ustar lbradleySTSCI\science00000000000000photutils-0.4/astropy_helpers/astropy_helpers.egg-info/dependency_links.txt0000644000214200020070000000000113175652707032016 0ustar lbradleySTSCI\science00000000000000 photutils-0.4/astropy_helpers/astropy_helpers.egg-info/not-zip-safe0000644000214200020070000000000113175652707030176 0ustar lbradleySTSCI\science00000000000000 photutils-0.4/astropy_helpers/astropy_helpers.egg-info/PKG-INFO0000644000214200020070000000760113175652707027051 0ustar lbradleySTSCI\science00000000000000Metadata-Version: 1.1 Name: astropy-helpers Version: 2.0.2 Summary: Utilities for building and installing Astropy, Astropy affiliated packages, and their respective documentation. Home-page: https://github.com/astropy/astropy-helpers Author: The Astropy Developers Author-email: astropy.team@gmail.com License: BSD Description-Content-Type: UNKNOWN Description: astropy-helpers =============== * Stable versions: https://pypi.org/project/astropy-helpers/ * Development version, issue tracker: https://github.com/astropy/astropy-helpers This project provides a Python package, ``astropy_helpers``, which includes many build, installation, and documentation-related tools used by the Astropy project, but packaged separately for use by other projects that wish to leverage this work. The motivation behind this package and details of its implementation are in the accepted `Astropy Proposal for Enhancement (APE) 4 `_. The ``astropy_helpers.extern`` sub-module includes modules developed elsewhere that are bundled here for convenience. At the moment, this consists of the following two sphinx extensions: * `numpydoc `_, a Sphinx extension developed as part of the Numpy project. This is used to parse docstrings in Numpy format * `sphinx-automodapi `_, a Sphinx developed as part of the Astropy project. This used to be developed directly in ``astropy-helpers`` but is now a standalone package. Issues with these sub-modules should be reported in their respective repositories, and we will regularly update the bundled versions to reflect the latest released versions. ``astropy_helpers`` includes a special "bootstrap" module called ``ah_bootstrap.py`` which is intended to be used by a project's setup.py in order to ensure that the ``astropy_helpers`` package is available for build/installation. This is similar to the ``ez_setup.py`` module that is shipped with some projects to bootstrap `setuptools `_. As described in APE4, the version numbers for ``astropy_helpers`` follow the corresponding major/minor version of the `astropy core package `_, but with an independent sequence of micro (bugfix) version numbers. Hence, the initial release is 0.4, in parallel with Astropy v0.4, which will be the first version of Astropy to use ``astropy-helpers``. For examples of how to implement ``astropy-helpers`` in a project, see the ``setup.py`` and ``setup.cfg`` files of the `Affiliated package template `_. .. image:: https://travis-ci.org/astropy/astropy-helpers.svg :target: https://travis-ci.org/astropy/astropy-helpers .. 
image:: https://coveralls.io/repos/astropy/astropy-helpers/badge.svg :target: https://coveralls.io/r/astropy/astropy-helpers Platform: UNKNOWN Classifier: Development Status :: 5 - Production/Stable Classifier: Intended Audience :: Developers Classifier: Framework :: Setuptools Plugin Classifier: Framework :: Sphinx :: Extension Classifier: Framework :: Sphinx :: Theme Classifier: License :: OSI Approved :: BSD License Classifier: Operating System :: OS Independent Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: 3 Classifier: Topic :: Software Development :: Build Tools Classifier: Topic :: Software Development :: Libraries :: Python Modules Classifier: Topic :: System :: Archiving :: Packaging photutils-0.4/astropy_helpers/astropy_helpers.egg-info/SOURCES.txt0000644000214200020070000000642513175652707027643 0ustar lbradleySTSCI\science00000000000000CHANGES.rst LICENSE.rst MANIFEST.in README.rst ah_bootstrap.py ez_setup.py setup.cfg setup.py astropy_helpers/__init__.py astropy_helpers/distutils_helpers.py astropy_helpers/git_helpers.py astropy_helpers/openmp_helpers.py astropy_helpers/setup_helpers.py astropy_helpers/test_helpers.py astropy_helpers/utils.py astropy_helpers/version.py astropy_helpers/version_helpers.py astropy_helpers.egg-info/PKG-INFO astropy_helpers.egg-info/SOURCES.txt astropy_helpers.egg-info/dependency_links.txt astropy_helpers.egg-info/not-zip-safe astropy_helpers.egg-info/top_level.txt astropy_helpers/commands/__init__.py astropy_helpers/commands/_dummy.py astropy_helpers/commands/_test_compat.py astropy_helpers/commands/build_ext.py astropy_helpers/commands/build_py.py astropy_helpers/commands/build_sphinx.py astropy_helpers/commands/install.py astropy_helpers/commands/install_lib.py astropy_helpers/commands/register.py astropy_helpers/commands/setup_package.py astropy_helpers/commands/test.py astropy_helpers/commands/src/compiler.c astropy_helpers/compat/__init__.py astropy_helpers/extern/__init__.py astropy_helpers/extern/setup_package.py astropy_helpers/extern/automodapi/__init__.py astropy_helpers/extern/automodapi/autodoc_enhancements.py astropy_helpers/extern/automodapi/automodapi.py astropy_helpers/extern/automodapi/automodsumm.py astropy_helpers/extern/automodapi/smart_resolver.py astropy_helpers/extern/automodapi/utils.py astropy_helpers/extern/automodapi/templates/autosummary_core/base.rst astropy_helpers/extern/automodapi/templates/autosummary_core/class.rst astropy_helpers/extern/automodapi/templates/autosummary_core/module.rst astropy_helpers/extern/numpydoc/__init__.py astropy_helpers/extern/numpydoc/docscrape.py astropy_helpers/extern/numpydoc/docscrape_sphinx.py astropy_helpers/extern/numpydoc/numpydoc.py astropy_helpers/extern/numpydoc/templates/numpydoc_docstring.rst astropy_helpers/sphinx/__init__.py astropy_helpers/sphinx/conf.py astropy_helpers/sphinx/setup_package.py astropy_helpers/sphinx/ext/__init__.py astropy_helpers/sphinx/ext/changelog_links.py astropy_helpers/sphinx/ext/doctest.py astropy_helpers/sphinx/ext/edit_on_github.py astropy_helpers/sphinx/ext/tocdepthfix.py astropy_helpers/sphinx/ext/tests/__init__.py astropy_helpers/sphinx/local/python2_local_links.inv astropy_helpers/sphinx/local/python3_local_links.inv astropy_helpers/sphinx/themes/bootstrap-astropy/globaltoc.html astropy_helpers/sphinx/themes/bootstrap-astropy/layout.html astropy_helpers/sphinx/themes/bootstrap-astropy/localtoc.html astropy_helpers/sphinx/themes/bootstrap-astropy/searchbox.html 
astropy_helpers/sphinx/themes/bootstrap-astropy/theme.conf astropy_helpers/sphinx/themes/bootstrap-astropy/static/astropy_linkout.svg astropy_helpers/sphinx/themes/bootstrap-astropy/static/astropy_linkout_20.png astropy_helpers/sphinx/themes/bootstrap-astropy/static/astropy_logo.ico astropy_helpers/sphinx/themes/bootstrap-astropy/static/astropy_logo.svg astropy_helpers/sphinx/themes/bootstrap-astropy/static/astropy_logo_32.png astropy_helpers/sphinx/themes/bootstrap-astropy/static/bootstrap-astropy.css astropy_helpers/sphinx/themes/bootstrap-astropy/static/copybutton.js astropy_helpers/sphinx/themes/bootstrap-astropy/static/sidebar.js licenses/LICENSE_ASTROSCRAPPY.rst licenses/LICENSE_COPYBUTTON.rst licenses/LICENSE_NUMPYDOC.rstphotutils-0.4/astropy_helpers/astropy_helpers.egg-info/top_level.txt0000644000214200020070000000002013175652707030472 0ustar lbradleySTSCI\science00000000000000astropy_helpers photutils-0.4/astropy_helpers/CHANGES.rst0000644000214200020070000003744613175633272022647 0ustar lbradleySTSCI\science00000000000000astropy-helpers Changelog ************************* 2.0.2 (2017-10-13) ------------------ - Added new helper function add_openmp_flags_if_available that can add OpenMP compilation flags to a C/Cython extension if needed. [#346] - Update numpydoc to v0.7. [#343] - The function ``get_git_devstr`` now returns ``'0'`` instead of ``None`` when no git repository is present. This allows generation of development version strings that are in a format that ``setuptools`` expects (e.g. "1.1.3.dev0" instead of "1.1.3.dev"). [#330] - It is now possible to override generated timestamps to make builds reproducible by setting the ``SOURCE_DATE_EPOCH`` environment variable [#341] - Mark Sphinx extensions as parallel-safe. [#344] - Switch to using mathjax instead of imgmath for local builds. [#342] - Deprecate ``exclude`` parameter of various functions in setup_helpers since it could not work as intended. Add new function ``add_exclude_packages`` to provide intended behavior. [#331] - Allow custom Sphinx doctest extension to recognize and process standard doctest directives ``testsetup`` and ``doctest``. [#335] 2.0.1 (2017-07-28) ------------------ - Fix compatibility with Sphinx <1.5. [#326] 2.0 (2017-07-06) ---------------- - Add support for package that lies in a subdirectory. [#249] - Removing ``compat.subprocess``. [#298] - Python 3.3 is no longer supported. [#300] - The 'automodapi' Sphinx extension (and associated dependencies) has now been moved to a standalone package which can be found at https://github.com/astropy/sphinx-automodapi - this is now bundled in astropy-helpers under astropy_helpers.extern.automodapi for convenience. Version shipped with astropy-helpers is v0.6. [#278, #303, #309, #323] - The ``numpydoc`` Sphinx extension has now been moved to ``astropy_helpers.extern``. [#278] - Fix ``build_docs`` error catching, so it doesn't hide Sphinx errors. [#292] - Fix compatibility with Sphinx 1.6. [#318] - Updating ez_setup.py to the last version before it's removal. [#321] 1.3.1 (2017-03-18) ------------------ - Fixed the missing button to hide output in documentation code blocks. [#287] - Fixed bug when ``build_docs`` when running with the clean (-l) option. [#289] - Add alternative location for various intersphinx inventories to fall back to. [#293] 1.3 (2016-12-16) ---------------- - ``build_sphinx`` has been deprecated in favor of the ``build_docs`` command. [#246] - Force the use of Cython's old ``build_ext`` command. 
A new ``build_ext`` command was added in Cython 0.25, but it does not work with astropy-helpers currently. [#261] 1.2 (2016-06-18) ---------------- - Added sphinx configuration value ``automodsumm_inherited_members``. If ``True`` this will include members that are inherited from a base class in the generated API docs. Defaults to ``False`` which matches the previous behavior. [#215] - Fixed ``build_sphinx`` to recognize builds that succeeded but have output *after* the "build succeeded." statement. This only applies when ``--warnings-returncode`` is given (which is primarily relevant for Travis documentation builds). [#223] - Fixed ``build_sphinx`` the sphinx extensions to not output a spurious warning for sphinx versions > 1.4. [#229] - Add Python version dependent local sphinx inventories that contain otherwise missing references. [#216] - ``astropy_helpers`` now require Sphinx 1.3 or later. [#226] 1.1.2 (2016-03-9) ----------------- - The CSS for the sphinx documentation was altered to prevent some text overflow problems. [#217] 1.1.1 (2015-12-23) ------------------ - Fixed crash in build with ``AttributeError: cython_create_listing`` with older versions of setuptools. [#209, #210] 1.1 (2015-12-10) ---------------- - The original ``AstropyTest`` class in ``astropy_helpers``, which implements the ``setup.py test`` command, is deprecated in favor of moving the implementation of that command closer to the actual Astropy test runner in ``astropy.tests``. Now a dummy ``test`` command is provided solely for informing users that they need ``astropy`` installed to run the tests (however, the previous, now deprecated implementation is still provided and continues to work with older versions of Astropy). See the related issue for more details. [#184] - Added a useful new utility function to ``astropy_helpers.utils`` called ``find_data_files``. This is similar to the ``find_packages`` function in setuptools in that it can be used to search a package for data files (matching a pattern) that can be passed to the ``package_data`` argument for ``setup()``. See the docstring to ``astropy_helpers.utils.find_data_files`` for more details. [#42] - The ``astropy_helpers`` module now sets the global ``_ASTROPY_SETUP_`` flag upon import (from within a ``setup.py``) script, so it's not necessary to have this in the ``setup.py`` script explicitly. If in doubt though, there's no harm in setting it twice. Putting it in ``astropy_helpers`` just ensures that any other imports that occur during build will have this flag set. [#191] - It is now possible to use Cython as a ``setup_requires`` build requirement, and still build Cython extensions even if Cython wasn't available at the beginning of the build processes (that is, is automatically downloaded via setuptools' processing of ``setup_requires``). [#185] - Moves the ``adjust_compiler`` check into the ``build_ext`` command itself, so it's only used when actually building extension modules. This also deprecates the stand-alone ``adjust_compiler`` function. [#76] - When running the ``build_sphinx`` / ``build_docs`` command with the ``-w`` option, the output from Sphinx is streamed as it runs instead of silently buffering until the doc build is complete. [#197] 1.0.7 (unreleased) ------------------ - Fix missing import in ``astropy_helpers/utils.py``. [#196] 1.0.6 (2015-12-04) ------------------ - Fixed bug where running ``./setup.py build_sphinx`` could return successfully even when the build was not successful (and should have returned a non-zero error code). 
[#199] 1.0.5 (2015-10-02) ------------------ - Fixed a regression in the ``./setup.py test`` command that was introduced in v1.0.4. 1.0.4 (2015-10-02) ------------------ - Fixed issue with the sphinx documentation css where the line numbers for code blocks were not aligned with the code. [#179, #180] - Fixed crash that could occur when trying to build Cython extension modules when Cython isn't installed. Normally this still results in a failed build, but was supposed to provide a useful error message rather than crash outright (this was a regression introduced in v1.0.3). [#181] - Fixed a crash that could occur on Python 3 when a working C compiler isn't found. [#182] - Quieted warnings about deprecated Numpy API in Cython extensions, when building Cython extensions against Numpy >= 1.7. [#183, #186] - Improved support for py.test >= 2.7--running the ``./setup.py test`` command now copies all doc pages into the temporary test directory as well, so that all test files have a "common root directory". [#189, #190] 1.0.3 (2015-07-22) ------------------ - Added workaround for sphinx-doc/sphinx#1843, a but in Sphinx which prevented descriptor classes with a custom metaclass from being documented correctly. [#158] - Added an alias for the ``./setup.py build_sphinx`` command as ``./setup.py build_docs`` which, to a new contributor, should hopefully be less cryptic. [#161] - The fonts in graphviz diagrams now match the font of the HTML content. [#169] - When the documentation is built on readthedocs.org, MathJax will be used for math rendering. When built elsewhere, the "pngmath" extension is still used for math rendering. [#170] - Fix crash when importing astropy_helpers when running with ``python -OO`` [#171] - The ``build`` and ``build_ext`` stages now correctly recognize the presence of C++ files in Cython extensions (previously only vanilla C worked). [#173] 1.0.2 (2015-04-02) ------------------ - Various fixes enabling the astropy-helpers Sphinx build command and Sphinx extensions to work with Sphinx 1.3. [#148] - More improvement to the ability to handle multiple versions of astropy-helpers being imported in the same Python interpreter session in the (somewhat rare) case of nested installs. [#147] - To better support high resolution displays, use SVG for the astropy logo and linkout image, falling back to PNGs for browsers that support it. [#150, #151] - Improve ``setup_helpers.get_compiler_version`` to work with more compilers, and to return more info. This will help fix builds of Astropy on less common compilers, like Sun C. [#153] 1.0.1 (2015-03-04) ------------------ - Released in concert with v0.4.8 to address the same issues. 0.4.8 (2015-03-04) ------------------ - Improved the ``ah_bootstrap`` script's ability to override existing installations of astropy-helpers with new versions in the context of installing multiple packages simultaneously within the same Python interpreter (e.g. when one package has in its ``setup_requires`` another package that uses a different version of astropy-helpers. [#144] - Added a workaround to an issue in matplotlib that can, in rare cases, lead to a crash when installing packages that import matplotlib at build time. [#144] 1.0 (2015-02-17) ---------------- - Added new pre-/post-command hook points for ``setup.py`` commands. Now any package can define code to run before and/or after any ``setup.py`` command without having to manually subclass that command by adding ``pre__hook`` and ``post__hook`` callables to the package's ``setup_package.py`` module. 
See the PR for more details. [#112] - The following objects in the ``astropy_helpers.setup_helpers`` module have been relocated: - ``get_dummy_distribution``, ``get_distutils_*``, ``get_compiler_option``, ``add_command_option``, ``is_distutils_display_option`` -> ``astropy_helpers.distutils_helpers`` - ``should_build_with_cython``, ``generate_build_ext_command`` -> ``astropy_helpers.commands.build_ext`` - ``AstropyBuildPy`` -> ``astropy_helpers.commands.build_py`` - ``AstropyBuildSphinx`` -> ``astropy_helpers.commands.build_sphinx`` - ``AstropyInstall`` -> ``astropy_helpers.commands.install`` - ``AstropyInstallLib`` -> ``astropy_helpers.commands.install_lib`` - ``AstropyRegister`` -> ``astropy_helpers.commands.register`` - ``get_pkg_version_module`` -> ``astropy_helpers.version_helpers`` - ``write_if_different``, ``import_file``, ``get_numpy_include_path`` -> ``astropy_helpers.utils`` All of these are "soft" deprecations in the sense that they are still importable from ``astropy_helpers.setup_helpers`` for now, and there is no (easy) way to produce deprecation warnings when importing these objects from ``setup_helpers`` rather than directly from the modules they are defined in. But please consider updating any imports to these objects. [#110] - Use of the ``astropy.sphinx.ext.astropyautosummary`` extension is deprecated for use with Sphinx < 1.2. Instead it should suffice to remove this extension for the ``extensions`` list in your ``conf.py`` and add the stock ``sphinx.ext.autosummary`` instead. [#131] 0.4.7 (2015-02-17) ------------------ - Fixed incorrect/missing git hash being added to the generated ``version.py`` when creating a release. [#141] 0.4.6 (2015-02-16) ------------------ - Fixed problems related to the automatically generated _compiler module not being created properly. [#139] 0.4.5 (2015-02-11) ------------------ - Fixed an issue where ah_bootstrap.py could blow up when astropy_helper's version number is 1.0. - Added a workaround for documentation of properties in the rare case where the class's metaclass has a property of the same name. [#130] - Fixed an issue on Python 3 where importing a package using astropy-helper's generated version.py module would crash when the current working directory is an empty git repository. [#114, #137] - Fixed an issue where the "revision count" appended to .dev versions by the generated version.py did not accurately reflect the revision count for the package it belongs to, and could be invalid if the current working directory is an unrelated git repository. [#107, #137] - Likewise, fixed a confusing warning message that could occur in the same circumstances as the above issue. [#121, #137] 0.4.4 (2014-12-31) ------------------ - More improvements for building the documentation using Python 3.x. [#100] - Additional minor fixes to Python 3 support. [#115] - Updates to support new test features in Astropy [#92, #106] 0.4.3 (2014-10-22) ------------------ - The generated ``version.py`` file now preserves the git hash of installed copies of the package as well as when building a source distribution. That is, the git hash of the changeset that was installed/released is preserved. [#87] - In smart resolver add resolution for class links when they exist in the intersphinx inventory, but not the mapping of the current package (e.g. 
when an affiliated package uses an astropy core class of which "actual" and "documented" location differs) [#88] - Fixed a bug that could occur when running ``setup.py`` for the first time in a repository that uses astropy-helpers as a submodule: ``AttributeError: 'NoneType' object has no attribute 'mkdtemp'`` [#89] - Fixed a bug where optional arguments to the ``doctest-skip`` Sphinx directive were sometimes being left in the generated documentation output. [#90] - Improved support for building the documentation using Python 3.x. [#96] - Avoid error message if .git directory is not present. [#91] 0.4.2 (2014-08-09) ------------------ - Fixed some CSS issues in generated API docs. [#69] - Fixed the warning message that could be displayed when generating a version number with some older versions of git. [#77] - Fixed automodsumm to work with new versions of Sphinx (>= 1.2.2). [#80] 0.4.1 (2014-08-08) ------------------ - Fixed git revision count on systems with git versions older than v1.7.2. [#70] - Fixed display of warning text when running a git command fails (previously the output of stderr was not being decoded properly). [#70] - The ``--offline`` flag to ``setup.py`` understood by ``ah_bootstrap.py`` now also prevents git from going online to fetch submodule updates. [#67] - The Sphinx extension for converting issue numbers to links in the changelog now supports working on arbitrary pages via a new ``conf.py`` setting: ``changelog_links_docpattern``. By default it affects the ``changelog`` and ``whatsnew`` pages in one's Sphinx docs. [#61] - Fixed crash that could result from users with missing/misconfigured locale settings. [#58] - The font used for code examples in the docs is now the system-defined ``monospace`` font, rather than ``Minaco``, which is not available on all platforms. [#50] 0.4 (2014-07-15) ---------------- - Initial release of astropy-helpers. See `APE4 `_ for details of the motivation and design of this package. - The ``astropy_helpers`` package replaces the following modules in the ``astropy`` package: - ``astropy.setup_helpers`` -> ``astropy_helpers.setup_helpers`` - ``astropy.version_helpers`` -> ``astropy_helpers.version_helpers`` - ``astropy.sphinx`` - > ``astropy_helpers.sphinx`` These modules should be considered deprecated in ``astropy``, and any new, non-critical changes to those modules will be made in ``astropy_helpers`` instead. Affiliated packages wishing to make use those modules (as in the Astropy package-template) should use the versions from ``astropy_helpers`` instead, and include the ``ah_bootstrap.py`` script in their project, for bootstrapping the ``astropy_helpers`` package in their setup.py script. photutils-0.4/astropy_helpers/ez_setup.py0000644000214200020070000003037113175633272023243 0ustar lbradleySTSCI\science00000000000000#!/usr/bin/env python """ Setuptools bootstrapping installer. Maintained at https://github.com/pypa/setuptools/tree/bootstrap. Run this script to install or upgrade setuptools. This method is DEPRECATED. Check https://github.com/pypa/setuptools/issues/581 for more details. """ import os import shutil import sys import tempfile import zipfile import optparse import subprocess import platform import textwrap import contextlib from distutils import log try: from urllib.request import urlopen except ImportError: from urllib2 import urlopen try: from site import USER_SITE except ImportError: USER_SITE = None # 33.1.1 is the last version that supports setuptools self upgrade/installation. 
DEFAULT_VERSION = "33.1.1" DEFAULT_URL = "https://pypi.io/packages/source/s/setuptools/" DEFAULT_SAVE_DIR = os.curdir DEFAULT_DEPRECATION_MESSAGE = "ez_setup.py is deprecated and when using it setuptools will be pinned to {0} since it's the last version that supports setuptools self upgrade/installation, check https://github.com/pypa/setuptools/issues/581 for more info; use pip to install setuptools" MEANINGFUL_INVALID_ZIP_ERR_MSG = 'Maybe {0} is corrupted, delete it and try again.' log.warn(DEFAULT_DEPRECATION_MESSAGE.format(DEFAULT_VERSION)) def _python_cmd(*args): """ Execute a command. Return True if the command succeeded. """ args = (sys.executable,) + args return subprocess.call(args) == 0 def _install(archive_filename, install_args=()): """Install Setuptools.""" with archive_context(archive_filename): # installing log.warn('Installing Setuptools') if not _python_cmd('setup.py', 'install', *install_args): log.warn('Something went wrong during the installation.') log.warn('See the error message above.') # exitcode will be 2 return 2 def _build_egg(egg, archive_filename, to_dir): """Build Setuptools egg.""" with archive_context(archive_filename): # building an egg log.warn('Building a Setuptools egg in %s', to_dir) _python_cmd('setup.py', '-q', 'bdist_egg', '--dist-dir', to_dir) # returning the result log.warn(egg) if not os.path.exists(egg): raise IOError('Could not build the egg.') class ContextualZipFile(zipfile.ZipFile): """Supplement ZipFile class to support context manager for Python 2.6.""" def __enter__(self): return self def __exit__(self, type, value, traceback): self.close() def __new__(cls, *args, **kwargs): """Construct a ZipFile or ContextualZipFile as appropriate.""" if hasattr(zipfile.ZipFile, '__exit__'): return zipfile.ZipFile(*args, **kwargs) return super(ContextualZipFile, cls).__new__(cls) @contextlib.contextmanager def archive_context(filename): """ Unzip filename to a temporary directory, set to the cwd. The unzipped target is cleaned up after. """ tmpdir = tempfile.mkdtemp() log.warn('Extracting in %s', tmpdir) old_wd = os.getcwd() try: os.chdir(tmpdir) try: with ContextualZipFile(filename) as archive: archive.extractall() except zipfile.BadZipfile as err: if not err.args: err.args = ('', ) err.args = err.args + ( MEANINGFUL_INVALID_ZIP_ERR_MSG.format(filename), ) raise # going in the directory subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0]) os.chdir(subdir) log.warn('Now working in %s', subdir) yield finally: os.chdir(old_wd) shutil.rmtree(tmpdir) def _do_download(version, download_base, to_dir, download_delay): """Download Setuptools.""" py_desig = 'py{sys.version_info[0]}.{sys.version_info[1]}'.format(sys=sys) tp = 'setuptools-{version}-{py_desig}.egg' egg = os.path.join(to_dir, tp.format(**locals())) if not os.path.exists(egg): archive = download_setuptools(version, download_base, to_dir, download_delay) _build_egg(egg, archive, to_dir) sys.path.insert(0, egg) # Remove previously-imported pkg_resources if present (see # https://bitbucket.org/pypa/setuptools/pull-request/7/ for details). if 'pkg_resources' in sys.modules: _unload_pkg_resources() import setuptools setuptools.bootstrap_install_from = egg def use_setuptools( version=DEFAULT_VERSION, download_base=DEFAULT_URL, to_dir=DEFAULT_SAVE_DIR, download_delay=15): """ Ensure that a setuptools version is installed. Return None. Raise SystemExit if the requested version or later cannot be installed. 
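    Examples
    --------
    Typical bootstrap usage near the top of a project's ``setup.py``
    (illustrative sketch only)::

        >>> from ez_setup import use_setuptools   # doctest: +SKIP
        >>> use_setuptools()                      # doctest: +SKIP
        >>> from setuptools import setup          # doctest: +SKIP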
""" to_dir = os.path.abspath(to_dir) # prior to importing, capture the module state for # representative modules. rep_modules = 'pkg_resources', 'setuptools' imported = set(sys.modules).intersection(rep_modules) try: import pkg_resources pkg_resources.require("setuptools>=" + version) # a suitable version is already installed return except ImportError: # pkg_resources not available; setuptools is not installed; download pass except pkg_resources.DistributionNotFound: # no version of setuptools was found; allow download pass except pkg_resources.VersionConflict as VC_err: if imported: _conflict_bail(VC_err, version) # otherwise, unload pkg_resources to allow the downloaded version to # take precedence. del pkg_resources _unload_pkg_resources() return _do_download(version, download_base, to_dir, download_delay) def _conflict_bail(VC_err, version): """ Setuptools was imported prior to invocation, so it is unsafe to unload it. Bail out. """ conflict_tmpl = textwrap.dedent(""" The required version of setuptools (>={version}) is not available, and can't be installed while this script is running. Please install a more recent version first, using 'easy_install -U setuptools'. (Currently using {VC_err.args[0]!r}) """) msg = conflict_tmpl.format(**locals()) sys.stderr.write(msg) sys.exit(2) def _unload_pkg_resources(): sys.meta_path = [ importer for importer in sys.meta_path if importer.__class__.__module__ != 'pkg_resources.extern' ] del_modules = [ name for name in sys.modules if name.startswith('pkg_resources') ] for mod_name in del_modules: del sys.modules[mod_name] def _clean_check(cmd, target): """ Run the command to download target. If the command fails, clean up before re-raising the error. """ try: subprocess.check_call(cmd) except subprocess.CalledProcessError: if os.access(target, os.F_OK): os.unlink(target) raise def download_file_powershell(url, target): """ Download the file at url to target using Powershell. Powershell will validate trust. Raise an exception if the command cannot complete. 
""" target = os.path.abspath(target) ps_cmd = ( "[System.Net.WebRequest]::DefaultWebProxy.Credentials = " "[System.Net.CredentialCache]::DefaultCredentials; " '(new-object System.Net.WebClient).DownloadFile("%(url)s", "%(target)s")' % locals() ) cmd = [ 'powershell', '-Command', ps_cmd, ] _clean_check(cmd, target) def has_powershell(): """Determine if Powershell is available.""" if platform.system() != 'Windows': return False cmd = ['powershell', '-Command', 'echo test'] with open(os.path.devnull, 'wb') as devnull: try: subprocess.check_call(cmd, stdout=devnull, stderr=devnull) except Exception: return False return True download_file_powershell.viable = has_powershell def download_file_curl(url, target): cmd = ['curl', url, '--location', '--silent', '--output', target] _clean_check(cmd, target) def has_curl(): cmd = ['curl', '--version'] with open(os.path.devnull, 'wb') as devnull: try: subprocess.check_call(cmd, stdout=devnull, stderr=devnull) except Exception: return False return True download_file_curl.viable = has_curl def download_file_wget(url, target): cmd = ['wget', url, '--quiet', '--output-document', target] _clean_check(cmd, target) def has_wget(): cmd = ['wget', '--version'] with open(os.path.devnull, 'wb') as devnull: try: subprocess.check_call(cmd, stdout=devnull, stderr=devnull) except Exception: return False return True download_file_wget.viable = has_wget def download_file_insecure(url, target): """Use Python to download the file, without connection authentication.""" src = urlopen(url) try: # Read all the data in one block. data = src.read() finally: src.close() # Write all the data in one block to avoid creating a partial file. with open(target, "wb") as dst: dst.write(data) download_file_insecure.viable = lambda: True def get_best_downloader(): downloaders = ( download_file_powershell, download_file_curl, download_file_wget, download_file_insecure, ) viable_downloaders = (dl for dl in downloaders if dl.viable()) return next(viable_downloaders, None) def download_setuptools( version=DEFAULT_VERSION, download_base=DEFAULT_URL, to_dir=DEFAULT_SAVE_DIR, delay=15, downloader_factory=get_best_downloader): """ Download setuptools from a specified location and return its filename. `version` should be a valid setuptools version number that is available as an sdist for download under the `download_base` URL (which should end with a '/'). `to_dir` is the directory where the egg will be downloaded. `delay` is the number of seconds to pause before an actual download attempt. ``downloader_factory`` should be a function taking no arguments and returning a function for downloading a URL to a target. """ # making sure we use the absolute path to_dir = os.path.abspath(to_dir) zip_name = "setuptools-%s.zip" % version url = download_base + zip_name saveto = os.path.join(to_dir, zip_name) if not os.path.exists(saveto): # Avoid repeated downloads log.warn("Downloading %s", url) downloader = downloader_factory() downloader(url, saveto) return os.path.realpath(saveto) def _build_install_args(options): """ Build the arguments to 'python setup.py install' on the setuptools package. Returns list of command line arguments. 
""" return ['--user'] if options.user_install else [] def _parse_args(): """Parse the command line for options.""" parser = optparse.OptionParser() parser.add_option( '--user', dest='user_install', action='store_true', default=False, help='install in user site package') parser.add_option( '--download-base', dest='download_base', metavar="URL", default=DEFAULT_URL, help='alternative URL from where to download the setuptools package') parser.add_option( '--insecure', dest='downloader_factory', action='store_const', const=lambda: download_file_insecure, default=get_best_downloader, help='Use internal, non-validating downloader' ) parser.add_option( '--version', help="Specify which version to download", default=DEFAULT_VERSION, ) parser.add_option( '--to-dir', help="Directory to save (and re-use) package", default=DEFAULT_SAVE_DIR, ) options, args = parser.parse_args() # positional arguments are ignored return options def _download_args(options): """Return args for download_setuptools function from cmdline args.""" return dict( version=options.version, download_base=options.download_base, downloader_factory=options.downloader_factory, to_dir=options.to_dir, ) def main(): """Install or upgrade setuptools and EasyInstall.""" options = _parse_args() archive = download_setuptools(**_download_args(options)) return _install(archive, _build_install_args(options)) if __name__ == '__main__': sys.exit(main()) photutils-0.4/astropy_helpers/LICENSE.rst0000644000214200020070000000272312361365073022644 0ustar lbradleySTSCI\science00000000000000Copyright (c) 2014, Astropy Developers All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the Astropy Team nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. photutils-0.4/astropy_helpers/licenses/0000755000214200020070000000000013175654702022635 5ustar lbradleySTSCI\science00000000000000photutils-0.4/astropy_helpers/licenses/LICENSE_ASTROSCRAPPY.rst0000644000214200020070000000315413175633272026465 0ustar lbradleySTSCI\science00000000000000# The OpenMP helpers include code heavily adapted from astroscrappy, released # under the following license: # # Copyright (c) 2015, Curtis McCully # All rights reserved. 
# # Redistribution and use in source and binary forms, with or without modification, # are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright notice, this # list of conditions and the following disclaimer in the documentation and/or # other materials provided with the distribution. # * Neither the name of the Astropy Team nor the names of its contributors may be # used to endorse or promote products derived from this software without # specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR # ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON # ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. photutils-0.4/astropy_helpers/licenses/LICENSE_COPYBUTTON.rst0000644000214200020070000000471112346164025026233 0ustar lbradleySTSCI\science00000000000000Copyright 2014 Python Software Foundation License: PSF PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2 -------------------------------------------- . 1. This LICENSE AGREEMENT is between the Python Software Foundation ("PSF"), and the Individual or Organization ("Licensee") accessing and otherwise using this software ("Python") in source or binary form and its associated documentation. . 2. Subject to the terms and conditions of this License Agreement, PSF hereby grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce, analyze, test, perform and/or display publicly, prepare derivative works, distribute, and otherwise use Python alone or in any derivative version, provided, however, that PSF's License Agreement and PSF's notice of copyright, i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006 Python Software Foundation; All Rights Reserved" are retained in Python alone or in any derivative version prepared by Licensee. . 3. In the event Licensee prepares a derivative work that is based on or incorporates Python or any part thereof, and wants to make the derivative work available to others as provided herein, then Licensee hereby agrees to include in any such work a brief summary of the changes made to Python. . 4. PSF is making Python available to Licensee on an "AS IS" basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT INFRINGE ANY THIRD PARTY RIGHTS. . 5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON, OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF. . 6. 
This License Agreement will automatically terminate upon a material breach of its terms and conditions. . 7. Nothing in this License Agreement shall be deemed to create any relationship of agency, partnership, or joint venture between PSF and Licensee. This License Agreement does not grant permission to use PSF trademarks or trade name in a trademark sense to endorse or promote products or services of Licensee, or any third party. . 8. By copying, installing or otherwise using Python, Licensee agrees to be bound by the terms and conditions of this License Agreement. photutils-0.4/astropy_helpers/licenses/LICENSE_NUMPYDOC.rst0000644000214200020070000001350712346164025025766 0ustar lbradleySTSCI\science00000000000000------------------------------------------------------------------------------- The files - numpydoc.py - docscrape.py - docscrape_sphinx.py - phantom_import.py have the following license: Copyright (C) 2008 Stefan van der Walt , Pauli Virtanen Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ------------------------------------------------------------------------------- The files - compiler_unparse.py - comment_eater.py - traitsdoc.py have the following license: This software is OSI Certified Open Source Software. OSI Certified is a certification mark of the Open Source Initiative. Copyright (c) 2006, Enthought, Inc. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Enthought, Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

-------------------------------------------------------------------------------

The file

- plot_directive.py

originates from Matplotlib (http://matplotlib.sf.net/) which has the following
license:

Copyright (c) 2002-2008 John D. Hunter; All Rights Reserved.

1. This LICENSE AGREEMENT is between John D. Hunter ("JDH"), and the
Individual or Organization ("Licensee") accessing and otherwise using
matplotlib software in source or binary form and its associated documentation.

2. Subject to the terms and conditions of this License Agreement, JDH hereby
grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce,
analyze, test, perform and/or display publicly, prepare derivative works,
distribute, and otherwise use matplotlib 0.98.3 alone or in any derivative
version, provided, however, that JDH's License Agreement and JDH's notice of
copyright, i.e., "Copyright (c) 2002-2008 John D. Hunter; All Rights Reserved"
are retained in matplotlib 0.98.3 alone or in any derivative version prepared
by Licensee.

3. In the event Licensee prepares a derivative work that is based on or
incorporates matplotlib 0.98.3 or any part thereof, and wants to make the
derivative work available to others as provided herein, then Licensee hereby
agrees to include in any such work a brief summary of the changes made to
matplotlib 0.98.3.

4. JDH is making matplotlib 0.98.3 available to Licensee on an "AS IS" basis.
JDH MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR IMPLIED. BY WAY OF
EXAMPLE, BUT NOT LIMITATION, JDH MAKES NO AND DISCLAIMS ANY REPRESENTATION OR
WARRANTY OF MERCHANTABILITY OR FITNESS FOR ANY PARTICULAR PURPOSE OR THAT THE
USE OF MATPLOTLIB 0.98.3 WILL NOT INFRINGE ANY THIRD PARTY RIGHTS.

5. JDH SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF MATPLOTLIB 0.98.3
FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS A RESULT OF
MODIFYING, DISTRIBUTING, OR OTHERWISE USING MATPLOTLIB 0.98.3, OR ANY
DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.

6. This License Agreement will automatically terminate upon a material breach
of its terms and conditions.

7. Nothing in this License Agreement shall be deemed to create any
relationship of agency, partnership, or joint venture between JDH and
Licensee. This License Agreement does not grant permission to use JDH
trademarks or trade name in a trademark sense to endorse or promote products
or services of Licensee, or any third party.

8. By copying, installing or otherwise using matplotlib 0.98.3, Licensee
agrees to be bound by the terms and conditions of this License Agreement.
photutils-0.4/astropy_helpers/README.rst0000644000214200020070000000502113175633272022514 0ustar lbradleySTSCI\science00000000000000astropy-helpers
===============

* Stable versions: https://pypi.org/project/astropy-helpers/
* Development version, issue tracker: https://github.com/astropy/astropy-helpers

This project provides a Python package, ``astropy_helpers``, which includes
many build, installation, and documentation-related tools used by the Astropy
project, but packaged separately for use by other projects that wish to
leverage this work. The motivation behind this package and details of its
implementation are in the accepted `Astropy Proposal for Enhancement (APE) 4 `_.

The ``astropy_helpers.extern`` sub-module includes modules developed elsewhere
that are bundled here for convenience. At the moment, this consists of the
following two Sphinx extensions:

* `numpydoc `_, a Sphinx extension developed as part of the Numpy project.
  This is used to parse docstrings in Numpy format.

* `sphinx-automodapi `_, a Sphinx extension developed as part of the Astropy
  project. This used to be developed directly in ``astropy-helpers`` but is
  now a standalone package.

Issues with these sub-modules should be reported in their respective
repositories, and we will regularly update the bundled versions to reflect the
latest released versions.

``astropy_helpers`` includes a special "bootstrap" module called
``ah_bootstrap.py`` which is intended to be used by a project's setup.py in
order to ensure that the ``astropy_helpers`` package is available for
build/installation. This is similar to the ``ez_setup.py`` module that is
shipped with some projects to bootstrap `setuptools `_.

As described in APE4, the version numbers for ``astropy_helpers`` follow the
corresponding major/minor version of the `astropy core package `_, but with an
independent sequence of micro (bugfix) version numbers. Hence, the initial
release is 0.4, in parallel with Astropy v0.4, which will be the first version
of Astropy to use ``astropy-helpers``.

For examples of how to implement ``astropy-helpers`` in a project, see the
``setup.py`` and ``setup.cfg`` files of the `Affiliated package template `_.

.. image:: https://travis-ci.org/astropy/astropy-helpers.svg
    :target: https://travis-ci.org/astropy/astropy-helpers

.. image:: https://coveralls.io/repos/astropy/astropy-helpers/badge.svg
    :target: https://coveralls.io/r/astropy/astropy-helpers
photutils-0.4/cextern/0000755000214200020070000000000013175654702017255 5ustar lbradleySTSCI\science00000000000000photutils-0.4/cextern/README.rst0000644000214200020070000000054212345377273020750 0ustar lbradleySTSCI\science00000000000000External Packages/Libraries
===========================

This directory contains C extensions included with the package. Note that only
C extensions should be included in this directory - pure Cython code should be
placed in the package source tree, and wrapper Cython code for C libraries
included here should be in the packagename/wrappers directory.
photutils-0.4/CHANGES.rst0000644000214200020070000003603013175652637017416 0ustar lbradleySTSCI\science000000000000000.4 (2017-10-30)
----------------

General
^^^^^^^

- Dropped python 3.3 support. [#542]

- Dropped numpy 1.8 support. Minimal required version is now numpy 1.9.
  [#542]

- Dropped support for astropy 1.x versions. Minimal required version is now
  astropy 2.0. [#575]

- Dropped scipy 0.15 support. Minimal required version is now scipy 0.16.
  [#576]

- Explicitly require six as a dependency.
  [#601]

New Features
^^^^^^^^^^^^

- ``photutils.aperture``

  - Added ``BoundingBox`` class, used when defining apertures. [#481]

  - Apertures now have ``__repr__`` and ``__str__`` defined. [#493]

  - Improved plotting of annulus apertures using Bezier curves. [#494]

  - Rectangular apertures now use the true minimal bounding box. [#507]

  - Elliptical apertures now use the true minimal bounding box. [#508]

  - Added a ``to_sky`` method for pixel apertures. [#512]

- ``photutils.background``

  - Mesh rejection now also applies to pixels that are masked during sigma
    clipping. [#544]

- ``photutils.datasets``

  - Added new ``make_wcs`` and ``make_imagehdu`` functions. [#527]

  - Added new ``show_progress`` keyword to the ``load_*`` functions. [#590]

- ``photutils.isophote``

  - Added a new ``photutils.isophote`` subpackage to provide tools to fit
    elliptical isophotes to a galaxy image. [#532, #603]

- ``photutils.segmentation``

  - Added a ``cmap`` method to ``SegmentationImage`` to generate a random
    matplotlib colormap. [#513]

  - Added ``sky_centroid`` and ``sky_centroid_icrs`` source properties. [#592]

  - Added new source properties representing the sky coordinates of the
    bounding box corner vertices (``sky_bbox_ll``, ``sky_bbox_ul``,
    ``sky_bbox_lr``, and ``sky_bbox_ur``). [#592]

  - Added new ``SourceCatalog`` class to hold the list of
    ``SourceProperties``. [#608]

  - The ``properties_table`` function is now deprecated. Use the
    ``SourceCatalog.to_table()`` method instead. [#608]

- ``photutils.psf``

  - Uncertainties on fitted parameters are added to the final table. [#516]

  - Fitted results of any free parameter are added to the final table. [#471]

API changes
^^^^^^^^^^^

- ``photutils.aperture``

  - The ``ApertureMask`` ``apply()`` method has been renamed to
    ``multiply()``. [#481]

  - The ``ApertureMask`` input parameter was renamed from ``mask`` to
    ``data``. [#548]

  - Removed the ``pixelwise_errors`` keyword from ``aperture_photometry``.
    [#489]

- ``photutils.background``

  - The ``Background2D`` keywords ``exclude_mesh_method`` and
    ``exclude_mesh_percentile`` were removed in favor of a single keyword
    called ``exclude_percentile``. [#544]

  - Renamed ``BiweightMidvarianceBackgroundRMS`` to
    ``BiweightScaleBackgroundRMS``. [#547]

  - Removed the ``SigmaClip`` class. ``astropy.stats.SigmaClip`` is a direct
    replacement. [#569]

- ``photutils.datasets``

  - The ``make_poission_noise`` function was renamed to
    ``apply_poisson_noise``. [#527]

  - The ``make_random_gaussians`` function was renamed to
    ``make_random_gaussians_table``. The parameter ranges must now be input
    as a dictionary. [#527]

  - The ``make_gaussian_sources`` function was renamed to
    ``make_gaussian_sources_image``. [#527]

  - The ``make_random_models`` function was renamed to
    ``make_random_models_table``. [#527]

  - The ``make_model_sources`` function was renamed to
    ``make_model_sources_image``. [#527]

  - The ``unit``, ``hdu``, ``wcs``, and ``wcsheader`` keywords in
    ``photutils.datasets`` functions were removed. [#527]

  - ``'photutils-datasets'`` was added as an optional ``location`` in the
    ``get_path`` function. This option is used as a fallback in case the
    ``'remote'`` location (astropy data server) fails. [#589]

- ``photutils.detection``

  - The ``daofind`` and ``irafstarfinder`` functions were removed. [#588]

- ``photutils.psf``

  - ``IterativelySubtractedPSFPhotometry`` issues a "no sources detected"
    warning only on the first iteration, if applicable.
[#566] - ``photutils.segmentation`` - The ``'icrs_centroid'``, ``'ra_icrs_centroid'``, and ``'dec_icrs_centroid'`` source properties are deprecated and are no longer default columns returned by ``properties_table``. [#592] - The ``properties_table`` function now returns a ``QTable``. [#592] - ``photutils.utils`` - The ``background_color`` keyword was removed from the ``random_cmap`` function. [#528] - Deprecated unused ``interpolate_masked_data()``. [#526, #611] Bug Fixes ^^^^^^^^^ - ``photutils.segmentation`` - Fixed ``deblend_sources`` so that it correctly deblends multiple sources. [#572] - Fixed a bug in calculation of the ``sky_centroid_icrs`` (and deprecated ``icrs_centroid``) property where the incorrect pixel origin was being passed. [#592] - ``photutils.utils`` - Added a check that ``data`` and ``bkg_error`` have the same units in ``calc_total_error``. [#537] 0.3.1 (unreleased) ------------------ General ^^^^^^^ - Dropped numpy 1.7 support. Minimal required version is now numpy 1.8. [#327] - ``photutils.datasets`` - The ``load_*`` functions that use remote data now retrieve the data from ``data.astropy.org`` (the astropy data repository). [#472] Bug Fixes ^^^^^^^^^ - ``photutils.background`` - Fixed issue with ``Background2D`` with ``edge_method='pad'`` that occurred when unequal padding needed to be applied to each axis. [#498] - Fixed issue with ``Background2D`` that occurred when zero padding needed to apply along only one axis. [#500] - ``photutils.geometry`` - Fixed a bug in ``circular_overlap_grid`` affecting 32-bit machines that could cause errors circular aperture photometry. [#475] - ``photutils.psf`` - Fixed a bug in how ``FittableImageModel`` represents its center. [#460] - Fix bug which modified user's input table when doing forced photometry. [#485] 0.3 (2016-11-06) ---------------- General ^^^^^^^ New Features ^^^^^^^^^^^^ - ``photutils.aperture`` - Added new ``origin`` keyword to aperture ``plot`` methods. [#395] - Added new ``id`` column to ``aperture_photometry`` output table. [#446] - Added ``__len__`` method for aperture classes. [#446] - Added new ``to_mask`` method to ``PixelAperture`` classes. [#453] - Added new ``ApertureMask`` class to generate masks from apertures. [#453] - Added new ``mask_area()`` method to ``PixelAperture`` classes. [#453] - The ``aperture_photometry()`` function now accepts a list of aperture objects. [#454] - ``photutils.background`` - Added new ``MeanBackground``, ``MedianBackground``, ``MMMBackground``, ``SExtractorBackground``, ``BiweightLocationBackground``, ``StdBackgroundRMS``, ``MADStdBackgroundRMS``, and ``BiweightMidvarianceBackgroundRMS`` classes. [#370] - Added ``axis`` keyword to new background classes. [#392] - Added new ``removed_masked``, ``meshpix_threshold``, and ``edge_method`` keywords for the 2D background classes. [#355] - Added new ``std_blocksum`` function. [#355] - Added new ``SigmaClip`` class. [#423] - Added new ``BkgZoomInterpolator`` and ``BkgIDWInterpolator`` classes. [#437] - ``photutils.datasets`` - Added ``load_irac_psf`` function. [#403] - ``photutils.detection`` - Added new ``make_source_mask`` convenience function. [#355] - Added ``filter_data`` function. [#398] - Added ``DAOStarFinder`` and ``IRAFStarFinder`` as oop interfaces for ``daofind`` and ``irafstarfinder``, respectively, which are now deprecated. [#379] - ``photutils.psf`` - Added ``BasicPSFPhotometry``, ``IterativelySubtractedPSFPhotometry``, and ``DAOPhotPSFPhotometry`` classes to perform PSF photometry in crowded fields. 
[#427] - Added ``DAOGroup`` and ``DBSCANGroup`` classes for grouping overlapping sources. [#369] - ``photutils.psf_match`` - Added ``create_matching_kernel`` and ``resize_psf`` functions. Also added ``CosineBellWindow``, ``HanningWindow``, ``SplitCosineBellWindow``, ``TopHatWindow``, and ``TukeyWindow`` classes. [#403] - ``photutils.segmentation`` - Created new ``photutils.segmentation`` subpackage. [#442] - Added ``copy`` and ``area`` methods and an ``areas`` property to ``SegmentationImage``. [#331] API changes ^^^^^^^^^^^ - ``photutils.aperture`` - Removed the ``effective_gain`` keyword from ``aperture_photometry``. Users must now input the total error, which can be calculated using the ``calc_total_error`` function. [#368] - ``aperture_photometry`` now outputs a ``QTable``. [#446] - Renamed ``source_id`` keyword to ``indices`` in the aperture ``plot()`` method. [#453] - Added ``mask`` and ``unit`` keywords to aperture ``do_photometry()`` methods. [#453] - ``photutils.background`` - For the background classes, the ``filter_shape`` keyword was renamed to ``filter_size``. The ``background_low_res`` and ``background_rms_low_res`` class attributes were renamed to ``background_mesh`` and ``background_rms_mesh``, respectively. [#355, #437] - The ``Background2D`` ``method`` and ``backfunc`` keywords have been removed. In its place one can input callable objects via the ``sigma_clip``, ``bkg_estimator``, and ``bkgrms_estimator`` keywords. [#437] - The interpolator to be used by the ``Background2D`` class can be input as a callable object via the new ``interpolator`` keyword. [#437] - ``photutils.centroids`` - Created ``photutils.centroids`` subpackage, which contains the ``centroid_com``, ``centroid_1dg``, and ``centroid_2dg`` functions. These functions now return a two-element numpy ndarray. [#428] - ``photutils.detection`` - Changed finding algorithm implementations (``daofind`` and ``starfind``) from functional to object-oriented style. Deprecated old style. [#379] - ``photutils.morphology`` - Created ``photutils.morphology`` subpackage. [#428] - Removed ``marginalize_data2d`` function. [#428] - Moved ``cutout_footprint`` from ``photutils.morphology`` to ``photutils.utils``. [#428] - Added a function to calculate the Gini coefficient (``gini``). [#343] - ``photutils.psf`` - Removed the ``effective_gain`` keyword from ``psf_photometry``. Users must now input the total error, which can be calculated using the ``calc_total_error`` function. [#368] - ``photutils.segmentation`` - Removed the ``effective_gain`` keyword from ``SourceProperties`` and ``source_properties``. Users must now input the total error, which can be calculated using the ``calc_total_error`` function. [#368] - ``photutils.utils`` - Renamed ``calculate_total_error`` to ``calc_total_error``. [#368] Bug Fixes ^^^^^^^^^ - ``photutils.aperture`` - Fixed a bug in ``aperture_photometry`` so that single-row output tables do not return a multidimensional column. [#446] - ``photutils.centroids`` - Fixed a bug in ``centroid_1dg`` and ``centroid_2dg`` that occured when the input data contained invalid (NaN or inf) values. [#428] - ``photutils.segmentation`` - Fixed a bug in ``SourceProperties`` where ``error`` and ``background`` units were sometimes dropped. [#441] 0.2.2 (2016-07-06) ------------------ General ^^^^^^^ - Dropped numpy 1.6 support. Minimal required version is now numpy 1.7. [#327] - Fixed configparser for Python 3.5. 
[#366, #384] Bug Fixes ^^^^^^^^^ - ``photutils.detection`` - Fixed an issue to update segmentation image slices after deblending. [#340] - Fixed source deblending to pass the pixel connectivity to the watershed algorithm. [#347] - SegmentationImage properties are now cached instead of recalculated, which significantly improves performance. [#361] - ``photutils.utils`` - Fixed a bug in ``pixel_to_icrs_coords`` where the incorrect pixel origin was being passed. [#348] 0.2.1 (2016-01-15) ------------------ Bug Fixes ^^^^^^^^^ - ``photutils.background`` - Added more robust version checking of Astropy. [#318] - ``photutils.detection`` - Added more robust version checking of Astropy. [#318] - ``photutils.segmentation`` - Fixed issue where ``SegmentationImage`` slices were not being updated. [#317] - Added more robust version checking of scikit-image. [#318] 0.2 (2015-12-31) ---------------- General ^^^^^^^ - Photutils has the following requirements: - Python 2.7 or 3.3 or later - Numpy 1.6 or later - Astropy v1.0 or later New Features ^^^^^^^^^^^^ - ``photutils.detection`` - ``find_peaks`` now returns an Astropy Table containing the (x, y) positions and peak values. [#240] - ``find_peaks`` has new ``mask``, ``error``, ``wcs`` and ``subpixel`` precision options. [#244] - ``detect_sources`` will now issue a warning if the filter kernel is not normalized to 1. [#298] - Added new ``deblend_sources`` function, an experimental source deblender. [#314] - ``photutils.morphology`` - Added new ``GaussianConst2D`` (2D Gaussian plus a constant) model. [#244] - Added new ``marginalize_data2d`` function. [#244] - Added new ``cutout_footprint`` function. [#244] - ``photutils.segmentation`` - Added new ``SegmentationImage`` class. [#306] - Added new ``check_label``, ``keep_labels``, and ``outline_segments`` methods for modifying ``SegmentationImage``. [#306] - ``photutils.utils`` - Added new ``random_cmap`` function to generate a colormap comprised of random colors. [#299] - Added new ``ShepardIDWInterpolator`` class to perform Inverse Distance Weighted (IDW) interpolation. [#307] - The ``interpolate_masked_data`` function can now interpolate higher-dimensional data. [#310] API changes ^^^^^^^^^^^ - ``photutils.segmentation`` - The ``relabel_sequential``, ``relabel_segments``, ``remove_segments``, ``remove_border_segments``, and ``remove_masked_segments`` functions are now ``SegmentationImage`` methods (with slightly different names). [#306] - The ``SegmentProperties`` class has been renamed to ``SourceProperties``. Likewise the ``segment_properties`` function has been renamed to ``source_properties``. [#306] - The ``segment_sum`` and ``segment_sum_err`` attributes have been renamed to ``source_sum`` and ``source_sum_err``, respectively. [#306] - The ``background_atcentroid`` attribute has been renamed to ``background_at_centroid``. [#306] Bug Fixes ^^^^^^^^^ - ``photutils.aperture_photometry`` - Fixed an issue where ``np.nan`` or ``np.inf`` were not properly masked. [#267] - ``photutils.geometry`` - ``overlap_area_triangle_unit_circle`` handles correctly a corner case in some i386 systems where the area of the aperture was not computed correctly. [#242] - ``rectangular_overlap_grid`` and ``elliptical_overlap_grid`` fixes to normalization of subsampled pixels. [#265] - ``overlap_area_triangle_unit_circle`` handles correctly the case where a line segment intersects at a triangle vertex. [#277] Other Changes and Additions ^^^^^^^^^^^^^^^^^^^^^^^^^^^ - Updated astropy-helpers to v1.1. 
[#302] 0.1 (2014-12-22) ---------------- Photutils 0.1 was released on December 22, 2014. It requires Astropy version 0.4 or later. photutils-0.4/CODE_OF_CONDUCT.rst0000644000214200020070000000025713175634532020617 0ustar lbradleySTSCI\science00000000000000Photutils is an `Astropy `_ affiliated package and we follow the `Astropy Community Code of Conduct `_. photutils-0.4/CONTRIBUTING.rst0000644000214200020070000001460513175634532020253 0ustar lbradleySTSCI\science00000000000000Contributing to Photutils ========================= Reporting Issues ---------------- When opening an issue to report a problem, please try and provide a minimal code example that reproduces the issue. Also please include details of the operating system and the Python, Numpy, Astropy, and Photutils versions you are using. Contributing code ----------------- So you're interested in contributing code to Photutils? Excellent! Most contributions to Photutils are done via pull requests from GitHub users' forks of the `Photutils repository `_. If you're new to this style of development, you'll want to read over the `Astropy development workflow `_). Once you open a pull request (which should be opened against the ``master`` branch, not against any of the other branches), please make sure that you include the following: - **Code**: the code you are adding, which should follow as much as possible the `Astropy coding guidelines `_. - **Tests**: these are either tests to ensure code that previously failed now works (regression tests) or tests that cover as much as possible of the new functionality to make sure it doesn't break in future. The tests are also used to ensure consistent results on all platforms, since we run these tests on many platforms/configurations. For more information about how to write tests, see the `Astropy testing guidelines `_. - **Documentation**: if you are adding new functionality, be sure to include a description in the main documentation (in ``docs/``). For more information, please see the detailed `Astropy documentation guidelines `_. - **Changelog entry**: whether you are fixing a bug or adding new functionality, you should add an entry to the ``CHANGES.rst`` file that includes the PR number and if possible the issue number (if you are opening a pull request you may not know this yet, but you can add it once the pull request is open). If you're not sure where to put the changelog entry, wait until a maintainer has reviewed your PR and assigned it to a milestone. You do not need to include a changelog entry for fixes to bugs introduced in the developer version and therefore are not present in the stable releases. In general you do not need to include a changelog entry for minor documentation or test updates. Only user-visible changes (new features/API changes, fixed issues) need to be mentioned. If in doubt ask the core maintainer reviewing your changes. Other Tips ---------- - To prevent the automated tests from running you can add ``[ci skip]`` to your commit message. This is useful if your PR is a work in progress and you are not yet ready for the tests to run. For example: $ git commit -m "WIP widget [ci skip]" - If you already made the commit without including this string, you can edit your existing commit message by running: $ git commit --amend - To skip only the AppVeyor (Windows) CI builds you can use ``[skip appveyor]``, and to skip testing on Travis CI use ``[skip travis]``. 
- If your commit makes substantial changes to the documentation, but no code
  changes, then you can use ``[docs only]``, which will skip all but the
  documentation building jobs on Travis.

- When contributing trivial documentation fixes (i.e. fixes to typos,
  spelling, grammar) that do not contain any special markup and are not
  associated with code changes, please include the string ``[docs only]`` in
  your commit message. For example:

    $ git commit -m "Fixed typo [docs only]"

Checklist for Contributed Code
------------------------------

A pull request for a new feature will be reviewed to see if it meets the
following requirements. For any pull request, a Photutils maintainer can help
to make sure that the pull request meets the requirements for inclusion in
the package.

**Scientific Quality** (when applicable)
  * Is the submission relevant to this package?
  * Are references included to the original source for the algorithm?
  * Does the code perform as expected?
  * Has the code been tested against previously existing implementations?

**Code Quality**
  * Are the `Astropy coding guidelines `_ followed?
  * Is the code compatible with Python 2.7 and >=3.4?
  * Are there dependencies other than the Astropy core, the Python Standard
    Library, and NumPy 1.9.0 or later?

    - Is the package importable even if the C-extensions are not built?
    - Are additional dependencies handled appropriately?
    - Do functions and classes that require additional dependencies raise an
      `ImportError` if they are not present? (See the example sketch at the
      end of this checklist.)

**Testing**
  * Are the `Astropy testing guidelines `_ followed?
  * Are the inputs to the functions and classes sufficiently tested?
  * Are there tests for any exceptions raised?
  * Are there tests for the expected performance?
  * Are the sources for the tests documented?
  * Are the tests that require an `optional dependency `_ marked as such?
  * Does "``python setup.py test``" run without failures?

**Documentation**
  * Are the `Astropy documentation guidelines `_ followed?
  * Is there a `docstring `_ in the functions and classes describing:

    - What the code does?
    - The format of the inputs of the function or class?
    - The format of the outputs of the function or class?
    - References to the original algorithms?
    - Any exceptions which are raised?
    - An example of running the code?

  * Is there any information needed to be added to the docs to describe the
    function or class?
  * Does the documentation build without errors or warnings?

**License**
  * Is the astropy license included at the top of the file?
  * Are there any conflicts between this code and existing code?

**Photutils requirements**
  * Do all the Travis CI, AppVeyor, and CircleCI tests pass?
  * If applicable, has an entry been added into the changelog?
  * Can you check out the pull request and repeat the examples and tests?
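To make the optional-dependency item above concrete, here is a minimal
illustrative sketch; the function name and the use of scikit-image as the
optional dependency are hypothetical and are not taken from the photutils
code base::

    def smooth_image(data, sigma):
        """Example of deferring an optional import until call time."""
        try:
            # scikit-image is needed only by this function, so import it here
            from skimage.filters import gaussian
        except ImportError:
            raise ImportError('smooth_image requires scikit-image.')

        return gaussian(data, sigma=sigma)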
photutils-0.4/docs/0000755000214200020070000000000013175654702016535 5ustar lbradleySTSCI\science00000000000000photutils-0.4/docs/_static/0000755000214200020070000000000013175654702020163 5ustar lbradleySTSCI\science00000000000000photutils-0.4/docs/_static/favicon.ico0000644000214200020070000003535612444404542022311 0ustar lbradleySTSCI\science00000000000000[binary ICO icon data omitted]
photutils-0.4/docs/_static/photutils.css0000644000214200020070000000040112721610567022720 0ustar lbradleySTSCI\science00000000000000@import url("bootstrap-astropy.css");

div.topbar a.brand {
    background: transparent url("photutils_logo-32x32.png") no-repeat 8px 3px;
    background-image: url("photutils_logo.svg"), none;
    background-size: 32px 32px;
}

#logotext1 {
    color: #e8f2fc;
}
photutils-0.4/docs/_static/photutils_banner-475x120.png0000644000214200020070000005045312444404542025201 0ustar lbradleySTSCI\science00000000000000[binary PNG image data omitted]
~¶š\®\9š9‚¤ Ú?¨y;¼zà ø.~a¸#%ú@ÙKNp(¦™O¾ŸÀž‘¾žì¾d°ørVŸÅ¬õ7¸-†÷#\ml YººW°û¥Y²±{MÊ=%ßå±ý|“Irª“Ã2·²ºúéçý¹öÁߌduœ¦ƒð»~ìÞf2ðS¤” xf^ÆîC!+«œ•cØæãF…®~DÙýÈ&¯¹ºçEY²+÷§–wÕ$r%R!£R|äd†º1>.ïö(}ÿÎÞûᬺ¨ÛÎŒ—Àãïþ`›-ßvh!xœ2ÚiÇü 9Úùa¨’Žû†[ôV;–k1•ùCºp-7x¡,ä™26ãËô3ã\hÐY\ÜÃmE…œ©?7zžõ2bxJ}ËCd*ëtŽ-óê ö°Îæ®Õ‡ ê“\ŽUb·xýeV×yq‚¡ÛJb”rþž€K9ÇöyÉãîS¥~½0@²Ù@Va,6ë’fÄ฾˜©زٹ&%úY ÉãØœQýqK5Ñ~¸ ‹;mÌI…þsî~9ÃxMfl6žÌ2Yþ©dÀ”j2ÎèûØj?cÐVýuq¶Åzµ>+Ÿí4“ó’“g „›TÁ˜æW[Lo\=kJåÈ™ 2h‡d§7.ƒd‰åîŠ=þÜõïpKÕ»ÔhjeÂ`˜›r ¬Œ VÙrfí7pëÁRH‘kà¹ö™tó2I_÷/C ‚åFÀ?—W†“–-¯Þ ãE…¬g°eÜ ±8BçUU㜧RÇ|ùsç€wýt›—°‡(”{Sn2k5šð¿ÉãT‚vÏ—ªpíæê€u†Ñ gaðçôb³Ù97áQfñéþT”vݱ¸52e-ËÊä+P]\è–ëuEÕï£Z5Qç:uÇ¥ºõÃ|ƒ?£þá ²ÏÚjã(_“?õv…‰ö ’=Çé¬M]k”:TùQë+ÇÙ:¿«Y¬­fè&ÓØWv°õî p¥Å4œvóÞ$ ô‰N×Á—ɧPÐ÷ûÄ1ð‚ïÏq}éú®RΠ†­pbãV8µág8›°áî(Ks:dQõ1˜Ö0Ë®5ëç‚-¯þ%?w»ñç4oU%lW.Ö©™Ì¬ªJf*ÇJN-•Æ6Tæ„Á7 ðf«R¥c»•hËø‰ ÙÇ2ä»2¾d±Ýl ˜ÉM¬–0¶^jÀðƒàëÚ8+”º0™Í&…ì·ÊÄň‘¥s»Ì0?;vîKZ¸ÎI?Ö]“Ý2#ñ Oà&*ya“ˆbÆØ©%§BHbC99Ü ÆpÉ(9ƒI6ÿ}ÂÂr‡@Ϧßᚃ‹à”úŸàšCÿ5='1,†~ÜÉùÑŒfЕý†~TtÓ~Y.æ£ÍÚôêmåì às꽊Ùhí4Ô‡ÚicðÓ0k¨eŒ5Í`/–‡FýÃáž`À°ÓÀP »çÉ©Âx)â®5"çk¬X…â¹ôkJ>ªCýg6ÀËÍPÚ ×nuÆQ¶ê¢0 ææ’§<+™J¯Ì@ûöˆh6ïK›ˆK¡k*·p‘ºO ÏŠà„üL g0­œÑ²YøÁVÜ´Ü‹˜û†èdΠH g¾Éæ‘Õ"ÓU\|ÛÁ}ØsŒ§ÛÅÃ`Um4Çí„4 fÁù ›øç†¨Z.'edm*¾5b>™àUš­¥éŒ ÂÉny Í2ðiÅz‹?fÍÔ79äØÕ3§ª\7)уÄ4ÝÃlGÒÁ»æ«°Û Âf+9C©i¹Ð€ÙLiÕ8ï)®Ó¥ÛPUR eà˜ËÚ<T¹‚­ ëÊàUþí»¸þ‰ˆhî‹¢j] mGÂ}MJ¤ù™´§El]Üs5°e}¿¼FhÓX_XG8*PQxEÁ¢ Iâolhyo1ÁJÂzé–8vÅk»}]ŒÜsôTawª$Ÿæ͈$Ðr3@ç[È1ÜH0j0bŒl0Vú‡j¤…Æ Ì²Õ õF?Æ^`æJÝ02O!WźØÃcy"BÀ[IÕh £†RL…¬SEMü¨cŽB£{hbXR¡ `sX+ ±.Š&ƒ®y2㸠“6t× '?·¬€SOÕ?,¬Ïµ2 öš\F}µA1k³™eæ³ïÊè•Ò‹bð™mCUîâ44¡²['»NËjòØp]9ÜÊ´Ü·p #‘³eÀ,9UW!¿ Õ[ކjÙo926‚”ûkñôh °Rià Œ­‘RK½0EI嘭?·‘…ÜŒ7“û]&3«çE(ƒ;`ÅÜ€‘­XO’W;±N VqÅLš­ÒŒ}²`<¨2 Í“ ,Šà"É^J•Á€õ‡lvMN]?@«×‹Áëg©ø!¦‚×÷vT€ÉKs³ÈÔ©“mÕ…Ñ—Òè•ëÚ7‹µasç˜ÍÊMUž«æ‚6ÓØdl1Wv:7AÍjKI0B¸¦rÖfE¬8ÁëÒÇk6úq}{×ÎY¼&\?Åò±-fk©%«Æz”è–8–°ºå±1P±)â~r÷n¶•ß9Ây* /¯$̤ ó°ò‰Ö-&›·Ì‚d˦u‘B`º†l)ë×¢é¸Ehu*ä@3­2ŽQ„[…¼ÄÂyéCÉüó”àŠÿ)à"Ã`FcÀvÓl<(XF0V¡—ƒÎgÏ€! 
ÿ,Z/ÆW6¸g˜¹q › ‘1JãÈÅÚb1x£óµi¼¿³8vªø[V‚u‹é\v|çºe«.L‹ L´ò 6LW) œÂ(v?Ó•¼‘Ȇ8󔲕Éਖbp æš””ƒé\;frm ÐHÿHOHX3¸IR‡BðÆe×÷³bÖ•{¬†o¶nòP"fGX.†OæRáÊÚhA†~ŒX9ºï Ë0ÙXIþÎþå•›òAH0ì8•c´|=Ó9•¯+ÈsZŽéŠá¥2¦ñ¨„qsYQ ³ðŒ“c³¢pÓ8•¬ËÆïB¾ŸÁÖ…±„l£šõ‹B6Șí{Ý£Qv >k¶®)P;¶†Ô—vêŽ8Ê\xÆ\Áè‚-ºùžÙƒì”u±‡[t–a'ÙtqÖ8ë/ß(½ƒm¿ÅYt‰—Ÿ£l’…ñ›³ÙĽÀ_Üc!B¢ØoW1À¶Ìà‘®TEáåù•TìQçr8/…fD¥)4±«F³ “jŸdûÍ@+Äh©ß°Ú€’éÇPKQõ•‹Û$¤m>cѶr!G5)0a¹¨¾Àu‚t­jÙCmªeßL>.ò§„¼›µñ¥À f«?ÖÉ‚Z`GV›eb©,Ô„È^‹XÅ?\‰—‹’#rº iÁ>ª`W*Û|bŠ·*°Õî4¯ÕrÀ2„r8ÀÅ äç¯,E€­¿cóÙƒé"[®`µ–³lϘ ‰N¤¸jÍ>RÖK…´y°EYhu¶ÅÁ–ÝTÏŒVžF0.5Ú k±Œ ‹…²Ÿ^ȼ!B„±-Rk©HZî‚t‚qèF‘ ªUj8@7¨2h,WRFÙOÏg €"Dˆ!GØò2ræ\|FÚ> ¾:½v–[IÙ+W(ß0÷ºrÑ-„"Dȶ¾àûV*e»2à~2e¾²Ç¯Ê:è–³Ö*™îË®õÏ]+˜«!B„`kUFÜñFšìaÁ®µÏL *Dˆ!B„"Dˆ!Gƒü¿F®ÕTBIˆIEND®B`‚photutils-0.4/docs/_static/photutils_banner.pdf0000644000214200020070000006065312721611413024234 0ustar lbradleySTSCI\science00000000000000%PDF-1.5 %µí®û 3 0 obj << /Length 4 0 R /Filter /FlateDecode >> stream xœå}Ë®$IrÝ>¾" rÂßî[‚€ZŒ´ÐBЂH©Y nIh΂óù²sŽ™GܼÅ⨵$=uÍ3Âææöv‹ßß”Ö3ÏõHé|–•?©¬gÏé1ÛsŽa`~®Rµ?Ky¤³>KœŸg¯×c´çèùq|ËÏ|¦GéÏtæÇ·ô\«>~ìx¸åf«Ï6u<Ïšù™ÒxÌþl³>Êùgá,f;ìÑæÓ*Ík5Ÿ«Ùï¥>WÊ÷Y¿ß²’ÿöøßÇùÌ#ŸÖõiÿûvAÏUû˜ÓzLϳÍñxý8þôçãŸþòøÓ_¾·Ç_¾>Òÿû/ÿé¡_›ò9Ï´ÿzœ°ÿþùøïÿÃú:ÿó¨ÿüøÝžæ8ø‡ˆKm劾k{f{qg-ÓæVÏgoù±Ê3¯ñ(ÓPRËV ~«ÝræçœÅWË3ÛŠ³?×ÙG¶5ð<ףæf8ÍxüÄŸ5M cÿoÏõ‘s=O[ïã¿>b…ÿòOØ”56åT:ÛmàÛ“jTÛ·Òµù¨¶›Ãµ.SÂäl%6¼íL¯ùxõÌABè¶öãD¿Ã L­aäŽõ® ¦ (MÂDB#'Ѐ-3Û€ teÓ>5®M‘ó­špdz÷ùßãóO ”·µþví³ÑG)˨þ_­íì¿>~µ§¶]FÞs~pëm ±l¹6Ðß0€‘”ÍÉHmj€aLÁàldÂzÜoL›EýÛ0²é†›8Ï®uh$þŒ$Øk‰pþo§ñW›TŸgê8Z‰7æ– Ïöʶìk£²ci„Q:—œçnØCv`rµc‡SZö·w’eYÕg­ mõÌ:ã€LqvqD“팽:2Àõ,´m Ë%ÀìÔœ³ž‹:ØÉfuV £Æl†'ˆ5Ùæžg¦ó×ܧƒß÷üït2àVlDpM;ãÆY+–“6øâ2sÀÇ#ÛÙ)ûeA ¸ÒŸ¶X¶^šéY×îÒ¡—xDƒÏÇß¼fŠ“zŸ÷Å®¸öšÏÖméÙÉ¿Ã{{¹³ ¶Ù±œÆz ÎÆÀl6XØ=ãjÆ/—í&˜ %–½šŒes³ý8}ýw0ÂjÔ?‰+7vMYÈŽm}3?F›5¡TMÌØ‹í48ɰ7æÛ›ßFá: «¶G†mÈOcéݤŸÍüD;ì¹fÄ4AqU›5Hï`‹1[!Ø^o†¾Ö(ÐÑ}çãŸàŸA‡ƒUw ;#¶vÌ}%6†êÙ6w ä\l-ù”¬²¹ʺwoÈÉYK]ZjNz~¯Ý~¦qˆ(Þ±õ‡x/Tm°GdݨÚNv>Ÿà1ã4È~µÆ~mãBHÛžƒìûýõ?̾ ‰ÃD‘u”H¡¶ß¡ ÜÕàe#^ã°o;GÉ6ÍvÉÿÀ®ùkqi6¨NÆ…ìÜþöm[­'tЧLÑ×ÁRöþ}Í4lY¿dßë|dCMËÚ¢e:d–@3'ÁN¦oƒ¬NêQî ÷48C0ñl=wò0CEªE-ƨ{€oö*„„Zè%Ö»€fH;x"ŒðÐ`ÒÕ é?›Ð°1Á9å›qç^fÿÄ·M–ŒFÅÔÿø.2¼ }¨ú.‚˜ ËdäÚ›MÓÒ:N+ ðRènxˆ¿TÉϯ:ä¿H¡õL¯B 3YæãúÏ>'¾šŸgÌÃû¶†¯\½÷õ·±u°˜b´Z«àvx(Ü»ôF0ˆl²uã¯ùP¼ŒK7PÇÂ&?FyV¨Ô¦,Û¡·®ÊԣˠIþ¿$Æ~mñ“ÒÐð!?%µS¡ð¨Ð)ŽwÍÓ€œ³‘²Mú{iYL5>™³& …ÀxK§–Æ £€²*´{°QÄÅfd ¬·†Ÿƒ‡àÉçM¤Ýv Y &7N) ÅŒ#›{)އn‡Èp¸š˜z“üy¡šú§•ÿ¤ ÷Žª?ÌÙ±%cÑÖ„>l– -QSc!¿?¨ÏäBÓ#åä Ó0B“„Üý­‹?ÌÜÙ3H~ÒªÃdL\OZ)} ŽÝ)YØPþÇûF¾YCÔàr©XžýEC ÿJ! 
ÁÈ>ÑSÞc°¡¥òÿÄåAHÀ«QaJ#ÎÈZÁÂA–yŠÕUiÍÐ ›|i§±#ÓN ”〞¦×›†oÕU;-äH^¤L;ö‰–0´ÿ¾?‚ynLlRƒÃI¶†²JæÏ0zì´7šòo¦úÿVѱD04ÖD@ X`¥Æ¯V£Éoë;×™ÑK~t·=̾¯F9öϬõÀÔZ‘‚؆ÔH˜7Ö°örü5¬ËJ©ÑÚï‹",Aã1Ó¨ÚD¯‘µ@<6Xº‰Ò’úãmæ:Èo‹qR(u¤NRø¤5(–¼cå› í eÏpQ@Ùjp-UãSߌ@²1(›`5ÍâçZ‚}3 kjéÃOz}8˜Õýäóh°]5%vñm“&®—w 5âIåB#óW“þšVÇ{ìI!_Ö„üþøÓ_:=N¿ÿ?Þ2ղԲŜ зA 8~PÔ!ì„Oh«m‡€±/:&Œžý3änö·ý³±ùEþào/´ç½÷ýû÷ã}üß~Eì¿Ö¹»u`ÿê0Úø¦tˆA–‘s,I5 è‚‚ºSÎýj@ì÷ ¬ëÞßl^Fï×!3Æ|Xoð)ù«Ÿæû’Ö_@ld’ïð¯¦Tî­4¶¨¶ò¶™C}À¯–q<†fTí7lÞ˜ôTãbú2žß§TžÃ;¸0Ýfå[p›²C ~ÿ~¼Oáë~þbæ“Ö>¸í‡Ã+Ñö›F5™¾MðTŸŠC´’áò|ôÏèU‡¼ã׆}Žþ*¸ñ™¢cA`S>®ÿsÔ«ŸgŒ}|_ƒXTÊe I+¬Ì¢­rýñßF& €è"‡l CÊo±\Ó¨³A²°aß =ìTñ×Ã@³Bl2ƒšž ‚޳\ésÓ«¦»“µGǀȲc\X–šÒà‹éñy¾dNo+Iÿ‡^þŸ¿ƒžÀ^ºJƒï[’ªþ/ÿ‹¡…ß¶÷¦à¬Å²6KmÝxÎ-®ð×Tê=ÁwoTZ5B£kñ›‰ÔœÚm´ç•4„~=•éOÚšfy­ü6H»9Ñ«N´-qÖ‚=5ÜÃL66äaQ¾h²RŠ735è0ÛǶ¼~m`Ð s‚iY ioMòK-8‹³70q5Lj0°¾* «îq Ó}ÖiÜ!›Mt7wô'¸FW$jᓹ=Ó91#È,v)ØÞ0&²Ú8nôíAªÐl·æR%W¾2“B*ù™àx¯¨]Ó”6!Ý×R¨7¤I«l 8j†ÜÊ…èJSv©)™mªûeêG{ö\3=ð\$†Œyh£æÏ£Áæ]ô3ù_¦‡½'¸ìL.j¢þ4ÙmêX „«¤œY¹t_,|!Ð- ±G²ÃÔŒé@§J…n,“qTÔ Ú í•)I 'SÆ«)ãf<0ö Ú#ÜjðLÄöuwî^¦ Êú’ €c²¸e¼+ö­tøg9ÓÑ%ƒTmŒI­Ùž„7¶#˜4Ø4ex%;GCD¦C}æ`…ú©Á'vÕžª4½TJëâÄÁ) æCü>2¹öRýV8°q/tö"âðo@:¬N_¶­ðÑùzà¤E› ãÑh4»€|GíoG>W”²­•jlN™':Z>®³¹Î‘½…lÀE…6sdcœ0$à‚±©gãf œúbç3›²ãþîÖéJ$!ºtP¶÷ÍF=âDç<è‘¿}6!¥3îOëB, gèÊ;"c<¤e¯ÔD7Žâa#Ó3)LȰxáãj€ª¬•hË&~Þ[p®ëœä\¡•Rõi¶†¥0²b ò°ã&ÄÙë1wc§«bóÂó´aŽ`ŽÅÏpK#vRŸcMÙ6vì·d„mÒœ/Æöl•^LFQ 7[ÿ+Û›ZßÙÈÁçµ¹ÉY»Yq†÷œF?™µÁ•ì2˜ù‘a1´tc÷¦¯ÐÕ!ƒµÎ‹vFã7ê¤ÐK¤dã¡P3w¶å4­¯1¦|1l˜UŒÇeJÆÉþ´l•¼X¨m,7ÔÑ`ÍÝöù“½xBf" †¶hvзáu5Øâ )éuH•dÜÞ»gÈkéq ßäÚs£x{îðJR‹¯ìz-þ€L:=ÆÎ1Økã2…qEq±âhh‡âwß¿xÛ7øêÝI Fw ÑäŽMB×ìÄbu¢ÀXºèFŒÓ¯wlò¼~&0VNÇ(ó:Ñ`¢¼É‚š â‹}0M¢cƒº“kØ=pßcè&SÕú l9jU^Iü>³Œm*(&~õ…¶öøP'LˆÙ¸ Éf¥¶sÄëÆæêŠîÁWJ×ãr‹‡K“C~Ž=yd˾ºx@‹ß¯;r®îy>ºcöðɽ¡þ§»ñÛQ’Q#F0Ô b¡yEÃÇnab-qô·òkz7BK½»°g¬)+¡ç'-×kŒ£ä¯-ÇmxëK41æwDƒkA%7òh€Ë8„YŠ9Ft¨œ Ž{C)†ÍFMî‚b²¡ò­ ÅÇF€n:·aç« Èô3xÌs dBÇUH­ ²ª!ê–:˜¦± e¦´5LͯJ©ìÈ2ÁH‡ŽALoo•³˜l¤Z$ª¼ðÛ—´ÄèÔ€K‚jÒ™8…¸_ÇíäV°1ÎèENÚH|¡ ¿D56J*‚œ£„³Ú–?¨f’ž`P£Dÿ /Ðq ÀwôøF|o~³ÚJ Y#õÈêój»åc· ei] 8äHh€~°ƒÐËËH3ã–ÅÖFj|¼ÃðË4Wñ0Ø{°w ŸÃ´‘ø=l‚Å«2`{]6\1CDÏTuËžj9Òl’ëàGÑp[ÞìÒ3¾´le¤›©«Ÿ[΋È?®v,f¤@#*0|ÅDkFV…vÝ„+i)Œ— G}_ŒëÑA\œëZE¦Í±0òBÒÙu”^9D&2o2UTH«œ÷¶ãL#ö(ð€ÜiYûWÊøí¨§{`lr¬²©3£ác7à­Ò™m…¸Þš7ô¢eÁ]ÞUÙ°@@_áë;ƒ`"_[4òq{kÏ%:âTåÞÐé׈£KÍè´"΃´$#èH/¶ uÊö~€Ð’@×­˜t4CÒ£q$HÝ5eÒ䯪ð鈪—÷^¥ˆ Ä5 ì2'¦š3Ã2ƒÑ,äípš"K1ŸâJ“ š=ml CÜÎYLnA²Är*¶ñk`Þæª_ð5­»åcÅp$?ˆõ§ƒ9¸*ücÈ/i›`±¦DýbñA85WðJ€«æLTBðÚp­*1t1€5 qTDç»2$Sjö!ìYq " é¬EŽwC*@Ø|HÁ²ã Œ$“Ë?s7dÌ™È7ÚP‡©Øò—s`G£(Ð…x–§½lèÚ-µÐWÁì7ÐC©<±˜ež¹Z*„g14{ Pgp.&’7l‡lÄ+jYŒr{f3!ßùª1Š5Lš1Œ<°qÍÒà¹ðu8Œè;n0ÿñêa1/÷6ÒxaakËöY·†½ÝâK.6*|¶0‹ã†O÷ÝmtÞî›aã.Û~°¨#7Æâ-›ÕÇK7Óôk?FàÀ°ÒèX-Ì{N<ñÑò±[:×÷†“‘ÇJcâØ,2­Ü8”‚Ü6Ç€I‘ÊfP©Î âq†Vhù` }¼Q©jQ^£¿F¯8ÊäÎ#¤Õ8äÇ¡qäÇú1‡ÝAQtÅJ0΃X™˜ Ϥ«V{þ›)@)jÛ¹eåØàV+¾ÿç¶ðu\ܶÁ1|UÈ.Db ¬ÃžƒmW3i‘hÙ3L¬M¦çkp1UøÌùlBÄ`eFñ´ç—‚ ûw%ãB[Ã5‹|À“ yÍømC^eQrøå© O82¡r8¸Å¯«aRè€qârX2{ƒ?–É ÏË5I-Æ_½)5Éc «…»oQ»Å|‘#°ƒ¬…%ƒ'×7@+¦KBfZ´ÕªdãAc'v æ¸ôøÉ”!3Áé.‘L3ÍÄP ËÜÀ%¡5©pdˆ-æJLd÷6¤‹AÇ‚/ÛÖÌ¢ ¯v]ʱàFC\['ˆüTÄMšm$6´+¯_i»H¢?AèXpK1Ñ_à¶þvC¡šKÊB˜ •ý3㣇º0¦ù&1¬B™Ë6yhŒðÑä³éNBö¤f¶ˆá FB9D©’´—²˜z’ç‘K€3ÿ\ ”¬’¹y• kÇ”áOÈË´;' ‰æªU£ï$D… ¼ $V%!à\BŽ"„gÍ!üD¢HìBJÈ̼›à¦`îø?ˆyakŠ3Ã+_夣#ʆi#P.øE *hêØ "+$ìGYTý"ÓS™f fD÷3Mo,C؇謷p¤ yÌ^܈„­ë´º*Cät+à¢É”yŽ“pê ÙÖrøBiP™Œ.g(ãæ†³}7œL8pq5€Ïçp:(F‚Ùœ…&ÊE$¶$2I¸’»¨pbœj©¼fÑB ¦-²NFû²µ-)lÍ7«ó3ùq@1쇛 ‚Ó…„VÐ&ÒìEÚ‹äÒÈ0¦ÓŠ Sq¤YÈxwv„§•P ëç³9NêÚt Ÿ#F!€ç;œî@‡ ¬dOqDsKÄÞ1ÜB ¥R‹ SÓÙ§|‰§Ø%E:!.0ÚZèRÈÈ¥AÛ6Ú¦ÓKžÝõí9ÀÎPž5&2Ç>#%8‹=éþòÔà[®£ñ%ä7•yÀ[’|ÿ*ë~CžB²_ÿ@þ¨_%É'Õølƒ}@£ yDÆMýïP,K&Ë̸²Q f0èÜü®î¼1‰ºS¥½ " v¥2åI½[Cõ†ãC—æJºÞ8©ÊïþLwî$÷ŠùäÅí×lÂp¦ùrìù<ºò3³TÞÌA`ˆa;ÙÆË!Fá°Bûh ®&iš‰×xeYLktºÌ‘`*ÅÉÕJ¼?uãY¿›Á Miôc*„‰Þi;#Çfc°“`M™0à`èùuoȦÜ0E£„yˆÑñáFp!_fh8¾&µ?N®“‹NŸ=ò"æ /“ø@Ŧ1'–×Z¯Žx¦’ì›6ä… XP…Èäe(÷`ÕíŸê«o`‰h‡‡SnÀWŒ;àpD ­¦H\ÙÉÂ"d.ê@!n œB\áö@#Ãáä‘8„ðjÒ•Á gtöº ­7€05ít@UÂl –æˆD˜;| 8¸×†œо³)iƒ7Þ ³pìßóÀŠ™qbÆy_ã=¿få%g¦æT17Þý˜I‡ygÉï<Ì™nžõØxš)šÝ sãê¯=@ö–äóƒWÍffîd  '¬g^ 
ç´Ìëg˜)\+Cr€Ê¢Ôœ"È®¢,mÜÁ^e®7°ŸÅéÂàâÆÛ'Y³IŠ¡Îp=/)ðÊá2@LP#COǕϪô@¢ÈH´x±ä›d¥:„äÇgPBâŠçí€_ŸŸ<Ú$Ïéée‹÷Êh¤ †¬=ÉÈêÜR†“âó™§¢ì ®2í7í¶µé!‘[¬€t±Á¿!Æ5½Ì“Ô99ÃÙFBƒYŒ€)H¨ :wfö˸%Cêôî-û×5¼²© ‚&Ëk‹kÁæÁ·—ƒ–Áô‘&©Éœ´‰Ra¼có4rq'ÅE ®9ÚÉQr«§C!Í×am?•±x éÊa!1‘§, –²ŸóÙ8[R®G0áЇ|2©yUƒï_ˆÉäfÍæ™2˳ÁóôW}]ùÃ{©’ƒÈÂ*ò¢6òÁì=Òâ—†xe!øÅxÇ~úšÀúyMu7 N€?µ£iôø ä…•>?=àh!ý,ÝL Ûêq0¤¤Ë½˜]b”¨)© ~ çbÏè-QÒ™¯§JŸ¢ÀHdÆØÄü®åù J¤ôù 6ð|É—²i€ÙJëF#°æ†Ã#—Ô58 :)P‹,ÎÅÎÉžÓ )UÅ&à“‚D¡ :]¸›c)yw„ÞŽ»%Ki—•)ŽLJÑã¨ó!ßô8¯ÛG |'7£ÀUyDEF€fÝR\mú€vü]0$ñ­a°l JàAM ò Û=Ekô½'ÔL(ümòvò’¨(Ëp-!«vË:$šöˆ5ÈìÀ ˜ÒË™2º™é@ãTt<”O¦ûº5O@±/ŸÀ!5µéý“±NÀ8a?Šœòú†ƒ¾èwÞ©8úŠ…gV­y+´˜àö%wL? ä™áš‚€˜ñ<#!¡ ú.ÝÔ·}C.¥.Ògà.C­AöáÉ ;g°?ÎKk¡&Ÿºñ-}RЬB…/# ­')1LŠ9u±zQÛ˜ -£ìôK aÎMÔw žBT;«j·_ …ý;b°)ûõtÒßµµûñÓøI>Õ˜âŠ}\³7¹]˜ŒËKbåZ<3¡þ^È¡y8±k9f ó¯ ž;²]Êsôž”ÎúÚ£#ƒJML†ÕìÁ¦fããJ ,Öî!.Ç åÚìBΩðV¡PÜ:O Üo{ã GlÝ~]{{uï{ã;eÄìœr4ûÃ)M:Wçtkwº¼#² ܳÇã3Í3ù ³Ù{ñã±á©$G¤Po~òhô|ÒÅZ‘J…å䡼ù„Ó¸(:mk7ÓtUðu‚Süyfe#¤ÅkxìœVù¸É(ņë ÞÍÌì9vïà)±Íô‚ÀßöÔ JC#&ðµ—vì´ôýºP³{wÄiôÖ˜›Àãñ5í żVŸý–H4L²ë¦m£öÁ; y¢ÌpX‡ùøÔ€Œw¸fx‹ XͽúBŽ èS"…y\Sžs%ÉÚ`¬·‹“¡cøÝW‚^ Jæ 7˜_Çõ@§FËþ˜TŽˆIUYèÊǶnPå`%û"[sPHõîõÈ”Ÿ%] Ì`àmÿá·0Mê.Ž˜‡.ÖãØ±‚ZgêÅVOÑÀbgJ±ÃB¨'Uš0]ÓÖ¡?1'ZpètÕŠ©Ð +7¦ l^L‰)“ýµ>…÷$&$ˆCçý/ضEK.¼M8•6©-šèm)É]R°{°¥*_ æ@; ÐP&uv%þdZ‡YhF–=—’xq¡^Cþæb¹RŸèÃÀz=œ8 Š1ªñºjLrèÌÏ‚n»ºï¹C§ÆtHšä]ÖÓð[?¤'Ùf̤ñ5iÛoð¢6ÐÒl§ Z@÷â`k<ÿ¿ƒ©I$Ó¹JºU9Ù\`¯½Õ*B6’»M‰ µˆB+~p›ªHŠ:å8FpÝžÍ!|FñkæõÓ¬,ÞTRO’„LVÍ4¦m:S—L‹û8~F?¦N•S9ºOÖ'è7 éËö©L Fð÷lÎ|´Â%÷ ÄW1,®@ZÉŠ'û¯EAHZ+€èWC “~µ\*#5dý… «ä$å"†ÀÅŠ¥;+JÓÅ1û+,cp㘘+“Ê®bÆi”4&÷+JTwáœY;ñ !ÈȽ†›AÐÙ šá‚J“SÉÓGº™7lZ÷]ôý©žßm¼.Í(&»èJ?öb<èZ\×ÑõL5°x¸Qñ #¿1VGÞÈöiÀŸt师,Ìq×åzÂæ1+’\n%ÊÛL‰Oä;£~^\Áå¢Ûþ•";SCœ>Œ(fúe<À L3~F¬ or'¤žJÂþLÆÿ^l0£¨HÚ1¬¼Œ@wµp[EÊÔ€« tÑ-ÖÎÊÍ3ór;)3s‡—xG MXm‹þªŒŒ0•“+ë1!23‡¦±&Ú©Ë9n¸ÿ ôvr€³ÉFëºÀsH‹–ª}‰×•±»»Ïq'+†Ïb±š¿4ªbúIålxÏcÒwמåÊ£‡·‹áž%D]"?æ}­Àì)í10/Ï»ª†ÝàVá±wï2Ñ Š¬`¾#q?++yo¢š:'-s=Q¯©Å‚ ½“… ,€‡4ø.7HüúólèÌLÒë,·»Gîn×ñ"jf¹JÞñùJ—"Ã`Ð1ûéŽÔ5|=?]³1s$Û¥ŠþÇn˜ºœÄ|2LƒQ‚•â s_÷™à,'#ιËGŸ˜¦€ $Ôâ"‚yk tBÚ–*ÜByí¼L´h>eÞ-Ô½ƒŒîÓýqM*_V?€']îÀå'ç¥"ЏÍ2Ì–pa@v<Â>!š„Z×¥÷íWÁëem<`4^1¢paOG%0Ý )Ó•q¼ý ¼(ÕZXâ|‚*o•Äóœ‰Ú:o]Ûƒ„ Lž;x=8Ù¾ÕXÍ£SqêÜ…Ú¿/]8Do8¤¸~+CÎn4xÚà ËIGn x:©ÞG´ÄqãM¢Y>Á̲҅^50õj˜ L_Ü.J˜JR˜¤Uë2GöH&Ü™`(@À¥fxsgç¨&?–[òðŽ$Ö`ò¡íYyÁDZ ÛÏçG·œÖ…v*Îr5Вﺎ–HGe ÔªD\áP$ݦÅÐX+sÃZe¡±ò´Úõ{'á“,4<^Ø< GLVYæÜûɉÔý¼¯ˆÕª&ånX¬oÆ‚{׃-Sœ £ ?ÿ]µ8®[”#Ô²&78£—×¥¨O Åî> éûXär™Ì"êôëéÈ*ºzEŒdâ¨w}¼íÄO7ÇX Ó=W„Hm¿¼ GIºäþº˜.E&åÖ#B°­l÷"k°A«ç£¡µÐß3£|3ì`†Ãx•ßÊÛ™ sÀôÀ >¸ÑãÜN¤¹ÞŽõt·püÌ»P—Í”*uVãz g« =8#¢Ò››ü‘Åí¹Dz|Ò«§ªsl©ò#Åë†âyÈ—Àð],zI¤MUm&Ëå䙜5âuÁr±XdRÆï¨&YMèd¨.òŽ—ha÷JÞ.§²ê)ÔÞüÎÄ{SÁt »ƒnè“lu+F¾‚šh¨ …ƒnòAŠhcÛSÐk7IÉéñ6ȸ§Ýû@pøN•)–3 >ÏÂ+H@Î5Íßä³gÑæ³™ÁN)VÇŒØa] kAÞ9lÜ‘‡W–•ƒd$Ï]Ø«ô»GFq¹ äMJr”Ã)£Œn‰Pø8­mf‹Ò×ç÷«X}}JmV 8Æ.iƒ:U†×SÄD¶Wu¦ˆ>rç0×’73Gä¹”·gþÇþÝpSÇå³Jºzg ޶GÇÆ±FõÃïé›û²Ž—ëÌ%QÞÅ+ʸ²KÈl(^´õ›÷¨Jû+"ÃzçN¿ñÖ¯í¬ð˼gùôÉ' kf–ŸŸ·”7H‘1w°EÒb‡Q3^yßdyñÌ(ÖpÁåt&jo˜…ÿŽH9ñ9tš Ä£š÷Mÿ¼[ÙòzË[×F׊ƒm´œ§¤‡k£™ÕFZ¡Öé_¡ŽxnåÇ‘:zkà]W€ŠEAXpcë£h¡ŽI}ç˜>ZN•üÛ )n @æÒºB uĶBŠ›Î¬i-…´°:i0¿ÂÐ@½˜#”ddñ'飉­¢!—k2ªÕ"u” lÓÍ—ê^ê(ÒœÏê(;È)ÔQô^ûŽgœŽgRŽÎÉfÏÅ)™K“³Zb‹˜ºÔSàu*¨¢4dAô¨—6ZNe¤º6 °Ë…6ºÁÐF½áØ„ÇZþ“‡òɆœ>7xq*8Ã1aj9RFyG½_º(7KÊggáã ˆ;Õ#r~øÞ¿ÆçsÏxÇ’­Ž"RœÛVGã|P=oˆ7U‘´=Ò¥ñÜ Žbtr ©£œÌœ—>ІūdÒG9ÿš‚bu§ôMqD4x(H¿£ük¨£€Ì0¹©£Ä¥|½ƒ›/³×ÕÑ8¡[Ý [ÅœÕQ ÐÚÖGÉJ¿ôQž±ŒB‘t}0݈¡bõÈbw}`QÕK裓~=Y¼P´õÑkê£o[ñÓÝAI„¾K`äÇãÖ`Ê ÁÉ{1*‡;R,N¯\€kÐûò+ót½µÊ»÷‰VÇ%r•JÝ¿*{eÑó‚IUºç~`ñÑÅ~Ö3õ5ýO“¥_„¡3ãNp.á©;à1DÕˆÖÕŒ³_þI»†@ROÖ0»+ÅÏÎT ö&Á‚¼g€ ¬¿!ÂÉ…`ŠjøqCöVÚ\chú¼ü¨)à/Rp{¡éƒðAêé–7s ^–8þ<YEë&·Ëöê:L¶;n?Ó>›»¹Äêgˆæ ºDƒ „xx$H8Œ ¿®H‡k™ºçY®u_•É:x¾õxÖãÈg? 
-/i0$¼ 6U(¹¨ °@öˆ´H€¬E×T}Yϳ|fã <®f«2°Ie¼=øQ?Ho0ž!o Å^–;+qÑÀÌÚÊ|5ÇÁ©æ)‚K—h÷‰á}€'å"êʱ3ÊÐɈN‰èûÉû¯œ÷H©«"qkÓc€Añºií yL2Dõõg÷UwnRµ“¤r†“ <ˆGˆ”¿6ªC¸(êŸê˜zV«r¢, BÆ ¼´;í±3^r%bÈýpyXɲ,ùçV•è¨}2íä×7™'1Øã^põCüjî€ÒãV–¢„/à**Š”ô5¢qJgpS(#w*9`— ¸5Ïã[(ÈKxW£‘¹®ÓÎÄJŸËz*ù@((Ä„`ø­Ôï³xî[~§>GSY‚ËÔ• *ZÁÅw±7nïì×M¾x‡ýÞ½K‘¾J`rÇ^sß ñxfR¤¯[¬á†–øV ng ­šÍ í-\%Ú•&{Öt‘ø¶«º¾¿w½Kõ’øD2?¥"SÖ³¾<·Ý3ÑU©äwfàÜ8”<–§ÎtÓµXÏt,»F¹-x£.³?w&>ÀGËS½¥.<¸g=¶ƒÅ»xãPþß’= Ìe4n‡Þ¼Ç¨üÅÌÿÙY!&ðT cfI\÷Eƒ #¸”¸zŠ”+€¸ŠïÉÏ/V*;«.ÿÛ(½2RøK6Å釬¤\TÅ´GÄg î|}ů¶‚Nš|8èHßGRâZÉž¦©†Èúî¹0)YâLþJ*^yÐt‘ÊéHõä Zß0‡Õw·øL¡{«o{¸¤HìGª ¾É>VñO4°$©· ¯— ´*س¼œœEŽ¥y>øÀTBX©þöTíÔh]YûÊ&Ãl"MÔsuO’&Ò_\, ZÄÙ=Íò¦WÞ›¼©…‰UÛ< õNåç¢îãJà%õëR`y8¦g 1?<ð¦ºÒVCÏÁr®Ã(oêÛÙä FÑ É|?»A¹]@ ¿!ÍÝi‘8/t!Ëñ`FV%‚\¥ŠŸ™[çÐÕt|ãg«ÂËOä1ÚS=¥·0mQAêÓÑéWÈz%žw#°«or¡8Êf´ré‰^~²+Ó‹åÌÃ3NýîI•Èrã¹ÉQsü]ÆððET]"mü"Ï, W !=PH_î2ªté4Æ"p+\7ÀU4lÝã]7Ž[‹\©×;ï ÛãW‹égø¸¿‘â6Ç[ƒBsW·‰y§~Ü[”ÃÙö‹WUK•ã¦l§IÕ§yp᤭^ŒLÐP qéà‹ Š—ÿ®²²äÛuc%Tä-ƒšïŠ|æUßš¥|¬rù"go¸óà}¯±ýHž¾‰;Á*ÄB¥¥\P„JûýrÓãÂS=šÊîñþ~„JÑ@œ3TŠ"}nSbšpߪ›Må+%âL9E7q‡œ£~9¥[÷ì“xÝ–"‹ºˆfÄð3üáœÚŠd˜¹9%–†ë­ÉBÅ‹ïoÄ ‡®Qo(Ħè"±*ð¸Y<ŒÎ_w{0zßQ§Ý£R‡Ï.‚V1ûKdùâ<è+÷˜X`F\þpTεCrÙŽùKŠøÎìß¹oÇõz w›Kßø=¾¨bÏÎ]²1û#Èj/OTç«¢¼°ãDØs 'jÇg‚Òô±ï3 ör~J¾„ÔÀìyÑtë-ëÎŽG{¹¼V"ÚËZ”=_Á^àƒoìXÇö\Y )Õ »*:¡hoáGXw0wïF{KÛ©¥h8 ¿Q¯×{¼ÝIÕD°Y³* ÀÙNLüÞ„¯N×céóŽz§¼T/Ü-Çê¡¿÷Ïa-O½áùèqî}@ö}2G„ýc¶;î«ñ¤X«§.®œƒ@–'%.#(b/¯þc¯÷ø¢„=;QÊmö¤¤ÛòTMx/^”xÃ(uãNtˆõ½ýŠôM t–£ÚU8P¯6›TD&Òaª 1w}3ýS?¬›f»j»N© -*wU³ŸNZÖ/6 I³ßj² ¥Í~=àuX­ lÕ¨æ¯×-|cË(ø\†Ç=Uâ #Æ[ƒCṡëú8oC pˆï¾IÔe”ÂOxŒ¥Áƒ‚±R'èõaä¸Cé ‘œ"H¼Ò„"Xí [ÇÑF]Ñ÷÷³½ô{á;ǹð›9óÊq.^OËÊTÞ¸ç8£20¿7½sœËÔG1=Ç ê¤zŽsÁá|¥8&,D†3 ºüCØP‡çyF†sÁêU#Ã`öê<ü°÷6:v‹2œ÷ë^‰;º g?2œcrIWcîžßÌ•õùÍ\·{Àt•rá9ðöZw«Ê_ÞXüæ2U¦pç7GÃÞ[mÜqß]Ô0Sª­r}ËÔçj½S*9Ò›1ßSÕw”ÞŒ”:Ь6,׿·¼%jB³øÖ¦¤TBR¢á¨Œß^×oÝ7~bøÆ•O ú[ ƒFõµ=CÒW–†F÷u'÷M^’ Úà†>•˜E¬:¼U‹Ý ƒ&^w½0zß*»~„N“s?f¿M‚Xœ› ±ò¨…,ÌÜ.†êÜ" ̺Á˜ßMìL\ Û¯Ÿrcíî}ãc|§Š˜ˆ&НMT±:§¹X»“ä…'Ù@žúظýLñ*Ø­›·C0£ËÞb‚ïš “CkÇzŒpWë¹¾¶BçˆräÛ¢AÙ •ׯ¶Epæy³hXK~l‹¦ú'¦ÝbÙÛ «¥—Ë¢Áö ÔÛHO¾w^Yåm^#¿^s«JöØöLet¯nÚ½ðÔî¨wÂËÛž¸Ò6i¼Naéßñª+çÑõVÞ}äÈXöi¹æÓÞ¦A,ËM‡X´Ži½‡€’ì „ºÙ²9O˜5±!ûwm×±_ç†^fMlxŒîässr¹æ.rÚf“[˜5AŽfœ\sNÌŽVAÇ/éßä‚ßÃvÿßVð× ùw`ͪഠ|T@åЦÙ·fOÁøÒp½3"mò­Ã÷wöD¼,q®{ƒræðí‚“õ UW•Îh]wñmÚ !©šýf¬|ò•vÒº\ô5÷ðF á¨ºí¬Ã¶øsSòwâ5^4x+¬!ܲîÁ)Ÿ"<8¹å,šscÀ ¢3îb+{éC®­+û ÌmPÕ¢šu*87Ô´qçÜÙCŠñ{Uú“2½*=ï3Ôê²jR"Õ[<•ÊÎ:Ë Ïž£ÀÇ‹ YŒ×ñ+ öˆX?Ñ÷= }”‚ŸSSbf çrnp¸»øøþ•JA¸óÆSŒn‡àÍúþKÈe^m±w ;:hkÙJ)ôqe¶ßÔèQk«3%…¾}×·×Ý?‹¯/¸¾A}į‡:S”=p©3¥í"þÎ/²×ë=й:S†ò~}ø©â£>5e3ÞfŽòë¸V¶Âò¦:ƒÏAçË? 
¸ÔK©§âªŽT7–„Èõ×C"{ï—ÄöÑÃ?ë³Ûßg©3¾¸P|åûÓ=òÝ?ë¸Ûúˆc6ˆÞ1 ß™ý;÷í¸^×Î^êŒoü?…v¥Ù‘hn“'M{uNs¡ÎD¶T÷™…Ì÷©_JAÙUy¨4øÂC¥ ^Ž›Êáx •ÄÑK0Ÿ­Ñø¶xû¶_/Ê\¹ºßuÁ5¼ˆbOND£ÉNdc\‹#É]KIÞp#’ݸKÛš"fÓgcê'gÀäB-qAXî¾Ê/ÔßT ·ÐáYç½5GwñlÀÌ•'¤"Ɇº'¤Ö©ð´E¶+ª-“€ûŠJ‚Nàü¸öEàŒœìì°:²ª<îAð³±5ÒÃ*JE«ÒÃzEéa¾ß€ÄRg7=U€ß ÁEJÙ³R9ükžÐÝMn¡ {†XÀž"F0E†ûRpQbÀ£WÎUFZæNõjAm}è–g¤ˆ³EYZ•¿ûGd]&Û¸ës(—0¯ŸÓSñ]~¼mxáàHC§'ˆ³Òƒ!f׺2İ<ž4Ï«£ÄŒRýõÙ"CŒÛ¦ë,þµ2T„¯QlOÕUÃq>¯»·/®´Ž±óê—ìͱ«Øç¥9öŠ,Üú$àéaNß·ô0€s×HSTéaq~vzXœ¯HÛçÑuÇÏÇS_ )b‚‘ï´”Vù!úéaÜÝ8wepð³‚JéJÚéÕwÏ”„€RrÐàUöâw¯FõK‘Î[‚ K^WDĈO©!´C@‡©\ bD¯ÌC&ˆ>¥D!A f™ÙH#þ•uɯF%ˆÕÞ#?w'ˆ}A¾06Å–¶çdèë‚—Ç"¶ïdL5¶.‚[Úä|ÛU‚bëó+¼ß˜.p¾4\žeOCðå7qØÝ&óô´}æ ¯`W!´°4ž»MP¡Û«p€Ò£¾÷&]Ö^¸ÿÜXuÙ½&*­póšÌ9 %ø—¾Âm‚µ•p›øã«ÊDs·Éô{bî6™ÛqA· ÆÓ53w› ’ƒê#Aµ;ø§}°ù§ë½ïâP¿ŸQÃP UŸ¡p$®ÆŠK÷t› åA†ÛßLHw· 꾫’ Ý&(”_‰+ºMPìÀ3bW<ž®ò*Aá6 0Ü&ß¿R¨-¢¡µÞ´ã©šè7zÚå¿•¶€†¢Tí« žÇ€¯1›S.ˆ©O4î<|¥KáwZ ËS¾\º®…@â;eÙ?vß1cµ }rkùg"‹?ß¾ƒ¸—âéêKÅvãGÑ”oá\feiCJb¨^IÜ“ö¬ºJb@ qœR̕ŀ…/%€øGÚV8q6§®¹~nFüRY Ѱ/ߨ›âõ¾u¶›ø¤3^:yYÎvs1Ëž М?{/4¿Ê×Tç .¾Tb·UqI ðh~ï&dŽÒΤåDŠß‡n4¬} PNÄä_„dzøIå$;jMùS2‚x1¯cêª>/æwW‡xåBr€Ô8§ÌÕï êC÷/Gå‡&—®£ðë¢M_0ó’ÅÉg©ºbçÕ¹ÊÏ•í A%€~ûT¾ ƹ·|°%>¹‚Ï…¡~y!U ÝU1¿9bBøˆã)5AóÅ'ÕxM#îmð‹zº.¨ã3fõîGgL±Ó½YµÐç•)Ë8CL)I“ˆ5%æ¥T\NÞ-9ˆ#~E‰Q^ŽÖ§É–îã-qClïDWÁ’C{òjÒñrò@Ã(ú„$Â:ªKéGQ¥”Ç;SúU·q â¶éëDÓ/×èÇØýV~(lJ·_úžöäç#ÈßÈoÇŸÿ í¼Å­ endstream endobj 4 0 obj 15625 endobj 2 0 obj << /ExtGState << /a0 << /CA 1 /ca 1 >> /s6 6 0 R /s8 8 0 R /s10 10 0 R /s12 12 0 R >> /Shading << /sh5 5 0 R >> /XObject << /x7 7 0 R /x9 9 0 R /x11 11 0 R /x13 13 0 R /x14 14 0 R /x15 15 0 R >> >> endobj 16 0 obj << /Type /Page /Parent 1 0 R /MediaBox [ 0 0 512 132.915924 ] /Contents 3 0 R /Group << /Type /Group /S /Transparency /I true /CS /DeviceRGB >> /Resources 2 0 R >> endobj 17 0 obj << /Type /XObject /Length 47 /Filter /FlateDecode /Subtype /Form /BBox [ 41 52.915924 73 85.915924 ] /Group << /Type /Group /S /Transparency /I true /CS /DeviceRGB >> /Resources << /ExtGState << /a0 << /CA 0.19 /ca 0.19 >> >> >> >> stream xœ3P0¢týD…ôb.CS#=KCSK#c#cc…¢T…4.¯Ù˜ endstream endobj 7 0 obj << /Type /XObject /Length 48 /Filter /FlateDecode /Subtype /Form /BBox [ 41 52.915924 73 85.915924 ] /Group << /Type /Group /S /Transparency /I true /CS /DeviceRGB >> /Resources << /ExtGState << /a0 << /CA 1 /ca 1 >> >> /XObject << /x18 18 0 R >> >> >> stream xœ+ä2T0B©k g`ab`nj©œË¥Ÿh ^¬ _ah¡à’ÏȽ & endstream endobj 19 0 obj << /Type /Mask /S /Alpha /G 17 0 R >> endobj 6 0 obj << /Type /ExtGState /SMask 19 0 R /ca 1 /CA 1 /AIS false >> endobj 20 0 obj << /Type /XObject /Length 47 /Filter /FlateDecode /Subtype /Form /BBox [ 46 57.915924 68 80.915924 ] /Group << /Type /Group /S /Transparency /I true /CS /DeviceRGB >> /Resources << /ExtGState << /a0 << /CA 0.595 /ca 0.595 >> >> >> >> stream xœ3P0¢týD…ôb.3Ss=KCSK####c…¢T…4.°  endstream endobj 9 0 obj << /Type /XObject /Length 48 /Filter /FlateDecode /Subtype /Form /BBox [ 46 57.915924 68 80.915924 ] /Group << /Type /Group /S /Transparency /I true /CS /DeviceRGB >> /Resources << /ExtGState << /a0 << /CA 1 /ca 1 >> >> /XObject << /x21 21 0 R >> >> >> stream xœ+ä2T0B©k g`ab`nj©œË¥Ÿh ^¬ _ad¨à’Ïȼô endstream endobj 22 0 obj << /Type /Mask /S /Alpha /G 20 0 R >> endobj 8 0 obj << /Type /ExtGState /SMask 22 0 R /ca 1 /CA 1 /AIS false >> endobj 23 0 obj << /Type /XObject /Length 47 /Filter /FlateDecode /Subtype /Form /BBox [ 65 31.915924 91 56.915924 ] /Group << /Type /Group /S /Transparency /I true /CS /DeviceRGB >> /Resources << /ExtGState << /a0 << /CA 0.19 /ca 0.19 >> >> >> >> stream xœ3P0¢týD…ôb.3ScC=KCSK##3#S…¢T…4.°JŸ endstream endobj 11 0 obj << /Type /XObject /Length 48 /Filter /FlateDecode /Subtype /Form /BBox [ 65 
31.915924 91 56.915924 ] /Group << /Type /Group /S /Transparency /I true /CS /DeviceRGB >> /Resources << /ExtGState << /a0 << /CA 1 /ca 1 >> >> /XObject << /x24 24 0 R >> >> >> stream xœ+ä2T0B©k g`ab`nj©œË¥Ÿh ^¬ _ad¢à’ÏȽ # endstream endobj 25 0 obj << /Type /Mask /S /Alpha /G 23 0 R >> endobj 10 0 obj << /Type /ExtGState /SMask 25 0 R /ca 1 /CA 1 /AIS false >> endobj 26 0 obj << /Type /XObject /Length 47 /Filter /FlateDecode /Subtype /Form /BBox [ 69 35.915924 87 52.915924 ] /Group << /Type /Group /S /Transparency /I true /CS /DeviceRGB >> /Resources << /ExtGState << /a0 << /CA 0.595 /ca 0.595 >> >> >> >> stream xœ3P0¢týD…ôb.3KcS=KCSK#C Cs…¢T…4.±© endstream endobj 13 0 obj << /Type /XObject /Length 48 /Filter /FlateDecode /Subtype /Form /BBox [ 69 35.915924 87 52.915924 ] /Group << /Type /Group /S /Transparency /I true /CS /DeviceRGB >> /Resources << /ExtGState << /a0 << /CA 1 /ca 1 >> >> /XObject << /x27 27 0 R >> >> >> stream xœ+ä2T0B©k g`ab`nj©œË¥Ÿh ^¬ _ad®à’ÏȽ & endstream endobj 28 0 obj << /Type /Mask /S /Alpha /G 26 0 R >> endobj 12 0 obj << /Type /ExtGState /SMask 28 0 R /ca 1 /CA 1 /AIS false >> endobj 29 0 obj << /FunctionType 2 /Domain [ 0 1 ] /C0 [ 0.188235 0.443137 0.670588 ] /C1 [ 0.0196078 0.219608 0.396078 ] /N 1 >> endobj 5 0 obj << /ShadingType 2 /ColorSpace /DeviceRGB /Coords [ 121.590797 450.859406 367.959412 24.1366 ] /Domain [ 0 1 ] /Extend [ true true ] /Function 29 0 R >> endobj 30 0 obj << /Length 31 0 R /Filter /FlateDecode /Type /XObject /Subtype /Image /Width 19 /Height 18 /ColorSpace /DeviceGray /Interpolate true /BitsPerComponent 8 >> stream xœc`À1DXyÄØ9Q„™$=’œ5XBæ«.ž?;+Þ„&Äî¾ýûïßïnÏŽ–`‚kœùñï¿¿>ï)Ôƒ©ã=ôùï¿ÿÿ¾]o2ေHä\ýúÿÿÿߟvËB-aTÉ;õý/Pì÷…"I˜qÊé›>ýüÿïß·ÍœP1ž€ §?}ÿóóRªÜOšÑ“.^º¹6^áf6-ó¬Âp5d¯1ròJ ³(8»‘E endstream endobj 31 0 obj 185 endobj 14 0 obj << /Length 32 0 R /Filter /FlateDecode /Type /XObject /Subtype /Image /Width 19 /Height 18 /ColorSpace /DeviceGray /Interpolate true /BitsPerComponent 8 /SMask 30 0 R >> stream xœÁ ‚@EýÉþÇ/h´h%´›y:øP0ÑPH©Ål¢o°ˆ#há].÷<žeMËŠ¬ó2ì(Ð0ð’/r(Dä!#1ˆÒ¼,$r:hd»<«Î][J€™f„bÙ)ukdì;ºFDV÷çý$£­>ȺWêzLÁ5sdE{éš½Œ6Yr!U!ãYƒ³åˆ)Fœ˜KÞ^Â(§cëÓ$~³‰ù›xi endstream endobj 32 0 obj 180 endobj 33 0 obj << /Length 34 0 R /Filter /FlateDecode /Type /XObject /Subtype /Image /Width 19 /Height 33 /ColorSpace /DeviceGray /Interpolate true /BitsPerComponent 8 >> stream xœc`ÀјعؘPDXøä´äùX5ñGåÆ˜‰"‰±©æ¬Û³¶ÎN˜nÇü{¯ªµà‡Y¢œwèÓŸ/¶eª°ÁôôÞúùÿßó]xaB¦+^ÿýÿïß÷óù2P—°ZoÿôïÿÿÿžÌ²å˜Æé¾ ,ô÷ÞD)ˆ2nÏ­ŸABÿ~\kÑ€¸ËiÛW°Ðß7+MX!Ž0Ùúÿÿë>°a¬¦‹_þ }?ž&ö›aßÝ_`¡Ÿ—kTÁ†±¨–_þ ù÷û~:X£h̰Ðÿ¿/gjC¬ätÙúá/HèÏó¹æì`!vãI÷~Ýÿl®5ÄJyÇ¿@„æ™AƒQÀmÐãÿÿýy>SŒÌJuW¾ÿû÷ÿ÷ Vhà ù-}øóß¿¯Ç •`À¡™·ãÅŸ¯Ö‹Â⎉߲îàƒGg[Œ8q+dW½|ÍD_qx$"jí"ωãâr¢ì¨©€‰™•…yª»_ endstream endobj 34 0 obj 381 endobj 15 0 obj << /Length 35 0 R /Filter /FlateDecode /Type /XObject /Subtype /Image /Width 19 /Height 33 /ColorSpace /DeviceGray /Interpolate true /BitsPerComponent 8 /SMask 33 0 R >> stream xœ’IRÂ@…¹¤÷á î²bßU¡çt:&LA4&R(Pê "&a¥ÿò«×ÿð^×jÿ®œ( ~F̶q`ž" ¢œè$œ1‚¯@Eê3!uÎÈ«N„ê¦m:W+Ù^$Ý^àšR+ÔáV0Ež›‡ NÏâБèðPéîI˜B-eœ~¼\eÉ +x‰ —þ4]¿fOÃJ¹þôq»{ºíKÞ*ÛwtoºÚ}®ï#›“b ‹³í׿aäpX43¡æŽ–o›lìjô¢0 P;Lž·ïéÄÒ"½iúò¼»“]"á çiš 4Ê-šÜêÇÉ|9‚•'å-¢;Ñä&¾¶x§º1Ó ‡¡'Tþ0*-¯çw­ì©5ÐÞXË’ ³« ʸ.‚¿ÉåïSÂðè~‘ ¦¤Ý> stream xœ]± €0 {Oñ;ÁN2#ÐÀþ„H ¡¯^§¿ßIQsÌèFÁ|R¯¬^ØÌÙÔkÕ±ÂKa‘³hë–>œ mM/Nr+ó·ÎÆ[³¿|ù½O4П@ ö endstream endobj 37 0 obj 101 endobj 36 0 obj << /ExtGState << /a0 << /CA 1 /ca 1 >> >> >> endobj 21 0 obj << /Length 39 0 R /Filter /FlateDecode /Type /XObject /Subtype /Form /BBox [ 46 58 68 81 ] /Resources 38 0 R >> stream xœ]» Ã@ C{MÁ hù>’nŒŒ&va¶÷ŒK‚°"yÈŒžsÁôT,—cJÖh†Õ¨‘`A׌ 
Õ©%££ô™3jûšòf®´Ù‡Ê½~Vxý¿}ÉCnåcØ endstream endobj 39 0 obj 102 endobj 38 0 obj << /ExtGState << /a0 << /CA 1 /ca 1 >> >> >> endobj 24 0 obj << /Length 41 0 R /Filter /FlateDecode /Type /XObject /Subtype /Form /BBox [ 65 32 91 57 ] /Resources 40 0 R >> stream xœeŽ;€0 C÷œÂ'é'M{ ŽÀB`î/Q¨òYϲ³“í£b˜õ¤¤\BDŒœD±ÁŒMÂí};+,³“‚à9¶Øãե΋°Ï z z@YœïšØòÃß/¿féÃ"= endstream endobj 41 0 obj 105 endobj 40 0 obj << /ExtGState << /a0 << /CA 1 /ca 1 >> >> >> endobj 27 0 obj << /Length 43 0 R /Filter /FlateDecode /Type /XObject /Subtype /Form /BBox [ 69 36 87 53 ] /Resources 42 0 R >> stream xœeŽ1Ã0 w½‚/`­J¶ägô ]šíäÿ@ÒÂ…‡Bq AqÅç¶—{Á²KëÌRáÎvÊ´8Ù˜¡x!’Z:¬ÑÊàôœ~6º:dÌ@¥¹Ï‚ze×/?ÿù7á!79’Î"o endstream endobj 43 0 obj 106 endobj 42 0 obj << /ExtGState << /a0 << /CA 1 /ca 1 >> >> >> endobj 1 0 obj << /Type /Pages /Kids [ 16 0 R ] /Count 1 >> endobj 44 0 obj << /Creator (cairo 1.14.0 (http://cairographics.org)) /Producer (cairo 1.14.0 (http://cairographics.org)) >> endobj 45 0 obj << /Type /Catalog /Pages 1 0 R >> endobj xref 0 46 0000000000 65535 f 0000023749 00000 n 0000015741 00000 n 0000000015 00000 n 0000015717 00000 n 0000020004 00000 n 0000017040 00000 n 0000016587 00000 n 0000017951 00000 n 0000017498 00000 n 0000018861 00000 n 0000018407 00000 n 0000019774 00000 n 0000019320 00000 n 0000020650 00000 n 0000021702 00000 n 0000015996 00000 n 0000016218 00000 n 0000022307 00000 n 0000016980 00000 n 0000017127 00000 n 0000022665 00000 n 0000017891 00000 n 0000018038 00000 n 0000023024 00000 n 0000018801 00000 n 0000018949 00000 n 0000023386 00000 n 0000019714 00000 n 0000019862 00000 n 0000020228 00000 n 0000020627 00000 n 0000021061 00000 n 0000021084 00000 n 0000021679 00000 n 0000022284 00000 n 0000022592 00000 n 0000022569 00000 n 0000022951 00000 n 0000022928 00000 n 0000023313 00000 n 0000023290 00000 n 0000023676 00000 n 0000023653 00000 n 0000023815 00000 n 0000023943 00000 n trailer << /Size 46 /Root 45 0 R /Info 44 0 R >> startxref 23996 %%EOF photutils-0.4/docs/_static/photutils_banner.svg0000644000214200020070000012633012721611413024255 0ustar lbradleySTSCI\science00000000000000 photutils logoimage/svg+xmlphotutils logoLarry Bradley photutils-0.4/docs/_static/photutils_banner_original.svg0000644000214200020070000003743612721611413026151 0ustar lbradleySTSCI\science00000000000000 photutils logoimage/svg+xmlphotutils logoLarry Bradleyphot utils An Astropy Package for Photometry photutils-0.4/docs/_static/photutils_logo-32x32.png0000644000214200020070000000407112444404542024516 0ustar lbradleySTSCI\science00000000000000‰PNG  IHDR szzôtEXtSoftwareAdobe ImageReadyqÉe<ÛIDATxÚ¤W PT×þÎaaewwQpäµ ò¨ƒ©±¶¤˜„Ñ’¦Ð´3u™Nf´“Nb§í$ÓÎ$mÆ´i’JÓv:MšJ§6ŽŽŒB4JRÌ¡€°°,( ¸¼–ǽýï¹û¸ ¤ÑÊðï½÷<þÇ÷?Îùîá/jçadXeY¢/²¬ýØç.ÿÖ~'<ØÝ ¹¿¶Œ„|‹Hyn„ìTÂ7f§G=}ürþÊëö{VÀøÀ3ÕÄô9­PU ¤úŽÀ{-®%EÜw­@ìÞcVâð±)Ó2/ë‚[Ž€qU$Ü“1~q!ù3•¡7=w‘-ZþaÿKx\ù³Õ`ìÏŒ± EW#ób¿®)|)_ªÀ‡j¾­Í£ ý·nã‹:'º¤5ª]ŒÑ¿úï@$M 3\_ø¨ãsˆÿÚs'Iãj¿ÙlÙ¸…FnGIÎ&¸§<ÂúÓM­(ÍMÅ¥ëÝ„J7Nϧ.±^Ò¸Kâ†]s¼&Э$<ᡫ–ÌHð̘DkÊ^”oJÄW‹2‰¶Âvc,wÃ-¬o•Ö‘±\$Õ<™ƒib†A6ÑûIúÈ_щûr„;*PôTÀœx[PaÆ©K6Ü_Ž÷.·ãüG]ðÎ/ÂûïsÂ=ÍR’r¨ðC%›Ã£ú.äêF`“â•ÉDe[ßBsKI¼PYzID¯BL…‘ÉŠ*¦ÕQÈIKÂ÷_>%"Ü,ßF ëƒ ëÐ(Yˆ¯o­õïUè±ðœðZ‘Ç‘6…¾ÅeÝÓôSÉ•/öÒ†K#8KÂŒ‰oó:ª=ÑF8zz) "ÑD‚…Ÿ¡‰xM”ò~ôIä¡4/ §ó .Î'Óeò˜üây‘ãøŠÜ¯2k—“Ð&¯W… û‹eªë¸,üü¢ñCŒ7ap`œË `âs°ìÞ [·¹›7 ‘¾ÕìàÊö2%bRõ²‰qþ4çY¸‰Jùcšç" ÄýÄ©™ãDýºì<øMl~ð ª"ÚĸÉ'lbzŽa7”ªÊ7ðf)"có¾r€T:`Ä,òùë P:qí<9˜ÏœeHKŽÃØ„'˜ãDÍ3±ˆ¶Áz6 ׈ŸÈ ØÍ{ ´ö AŽÆë7 ÷ŒÂ£ï¬@€´Q­'_;,%8\± ±ñkEdsŸ?ÅÌŽ‡×Œ¢}ËúÝT:ŽÚbð§Å¼à>¢òâl‚?׸o,ˆO«~Ußêƒúð0¤lHÂdæ.d“BèiÞŒq)¶=\%2ÂÂÆU8y!Ó(ìGçgO–cÂãEn˜ «q§²‡q+÷Yà [ ÓX¦<3H4EÃÀfã(}Ìñ&ìKÆÞÊJeY°÷@%*¶¬FjRÅD„ˆ •T^FÊýI…ª’>”aá“@¬¹m*ó§½¢@‘û ÎÜ‹›î™€%Þ 
ƒcSxut¾Ýùs¼ÓÛ‚¶†³xƒ—*gŒH·`T3(‡ £IN…¡µO~}‡@Ì(uSj‹R¢Ñ§ lƒ2øZt^¸ ƒäAO˜ÚyeÇÉð2 ÐD\ÔíG.ìõ×…Œ²W/ň½yF¸ÃĽjuUôûе>Vèjø,HŒÊN4uñªŸ‚i#¨Õî ñ£ßMÚ TàWçÚìð÷Ð7î±÷~ÚÀù²<çè· CgãJjîY´a‡ÔMÏV‘S³ó¡u€öÄ›V•¨–î3׺`I0Á⺎÷ÙV­±õE5C ‹2¶c± {>Á¹+:Œ¹0?ø8jbû±è F?Ñ—¥6¼çÁw¤FŸ1*RÅÙ)ÈN]ׄP\Ë›è¸8Žia‹z|ùý§ÁÉ .±áTd ÓE$;G§ñë_€³êqŠæ8Ì„¯‚‰î% X¿q†h=Î^ÑÁ5> ãŒm}#4v&¨¨)1á‹‘º[õÏÚÅ-ë{¸Mƒ&íA²o¦çbËp¬²Ÿ:FÅx 1û´D ¶ëqV—+Î\À£ÑhjëÇYž-ÎŒ­„Ò8ôTõbñÄÂ%ʘºi>èÝÚ÷„ÂG2 «öe=ÈÛq®uÞÄöoGù}iÂÂSt!Ógw7œÞxjð¯Çü‡ ÎW$~OªUCs0`†Uz\ëº%”8ï™ÃÕÿ â/lÄßš:‘ì;Tü¨OÏRV¹(ãï†eâF$Õ ¼}´N{g핃 u|IJ6·t¢¢h³PBñeEqºˆæê©éÊžIÆÒ”¤®zŠ68þòLÍÒ˜.x;eµ´ºŒTÞ¨ p!z;ªœÿÂäh":œ³¨¿rCX#Mcß| ÞŠÚ­Zå³N^v|€ŠÀ#«yíç6&yGÞ¶Òã 2ùo´»fš… *,Šp…é9}~èÝ+4%Bñ~¼ç­ÃÏßqgd­}ǪÞZeëŠMäÏh>¤ŽHiÍ触ûäS wÝ’&zQZ±•„.·^ZÒÊÇ;ßønÝ=7§¤ˆrG¤žgërÈV+mW=Ñß;^«©¿›f÷Ž›SŠ“Pb9öÖÙÿßÿ¿ æ(HU¿Ž’ŠIEND®B`‚photutils-0.4/docs/_static/photutils_logo.svg0000644000214200020070000003625212721610567023764 0ustar lbradleySTSCI\science00000000000000 photutils logoimage/svg+xmlphotutils logoLarry Bradleyphotutils-0.4/docs/_templates/0000755000214200020070000000000013175654702020672 5ustar lbradleySTSCI\science00000000000000photutils-0.4/docs/_templates/autosummary/0000755000214200020070000000000013175654702023260 5ustar lbradleySTSCI\science00000000000000photutils-0.4/docs/_templates/autosummary/base.rst0000644000214200020070000000037212345377273024731 0ustar lbradleySTSCI\science00000000000000{% extends "autosummary_core/base.rst" %} {# The template this is inherited from is in astropy/sphinx/ext/templates/autosummary_core. If you want to modify this template, it is strongly recommended that you still inherit from the astropy template. #}photutils-0.4/docs/_templates/autosummary/class.rst0000644000214200020070000000037312345377273025125 0ustar lbradleySTSCI\science00000000000000{% extends "autosummary_core/class.rst" %} {# The template this is inherited from is in astropy/sphinx/ext/templates/autosummary_core. If you want to modify this template, it is strongly recommended that you still inherit from the astropy template. #}photutils-0.4/docs/_templates/autosummary/module.rst0000644000214200020070000000037412345377273025306 0ustar lbradleySTSCI\science00000000000000{% extends "autosummary_core/module.rst" %} {# The template this is inherited from is in astropy/sphinx/ext/templates/autosummary_core. If you want to modify this template, it is strongly recommended that you still inherit from the astropy template. #}photutils-0.4/docs/aperture.rst0000644000214200020070000005163613175634532021130 0ustar lbradleySTSCI\science00000000000000Aperture Photometry (`photutils.aperture`) ========================================== Introduction ------------ In Photutils, the :func:`~photutils.aperture_photometry` function is the main tool to perform aperture photometry on an astronomical image for a given set of apertures. Photutils provides several apertures defined in pixel or sky coordinates. The aperture classes that are defined in pixel coordinates are: * `~photutils.CircularAperture` * `~photutils.CircularAnnulus` * `~photutils.EllipticalAperture` * `~photutils.EllipticalAnnulus` * `~photutils.RectangularAperture` * `~photutils.RectangularAnnulus` Each of these classes has a corresponding variant defined in celestial coordinates: * `~photutils.SkyCircularAperture` * `~photutils.SkyCircularAnnulus` * `~photutils.SkyEllipticalAperture` * `~photutils.SkyEllipticalAnnulus` * `~photutils.SkyRectangularAperture` * `~photutils.SkyRectangularAnnulus` To perform aperture photometry with sky-based apertures, one will need to specify a WCS transformation. 
Users can also create their own custom apertures (see
:ref:`custom-apertures`).

.. _creating-aperture-objects:

Creating Aperture Objects
-------------------------

The first step in performing aperture photometry is to create an aperture
object.  An aperture object is defined by a position (or a list of positions)
and parameters that define its size and, possibly, its orientation (e.g., an
elliptical aperture).

We start with an example of creating a circular aperture in pixel coordinates
using the :class:`~photutils.CircularAperture` class::

    >>> from photutils import CircularAperture
    >>> positions = [(30., 30.), (40., 40.)]
    >>> apertures = CircularAperture(positions, r=3.)

The positions should be either a single tuple of ``(x, y)``, a list of
``(x, y)`` tuples, or an array with shape ``Nx2``, where ``N`` is the number
of positions.  The above example defines two circular apertures located at
pixel coordinates ``(30, 30)`` and ``(40, 40)`` with a radius of 3 pixels.

Creating an aperture object in celestial coordinates is similar.  One first
uses the :class:`~astropy.coordinates.SkyCoord` class to define celestial
coordinates and then the :class:`~photutils.SkyCircularAperture` class to
define the aperture object::

    >>> from astropy import units as u
    >>> from astropy.coordinates import SkyCoord
    >>> from photutils import SkyCircularAperture
    >>> positions = SkyCoord(l=[1.2, 2.3] * u.deg, b=[0.1, 0.2] * u.deg,
    ...                      frame='galactic')
    >>> apertures = SkyCircularAperture(positions, r=4. * u.arcsec)

.. note::
    Sky apertures are not defined completely in celestial coordinates.  They
    simply use celestial coordinates to define the central position, and the
    remaining parameters are converted to pixels using the pixel scale of the
    image at the central position.  Projection distortions are not taken into
    account.  If the apertures were defined completely in celestial
    coordinates, their shapes would not be preserved when converting to pixel
    coordinates.

Converting Between Pixel and Sky Apertures
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

The pixel apertures can be converted to sky apertures, and vice versa.

To accomplish this, use the :meth:`~photutils.PixelAperture.to_sky` method
for pixel apertures, e.g.:

.. doctest-skip::

    >>> aperture = CircularAperture((10, 20), r=4.)
    >>> sky_aperture = aperture.to_sky(wcs)

and the :meth:`~photutils.SkyAperture.to_pixel` method for sky apertures,
e.g.:

.. doctest-skip::

    >>> position = SkyCoord(1.2, 0.1, unit='deg', frame='icrs')
    >>> aperture = SkyCircularAperture(position, r=4. * u.arcsec)
    >>> pix_aperture = aperture.to_pixel(wcs)

Performing Aperture Photometry
------------------------------

After the aperture object is created, we can then perform the photometry
using the :func:`~photutils.aperture_photometry` function.  We start by
defining the apertures as described above::

    >>> positions = [(30., 30.), (40., 40.)]
    >>> apertures = CircularAperture(positions, r=3.)

and then we call the :func:`~photutils.aperture_photometry` function with the
data and the apertures::
Since all the data values are 1.0, the aperture sums are equal to the area of a circle with a radius of 3:: >>> print(np.pi * 3. ** 2) # doctest: +FLOAT_CMP 28.2743338823 Aperture and Pixel Overlap -------------------------- The overlap of the apertures with the data pixels can be handled in different ways. For the default method (``method='exact'``), the exact intersection of the aperture with each pixel is calculated. The other options, ``'center'`` and ``'subpixel'``, are faster, but with the expense of less precision. For ``'center'``, a pixel is considered to be entirely in or out of the aperture depending on whether its center is in or out of the aperture. For ``'subpixel'``, pixels are divided into a number of subpixels, which are in or out of the aperture based on their centers. For this method, the number of subpixels needs to be set with the ``subpixels`` keyword. This example uses the ``'subpixel'`` method where pixels are resampled by a factor of 5 (``subpixels=5``) in each dimension:: >>> phot_table = aperture_photometry(data, apertures, method='subpixel', ... subpixels=5) >>> print(phot_table) # doctest: +SKIP id xcenter ycenter aperture_sum pix pix --- ------- ------- ------------ 1 30.0 30.0 27.96 2 40.0 40.0 27.96 Note that the results differ from the true value of 28.274333 (see above). For the ``'subpixel'`` method, the default value is ``subpixels=5``, meaning that each pixel is equally divided into 25 smaller pixels (this is the method and subsampling factor used in SourceExtractor_.). The precision can be increased by increasing ``subpixels``, but note that computation time will be increased. Multiple Apertures at Each Position ----------------------------------- While the `~photutils.Aperture` objects support multiple positions, they must have a fixed shape, e.g. radius, size, and orientation. To perform photometry in multiple apertures at each position, one may input a list of aperture objects to the :func:`~photutils.aperture_photometry` function. Suppose that we wish to use three circular apertures, with radii of 3, 4, and 5 pixels, on each source:: >>> radii = [3., 4., 5.] >>> apertures = [CircularAperture(positions, r=r) for r in radii] >>> phot_table = aperture_photometry(data, apertures) >>> print(phot_table) # doctest: +SKIP id xcenter ycenter aperture_sum_0 aperture_sum_1 aperture_sum_2 pix pix --- ------- ------- -------------- -------------- -------------- 1 30.0 30.0 28.2743338823 50.2654824574 78.5398163397 2 40.0 40.0 28.2743338823 50.2654824574 78.5398163397 For multiple apertures, the output table column names are appended with the ``positions`` index. Other apertures have multiple parameters specifying the aperture size and orientation. For example, for elliptical apertures, one must specify ``a``, ``b``, and ``theta``:: >>> from photutils import EllipticalAperture >>> a = 5. >>> b = 3. >>> theta = np.pi / 4. >>> apertures = EllipticalAperture(positions, a, b, theta) >>> phot_table = aperture_photometry(data, apertures) >>> print(phot_table) # doctest: +SKIP id xcenter ycenter aperture_sum pix pix --- ------- ------- ------------- 1 30.0 30.0 47.1238898038 2 40.0 40.0 47.1238898038 Again, for multiple apertures one should input a list of aperture objects, each with identical positions:: >>> a = [5., 6., 7.] >>> b = [3., 4., 5.] >>> theta = np.pi / 4. >>> apertures = [EllipticalAperture(positions, a=ai, b=bi, theta=theta) ... 
for (ai, bi) in zip(a, b)] >>> phot_table = aperture_photometry(data, apertures) >>> print(phot_table) # doctest: +SKIP id xcenter ycenter aperture_sum_0 aperture_sum_1 aperture_sum_2 pix pix --- ------- ------- -------------- -------------- -------------- 1 30.0 30.0 47.1238898038 75.3982236862 109.955742876 2 40.0 40.0 47.1238898038 75.3982236862 109.955742876 Background Subtraction ---------------------- Global Background Subtraction ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ :func:`~photutils.aperture_photometry` assumes that the data have been background-subtracted. If ``bkg`` is an array representing the background of the data (determined by `~photutils.background.Background2D` or an external function), simply do:: >>> phot_table = aperture_photometry(data - bkg, apertures) # doctest: +SKIP Local Background Subtraction ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Suppose we want to perform the photometry in a circular aperture with a radius of 3 pixels and estimate the local background level around each source with a circular annulus of inner radius 6 pixels and outer radius 8 pixels. We start by defining the apertures:: >>> from photutils import CircularAnnulus >>> apertures = CircularAperture(positions, r=3) >>> annulus_apertures = CircularAnnulus(positions, r_in=6., r_out=8.) We then perform the photometry in both apertures:: >>> apers = [apertures, annulus_apertures] >>> phot_table = aperture_photometry(data, apers) >>> print(phot_table) # doctest: +SKIP id xcenter ycenter aperture_sum_0 aperture_sum_1 pix pix --- ------- ------- -------------- -------------- 1 30.0 30.0 28.2743338823 87.9645943005 2 40.0 40.0 28.2743338823 87.9645943005 Note that we cannot simply subtract the aperture sums because the apertures have different areas. To calculate the mean local background within the circular annulus aperture, we need to divide its sum by its area, which can be calculated using the :meth:`~photutils.CircularAnnulus.area` method:: >>> bkg_mean = phot_table['aperture_sum_1'] / annulus_apertures.area() The background sum within the circular aperture is then the mean local background times the circular aperture area:: >>> bkg_sum = bkg_mean * apertures.area() >>> final_sum = phot_table['aperture_sum_0'] - bkg_sum >>> phot_table['residual_aperture_sum'] = final_sum >>> print(phot_table['residual_aperture_sum']) # doctest: +FLOAT_CMP residual_aperture_sum --------------------- -7.1054273576e-15 -7.1054273576e-15 The result here should be zero because all of the data values are 1.0 (the tiny difference from 0.0 is due to numerical precision). .. _error_estimation: Error Estimation ---------------- If and only if the ``error`` keyword is input to :func:`~photutils.aperture_photometry`, the returned table will include a ``'aperture_sum_err'`` column in addition to ``'aperture_sum'``. ``'aperture_sum_err'`` provides the propagated uncertainty associated with ``'aperture_sum'``. For example, suppose we have previously calculated the error on each pixel's value and saved it in the array ``error``:: >>> error = 0.1 * data >>> phot_table = aperture_photometry(data, apertures, error=error) >>> print(phot_table) # doctest: +SKIP id xcenter ycenter aperture_sum aperture_sum_err pix pix --- ------- ------- ------------- ---------------- 1 30.0 30.0 28.2743338823 0.531736155272 2 40.0 40.0 28.2743338823 0.531736155272 ``'aperture_sum_err'`` values are given by: .. 
math:: \Delta F = \sqrt{\sum_{i \in A} \sigma_{\mathrm{tot}, i}^2} where :math:`\Delta F` is `~photutils.SourceProperties.source_sum_err`, :math:`A` are the non-masked pixels in the aperture, and :math:`\sigma_{\mathrm{tot}, i}` is the input ``error`` array. In the example above, it is assumed that the ``error`` keyword specifies the *total* error -- either it includes Poisson noise due to individual sources or such noise is irrelevant. However, it is often the case that one has calculated a smooth "background-only error" array, which by design doesn't include increased noise on bright pixels. To include Poisson noise from the sources, we can use the :func:`~photutils.utils.calc_total_error` function. Let's assume we have a background-only image called ``bkg_error``. If our data are in units of electrons/s, we would use the exposure time as the effective gain:: >>> from photutils.utils import calc_total_error >>> effective_gain = 500 # seconds >>> error = calc_total_error(data, bkg_error, effective_gain) # doctest: +SKIP >>> phot_table = aperture_photometry(data - bkg, apertures, error=error) # doctest: +SKIP Pixel Masking ------------- Pixels can be ignored/excluded (e.g., bad pixels) from the aperture photometry by providing an image mask via the ``mask`` keyword:: >>> data = np.ones((5, 5)) >>> aperture = CircularAperture((2, 2), 2.) >>> mask = np.zeros_like(data, dtype=bool) >>> data[2, 2] = 100. # bad pixel >>> mask[2, 2] = True >>> t1 = aperture_photometry(data, aperture, mask=mask) >>> print(t1['aperture_sum']) # doctest: +FLOAT_CMP aperture_sum ------------- 11.5663706144 The result is very different if a ``mask`` image is not provided:: >>> t2 = aperture_photometry(data, aperture) >>> print(t2['aperture_sum']) # doctest: +FLOAT_CMP aperture_sum ------------- 111.566370614 Aperture Photometry Using Sky Coordinates ----------------------------------------- As mentioned in :ref:`creating-aperture-objects`, performing photometry using apertures defined in celestial coordinates simply requires defining a "sky" aperture at positions defined by a :class:`~astropy.coordinates.SkyCoord` object. Here we show an example of photometry on real data using a `~photutils.SkyCircularAperture`. We start by loading a Spitzer 4.5 micron image of a region of the Galactic plane:: >>> from photutils import datasets >>> hdu = datasets.load_spitzer_image() # doctest: +REMOTE_DATA >>> catalog = datasets.load_spitzer_catalog() # doctest: +REMOTE_DATA The catalog contains (among other things) the Galactic coordinates of the sources in the image as well as the PSF-fitted fluxes from the official Spitzer data reduction. We define the apertures positions based on the existing catalog positions:: >>> positions = SkyCoord(catalog['l'], catalog['b'], frame='galactic') # doctest: +REMOTE_DATA >>> apertures = SkyCircularAperture(positions, r=4.8 * u.arcsec) # doctest: +REMOTE_DATA Now perform the photometry in these apertures using the ``hdu``. The ``hdu`` object is a FITS HDU that contains the data and a header describing the WCS transformation of the image. The WCS includes the coordinate frame of the image and the projection from celestial to pixel coordinates. The `~photutils.aperture_photometry` function uses the WCS information to automatically convert the apertures defined in celestial coordinates into pixel coordinates:: >>> phot_table = aperture_photometry(hdu, apertures) # doctest: +REMOTE_DATA The Spitzer catalog also contains the official fluxes for the sources, so we can compare to our fluxes. 
Because the Spitzer catalog fluxes are in units of mJy and the data are in
units of MJy/sr, we need to convert units before comparing the results.  The
image data have a pixel scale of 1.2 arcsec/pixel.

>>> import astropy.units as u
>>> factor = (1.2 * u.arcsec) ** 2 / u.pixel
>>> fluxes_catalog = catalog['f4_5']  # doctest: +REMOTE_DATA
>>> converted_aperture_sum = (phot_table['aperture_sum'] *
...                           factor).to(u.mJy / u.pixel)  # doctest: +REMOTE_DATA

Finally, we can plot the comparison of the photometry:

.. doctest-skip::

    >>> import matplotlib.pyplot as plt
    >>> plt.scatter(fluxes_catalog, converted_aperture_sum.value)
    >>> plt.xlabel('Spitzer catalog PSF-fit fluxes ')
    >>> plt.ylabel('Aperture photometry fluxes')

.. plot::

    from astropy import units as u
    from astropy.coordinates import SkyCoord
    from photutils import aperture_photometry, SkyCircularAperture

    # Load dataset
    from photutils import datasets
    hdu = datasets.load_spitzer_image()
    catalog = datasets.load_spitzer_catalog()

    # Set up apertures
    positions = SkyCoord(catalog['l'], catalog['b'], frame='galactic')
    apertures = SkyCircularAperture(positions, r=4.8 * u.arcsec)
    phot_table = aperture_photometry(hdu, apertures)

    # Convert to correct units
    factor = (1.2 * u.arcsec) ** 2 / u.pixel
    fluxes_catalog = catalog['f4_5']
    converted_aperture_sum = (phot_table['aperture_sum'] *
                              factor).to(u.mJy / u.pixel)

    # Plot
    import matplotlib.pyplot as plt
    plt.scatter(fluxes_catalog, converted_aperture_sum.value)
    plt.xlabel('Spitzer catalog PSF-fit fluxes ')
    plt.ylabel('Aperture photometry fluxes')
    plt.plot([40, 100, 450], [40, 100, 450], color='black', lw=2)

Despite using different methods, the two catalogs are in good agreement.  The
aperture photometry fluxes are based on a circular aperture with a radius of
4.8 arcsec.  The Spitzer catalog fluxes were computed using PSF photometry.
Therefore, differences are expected between the two measurements.

Aperture Masks
--------------

All `~photutils.PixelAperture` objects have a
:meth:`~photutils.PixelAperture.to_mask` method that returns a list of
`~photutils.ApertureMask` objects, one for each aperture position.  The
`~photutils.ApertureMask` object contains a cutout of the aperture mask and a
`~photutils.BoundingBox` object that provides the bounding box where the mask
is to be applied.  It also provides a :meth:`~photutils.ApertureMask.to_image`
method to obtain an image of the mask in a 2D array of the given shape, a
:meth:`~photutils.ApertureMask.cutout` method to create a cutout from the
input data over the mask bounding box, and an
:meth:`~photutils.ApertureMask.multiply` method to multiply the aperture mask
with the input data to create a mask-weighted data cutout.  All of these
methods properly handle the cases of partial or no overlap of the aperture
mask with the data.

Let's start by creating an aperture object::

    >>> from photutils import CircularAperture
    >>> positions = [(30., 30.), (40., 40.)]
    >>> apertures = CircularAperture(positions, r=3.)

Now let's create a list of `~photutils.ApertureMask` objects using the
:meth:`~photutils.PixelAperture.to_mask` method::

    >>> masks = apertures.to_mask(method='center')

We can now create an image of the first aperture mask at its position::

    >>> mask = masks[0]
    >>> image = mask.to_image(shape=((200, 200)))

We can also create a cutout from a data image over the mask domain::

    >>> data_cutout = mask.cutout(data)

We can also create a mask-weighted cutout from the data.
Here the circular aperture mask has been multiplied with the data:: >>> data_cutout_aper = mask.multiply(data) .. _custom-apertures: Defining Your Own Custom Apertures ---------------------------------- The :func:`~photutils.aperture_photometry` function can perform aperture photometry in arbitrary apertures. This function accepts any `~photutils.Aperture`-derived objects, such as `~photutils.CircularAperture`. This makes it simple to extend functionality: a new type of aperture photometry simply requires the definition of a new `~photutils.Aperture` subclass. All `~photutils.PixelAperture` subclasses must define a ``bounding_boxes`` property, ``to_mask()`` and ``plot()`` methods, and optionally an ``area()`` method. All `~photutils.SkyAperture` subclasses must implement only a ``to_pixel()`` method. * ``bounding_boxes``: A property defining a list of minimal `~photutils.BoundingBox` objects for the aperture, one at each aperture position. * ``to_mask()``: A method to return a list of `~photutils.ApertureMask` objects, one for each aperture position. * ``area()``: A method to return the exact analytical area (in pixels**2) of the aperture. * ``plot()``: A method to plot the aperture on a `matplotlib.axes.Axes` instance. See Also -------- 1. `IRAF's APPHOT specification [PDF]`_ (Sec. 3.3.5.8 - 3.3.5.9) 2. `SourceExtractor Manual [PDF]`_ (Sec. 9.4 p. 36) .. _SourceExtractor: http://www.astromatic.net/software/sextractor .. _SourceExtractor Manual [PDF]: https://www.astromatic.net/pubsvn/software/sextractor/trunk/doc/sextractor.pdf .. _IRAF's APPHOT specification [PDF]: http://iraf.net/irafdocs/apspec.pdf Reference/API ------------- .. automodapi:: photutils.aperture :no-heading: photutils-0.4/docs/background.rst0000644000214200020070000004544013175634532021414 0ustar lbradleySTSCI\science00000000000000Background Estimation (`photutils.background`) ============================================== Introduction ------------ To accurately measure the photometry and morphological properties of astronomical sources, one requires an accurate estimate of the background, which can be from both the sky and the detector. Similarly, having an accurate estimate of the background noise is important for determining the significance of source detections and for estimating photometric errors. Unfortunately, accurate background and background noise estimation is a difficult task. Further, because astronomical images can cover a wide variety of scenes, there is not a single background estimation method that will always be applicable. Photutils provides tools for estimating the background and background noise in your data, but they will likely require some tweaking to optimize the background estimate for your data. Scalar Background and Noise Estimation -------------------------------------- Simple Statistics ^^^^^^^^^^^^^^^^^ If the background level and noise are relatively constant across an image, the simplest way to estimate these values is to derive scalar quantities using simple approximations. Of course, when computing the image statistics one must take into account the astronomical sources present in the images, which add a positive tail to the distribution of pixel intensities. For example, one may consider using the image median as the background level and the image standard deviation as the 1-sigma background noise, but the resulting values are obviously biased by the presence of real sources. 
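To see why, consider a toy array consisting only of Gaussian background noise
(mean 5, standard deviation 2) plus a small patch of bright "source" pixels.
This is a quick illustrative sketch (the values are arbitrary and the array
is not used in the examples that follow)::

    >>> import numpy as np
    >>> np.random.seed(12345)
    >>> toy = np.random.normal(5., 2., (100, 100))   # pure background
    >>> toy[:10, :10] += 100.                        # bright "source" pixels
    >>> print(np.mean(toy), np.std(toy))    # doctest: +SKIP

Both values come out noticeably above the true background level (5) and noise
(2), because the sources add a positive tail to the distribution of pixel
intensities.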
A slightly better method involves using statistics that are robust against the presence of outliers, such as the biweight location for the background level and biweight scale or `median absolute deviation (MAD) `_ for the background noise estimation. However, for most astronomical scenes these methods will also be biased by the presence of astronomical sources in the image. As an example, we load a synthetic image comprised of 100 sources with a Gaussian-distributed background whose mean is 5 and standard deviation is 2:: >>> from photutils.datasets import make_100gaussians_image >>> data = make_100gaussians_image() Let's plot the image: .. doctest-skip:: >>> import matplotlib.pyplot as plt >>> from astropy.visualization import SqrtStretch >>> from astropy.visualization.mpl_normalize import ImageNormalize >>> norm = ImageNormalize(stretch=SqrtStretch()) >>> plt.imshow(data, norm=norm, origin='lower', cmap='Greys_r') .. plot:: import matplotlib.pyplot as plt from astropy.visualization import SqrtStretch from astropy.visualization.mpl_normalize import ImageNormalize from photutils.datasets import make_100gaussians_image data = make_100gaussians_image() norm = ImageNormalize(stretch=SqrtStretch()) plt.imshow(data, norm=norm, origin='lower', cmap='Greys_r') The image median and biweight location are both larger than the true background level of 5:: >>> import numpy as np >>> from astropy.stats import biweight_location >>> print(np.median(data)) 5.2255295184 >>> print(biweight_location(data)) 5.1867597555 Similarly, using the median absolute deviation to estimate the background noise level gives a value that is larger than the true value of 2:: >>> from astropy.stats import mad_std >>> print(mad_std(data)) # doctest: +FLOAT_CMP 2.1443728009 Sigma Clipping Sources ^^^^^^^^^^^^^^^^^^^^^^ The most widely used technique to remove the sources from the image statistics is called sigma clipping. Briefly, pixels that are above or below a specified sigma level from the median are discarded and the statistics are recalculated. The procedure is typically repeated over a number of iterations or until convergence is reached. This method provides a better estimate of the background and background noise levels:: >>> from astropy.stats import sigma_clipped_stats >>> mean, median, std = sigma_clipped_stats(data, sigma=3.0, iters=5) >>> print((mean, median, std)) # doctest: +FLOAT_CMP (5.1991386516217908, 5.1555874333582912, 2.0942752121329691) Masking Sources ^^^^^^^^^^^^^^^ An even better procedure is to exclude the sources in the image by masking them. Of course, this technique requires one to `identify the sources in the data `_, which in turn depends on the background and background noise. Therefore, this method for estimating the background and background RMS requires an iterative procedure. Photutils provides a convenience function, :func:`~photutils.segmentation.make_source_mask`, for creating source masks. It uses sigma-clipped statistics as the first estimate of the background and noise levels for the source detection. Sources are then identified using image segmentation. Finally, the source masks are dilated to mask more extended regions around the detected sources. Here we use an aggressive 2-sigma detection threshold to maximize the source detections and dilate using a 11x11 box: .. 
doctest-requires:: scipy

    >>> from photutils import make_source_mask
    >>> mask = make_source_mask(data, snr=2, npixels=5, dilate_size=11)
    >>> mean, median, std = sigma_clipped_stats(data, sigma=3.0, mask=mask)
    >>> print((mean, median, std))    # doctest: +FLOAT_CMP
    (5.0010134754755695, 5.0005849056043763, 1.970887100626572)

Of course, the source detection and masking procedure can be iterated further. Even with one iteration we are within 0.02% of the true background and 1.5% of the true background RMS.

.. _scipy: http://www.scipy.org/

2D Background and Noise Estimation
----------------------------------

If the background or the background noise varies across the image, then you will generally want to generate a 2D image of the background and background RMS (or compute these values locally). This can be accomplished by applying the above techniques to subregions of the image. A common procedure is to use sigma-clipped statistics in each mesh of a grid that covers the input data to create a low-resolution background image. The final background or background RMS image can then be generated by interpolating the low-resolution image.

Photutils provides the :class:`~photutils.background.Background2D` class to estimate the 2D background and background noise in an astronomical image. :class:`~photutils.background.Background2D` requires the size of the box (``box_size``) in which to estimate the background. Selecting the box size requires some care by the user. The box size should generally be larger than the typical size of sources in the image, but small enough to encapsulate any background variations. For best results, the box size should also be chosen so that the data are covered by an integer number of boxes in both dimensions. If that is not the case, the ``edge_method`` keyword determines whether to pad or crop the image such that there is an integer multiple of the ``box_size`` in both dimensions.

The background level in each of the meshes is calculated using the function or callable object (e.g. class instance) input via the ``bkg_estimator`` keyword. Photutils provides several background classes that can be used:

* `~photutils.background.MeanBackground`
* `~photutils.background.MedianBackground`
* `~photutils.background.ModeEstimatorBackground`
* `~photutils.background.MMMBackground`
* `~photutils.background.SExtractorBackground`
* `~photutils.background.BiweightLocationBackground`

The default is a `~photutils.background.SExtractorBackground` instance. For this method, the background in each mesh is calculated as ``(2.5 * median) - (1.5 * mean)``. However, if ``(mean - median) / std > 0.3`` then the ``median`` is used instead (despite what the `SExtractor <http://www.astromatic.net/software/sextractor>`_ User's Manual says, this is the method it always uses).

Likewise, the background RMS level in each mesh is calculated using the function or callable object input via the ``bkgrms_estimator`` keyword. Photutils provides the following classes for this purpose:

* `~photutils.background.StdBackgroundRMS`
* `~photutils.background.MADStdBackgroundRMS`
* `~photutils.background.BiweightScaleBackgroundRMS`

For even more flexibility, users may input a custom function or callable object to the ``bkg_estimator`` and/or ``bkgrms_estimator`` keywords.

By default, the ``bkg_estimator`` and ``bkgrms_estimator`` are applied to sigma-clipped data. Sigma clipping is defined by inputting a :class:`astropy.stats.SigmaClip` object to the ``sigma_clip`` keyword. The default is to perform sigma clipping with ``sigma=3`` and ``iters=10``.
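As an illustration of these keywords, one could combine a different background estimator with non-default clipping parameters. This is only a sketch, not a recommended configuration; it assumes the ``data`` array from the example above, and the particular parameter values are arbitrary::

    from astropy.stats import SigmaClip
    from photutils.background import (Background2D, MMMBackground,
                                      MADStdBackgroundRMS)

    # clip at 2.5 sigma with 5 iterations instead of the defaults
    sigma_clip = SigmaClip(sigma=2.5, iters=5)

    # mode-estimator background and MAD-based RMS in each 50x50 mesh
    bkg = Background2D(data, (50, 50), filter_size=(3, 3),
                       sigma_clip=sigma_clip,
                       bkg_estimator=MMMBackground(),
                       bkgrms_estimator=MADStdBackgroundRMS())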
Sigma clipping can be turned off by setting ``sigma_clip=None``. After the background level has been determined in each of the boxes, the low-resolution background image can be median filtered, with a window of size of ``filter_size``, to suppress local under- or overestimations (e.g., due to bright galaxies in a particular box). Likewise, the median filter can be applied only to those boxes where the background level is above a specified threshold (``filter_threshold``). The low-resolution background and background RMS images are resized to the original data size using the function or callable object input via the ``interpolator`` keyword. Photutils provides two interpolator classes: :class:`~photutils.background.BkgZoomInterpolator` (default), which performs spline interpolation, and :class:`~photutils.background.BkgIDWInterpolator`, which uses inverse-distance weighted (IDW) interpolation. For this example, we will create a test image by adding a strong background gradient to the image defined above:: >>> ny, nx = data.shape >>> y, x = np.mgrid[:ny, :nx] >>> gradient = x * y / 5000. >>> data2 = data + gradient >>> plt.imshow(data2, norm=norm, origin='lower', cmap='Greys_r') # doctest: +SKIP .. plot:: import matplotlib.pyplot as plt from astropy.visualization import SqrtStretch from astropy.visualization.mpl_normalize import ImageNormalize from photutils.datasets import make_100gaussians_image data = make_100gaussians_image() ny, nx = data.shape y, x = np.mgrid[:ny, :nx] gradient = x * y / 5000. data2 = data + gradient norm = ImageNormalize(stretch=SqrtStretch()) plt.imshow(data2, norm=norm, origin='lower', cmap='Greys_r') We start by creating a `~photutils.background.Background2D` object using a box size of 50x50 and a 3x3 median filter. We will estimate the background level in each mesh as the sigma-clipped median using an instance of :class:`~photutils.background.MedianBackground`. .. doctest-requires:: scipy >>> from astropy.stats import SigmaClip >>> from photutils import Background2D, MedianBackground >>> sigma_clip = SigmaClip(sigma=3., iters=10) >>> bkg_estimator = MedianBackground() >>> bkg = Background2D(data2, (50, 50), filter_size=(3, 3), ... sigma_clip=sigma_clip, bkg_estimator=bkg_estimator) The 2D background and background RMS images are retrieved using the ``background`` and ``background_rms`` attributes, respectively, on the returned object. The low-resolution versions of these images are stored in the ``background_mesh`` and ``background_rms_mesh`` attributes, respectively. The global median value of the low-resolution background and background RMS image can be accessed with the ``background_median`` and ``background_rms_median`` attributes, respectively: .. doctest-requires:: scipy >>> print(bkg.background_median) 10.8219978626 >>> print(bkg.background_rms_median) 2.29882053968 Let's plot the background image: .. doctest-skip:: >>> plt.imshow(bkg.background, origin='lower', cmap='Greys_r') .. plot:: import matplotlib.pyplot as plt from astropy.stats import SigmaClip from photutils.datasets import make_100gaussians_image from photutils import Background2D, MedianBackground data = make_100gaussians_image() ny, nx = data.shape y, x = np.mgrid[:ny, :nx] gradient = x * y / 5000. 
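    # data2: the synthetic image plus the x*y/5000 gradient defined in the text above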
data2 = data + gradient sigma_clip = SigmaClip(sigma=3., iters=10) bkg_estimator = MedianBackground() bkg = Background2D(data2, (50, 50), filter_size=(3, 3), sigma_clip=sigma_clip, bkg_estimator=bkg_estimator) plt.imshow(bkg.background, origin='lower', cmap='Greys_r') and the background-subtracted image: .. doctest-skip:: >>> plt.imshow(data2 - bkg.background, norm=norm, origin='lower', ... cmap='Greys_r') .. plot:: import matplotlib.pyplot as plt from astropy.stats import SigmaClip from astropy.visualization import SqrtStretch from astropy.visualization.mpl_normalize import ImageNormalize from photutils.datasets import make_100gaussians_image from photutils import Background2D, MedianBackground data = make_100gaussians_image() ny, nx = data.shape y, x = np.mgrid[:ny, :nx] gradient = x * y / 5000. data2 = data + gradient sigma_clip = SigmaClip(sigma=3., iters=10) bkg_estimator = MedianBackground() bkg = Background2D(data2, (50, 50), filter_size=(3, 3), sigma_clip=sigma_clip, bkg_estimator=bkg_estimator) norm = ImageNormalize(stretch=SqrtStretch()) plt.imshow(data2 - bkg.background, norm=norm, origin='lower', cmap='Greys_r') Masking ^^^^^^^ Masks can also be input into `~photutils.background.Background2D`. As described above, this can be employed to mask sources in the image prior to estimating the background levels. Additionally, input masks are often necessary if your data array includes regions without data coverage (e.g., from a rotated image or an image from a mosaic). Otherwise the data values in the regions without coverage (usually zeros or NaNs) will adversely contribute to the background statistics. Let's create such an image and plot it (NOTE: this example requires `scipy`_): .. doctest-requires:: scipy >>> from scipy.ndimage import rotate >>> data3 = rotate(data2, -45.) >>> norm = ImageNormalize(stretch=SqrtStretch()) # doctest: +SKIP >>> plt.imshow(data3, origin='lower', cmap='Greys_r', norm=norm) # doctest: +SKIP .. plot:: from photutils.datasets import make_100gaussians_image from scipy.ndimage.interpolation import rotate from astropy.visualization import SqrtStretch from astropy.visualization.mpl_normalize import ImageNormalize import matplotlib.pyplot as plt data = make_100gaussians_image() ny, nx = data.shape y, x = np.mgrid[:ny, :nx] gradient = x * y / 5000. data2 = data + gradient data3 = rotate(data2, -45.) norm = ImageNormalize(stretch=SqrtStretch()) plt.imshow(data3, origin='lower', cmap='Greys_r', norm=norm) Now we create a coverage mask and input it into `~photutils.background.Background2D` to exclude the regions where we have no data. For real data, one can usually create a coverage mask from a weight or noise image. In this example we also use a smaller box size to help capture the strong gradient in the background: .. doctest-requires:: scipy >>> mask = (data3 == 0) >>> bkg3 = Background2D(data3, (25, 25), filter_size=(3, 3), mask=mask) The input masks are never applied to the returned background image because the input ``mask`` can represent either a coverage mask or a source mask, or a combination of both. Therefore, we need to manually apply the coverage mask to the returned background image: .. doctest-requires:: scipy >>> back3 = bkg3.background * ~mask >>> norm = ImageNormalize(stretch=SqrtStretch()) # doctest: +SKIP >>> plt.imshow(back3, origin='lower', cmap='Greys_r', norm=norm) # doctest: +SKIP .. 
plot:: from photutils.datasets import make_100gaussians_image from photutils.background import Background2D from scipy.ndimage.interpolation import rotate from astropy.visualization import SqrtStretch from astropy.visualization.mpl_normalize import ImageNormalize import matplotlib.pyplot as plt data = make_100gaussians_image() ny, nx = data.shape y, x = np.mgrid[:ny, :nx] gradient = x * y / 5000. data2 = data + gradient data3 = rotate(data2, -45.) mask = (data3 == 0) bkg3 = Background2D(data3, (25, 25), filter_size=(3, 3), mask=mask) back3 = bkg3.background * ~mask norm = ImageNormalize(stretch=SqrtStretch()) plt.imshow(back3, origin='lower', cmap='Greys_r', norm=norm) Finally, let's subtract the background from the image and plot it: .. doctest-skip:: >>> norm = ImageNormalize(stretch=SqrtStretch()) >>> plt.imshow(data3 - back3, origin='lower', cmap='Greys_r', norm=norm) .. plot:: from photutils.datasets import make_100gaussians_image from photutils.background import Background2D from scipy.ndimage.interpolation import rotate import matplotlib.pyplot as plt from astropy.visualization import SqrtStretch from astropy.visualization.mpl_normalize import ImageNormalize data = make_100gaussians_image() ny, nx = data.shape y, x = np.mgrid[:ny, :nx] gradient = x * y / 5000. data2 = data + gradient data3 = rotate(data2, -45.) mask = (data3 == 0) bkg3 = Background2D(data3, (25, 25), filter_size=(3, 3), mask=mask) back3 = bkg3.background * ~mask norm = ImageNormalize(stretch=SqrtStretch()) plt.imshow(data3 - back3, origin='lower', cmap='Greys_r', norm=norm) If there is any small residual background still present in the image, the background subtraction can be improved by masking the sources and/or through further iterations. Plotting Meshes ^^^^^^^^^^^^^^^ Finally, the meshes that were used in generating the 2D background can be plotted on the original image using the :meth:`~photutils.background.Background2D.plot_meshes` method: .. doctest-skip:: >>> plt.imshow(data3, origin='lower', cmap='Greys_r', norm=norm) >>> bkg3.plot_meshes(outlines=True, color='#1f77b4') .. plot:: from photutils.datasets import make_100gaussians_image from photutils.background import Background2D from scipy.ndimage.interpolation import rotate import matplotlib.pyplot as plt from astropy.visualization import SqrtStretch from astropy.visualization.mpl_normalize import ImageNormalize data = make_100gaussians_image() ny, nx = data.shape y, x = np.mgrid[:ny, :nx] gradient = x * y / 5000. data2 = data + gradient data3 = rotate(data2, -45.) mask = (data3 == 0) bkg3 = Background2D(data3, (25, 25), filter_size=(3, 3), mask=mask) back3 = bkg3.background * ~mask norm = ImageNormalize(stretch=SqrtStretch()) plt.imshow(data3, origin='lower', cmap='Greys_r', norm=norm) bkg3.plot_meshes(outlines=True, color='#17becf') The meshes extended beyond the original image on the top and right because :class:`~photutils.background.Background2D`'s default ``edge_method`` is ``'pad'``. Reference/API ------------- .. automodapi:: photutils.background :no-heading: photutils-0.4/docs/centroids.rst0000644000214200020070000000654113175634532021266 0ustar lbradleySTSCI\science00000000000000Centroids (`photutils.centroids`) ================================= Introduction ------------ `photutils.centroids` provides several functions to calculate the centroid of a single source. The centroid methods are: * :func:`~photutils.centroids.centroid_com`: Calculates the object "center of mass" from 2D image moments. 
* :func:`~photutils.centroids.centroid_1dg`: Calculates the centroid by fitting 1D Gaussians to the marginal ``x`` and ``y`` distributions of the data. * :func:`~photutils.centroids.centroid_2dg`: Calculates the centroid by fitting a 2D Gaussian to the 2D distribution of the data. Masks can be input into each of these functions to mask bad pixels. Error arrays can be input into the two fitting methods to weight the fits. Getting Started --------------- Let's extract a single object from a synthetic dataset and find its centroid with each of these methods. For this simple example we will not subtract the background from the data (but in practice, one should subtract the background):: >>> from photutils.datasets import make_4gaussians_image >>> from photutils import centroid_com, centroid_1dg, centroid_2dg >>> data = make_4gaussians_image()[43:79, 76:104] .. doctest-requires:: skimage >>> x1, y1 = centroid_com(data) >>> print((x1, y1)) # doctest: +FLOAT_CMP (13.93157998341213, 17.051234441067088) .. doctest-requires:: scipy >>> x2, y2 = centroid_1dg(data) >>> print((x2, y2)) # doctest: +FLOAT_CMP (14.040352707371396, 16.962306463644801) .. doctest-requires:: scipy, skimage >>> x3, y3 = centroid_2dg(data) >>> print((x3, y3)) # doctest: +FLOAT_CMP (14.002212073733611, 16.996134592982017) Now let's plot the results. Because the centroids are all very similar, we also include an inset plot zoomed in near the centroid: .. plot:: :include-source: from photutils.datasets import make_4gaussians_image from photutils import centroid_com, centroid_1dg, centroid_2dg import matplotlib.pyplot as plt data = make_4gaussians_image()[43:79, 76:104] # extract single object x1, y1 = centroid_com(data) x2, y2 = centroid_1dg(data) x3, y3 = centroid_2dg(data) fig, ax = plt.subplots(1, 1) ax.imshow(data, origin='lower', interpolation='nearest', cmap='viridis') marker = '+' ms, mew = 30, 2. plt.plot(x1, y1, color='#1f77b4', marker=marker, ms=ms, mew=mew) plt.plot(x2, y2, color='#17becf', marker=marker, ms=ms, mew=mew) plt.plot(x3, y3, color='#d62728', marker=marker, ms=ms, mew=mew) from mpl_toolkits.axes_grid1.inset_locator import zoomed_inset_axes from mpl_toolkits.axes_grid1.inset_locator import mark_inset ax2 = zoomed_inset_axes(ax, zoom=6, loc=9) ax2.imshow(data, interpolation='nearest', origin='lower', cmap='viridis', vmin=190, vmax=220) ax2.plot(x1, y1, color='#1f77b4', marker=marker, ms=ms, mew=mew) ax2.plot(x2, y2, color='#17becf', marker=marker, ms=ms, mew=mew) ax2.plot(x3, y3, color='#d62728', marker=marker, ms=ms, mew=mew) ax2.set_xlim(13, 15) ax2.set_ylim(16, 18) mark_inset(ax, ax2, loc1=3, loc2=4, fc='none', ec='0.5') ax2.axes.get_xaxis().set_visible(False) ax2.axes.get_yaxis().set_visible(False) ax.set_xlim(0, data.shape[1]-1) ax.set_ylim(0, data.shape[0]-1) Reference/API ------------- .. automodapi:: photutils.centroids :no-heading: photutils-0.4/docs/changelog.rst0000644000214200020070000000011312721610567021206 0ustar lbradleySTSCI\science00000000000000.. _changelog: ********* Changelog ********* .. include:: ../CHANGES.rst photutils-0.4/docs/conf.py0000644000214200020070000001762413175634532020045 0ustar lbradleySTSCI\science00000000000000# -*- coding: utf-8 -*- # Licensed under a 3-clause BSD style license - see LICENSE.rst # # Astropy documentation build configuration file. # # This file is execfile()d with the current directory set to its containing dir. # # Note that not all possible configuration values are present in this file. # # All configuration values have a default. 
Some values are defined in # the global Astropy configuration which is loaded here before anything else. # See astropy.sphinx.conf for which values are set there. # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # sys.path.insert(0, os.path.abspath('..')) # IMPORTANT: the above commented section was generated by sphinx-quickstart, but # is *NOT* appropriate for astropy or Astropy affiliated packages. It is left # commented out with this explanation to make it clear why this should not be # done. If the sys.path entry above is added, when the astropy.sphinx.conf # import occurs, it will import the *source* version of astropy instead of the # version installed (if invoked as "make html" or directly with sphinx), or the # version in the build directory (if "python setup.py build_sphinx" is used). # Thus, any C-extensions that are needed to build the documentation will *not* # be accessible, and the documentation will not build correctly. import datetime import os import six import sys try: import astropy_helpers except ImportError: # Building from inside the docs/ directory? if os.path.basename(os.getcwd()) == 'docs': a_h_path = os.path.abspath(os.path.join('..', 'astropy_helpers')) if os.path.isdir(a_h_path): sys.path.insert(1, a_h_path) # Load all of the global Astropy configuration from astropy_helpers.sphinx.conf import * # Get configuration information from setup.cfg try: from ConfigParser import ConfigParser except ImportError: from configparser import ConfigParser conf = ConfigParser() conf.read([os.path.join(os.path.dirname(__file__), '..', 'setup.cfg')]) setup_cfg = dict(conf.items('metadata')) # Monkey patch to suppress "nonlocal image URI found" warnings # http://stackoverflow.com/questions/12772927/specifying-an-online-image-in-sphinx-restructuredtext-format import sphinx.environment from docutils.utils import get_source_line def _warn_node(self, msg, node, **kwargs): if not msg.startswith('nonlocal image URI found:'): self._warnfunc(msg, '%s:%s' % get_source_line(node), **kwargs) sphinx.environment.BuildEnvironment.warn_node = _warn_node plot_formats = ['png', 'hires.png', 'pdf', 'svg'] # -- General configuration ---------------------------------------------------- # By default, highlight as Python 3. highlight_language = 'python3' # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.1' # We don't have references to `h5py` ... no need to load the intersphinx mapping file. del intersphinx_mapping['h5py'] # We currently want to link to the latest development version of the astropy docs, # so we override the `intersphinx_mapping` entry pointing to the stable docs version # that is listed in `astropy/sphinx/conf.py`. intersphinx_mapping['astropy'] = ('http://docs.astropy.org/en/latest/', None) # Extend astropy intersphinx_mapping with packages we use here intersphinx_mapping['skimage'] = ('http://scikit-image.org/docs/stable/', None) intersphinx_mapping['gwcs'] = ('http://gwcs.readthedocs.io/en/latest/', None) # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns.append('_templates') # This is added to the end of RST files - a good place to put substitutions to # be used globally. rst_epilog += """ .. 
_Photutils: high-level_API.html """ # -- Project information ------------------------------------------------------ # This does not *have* to match the package name, but typically does project = setup_cfg['package_name'] author = setup_cfg['author'] copyright = '{0}, {1}'.format( datetime.datetime.now().year, setup_cfg['author']) # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. __import__(setup_cfg['package_name']) package = sys.modules[setup_cfg['package_name']] # The short X.Y version. version = package.__version__.split('-', 1)[0] # The full version, including alpha/beta/rc tags. release = package.__version__ # -- Options for HTML output --------------------------------------------------- # A NOTE ON HTML THEMES # The global astropy configuration uses a custom theme, 'bootstrap-astropy', # which is installed along with astropy. A different theme can be used or # the options for this theme can be modified by overriding some of the # variables set in the global configuration. The variables set in the # global configuration are listed below, commented out. html_theme_options = { 'logotext1': 'phot', # white, semi-bold 'logotext2': 'utils', # orange, light 'logotext3': '' # white, light } # Add any paths that contain custom themes here, relative to this directory. # To use a different custom theme, add the directory containing the theme. #html_theme_path = [] # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. To override the custom theme, set this to the # name of a builtin theme or the name of a custom theme in html_theme_path. #html_theme = None # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. from os.path import join html_favicon = join('_static', 'favicon.ico') # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '' # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". html_title = '{0} v{1}'.format(project, release) # Output file base name for HTML help builder. htmlhelp_basename = project + 'doc' # Static files to copy after template files html_static_path = ['_static'] html_style = 'photutils.css' # -- Options for LaTeX output -------------------------------------------------- # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). latex_documents = [('index', project + '.tex', project + u' Documentation', author, 'manual')] latex_logo = '_static/photutils_banner.pdf' # -- Options for manual page output -------------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). 
man_pages = [('index', project.lower(), project + u' Documentation', [author], 1)] ## -- Options for the edit_on_github extension ---------------------------------------- if eval(setup_cfg.get('edit_on_github')): extensions += ['astropy.sphinx.ext.edit_on_github'] versionmod = __import__(setup_cfg['package_name'] + '.version') edit_on_github_project = setup_cfg['github_project'] if versionmod.release: edit_on_github_branch = "v" + versionmod.version else: edit_on_github_branch = "master" edit_on_github_source_root = "" edit_on_github_doc_root = "docs" github_issues_url = 'https://github.com/astropy/photutils/issues/' autodoc_docstring_signature = True nitpicky = True nitpick_ignore = [] for line in open('nitpick-exceptions'): if line.strip() == "" or line.startswith("#"): continue dtype, target = line.split(None, 1) target = target.strip() nitpick_ignore.append((dtype, six.u(target))) photutils-0.4/docs/datasets.rst0000644000214200020070000000350613175634532021102 0ustar lbradleySTSCI\science00000000000000.. _datasets: Datasets (`photutils.datasets`) =============================== Introduction ------------ `photutils.datasets` gives easy access to load or make a few example datasets. The datasets are are mostly images, but they also include PSF models and a source catalog. These datasets are useful for the Photutils documentation, tests, and benchmarks, but also for users that would like to try out or implement new methods for Photutils. Functions that start with ``load_*`` load data files from disk. Very small data files are bundled in the Photutils code repository and are guaranteed to be available. Mid-sized data files are currently available from the `astropy-data`_ repository and loaded into the Astropy cache on the user's machine on first load. Functions that start with ``make_*`` generate simple simulated data (e.g. Gaussian sources on a flat background with Poisson or Gaussian noise). Note that there are other tools like `skymaker`_ that can simulate much more realistic astronomical images. Getting Started --------------- Let's load an example image of M67 with :func:`~photutils.datasets.load_star_image`:: >>> from photutils import datasets >>> hdu = datasets.load_star_image() # doctest: +REMOTE_DATA >>> print(hdu.data.shape) # doctest: +REMOTE_DATA (1059, 1059) ``hdu`` is a FITS `~astropy.io.fits.ImageHDU` object and ``hdu.data`` is a `~numpy.ndarray` object. Let's plot the image: .. plot:: :include-source: from photutils import datasets hdu = datasets.load_star_image() plt.imshow(hdu.data, origin='lower', interpolation='nearest') plt.tight_layout() plt.show() Reference/API ------------- .. automodapi:: photutils.datasets :no-heading: .. _astropy-data: https://github.com/astropy/astropy-data/ .. _skymaker: http://www.astromatic.net/software/skymaker photutils-0.4/docs/detection.rst0000644000214200020070000002112213175634532021242 0ustar lbradleySTSCI\science00000000000000Source Detection (`photutils.detection`) ======================================== Introduction ------------ One generally needs to identify astronomical sources in their data before they can perform photometry or morphological measurements. Photutils provides two functions designed specifically to detect point-like (stellar) sources in an astronomical image. Photutils also provides a function to identify local peaks in an image that are above a specified threshold value. For general-use source detection and extraction of both point-like and extended sources, please see :ref:`image_segmentation`. 
Detecting Stars --------------- Photutils includes two widely-used tools that are used to detect stars in an image, `DAOFIND`_ and IRAF's `starfind`_. :class:`~photutils.DAOStarFinder` is a class that provides an implementation of the `DAOFIND`_ algorithm (`Stetson 1987, PASP 99, 191 `_). It searches images for local density maxima that have a peak amplitude greater than a specified threshold (the threshold is applied to a convolved image) and have a size and shape similar to a defined 2D Gaussian kernel. :class:`~photutils.DAOStarFinder` also provides an estimate of the objects' roundness and sharpness, whose lower and upper bounds can be specified. :class:`~photutils.IRAFStarFinder` is a class that implements IRAF's `starfind`_ algorithm. It is fundamentally similar to :class:`~photutils.DAOStarFinder`, but :class:`~photutils.DAOStarFinder` can use an elliptical Gaussian kernel. One other difference in :class:`~photutils.IRAFStarFinder` is that it calculates the objects' centroid, roundness, and sharpness using image moments. As an example, let's load an image from the bundled datasets and select a subset of the image. We will estimate the background and background noise using sigma-clipped statistics:: >>> from astropy.stats import sigma_clipped_stats >>> from photutils import datasets >>> hdu = datasets.load_star_image() # doctest: +REMOTE_DATA >>> data = hdu.data[0:400, 0:400] # doctest: +REMOTE_DATA >>> mean, median, std = sigma_clipped_stats(data, sigma=3.0, iters=5) # doctest: +REMOTE_DATA >>> print((mean, median, std)) # doctest: +REMOTE_DATA, +FLOAT_CMP (3667.7792400186008, 3649.0, 204.27923665845705) Now we will subtract the background and use an instance of :class:`~photutils.DAOStarFinder` to find the stars in the image that have FWHMs of around 3 pixels and have peaks approximately 5-sigma above the background. Running this class on the data yields an astropy `~astropy.table.Table` containing the results of the star finder: .. doctest-requires:: scipy, skimage >>> from photutils import DAOStarFinder >>> daofind = DAOStarFinder(fwhm=3.0, threshold=5.*std) # doctest: +REMOTE_DATA >>> sources = daofind(data - median) # doctest: +REMOTE_DATA >>> print(sources) # doctest: +REMOTE_DATA id xcentroid ycentroid ... peak flux mag --- ------------- ------------- ... ------ ------------- --------------- 1 144.247567164 6.37979042704 ... 6903.0 5.70143033038 -1.88995955438 2 208.669068628 6.82058053777 ... 7896.0 6.72306730455 -2.06891864748 3 216.926136655 6.5775933198 ... 2195.0 1.66737467591 -0.555083002864 4 351.625190383 8.5459013233 ... 6977.0 5.90092548147 -1.92730032571 5 377.519909958 12.0655009987 ... 1260.0 1.11856203781 -0.121650189969 ... ... ... ... ... ... ... 281 268.049236979 397.925371446 ... 9299.0 6.22022587541 -1.98451538884 282 268.475068392 398.020998272 ... 8754.0 6.05079160593 -1.95453048936 283 299.80943822 398.027911813 ... 8890.0 6.11853416663 -1.96661847383 284 315.689448343 398.70251891 ... 6485.0 5.55471107793 -1.86165368631 285 360.437243037 398.698539555 ... 8079.0 5.26549321379 -1.80359764345 Length = 285 rows Let's plot the image and mark the location of detected sources: .. doctest-skip:: >>> import matplotlib.pyplot as plt >>> from astropy.visualization import SqrtStretch >>> from astropy.visualization.mpl_normalize import ImageNormalize >>> from photutils import CircularAperture >>> positions = (sources['xcentroid'], sources['ycentroid']) >>> apertures = CircularAperture(positions, r=4.) 
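    >>> # overlay the r=4 pixel apertures on a square-root-stretched display of the image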
>>> norm = ImageNormalize(stretch=SqrtStretch()) >>> plt.imshow(data, cmap='Greys', origin='lower', norm=norm) >>> apertures.plot(color='blue', lw=1.5, alpha=0.5) .. plot:: import matplotlib.pyplot as plt from astropy.stats import sigma_clipped_stats from astropy.visualization import SqrtStretch from astropy.visualization.mpl_normalize import ImageNormalize from photutils import datasets, DAOStarFinder, CircularAperture hdu = datasets.load_star_image() data = hdu.data[0:400, 0:400] mean, median, std = sigma_clipped_stats(data, sigma=3.0) daofind = DAOStarFinder(fwhm=3.0, threshold=5.*std) sources = daofind(data - median) positions = (sources['xcentroid'], sources['ycentroid']) apertures = CircularAperture(positions, r=4.) norm = ImageNormalize(stretch=SqrtStretch()) plt.imshow(data, cmap='Greys', origin='lower', norm=norm) apertures.plot(color='blue', lw=1.5, alpha=0.5) Local Peak Detection -------------------- Photutils also includes a :func:`~photutils.detection.find_peaks` function to find local peaks in an image that are above a specified threshold value. Peaks are the local maxima above a specified threshold that are separated by a specified minimum number of pixels. By default, the returned pixel coordinates are always integer-valued (i.e., no centroiding is performed, only the peak pixel is identified). However, :func:`~photutils.detection.find_peaks` may be used to compute centroid coordinates with subpixel precision whenever the optional argument ``subpixel`` is set to `True`. :func:`~photutils.detection.find_peaks` supports a number of additional options, including searching for peaks only within a segmentation image or a specified footprint. Please see the :func:`~photutils.detection.find_peaks` documentation for more options. As simple example, let's find the local peaks in an image that are 10 sigma above the background and a separated by at least 2 pixels: .. doctest-requires:: skimage >>> from astropy.stats import sigma_clipped_stats >>> from photutils.datasets import make_100gaussians_image >>> from photutils import find_peaks >>> data = make_100gaussians_image() >>> mean, median, std = sigma_clipped_stats(data, sigma=3.0) >>> threshold = median + (10.0 * std) >>> tbl = find_peaks(data, threshold, box_size=5) >>> print(tbl[:10]) # print only the first 10 peaks x_peak y_peak peak_value ------ ------ ------------- 233 0 27.4778521972 236 1 27.339519624 289 22 35.8532759965 442 31 30.2399941373 1 40 35.5482863002 89 59 41.2190469279 7 70 33.2880647048 258 75 26.5624808518 463 80 28.7588206692 182 93 38.0885687202 And let's plot the location of the detected peaks in the image: .. doctest-skip:: >>> import matplotlib.pyplot as plt >>> from astropy.visualization import SqrtStretch >>> from astropy.visualization.mpl_normalize import ImageNormalize >>> norm = ImageNormalize(stretch=SqrtStretch()) >>> plt.imshow(data, cmap='Greys_r', origin='lower', norm=norm) >>> plt.plot(tbl['x_peak'], tbl['y_peak'], ls='none', color='cyan', ... marker='+', ms=10, lw=1.5) >>> plt.xlim(0, data.shape[1]-1) >>> plt.ylim(0, data.shape[0]-1) .. 
plot:: from astropy.stats import sigma_clipped_stats from photutils.datasets import make_100gaussians_image from photutils import find_peaks data = make_100gaussians_image() mean, median, std = sigma_clipped_stats(data, sigma=3.0) threshold = median + (10.0 * std) tbl = find_peaks(data, threshold, box_size=5) import matplotlib.pyplot as plt from astropy.visualization import SqrtStretch from astropy.visualization.mpl_normalize import ImageNormalize norm = ImageNormalize(stretch=SqrtStretch()) plt.imshow(data, cmap='Greys_r', origin='lower', norm=norm) plt.plot(tbl['x_peak'], tbl['y_peak'], ls='none', color='cyan', marker='+', ms=10, lw=1.5) plt.xlim(0, data.shape[1]-1) plt.ylim(0, data.shape[0]-1) Reference/API ------------- .. automodapi:: photutils.detection :no-heading: .. _DAOFIND: http://stsdas.stsci.edu/cgi-bin/gethelp.cgi?daofind .. _starfind: http://stsdas.stsci.edu/cgi-bin/gethelp.cgi?starfind photutils-0.4/docs/geometry.rst0000644000214200020070000000047313175634532021125 0ustar lbradleySTSCI\science00000000000000Geometry Functions (`photutils.geometry`) ========================================= Introduction ------------ The `photutils.geometry` package contains low-level geometry functions used mainly by `~photutils.aperture_photometry`. Reference/API ------------- .. automodapi:: photutils.geometry :no-heading: photutils-0.4/docs/getting_started.rst0000644000214200020070000001215613175634532022462 0ustar lbradleySTSCI\science00000000000000Getting Started with Photutils ============================== The following example uses Photutils to find sources in an astronomical image and perform circular aperture photometry on them. We start by loading an image from the bundled datasets and selecting a subset of the image. We then subtract a rough estimate of the background, calculated using the image median: >>> import numpy as np >>> from photutils import datasets >>> hdu = datasets.load_star_image() # doctest: +REMOTE_DATA >>> image = hdu.data[500:700, 500:700].astype(float) # doctest: +REMOTE_DATA >>> image -= np.median(image) # doctest: +REMOTE_DATA In the remainder of this example, we assume that the data is background-subtracted. Photutils supports several source detection algorithms. For this example, we use :class:`~photutils.detection.DAOStarFinder` to detect the stars in the image. We set the detection threshold at the 3-sigma noise level, estimated using the median absolute deviation (`~astropy.stats.mad_std`) of the image. The parameters of the detected sources are returned as an Astropy `~astropy.table.Table`: .. doctest-requires:: scipy, skimage >>> from photutils import DAOStarFinder >>> from astropy.stats import mad_std >>> bkg_sigma = mad_std(image) # doctest: +REMOTE_DATA >>> daofind = DAOStarFinder(fwhm=4., threshold=3.*bkg_sigma) # doctest: +REMOTE_DATA >>> sources = daofind(image) # doctest: +REMOTE_DATA >>> print(sources) # doctest: +REMOTE_DATA id xcentroid ycentroid ... peak flux mag --- ------------- -------------- ... ------ ------------- --------------- 1 182.838658938 0.167670190537 ... 3824.0 2.80283459469 -1.11899367311 2 189.204308134 0.260813525338 ... 4913.0 3.87291850311 -1.47009589582 3 5.79464911433 2.61254240807 ... 7752.0 4.1029107294 -1.53273016937 4 36.8470627804 1.32202279582 ... 8739.0 7.43158178793 -2.17770315441 5 3.2565602452 5.41895201748 ... 6935.0 3.81262984074 -1.45306160673 ... ... ... ... ... ... ... 148 124.313272579 188.305229159 ... 6702.0 6.63585429303 -2.05474210356 149 24.2572074962 194.714942814 ... 
8342.0 3.2671036996 -1.28540729858 150 116.449998422 195.059233325 ... 3299.0 2.87752205766 -1.1475466535 151 18.9580860645 196.342065132 ... 3854.0 2.38352961224 -0.943051379595 152 111.525751196 195.731917995 ... 8109.0 7.9278607401 -2.24789003194 Length = 152 rows Using the list of source locations (``xcentroid`` and ``ycentroid``), we now compute the sum of the pixel values in circular apertures with a radius of 4 pixels. The :func:`~photutils.aperture_photometry` function returns an Astropy `~astropy.table.Table` with the results of the photometry: .. doctest-requires:: scipy, skimage >>> from photutils import aperture_photometry, CircularAperture >>> positions = (sources['xcentroid'], sources['ycentroid']) # doctest: +REMOTE_DATA >>> apertures = CircularAperture(positions, r=4.) # doctest: +REMOTE_DATA >>> phot_table = aperture_photometry(image, apertures) # doctest: +REMOTE_DATA >>> print(phot_table) # doctest: +SKIP id xcenter ycenter aperture_sum pix pix --- ------------------ ------------------- ------------- 1 182.8386589381308 0.16767019053693752 18121.7594837 2 189.20430813403388 0.26081352533766516 29836.5152158 3 5.794649114329246 2.612542408073547 331979.819037 4 36.84706278043582 1.3220227958153257 183705.093284 5 3.2565602452007325 5.418952017476508 349468.978627 ... ... ... ... 148 124.3132725793939 188.30522915858668 45084.8737867 149 24.257207496209027 194.71494281419265 355778.007298 150 116.44999842177826 195.05923332483115 31232.9117818 151 18.958086064485013 196.3420651316401 162076.262752 152 111.52575119605933 195.73191799469373 82795.7145661 Length = 152 rows The sum of the pixel values within the apertures are given in the column ``aperture_sum``. We now plot the image and the defined apertures: .. doctest-skip:: >>> import matplotlib.pyplot as plt >>> plt.imshow(image, cmap='gray_r', origin='lower') >>> apertures.plot(color='blue', lw=1.5, alpha=0.5) .. plot:: import numpy as np import matplotlib.pyplot as plt from astropy.stats import mad_std from photutils import (datasets, DAOStarFinder, aperture_photometry, CircularAperture) hdu = datasets.load_star_image() image = hdu.data[500:700, 500:700].astype(float) image -= np.median(image) bkg_sigma = mad_std(image) daofind = DAOStarFinder(fwhm=4., threshold=3.*bkg_sigma) sources = daofind(image) positions = (sources['xcentroid'], sources['ycentroid']) apertures = CircularAperture(positions, r=4.) phot_table = aperture_photometry(image, apertures) brightest_source_id = phot_table['aperture_sum'].argmax() plt.imshow(image, cmap='gray_r', origin='lower') apertures.plot(color='blue', lw=1.5, alpha=0.5) photutils-0.4/docs/grouping.rst0000644000214200020070000002451213175634532021124 0ustar lbradleySTSCI\science00000000000000Grouping Algorithms =================== Introduction ------------ In Point Spread Function (PSF) photometry, a grouping algorithm is used to separate stars into optimum groups. The stars in each group are defined as those close enough together such that they need to be fit simultaneously, i.e. their profiles overlap. Photoutils currently provides two classes to group stars: * :class:`~photutils.psf.DAOGroup`: An implementation of the `DAOPHOT `_ GROUP algorithm. * :class:`~photutils.psf.DBSCANGroup`: Grouping is based on the `Density-Based Spatial Clustering of Applications with Noise (DBSCAN) `_ algorithm. 
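Both classes share the same calling pattern: construct the object with a critical separation (in pixels) and then call it on a table of stars, which returns a copy of the table with an added ``group_id`` column. The lines below are only a sketch of that pattern (``fwhm`` and ``starlist`` are placeholders here; they are defined in the worked example in the next section):

.. doctest-skip::

    >>> from photutils.psf.groupstars import DAOGroup
    >>> daogroup = DAOGroup(crit_separation=2.5 * fwhm)   # critical separation in pixels
    >>> star_groups = daogroup(starlist)                  # adds a 'group_id' column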
DAOPHOT GROUP
-------------

Stetson, in his seminal paper (`Stetson 1987, PASP 99, 191 `_), provided a simple and powerful grouping algorithm to decide whether or not the profile of a given star extends into the fitting region of any other star. Stetson defines this in terms of a "critical separation" parameter, which is defined as the minimal distance that any two stars must be separated by in order to be in different groups. Stetson gives intuitive reasoning to suggest that the critical separation may be defined as a multiple of the stellar full width at half maximum (FWHM).

Photutils provides an implementation of the DAOPHOT GROUP algorithm in the :class:`~photutils.psf.DAOGroup` class. Let's take a look at a simple example.

First, let's make some Gaussian sources using `~photutils.datasets.make_random_gaussians_table` and `~photutils.datasets.make_gaussian_sources_image`. The former will return a `~astropy.table.Table` containing parameters for 2D Gaussian sources and the latter will make an actual image using that table.

.. plot::
    :include-source:

    from collections import OrderedDict
    import numpy as np
    from photutils.datasets import (make_random_gaussians_table,
                                    make_gaussian_sources_image)
    import matplotlib.pyplot as plt

    n_sources = 350
    sigma_psf = 2.0

    # use an OrderedDict to ensure reproducibility
    params = OrderedDict([('flux', [500, 5000]),
                          ('x_mean', [6, 250]),
                          ('y_mean', [6, 250]),
                          ('x_stddev', [sigma_psf, sigma_psf]),
                          ('y_stddev', [sigma_psf, sigma_psf]),
                          ('theta', [0, np.pi])])

    starlist = make_random_gaussians_table(n_sources, params,
                                           random_state=1234)
    shape = (256, 256)
    sim_image = make_gaussian_sources_image(shape, starlist)

    plt.imshow(sim_image, origin='lower', interpolation='nearest',
               cmap='viridis')
    plt.show()

``starlist`` is an astropy `~astropy.table.Table` of parameters defining the position and shape of the stars.

Next, we need to rename the table columns of the centroid positions so that they agree with the names that `~photutils.psf.DAOGroup` expects. Here we rename ``x_mean`` to ``x_0`` and ``y_mean`` to ``y_0``:

.. doctest-skip::

    >>> starlist['x_mean'].name = 'x_0'
    >>> starlist['y_mean'].name = 'y_0'

Now, let's find the stellar groups. We start by creating a `~photutils.DAOGroup` object. Here we set its ``crit_separation`` parameter to ``2.5 * fwhm``, where the stellar ``fwhm`` was defined above when we created the stars as 2D Gaussians. In general, one will need to measure the FWHM of the stellar profiles.

.. doctest-skip::

    >>> from astropy.stats import gaussian_sigma_to_fwhm
    >>> from photutils.psf.groupstars import DAOGroup
    >>> fwhm = sigma_psf * gaussian_sigma_to_fwhm
    >>> daogroup = DAOGroup(crit_separation=2.5*fwhm)

``daogroup`` is a `~photutils.DAOGroup` instance that can be called like a function, taking a table of stars (e.g. ``starlist``) as input:

.. doctest-skip::

    >>> star_groups = daogroup(starlist)

The ``star_groups`` output is a copy of the input ``starlist`` table, but with an extra column called ``group_id``. This column contains integers that represent the group assigned to each source. Here the grouping algorithm separated the 350 stars into 92 distinct groups:

.. doctest-skip::

    >>> print(max(star_groups['group_id']))
    92

One can use the ``group_by`` functionality from `~astropy.table.Table` to create groups according to ``group_id``: .. doctest-skip:: >>> star_groups = star_groups.group_by('group_id') >>> print(star_groups) flux x_0 y_0 ... amplitude id group_id ------------- ------------- ------------- ... 
------------- --- -------- 1361.83752671 182.958386152 178.708228379 ... 54.1857935158 1 1 4282.41965053 179.998944123 171.437757021 ... 170.392063944 183 1 555.831417775 181.611905957 185.16181342 ... 22.1158294162 222 1 3299.48946968 243.60449392 85.8926967927 ... 131.282514695 2 2 2469.77482553 136.657577889 109.771746713 ... 98.2692179518 3 3 ... ... ... ... ... ... ... 818.132804377 117.787387455 92.4349134636 ... 32.5524699806 313 88 3979.57421702 154.85279495 18.3148180315 ... 158.34222701 318 89 3622.30997136 97.0901736699 50.3565997421 ... 144.127134338 323 90 765.47561385 144.952825542 7.57086675812 ... 30.4573069401 330 91 1508.68165551 54.0404934991 232.693833605 ... 60.0285357567 349 92 Length = 350 rows Finally, let's plot a circular aperture around each star, where stars in the same group have the same aperture color: .. doctest-skip:: >>> from photutils import CircularAperture >>> from photutils.utils import random_cmap >>> plt.imshow(sim_image, origin='lower', interpolation='nearest', ... cmap='Greys_r') >>> cmap = random_cmap(random_state=12345) >>> for i, group in enumerate(star_groups.groups): >>> xypos = np.transpose([group['x_0'], group['y_0']]) >>> ap = CircularAperture(xypos, r=fwhm) >>> ap.plot(color=cmap.colors[i]) >>> plt.show() .. plot:: from collections import OrderedDict import numpy as np from astropy.stats import gaussian_sigma_to_fwhm from photutils.datasets import (make_random_gaussians_table, make_gaussian_sources_image) from photutils.psf.groupstars import DAOGroup from photutils import CircularAperture from photutils.utils import random_cmap import matplotlib.pyplot as plt from matplotlib import rcParams rcParams['image.aspect'] = 1 # to get images with square pixels rcParams['figure.figsize'] = (7, 7) n_sources = 350 sigma_psf = 2.0 # use an OrderedDict to ensure reproducibility params = OrderedDict([('flux', [500, 5000]), ('x_mean', [6, 250]), ('y_mean', [6, 250]), ('x_stddev', [sigma_psf, sigma_psf]), ('y_stddev', [sigma_psf, sigma_psf]), ('theta', [0, np.pi])]) starlist = make_random_gaussians_table(n_sources, params, random_state=1234) shape = (256, 256) sim_image = make_gaussian_sources_image(shape, starlist) starlist['x_mean'].name = 'x_0' starlist['y_mean'].name = 'y_0' fwhm = sigma_psf * gaussian_sigma_to_fwhm daogroup = DAOGroup(crit_separation=2.5*fwhm) star_groups = daogroup(starlist) star_groups = star_groups.group_by('group_id') plt.imshow(sim_image, origin='lower', interpolation='nearest', cmap='Greys_r') cmap = random_cmap(random_state=12345) for i, group in enumerate(star_groups.groups): xypos = np.transpose([group['x_0'], group['y_0']]) ap = CircularAperture(xypos, r=fwhm) ap.plot(color=cmap.colors[i]) DBSCANGroup ----------- Photutils also provides a :class:`~photutils.psf.DBSCANGroup` class to group stars based on the `Density-Based Spatial Clustering of Applications with Noise (DBSCAN) `_ algorithm. :class:`~photutils.psf.DBSCANGroup` provides a more general algorithm than :class:`~photutils.psf.DAOGroup`. Here's a simple example using :class:`~photutils.psf.DBSCANGroup` with ``min_samples=1`` and ``metric=euclidean``. With these parameters, the result is identical to the `~photutils.psf.DAOGroup` algorithm. Note that `scikit-learn `_ must be installed to use :class:`~photutils.psf.DBSCANGroup`. .. 
plot:: from collections import OrderedDict import numpy as np from astropy.stats import gaussian_sigma_to_fwhm from photutils.datasets import (make_random_gaussians_table, make_gaussian_sources_image) from photutils.psf.groupstars import DBSCANGroup from photutils import CircularAperture from photutils.utils import random_cmap import matplotlib.pyplot as plt from matplotlib import rcParams rcParams['image.aspect'] = 1 # to get images with square pixels rcParams['figure.figsize'] = (7, 7) n_sources = 350 sigma_psf = 2.0 # use an OrderedDict to ensure reproducibility params = OrderedDict([('flux', [500, 5000]), ('x_mean', [6, 250]), ('y_mean', [6, 250]), ('x_stddev', [sigma_psf, sigma_psf]), ('y_stddev', [sigma_psf, sigma_psf]), ('theta', [0, np.pi])]) starlist = make_random_gaussians_table(n_sources, params, random_state=1234) shape = (256, 256) sim_image = make_gaussian_sources_image(shape, starlist) starlist['x_mean'].name = 'x_0' starlist['y_mean'].name = 'y_0' fwhm = sigma_psf * gaussian_sigma_to_fwhm group = DBSCANGroup(crit_separation=2.5*fwhm) star_groups = group(starlist) star_groups = star_groups.group_by('group_id') plt.imshow(sim_image, origin='lower', interpolation='nearest', cmap='Greys_r') cmap = random_cmap(random_state=12345) for i, group in enumerate(star_groups.groups): xypos = np.transpose([group['x_0'], group['y_0']]) ap = CircularAperture(xypos, r=fwhm) ap.plot(color=cmap.colors[i]) Reference/API ------------- .. automodapi:: photutils.psf.groupstars :no-heading: photutils-0.4/docs/high-level_API.rst0000644000214200020070000000040013175634532021775 0ustar lbradleySTSCI\science00000000000000API Reference ============= These are the functions and classes available in the top-level ``photutils`` namespace. Other functionality is available by importing specific sub-packages (e.g. `photutils.utils`). .. automodapi:: photutils :no-heading: photutils-0.4/docs/index.rst0000644000214200020070000001003413175634532020373 0ustar lbradleySTSCI\science00000000000000 .. the "raw" directive below is used to hide the title in favor of just the logo being visible .. raw:: html ********* Photutils ********* .. raw:: html .. only:: latex .. image:: _static/photutils_banner.pdf **Photutils** is an `affiliated package `_ of `Astropy`_ to provide tools for detecting and performing photometry of astronomical sources. It is an open source (BSD licensed) Python package. Bug reports, comments, and help with development are very welcome. Photutils at a glance ===================== .. toctree:: :maxdepth: 1 install.rst overview.rst getting_started.rst changelog User Documentation ================== .. toctree:: :maxdepth: 1 background.rst detection.rst grouping.rst aperture.rst psf.rst psf_matching.rst segmentation.rst centroids.rst morphology.rst isophote.rst geometry.rst datasets.rst utils.rst .. toctree:: :maxdepth: 1 high-level_API.rst .. note:: Like much astronomy software, Photutils is an evolving package. The developers make an effort to maintain backwards compatibility, but at times the API may change if there is a benefit to doing so. If there are specific areas you think API stability is important, please let us know as part of the development process! Reporting Issues ================ If you have found a bug in Photutils please report it by creating a new issue on the `Photutils GitHub issue tracker `_. Please include an example that demonstrates the issue that will allow the developers to reproduce and fix the problem. 
You may be asked to also provide information about your operating system and a full Python stack trace. The developers will walk you through obtaining a stack trace if it is necessary. Photutils uses a package of utilities called `astropy-helpers `_ during building and installation. If you have any build or installation issue mentioning the ``astropy_helpers`` or ``ah_bootstrap`` modules please send a report to the `astropy-helpers issue tracker `_. If you are unsure, then it's fine to report to the main Photutils issue tracker. Contributing ============ Like the `Astropy`_ project, Photutils is made both by and for its users. We accept contributions at all levels, spanning the gamut from fixing a typo in the documentation to developing a major new feature. We welcome contributors who will abide by the `Python Software Foundation Code of Conduct `_. Photutils follows the same workflow and coding guidelines as `Astropy`_. The following pages will help you get started with contributing fixes, code, or documentation (no git or GitHub experience necessary): * `How to make a code contribution `_ * `Coding Guidelines `_ * `Try the development version `_ * `Developer Documentation `_ Citing Photutils ================ If you use Photutils, please consider citing the package via its Zenodo record. If you just want the latest release, cite this (follow the link on the badge and then use one of the citation methods on the right): .. image:: https://zenodo.org/badge/2640766.svg :target: https://zenodo.org/badge/latestdoi/2640766 If you want to cite an earlier version, you can `search for photutils on Zenodo `_. Then cite the Zenodo DOI for whatever version(s) of Photutils you are using. photutils-0.4/docs/install.rst0000644000214200020070000001345513175634532020744 0ustar lbradleySTSCI\science00000000000000************ Installation ************ Requirements ============ Photutils has the following strict requirements: * `Python `_ 2.7, 3.4, 3.5 or 3.6 * `Numpy `_ 1.9 or later * `Astropy`_ 2.0 or later * `six `_ Additionally, some functionality is available only if the following optional dependencies are installed: * `Scipy`_ 0.16 or later * `scikit-image`_ 0.11 or later * `scikit-learn `_ 0.18 or later * `matplotlib `_ 1.3 or later .. warning:: While Photutils will import even if these dependencies are not installed, the functionality will be severely limited. It is very strongly recommended that you install `Scipy`_ and `scikit-image`_ to use Photutils. Both are easily installed via `conda`_ or `pip`_. Installing the latest released version ====================================== The latest released (stable) version of Photutils can be installed either with `conda`_ or `pip`_. Using conda ----------- Photutils can be installed with `conda`_ using the `astropy Anaconda channel `_:: conda install -c astropy photutils Using pip --------- To install using `pip`_, simply run:: pip install --no-deps photutils .. note:: You will need a C compiler (e.g. ``gcc`` or ``clang``) to be installed for the installation to succeed. .. note:: The ``--no-deps`` flag is optional, but highly recommended if you already have Numpy and Astropy installed, since otherwise pip will sometimes try to "help" you by upgrading your Numpy and Astropy installations, which may not always be desired. .. note:: If you get a ``PermissionError`` this means that you do not have the required administrative access to install new packages to your Python installation. 
In this case you may consider using the ``--user`` option to install the package into your home directory. You can read more about how to do this in the `pip documentation `_. Do **not** install Photutils or other third-party packages using ``sudo`` unless you are fully aware of the risks.

Installing the latest development version
=========================================

Prerequisites
-------------

You will need `Cython`_ (0.15 or later), a compiler suite, and the development headers for Python and Numpy in order to build Photutils from the source distribution. On Linux, using the package manager for your distribution will usually be the easiest route, while on MacOS X you will need the XCode command line tools. The `instructions for building Numpy from source `_ are also a good resource for setting up your environment to build Python packages.

.. note::

    If you are using MacOS X, you will need the XCode command line tools. One way to get them is to install `XCode `_. If you are using OS X 10.7 (Lion) or later, you must also explicitly install the command line tools. You can do this by opening the XCode application, going to **Preferences**, then **Downloads**, and then, under **Components**, clicking the Install button to the right of **Command Line Tools**. Alternatively, on 10.7 (Lion) or later, you do not need to install XCode; you can download just the command line tools from https://developer.apple.com/downloads/index.action (requires an Apple developer account).

Building and installing manually
--------------------------------

Photutils is being developed on `github`_. The latest development version of the Photutils source code can be retrieved using git::

    git clone https://github.com/astropy/photutils.git

Then, to build and install Photutils (from the root of the source tree)::

    cd photutils
    python setup.py install

Building and installing using pip
---------------------------------

Alternatively, `pip`_ can be used to retrieve, build, and install the latest development version from `github`_::

    pip install --no-deps git+https://github.com/astropy/photutils.git

.. note::

    The ``--no-deps`` flag is optional, but highly recommended if you already have Numpy and Astropy installed, since otherwise pip will sometimes try to "help" you by upgrading your Numpy and Astropy installations, which may not always be desired.

.. note::

    If you get a ``PermissionError`` this means that you do not have the required administrative access to install new packages to your Python installation. In this case you may consider using the ``--user`` option to install the package into your home directory. You can read more about how to do this in the `pip documentation `_. Do **not** install Photutils or other third-party packages using ``sudo`` unless you are fully aware of the risks.

Testing an installed Photutils
==============================

The easiest way to test whether your installed version of Photutils is running correctly is to use the :func:`photutils.test()` function:

.. doctest-skip::

    >>> import photutils
    >>> photutils.test()

The tests should run and report any failures, which you can report to the `Photutils issue tracker `_.

.. note::

    This way of running the tests may not work if you start Python from within the Photutils source distribution directory.

.. _Scipy: http://www.scipy.org/
.. _scikit-image: http://scikit-image.org/
.. _pip: https://pip.pypa.io/en/latest/
.. _conda: http://conda.pydata.org/docs/
.. _Cython: http://cython.org
..
_github: https://github.com/astropy/photutils photutils-0.4/docs/isophote.rst0000644000214200020070000002437013175634532021126 0ustar lbradleySTSCI\science00000000000000Elliptical Isophote Analysis (`photutils.isophote`) =================================================== Introduction ------------ The `~photutils.isophote` package provides tools to fit elliptical isophotes to a galaxy image. The isophotes in the image are measured using an iterative method described by `Jedrzejewski (1987; MNRAS 226, 747) `_. See the documentation of the :class:`~photutils.isophote.Ellipse` class for details about the algorithm. Please also see the :ref:`isophote-faq`. Getting Started --------------- For this example, let's create a simple simulated galaxy image:: >>> import numpy as np >>> from astropy.modeling.models import Gaussian2D >>> from photutils.datasets import make_noise_image >>> g = Gaussian2D(100., 75, 75, 20, 12, theta=40.*np.pi/180.) >>> ny = nx = 150 >>> y, x = np.mgrid[0:ny, 0:nx] >>> noise = make_noise_image((ny, nx), type='gaussian', mean=0., ... stddev=2., random_state=12345) >>> data = g(x, y) + noise .. plot:: import numpy as np import matplotlib.pyplot as plt from astropy.modeling.models import Gaussian2D from photutils.datasets import make_noise_image g = Gaussian2D(100., 75, 75, 20, 12, theta=40.*np.pi/180.) ny = nx = 150 y, x = np.mgrid[0:ny, 0:nx] noise = make_noise_image((ny, nx), type='gaussian', mean=0., stddev=2., random_state=12345) data = g(x, y) + noise plt.imshow(data, origin='lower') We must provide the elliptical isophote fitter with an initial ellipse to be fitted. This ellipse geometry is defined with the `~photutils.isophote.EllipseGeometry` class. Here we'll define an initial ellipse whose position angle is offset from the data:: >>> from photutils.isophote import EllipseGeometry >>> geometry = EllipseGeometry(x0=75, y0=75, sma=20, eps=0.5, ... pa=20.*np.pi/180.) Let's show this initial ellipse guess: .. doctest-skip:: >>> import matplotlib.pyplot as plt >>> from photutils import EllipticalAperture >>> aper = EllipticalAperture((geometry.x0, geometry.y0), geometry.sma, ... geometry.sma*(1 - geometry.eps), ... geometry.pa) >>> plt.imshow(data, origin='lower') >>> aper.plot(color='white') .. plot:: import numpy as np import matplotlib.pyplot as plt from astropy.modeling.models import Gaussian2D from photutils.datasets import make_noise_image from photutils.isophote import EllipseGeometry from photutils import EllipticalAperture g = Gaussian2D(100., 75, 75, 20, 12, theta=40.*np.pi/180.) ny = nx = 150 y, x = np.mgrid[0:ny, 0:nx] noise = make_noise_image((ny, nx), type='gaussian', mean=0., stddev=2., random_state=12345) data = g(x, y) + noise geometry = EllipseGeometry(x0=75, y0=75, sma=20, eps=0.5, pa=20.*np.pi/180.) aper = EllipticalAperture((geometry.x0, geometry.y0), geometry.sma, geometry.sma*(1 - geometry.eps), geometry.pa) plt.imshow(data, origin='lower') aper.plot(color='white') Next, we create an instance of the `~photutils.isophote.Ellipse` class, inputting the data to be fitted and the initial ellipse geometry object:: >>> from photutils.isophote import Ellipse >>> ellipse = Ellipse(data, geometry) To perform the elliptical isophote fit, we run the :meth:`~photutils.isophote.Ellipse.fit_image` method: .. doctest-requires:: scipy >>> isolist = ellipse.fit_image() The result is a list of isophotes as an `~photutils.isophote.IsophoteList` object, whose attributes are the fit values for each `~photutils.isophote.Isophote` sorted by the semimajor axis length. 
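By default, :meth:`~photutils.isophote.Ellipse.fit_image` starts from the input geometry and steps through semimajor-axis lengths until its stopping criteria are met, but the fit range and step can also be set explicitly. The following is a minimal sketch only (the keyword values are illustrative, and the keyword names should be checked against the :meth:`~photutils.isophote.Ellipse.fit_image` API reference):

.. doctest-skip::

    >>> # restrict the fit to semimajor-axis lengths between 5 and 60 pixels,
    >>> # growing the ellipse geometrically by 10% per step
    >>> isolist = ellipse.fit_image(minsma=5., maxsma=60., step=0.1)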
Let's print the fit position angles (radians): .. doctest-requires:: scipy >>> print(isolist.pa) # doctest: +SKIP [ 0. 0.16838914 0.18453378 0.20310945 0.22534975 0.25007781 0.28377499 0.32494582 0.38589202 0.40480013 0.39527698 0.38448771 0.40207495 0.40207495 0.28201524 0.28201524 0.19889817 0.1364335 0.1364335 0.13405719 0.17848892 0.25687327 0.35750355 0.64882699 0.72489435 0.91472008 0.94219702 0.87393299 0.82572916 0.7886367 0.75523282 0.7125274 0.70481612 0.7120097 0.71250791 0.69707669 0.7004807 0.70709823 0.69808124 0.68621341 0.69437566 0.70548293 0.70427021 0.69978326 0.70410887 0.69532744 0.69440413 0.70062534 0.68614488 0.7177538 0.7177538 0.7029571 0.7029571 0.7029571 ] We can also show the isophote values as a table, which is again sorted by the semimajor axis length (``sma``): .. doctest-requires:: scipy >>> print(isolist.to_table()) # doctest: +SKIP sma intens intens_err ... flag niter stop_code ... -------------- --------------- --------------- ... ---- ----- --------- 0.0 102.237692914 0.0 ... 0 0 0 0.534697261283 101.212218041 0.0280377938856 ... 0 10 0 0.588166987411 101.095404456 0.027821598428 ... 0 10 0 0.646983686152 100.971770355 0.0272405762608 ... 0 10 0 0.711682054767 100.842254551 0.0262991125932 ... 0 10 0 ... ... ... ... ... ... ... 51.874849202 3.44800874483 0.0881592058138 ... 0 50 2 57.0623341222 1.64031530995 0.0913122295433 ... 0 50 2 62.7685675344 0.692631010404 0.0786846787635 ... 0 32 0 69.0454242879 0.294659388337 0.0681758007533 ... 0 8 5 75.9499667166 0.0534892334515 0.0692483210903 ... 0 2 5 Length = 54 rows Let's plot the ellipticity, position angle, and the center x and y position as a function of the semimajor axis length: .. plot:: import matplotlib.pyplot as plt from astropy.modeling.models import Gaussian2D from photutils.datasets import make_noise_image from photutils.isophote import EllipseGeometry, Ellipse g = Gaussian2D(100., 75, 75, 20, 12, theta=40.*np.pi/180.) ny = nx = 150 y, x = np.mgrid[0:ny, 0:nx] noise = make_noise_image((ny, nx), type='gaussian', mean=0., stddev=2., random_state=12345) data = g(x, y) + noise geometry = EllipseGeometry(x0=75, y0=75, sma=20, eps=0.5, pa=20.*np.pi/180.) ellipse = Ellipse(data, geometry) isolist = ellipse.fit_image() plt.figure(figsize=(8, 8)) plt.subplots_adjust(hspace=0.35, wspace=0.35) plt.subplot(2, 2, 1) plt.errorbar(isolist.sma, isolist.eps, yerr=isolist.ellip_err, fmt='o', markersize=4) plt.xlabel('Semimajor Axis Length (pix)') plt.ylabel('Ellipticity') plt.subplot(2, 2, 2) plt.errorbar(isolist.sma, isolist.pa/np.pi*180., yerr=isolist.pa_err/np.pi* 80., fmt='o', markersize=4) plt.xlabel('Semimajor Axis Length (pix)') plt.ylabel('PA (deg)') plt.subplot(2, 2, 3) plt.errorbar(isolist.sma, isolist.x0, yerr=isolist.x0_err, fmt='o', markersize=4) plt.xlabel('Semimajor Axis Length (pix)') plt.ylabel('x0') plt.subplot(2, 2, 4) plt.errorbar(isolist.sma, isolist.y0, yerr=isolist.y0_err, fmt='o', markersize=4) plt.xlabel('Semimajor Axis Length (pix)') plt.ylabel('y0') We can build an elliptical model image from the `~photutils.isophote.IsophoteList` object using the :func:`~photutils.isophote.build_ellipse_model` function ( NOTE: this function requires `scipy `_): .. doctest-requires:: scipy >>> from photutils.isophote import build_ellipse_model >>> model_image = build_ellipse_model(data.shape, isolist) >>> residual = data - model_image Finally, let's plot the original data, overplotted with some of the isophotes, the elliptical model image, and the residual image: .. 
plot:: import matplotlib.pyplot as plt from astropy.modeling.models import Gaussian2D from photutils.datasets import make_noise_image from photutils.isophote import EllipseGeometry, Ellipse from photutils.isophote import build_ellipse_model g = Gaussian2D(100., 75, 75, 20, 12, theta=40.*np.pi/180.) ny = nx = 150 y, x = np.mgrid[0:ny, 0:nx] noise = make_noise_image((ny, nx), type='gaussian', mean=0., stddev=2., random_state=12345) data = g(x, y) + noise geometry = EllipseGeometry(x0=75, y0=75, sma=20, eps=0.5, pa=20.*np.pi/180.) ellipse = Ellipse(data, geometry) isolist = ellipse.fit_image() model_image = build_ellipse_model(data.shape, isolist) residual = data - model_image fig, (ax1, ax2, ax3) = plt.subplots(figsize=(14, 5), nrows=1, ncols=3) fig.subplots_adjust(left=0.04, right=0.98, bottom=0.02, top=0.98) ax1.imshow(data, origin='lower') ax1.set_title('Data') smas = np.linspace(10, 50, 5) for sma in smas: iso = isolist.get_closest(sma) x, y, = iso.sampled_coordinates() ax1.plot(x, y, color='white') ax2.imshow(model_image, origin='lower') ax2.set_title('Ellipse Model') ax3.imshow(residual, origin='lower') ax3.set_title('Residual') Additional Example Notebooks (online) ------------------------------------- Additional example notebooks showing examples with real data and advanced usage are available online: * `Basic example of the Ellipse fitting tool `_ * `Running Ellipse with sigma-clipping `_ * `Building an image model from results obtained by Ellipse fitting `_ * `Advanced Ellipse example: multi-band photometry and masked arrays `_ Reference/API ------------- .. automodapi:: photutils.isophote :no-heading: .. toctree:: :hidden: isophote_faq.rst photutils-0.4/docs/isophote_faq.rst0000644000214200020070000002242413175634532021753 0ustar lbradleySTSCI\science00000000000000.. _isophote-faq: Isophote Frequently Asked Questions ----------------------------------- .. _harmonic_ampl: 1. What are the basic equations relating harmonic amplitudes to geometrical parameter updates? ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ The basic elliptical isophote fitting algorithm, as described in `Jedrzejewski (1987; MNRAS 226, 747) `_ , computes corrections for the current ellipse's geometrical parameters by essentially "projecting" the fitted harmonic amplitudes onto the image plane: .. math:: {\delta}_{X0} = \frac {-B_{1}} {I'} .. math:: {\delta}_{Y0} = \frac {-A_{1} (1 - {\epsilon})} {I'} .. math:: {\delta}_{\epsilon} = \frac {-2 B_{2} (1 - {\epsilon})} {I' a_0} .. math:: {\delta}_{\Theta} = \frac {2 A_{2} (1 - {\epsilon})} {I' a_0 [(1 - {\epsilon}) ^ 2 - 1 ]} where :math:`\epsilon` is the ellipticity, :math:`\Theta` is the position angle, :math:`A_i` and :math:`B_i` are the harmonic coefficients, and :math:`I'` is the derivative of the intensity along the major axis direction evaluated at a semimajor axis length of :math:`a_0`. 2. Why use "ellipticity" instead of the canonical ellipse eccentricity? ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ The main reason is that ellipticity, defined as .. math:: \epsilon = 1 - \frac{b}{a} better relates with the visual "flattening" of an ellipse. By looking at a flattened circle it is easy to guess its ellipticity, as say 0.1. The same ellipse has an eccentricity of 0.44, which is not obvious from visual inspection. The quantities relate as .. math:: Ecc = \sqrt{1 - (1 - {\epsilon})^2} 3. How is the radial gradient estimated? 
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ The radial intensity gradient is the most critical quantity computed by the fitting algorithm. As can be seen from the above formulae, small :math:`I'` values lead to large values for the correction terms. Thus, :math:`I'` errors may lead to large fluctuations in these terms, when :math:`I'` itself is small. This happens usually at the fainter, outer regions of galaxy images. `Busko (1996; ASPC 101, 139) `_ found by numerical experiments that the precision to which a given ellipse can be fitted is related to the relative error in the local radial gradient. Because of the gradient's critical role, the algorithm has a number of features to allow its estimation even under difficult conditions. The default gradient computation, the one used by the algorithm when it first starts to fit a new isophote, is based on the extraction of two intensity samples: #1 at the current ellipse position, and #2 at a similar ellipse with a 10% larger semimajor axis. If the gradient so estimated is not meaningful, the algorithm extracts another #2 sample, this time using a 20% larger radius. In this context, a meaningful gradient means "shallower", but still close to within a factor 3 from the previous isophote's gradient estimate. If still no meaningful gradient can be measured, the algorithm uses the value measured at the last fitted isophote, but decreased (in absolute value) by a factor 0.8. This factor is roughly what is expected from semimajor-axis geometrical-sampling steps of 10 - 20% and a deVaucouleurs law or an exponential disk in its inner region (r <~ 5 req). When using the last isophote's gradient as estimator for the current one, the current gradient error cannot be computed and is set to `None`. As a last resort, if no previous gradient estimate is available, the algorithm just guesses the current value by setting it to be (minus) 10% of the mean intensity at sample #1. This case usually happens only at the first isophote fitted by the algorithm. The use of approximate gradient estimators may seem in contradiction with the fact that isophote fitting errors depend on gradient error, as well as with the fact that the algorithm itself is so sensitive to the gradient value. The rationale behind the use of approximate estimators, however, is based on the fact that the gradient value is used only to compute increments, not the ellipse parameters themselves. Approximate estimators are useful along the first steps in the iteration sequence, in particular when local image contamination (stars, defects, etc.) might make it difficult to find the correct path towards the solution. However, if the gradient is still not well determined at convergence, the subsequent error computations, and the algorithm's behavior from that point on, will take the fact into account properly. For instance, the 3rd and 4th harmonic amplitude errors depend on the gradient relative error, and if this is not computable at the current isophote, the algorithm uses a reasonable estimate (80% of the value at the last successful isophote) in order to generate sensible estimates for those harmonic errors. 4. How are the errors estimated? ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Most parameters computed directly at each isophote have their errors computed by standard error propagation. Errors in the ellipse geometry parameters, on the other hand, cannot be estimated in the same way, since these parameters are not computed directly but result from a number of updates from a starting guess value. 
An error analysis based on numerical experiments (`Busko 1996; ASPC 101, 139 `_) showed that the best error estimators for these geometrical parameters can be found by simply "projecting" the harmonic amplitude errors that come from the least-squares covariance matrix by the same formulae in :ref:`Question 1 ` above used to "project" the associated parameter updates. In other words, errors for the ellipse center, ellipticity, and position angle are computed by the same formulae as in :ref:`Question 1 `, but replacing the least-squares amplitudes by their errors. This is empirical and difficult to justify in terms of any theoretical error analysis, but it produces sensible error estimators in practice. 5. How is the image sampled? ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ When sampling is done using elliptical sectors (mean or median modes), the algorithm described in `Jedrzejewski (1987; MNRAS 226, 747) `_ uses an elaborate, high-precision scheme to take into account partial pixels that lie along elliptical sector boundaries. In the current implementation of the `~photutils.isophote.Ellipse` algorithm, this method was not implemented. Instead, pixels at sector boundaries are either fully included or discarded, depending on the precise position of their centers in relation to the elliptical geometric locus corresponding to the current ellipse. This design decision is based on two arguments: (i) it would be difficult to include partial pixels in median computation, and (ii) speed. Even when the chosen integration mode is not bilinear, the sampling algorithm resorts to it in case the number of sampled pixels inside any given sector is less than 5. It was found that bilinear mode gives smoother samples in those cases. Tests performed with artificial images showed that cosmic rays and defective pixels can be very effectively removed from the fit by a combination of median sampling and sigma-clipping. 6. How reliable are the fluxes computed by the `~photutils.isophote.Ellipse` algorithm? ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ The integrated fluxes and areas computed by `~photutils.isophote.Ellipse` were checked against results produced by the IRAF ``noao.digiphot.apphot`` tasks ``phot`` and ``polyphot``, using artificial images. Quantities computed by `~photutils.isophote.Ellipse` match the reference ones within < 0.1% in all tested cases. 7. How does the object centerer work? ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ The `~photutils.isophote.EllipseGeometry` class has a :meth:`~photutils.isophote.EllipseGeometry.find_center` method that runs an "object locator" around the input object coordinates. This routine performs a scan over a 10x10 pixel window centered on the input object coordinates. At each scan position, it extracts two concentric, roughly circular samples with radii 4 and 8 pixels. It then computes a signal-to-noise-like criterion using the intensity averages and standard deviations at each annulus: .. math:: c = \frac{f_{1} - f_{2}}{{\sqrt{\sigma_{1}^{2} + \sigma_{2}^{2}}}} and locates the pixel inside the scanned window where this criterion is a maximum. If the criterion so computed exceeds a given threshold, it assumes that a suitable object was detected at that position. The default threshold value is set to 0.1. This value, and the annuli and window sizes currently used, were found by trial and error using a number of both artificial and real galaxy images. 
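As a rough illustration, the locator can be invoked directly through :meth:`~photutils.isophote.EllipseGeometry.find_center`. The sketch below reuses the simulated ``data`` array from the isophote examples, and the exact keyword arguments (e.g., how the ``threshold`` is passed) should be checked against the API reference:

.. doctest-skip::

    >>> from photutils.isophote import EllipseGeometry
    >>> geometry = EllipseGeometry(x0=75, y0=75, sma=20, eps=0.5, pa=0.35)
    >>> # scan a small window around (x0, y0); if the criterion above
    >>> # exceeds the threshold, x0 and y0 are updated in place
    >>> geometry.find_center(data)
    >>> print(geometry.x0, geometry.y0)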
It was found that very flattened galaxy images (ellipticity ~ 0.7) cannot be detected by such a simple algorithm. By increasing the threshold value the object locator becomes more strict, in the sense that it will not detect faint objects. To turn off the object locator, set the threshold to a value >> 1 in `~photutils.isophote.Ellipse`. This will prevent it from modifying whatever values for the center coordinates were given to the `~photutils.isophote.Ellipse` algorithm. photutils-0.4/docs/make.bat0000644000214200020070000001064112345377273020147 0ustar lbradleySTSCI\science00000000000000@ECHO OFF REM Command file for Sphinx documentation if "%SPHINXBUILD%" == "" ( set SPHINXBUILD=sphinx-build ) set BUILDDIR=_build set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% . if NOT "%PAPER%" == "" ( set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS% ) if "%1" == "" goto help if "%1" == "help" ( :help echo.Please use `make ^` where ^ is one of echo. html to make standalone HTML files echo. dirhtml to make HTML files named index.html in directories echo. singlehtml to make a single large HTML file echo. pickle to make pickle files echo. json to make JSON files echo. htmlhelp to make HTML files and a HTML help project echo. qthelp to make HTML files and a qthelp project echo. devhelp to make HTML files and a Devhelp project echo. epub to make an epub echo. latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter echo. text to make text files echo. man to make manual pages echo. changes to make an overview over all changed/added/deprecated items echo. linkcheck to check all external links for integrity echo. doctest to run all doctests embedded in the documentation if enabled goto end ) if "%1" == "clean" ( for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i del /q /s %BUILDDIR%\* goto end ) if "%1" == "html" ( %SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html if errorlevel 1 exit /b 1 echo. echo.Build finished. The HTML pages are in %BUILDDIR%/html. goto end ) if "%1" == "dirhtml" ( %SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml if errorlevel 1 exit /b 1 echo. echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml. goto end ) if "%1" == "singlehtml" ( %SPHINXBUILD% -b singlehtml %ALLSPHINXOPTS% %BUILDDIR%/singlehtml if errorlevel 1 exit /b 1 echo. echo.Build finished. The HTML pages are in %BUILDDIR%/singlehtml. goto end ) if "%1" == "pickle" ( %SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle if errorlevel 1 exit /b 1 echo. echo.Build finished; now you can process the pickle files. goto end ) if "%1" == "json" ( %SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json if errorlevel 1 exit /b 1 echo. echo.Build finished; now you can process the JSON files. goto end ) if "%1" == "htmlhelp" ( %SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp if errorlevel 1 exit /b 1 echo. echo.Build finished; now you can run HTML Help Workshop with the ^ .hhp project file in %BUILDDIR%/htmlhelp. goto end ) if "%1" == "qthelp" ( %SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp if errorlevel 1 exit /b 1 echo. echo.Build finished; now you can run "qcollectiongenerator" with the ^ .qhcp project file in %BUILDDIR%/qthelp, like this: echo.^> qcollectiongenerator %BUILDDIR%\qthelp\Astropy.qhcp echo.To view the help file: echo.^> assistant -collectionFile %BUILDDIR%\qthelp\Astropy.ghc goto end ) if "%1" == "devhelp" ( %SPHINXBUILD% -b devhelp %ALLSPHINXOPTS% %BUILDDIR%/devhelp if errorlevel 1 exit /b 1 echo. echo.Build finished. 
goto end ) if "%1" == "epub" ( %SPHINXBUILD% -b epub %ALLSPHINXOPTS% %BUILDDIR%/epub if errorlevel 1 exit /b 1 echo. echo.Build finished. The epub file is in %BUILDDIR%/epub. goto end ) if "%1" == "latex" ( %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex if errorlevel 1 exit /b 1 echo. echo.Build finished; the LaTeX files are in %BUILDDIR%/latex. goto end ) if "%1" == "text" ( %SPHINXBUILD% -b text %ALLSPHINXOPTS% %BUILDDIR%/text if errorlevel 1 exit /b 1 echo. echo.Build finished. The text files are in %BUILDDIR%/text. goto end ) if "%1" == "man" ( %SPHINXBUILD% -b man %ALLSPHINXOPTS% %BUILDDIR%/man if errorlevel 1 exit /b 1 echo. echo.Build finished. The manual pages are in %BUILDDIR%/man. goto end ) if "%1" == "changes" ( %SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes if errorlevel 1 exit /b 1 echo. echo.The overview file is in %BUILDDIR%/changes. goto end ) if "%1" == "linkcheck" ( %SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck if errorlevel 1 exit /b 1 echo. echo.Link check complete; look for any errors in the above output ^ or in %BUILDDIR%/linkcheck/output.txt. goto end ) if "%1" == "doctest" ( %SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest if errorlevel 1 exit /b 1 echo. echo.Testing of doctests in the sources finished, look at the ^ results in %BUILDDIR%/doctest/output.txt. goto end ) :end photutils-0.4/docs/Makefile0000644000214200020070000001074512773502634020203 0ustar lbradleySTSCI\science00000000000000# Makefile for Sphinx documentation # # You can set these variables from the command line. SPHINXOPTS = SPHINXBUILD = sphinx-build PAPER = BUILDDIR = _build # Internal variables. PAPEROPT_a4 = -D latex_paper_size=a4 PAPEROPT_letter = -D latex_paper_size=letter ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . .PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest #This is needed with git because git doesn't create a dir if it's empty $(shell [ -d "_static" ] || mkdir -p _static) help: @echo "Please use \`make ' where is one of" @echo " html to make standalone HTML files" @echo " dirhtml to make HTML files named index.html in directories" @echo " singlehtml to make a single large HTML file" @echo " pickle to make pickle files" @echo " json to make JSON files" @echo " htmlhelp to make HTML files and a HTML help project" @echo " qthelp to make HTML files and a qthelp project" @echo " devhelp to make HTML files and a Devhelp project" @echo " epub to make an epub" @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" @echo " latexpdf to make LaTeX files and run them through pdflatex" @echo " text to make text files" @echo " man to make manual pages" @echo " changes to make an overview of all changed/added/deprecated items" @echo " linkcheck to check all external links for integrity" clean: -rm -rf $(BUILDDIR) -rm -rf api -rm -rf generated html: $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." dirhtml: $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." singlehtml: $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml @echo @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." pickle: $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle @echo @echo "Build finished; now you can process the pickle files." 
json: $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json @echo @echo "Build finished; now you can process the JSON files." htmlhelp: $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp @echo @echo "Build finished; now you can run HTML Help Workshop with the" \ ".hhp project file in $(BUILDDIR)/htmlhelp." qthelp: $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp @echo @echo "Build finished; now you can run "qcollectiongenerator" with the" \ ".qhcp project file in $(BUILDDIR)/qthelp, like this:" @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/Astropy.qhcp" @echo "To view the help file:" @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/Astropy.qhc" devhelp: $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp @echo @echo "Build finished." @echo "To view the help file:" @echo "# mkdir -p $$HOME/.local/share/devhelp/Astropy" @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/Astropy" @echo "# devhelp" epub: $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub @echo @echo "Build finished. The epub file is in $(BUILDDIR)/epub." latex: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." @echo "Run \`make' in that directory to run these through (pdf)latex" \ "(use \`make latexpdf' here to do that automatically)." latexpdf: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo "Running LaTeX files through pdflatex..." make -C $(BUILDDIR)/latex all-pdf @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." text: $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text @echo @echo "Build finished. The text files are in $(BUILDDIR)/text." man: $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man @echo @echo "Build finished. The manual pages are in $(BUILDDIR)/man." changes: $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes @echo @echo "The overview file is in $(BUILDDIR)/changes." linkcheck: $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck @echo @echo "Link check complete; look for any errors in the above output " \ "or in $(BUILDDIR)/linkcheck/output.txt." doctest: @echo "Run 'python setup.py test' in the root directory to run doctests " \ @echo "in the documentation." photutils-0.4/docs/morphology.rst0000644000214200020070000000743013175634532021471 0ustar lbradleySTSCI\science00000000000000Morphological Properties (`photutils.morphology`) ================================================= Introduction ------------ The :func:`~photutils.morphology.data_properties` function can be used to calculate the morphological properties of a single source in a cutout image. `~photutils.morphology.data_properties` returns a `~photutils.segmentation.SourceProperties` object. Please see `~photutils.segmentation.SourceProperties` for the list of the many properties that are calculated. Even more properties are likely to be added in the future. If you have a segmentation image, the :func:`~photutils.segmentation.source_properties` function can be used to calculate the properties for all (or a specified subset) of the segmented sources. Please see `Source Photometry and Properties from Image Segmentation `_ for more details. Getting Started --------------- Let's extract a single object from a synthetic dataset and find calculate its morphological properties. For this example, we will subtract the background using simple sigma-clipped statistics. 
First, we create the source image and subtract its background:: >>> from photutils.datasets import make_4gaussians_image >>> from astropy.stats import sigma_clipped_stats >>> data = make_4gaussians_image()[43:79, 76:104] >>> mean, median, std = sigma_clipped_stats(data, sigma=3.0, iters=5) >>> data -= median # subtract background Then, calculate its properties: .. doctest-requires:: scipy, skimage >>> from photutils import data_properties >>> cat = data_properties(data) >>> columns = ['id', 'xcentroid', 'ycentroid', 'semimajor_axis_sigma', ... 'semiminor_axis_sigma', 'orientation'] >>> tbl = cat.to_table(columns=columns) >>> tbl['xcentroid'].info.format = '.10f' # optional format >>> tbl['ycentroid'].info.format = '.10f' >>> tbl['semiminor_axis_sigma'].info.format = '.10f' >>> tbl['orientation'].info.format = '.10f' >>> print(tbl) id xcentroid ycentroid ... semiminor_axis_sigma orientation pix pix ... pix rad --- ------------- ------------- ... -------------------- ------------ 1 14.0225090502 16.9901801466 ... 3.6977761870 1.0494368937 Now let's use the measured morphological properties to define an approximate isophotal ellipse for the source: .. doctest-skip:: >>> from photutils import EllipticalAperture >>> position = (cat.xcentroid.value, cat.ycentroid.value) >>> r = 3.0 # approximate isophotal extent >>> a = cat.semimajor_axis_sigma.value * r >>> b = cat.semiminor_axis_sigma.value * r >>> theta = cat.orientation.value >>> apertures = EllipticalAperture(position, a, b, theta=theta) >>> plt.imshow(data, origin='lower', cmap='viridis', ... interpolation='nearest') >>> apertures.plot(color='#d62728') .. plot:: import matplotlib.pyplot as plt from photutils import data_properties, EllipticalAperture from photutils.datasets import make_4gaussians_image data = make_4gaussians_image()[43:79, 76:104] # extract single object cat = data_properties(data) columns = ['id', 'xcentroid', 'ycentroid', 'semimajor_axis_sigma', 'semiminor_axis_sigma', 'orientation'] tbl = cat.to_table(columns=columns) r = 2.5 # approximate isophotal extent position = (cat.xcentroid.value, cat.ycentroid.value) a = cat.semimajor_axis_sigma.value * r b = cat.semiminor_axis_sigma.value * r theta = cat.orientation.value apertures = EllipticalAperture(position, a, b, theta=theta) plt.imshow(data, origin='lower', cmap='viridis', interpolation='nearest') apertures.plot(color='#d62728') Reference/API ------------- .. automodapi:: photutils.morphology :no-heading: photutils-0.4/docs/nitpick-exceptions0000644000214200020070000000027513175634532022303 0ustar lbradleySTSCI\science00000000000000# photutils.morphology py:class photutils.morphology.CompoundModel1 # list disappeared from Python 2.7 intersphinx inventory # https://bugs.python.org/issue30882 py:obj list py:class list photutils-0.4/docs/overview.rst0000644000214200020070000000422013175634532021132 0ustar lbradleySTSCI\science00000000000000Overview ======== Introduction ------------ Photutils contains functions for: * estimating the background and background RMS in astronomical images * detecting sources in astronomical images * estimating morphological parameters of those sources (e.g., centroid and shape parameters) * performing aperture and PSF photometry The code and the documentation are available at the following links: * Code: https://github.com/astropy/photutils * Issue Tracker: https://github.com/astropy/photutils/issues * Documentation: https://photutils.readthedocs.io/ .. 
_coordinate-conventions: Coordinate Conventions ---------------------- In Photutils, pixel coordinates are zero-indexed, meaning that ``(x, y) = (0, 0)`` corresponds to the center of the lowest, leftmost array element. This means that the value of ``data[0, 0]`` is taken as the value over the range ``-0.5 < x <= 0.5``, ``-0.5 < y <= 0.5``. Note that this differs from the SourceExtractor_, IRAF_, FITS, and ds9_ conventions, in which the center of the lowest, leftmost array element is ``(1, 1)``. The ``x`` (column) coordinate corresponds to the second (fast) array index and the ``y`` (row) coordinate corresponds to the first (slow) index. ``data[y, x]`` gives the value at coordinates (x, y). Along with zero-indexing, this means that an array is defined over the coordinate range ``-0.5 < x <= data.shape[1] - 0.5``, ``-0.5 < y <= data.shape[0] - 0.5``. .. _SourceExtractor: http://www.astromatic.net/software/sextractor .. _IRAF: http://iraf.noao.edu/ .. _ds9: http://ds9.si.edu/ Bundled Datasets ---------------- In this documentation, we use example `datasets `_ provided by calling functions such as :func:`~photutils.datasets.load_star_image`. This function returns an Astropy :class:`~astropy.io.fits.ImageHDU` object, and is equivalent to doing: .. doctest-skip:: >>> from astropy.io import fits >>> hdu = fits.open('dataset.fits')[0] where the ``[0]`` accesses the first HDU in the FITS file. Contributors ------------ For the complete list of contributors please see the `Photutils contributors page on Github `_. photutils-0.4/docs/psf.rst0000644000214200020070000007415213175634532020067 0ustar lbradleySTSCI\science00000000000000PSF Photometry (`photutils.psf`) ================================ The `photutils.psf` module contains tools for model-fitting photometry, often called "PSF photometry". .. warning:: The PSF photometry API is currently considered *experimental* and may change in the future. We will aim to keep compatibility where practical, but will not finalize the API until sufficient user feedback has been accumulated. .. _psf-terminology: Terminology ----------- Different astronomy sub-fields use the terms Point Spread Function (PSF) and Point Response Function (PRF) somewhat differently, especially when colloquial usage is taken into account. For this module we assume that the PRF is an image of a point source *after discretization* e.g., onto a rectilinear CCD grid. This is the definition used by `Spitzer `_. Where relevant, we use this terminology for this sort of model, and consider "PSF" to refer to the underlying model. In many cases this distinction is unimportant, but can be critical when dealing with undersampled data. Despite this, in colloquial usage "PSF photometry" often means the same sort of model-fitting analysis, regardless to exactly what kind of model is actually being fit. We take this road, using "PSF photometry" as shorthand for the general approach. PSF Photometry -------------- Photutils provides a modular set of tools to perform PSF photometry for different science cases. These are implemented as separate classes to do sub-tasks of PSF photometry. It also provides high-level classes that connect these pieces together. In particular, it contains an implementation of the DAOPHOT algorithm (`~photutils.psf.DAOPhotPSFPhotometry`) proposed by `Stetson in his seminal paper `_ for crowded-field stellar photometry. 
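For orientation, constructing and running this class typically looks like the following sketch (the parameter values are placeholders rather than recommendations, ``my_image`` stands for any 2D image array, and the full signature should be checked against the `~photutils.psf.DAOPhotPSFPhotometry` API reference):

.. doctest-skip::

    >>> from photutils.psf import DAOPhotPSFPhotometry, IntegratedGaussianPRF
    >>> psf_model = IntegratedGaussianPRF(sigma=2.0)
    >>> # crit_separation sets the grouping radius, threshold and fwhm
    >>> # configure the star finder, and fitshape is the fitting region
    >>> daophot = DAOPhotPSFPhotometry(crit_separation=5., threshold=10.,
    ...                                fwhm=4.5, psf_model=psf_model,
    ...                                fitshape=(11, 11), niters=3)
    >>> results = daophot(image=my_image)

The remainder of this section describes what such an object does internally and how each stage can be customized.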
The DAOPHOT algorithm consists in applying the loop FIND, GROUP, NSTAR, SUBTRACT, FIND until no more stars are detected or a given number of iterations is reached. Basically, `~photutils.psf.DAOPhotPSFPhotometry` works as follows. The first step is to estimate the sky background. For this task, photutils provides several classes to compute scalar and 2D backgrounds, see `~photutils.background` for details. The next step is to find an initial estimate of the positions of potential sources. This can be accomplished by using source detection algorithms, which are implemented in `~photutils.detection`. After finding sources one would apply a clustering algorithm in order to label the sources according to groups. Usually, those groups are formed by a distance criterion, which is the case of the grouping algorithm proposed by Stetson. In `~photutils.psf.DAOGroup`, we provide an implementation of that algorithm. In addition, `~photutils.psf.DBSCANGroup` can also be used to group sources with more complex distance criteria. The reason behind the construction of groups is illustrated as follows: imagine that one would like to fit 300 stars and the model for each star has three parameters to be fitted. If one constructs a single model to fit the 300 stars simultaneously, then the optimization algorithm will have to search for the solution in a 900 dimensional space, which is computationally expensive and error-prone. Reducing the stars in groups effectively reduces the dimension of the parameter space, which facilitates the optimization process. Provided that the groups are available, the next step is to fit the sources simultaneously for each group. This task can be done using an astropy fitter, for instance, `~astropy.modeling.fitting.LevMarLSQFitter`. After sources are fitted, they are subtracted from the given image and, after fitting all sources, the residual image is analyzed by the finding routine again in order to check if there exist any source which has not been detected previously. This process goes on until no more sources are identified by the finding routine. .. note:: It is important to note the conventions on the column names of the input/output astropy Tables which are passed along to the source detection and photometry objects. For instance, all source detection objects should output a table with columns named as ``xcentroid`` and ``ycentroid`` (check `~photutils.detection`). On the other hand, `~photutils.psf.DAOGroup` expects columns named as ``x_0`` and ``y_0``, which represents the initial guesses on the sources' centroids. Finally, the output of the fitting process shows columns named as ``x_fit``, ``y_fit``, ``flux_fit`` for the optimum values and ``x_0``, ``y_0``, ``flux_0`` for the initial guesses. Although this convention implies that the columns have to be renamed along the process, it has the advantage of clarity so that one can keep track and easily differentiate where input/outputs came from. High-Level Structure ^^^^^^^^^^^^^^^^^^^^ Photutils provides three classes to perform PSF Photometry: `~photutils.psf.BasicPSFPhotometry`, `~photutils.psf.IterativelySubtractedPSFPhotometry`, and `~photutils.psf.DAOPhotPSFPhotometry`. Together these provide the core workflow to make photometric measurements given an appropriate PSF (or other) model. `~photutils.psf.BasicPSFPhotometry` implements the minimum tools for model-fitting photometry. 
At its core, this involves finding sources in an image, grouping overlapping sources into a single model, fitting the model to the sources, and subtracting the models from the image. In DAOPHOT parlance, this is essentially running the "FIND, GROUP, NSTAR, SUBTRACT" once. Because it is only a single cycle of that sequence, this class should be used when the degree of crowdedness of the field is not very high, for instance, when most stars are separated by a distance no less than one FWHM and their brightness are relatively uniform. It is critical to understand, though, that `~photutils.psf.BasicPSFPhotometry` does not actually contain the functionality to *do* all these steps - that is provided by other objects (or can be user-written) functions. Rather it provides the framework and data structures in which these operations run. Because of this, `~photutils.psf.BasicPSFPhotometry` is particularly useful for build more complex workflows, as all of the stages can be turned on or off or replaced with different implementations as the user desires. `~photutils.psf.IterativelySubtractedPSFPhotometry` is similar to `~photutils.psf.BasicPSFPhotometry`, but it adds a parameter called ``n_iters`` which is the number of iterations for which the loop "FIND, GROUP, NSTAR, SUBTRACT, FIND..." will be performed. This class enables photometry in a scenario where there exists significant overlap between stars that are of quite different brightness. For instance, the detection algorithm may not be able to detect a faint and bright star very close together in the first iteration, but they will be detected in the next iteration after the brighter stars have been fit and subtracted. Like `~photutils.psf.BasicPSFPhotometry`, it does not include implementations of the stages of this process, but it provides the structure in which those stages run. `~photutils.psf.DAOPhotPSFPhotometry` is a special case of `~photutils.psf.IterativelySubtractedPSFPhotometry`. Unlike `~photutils.psf.IterativelySubtractedPSFPhotometry` and `~photutils.psf.BasicPSFPhotometry`, the class includes specific implementations of the stages of the photometric measurements, tuned to reproduce the algorithms used for the DAOPHOT code. Specifically, the ``finder``, ``group_maker``, ``bkg_estimator`` attributes are set to the `~photutils.detection.DAOStarFinder`, `~photutils.psf.DAOGroup`, and `~photutils.background.MMMBackground`, respectively. Therefore, users need to input the parameters of those classes to set up a `~photutils.psf.DAOPhotPSFPhotometry` object, rather than providing objects to do these stages (which is what the other classes require). Those classes and all of the classes they *use* for the steps in the photometry process can always be replaced by user-supplied functions if you wish to customize any stage of the photometry process. This makes the machinery very flexible, while still providing a "batteries included" approach with a default implementation that's suitable for many use cases. Basic Usage ^^^^^^^^^^^ The basic usage of, e.g., `~photutils.psf.IterativelySubtractedPSFPhotometry` is as follows: .. doctest-skip:: >>> # create an IterativelySubtractedPSFPhotometry object >>> from photutils.psf import IterativelySubtractedPSFPhotometry >>> my_photometry = IterativelySubtractedPSFPhotometry( ... finder=my_finder, group_maker=my_group_maker, ... bkg_estimator=my_bkg_estimator, psf_model=my_psf_model, ... 
fitter=my_fitter, niters=3, fitshape=(7,7)) >>> # get photometry results >>> photometry_results = my_photometry(image=my_image) >>> # get residual image >>> residual_image = my_photometry.get_residual_image() Where ``my_finder``, ``my_group_maker``, and ``my_bkg_estimator`` may be any suitable class or callable function. This approach allows one to customize every part of the photometry process provided that their input/output are compatible with the input/ouput expected by `~photutils.psf.IterativelySubtractedPSFPhotometry`. `photutils.psf` provides all the necessary classes to reproduce the DAOPHOT algorithm, but any individual part of that algorithm can be swapped for a user-defined function. See the API documentation for precise details on what these classes or functions should look like. Performing PSF Photometry ^^^^^^^^^^^^^^^^^^^^^^^^^ Let's take a look at a simple example with simulated stars whose PSF is assumed to be Gaussian. First let's create an image with four overlapping stars:: >>> import numpy as np >>> from astropy.table import Table >>> from photutils.datasets import (make_random_gaussians_table, ... make_noise_image, ... make_gaussian_sources_image) >>> sigma_psf = 2.0 >>> sources = Table() >>> sources['flux'] = [700, 800, 700, 800] >>> sources['x_mean'] = [12, 17, 12, 17] >>> sources['y_mean'] = [15, 15, 20, 20] >>> sources['x_stddev'] = sigma_psf*np.ones(4) >>> sources['y_stddev'] = sources['x_stddev'] >>> sources['theta'] = [0, 0, 0, 0] >>> sources['id'] = [1, 2, 3, 4] >>> tshape = (32, 32) >>> image = (make_gaussian_sources_image(tshape, sources) + ... make_noise_image(tshape, type='poisson', mean=6., ... random_state=1) + ... make_noise_image(tshape, type='gaussian', mean=0., ... stddev=2., random_state=1)) .. doctest-requires:: matplotlib >>> from matplotlib import rcParams >>> rcParams['font.size'] = 13 >>> import matplotlib.pyplot as plt >>> plt.imshow(image, cmap='viridis', aspect=1, interpolation='nearest', ... origin='lower') # doctest: +SKIP >>> plt.title('Simulated data') # doctest: +SKIP >>> plt.colorbar(orientation='horizontal', fraction=0.046, pad=0.04) # doctest: +SKIP .. plot:: import numpy as np from astropy.table import Table from photutils.datasets import (make_random_gaussians_table, make_noise_image, make_gaussian_sources_image) sigma_psf = 2.0 sources = Table() sources['flux'] = [700, 800, 700, 800] sources['x_mean'] = [12, 17, 12, 17] sources['y_mean'] = [15, 15, 20, 20] sources['x_stddev'] = sigma_psf*np.ones(4) sources['y_stddev'] = sources['x_stddev'] sources['theta'] = [0, 0, 0, 0] sources['id'] = [1, 2, 3, 4] tshape = (32, 32) image = (make_gaussian_sources_image(tshape, sources) + make_noise_image(tshape, type='poisson', mean=6., random_state=1) + make_noise_image(tshape, type='gaussian', mean=0., stddev=2., random_state=1)) from matplotlib import rcParams rcParams['font.size'] = 13 import matplotlib.pyplot as plt plt.imshow(image, cmap='viridis', aspect=1, interpolation='nearest', origin='lower') plt.title('Simulated data') plt.colorbar(orientation='horizontal', fraction=0.046, pad=0.04) Then let's import the required classes to set up a `~photutils.psf.IterativelySubtractedPSFPhotometry` object:: >>> from photutils.detection import IRAFStarFinder >>> from photutils.psf import IntegratedGaussianPRF, DAOGroup >>> from photutils.background import MMMBackground, MADStdBackgroundRMS >>> from astropy.modeling.fitting import LevMarLSQFitter >>> from astropy.stats import gaussian_sigma_to_fwhm Let's then instantiate and use the objects: .. 
doctest-requires:: scipy, skimage >>> bkgrms = MADStdBackgroundRMS() >>> std = bkgrms(image) >>> iraffind = IRAFStarFinder(threshold=3.5*std, ... fwhm=sigma_psf*gaussian_sigma_to_fwhm, ... minsep_fwhm=0.01, roundhi=5.0, roundlo=-5.0, ... sharplo=0.0, sharphi=2.0) >>> daogroup = DAOGroup(2.0*sigma_psf*gaussian_sigma_to_fwhm) >>> mmm_bkg = MMMBackground() >>> fitter = LevMarLSQFitter() >>> psf_model = IntegratedGaussianPRF(sigma=sigma_psf) >>> from photutils.psf import IterativelySubtractedPSFPhotometry >>> photometry = IterativelySubtractedPSFPhotometry(finder=iraffind, ... group_maker=daogroup, ... bkg_estimator=mmm_bkg, ... psf_model=psf_model, ... fitter=LevMarLSQFitter(), ... niters=1, fitshape=(11,11)) >>> result_tab = photometry(image=image) >>> residual_image = photometry.get_residual_image() Note that the parameters values for the finder class, i.e., `~photutils.detection.IRAFStarFinder`, are completely chosen in an arbitrary manner and optimum values do vary according to the data. As mentioned before, the way to actually do the photometry is by using ``photometry`` as a function-like call. It's worth noting that ``image`` does not need to be background subtracted. The subtraction is done during the photometry process with the attribute ``bkg`` that was used to set up ``photometry``. Now, let's compare the simulated and the residual images: .. doctest-skip:: >>> plt.subplot(1, 2, 1) >>> plt.imshow(image, cmap='viridis', aspect=1, interpolation='nearest', origin='lower') >>> plt.title('Simulated data') >>> plt.colorbar(orientation='horizontal', fraction=0.046, pad=0.04) >>> plt.subplot(1 ,2, 2) >>> plt.imshow(residual_image, cmap='viridis', aspect=1, ... interpolation='nearest', origin='lower') >>> plt.title('Residual Image') >>> plt.colorbar(orientation='horizontal', fraction=0.046, pad=0.04) >>> plt.show() .. 
plot:: import numpy as np from photutils.datasets import (make_random_gaussians_table, make_noise_image, make_gaussian_sources_image) from astropy.table import Table sigma_psf = 2.0 sources = Table() sources['flux'] = [700, 800, 700, 800] sources['x_mean'] = [12, 17, 12, 17] sources['y_mean'] = [15, 15, 20, 20] sources['x_stddev'] = sigma_psf*np.ones(4) sources['y_stddev'] = sources['x_stddev'] sources['theta'] = [0, 0, 0, 0] sources['id'] = [1, 2, 3, 4] tshape = (32, 32) image = (make_gaussian_sources_image(tshape, sources) + make_noise_image(tshape, type='poisson', mean=6., random_state=1) + make_noise_image(tshape, type='gaussian', mean=0., stddev=2., random_state=1)) from photutils.detection import IRAFStarFinder from photutils.psf import IntegratedGaussianPRF, DAOGroup from photutils.background import MMMBackground, MADStdBackgroundRMS from astropy.modeling.fitting import LevMarLSQFitter from astropy.stats import gaussian_sigma_to_fwhm bkgrms = MADStdBackgroundRMS() std = bkgrms(image) iraffind = IRAFStarFinder(threshold=3.5*std, fwhm=sigma_psf*gaussian_sigma_to_fwhm, minsep_fwhm=0.01, roundhi=5.0, roundlo=-5.0, sharplo=0.0, sharphi=2.0) daogroup = DAOGroup(2.0*sigma_psf*gaussian_sigma_to_fwhm) mmm_bkg = MMMBackground() psf_model = IntegratedGaussianPRF(sigma=sigma_psf) fitter = LevMarLSQFitter() from photutils.psf import IterativelySubtractedPSFPhotometry photometry = IterativelySubtractedPSFPhotometry(finder=iraffind, group_maker=daogroup, bkg_estimator=mmm_bkg, psf_model=psf_model, fitter=LevMarLSQFitter(), niters=1, fitshape=(11,11)) result_tab = photometry(image=image) residual_image = photometry.get_residual_image() from matplotlib import rcParams rcParams['font.size'] = 13 import matplotlib.pyplot as plt plt.subplot(1, 2, 1) plt.imshow(image, cmap='viridis', aspect=1, interpolation='nearest', origin='lower') plt.title('Simulated data') plt.colorbar(orientation='horizontal', fraction=0.046, pad=0.04) plt.subplot(1 ,2, 2) plt.imshow(residual_image, cmap='viridis', aspect=1, interpolation='nearest', origin='lower') plt.title('Residual Image') plt.colorbar(orientation='horizontal', fraction=0.046, pad=0.04) plt.show() Performing PSF Photometry with Fixed Centroids ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ In case that the centroids positions of the stars are known a priori, then they can be held fixed during the fitting process and the optimizer will only consider flux as a variable. To do that, one has to set the ``fixed`` attribute for the centroid parameters in ``psf`` as ``True``. Consider the previous example after the line ``psf_model = IntegratedGaussianPRF(sigma=sigma_psf)``: .. doctest-skip:: >>> psf_model.x_0.fixed = True >>> psf_model.y_0.fixed = True >>> pos = Table(names=['x_0', 'y_0'], data=[sources['x_mean'], ... sources['y_mean']]) .. doctest-skip:: >>> photometry = BasicPSFPhotometry(group_maker=daogroup, ... bkg_estimator=mmm_bkg, ... psf_model=psf_model, ... fitter=LevMarLSQFitter(), ... fitshape=(11,11)) >>> result_tab = photometry(image=image, init_guesses=pos) >>> residual_image = photometry.get_residual_image() .. doctest-skip:: >>> plt.subplot(1, 2, 1) >>> plt.imshow(image, cmap='viridis', aspect=1, ... interpolation='nearest', origin='lower') >>> plt.title('Simulated data') >>> plt.colorbar(orientation='horizontal', fraction=0.046, pad=0.04) >>> plt.subplot(1 ,2, 2) >>> plt.imshow(residual_image, cmap='viridis', aspect=1, ... 
interpolation='nearest', origin='lower') >>> plt.title('Residual Image') >>> plt.colorbar(orientation='horizontal', fraction=0.046, pad=0.04) .. plot:: import numpy as np from photutils.datasets import (make_random_gaussians_table, make_noise_image, make_gaussian_sources_image) from astropy.table import Table sigma_psf = 2.0 sources = Table() sources['flux'] = [700, 800, 700, 800] sources['x_mean'] = [12, 17, 12, 17] sources['y_mean'] = [15, 15, 20, 20] sources['x_stddev'] = sigma_psf*np.ones(4) sources['y_stddev'] = sources['x_stddev'] sources['theta'] = [0, 0, 0, 0] sources['id'] = [1, 2, 3, 4] tshape = (32, 32) image = (make_gaussian_sources_image(tshape, sources) + make_noise_image(tshape, type='poisson', mean=6., random_state=1) + make_noise_image(tshape, type='gaussian', mean=0., stddev=2., random_state=1)) from photutils.detection import IRAFStarFinder from photutils.psf import IntegratedGaussianPRF, DAOGroup from photutils.background import MMMBackground, MADStdBackgroundRMS from astropy.modeling.fitting import LevMarLSQFitter from astropy.stats import gaussian_sigma_to_fwhm bkgrms = MADStdBackgroundRMS() std = bkgrms(image) daogroup = DAOGroup(2.0*sigma_psf*gaussian_sigma_to_fwhm) mmm_bkg = MMMBackground() psf_model = IntegratedGaussianPRF(sigma=sigma_psf) psf_model.x_0.fixed = True psf_model.y_0.fixed = True pos = Table(names=['x_0', 'y_0'], data=[sources['x_mean'], sources['y_mean']]) fitter = LevMarLSQFitter() from photutils.psf import BasicPSFPhotometry photometry = BasicPSFPhotometry(group_maker=daogroup, bkg_estimator=mmm_bkg, psf_model=psf_model, fitter=LevMarLSQFitter(), fitshape=(11,11)) result_tab = photometry(image=image, init_guesses=pos) residual_image = photometry.get_residual_image() from matplotlib import rcParams import matplotlib.pyplot as plt rcParams['font.size'] = 13 plt.subplot(1, 2, 1) plt.imshow(image, cmap='viridis', aspect=1, interpolation='nearest', origin='lower') plt.title('Simulated data') plt.colorbar(orientation='horizontal', fraction=0.046, pad=0.04) plt.subplot(1 ,2, 2) plt.imshow(residual_image, cmap='viridis', aspect=1, interpolation='nearest', origin='lower') plt.title('Residual Image') plt.colorbar(orientation='horizontal', fraction=0.046, pad=0.04) plt.show() Fitting additional parameters ----------------------------- The PSF photometry classes can also be used to fit more model parameters than just the flux and center positions. While a more realistic use case might be fitting sky backgrounds, or shape parameters of galaxies, here we use the ``sigma`` parameter in `~photutils.psf.IntegratedGaussianPRF` as the simplest possible example of this feature. (For actual PSF photometry of stars you would *not* want to do this, because you the shape of the PSF should be set by bright stars or an optical model and held fixed when fitting.) First, let us instantiate a PSF model object: .. doctest-skip:: >>> gaussian_prf = IntegratedGaussianPRF() The attribute ``fixed`` for the ``sigma`` parameter is set to ``True`` by default, i.e., ``sigma`` is not considered during the fitting process. Let's first change this behavior: .. doctest-skip:: >>> gaussian_prf.sigma.fixed = False In addition, we need to indicate the initial guess which will be used in during the fitting process. By the default, the initial guess is taken as the default value of ``sigma``, but we can change that by doing: .. 
doctest-skip:: >>> gaussian_prf.sigma.value = 2.05 Now, let's create a simulated image which has a brighter star and one overlapping fainter companion so that the detection algorithm won't be able to identify it, and hence we should use `~photutils.psf.IterativelySubtractedPSFPhotometry` to measure the fainter star as well. Also, note that both of the stars have ``sigma=2.0``. .. plot:: :include-source: import numpy as np import matplotlib.pyplot as plt from matplotlib.colors import LogNorm from photutils.datasets import (make_random_gaussians_table, make_noise_image, make_gaussian_sources_image) from photutils.psf import (IterativelySubtractedPSFPhotometry, BasicPSFPhotometry) from photutils import MMMBackground from photutils.psf import IntegratedGaussianPRF, DAOGroup from photutils.detection import DAOStarFinder from photutils.detection import IRAFStarFinder from astropy.table import Table from astropy.modeling.fitting import LevMarLSQFitter sources = Table() sources['flux'] = [10000, 1000] sources['x_mean'] = [18, 9] sources['y_mean'] = [17, 21] sources['x_stddev'] = [2] * 2 sources['y_stddev'] = sources['x_stddev'] sources['theta'] = [0] * 2 tshape = (32, 32) image = (make_gaussian_sources_image(tshape, sources) + make_noise_image(tshape, type='poisson', mean=6., random_state=1) + make_noise_image(tshape, type='gaussian', mean=0., stddev=2., random_state=1)) vmin, vmax = np.percentile(image, [5, 95]) plt.imshow(image, cmap='viridis', aspect=1, interpolation='nearest', origin='lower', norm=LogNorm(vmin=vmin, vmax=vmax)) Let's instantiate the necessary objects in order to use an `~photutils.psf.IterativelySubtractedPSFPhotometry` to perform photometry: .. doctest-requires:: scipy, skimage >>> daogroup = DAOGroup(crit_separation=8) >>> mmm_bkg = MMMBackground() >>> iraffind = IRAFStarFinder(threshold=2.5*mmm_bkg(image), fwhm=4.5) >>> fitter = LevMarLSQFitter() >>> gaussian_prf = IntegratedGaussianPRF(sigma=2.05) >>> gaussian_prf.sigma.fixed = False >>> itr_phot_obj = IterativelySubtractedPSFPhotometry(finder=iraffind, ... group_maker=daogroup, ... bkg_estimator=mmm_bkg, ... psf_model=psf_model, ... fitter=fitter, ... fitshape=(11, 11), ... niters=2) Now, let's use the callable ``itr_phot_obj`` to perform photometry: .. doctest-requires:: scipy, skimage >>> phot_results = itr_phot_obj(image) >>> phot_results['id', 'group_id', 'iter_detected', 'x_0', 'y_0', 'flux_0'] #doctest: +SKIP id group_id iter_detected x_0 y_0 flux_0 --- -------- ------------- ------------- ------------- ------------- 1 1 1 18.0045935148 17.0060558543 9437.07321281 1 1 2 9.06141447183 21.0680052846 977.163727416 >>> phot_results['sigma_0', 'sigma_fit', 'x_fit', 'y_fit', 'flux_fit'] #doctest: +SKIP sigma_0 sigma_fit x_fit y_fit flux_fit ------- ------------- ------------- ------------- ------------- 2.05 1.98092026939 17.9995106906 17.0039419384 10016.4470148 2.05 1.98516037471 9.12116345703 21.0599164498 1036.79115883 We can see that ``sigma_0`` (the initial guess for ``sigma``) was assigned to the value we used when creating the PSF model. Let's take a look at the residual image:: >>> plt.imshow(itr_phot_obj.get_residual_image(), cmap='viridis', ... aspect=1, interpolation='nearest', origin='lower') #doctest: +SKIP .. 
plot:: from photutils.datasets import (make_random_gaussians_table, make_noise_image, make_gaussian_sources_image) import matplotlib.pyplot as plt from photutils.psf import (IterativelySubtractedPSFPhotometry, BasicPSFPhotometry) from astropy.stats import gaussian_sigma_to_fwhm from astropy.table import Table from photutils import MMMBackground from photutils.psf import IntegratedGaussianPRF, DAOGroup from photutils.detection import DAOStarFinder from astropy.modeling.fitting import LevMarLSQFitter from photutils.detection import IRAFStarFinder sources = Table() sources['flux'] = [10000, 1000] sources['x_mean'] = [18, 9] sources['y_mean'] = [17, 21] sources['x_stddev'] = [2] * 2 sources['y_stddev'] = sources['x_stddev'] sources['theta'] = [0] * 2 tshape = (32, 32) image = (make_gaussian_sources_image(tshape, sources) + make_noise_image(tshape, type='poisson', mean=6., random_state=1) + make_noise_image(tshape, type='gaussian', mean=0., stddev=2., random_state=1)) daogroup = DAOGroup(crit_separation=8) mmm_bkg = MMMBackground() psf_model = IntegratedGaussianPRF(sigma=2.05) iraffind = IRAFStarFinder(threshold=2.5*mmm_bkg(image), fwhm=4.5) fitter = LevMarLSQFitter() psf_model.sigma.fixed = False itr_phot_obj = IterativelySubtractedPSFPhotometry( finder=iraffind, group_maker=daogroup, bkg_estimator=mmm_bkg, psf_model=psf_model, fitter=fitter, fitshape=(11, 11), niters=2) phot_results_itr = itr_phot_obj(image) plt.imshow(itr_phot_obj.get_residual_image(), cmap='viridis', aspect=1, interpolation='nearest', origin='lower') Additional Example Notebooks (online) ------------------------------------- * `PSF photometry on artificial Gaussian stars in crowded fields `_ * `PSF photometry on artificial Gaussian stars `_ * `PSF/PRF Photometry on Spitzer Data `_ References ---------- `Spitzer PSF vs. PRF `_ `Kepler PSF calibration `_ `The Kepler Pixel Response Function `_ `Stetson, Astronomical Society of the Pacific, Publications, (ISSN 0004-6280), vol. 99, March 1987, p. 191-222. `_ Reference/API ------------- .. automodapi:: photutils.psf :no-heading: .. automodapi:: photutils.psf.sandbox photutils-0.4/docs/psf_matching.rst0000644000214200020070000002176613175634532021744 0ustar lbradleySTSCI\science00000000000000.. _psf_matching: PSF Matching (`photutils.psf.matching`) ======================================= Introduction ------------ This subpackage contains tools to generate kernels for matching point spread functions (PSFs). Matching PSFs ------------- Photutils provides a function called :func:`~photutils.psf.matching.create_matching_kernel` that generates a matching kernel between two PSFs using the ratio of Fourier transforms (see e.g., `Gordon et al. 2008`_; `Aniano et al. 2011`_). For this first simple example, let's assume our source and target PSFs are noiseless 2D Gaussians. The "high-resolution" PSF will be a Gaussian with :math:`\sigma=3`. The "low-resolution" PSF will be a Gaussian with :math:`\sigma=5`:: >>> import numpy as np >>> from astropy.modeling.models import Gaussian2D >>> y, x = np.mgrid[0:51, 0:51] >>> gm1 = Gaussian2D(100, 25, 25, 3, 3) >>> gm2 = Gaussian2D(100, 25, 25, 5, 5) >>> g1 = gm1(x, y) >>> g2 = gm2(x, y) >>> g1 /= g1.sum() >>> g2 /= g2.sum() For these 2D Gaussians, the matching kernel should be a 2D Gaussian with :math:`\sigma=4` (``sqrt(5**2 - 3**2)``). Let's create the matching kernel using a Fourier ratio method. Note that the input source and target PSFs must have the same shape and pixel scale. 
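Before computing the kernel, it can be worth a quick check that the two PSF arrays really are defined on a common grid (a minimal sanity check added here for illustration; it simply reuses the ``g1`` and ``g2`` arrays defined above)::

    >>> g1.shape == g2.shape  # both PSFs were evaluated on the same 51 x 51 grid
    True

With the PSFs on a common grid, the matching kernel is created with a single call::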
>>> from photutils import create_matching_kernel >>> kernel = create_matching_kernel(g1, g2) Let's plot the result: .. plot:: :include-source: import numpy as np from astropy.modeling.models import Gaussian2D from photutils import create_matching_kernel import matplotlib.pyplot as plt y, x = np.mgrid[0:51, 0:51] gm1 = Gaussian2D(100, 25, 25, 3, 3) gm2 = Gaussian2D(100, 25, 25, 5, 5) g1 = gm1(x, y) g2 = gm2(x, y) g1 /= g1.sum() g2 /= g2.sum() kernel = create_matching_kernel(g1, g2) plt.imshow(kernel, cmap='Greys_r', origin='lower') plt.colorbar() We quickly observe that the result is not as expected. This is because of high-frequency noise in the Fourier transforms (even though these are noiseless PSFs, there is floating-point noise in the ratios). Using the Fourier ratio method, one must filter the high-frequency noise from the Fourier ratios. This is performed by inputing a `window function `_, which may be a function or a callable object. In general, the user will need to exercise some care when defining a window function. For more information, please see `Aniano et al. 2011`_. Photutils provides the following window classes: * `~photutils.psf.matching.HanningWindow` * `~photutils.psf.matching.TukeyWindow` * `~photutils.psf.matching.CosineBellWindow` * `~photutils.psf.matching.SplitCosineBellWindow` * `~photutils.psf.matching.TopHatWindow` Here are plots of 1D cuts across the center of the 2D window functions: .. plot:: :include-source: from photutils import (HanningWindow, TukeyWindow, CosineBellWindow, SplitCosineBellWindow, TopHatWindow) import matplotlib.pyplot as plt w1 = HanningWindow() w2 = TukeyWindow(alpha=0.5) w3 = CosineBellWindow(alpha=0.5) w4 = SplitCosineBellWindow(alpha=0.4, beta=0.3) w5 = TopHatWindow(beta=0.4) shape = (101, 101) y0 = (shape[0] - 1) // 2 plt.figure() plt.subplots_adjust(wspace=0.4, hspace=0.4) plt.subplot(2, 3, 1) plt.plot(w1(shape)[y0, :]) plt.title('Hanning') plt.xlabel('x') plt.ylim((0, 1.1)) plt.subplot(2, 3, 2) plt.plot(w2(shape)[y0, :]) plt.title('Tukey') plt.xlabel('x') plt.ylim((0, 1.1)) plt.subplot(2, 3, 3) plt.plot(w3(shape)[y0, :]) plt.title('Cosine Bell') plt.xlabel('x') plt.ylim((0, 1.1)) plt.subplot(2, 3, 4) plt.plot(w4(shape)[y0, :]) plt.title('Split Cosine Bell') plt.xlabel('x') plt.ylim((0, 1.1)) plt.subplot(2, 3, 5) plt.plot(w5(shape)[y0, :], label='Top Hat') plt.title('Top Hat') plt.xlabel('x') plt.ylim((0, 1.1)) However, the user may input any function or callable object to generate a custom window function. In this example, because these are noiseless PSFs, we will use a `~photutils.psf.matching.TopHatWindow` object as the low-pass filter:: >>> from photutils import TopHatWindow >>> window = TopHatWindow(0.35) >>> kernel = create_matching_kernel(g1, g2, window=window) Note that the output matching kernel from :func:`~photutils.psf.matching.create_matching_kernel` is always normalized such that the kernel array sums to 1:: >>> print(kernel.sum()) # doctest: +FLOAT_CMP 1.0 Let's display the new matching kernel: .. 
plot:: :include-source: import numpy as np from astropy.modeling.models import Gaussian2D from photutils import create_matching_kernel, TopHatWindow import matplotlib.pyplot as plt y, x = np.mgrid[0:51, 0:51] gm1 = Gaussian2D(100, 25, 25, 3, 3) gm2 = Gaussian2D(100, 25, 25, 5, 5) g1 = gm1(x, y) g2 = gm2(x, y) g1 /= g1.sum() g2 /= g2.sum() window = TopHatWindow(0.35) kernel = create_matching_kernel(g1, g2, window=window) plt.imshow(kernel, cmap='Greys_r', origin='lower') plt.colorbar() As desired, the result is indeed a 2D Gaussian with a :math:`\sigma=4`. Here we will show 1D cuts across the center of the kernel images: .. plot:: :include-source: import numpy as np from astropy.modeling.models import Gaussian2D from photutils import create_matching_kernel, TopHatWindow import matplotlib.pyplot as plt y, x = np.mgrid[0:51, 0:51] gm1 = Gaussian2D(100, 25, 25, 3, 3) gm2 = Gaussian2D(100, 25, 25, 5, 5) gm3 = Gaussian2D(100, 25, 25, 4, 4) g1 = gm1(x, y) g2 = gm2(x, y) g3 = gm3(x, y) g1 /= g1.sum() g2 /= g2.sum() g3 /= g3.sum() window = TopHatWindow(0.35) kernel = create_matching_kernel(g1, g2, window=window) kernel /= kernel.sum() plt.plot(kernel[25, :], label='Matching kernel') plt.plot(g3[25, :], label='$\sigma=4$ Gaussian') plt.xlabel('x') plt.ylabel('Flux') plt.legend() plt.ylim((0.0, 0.011)) Matching IRAC PSFs ------------------ For this example, let's generate a matching kernel to go from the Spitzer/IRAC channel 1 (3.6 microns) PSF to the channel 4 (8.0 microns) PSF. We load the PSFs using the :func:`~photutils.datasets.load_irac_psf` convenience function:: >>> from photutils.datasets import load_irac_psf >>> ch1_hdu = load_irac_psf(channel=1) # doctest: +REMOTE_DATA >>> ch4_hdu = load_irac_psf(channel=4) # doctest: +REMOTE_DATA >>> ch1 = ch1_hdu.data # doctest: +REMOTE_DATA >>> ch4 = ch4_hdu.data # doctest: +REMOTE_DATA Let's display the images: .. plot:: :include-source: import matplotlib.pyplot as plt from astropy.visualization import LogStretch from astropy.visualization.mpl_normalize import ImageNormalize from photutils.datasets import load_irac_psf ch1_hdu = load_irac_psf(channel=1) ch4_hdu = load_irac_psf(channel=4) ch1 = ch1_hdu.data ch4 = ch4_hdu.data norm = ImageNormalize(stretch=LogStretch()) plt.figure(figsize=(9, 4)) plt.subplot(1, 2, 1) plt.imshow(ch1, norm=norm, cmap='viridis', origin='lower') plt.title('IRAC channel 1 PSF') plt.subplot(1, 2, 2) plt.imshow(ch4, norm=norm, cmap='viridis', origin='lower') plt.title('IRAC channel 4 PSF') For this example, we will use the :class:`~photutils.psf.matching.CosineBellWindow` for the low-pass window. Also note that these Spitzer/IRAC channel 1 and 4 PSFs have the same shape and pixel scale. If that is not the case, one can use the :func:`~photutils.psf.matching.resize_psf` convenience function to resize a PSF image. Typically one would interpolate the lower-resolution PSF to the same size as the higher-resolution PSF. .. doctest-skip:: >>> from photutils import CosineBellWindow, create_matching_kernel >>> window = CosineBellWindow(alpha=0.35) >>> kernel = create_matching_kernel(ch1, ch4, window=window) Let's display the matching kernel result: .. 
plot:: :include-source: import matplotlib.pyplot as plt from astropy.visualization import LogStretch from astropy.visualization.mpl_normalize import ImageNormalize from photutils.datasets import load_irac_psf from photutils import CosineBellWindow, create_matching_kernel ch1_hdu = load_irac_psf(channel=1) ch4_hdu = load_irac_psf(channel=4) ch1 = ch1_hdu.data ch4 = ch4_hdu.data norm = ImageNormalize(stretch=LogStretch()) window = CosineBellWindow(alpha=0.35) kernel = create_matching_kernel(ch1, ch4, window=window) plt.imshow(kernel, norm=norm, cmap='viridis', origin='lower') plt.colorbar() plt.title('Matching kernel') The Spitzer/IRAC channel 1 image could then be convolved with this matching kernel to produce an image with the same resolution as the channel 4 image. Reference/API ------------- .. automodapi:: photutils.psf.matching :no-heading: .. _Gordon et al. 2008: http://adsabs.harvard.edu/abs/2008ApJ...682..336G .. _Aniano et al. 2011: http://adsabs.harvard.edu/abs/2011PASP..123.1218A photutils-0.4/docs/segmentation.rst0000644000214200020070000005554313175634532021777 0ustar lbradleySTSCI\science00000000000000.. _image_segmentation: Image Segmentation (`photutils.segmentation`) ============================================= Introduction ------------ Photutils includes a general-use function to detect sources (both point-like and extended) in an image using a process called `image segmentation `_ in the `computer vision `_ field. After detecting sources using image segmentation, we can then measure their photometry, centroids, and morphological properties by using additional tools in Photutils. Source Extraction Using Image Segmentation ------------------------------------------ Photutils provides tools to detect astronomical sources using image segmentation, which is a process of assigning a label to every pixel in an image such that pixels with the same label are part of the same source. The segmentation procedure implemented in Photutils is called the threshold method, where detected sources must have a minimum number of connected pixels that are each greater than a specified threshold value in an image. The threshold level is usually defined at some multiple of the background standard deviation (sigma) above the background. The image can also be filtered before thresholding to smooth the noise and maximize the detectability of objects with a shape similar to the filter kernel. In Photutils, source extraction is performed using the :func:`~photutils.segmentation.detect_sources` function. The :func:`~photutils.detection.detect_threshold` tool is a convenience function that generates a 2D detection threshold image using simple sigma-clipped statistics to estimate the background and background RMS. For this example, let's detect sources in a synthetic image provided by the `datasets `_ module:: >>> from photutils.datasets import make_100gaussians_image >>> data = make_100gaussians_image() We will use :func:`~photutils.detection.detect_threshold` to produce a detection threshold image. :func:`~photutils.detection.detect_threshold` will estimate the background and background RMS using sigma-clipped statistics, if they are not input. The threshold level is calculated using the ``snr`` input as the sigma level above the background. Here we generate a simple pixel-wise threshold at 3 sigma above the background:: >>> from photutils import detect_threshold >>> threshold = detect_threshold(data, snr=3.) 
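The ``threshold`` returned above is a 2D image with the same shape as ``data``. As a rough cross-check (a sketch added for illustration, not part of the original example), a comparable scalar level can be computed by hand from sigma-clipped statistics of the image, which is essentially what :func:`~photutils.detection.detect_threshold` estimates internally when no background or error arrays are given::

    >>> from astropy.stats import sigma_clipped_stats
    >>> mean, median, std = sigma_clipped_stats(data, sigma=3.0)
    >>> threshold_value = mean + (3. * std)  # scalar analogue of the threshold image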
For more sophisticated analyses, one should generate a 2D background and background-only error image (e.g., from your data reduction or by using :class:`~photutils.background.Background2D`). In that case, a 3-sigma threshold image is simply:: >>> threshold = bkg + (3.0 * bkg_rms) # doctest: +SKIP Note that if the threshold includes the background level (as above), then the image input into :func:`~photutils.segmentation.detect_sources` should *not* be background subtracted. Let's find sources that have 5 connected pixels that are each greater than the corresponding pixel-wise ``threshold`` level defined above. Because the threshold returned by :func:`~photutils.detection.detect_threshold` includes the background, we do not subtract the background from the data here. We will also input a 2D circular Gaussian kernel with a FWHM of 2 pixels to filter the image prior to thresholding: .. doctest-requires:: scipy >>> from astropy.convolution import Gaussian2DKernel >>> from astropy.stats import gaussian_fwhm_to_sigma >>> from photutils import detect_sources >>> sigma = 2.0 * gaussian_fwhm_to_sigma # FWHM = 2. >>> kernel = Gaussian2DKernel(sigma, x_size=3, y_size=3) >>> kernel.normalize() >>> segm = detect_sources(data, threshold, npixels=5, filter_kernel=kernel) The result is a :class:`~photutils.segmentation.SegmentationImage` object with the same shape as the data, where sources are labeled by different positive integer values. A value of zero is always reserved for the background. Let's plot both the image and the segmentation image showing the detected sources: .. doctest-skip:: >>> import numpy as np >>> import matplotlib.pyplot as plt >>> from astropy.visualization import SqrtStretch >>> from astropy.visualization.mpl_normalize import ImageNormalize >>> norm = ImageNormalize(stretch=SqrtStretch()) >>> fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(8, 8)) >>> ax1.imshow(data, origin='lower', cmap='Greys_r', norm=norm) >>> ax2.imshow(segm, origin='lower', cmap=segm.cmap(random_state=12345)) .. plot:: import numpy as np import matplotlib.pyplot as plt from astropy.stats import gaussian_fwhm_to_sigma from astropy.convolution import Gaussian2DKernel from astropy.visualization import SqrtStretch from astropy.visualization.mpl_normalize import ImageNormalize from photutils.datasets import make_100gaussians_image from photutils import detect_threshold, detect_sources data = make_100gaussians_image() threshold = detect_threshold(data, snr=3.) sigma = 2.0 * gaussian_fwhm_to_sigma # FWHM = 2. kernel = Gaussian2DKernel(sigma, x_size=3, y_size=3) kernel.normalize() segm = detect_sources(data, threshold, npixels=5, filter_kernel=kernel) norm = ImageNormalize(stretch=SqrtStretch()) fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(8, 8)) ax1.imshow(data, origin='lower', cmap='Greys_r', norm=norm) ax2.imshow(segm, origin='lower', cmap=segm.cmap(random_state=12345)) When the segmentation image is generated using image thresholding (e.g., using :func:`~photutils.segmentation.detect_sources`), the source segments effectively represent the isophotal footprint of each source. Source Deblending ^^^^^^^^^^^^^^^^^ In the example above, overlapping sources are detected as single sources. Separating those sources requires a deblending procedure, such as a multi-thresholding technique used by `SExtractor `_. Photutils provides an experimental :func:`~photutils.segmentation.deblend_sources` function that deblends sources using a combination of multi-thresholding and `watershed segmentation `_.
Note that in order to deblend sources, they must be separated enough such that there is a saddle between them. Here's a simple example of source deblending: .. doctest-requires:: scipy, skimage >>> from photutils import deblend_sources >>> segm_deblend = deblend_sources(data, segm, npixels=5, ... filter_kernel=kernel) where ``segm`` is the :class:`~photutils.segmentation.SegmentationImage` that was generated by :func:`~photutils.segmentation.detect_sources`. Note that the ``npixels`` and ``filter_kernel`` input values should match those used in :func:`~photutils.segmentation.detect_sources`. The result is a :class:`~photutils.segmentation.SegmentationImage` object containing the deblended segmentation image. Modifying a Segmentation Image ------------------------------ The :class:`~photutils.segmentation.SegmentationImage` object provides several methods that can be used to modify itself (e.g., combining labels, removing labels, removing border segments) prior to measuring source photometry and other source properties, including: * :meth:`~photutils.segmentation.SegmentationImage.relabel`: Relabel one or more label numbers. * :meth:`~photutils.segmentation.SegmentationImage.relabel_sequential`: Relabel the label numbers sequentially. * :meth:`~photutils.segmentation.SegmentationImage.keep_labels`: Keep only certain label numbers. * :meth:`~photutils.segmentation.SegmentationImage.remove_labels`: Remove one or more label numbers. * :meth:`~photutils.segmentation.SegmentationImage.remove_border_labels`: Remove labeled segments near the image border. * :meth:`~photutils.segmentation.SegmentationImage.remove_masked_labels`: Remove labeled segments located within a masked region. * :meth:`~photutils.segmentation.SegmentationImage.outline_segments`: Outline the labeled segments for plotting. Centroids, Photometry, and Morphological Properties --------------------------------------------------- The :func:`~photutils.segmentation.source_properties` function is the primary tool for measuring the centroids, photometry, and morphological properties of sources defined in a segmentation image. When the segmentation image is generated using image thresholding (e.g., using :func:`~photutils.segmentation.detect_sources`), the source segments effectively represent the isophotal footprint of each source and the resulting photometry is effectively isophotal photometry. :func:`~photutils.segmentation.source_properties` returns a :class:`~photutils.SourceCatalog` object, which effectively acts like a list of :class:`~photutils.segmentation.SourceProperties` objects, one for each segmented source (or a specified subset of sources). An Astropy `~astropy.table.QTable` of source properties can be generated using the :meth:`~photutils.SourceCatalog.to_table` method. Please see :class:`~photutils.segmentation.SourceProperties` for the list of the many properties that are calculated for each source. More properties are likely to be added in the future. Let's detect sources and measure their properties in a synthetic image. For this example, we will use the :class:`~photutils.background.Background2D` class to produce a background and background noise image. We define a 2D detection threshold image using the background and background RMS images. We set the threshold at 3 sigma above the background: ..
doctest-requires:: scipy >>> from astropy.convolution import Gaussian2DKernel >>> from photutils.datasets import make_100gaussians_image >>> from photutils import Background2D, MedianBackground >>> from photutils import detect_threshold, detect_sources >>> data = make_100gaussians_image() >>> bkg_estimator = MedianBackground() >>> bkg = Background2D(data, (50, 50), filter_size=(3, 3), ... bkg_estimator=bkg_estimator) >>> threshold = bkg.background + (3. * bkg.background_rms) Now we find sources that have 5 connected pixels that are each greater than the corresponding pixel-wise threshold image defined above. Because the threshold includes the background, we do not subtract the background from the data here. We also input a 2D circular Gaussian kernel with a FWHM of 2 pixels to filter the image prior to thresholding: .. doctest-requires:: scipy, skimage >>> from astropy.stats import gaussian_fwhm_to_sigma >>> sigma = 2.0 * gaussian_fwhm_to_sigma # FWHM = 2. >>> kernel = Gaussian2DKernel(sigma, x_size=3, y_size=3) >>> kernel.normalize() >>> segm = detect_sources(data, threshold, npixels=5, filter_kernel=kernel) The result is a :class:`~photutils.segmentation.SegmentationImage` where sources are labeled by different positive integer values. Now let's measure the properties of the detected sources defined in the segmentation image with the minimum number of inputs to :func:`~photutils.segmentation.source_properties`: .. doctest-requires:: scipy, skimage >>> from photutils import source_properties >>> cat = source_properties(data, segm) >>> tbl = cat.to_table() >>> tbl['xcentroid'].info.format = '.10f' # optional format >>> tbl['ycentroid'].info.format = '.10f' >>> tbl['cxy'].info.format = '.10f' >>> tbl['cyy'].info.format = '.10f' >>> print(tbl) id xcentroid ycentroid ... cxy cyy pix pix ... 1 / pix2 1 / pix2 --- -------------- -------------- ... ------------- ------------ 1 235.1877193594 1.0991961528 ... -0.1920746278 1.2174907202 2 494.1399411137 5.7704424681 ... -0.5417755959 1.0244063365 3 207.3757266577 10.0753101977 ... 0.7764083298 0.4650609454 4 364.6895486330 10.8904591886 ... -0.5478887625 0.3040810336 5 258.1927719916 11.9617673653 ... 0.0443061873 0.3218333804 ... ... ... ... ... ... 82 74.4566900469 259.8333035016 ... 0.4789130934 0.5657327432 83 82.5392499545 267.7189336671 ... 0.0675912618 0.2448815867 84 477.6743849969 267.8914460476 ... -0.0214056255 0.3919147600 85 139.7637841053 275.0413983586 ... 0.2329325365 0.3523911744 86 434.0406656782 285.6070270358 ... -0.0607421731 0.0555135558 Length = 86 rows Let's use the measured morphological properties to define approximate isophotal ellipses for each source: .. doctest-requires:: scipy, skimage >>> from photutils import source_properties, EllipticalAperture >>> cat = source_properties(data, segm) >>> r = 3. # approximate isophotal extent >>> apertures = [] >>> for obj in cat: ... position = (obj.xcentroid.value, obj.ycentroid.value) ... a = obj.semimajor_axis_sigma.value * r ... b = obj.semiminor_axis_sigma.value * r ... theta = obj.orientation.value ... apertures.append(EllipticalAperture(position, a, b, theta=theta)) Now let's plot the results: .. 
doctest-skip:: >>> import numpy as np >>> import matplotlib.pyplot as plt >>> from astropy.visualization import SqrtStretch >>> from astropy.visualization.mpl_normalize import ImageNormalize >>> norm = ImageNormalize(stretch=SqrtStretch()) >>> fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(8, 8)) >>> ax1.imshow(data, origin='lower', cmap='Greys_r', norm=norm) >>> ax2.imshow(segm, origin='lower', cmap=segm.cmap(random_state=12345)) >>> for aperture in apertures: ... aperture.plot(color='blue', lw=1.5, alpha=0.5, ax=ax1) ... aperture.plot(color='white', lw=1.5, alpha=1.0, ax=ax2) .. plot:: import numpy as np import matplotlib.pyplot as plt from astropy.stats import gaussian_fwhm_to_sigma from astropy.convolution import Gaussian2DKernel from astropy.visualization import SqrtStretch from astropy.visualization.mpl_normalize import ImageNormalize from photutils.datasets import make_100gaussians_image from photutils import Background2D, MedianBackground from photutils import detect_threshold, detect_sources from photutils import source_properties from photutils import EllipticalAperture data = make_100gaussians_image() bkg_estimator = MedianBackground() bkg = Background2D(data, (50, 50), filter_size=(3, 3), bkg_estimator=bkg_estimator) threshold = bkg.background + (3. * bkg.background_rms) sigma = 2.0 * gaussian_fwhm_to_sigma # FWHM = 2. kernel = Gaussian2DKernel(sigma, x_size=3, y_size=3) kernel.normalize() segm = detect_sources(data, threshold, npixels=5, filter_kernel=kernel) cat = source_properties(data, segm) apertures = [] for obj in cat: position = (obj.xcentroid.value, obj.ycentroid.value) a = obj.semimajor_axis_sigma.value * 3. b = obj.semiminor_axis_sigma.value * 3. theta = obj.orientation.value apertures.append(EllipticalAperture(position, a, b, theta=theta)) norm = ImageNormalize(stretch=SqrtStretch()) fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(8, 8)) ax1.imshow(data, origin='lower', cmap='Greys_r', norm=norm) ax2.imshow(segm, origin='lower', cmap=segm.cmap(random_state=12345)) for aperture in apertures: aperture.plot(color='blue', lw=1.5, alpha=0.5, ax=ax1) aperture.plot(color='white', lw=1.5, alpha=1.0, ax=ax2) We can also specify a specific subset of sources, defined by their labels in the segmentation image: .. doctest-requires:: scipy, skimage >>> labels = [1, 5, 20, 50, 75, 80] >>> cat = source_properties(data, segm, labels=labels) >>> tbl2 = cat.to_table() >>> tbl2['xcentroid'].info.format = '.10f' # optional format >>> tbl2['ycentroid'].info.format = '.10f' >>> tbl2['cxy'].info.format = '.10f' >>> tbl2['cyy'].info.format = '.10f' >>> print(tbl2) id xcentroid ycentroid ... cxy cyy pix pix ... 1 / pix2 1 / pix2 --- -------------- -------------- ... ------------- ------------ 1 235.1877193594 1.0991961528 ... -0.1920746278 1.2174907202 5 258.1927719916 11.9617673653 ... 0.0443061873 0.3218333804 20 347.1775610058 66.5509575226 ... 0.1152773914 0.3595643546 50 380.7968731993 174.4185137066 ... -1.0305829129 1.2769245589 75 32.1762188270 241.1584869458 ... 0.1968605940 0.6011670347 80 355.6148340498 252.1422532191 ... 0.1785980510 0.4003324922 By default, the :meth:`~photutils.SourceCatalog.to_table` method will include most scalar-valued properties from :class:`~photutils.segmentation.SourceProperties`, but a subset of properties can also be specified (or excluded) in the `~astropy.table.QTable`: .. 
doctest-requires:: scipy, skimage >>> labels = [1, 5, 20, 50, 75, 80] >>> cat = source_properties(data, segm, labels=labels) >>> columns = ['id', 'xcentroid', 'ycentroid', 'source_sum', 'area'] >>> tbl3 = cat.to_table(columns=columns) >>> tbl3['xcentroid'].info.format = '.10f' # optional format >>> tbl3['ycentroid'].info.format = '.10f' >>> tbl3['source_sum'].info.format = '.10f' >>> print(tbl3) id xcentroid ycentroid source_sum area pix pix pix2 --- -------------- -------------- -------------- ---- 1 235.1877193594 1.0991961528 496.6356232064 27.0 5 258.1927719916 11.9617673653 347.6113420724 25.0 20 347.1775610058 66.5509575226 415.9925696777 31.0 50 380.7968731993 174.4185137066 145.7264175178 11.0 75 32.1762188270 241.1584869458 398.4114037113 29.0 80 355.6148340498 252.1422532191 906.4226000367 45.0 A `~astropy.wcs.WCS` transformation can also be input to :func:`~photutils.segmentation.source_properties` via the ``wcs`` keyword, in which case the sky coordinates at the source centroids will be returned. Background Properties ^^^^^^^^^^^^^^^^^^^^^ Like with :func:`~photutils.aperture_photometry`, the ``data`` array that is input to :func:`~photutils.segmentation.source_properties` should be background subtracted. If you input your background image (which should have already been subtracted from the data) into the ``background`` keyword of :func:`~photutils.segmentation.source_properties`, the background properties for each source will also be calculated: .. doctest-requires:: scipy, skimage >>> labels = [1, 5, 20, 50, 75, 80] >>> cat = source_properties(data, segm, labels=labels, ... background=bkg.background) >>> columns = ['id', 'background_at_centroid', 'background_mean', ... 'background_sum'] >>> tbl4 = cat.to_table(columns=columns) >>> tbl4['background_at_centroid'].info.format = '.10f' # optional format >>> tbl4['background_mean'].info.format = '{:.10f}' >>> tbl4['background_sum'].info.format = '{:.10f}' >>> print(tbl4) id background_at_centroid background_mean background_sum --- ---------------------- --------------- -------------- 1 5.2020326493 5.2021208257 140.4572622937 5 5.2137810422 5.2137801450 130.3445036251 20 5.2788524399 5.2787718244 163.6419265556 50 5.1986504100 5.1986157424 57.1847731664 75 5.2106279087 5.2106057357 151.1075663349 80 5.1249167847 5.1250208080 230.6259363620 Photometric Errors ^^^^^^^^^^^^^^^^^^ :func:`~photutils.segmentation.source_properties` requires inputting a *total* error array, i.e. the background-only error plus Poisson noise due to individual sources. The :func:`~photutils.utils.calc_total_error` function can be used to calculate the total error array from a background-only error array and an effective gain. The ``effective_gain``, which is the ratio of counts (electrons or photons) to the units of the data, is used to include the Poisson noise from the sources. ``effective_gain`` can either be a scalar value or a 2D image with the same shape as the ``data``. A 2D effective gain image is useful for mosaic images that have variable depths (i.e., exposure times) across the field. For example, one should use an exposure-time map as the ``effective_gain`` for a variable depth mosaic image in count-rate units. Let's assume our synthetic data is in units of electrons per second. In that case, the ``effective_gain`` should be the exposure time (here we set it to 500 seconds): .. doctest-requires:: scipy, skimage >>> from photutils.utils import calc_total_error >>> labels = [1, 5, 20, 50, 75, 80] >>> effective_gain = 500. 
>>> error = calc_total_error(data, bkg.background_rms, effective_gain) >>> cat = source_properties(data, segm, labels=labels, error=error) >>> columns = ['id', 'xcentroid', 'ycentroid', 'source_sum', ... 'source_sum_err'] >>> tbl5 = cat.to_table(columns=columns) >>> tbl5['xcentroid'].info.format = '.10f' # optional format >>> tbl5['ycentroid'].info.format = '.10f' >>> print(tbl5) id xcentroid ycentroid source_sum source_sum_err pix pix --- -------------- -------------- ------------- -------------- 1 235.1877193594 1.0991961528 496.635623206 11.0788667038 5 258.1927719916 11.9617673653 347.611342072 10.723068215 20 347.1775610058 66.5509575226 415.992569678 12.1782078398 50 380.7968731993 174.4185137066 145.726417518 7.29536295106 75 32.1762188270 241.1584869458 398.411403711 11.553412812 80 355.6148340498 252.1422532191 906.422600037 13.7686828317 `~photutils.segmentation.SourceProperties.source_sum` and `~photutils.segmentation.SourceProperties.source_sum_err` are the instrumental flux and propagated flux error within the source segments. Pixel Masking ^^^^^^^^^^^^^ Pixels can be completely ignored/excluded (e.g. bad pixels) when measuring the source properties by providing a boolean mask image via the ``mask`` keyword (`True` pixel values are masked) to the :func:`~photutils.segmentation.source_properties` function or :class:`~photutils.segmentation.SourceProperties` class. Filtering ^^^^^^^^^ `SExtractor`_'s centroid and morphological parameters are always calculated from a filtered "detection" image. The usual downside of the filtering is the sources will be made more circular than they actually are. If you wish to reproduce `SExtractor`_ results, then use the ``filter_kernel`` keyword to :func:`~photutils.segmentation.source_properties` to filter the ``data`` prior to centroid and morphological measurements. The input kernel should be the same one used to define the source segments in :func:`~photutils.segmentation.detect_sources`. If ``filter_kernel`` is `None`, then the centroid and morphological measurements will be performed on the unfiltered ``data``. Note that photometry is *always* performed on the unfiltered ``data``. Reference/API ------------- .. automodapi:: photutils.segmentation :no-heading: .. _SExtractor: http://www.astromatic.net/software/sextractor photutils-0.4/docs/utils.rst0000644000214200020070000000040113175634532020421 0ustar lbradleySTSCI\science00000000000000Utility Functions (`photutils.utils`) ===================================== Introduction ------------ The `photutils.utils` package contains general-purpose utility functions. Reference/API ------------- .. automodapi:: photutils.utils :no-heading: photutils-0.4/ez_setup.py0000644000214200020070000003037113175634532020020 0ustar lbradleySTSCI\science00000000000000#!/usr/bin/env python """ Setuptools bootstrapping installer. Maintained at https://github.com/pypa/setuptools/tree/bootstrap. Run this script to install or upgrade setuptools. This method is DEPRECATED. Check https://github.com/pypa/setuptools/issues/581 for more details. """ import os import shutil import sys import tempfile import zipfile import optparse import subprocess import platform import textwrap import contextlib from distutils import log try: from urllib.request import urlopen except ImportError: from urllib2 import urlopen try: from site import USER_SITE except ImportError: USER_SITE = None # 33.1.1 is the last version that supports setuptools self upgrade/installation. 
DEFAULT_VERSION = "33.1.1" DEFAULT_URL = "https://pypi.io/packages/source/s/setuptools/" DEFAULT_SAVE_DIR = os.curdir DEFAULT_DEPRECATION_MESSAGE = "ez_setup.py is deprecated and when using it setuptools will be pinned to {0} since it's the last version that supports setuptools self upgrade/installation, check https://github.com/pypa/setuptools/issues/581 for more info; use pip to install setuptools" MEANINGFUL_INVALID_ZIP_ERR_MSG = 'Maybe {0} is corrupted, delete it and try again.' log.warn(DEFAULT_DEPRECATION_MESSAGE.format(DEFAULT_VERSION)) def _python_cmd(*args): """ Execute a command. Return True if the command succeeded. """ args = (sys.executable,) + args return subprocess.call(args) == 0 def _install(archive_filename, install_args=()): """Install Setuptools.""" with archive_context(archive_filename): # installing log.warn('Installing Setuptools') if not _python_cmd('setup.py', 'install', *install_args): log.warn('Something went wrong during the installation.') log.warn('See the error message above.') # exitcode will be 2 return 2 def _build_egg(egg, archive_filename, to_dir): """Build Setuptools egg.""" with archive_context(archive_filename): # building an egg log.warn('Building a Setuptools egg in %s', to_dir) _python_cmd('setup.py', '-q', 'bdist_egg', '--dist-dir', to_dir) # returning the result log.warn(egg) if not os.path.exists(egg): raise IOError('Could not build the egg.') class ContextualZipFile(zipfile.ZipFile): """Supplement ZipFile class to support context manager for Python 2.6.""" def __enter__(self): return self def __exit__(self, type, value, traceback): self.close() def __new__(cls, *args, **kwargs): """Construct a ZipFile or ContextualZipFile as appropriate.""" if hasattr(zipfile.ZipFile, '__exit__'): return zipfile.ZipFile(*args, **kwargs) return super(ContextualZipFile, cls).__new__(cls) @contextlib.contextmanager def archive_context(filename): """ Unzip filename to a temporary directory, set to the cwd. The unzipped target is cleaned up after. """ tmpdir = tempfile.mkdtemp() log.warn('Extracting in %s', tmpdir) old_wd = os.getcwd() try: os.chdir(tmpdir) try: with ContextualZipFile(filename) as archive: archive.extractall() except zipfile.BadZipfile as err: if not err.args: err.args = ('', ) err.args = err.args + ( MEANINGFUL_INVALID_ZIP_ERR_MSG.format(filename), ) raise # going in the directory subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0]) os.chdir(subdir) log.warn('Now working in %s', subdir) yield finally: os.chdir(old_wd) shutil.rmtree(tmpdir) def _do_download(version, download_base, to_dir, download_delay): """Download Setuptools.""" py_desig = 'py{sys.version_info[0]}.{sys.version_info[1]}'.format(sys=sys) tp = 'setuptools-{version}-{py_desig}.egg' egg = os.path.join(to_dir, tp.format(**locals())) if not os.path.exists(egg): archive = download_setuptools(version, download_base, to_dir, download_delay) _build_egg(egg, archive, to_dir) sys.path.insert(0, egg) # Remove previously-imported pkg_resources if present (see # https://bitbucket.org/pypa/setuptools/pull-request/7/ for details). if 'pkg_resources' in sys.modules: _unload_pkg_resources() import setuptools setuptools.bootstrap_install_from = egg def use_setuptools( version=DEFAULT_VERSION, download_base=DEFAULT_URL, to_dir=DEFAULT_SAVE_DIR, download_delay=15): """ Ensure that a setuptools version is installed. Return None. Raise SystemExit if the requested version or later cannot be installed. 
""" to_dir = os.path.abspath(to_dir) # prior to importing, capture the module state for # representative modules. rep_modules = 'pkg_resources', 'setuptools' imported = set(sys.modules).intersection(rep_modules) try: import pkg_resources pkg_resources.require("setuptools>=" + version) # a suitable version is already installed return except ImportError: # pkg_resources not available; setuptools is not installed; download pass except pkg_resources.DistributionNotFound: # no version of setuptools was found; allow download pass except pkg_resources.VersionConflict as VC_err: if imported: _conflict_bail(VC_err, version) # otherwise, unload pkg_resources to allow the downloaded version to # take precedence. del pkg_resources _unload_pkg_resources() return _do_download(version, download_base, to_dir, download_delay) def _conflict_bail(VC_err, version): """ Setuptools was imported prior to invocation, so it is unsafe to unload it. Bail out. """ conflict_tmpl = textwrap.dedent(""" The required version of setuptools (>={version}) is not available, and can't be installed while this script is running. Please install a more recent version first, using 'easy_install -U setuptools'. (Currently using {VC_err.args[0]!r}) """) msg = conflict_tmpl.format(**locals()) sys.stderr.write(msg) sys.exit(2) def _unload_pkg_resources(): sys.meta_path = [ importer for importer in sys.meta_path if importer.__class__.__module__ != 'pkg_resources.extern' ] del_modules = [ name for name in sys.modules if name.startswith('pkg_resources') ] for mod_name in del_modules: del sys.modules[mod_name] def _clean_check(cmd, target): """ Run the command to download target. If the command fails, clean up before re-raising the error. """ try: subprocess.check_call(cmd) except subprocess.CalledProcessError: if os.access(target, os.F_OK): os.unlink(target) raise def download_file_powershell(url, target): """ Download the file at url to target using Powershell. Powershell will validate trust. Raise an exception if the command cannot complete. 
""" target = os.path.abspath(target) ps_cmd = ( "[System.Net.WebRequest]::DefaultWebProxy.Credentials = " "[System.Net.CredentialCache]::DefaultCredentials; " '(new-object System.Net.WebClient).DownloadFile("%(url)s", "%(target)s")' % locals() ) cmd = [ 'powershell', '-Command', ps_cmd, ] _clean_check(cmd, target) def has_powershell(): """Determine if Powershell is available.""" if platform.system() != 'Windows': return False cmd = ['powershell', '-Command', 'echo test'] with open(os.path.devnull, 'wb') as devnull: try: subprocess.check_call(cmd, stdout=devnull, stderr=devnull) except Exception: return False return True download_file_powershell.viable = has_powershell def download_file_curl(url, target): cmd = ['curl', url, '--location', '--silent', '--output', target] _clean_check(cmd, target) def has_curl(): cmd = ['curl', '--version'] with open(os.path.devnull, 'wb') as devnull: try: subprocess.check_call(cmd, stdout=devnull, stderr=devnull) except Exception: return False return True download_file_curl.viable = has_curl def download_file_wget(url, target): cmd = ['wget', url, '--quiet', '--output-document', target] _clean_check(cmd, target) def has_wget(): cmd = ['wget', '--version'] with open(os.path.devnull, 'wb') as devnull: try: subprocess.check_call(cmd, stdout=devnull, stderr=devnull) except Exception: return False return True download_file_wget.viable = has_wget def download_file_insecure(url, target): """Use Python to download the file, without connection authentication.""" src = urlopen(url) try: # Read all the data in one block. data = src.read() finally: src.close() # Write all the data in one block to avoid creating a partial file. with open(target, "wb") as dst: dst.write(data) download_file_insecure.viable = lambda: True def get_best_downloader(): downloaders = ( download_file_powershell, download_file_curl, download_file_wget, download_file_insecure, ) viable_downloaders = (dl for dl in downloaders if dl.viable()) return next(viable_downloaders, None) def download_setuptools( version=DEFAULT_VERSION, download_base=DEFAULT_URL, to_dir=DEFAULT_SAVE_DIR, delay=15, downloader_factory=get_best_downloader): """ Download setuptools from a specified location and return its filename. `version` should be a valid setuptools version number that is available as an sdist for download under the `download_base` URL (which should end with a '/'). `to_dir` is the directory where the egg will be downloaded. `delay` is the number of seconds to pause before an actual download attempt. ``downloader_factory`` should be a function taking no arguments and returning a function for downloading a URL to a target. """ # making sure we use the absolute path to_dir = os.path.abspath(to_dir) zip_name = "setuptools-%s.zip" % version url = download_base + zip_name saveto = os.path.join(to_dir, zip_name) if not os.path.exists(saveto): # Avoid repeated downloads log.warn("Downloading %s", url) downloader = downloader_factory() downloader(url, saveto) return os.path.realpath(saveto) def _build_install_args(options): """ Build the arguments to 'python setup.py install' on the setuptools package. Returns list of command line arguments. 
""" return ['--user'] if options.user_install else [] def _parse_args(): """Parse the command line for options.""" parser = optparse.OptionParser() parser.add_option( '--user', dest='user_install', action='store_true', default=False, help='install in user site package') parser.add_option( '--download-base', dest='download_base', metavar="URL", default=DEFAULT_URL, help='alternative URL from where to download the setuptools package') parser.add_option( '--insecure', dest='downloader_factory', action='store_const', const=lambda: download_file_insecure, default=get_best_downloader, help='Use internal, non-validating downloader' ) parser.add_option( '--version', help="Specify which version to download", default=DEFAULT_VERSION, ) parser.add_option( '--to-dir', help="Directory to save (and re-use) package", default=DEFAULT_SAVE_DIR, ) options, args = parser.parse_args() # positional arguments are ignored return options def _download_args(options): """Return args for download_setuptools function from cmdline args.""" return dict( version=options.version, download_base=options.download_base, downloader_factory=options.downloader_factory, to_dir=options.to_dir, ) def main(): """Install or upgrade setuptools and EasyInstall.""" options = _parse_args() archive = download_setuptools(**_download_args(options)) return _install(archive, _build_install_args(options)) if __name__ == '__main__': sys.exit(main()) photutils-0.4/LICENSE.rst0000644000214200020070000000273413175634532017426 0ustar lbradleySTSCI\science00000000000000Copyright (c) 2011-2017, Photutils developers All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the Photutils Team nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. photutils-0.4/licenses/0000755000214200020070000000000013175654702017412 5ustar lbradleySTSCI\science00000000000000photutils-0.4/licenses/README.rst0000644000214200020070000000036313175634532021102 0ustar lbradleySTSCI\science00000000000000Licenses ======== This directory holds license and credit information for works Photutils is derived from or distributes, and/or datasets. The license file for the Photutils package itself is placed in the root directory of this repository. 
photutils-0.4/LONG_DESCRIPTION.rst0000644000214200020070000000057013055576313020741 0ustar lbradleySTSCI\science00000000000000 * Code: https://github.com/astropy/photutils * Docs: https://photutils.readthedocs.io/ **Photutils** is an `affiliated package `_ of `Astropy `_ to provide tools for detecting and performing photometry of astronomical sources. It is an open source (BSD licensed) Python package. Contributions welcome! photutils-0.4/photutils/0000755000214200020070000000000013175654702017640 5ustar lbradleySTSCI\science00000000000000photutils-0.4/photutils/__init__.py0000644000214200020070000000166213175634532021755 0ustar lbradleySTSCI\science00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst """ Photutils is an Astropy affiliated package to provide tools for detecting and performing photometry of astronomical sources. It also has tools for background estimation, PSF matching, centroiding, and morphological measurements. """ # Affiliated packages may add whatever they like to this file, but # should keep this content at the top. # ---------------------------------------------------------------------------- from ._astropy_init import * # noqa # ---------------------------------------------------------------------------- if not _ASTROPY_SETUP_: # noqa from .aperture import * # noqa from .background import * # noqa from .centroids import * # noqa from .detection import * # noqa from .morphology import * # noqa from .psf import * # noqa from .segmentation import * # noqa photutils-0.4/photutils/_astropy_init.py0000644000214200020070000001234113175634532023075 0ustar lbradleySTSCI\science00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst __all__ = ['__version__', '__githash__', 'test'] # this indicates whether or not we are in the package's setup.py try: _ASTROPY_SETUP_ except NameError: from sys import version_info if version_info[0] >= 3: import builtins else: import __builtin__ as builtins builtins._ASTROPY_SETUP_ = False try: from .version import version as __version__ except ImportError: __version__ = '' try: from .version import githash as __githash__ except ImportError: __githash__ = '' # set up the test command def _get_test_runner(): import os from astropy.tests.helper import TestRunner return TestRunner(os.path.dirname(__file__)) def test(package=None, test_path=None, args=None, plugins=None, verbose=False, pastebin=None, remote_data=False, pep8=False, pdb=False, coverage=False, open_files=False, **kwargs): """ Run the tests using `py.test `__. A proper set of arguments is constructed and passed to `pytest.main`_. .. _py.test: http://pytest.org/latest/ .. _pytest.main: http://pytest.org/latest/builtin.html#pytest.main Parameters ---------- package : str, optional The name of a specific package to test, e.g. 'io.fits' or 'utils'. If nothing is specified all default tests are run. test_path : str, optional Specify location to test by path. May be a single file or directory. Must be specified absolutely or relative to the calling directory. args : str, optional Additional arguments to be passed to pytest.main_ in the ``args`` keyword argument. plugins : list, optional Plugins to be passed to pytest.main_ in the ``plugins`` keyword argument. verbose : bool, optional Convenience option to turn on verbose output from py.test_. Passing True is the same as specifying ``'-v'`` in ``args``. pastebin : {'failed','all',None}, optional Convenience option for turning on py.test_ pastebin output. 
Set to ``'failed'`` to upload info for failed tests, or ``'all'`` to upload info for all tests. remote_data : bool, optional Controls whether to run tests marked with @remote_data. These tests use online data and are not run by default. Set to True to run these tests. pep8 : bool, optional Turn on PEP8 checking via the `pytest-pep8 plugin `_ and disable normal tests. Same as specifying ``'--pep8 -k pep8'`` in ``args``. pdb : bool, optional Turn on PDB post-mortem analysis for failing tests. Same as specifying ``'--pdb'`` in ``args``. coverage : bool, optional Generate a test coverage report. The result will be placed in the directory htmlcov. open_files : bool, optional Fail when any tests leave files open. Off by default, because this adds extra run time to the test suite. Requires the `psutil `_ package. parallel : int, optional When provided, run the tests in parallel on the specified number of CPUs. If parallel is negative, it will use the all the cores on the machine. Requires the `pytest-xdist `_ plugin installed. Only available when using Astropy 0.3 or later. kwargs Any additional keywords passed into this function will be passed on to the astropy test runner. This allows use of test-related functionality implemented in later versions of astropy without explicitly updating the package template. """ test_runner = _get_test_runner() return test_runner.run_tests( package=package, test_path=test_path, args=args, plugins=plugins, verbose=verbose, pastebin=pastebin, remote_data=remote_data, pep8=pep8, pdb=pdb, coverage=coverage, open_files=open_files, **kwargs) if not _ASTROPY_SETUP_: # noqa import os from warnings import warn from astropy.config.configuration import ( update_default_config, ConfigurationDefaultMissingError, ConfigurationDefaultMissingWarning) # add these here so we only need to cleanup the namespace at the end config_dir = None if not os.environ.get('ASTROPY_SKIP_CONFIG_UPDATE', False): config_dir = os.path.dirname(__file__) config_template = os.path.join(config_dir, __package__ + ".cfg") if os.path.isfile(config_template): try: update_default_config( __package__, config_dir, version=__version__) except TypeError as orig_error: try: update_default_config(__package__, config_dir) except ConfigurationDefaultMissingError as e: wmsg = (e.args[0] + " Cannot install default profile. If you are " "importing from source, this is expected.") warn(ConfigurationDefaultMissingWarning(wmsg)) del e except Exception: raise orig_error photutils-0.4/photutils/_compiler.c0000644000214200020070000000573113175654676021775 0ustar lbradleySTSCI\science00000000000000#include /*************************************************************************** * Macros for determining the compiler version. * * These are borrowed from boost, and majorly abridged to include only * the compilers we care about. ***************************************************************************/ #ifndef PY3K #if PY_MAJOR_VERSION >= 3 #define PY3K 1 #else #define PY3K 0 #endif #endif #define STRINGIZE(X) DO_STRINGIZE(X) #define DO_STRINGIZE(X) #X #if defined __clang__ /* Clang C++ emulates GCC, so it has to appear early. 
*/ # define COMPILER "Clang version " __clang_version__ #elif defined(__INTEL_COMPILER) || defined(__ICL) || defined(__ICC) || defined(__ECC) /* Intel */ # if defined(__INTEL_COMPILER) # define INTEL_VERSION __INTEL_COMPILER # elif defined(__ICL) # define INTEL_VERSION __ICL # elif defined(__ICC) # define INTEL_VERSION __ICC # elif defined(__ECC) # define INTEL_VERSION __ECC # endif # define COMPILER "Intel C compiler version " STRINGIZE(INTEL_VERSION) #elif defined(__GNUC__) /* gcc */ # define COMPILER "GCC version " __VERSION__ #elif defined(__SUNPRO_CC) /* Sun Workshop Compiler */ # define COMPILER "Sun compiler version " STRINGIZE(__SUNPRO_CC) #elif defined(_MSC_VER) /* Microsoft Visual C/C++ Must be last since other compilers define _MSC_VER for compatibility as well */ # if _MSC_VER < 1200 # define COMPILER_VERSION 5.0 # elif _MSC_VER < 1300 # define COMPILER_VERSION 6.0 # elif _MSC_VER == 1300 # define COMPILER_VERSION 7.0 # elif _MSC_VER == 1310 # define COMPILER_VERSION 7.1 # elif _MSC_VER == 1400 # define COMPILER_VERSION 8.0 # elif _MSC_VER == 1500 # define COMPILER_VERSION 9.0 # elif _MSC_VER == 1600 # define COMPILER_VERSION 10.0 # else # define COMPILER_VERSION _MSC_VER # endif # define COMPILER "Microsoft Visual C++ version " STRINGIZE(COMPILER_VERSION) #else /* Fallback */ # define COMPILER "Unknown compiler" #endif /*************************************************************************** * Module-level ***************************************************************************/ struct module_state { /* The Sun compiler can't handle empty structs */ #if defined(__SUNPRO_C) || defined(_MSC_VER) int _dummy; #endif }; #if PY3K static struct PyModuleDef moduledef = { PyModuleDef_HEAD_INIT, "_compiler", NULL, sizeof(struct module_state), NULL, NULL, NULL, NULL, NULL }; #define INITERROR return NULL PyMODINIT_FUNC PyInit__compiler(void) #else #define INITERROR return PyMODINIT_FUNC init_compiler(void) #endif { PyObject* m; #if PY3K m = PyModule_Create(&moduledef); #else m = Py_InitModule3("_compiler", NULL, NULL); #endif if (m == NULL) INITERROR; PyModule_AddStringConstant(m, "compiler", COMPILER); #if PY3K return m; #endif } photutils-0.4/photutils/aperture/0000755000214200020070000000000013175654702021467 5ustar lbradleySTSCI\science00000000000000photutils-0.4/photutils/aperture/__init__.py0000644000214200020070000000061513175634532023601 0ustar lbradleySTSCI\science00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst """ This subpackage contains modules and packages for identifying sources in an astronomical image. """ from .bounding_box import * # noqa from .circle import * # noqa from .core import * # noqa from .ellipse import * # noqa from .mask import * # noqa from .rectangle import * # noqa photutils-0.4/photutils/aperture/bounding_box.py0000644000214200020070000001570013067540023024507 0ustar lbradleySTSCI\science00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst from __future__ import (absolute_import, division, print_function, unicode_literals) import numpy as np from astropy.io.fits.util import _is_int __all__ = ['BoundingBox'] class BoundingBox(object): """ A rectangular bounding box in integer (not float) pixel indices. Parameters ---------- ixmin, ixmax, iymin, iymax : int The bounding box pixel indices. Note that the upper values (``iymax`` and ``ixmax``) are exclusive as for normal slices in Python. 
The lower values (``ixmin`` and ``iymin``) must not be greater than the respective upper values (``ixmax`` and ``iymax``). Examples -------- >>> from photutils import BoundingBox >>> # constructing a BoundingBox like this is cryptic: >>> bbox = BoundingBox(1, 10, 2, 20) >>> # it's better to use keyword arguments for readability: >>> bbox = BoundingBox(ixmin=1, ixmax=10, iymin=2, iymax=20) >>> bbox # nice repr, useful for interactive work BoundingBox(ixmin=1, ixmax=10, iymin=2, iymax=20) >>> # sometimes it's useful to check if two bounding boxes are the same >>> bbox == BoundingBox(ixmin=1, ixmax=10, iymin=2, iymax=20) True >>> bbox == BoundingBox(ixmin=7, ixmax=10, iymin=2, iymax=20) False >>> # "shape" and "slices" can be useful when working with numpy arrays >>> bbox.shape # numpy order: (y, x) (18, 9) >>> bbox.slices # numpy order: (y, x) (slice(2, 20, None), slice(1, 10, None)) >>> # "extent" is useful when plotting the BoundingBox with matplotlib >>> bbox.extent # matplotlib order: (x, y) (0.5, 9.5, 1.5, 19.5) """ def __init__(self, ixmin, ixmax, iymin, iymax): if not _is_int(ixmin): raise TypeError('ixmin must be an integer') if not _is_int(ixmax): raise TypeError('ixmax must be an integer') if not _is_int(iymin): raise TypeError('iymin must be an integer') if not _is_int(iymax): raise TypeError('iymax must be an integer') if ixmin > ixmax: raise ValueError('ixmin must be <= ixmax') if iymin > iymax: raise ValueError('iymin must be <= iymax') self.ixmin = ixmin self.ixmax = ixmax self.iymin = iymin self.iymax = iymax @classmethod def _from_float(cls, xmin, xmax, ymin, ymax): """ Return the smallest bounding box that fully contains a given rectangle defined by float coordinate values. Following the pixel index convention, an integer index corresponds to the center of a pixel and the pixel edges span from (index - 0.5) to (index + 0.5). For example, the pixel edge spans of the following pixels are: - pixel 0: from -0.5 to 0.5 - pixel 1: from 0.5 to 1.5 - pixel 2: from 1.5 to 2.5 In addition, because `BoundingBox` upper limits are exclusive (by definition), 1 is added to the upper pixel edges. See examples below. Parameters ---------- xmin, xmax, ymin, ymax : float Float coordinates defining a rectangle. The lower values (``xmin`` and ``ymin``) must not be greater than the respective upper values (``xmax`` and ``ymax``). Returns ------- bbox : `BoundingBox` object The minimal ``BoundingBox`` object fully containing the input rectangle coordinates. Examples -------- >>> from photutils import BoundingBox >>> BoundingBox._from_float(xmin=1.0, xmax=10.0, ymin=2.0, ymax=20.0) BoundingBox(ixmin=1, ixmax=11, iymin=2, iymax=21) >>> BoundingBox._from_float(xmin=1.4, xmax=10.4, ymin=1.6, ymax=10.6) BoundingBox(ixmin=1, ixmax=11, iymin=2, iymax=12) """ ixmin = int(np.floor(xmin + 0.5)) ixmax = int(np.ceil(xmax + 0.5)) iymin = int(np.floor(ymin + 0.5)) iymax = int(np.ceil(ymax + 0.5)) return cls(ixmin, ixmax, iymin, iymax) def __eq__(self, other): if not isinstance(other, BoundingBox): raise TypeError('Can compare BoundingBox only to another ' 'BoundingBox.') return ( (self.ixmin == other.ixmin) and (self.ixmax == other.ixmax) and (self.iymin == other.iymin) and (self.iymax == other.iymax) ) def __repr__(self): data = self.__dict__ data['name'] = self.__class__.__name__ fmt = ('{name}(ixmin={ixmin}, ixmax={ixmax}, iymin={iymin}, ' 'iymax={iymax})') return fmt.format(**data) @property def shape(self): """ The ``(ny, nx)`` shape of the bounding box. 
""" return self.iymax - self.iymin, self.ixmax - self.ixmin @property def slices(self): """ The bounding box as a tuple of `slice` objects. The slice tuple is in numpy axis order (i.e. ``(y, x)``) and therefore can be used to slice numpy arrays. """ return (slice(self.iymin, self.iymax), slice(self.ixmin, self.ixmax)) @property def extent(self): """ The extent of the mask, defined as the ``(xmin, xmax, ymin, ymax)`` bounding box from the bottom-left corner of the lower-left pixel to the upper-right corner of the upper-right pixel. The upper edges here are the actual pixel positions of the edges, i.e. they are not "exclusive" indices used for python indexing. This is useful for plotting the bounding box using Matplotlib. """ return ( self.ixmin - 0.5, self.ixmax - 0.5, self.iymin - 0.5, self.iymax - 0.5, ) def as_patch(self, **kwargs): """ Return a `matplotlib.patches.Rectangle` that represents the bounding box. Parameters ---------- kwargs Any keyword arguments accepted by `matplotlib.patches.Patch`. Returns ------- result : `matplotlib.patches.Rectangle` A matplotlib rectangular patch. Examples -------- .. plot:: :include-source: import matplotlib.pyplot as plt from photutils import BoundingBox bbox = BoundingBox(2, 7, 3, 8) fig = plt.figure() ax = fig.add_subplot(1, 1, 1) np.random.seed(12345) ax.imshow(np.random.random((10, 10)), interpolation='nearest', cmap='viridis') ax.add_patch(bbox.as_patch(facecolor='none', edgecolor='white', lw=2.)) """ from matplotlib.patches import Rectangle return Rectangle(xy=(self.extent[0], self.extent[2]), width=self.shape[1], height=self.shape[0], **kwargs) photutils-0.4/photutils/aperture/circle.py0000644000214200020070000003147713067540023023304 0ustar lbradleySTSCI\science00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst from __future__ import (absolute_import, division, print_function, unicode_literals) import math from astropy.coordinates import SkyCoord from .core import PixelAperture, SkyAperture from .bounding_box import BoundingBox from .mask import ApertureMask from ..geometry import circular_overlap_grid from ..utils.wcs_helpers import assert_angle_or_pixel __all__ = ['CircularMaskMixin', 'CircularAperture', 'CircularAnnulus', 'SkyCircularAperture', 'SkyCircularAnnulus'] class CircularMaskMixin(object): """ Mixin class to create masks for circular and circular-annulus aperture objects. """ def to_mask(self, method='exact', subpixels=5): """ Return a list of `~photutils.ApertureMask` objects, one for each aperture position. Parameters ---------- method : {'exact', 'center', 'subpixel'}, optional The method used to determine the overlap of the aperture on the pixel grid. Not all options are available for all aperture types. Note that the more precise methods are generally slower. The following methods are available: * ``'exact'`` (default): The the exact fractional overlap of the aperture and each pixel is calculated. The returned mask will contain values between 0 and 1. * ``'center'``: A pixel is considered to be entirely in or out of the aperture depending on whether its center is in or out of the aperture. The returned mask will contain values only of 0 (out) and 1 (in). * ``'subpixel'``: A pixel is divided into subpixels (see the ``subpixels`` keyword), each of which are considered to be entirely in or out of the aperture depending on whether its center is in or out of the aperture. If ``subpixels=1``, this method is equivalent to ``'center'``. The returned mask will contain values between 0 and 1. 
subpixels : int, optional For the ``'subpixel'`` method, resample pixels by this factor in each dimension. That is, each pixel is divided into ``subpixels ** 2`` subpixels. Returns ------- mask : list of `~photutils.ApertureMask` A list of aperture mask objects. """ use_exact, subpixels = self._translate_mask_mode(method, subpixels) if hasattr(self, 'r'): radius = self.r elif hasattr(self, 'r_out'): # annulus radius = self.r_out else: raise ValueError('Cannot determine the aperture radius.') masks = [] for bbox, edges in zip(self.bounding_boxes, self._centered_edges): ny, nx = bbox.shape mask = circular_overlap_grid(edges[0], edges[1], edges[2], edges[3], nx, ny, radius, use_exact, subpixels) # subtract the inner circle for an annulus if hasattr(self, 'r_in'): mask -= circular_overlap_grid(edges[0], edges[1], edges[2], edges[3], nx, ny, self.r_in, use_exact, subpixels) masks.append(ApertureMask(mask, bbox)) return masks class CircularAperture(CircularMaskMixin, PixelAperture): """ Circular aperture(s), defined in pixel coordinates. Parameters ---------- positions : array_like or `~astropy.units.Quantity` Pixel coordinates of the aperture center(s) in one of the following formats: * single ``(x, y)`` tuple * list of ``(x, y)`` tuples * ``Nx2`` or ``2xN`` `~numpy.ndarray` * ``Nx2`` or ``2xN`` `~astropy.units.Quantity` in pixel units Note that a ``2x2`` `~numpy.ndarray` or `~astropy.units.Quantity` is interpreted as ``Nx2``, i.e. two rows of (x, y) coordinates. r : float The radius of the aperture(s), in pixels. Raises ------ ValueError : `ValueError` If the input radius, ``r``, is negative. """ def __init__(self, positions, r): if r < 0: raise ValueError('r must be non-negative') self.positions = self._sanitize_positions(positions) self.r = float(r) self._params = ['r'] # TODO: make lazyproperty?, but update if positions or radius change @property def bounding_boxes(self): xmin = self.positions[:, 0] - self.r xmax = self.positions[:, 0] + self.r ymin = self.positions[:, 1] - self.r ymax = self.positions[:, 1] + self.r return [BoundingBox._from_float(x0, x1, y0, y1) for x0, x1, y0, y1 in zip(xmin, xmax, ymin, ymax)] # TODO: make lazyproperty?, but update if positions or radius change def area(self): return math.pi * self.r ** 2 def plot(self, origin=(0, 0), indices=None, ax=None, fill=False, **kwargs): import matplotlib.patches as mpatches plot_positions, ax, kwargs = self._prepare_plot( origin, indices, ax, fill, **kwargs) for position in plot_positions: patch = mpatches.Circle(position, self.r, **kwargs) ax.add_patch(patch) def to_sky(self, wcs, mode='all'): """ Convert the aperture to a `SkyCircularAperture` object defined in celestial coordinates. Parameters ---------- wcs : `~astropy.wcs.WCS` The world coordinate system (WCS) transformation to use. mode : {'all', 'wcs'}, optional Whether to do the transformation including distortions (``'all'``; default) or only including only the core WCS transformation (``'wcs'``). Returns ------- aperture : `SkyCircularAperture` object A `SkyCircularAperture` object. """ sky_params = self._to_sky_params(wcs, mode=mode) return SkyCircularAperture(**sky_params) class CircularAnnulus(CircularMaskMixin, PixelAperture): """ Circular annulus aperture(s), defined in pixel coordinates. 
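To make the overlap methods above concrete, a short illustrative sketch (the position and
radii are hypothetical) compares mask sums for the ``CircularAperture`` defined above against
the analytic areas; the same pattern applies to the annulus class defined here, whose mask is
the outer circle minus the inner one:

>>> from photutils import CircularAperture, CircularAnnulus
>>> aper = CircularAperture([(12.3, 15.7)], r=4.0)
>>> annulus = CircularAnnulus([(12.3, 15.7)], r_in=4.0, r_out=7.0)
>>> exact_sum = aper.to_mask(method='exact')[0].data.sum()    # ~ aper.area()
>>> center_sum = aper.to_mask(method='center')[0].data.sum()  # whole pixels only
>>> ann_sum = annulus.to_mask(method='exact')[0].data.sum()   # ~ annulus.area()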
Parameters ---------- positions : array_like or `~astropy.units.Quantity` Pixel coordinates of the aperture center(s) in one of the following formats: * single ``(x, y)`` tuple * list of ``(x, y)`` tuples * ``Nx2`` or ``2xN`` `~numpy.ndarray` * ``Nx2`` or ``2xN`` `~astropy.units.Quantity` in pixel units Note that a ``2x2`` `~numpy.ndarray` or `~astropy.units.Quantity` is interpreted as ``Nx2``, i.e. two rows of (x, y) coordinates. r_in : float The inner radius of the annulus. r_out : float The outer radius of the annulus. Raises ------ ValueError : `ValueError` If inner radius (``r_in``) is greater than outer radius (``r_out``). ValueError : `ValueError` If inner radius (``r_in``) is negative. """ def __init__(self, positions, r_in, r_out): if not (r_out > r_in): raise ValueError('r_out must be greater than r_in') if r_in < 0: raise ValueError('r_in must be non-negative') self.positions = self._sanitize_positions(positions) self.r_in = float(r_in) self.r_out = float(r_out) self._params = ['r_in', 'r_out'] @property def bounding_boxes(self): xmin = self.positions[:, 0] - self.r_out xmax = self.positions[:, 0] + self.r_out ymin = self.positions[:, 1] - self.r_out ymax = self.positions[:, 1] + self.r_out return [BoundingBox._from_float(x0, x1, y0, y1) for x0, x1, y0, y1 in zip(xmin, xmax, ymin, ymax)] def area(self): return math.pi * (self.r_out ** 2 - self.r_in ** 2) def plot(self, origin=(0, 0), indices=None, ax=None, fill=False, **kwargs): import matplotlib.patches as mpatches plot_positions, ax, kwargs = self._prepare_plot( origin, indices, ax, fill, **kwargs) resolution = 20 for position in plot_positions: patch_inner = mpatches.CirclePolygon(position, self.r_in, resolution=resolution) patch_outer = mpatches.CirclePolygon(position, self.r_out, resolution=resolution) path = self._make_annulus_path(patch_inner, patch_outer) patch = mpatches.PathPatch(path, **kwargs) ax.add_patch(patch) def to_sky(self, wcs, mode='all'): """ Convert the aperture to a `SkyCircularAnnulus` object defined in celestial coordinates. Parameters ---------- wcs : `~astropy.wcs.WCS` The world coordinate system (WCS) transformation to use. mode : {'all', 'wcs'}, optional Whether to do the transformation including distortions (``'all'``; default) or only including only the core WCS transformation (``'wcs'``). Returns ------- aperture : `SkyCircularAnnulus` object A `SkyCircularAnnulus` object. """ sky_params = self._to_sky_params(wcs, mode=mode) return SkyCircularAnnulus(**sky_params) class SkyCircularAperture(SkyAperture): """ Circular aperture(s), defined in sky coordinates. Parameters ---------- positions : `~astropy.coordinates.SkyCoord` Celestial coordinates of the aperture center(s). This can be either scalar coordinates or an array of coordinates. r : `~astropy.units.Quantity` The radius of the aperture(s), either in angular or pixel units. """ def __init__(self, positions, r): if isinstance(positions, SkyCoord): self.positions = positions else: raise TypeError('positions must be a SkyCoord object') assert_angle_or_pixel('r', r) self.r = r self._params = ['r'] def to_pixel(self, wcs, mode='all'): """ Convert the aperture to a `CircularAperture` object defined in pixel coordinates. Parameters ---------- wcs : `~astropy.wcs.WCS` The world coordinate system (WCS) transformation to use. mode : {'all', 'wcs'}, optional Whether to do the transformation including distortions (``'all'``; default) or only including only the core WCS transformation (``'wcs'``). 
Returns ------- aperture : `CircularAperture` object A `CircularAperture` object. """ pixel_params = self._to_pixel_params(wcs, mode=mode) return CircularAperture(**pixel_params) class SkyCircularAnnulus(SkyAperture): """ Circular annulus aperture(s), defined in sky coordinates. Parameters ---------- positions : `~astropy.coordinates.SkyCoord` Celestial coordinates of the aperture center(s). This can be either scalar coordinates or an array of coordinates. r_in : `~astropy.units.Quantity` The inner radius of the annulus, either in angular or pixel units. r_out : `~astropy.units.Quantity` The outer radius of the annulus, either in angular or pixel units. """ def __init__(self, positions, r_in, r_out): if isinstance(positions, SkyCoord): self.positions = positions else: raise TypeError('positions must be a SkyCoord object') assert_angle_or_pixel('r_in', r_in) assert_angle_or_pixel('r_out', r_out) if r_in.unit.physical_type != r_out.unit.physical_type: raise ValueError("r_in and r_out should either both be angles " "or in pixels.") self.r_in = r_in self.r_out = r_out self._params = ['r_in', 'r_out'] def to_pixel(self, wcs, mode='all'): """ Convert the aperture to a `CircularAnnulus` object defined in pixel coordinates. Parameters ---------- wcs : `~astropy.wcs.WCS` The world coordinate system (WCS) transformation to use. mode : {'all', 'wcs'}, optional Whether to do the transformation including distortions (``'all'``; default) or only including only the core WCS transformation (``'wcs'``). Returns ------- aperture : `CircularAnnulus` object A `CircularAnnulus` object. """ pixel_params = self._to_pixel_params(wcs, mode=mode) return CircularAnnulus(**pixel_params) photutils-0.4/photutils/aperture/core.py0000644000214200020070000010573013175634532022776 0ustar lbradleySTSCI\science00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst from __future__ import (absolute_import, division, print_function, unicode_literals) import abc import copy import warnings from collections import OrderedDict import six import numpy as np from astropy.coordinates import SkyCoord from astropy.io import fits from astropy.nddata import support_nddata from astropy.table import QTable import astropy.units as u from astropy.utils.exceptions import AstropyUserWarning from astropy.utils.misc import InheritDocstrings from astropy.wcs import WCS from astropy.wcs.utils import (skycoord_to_pixel, pixel_to_skycoord, wcs_to_celestial_frame) from ..utils import get_version_info from ..utils.wcs_helpers import pixel_scale_angle_at_skycoord __all__ = ['Aperture', 'SkyAperture', 'PixelAperture', 'aperture_photometry'] class _ABCMetaAndInheritDocstrings(InheritDocstrings, abc.ABCMeta): pass @six.add_metaclass(_ABCMetaAndInheritDocstrings) class Aperture(object): """ Abstract base class for all apertures. 
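A minimal sketch of the sky-to-pixel conversion implemented by the sky aperture classes above
(the WCS here is a bare, hypothetical TAN projection built only for illustration):

>>> import astropy.units as u
>>> from astropy.coordinates import SkyCoord
>>> from astropy.wcs import WCS
>>> from photutils import SkyCircularAperture
>>> wcs = WCS(naxis=2)                        # hypothetical minimal WCS
>>> wcs.wcs.ctype = ['RA---TAN', 'DEC--TAN']
>>> wcs.wcs.crval = [150.0, 2.0]
>>> wcs.wcs.crpix = [50.0, 50.0]
>>> wcs.wcs.cdelt = [-0.0001, 0.0001]         # degrees per pixel
>>> wcs.wcs.cunit = ['deg', 'deg']
>>> pos = SkyCoord(ra=150.0, dec=2.0, unit='deg')
>>> sky_aper = SkyCircularAperture(pos, r=1.5 * u.arcsec)
>>> pix_aper = sky_aper.to_pixel(wcs)         # -> CircularAperture in pixel units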
""" def __len__(self): if isinstance(self, SkyAperture) and self.positions.isscalar: return 1 return len(self.positions) def _positions_str(self, prefix=None): if isinstance(self, PixelAperture): return np.array2string(self.positions, separator=', ', prefix=prefix) elif isinstance(self, SkyAperture): return repr(self.positions) else: raise TypeError('Aperture must be a subclass of PixelAperture ' 'or SkyAperture') def __repr__(self): prefix = '<{0}('.format(self.__class__.__name__) params = [self._positions_str(prefix)] for param in self._params: params.append('{0}={1}'.format(param, getattr(self, param))) params = ', '.join(params) return '{0}{1})>'.format(prefix, params) def __str__(self): prefix = 'positions' cls_info = [ ('Aperture', self.__class__.__name__), (prefix, self._positions_str(prefix + ': '))] if self._params is not None: for param in self._params: cls_info.append((param, getattr(self, param))) fmt = ['{0}: {1}'.format(key, val) for key, val in cls_info] return '\n'.join(fmt) class PixelAperture(Aperture): """ Abstract base class for apertures defined in pixel coordinates. """ @staticmethod def _sanitize_positions(positions): if isinstance(positions, u.Quantity): if positions.unit is u.pixel: positions = np.atleast_2d(positions.value) else: raise u.UnitsError('positions should be in pixel units') elif isinstance(positions, (list, tuple, np.ndarray)): positions = np.atleast_2d(positions) if positions.shape[1] != 2: if positions.shape[0] == 2: positions = np.transpose(positions) else: raise TypeError('List or array of (x, y) pixel ' 'coordinates is expected, got "{0}".' .format(positions)) elif isinstance(positions, zip): # This is needed for zip to work seamlessly in Python 3 positions = np.atleast_2d(list(positions)) else: raise TypeError('List or array of (x, y) pixel coordinates ' 'is expected, got "{0}".'.format(positions)) if positions.ndim > 2: raise ValueError('{0}D position array is not supported. Only 2D ' 'arrays are supported.'.format(positions.ndim)) return positions @staticmethod def _translate_mask_mode(mode, subpixels, rectangle=False): if mode not in ('center', 'subpixel', 'exact'): raise ValueError('Invalid mask mode: {0}'.format(mode)) if rectangle and mode == 'exact': warnings.warn('The "exact" method is not yet implemented for ' 'rectangular apertures -- using "subpixel" method ' 'with "subpixels=32"', AstropyUserWarning) mode = 'subpixel' subpixels = 32 if mode == 'subpixels': if not isinstance(subpixels, int) or subpixels <= 0: raise ValueError('subpixels must be a strictly positive ' 'integer'.format(subpixels)) if mode == 'center': use_exact = 0 subpixels = 1 elif mode == 'subpixel': use_exact = 0 elif mode == 'exact': use_exact = 1 subpixels = 1 return use_exact, subpixels @abc.abstractproperty def bounding_boxes(self): """ A list of minimal bounding boxes (`~photutils.BoundingBox`), one for each position, for the aperture. """ raise NotImplementedError('Needs to be implemented in a ' 'PixelAperture subclass.') @property def _centered_edges(self): """ A list of ``(xmin, xmax, ymin, ymax)`` tuples, one for each position, of the pixel edges after recentering the aperture at the origin. These pixel edges are used by the low-level `photutils.geometry` functions. 
""" edges = [] for position, bbox in zip(self.positions, self.bounding_boxes): xmin = bbox.ixmin - 0.5 - position[0] xmax = bbox.ixmax - 0.5 - position[0] ymin = bbox.iymin - 0.5 - position[1] ymax = bbox.iymax - 0.5 - position[1] edges.append((xmin, xmax, ymin, ymax)) return edges def area(self): """ Return the exact area of the aperture shape. Returns ------- area : float The aperture area. """ raise NotImplementedError('Needs to be implemented in a ' 'PixelAperture subclass.') def mask_area(self, method='exact', subpixels=5): """ Return the area of the aperture(s) mask. For ``method`` other than ``'exact'``, this area will be less than the exact analytical area (e.g. the ``area`` method). Note that for these methods, the values can also differ because of fractional pixel positions. Parameters ---------- method : {'exact', 'center', 'subpixel'}, optional The method used to determine the overlap of the aperture on the pixel grid. Not all options are available for all aperture types. Note that the more precise methods are generally slower. The following methods are available: * ``'exact'`` (default): The the exact fractional overlap of the aperture and each pixel is calculated. The returned mask will contain values between 0 and 1. * ``'center'``: A pixel is considered to be entirely in or out of the aperture depending on whether its center is in or out of the aperture. The returned mask will contain values only of 0 (out) and 1 (in). * ``'subpixel'``: A pixel is divided into subpixels (see the ``subpixels`` keyword), each of which are considered to be entirely in or out of the aperture depending on whether its center is in or out of the aperture. If ``subpixels=1``, this method is equivalent to ``'center'``. The returned mask will contain values between 0 and 1. subpixels : int, optional For the ``'subpixel'`` method, resample pixels by this factor in each dimension. That is, each pixel is divided into ``subpixels ** 2`` subpixels. Returns ------- area : float A list of the mask area of the aperture(s). """ mask = self.to_mask(method=method, subpixels=subpixels) return [np.sum(m.data) for m in mask] @abc.abstractmethod def to_mask(self, method='exact', subpixels=5): """ Return a list of `~photutils.ApertureMask` objects, one for each aperture position. Parameters ---------- method : {'exact', 'center', 'subpixel'}, optional The method used to determine the overlap of the aperture on the pixel grid. Not all options are available for all aperture types. Note that the more precise methods are generally slower. The following methods are available: * ``'exact'`` (default): The the exact fractional overlap of the aperture and each pixel is calculated. The returned mask will contain values between 0 and 1. * ``'center'``: A pixel is considered to be entirely in or out of the aperture depending on whether its center is in or out of the aperture. The returned mask will contain values only of 0 (out) and 1 (in). * ``'subpixel'``: A pixel is divided into subpixels (see the ``subpixels`` keyword), each of which are considered to be entirely in or out of the aperture depending on whether its center is in or out of the aperture. If ``subpixels=1``, this method is equivalent to ``'center'``. The returned mask will contain values between 0 and 1. subpixels : int, optional For the ``'subpixel'`` method, resample pixels by this factor in each dimension. That is, each pixel is divided into ``subpixels ** 2`` subpixels. Returns ------- mask : list of `~photutils.ApertureMask` A list of aperture mask objects. 
""" raise NotImplementedError('Needs to be implemented in a ' 'PixelAperture subclass.') @staticmethod def _prepare_photometry_output(_list, unit=None): if len(_list) == 0: # if error is not input return _list if unit is not None: unit = u.Unit(unit, parse_strict='warn') if isinstance(unit, u.UnrecognizedUnit): warnings.warn('The input unit is not parseable as a valid ' 'unit.', AstropyUserWarning) unit = None if isinstance(_list[0], u.Quantity): # list of Quantity -> Quantity array output = u.Quantity(_list) if unit is not None: if output.unit != unit: warnings.warn('The input unit does not agree with the ' 'data and/or error unit.', AstropyUserWarning) else: if unit is not None: output = u.Quantity(_list, unit=unit) else: output = np.array(_list) return output def do_photometry(self, data, error=None, mask=None, method='exact', subpixels=5, unit=None): """ Perform aperture photometry on the input data. Parameters ---------- data : array_like or `~astropy.units.Quantity` instance The 2D array on which to perform photometry. ``data`` should be background subtracted. error : array_like or `~astropy.units.Quantity`, optional The pixel-wise Gaussian 1-sigma errors of the input ``data``. ``error`` is assumed to include *all* sources of error, including the Poisson error of the sources (see `~photutils.utils.calc_total_error`) . ``error`` must have the same shape as the input ``data``. mask : array_like (bool), optional A boolean mask with the same shape as ``data`` where a `True` value indicates the corresponding element of ``data`` is masked. Masked data are excluded from all calculations. method : {'exact', 'center', 'subpixel'}, optional The method used to determine the overlap of the aperture on the pixel grid. Not all options are available for all aperture types. Note that the more precise methods are generally slower. The following methods are available: * ``'exact'`` (default): The the exact fractional overlap of the aperture and each pixel is calculated. The returned mask will contain values between 0 and 1. * ``'center'``: A pixel is considered to be entirely in or out of the aperture depending on whether its center is in or out of the aperture. The returned mask will contain values only of 0 (out) and 1 (in). * ``'subpixel'`` A pixel is divided into subpixels (see the ``subpixels`` keyword), each of which are considered to be entirely in or out of the aperture depending on whether its center is in or out of the aperture. If ``subpixels=1``, this method is equivalent to ``'center'``. The returned mask will contain values between 0 and 1. subpixels : int, optional For the ``'subpixel'`` method, resample pixels by this factor in each dimension. That is, each pixel is divided into ``subpixels ** 2`` subpixels. unit : `~astropy.units.UnitBase` object or str, optional An object that represents the unit associated with the input ``data`` and ``error`` arrays. Must be a `~astropy.units.UnitBase` object or a string parseable by the :mod:`~astropy.units` package. If ``data`` or ``error`` already have a different unit, the input ``unit`` will not be used and a warning will be raised. Returns ------- aperture_sums : `~numpy.ndarray` or `~astropy.units.Quantity` The sums within each aperture. aperture_sum_errs : `~numpy.ndarray` or `~astropy.units.Quantity` The errors on the sums within each aperture. 
""" data = np.asanyarray(data) if mask is not None: mask = np.asanyarray(mask) data = copy.deepcopy(data) # do not modify input data data[mask] = 0 if error is not None: # do not modify input data error = copy.deepcopy(np.asanyarray(error)) error[mask] = 0. aperture_sums = [] aperture_sum_errs = [] for mask in self.to_mask(method=method, subpixels=subpixels): data_cutout = mask.cutout(data) if data_cutout is None: aperture_sums.append(np.nan) else: aperture_sums.append(np.sum(data_cutout * mask.data)) if error is not None: error_cutout = mask.cutout(error) if error_cutout is None: aperture_sum_errs.append(np.nan) else: aperture_var = np.sum(error_cutout ** 2 * mask.data) aperture_sum_errs.append(np.sqrt(aperture_var)) # handle Quantity objects and input units aperture_sums = self._prepare_photometry_output(aperture_sums, unit=unit) aperture_sum_errs = self._prepare_photometry_output(aperture_sum_errs, unit=unit) return aperture_sums, aperture_sum_errs @staticmethod def _make_annulus_path(patch_inner, patch_outer): """ Define a matplotlib annulus path from two patches. This preserves the cubic Bezier curves (CURVE4) of the aperture paths. """ import matplotlib.path as mpath path_inner = patch_inner.get_path() transform_inner = patch_inner.get_transform() path_inner = transform_inner.transform_path(path_inner) path_outer = patch_outer.get_path() transform_outer = patch_outer.get_transform() path_outer = transform_outer.transform_path(path_outer) verts_inner = path_inner.vertices[:-1][::-1] verts_inner = np.concatenate((verts_inner, [verts_inner[-1]])) verts = np.vstack((path_outer.vertices, verts_inner)) codes = np.hstack((path_outer.codes, path_inner.codes)) return mpath.Path(verts, codes) def _prepare_plot(self, origin=(0, 0), indices=None, ax=None, fill=False, **kwargs): """ Prepare to plot the aperture(s) on a matplotlib `~matplotlib.axes.Axes` instance. Parameters ---------- origin : array_like, optional The ``(x, y)`` position of the origin of the displayed image. indices : int or array of int, optional The indices of the aperture(s) to plot. ax : `matplotlib.axes.Axes` instance, optional If `None`, then the current `~matplotlib.axes.Axes` instance is used. fill : bool, optional Set whether to fill the aperture patch. The default is `False`. kwargs Any keyword arguments accepted by `matplotlib.patches.Patch`. Returns ------- plot_positions : `~numpy.ndarray` The positions of the apertures to plot, after any ``indices`` slicing and ``origin`` shift. ax : `matplotlib.axes.Axes` instance, optional The `~matplotlib.axes.Axes` on which to plot. kwargs Any keyword arguments accepted by `matplotlib.patches.Patch`. """ import matplotlib.pyplot as plt if ax is None: ax = plt.gca() # This is necessary because the `matplotlib.patches.Patch` default # is ``fill=True``. Here we make the default ``fill=False``. kwargs['fill'] = fill plot_positions = copy.deepcopy(self.positions) if indices is not None: plot_positions = plot_positions[np.atleast_1d(indices)] plot_positions[:, 0] -= origin[0] plot_positions[:, 1] -= origin[1] return plot_positions, ax, kwargs @abc.abstractmethod def plot(self, origin=(0, 0), indices=None, ax=None, fill=False, **kwargs): """ Plot the aperture(s) on a matplotlib `~matplotlib.axes.Axes` instance. Parameters ---------- origin : array_like, optional The ``(x, y)`` position of the origin of the displayed image. indices : int or array of int, optional The indices of the aperture(s) to plot. 
ax : `matplotlib.axes.Axes` instance, optional If `None`, then the current `~matplotlib.axes.Axes` instance is used. fill : bool, optional Set whether to fill the aperture patch. The default is `False`. kwargs Any keyword arguments accepted by `matplotlib.patches.Patch`. """ raise NotImplementedError('Needs to be implemented in a ' 'PixelAperture subclass.') def _to_sky_params(self, wcs, mode='all'): """ Convert the pixel aperture parameters to those for a sky aperture. Parameters ---------- wcs : `~astropy.wcs.WCS` The world coordinate system (WCS) transformation to use. mode : {'all', 'wcs'}, optional Whether to do the transformation including distortions (``'all'``; default) or only including only the core WCS transformation (``'wcs'``). Returns ------- sky_params : dict A dictionary of parameters for an equivalent sky aperture. """ sky_params = {} x, y = np.transpose(self.positions) sky_params['positions'] = pixel_to_skycoord(x, y, wcs, mode=mode) # The aperture object must have a single value for each shape # parameter so we must use a single pixel scale for all positions. # Here, we define the scale at the WCS CRVAL position. crval = SkyCoord([wcs.wcs.crval], frame=wcs_to_celestial_frame(wcs), unit=wcs.wcs.cunit) scale, angle = pixel_scale_angle_at_skycoord(crval, wcs) params = self._params[:] theta_key = 'theta' if theta_key in self._params: sky_params[theta_key] = (self.theta * u.rad) - angle.to(u.rad) params.remove(theta_key) param_vals = [getattr(self, param) for param in params] for param, param_val in zip(params, param_vals): sky_params[param] = (param_val * u.pix * scale).to(u.arcsec) return sky_params @abc.abstractmethod def to_sky(self, wcs, mode='all'): """ Convert the aperture to a `SkyAperture` object defined in celestial coordinates. Parameters ---------- wcs : `~astropy.wcs.WCS` object The world coordinate system (WCS) transformation to use. mode : {'all', 'wcs'}, optional Whether to do the transformation including distortions (``'all'``; default) or including only the core WCS transformation (``'wcs'``). Returns ------- aperture : `SkyAperture` object A `SkyAperture` object. """ raise NotImplementedError('Needs to be implemented in a ' 'PixelAperture subclass.') class SkyAperture(Aperture): """ Abstract base class for all apertures defined in celestial coordinates. """ def _to_pixel_params(self, wcs, mode='all'): """ Convert the sky aperture parameters to those for a pixel aperture. Parameters ---------- wcs : `~astropy.wcs.WCS` The world coordinate system (WCS) transformation to use. mode : {'all', 'wcs'}, optional Whether to do the transformation including distortions (``'all'``; default) or only including only the core WCS transformation (``'wcs'``). Returns ------- pixel_params : dict A dictionary of parameters for an equivalent pixel aperture. """ pixel_params = {} x, y = skycoord_to_pixel(self.positions, wcs, mode=mode) pixel_params['positions'] = np.array([x, y]).transpose() # The aperture object must have a single value for each shape # parameter so we must use a single pixel scale for all positions. # Here, we define the scale at the WCS CRVAL position. 
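        # In pixel terms the parameter loop below applies:
        #   * position angle:   theta_pixel = theta_sky + angle, converted to radians
        #   * angular lengths:  length_pixel = length_sky / scale, converted to pixels
        #   * lengths already given in pixel units are passed through unchanged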
crval = SkyCoord([wcs.wcs.crval], frame=wcs_to_celestial_frame(wcs), unit=wcs.wcs.cunit) scale, angle = pixel_scale_angle_at_skycoord(crval, wcs) params = self._params[:] theta_key = 'theta' if theta_key in self._params: pixel_params[theta_key] = (self.theta + angle).to(u.radian).value params.remove(theta_key) param_vals = [getattr(self, param) for param in params] if param_vals[0].unit.physical_type == 'angle': for param, param_val in zip(params, param_vals): pixel_params[param] = (param_val / scale).to(u.pixel).value else: # pixels for param, param_val in zip(params, param_vals): pixel_params[param] = param_val.value return pixel_params @abc.abstractmethod def to_pixel(self, wcs, mode='all'): """ Convert the aperture to a `PixelAperture` object defined in pixel coordinates. Parameters ---------- wcs : `~astropy.wcs.WCS` object The world coordinate system (WCS) transformation to use. mode : {'all', 'wcs'}, optional Whether to do the transformation including distortions (``'all'``; default) or including only the core WCS transformation (``'wcs'``). Returns ------- aperture : `PixelAperture` object A `PixelAperture` object. """ raise NotImplementedError('Needs to be implemented in a ' 'SkyAperture subclass.') def _prepare_photometry_input(data, error, mask, wcs, unit): """ Parse the inputs to `aperture_photometry`. `aperture_photometry` accepts a wide range of inputs, e.g. ``data`` could be a numpy array, a Quantity array, or a fits HDU. This requires some parsing and validation to ensure that all inputs are complete and consistent. For example, the data could carry a unit and the wcs itself, so we need to check that it is consistent with the unit and wcs given as input parameters. """ if isinstance(data, fits.HDUList): for i in range(len(data)): if data[i].data is not None: warnings.warn("Input data is a HDUList object, photometry is " "run only for the {0} HDU." .format(i), AstropyUserWarning) data = data[i] break if isinstance(data, (fits.PrimaryHDU, fits.ImageHDU)): header = data.header data = data.data if 'BUNIT' in header: bunit = u.Unit(header['BUNIT'], parse_strict='warn') if isinstance(bunit, u.UnrecognizedUnit): warnings.warn('The BUNIT in the header of the input data is ' 'not parseable as a valid unit.', AstropyUserWarning) else: data = u.Quantity(data, unit=bunit) if wcs is None: try: wcs = WCS(header) except Exception: # A valid WCS was not found in the header. Let the calling # application raise an exception if it needs a WCS. 
pass data = np.asanyarray(data) if data.ndim != 2: raise ValueError('data must be a 2D array.') if unit is not None: unit = u.Unit(unit, parse_strict='warn') if isinstance(unit, u.UnrecognizedUnit): warnings.warn('The input unit is not parseable as a valid ' 'unit.', AstropyUserWarning) unit = None if isinstance(data, u.Quantity): if unit is not None and data.unit != unit: warnings.warn('The input unit does not agree with the data ' 'unit.', AstropyUserWarning) else: if unit is not None: data = u.Quantity(data, unit=unit) if error is not None: if isinstance(error, u.Quantity): if unit is not None and error.unit != unit: warnings.warn('The input unit does not agree with the error ' 'unit.', AstropyUserWarning) if np.isscalar(error.value): error = u.Quantity(np.broadcast_arrays(error, data), unit=error.unit)[0] else: if np.isscalar(error): error = np.broadcast_arrays(error, data)[0] if unit is not None: error = u.Quantity(error, unit=unit) error = np.asanyarray(error) if error.shape != data.shape: raise ValueError('error and data must have the same shape.') if mask is not None: mask = np.asanyarray(mask) if mask.shape != data.shape: raise ValueError('mask and data must have the same shape.') return data, error, mask, wcs @support_nddata def aperture_photometry(data, apertures, error=None, mask=None, method='exact', subpixels=5, unit=None, wcs=None): """ Perform aperture photometry on the input data by summing the flux within the given aperture(s). Parameters ---------- data : array_like, `~astropy.units.Quantity`, `~astropy.io.fits.ImageHDU`, or `~astropy.io.fits.HDUList` The 2D array on which to perform photometry. ``data`` should be background-subtracted. Units can be used during the photometry, either provided with the data (i.e. a `~astropy.units.Quantity` array) or the ``unit`` keyword. If ``data`` is an `~astropy.io.fits.ImageHDU` or `~astropy.io.fits.HDUList`, the unit is determined from the ``'BUNIT'`` header keyword. apertures : `~photutils.Aperture` The aperture(s) to use for the photometry. error : array_like or `~astropy.units.Quantity`, optional The pixel-wise Gaussian 1-sigma errors of the input ``data``. ``error`` is assumed to include *all* sources of error, including the Poisson error of the sources (see `~photutils.utils.calc_total_error`) . ``error`` must have the same shape as the input ``data``. mask : array_like (bool), optional A boolean mask with the same shape as ``data`` where a `True` value indicates the corresponding element of ``data`` is masked. Masked data are excluded from all calculations. method : {'exact', 'center', 'subpixel'}, optional The method used to determine the overlap of the aperture on the pixel grid. Not all options are available for all aperture types. Note that the more precise methods are generally slower. The following methods are available: * ``'exact'`` (default): The the exact fractional overlap of the aperture and each pixel is calculated. The returned mask will contain values between 0 and 1. * ``'center'``: A pixel is considered to be entirely in or out of the aperture depending on whether its center is in or out of the aperture. The returned mask will contain values only of 0 (out) and 1 (in). * ``'subpixel'``: A pixel is divided into subpixels (see the ``subpixels`` keyword), each of which are considered to be entirely in or out of the aperture depending on whether its center is in or out of the aperture. If ``subpixels=1``, this method is equivalent to ``'center'``. The returned mask will contain values between 0 and 1. 
subpixels : int, optional For the ``'subpixel'`` method, resample pixels by this factor in each dimension. That is, each pixel is divided into ``subpixels ** 2`` subpixels. unit : `~astropy.units.UnitBase` object or str, optional An object that represents the unit associated with the input ``data`` and ``error`` arrays. Must be a `~astropy.units.UnitBase` object or a string parseable by the :mod:`~astropy.units` package. If ``data`` or ``error`` already have a different unit, the input ``unit`` will not be used and a warning will be raised. If ``data`` is an `~astropy.io.fits.ImageHDU` or `~astropy.io.fits.HDUList`, ``unit`` will override the ``'BUNIT'`` header keyword. wcs : `~astropy.wcs.WCS`, optional The WCS transformation to use if the input ``apertures`` is a `SkyAperture` object. If ``data`` is an `~astropy.io.fits.ImageHDU` or `~astropy.io.fits.HDUList`, ``wcs`` overrides any WCS transformation present in the header. Returns ------- table : `~astropy.table.QTable` A table of the photometry with the following columns: * ``'id'``: The source ID. * ``'xcenter'``, ``'ycenter'``: The ``x`` and ``y`` pixel coordinates of the input aperture center(s). * ``'celestial_center'``: The celestial coordinates of the input aperture center(s). Returned only if the input ``apertures`` is a `SkyAperture` object. * ``'aperture_sum'``: The sum of the values within the aperture. * ``'aperture_sum_err'``: The corresponding uncertainty in the ``'aperture_sum'`` values. Returned only if the input ``error`` is not `None`. The table metadata includes the Astropy and Photutils version numbers and the `aperture_photometry` calling arguments. Notes ----- This function is decorated with `~astropy.nddata.support_nddata` and thus supports `~astropy.nddata.NDData` objects as input. 
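For concreteness, an end-to-end sketch with synthetic data (the background level, positions,
and aperture sizes are hypothetical) that also estimates a local background from an annulus;
with multiple apertures the sum columns are suffixed ``_0``, ``_1``, ... as in the
implementation below:

>>> import numpy as np
>>> from photutils import CircularAperture, CircularAnnulus, aperture_photometry
>>> data = np.random.RandomState(0).normal(100., 1., (101, 101))  # flat background
>>> positions = [(45.1, 52.3), (70.7, 30.2)]
>>> aper = CircularAperture(positions, r=5.)
>>> annulus = CircularAnnulus(positions, r_in=8., r_out=12.)
>>> tbl = aperture_photometry(data, [aper, annulus])
>>> # per-pixel background from the annulus, scaled to the aperture area:
>>> bkg = tbl['aperture_sum_1'] / annulus.area() * aper.area()
>>> flux = tbl['aperture_sum_0'] - bkg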
""" data, error, mask, wcs = _prepare_photometry_input(data, error, mask, wcs, unit) if method == 'subpixel': if (int(subpixels) != subpixels) or (subpixels <= 0): raise ValueError('subpixels must be a positive integer.') apertures = np.atleast_1d(apertures) # convert sky to pixel apertures skyaper = False if isinstance(apertures[0], SkyAperture): if wcs is None: raise ValueError('A WCS transform must be defined by the input ' 'data or the wcs keyword when using a ' 'SkyAperture object.') skyaper = True skycoord_pos = apertures[0].positions pix_aper = [aper.to_pixel(wcs) for aper in apertures] apertures = pix_aper # do comparison in pixels to avoid comparing SkyCoord objects positions = apertures[0].positions for aper in apertures[1:]: if not np.array_equal(aper.positions, positions): raise ValueError('Input apertures must all have identical ' 'positions.') meta = OrderedDict() meta['name'] = 'Aperture photometry results' meta['version'] = get_version_info() calling_args = ("method='{0}', subpixels={1}".format(method, subpixels)) meta['aperture_photometry_args'] = calling_args tbl = QTable(meta=meta) tbl['id'] = np.arange(len(apertures[0]), dtype=int) + 1 xypos_pixel = np.transpose(apertures[0].positions) * u.pixel tbl['xcenter'] = xypos_pixel[0] tbl['ycenter'] = xypos_pixel[1] if skyaper: if skycoord_pos.isscalar: tbl['celestial_center'] = (skycoord_pos,) else: tbl['celestial_center'] = skycoord_pos for i, aper in enumerate(apertures): aper_sum, aper_sum_err = aper.do_photometry(data, error=error, mask=mask, method=method, subpixels=subpixels) sum_key = 'aperture_sum' sum_err_key = 'aperture_sum_err' if len(apertures) > 1: sum_key += '_{}'.format(i) sum_err_key += '_{}'.format(i) tbl[sum_key] = aper_sum if error is not None: tbl[sum_err_key] = aper_sum_err return tbl photutils-0.4/photutils/aperture/ellipse.py0000644000214200020070000004042013067540023023464 0ustar lbradleySTSCI\science00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst from __future__ import (absolute_import, division, print_function, unicode_literals) import math import numpy as np from astropy.coordinates import SkyCoord from .core import PixelAperture, SkyAperture from .bounding_box import BoundingBox from .mask import ApertureMask from ..geometry import elliptical_overlap_grid from ..utils.wcs_helpers import assert_angle, assert_angle_or_pixel __all__ = ['EllipticalMaskMixin', 'EllipticalAperture', 'EllipticalAnnulus', 'SkyEllipticalAperture', 'SkyEllipticalAnnulus'] class EllipticalMaskMixin(object): """ Mixin class to create masks for elliptical and elliptical-annulus aperture objects. """ def to_mask(self, method='exact', subpixels=5): """ Return a list of `~photutils.ApertureMask` objects, one for each aperture position. Parameters ---------- method : {'exact', 'center', 'subpixel'}, optional The method used to determine the overlap of the aperture on the pixel grid. Not all options are available for all aperture types. Note that the more precise methods are generally slower. The following methods are available: * ``'exact'`` (default): The the exact fractional overlap of the aperture and each pixel is calculated. The returned mask will contain values between 0 and 1. * ``'center'``: A pixel is considered to be entirely in or out of the aperture depending on whether its center is in or out of the aperture. The returned mask will contain values only of 0 (out) and 1 (in). 
* ``'subpixel'``: A pixel is divided into subpixels (see the ``subpixels`` keyword), each of which are considered to be entirely in or out of the aperture depending on whether its center is in or out of the aperture. If ``subpixels=1``, this method is equivalent to ``'center'``. The returned mask will contain values between 0 and 1. subpixels : int, optional For the ``'subpixel'`` method, resample pixels by this factor in each dimension. That is, each pixel is divided into ``subpixels ** 2`` subpixels. Returns ------- mask : list of `~photutils.ApertureMask` A list of aperture mask objects. """ use_exact, subpixels = self._translate_mask_mode(method, subpixels) if hasattr(self, 'a'): a = self.a b = self.b elif hasattr(self, 'a_in'): # annulus a = self.a_out b = self.b_out b_in = self.a_in * self.b_out / self.a_out else: raise ValueError('Cannot determine the aperture shape.') masks = [] for bbox, edges in zip(self.bounding_boxes, self._centered_edges): ny, nx = bbox.shape mask = elliptical_overlap_grid(edges[0], edges[1], edges[2], edges[3], nx, ny, a, b, self.theta, use_exact, subpixels) # subtract the inner ellipse for an annulus if hasattr(self, 'a_in'): mask -= elliptical_overlap_grid(edges[0], edges[1], edges[2], edges[3], nx, ny, self.a_in, b_in, self.theta, use_exact, subpixels) masks.append(ApertureMask(mask, bbox)) return masks class EllipticalAperture(EllipticalMaskMixin, PixelAperture): """ Elliptical aperture(s), defined in pixel coordinates. Parameters ---------- positions : array_like or `~astropy.units.Quantity` Pixel coordinates of the aperture center(s) in one of the following formats: * single ``(x, y)`` tuple * list of ``(x, y)`` tuples * ``Nx2`` or ``2xN`` `~numpy.ndarray` * ``Nx2`` or ``2xN`` `~astropy.units.Quantity` in pixel units Note that a ``2x2`` `~numpy.ndarray` or `~astropy.units.Quantity` is interpreted as ``Nx2``, i.e. two rows of (x, y) coordinates. a : float The semimajor axis. b : float The semiminor axis. theta : float The rotation angle in radians of the semimajor axis from the positive ``x`` axis. The rotation angle increases counterclockwise. Raises ------ ValueError : `ValueError` If either axis (``a`` or ``b``) is negative. """ def __init__(self, positions, a, b, theta): if a < 0 or b < 0: raise ValueError("'a' and 'b' must be non-negative.") self.positions = self._sanitize_positions(positions) self.a = float(a) self.b = float(b) self.theta = float(theta) self._params = ['a', 'b', 'theta'] @property def bounding_boxes(self): """ A list of minimal bounding boxes (`~photutils.BoundingBox`), one for each position, enclosing the exact elliptical apertures. """ cos_theta = np.cos(self.theta) sin_theta = np.sin(self.theta) ax = self.a * cos_theta ay = self.a * sin_theta bx = self.b * -sin_theta by = self.b * cos_theta dx = np.sqrt(ax*ax + bx*bx) dy = np.sqrt(ay*ay + by*by) xmin = self.positions[:, 0] - dx xmax = self.positions[:, 0] + dx ymin = self.positions[:, 1] - dy ymax = self.positions[:, 1] + dy return [BoundingBox._from_float(x0, x1, y0, y1) for x0, x1, y0, y1 in zip(xmin, xmax, ymin, ymax)] def area(self): return math.pi * self.a * self.b def plot(self, origin=(0, 0), indices=None, ax=None, fill=False, **kwargs): import matplotlib.patches as mpatches plot_positions, ax, kwargs = self._prepare_plot( origin, indices, ax, fill, **kwargs) theta_deg = self.theta * 180. 
/ np.pi for position in plot_positions: patch = mpatches.Ellipse(position, 2.*self.a, 2.*self.b, theta_deg, **kwargs) ax.add_patch(patch) def to_sky(self, wcs, mode='all'): """ Convert the aperture to a `SkyEllipticalAperture` object defined in celestial coordinates. Parameters ---------- wcs : `~astropy.wcs.WCS` The world coordinate system (WCS) transformation to use. mode : {'all', 'wcs'}, optional Whether to do the transformation including distortions (``'all'``; default) or only including only the core WCS transformation (``'wcs'``). Returns ------- aperture : `SkyEllipticalAperture` object A `SkyEllipticalAperture` object. """ sky_params = self._to_sky_params(wcs, mode=mode) return SkyEllipticalAperture(**sky_params) class EllipticalAnnulus(EllipticalMaskMixin, PixelAperture): """ Elliptical annulus aperture(s), defined in pixel coordinates. Parameters ---------- positions : array_like or `~astropy.units.Quantity` Pixel coordinates of the aperture center(s) in one of the following formats: * single ``(x, y)`` tuple * list of ``(x, y)`` tuples * ``Nx2`` or ``2xN`` `~numpy.ndarray` * ``Nx2`` or ``2xN`` `~astropy.units.Quantity` in pixel units Note that a ``2x2`` `~numpy.ndarray` or `~astropy.units.Quantity` is interpreted as ``Nx2``, i.e. two rows of (x, y) coordinates. a_in : float The inner semimajor axis. a_out : float The outer semimajor axis. b_out : float The outer semiminor axis. The inner semiminor axis is calculated as: .. math:: b_{in} = b_{out} \\left(\\frac{a_{in}}{a_{out}}\\right) theta : float The rotation angle in radians of the semimajor axis from the positive ``x`` axis. The rotation angle increases counterclockwise. Raises ------ ValueError : `ValueError` If inner semimajor axis (``a_in``) is greater than outer semimajor axis (``a_out``). ValueError : `ValueError` If either the inner semimajor axis (``a_in``) or the outer semiminor axis (``b_out``) is negative. """ def __init__(self, positions, a_in, a_out, b_out, theta): if not (a_out > a_in): raise ValueError('"a_out" must be greater than "a_in".') if a_in < 0 or b_out < 0: raise ValueError('"a_in" and "b_out" must be non-negative.') self.positions = self._sanitize_positions(positions) self.a_in = float(a_in) self.a_out = float(a_out) self.b_out = float(b_out) self.b_in = self.b_out * self.a_in / self.a_out self.theta = float(theta) self._params = ['a_in', 'a_out', 'b_out', 'theta'] @property def bounding_boxes(self): """ A list of minimal bounding boxes (`~photutils.BoundingBox`), one for each position, enclosing the exact elliptical apertures. """ cos_theta = np.cos(self.theta) sin_theta = np.sin(self.theta) ax = self.a_out * cos_theta ay = self.a_out * sin_theta bx = self.b_out * -sin_theta by = self.b_out * cos_theta dx = np.sqrt(ax*ax + bx*bx) dy = np.sqrt(ay*ay + by*by) xmin = self.positions[:, 0] - dx xmax = self.positions[:, 0] + dx ymin = self.positions[:, 1] - dy ymax = self.positions[:, 1] + dy return [BoundingBox._from_float(x0, x1, y0, y1) for x0, x1, y0, y1 in zip(xmin, xmax, ymin, ymax)] def area(self): return math.pi * (self.a_out * self.b_out - self.a_in * self.b_in) def plot(self, origin=(0, 0), indices=None, ax=None, fill=False, **kwargs): import matplotlib.patches as mpatches plot_positions, ax, kwargs = self._prepare_plot( origin, indices, ax, fill, **kwargs) theta_deg = self.theta * 180. 
/ np.pi for position in plot_positions: patch_inner = mpatches.Ellipse(position, 2.*self.a_in, 2.*self.b_in, theta_deg, **kwargs) patch_outer = mpatches.Ellipse(position, 2.*self.a_out, 2.*self.b_out, theta_deg, **kwargs) path = self._make_annulus_path(patch_inner, patch_outer) patch = mpatches.PathPatch(path, **kwargs) ax.add_patch(patch) def to_sky(self, wcs, mode='all'): """ Convert the aperture to a `SkyEllipticalAnnulus` object defined in celestial coordinates. Parameters ---------- wcs : `~astropy.wcs.WCS` The world coordinate system (WCS) transformation to use. mode : {'all', 'wcs'}, optional Whether to do the transformation including distortions (``'all'``; default) or only including only the core WCS transformation (``'wcs'``). Returns ------- aperture : `SkyEllipticalAnnulus` object A `SkyEllipticalAnnulus` object. """ sky_params = self._to_sky_params(wcs, mode=mode) return SkyEllipticalAnnulus(**sky_params) class SkyEllipticalAperture(SkyAperture): """ Elliptical aperture(s), defined in sky coordinates. Parameters ---------- positions : `~astropy.coordinates.SkyCoord` Celestial coordinates of the aperture center(s). This can be either scalar coordinates or an array of coordinates. a : `~astropy.units.Quantity` The semimajor axis, either in angular or pixel units. b : `~astropy.units.Quantity` The semiminor axis, either in angular or pixel units. theta : `~astropy.units.Quantity` The position angle (in angular units) of the semimajor axis. For a right-handed world coordinate system, the position angle increases counterclockwise from North (PA=0). """ def __init__(self, positions, a, b, theta): if isinstance(positions, SkyCoord): self.positions = positions else: raise TypeError('positions must be a SkyCoord instance') assert_angle_or_pixel('a', a) assert_angle_or_pixel('b', b) assert_angle('theta', theta) if a.unit.physical_type != b.unit.physical_type: raise ValueError("a and b should either both be angles " "or in pixels") self.a = a self.b = b self.theta = theta self._params = ['a', 'b', 'theta'] def to_pixel(self, wcs, mode='all'): """ Convert the aperture to an `EllipticalAperture` object defined in pixel coordinates. Parameters ---------- wcs : `~astropy.wcs.WCS` The world coordinate system (WCS) transformation to use. mode : {'all', 'wcs'}, optional Whether to do the transformation including distortions (``'all'``; default) or only including only the core WCS transformation (``'wcs'``). Returns ------- aperture : `EllipticalAperture` object An `EllipticalAperture` object. """ pixel_params = self._to_pixel_params(wcs, mode=mode) return EllipticalAperture(**pixel_params) class SkyEllipticalAnnulus(SkyAperture): """ Elliptical annulus aperture(s), defined in sky coordinates. Parameters ---------- positions : `~astropy.coordinates.SkyCoord` Celestial coordinates of the aperture center(s). This can be either scalar coordinates or an array of coordinates. a_in : `~astropy.units.Quantity` The inner semimajor axis, either in angular or pixel units. a_out : `~astropy.units.Quantity` The outer semimajor axis, either in angular or pixel units. b_out : float The outer semiminor axis, either in angular or pixel units. The inner semiminor axis is calculated as: .. math:: b_{in} = b_{out} \\left(\\frac{a_{in}}{a_{out}}\\right) theta : `~astropy.units.Quantity` The position angle (in angular units) of the semimajor axis. For a right-handed world coordinate system, the position angle increases counterclockwise from North (PA=0). 
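An illustrative construction sketch (the coordinates and sizes below are hypothetical); given
a WCS, ``to_pixel`` then yields an `EllipticalAnnulus` in pixel coordinates:

>>> import astropy.units as u
>>> from astropy.coordinates import SkyCoord
>>> from photutils import SkyEllipticalAnnulus
>>> pos = SkyCoord(ra=10.68, dec=41.27, unit='deg')
>>> annulus = SkyEllipticalAnnulus(pos, a_in=3.0 * u.arcsec, a_out=6.0 * u.arcsec,
...                                b_out=4.0 * u.arcsec, theta=30.0 * u.deg)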
""" def __init__(self, positions, a_in, a_out, b_out, theta): if isinstance(positions, SkyCoord): self.positions = positions else: raise TypeError('positions must be a SkyCoord instance') assert_angle_or_pixel('a_in', a_in) assert_angle_or_pixel('a_out', a_out) assert_angle_or_pixel('b_out', b_out) assert_angle('theta', theta) if a_in.unit.physical_type != a_out.unit.physical_type: raise ValueError("a_in and a_out should either both be angles " "or in pixels") if a_out.unit.physical_type != b_out.unit.physical_type: raise ValueError("a_out and b_out should either both be angles " "or in pixels") self.a_in = a_in self.a_out = a_out self.b_out = b_out self.theta = theta self._params = ['a_in', 'a_out', 'b_out', 'theta'] def to_pixel(self, wcs, mode='all'): """ Convert the aperture to an `EllipticalAnnulus` object defined in pixel coordinates. Parameters ---------- wcs : `~astropy.wcs.WCS` The world coordinate system (WCS) transformation to use. mode : {'all', 'wcs'}, optional Whether to do the transformation including distortions (``'all'``; default) or only including only the core WCS transformation (``'wcs'``). Returns ------- aperture : `EllipticalAnnulus` object An `EllipticalAnnulus` object. """ pixel_params = self._to_pixel_params(wcs, mode=mode) return EllipticalAnnulus(**pixel_params) photutils-0.4/photutils/aperture/mask.py0000644000214200020070000001451413175634532023000 0ustar lbradleySTSCI\science00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst from __future__ import (absolute_import, division, print_function, unicode_literals) import numpy as np import astropy.units as u __all__ = ['ApertureMask'] class ApertureMask(object): """ Class for an aperture mask. Parameters ---------- mask : array_like A 2D array of an aperture mask representing the fractional overlap of the aperture on the pixel grid. This should be the full-sized (i.e. not truncated) array that is the direct output of one of the low-level `photutils.geometry` functions. bbox : `photutils.BoundingBox` The bounding box object defining the aperture minimal bounding box. """ def __init__(self, data, bbox): self.data = np.asanyarray(data) if self.data.shape != bbox.shape: raise ValueError('mask data and bounding box must have the same ' 'shape') self.bbox = bbox def __array__(self): """ Array representation of the mask data array (e.g., for matplotlib). """ return self.data @property def shape(self): """ The shape of the mask data array. """ return self.data.shape def _overlap_slices(self, shape): """ Calculate the slices for the overlapping part of the bounding box and an array of the given shape. Parameters ---------- shape : tuple of int The ``(ny, nx)`` shape of array where the slices are to be applied. Returns ------- slices_large : tuple of slices A tuple of slice objects for each axis of the large array, such that ``large_array[slices_large]`` extracts the region of the large array that overlaps with the small array. slices_small : slice A tuple of slice objects for each axis of the small array, such that ``small_array[slices_small]`` extracts the region of the small array that is inside the large array. 
""" if len(shape) != 2: raise ValueError('input shape must have 2 elements.') xmin = self.bbox.ixmin xmax = self.bbox.ixmax ymin = self.bbox.iymin ymax = self.bbox.iymax if xmin >= shape[1] or ymin >= shape[0] or xmax <= 0 or ymax <= 0: # no overlap of the aperture with the data return None, None slices_large = (slice(max(ymin, 0), min(ymax, shape[0])), slice(max(xmin, 0), min(xmax, shape[1]))) slices_small = (slice(max(-ymin, 0), min(ymax - ymin, shape[0] - ymin)), slice(max(-xmin, 0), min(xmax - xmin, shape[1] - xmin))) return slices_large, slices_small def to_image(self, shape): """ Return an image of the mask in a 2D array of the given shape, taking any edge effects into account. Parameters ---------- shape : tuple of int The ``(ny, nx)`` shape of the output array. Returns ------- result : `~numpy.ndarray` A 2D array of the mask. """ if len(shape) != 2: raise ValueError('input shape must have 2 elements.') mask = np.zeros(shape) try: mask[self.bbox.slices] = self.data except ValueError: # partial or no overlap slices_large, slices_small = self._overlap_slices(shape) if slices_small is None: return None # no overlap mask = np.zeros(shape) mask[slices_large] = self.data[slices_small] return mask def cutout(self, data, fill_value=0.): """ Create a cutout from the input data over the mask bounding box, taking any edge effects into account. Parameters ---------- data : array_like or `~astropy.units.Quantity` A 2D array on which to apply the aperture mask. fill_value : float, optional The value is used to fill pixels where the aperture mask does not overlap with the input ``data``. The default is 0. Returns ------- result : `~numpy.ndarray` A 2D array cut out from the input ``data`` representing the same cutout region as the aperture mask. If there is a partial overlap of the aperture mask with the input data, pixels outside of the data will be assigned to ``fill_value``. `None` is returned if there is no overlap of the aperture with the input ``data``. """ data = np.asanyarray(data) cutout = data[self.bbox.slices] if cutout.shape != self.shape: slices_large, slices_small = self._overlap_slices(data.shape) if slices_small is None: return None # no overlap cutout = np.zeros(self.shape, dtype=data.dtype) cutout[:] = fill_value cutout[slices_small] = data[slices_large] if isinstance(data, u.Quantity): cutout = u.Quantity(cutout, unit=data.unit) return cutout def multiply(self, data, fill_value=0.): """ Multiply the aperture mask with the input data, taking any edge effects into account. The result is a mask-weighted cutout from the data. Parameters ---------- data : array_like or `~astropy.units.Quantity` The 2D array to multiply with the aperture mask. fill_value : float, optional The value is used to fill pixels where the aperture mask does not overlap with the input ``data``. The default is 0. Returns ------- result : `~numpy.ndarray` A 2D mask-weighted cutout from the input ``data``. If there is a partial overlap of the aperture mask with the input data, pixels outside of the data will be assigned to ``fill_value`` before being multipled with the mask. `None` is returned if there is no overlap of the aperture with the input ``data``. 
""" return self.cutout(data, fill_value=fill_value) * self.data photutils-0.4/photutils/aperture/rectangle.py0000644000214200020070000004452513067540023024005 0ustar lbradleySTSCI\science00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst from __future__ import (absolute_import, division, print_function, unicode_literals) import math import numpy as np from astropy.coordinates import SkyCoord from .core import PixelAperture, SkyAperture from .bounding_box import BoundingBox from .mask import ApertureMask from ..geometry import rectangular_overlap_grid from ..utils.wcs_helpers import assert_angle, assert_angle_or_pixel __all__ = ['RectangularMaskMixin', 'RectangularAperture', 'RectangularAnnulus', 'SkyRectangularAperture', 'SkyRectangularAnnulus'] class RectangularMaskMixin(object): """ Mixin class to create masks for rectangular or rectangular-annulus aperture objects. """ def to_mask(self, method='exact', subpixels=5): """ Return a list of `~photutils.ApertureMask` objects, one for each aperture position. Parameters ---------- method : {'exact', 'center', 'subpixel'}, optional The method used to determine the overlap of the aperture on the pixel grid. Not all options are available for all aperture types. Note that the more precise methods are generally slower. The following methods are available: * ``'exact'`` (default): The the exact fractional overlap of the aperture and each pixel is calculated. The returned mask will contain values between 0 and 1. * ``'center'``: A pixel is considered to be entirely in or out of the aperture depending on whether its center is in or out of the aperture. The returned mask will contain values only of 0 (out) and 1 (in). * ``'subpixel'``: A pixel is divided into subpixels (see the ``subpixels`` keyword), each of which are considered to be entirely in or out of the aperture depending on whether its center is in or out of the aperture. If ``subpixels=1``, this method is equivalent to ``'center'``. The returned mask will contain values between 0 and 1. subpixels : int, optional For the ``'subpixel'`` method, resample pixels by this factor in each dimension. That is, each pixel is divided into ``subpixels ** 2`` subpixels. Returns ------- mask : list of `~photutils.ApertureMask` A list of aperture mask objects. """ use_exact, subpixels = self._translate_mask_mode(method, subpixels, rectangle=True) if hasattr(self, 'w'): w = self.w h = self.h elif hasattr(self, 'w_out'): # annulus w = self.w_out h = self.h_out h_in = self.w_in * self.h_out / self.w_out else: raise ValueError('Cannot determine the aperture radius.') masks = [] for bbox, edges in zip(self.bounding_boxes, self._centered_edges): ny, nx = bbox.shape mask = rectangular_overlap_grid(edges[0], edges[1], edges[2], edges[3], nx, ny, w, h, self.theta, 0, subpixels) # subtract the inner circle for an annulus if hasattr(self, 'w_in'): mask -= rectangular_overlap_grid(edges[0], edges[1], edges[2], edges[3], nx, ny, self.w_in, h_in, self.theta, 0, subpixels) masks.append(ApertureMask(mask, bbox)) return masks class RectangularAperture(RectangularMaskMixin, PixelAperture): """ Rectangular aperture(s), defined in pixel coordinates. 
Parameters ---------- positions : array_like or `~astropy.units.Quantity` Pixel coordinates of the aperture center(s) in one of the following formats: * single ``(x, y)`` tuple * list of ``(x, y)`` tuples * ``Nx2`` or ``2xN`` `~numpy.ndarray` * ``Nx2`` or ``2xN`` `~astropy.units.Quantity` in pixel units Note that a ``2x2`` `~numpy.ndarray` or `~astropy.units.Quantity` is interpreted as ``Nx2``, i.e. two rows of (x, y) coordinates. w : float The full width of the aperture. For ``theta=0`` the width side is along the ``x`` axis. h : float The full height of the aperture. For ``theta=0`` the height side is along the ``y`` axis. theta : float The rotation angle in radians of the width (``w``) side from the positive ``x`` axis. The rotation angle increases counterclockwise. Raises ------ ValueError : `ValueError` If either width (``w``) or height (``h``) is negative. """ def __init__(self, positions, w, h, theta): if w < 0 or h < 0: raise ValueError("'w' and 'h' must be nonnegative.") self.positions = self._sanitize_positions(positions) self.w = float(w) self.h = float(h) self.theta = float(theta) self._params = ['w', 'h', 'theta'] @property def bounding_boxes(self): """ A list of minimal bounding boxes (`~photutils.BoundingBox`), one for each position, enclosing the exact rectangular apertures. """ w2 = self.w / 2. h2 = self.h / 2. cos_theta = math.cos(self.theta) sin_theta = math.sin(self.theta) dx1 = abs(w2 * cos_theta - h2 * sin_theta) dy1 = abs(w2 * sin_theta + h2 * cos_theta) dx2 = abs(w2 * cos_theta + h2 * sin_theta) dy2 = abs(w2 * sin_theta - h2 * cos_theta) dx = max(dx1, dx2) dy = max(dy1, dy2) xmin = self.positions[:, 0] - dx xmax = self.positions[:, 0] + dx ymin = self.positions[:, 1] - dy ymax = self.positions[:, 1] + dy return [BoundingBox._from_float(x0, x1, y0, y1) for x0, x1, y0, y1 in zip(xmin, xmax, ymin, ymax)] def area(self): return self.w * self.h def plot(self, origin=(0, 0), indices=None, ax=None, fill=False, **kwargs): import matplotlib.patches as mpatches plot_positions, ax, kwargs = self._prepare_plot( origin, indices, ax, fill, **kwargs) hw = self.w / 2. hh = self.h / 2. sint = math.sin(self.theta) cost = math.cos(self.theta) dx = (hh * sint) - (hw * cost) dy = -(hh * cost) - (hw * sint) plot_positions = plot_positions + np.array([dx, dy]) theta_deg = self.theta * 180. / np.pi for position in plot_positions: patch = mpatches.Rectangle(position, self.w, self.h, theta_deg, **kwargs) ax.add_patch(patch) def to_sky(self, wcs, mode='all'): """ Convert the aperture to a `SkyRectangularAperture` object defined in celestial coordinates. Parameters ---------- wcs : `~astropy.wcs.WCS` The world coordinate system (WCS) transformation to use. mode : {'all', 'wcs'}, optional Whether to do the transformation including distortions (``'all'``; default) or only including only the core WCS transformation (``'wcs'``). Returns ------- aperture : `SkyRectangularAperture` object A `SkyRectangularAperture` object. """ sky_params = self._to_sky_params(wcs, mode=mode) return SkyRectangularAperture(**sky_params) class RectangularAnnulus(RectangularMaskMixin, PixelAperture): """ Rectangular annulus aperture(s), defined in pixel coordinates. 
Parameters ---------- positions : array_like or `~astropy.units.Quantity` Pixel coordinates of the aperture center(s) in one of the following formats: * single ``(x, y)`` tuple * list of ``(x, y)`` tuples * ``Nx2`` or ``2xN`` `~numpy.ndarray` * ``Nx2`` or ``2xN`` `~astropy.units.Quantity` in pixel units Note that a ``2x2`` `~numpy.ndarray` or `~astropy.units.Quantity` is interpreted as ``Nx2``, i.e. two rows of (x, y) coordinates. w_in : float The inner full width of the aperture. For ``theta=0`` the width side is along the ``x`` axis. w_out : float The outer full width of the aperture. For ``theta=0`` the width side is along the ``x`` axis. h_out : float The outer full height of the aperture. The inner full height is calculated as: .. math:: h_{in} = h_{out} \\left(\\frac{w_{in}}{w_{out}}\\right) For ``theta=0`` the height side is along the ``y`` axis. theta : float The rotation angle in radians of the width side from the positive ``x`` axis. The rotation angle increases counterclockwise. Raises ------ ValueError : `ValueError` If inner width (``w_in``) is greater than outer width (``w_out``). ValueError : `ValueError` If either the inner width (``w_in``) or the outer height (``h_out``) is negative. """ def __init__(self, positions, w_in, w_out, h_out, theta): if not (w_out > w_in): raise ValueError("'w_out' must be greater than 'w_in'") if w_in < 0 or h_out < 0: raise ValueError("'w_in' and 'h_out' must be non-negative") self.positions = self._sanitize_positions(positions) self.w_in = float(w_in) self.w_out = float(w_out) self.h_out = float(h_out) self.h_in = self.w_in * self.h_out / self.w_out self.theta = float(theta) self._params = ['w_in', 'w_out', 'h_out', 'theta'] @property def bounding_boxes(self): """ A list of minimal bounding boxes (`~photutils.BoundingBox`), one for each position, enclosing the rectangular apertures for the "exact" case. """ w2 = self.w_out / 2. h2 = self.h_out / 2. cos_theta = math.cos(self.theta) sin_theta = math.sin(self.theta) dx1 = abs(w2 * cos_theta - h2 * sin_theta) dy1 = abs(w2 * sin_theta + h2 * cos_theta) dx2 = abs(w2 * cos_theta + h2 * sin_theta) dy2 = abs(w2 * sin_theta - h2 * cos_theta) dx = max(dx1, dx2) dy = max(dy1, dy2) xmin = self.positions[:, 0] - dx xmax = self.positions[:, 0] + dx ymin = self.positions[:, 1] - dy ymax = self.positions[:, 1] + dy return [BoundingBox._from_float(x0, x1, y0, y1) for x0, x1, y0, y1 in zip(xmin, xmax, ymin, ymax)] def area(self): return self.w_out * self.h_out - self.w_in * self.h_in def plot(self, origin=(0, 0), indices=None, ax=None, fill=False, **kwargs): import matplotlib.patches as mpatches plot_positions, ax, kwargs = self._prepare_plot( origin, indices, ax, fill, **kwargs) sint = math.sin(self.theta) cost = math.cos(self.theta) theta_deg = self.theta * 180. / np.pi hw_inner = self.w_in / 2. hh_inner = self.h_in / 2. dx_inner = (hh_inner * sint) - (hw_inner * cost) dy_inner = -(hh_inner * cost) - (hw_inner * sint) positions_inner = plot_positions + np.array([dx_inner, dy_inner]) hw_outer = self.w_out / 2. hh_outer = self.h_out / 2. 
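        # offset from the aperture center to the lower-left corner of the
        # rotated outer rectangle; matplotlib's Rectangle patch is anchored
        # at that corner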
dx_outer = (hh_outer * sint) - (hw_outer * cost) dy_outer = -(hh_outer * cost) - (hw_outer * sint) positions_outer = plot_positions + np.array([dx_outer, dy_outer]) for i, position_inner in enumerate(positions_inner): patch_inner = mpatches.Rectangle(position_inner, self.w_in, self.h_in, theta_deg, **kwargs) patch_outer = mpatches.Rectangle(positions_outer[i], self.w_out, self.h_out, theta_deg, **kwargs) path = self._make_annulus_path(patch_inner, patch_outer) patch = mpatches.PathPatch(path, **kwargs) ax.add_patch(patch) def to_sky(self, wcs, mode='all'): """ Convert the aperture to a `SkyRectangularAnnulus` object defined in celestial coordinates. Parameters ---------- wcs : `~astropy.wcs.WCS` The world coordinate system (WCS) transformation to use. mode : {'all', 'wcs'}, optional Whether to do the transformation including distortions (``'all'``; default) or only including only the core WCS transformation (``'wcs'``). Returns ------- aperture : `SkyRectangularAnnulus` object A `SkyRectangularAnnulus` object. """ sky_params = self._to_sky_params(wcs, mode=mode) return SkyRectangularAnnulus(**sky_params) class SkyRectangularAperture(SkyAperture): """ Rectangular aperture(s), defined in sky coordinates. Parameters ---------- positions : `~astropy.coordinates.SkyCoord` Celestial coordinates of the aperture center(s). This can be either scalar coordinates or an array of coordinates. w : `~astropy.units.Quantity` The full width of the aperture, either in angular or pixel units. For ``theta=0`` the width side is along the North-South axis. h : `~astropy.units.Quantity` The full height of the aperture, either in angular or pixel units. For ``theta=0`` the height side is along the East-West axis. theta : `~astropy.units.Quantity` The position angle (in angular units) of the width side. For a right-handed world coordinate system, the position angle increases counterclockwise from North (PA=0). """ def __init__(self, positions, w, h, theta): if isinstance(positions, SkyCoord): self.positions = positions else: raise TypeError('positions must be a SkyCoord instance') assert_angle_or_pixel('w', w) assert_angle_or_pixel('h', h) assert_angle('theta', theta) if w.unit.physical_type != h.unit.physical_type: raise ValueError("'w' and 'h' should either both be angles or " "in pixels") self.w = w self.h = h self.theta = theta self._params = ['w', 'h', 'theta'] def to_pixel(self, wcs, mode='all'): """ Convert the aperture to a `RectangularAperture` object defined in pixel coordinates. Parameters ---------- wcs : `~astropy.wcs.WCS` The world coordinate system (WCS) transformation to use. mode : {'all', 'wcs'}, optional Whether to do the transformation including distortions (``'all'``; default) or only including only the core WCS transformation (``'wcs'``). Returns ------- aperture : `RectangularAperture` object A `RectangularAperture` object. """ pixel_params = self._to_pixel_params(wcs, mode=mode) return RectangularAperture(**pixel_params) class SkyRectangularAnnulus(SkyAperture): """ Rectangular annulus aperture(s), defined in sky coordinates. Parameters ---------- positions : `~astropy.coordinates.SkyCoord` Celestial coordinates of the aperture center(s). This can be either scalar coordinates or an array of coordinates. w_in : `~astropy.units.Quantity` The inner full width of the aperture, either in angular or pixel units. For ``theta=0`` the width side is along the North-South axis. w_out : `~astropy.units.Quantity` The outer full width of the aperture, either in angular or pixel units. 
For ``theta=0`` the width side is along the North-South axis. h_out : `~astropy.units.Quantity` The outer full height of the aperture, either in angular or pixel units. The inner full height is calculated as: .. math:: h_{in} = h_{out} \\left(\\frac{w_{in}}{w_{out}}\\right) For ``theta=0`` the height side is along the East-West axis. theta : `~astropy.units.Quantity` The position angle (in angular units) of the width side. For a right-handed world coordinate system, the position angle increases counterclockwise from North (PA=0). """ def __init__(self, positions, w_in, w_out, h_out, theta): if isinstance(positions, SkyCoord): self.positions = positions else: raise TypeError('positions must be a SkyCoord instance') assert_angle_or_pixel('w_in', w_in) assert_angle_or_pixel('w_out', w_out) assert_angle_or_pixel('h_out', h_out) assert_angle('theta', theta) if w_in.unit.physical_type != w_out.unit.physical_type: raise ValueError("w_in and w_out should either both be angles or " "in pixels") if w_out.unit.physical_type != h_out.unit.physical_type: raise ValueError("w_out and h_out should either both be angles " "or in pixels") self.w_in = w_in self.w_out = w_out self.h_out = h_out self.theta = theta self._params = ['w_in', 'w_out', 'h_out', 'theta'] def to_pixel(self, wcs, mode='all'): """ Convert the aperture to a `RectangularAnnulus` object defined in pixel coordinates. Parameters ---------- wcs : `~astropy.wcs.WCS` The world coordinate system (WCS) transformation to use. mode : {'all', 'wcs'}, optional Whether to do the transformation including distortions (``'all'``; default) or only including only the core WCS transformation (``'wcs'``). Returns ------- aperture : `RectangularAnnulus` object A `RectangularAnnulus` object. """ pixel_params = self._to_pixel_params(wcs, mode=mode) return RectangularAnnulus(**pixel_params) photutils-0.4/photutils/aperture/tests/0000755000214200020070000000000013175654702022631 5ustar lbradleySTSCI\science00000000000000photutils-0.4/photutils/aperture/tests/__init__.py0000644000214200020070000000017013055576313024736 0ustar lbradleySTSCI\science00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst """ This package contains affiliated package tests. """ photutils-0.4/photutils/aperture/tests/test_aperture_photometry.py0000644000214200020070000007716713175634532030404 0ustar lbradleySTSCI\science00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst """ The tests in this file test the accuracy of the photometric results. Here we test directly with aperture objects since we are checking the algorithms in aperture_photometry, not in the wrappers. 
""" from __future__ import (absolute_import, division, print_function, unicode_literals) import pytest import numpy as np from numpy.testing import (assert_allclose, assert_array_equal, assert_array_less) from astropy.coordinates import SkyCoord from astropy.io import fits from astropy.nddata import NDData from astropy.table import Table from astropy.tests.helper import remote_data import astropy.units as u from astropy.wcs.utils import pixel_to_skycoord from ..core import aperture_photometry from ..circle import (CircularAperture, CircularAnnulus, SkyCircularAperture, SkyCircularAnnulus) from ..ellipse import (EllipticalAperture, EllipticalAnnulus, SkyEllipticalAperture, SkyEllipticalAnnulus) from ..rectangle import (RectangularAperture, RectangularAnnulus, SkyRectangularAperture, SkyRectangularAnnulus) from ...datasets import (get_path, make_4gaussians_image, make_wcs, make_imagehdu) try: import matplotlib # noqa HAS_MATPLOTLIB = True except ImportError: HAS_MATPLOTLIB = False APERTURE_CL = [CircularAperture, CircularAnnulus, EllipticalAperture, EllipticalAnnulus, RectangularAperture, RectangularAnnulus] TEST_APERTURES = list(zip(APERTURE_CL, ((3.,), (3., 5.), (3., 5., 1.), (3., 5., 4., 1.), (5, 8, np.pi / 4), (8, 12, 8, np.pi / 8)))) @pytest.mark.parametrize(('aperture_class', 'params'), TEST_APERTURES) def test_outside_array(aperture_class, params): data = np.ones((10, 10), dtype=np.float) aperture = aperture_class((-60, 60), *params) fluxtable = aperture_photometry(data, aperture) # aperture is fully outside array: assert np.isnan(fluxtable['aperture_sum']) @pytest.mark.parametrize(('aperture_class', 'params'), TEST_APERTURES) def test_inside_array_simple(aperture_class, params): data = np.ones((40, 40), dtype=np.float) aperture = aperture_class((20., 20.), *params) table1 = aperture_photometry(data, aperture, method='center', subpixels=10) table2 = aperture_photometry(data, aperture, method='subpixel', subpixels=10) table3 = aperture_photometry(data, aperture, method='exact', subpixels=10) true_flux = aperture.area() if not isinstance(aperture, (RectangularAperture, RectangularAnnulus)): assert_allclose(table3['aperture_sum'], true_flux) assert_allclose(table2['aperture_sum'], table3['aperture_sum'], atol=0.1) assert table1['aperture_sum'] < table3['aperture_sum'] @pytest.mark.skipif('not HAS_MATPLOTLIB') @pytest.mark.parametrize(('aperture_class', 'params'), TEST_APERTURES) def test_aperture_plots(aperture_class, params): # This test should run without any errors, and there is no return # value. # TODO: check the content of the plot aperture = aperture_class((20., 20.), *params) aperture.plot() def test_aperture_pixel_positions(): pos1 = (10, 20) pos2 = u.Quantity((10, 20), unit=u.pixel) pos3 = ((10, 20, 30), (10, 20, 30)) pos3_pairs = ((10, 10), (20, 20), (30, 30)) r = 3 ap1 = CircularAperture(pos1, r) ap2 = CircularAperture(pos2, r) ap3 = CircularAperture(pos3, r) assert_allclose(np.atleast_2d(pos1), ap1.positions) assert_allclose(np.atleast_2d(pos2.value), ap2.positions) assert_allclose(pos3_pairs, ap3.positions) class BaseTestAperturePhotometry(object): def test_scalar_error(self): # Scalar error error = 1. 
if not hasattr(self, 'mask'): mask = None true_error = np.sqrt(self.area) else: mask = self.mask # 1 masked pixel true_error = np.sqrt(self.area - 1) table1 = aperture_photometry(self.data, self.aperture, method='center', mask=mask, error=error) table2 = aperture_photometry(self.data, self.aperture, method='subpixel', subpixels=12, mask=mask, error=error) table3 = aperture_photometry(self.data, self.aperture, method='exact', mask=mask, error=error) if not isinstance(self.aperture, (RectangularAperture, RectangularAnnulus)): assert_allclose(table3['aperture_sum'], self.true_flux) assert_allclose(table2['aperture_sum'], table3['aperture_sum'], atol=0.1) assert np.all(table1['aperture_sum'] < table3['aperture_sum']) if not isinstance(self.aperture, (RectangularAperture, RectangularAnnulus)): assert_allclose(table3['aperture_sum_err'], true_error) assert_allclose(table2['aperture_sum'], table3['aperture_sum'], atol=0.1) assert np.all(table1['aperture_sum_err'] < table3['aperture_sum_err']) def test_array_error(self): # Array error error = np.ones(self.data.shape, dtype=np.float) if not hasattr(self, 'mask'): mask = None true_error = np.sqrt(self.area) else: mask = self.mask # 1 masked pixel true_error = np.sqrt(self.area - 1) table1 = aperture_photometry(self.data, self.aperture, method='center', mask=mask, error=error) table2 = aperture_photometry(self.data, self.aperture, method='subpixel', subpixels=12, mask=mask, error=error) table3 = aperture_photometry(self.data, self.aperture, method='exact', mask=mask, error=error) if not isinstance(self.aperture, (RectangularAperture, RectangularAnnulus)): assert_allclose(table3['aperture_sum'], self.true_flux) assert_allclose(table2['aperture_sum'], table3['aperture_sum'], atol=0.1) assert np.all(table1['aperture_sum'] < table3['aperture_sum']) if not isinstance(self.aperture, (RectangularAperture, RectangularAnnulus)): assert_allclose(table3['aperture_sum_err'], true_error) assert_allclose(table2['aperture_sum_err'], table3['aperture_sum_err'], atol=0.1) assert np.all(table1['aperture_sum_err'] < table3['aperture_sum_err']) class TestCircular(BaseTestAperturePhotometry): def setup_class(self): self.data = np.ones((40, 40), dtype=np.float) position = (20., 20.) r = 10. self.aperture = CircularAperture(position, r) self.area = np.pi * r * r self.true_flux = self.area class TestCircularArray(BaseTestAperturePhotometry): def setup_class(self): self.data = np.ones((40, 40), dtype=np.float) position = ((20., 20.), (25., 25.)) r = 10. self.aperture = CircularAperture(position, r) self.area = np.pi * r * r self.area = np.array((self.area, ) * 2) self.true_flux = self.area class TestCircularAnnulus(BaseTestAperturePhotometry): def setup_class(self): self.data = np.ones((40, 40), dtype=np.float) position = (20., 20.) r_in = 8. r_out = 10. self.aperture = CircularAnnulus(position, r_in, r_out) self.area = np.pi * (r_out * r_out - r_in * r_in) self.true_flux = self.area class TestCircularAnnulusArray(BaseTestAperturePhotometry): def setup_class(self): self.data = np.ones((40, 40), dtype=np.float) position = ((20., 20.), (25., 25.)) r_in = 8. r_out = 10. self.aperture = CircularAnnulus(position, r_in, r_out) self.area = np.pi * (r_out * r_out - r_in * r_in) self.area = np.array((self.area, ) * 2) self.true_flux = self.area class TestElliptical(BaseTestAperturePhotometry): def setup_class(self): self.data = np.ones((40, 40), dtype=np.float) position = (20., 20.) a = 10. b = 5. theta = -np.pi / 4. 
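        # for an all-ones image, the true flux equals the aperture area
        # (pi * a * b for an ellipse)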
self.aperture = EllipticalAperture(position, a, b, theta) self.area = np.pi * a * b self.true_flux = self.area class TestEllipticalAnnulus(BaseTestAperturePhotometry): def setup_class(self): self.data = np.ones((40, 40), dtype=np.float) position = (20., 20.) a_in = 5. a_out = 8. b_out = 5. theta = -np.pi / 4. self.aperture = EllipticalAnnulus(position, a_in, a_out, b_out, theta) self.area = (np.pi * (a_out * b_out) - np.pi * (a_in * b_out * a_in / a_out)) self.true_flux = self.area class TestRectangularAperture(BaseTestAperturePhotometry): def setup_class(self): self.data = np.ones((40, 40), dtype=np.float) position = (20., 20.) h = 5. w = 8. theta = np.pi / 4. self.aperture = RectangularAperture(position, w, h, theta) self.area = h * w self.true_flux = self.area class TestRectangularAnnulus(BaseTestAperturePhotometry): def setup_class(self): self.data = np.ones((40, 40), dtype=np.float) position = (20., 20.) h_out = 8. w_in = 8. w_out = 12. h_in = w_in * h_out / w_out theta = np.pi / 8. self.aperture = RectangularAnnulus(position, w_in, w_out, h_out, theta) self.area = h_out * w_out - h_in * w_in self.true_flux = self.area class TestMaskedSkipCircular(BaseTestAperturePhotometry): def setup_class(self): self.data = np.ones((40, 40), dtype=np.float) self.mask = np.zeros((40, 40), dtype=bool) self.mask[20, 20] = True position = (20., 20.) r = 10. self.aperture = CircularAperture(position, r) self.area = np.pi * r * r self.true_flux = self.area - 1 class BaseTestDifferentData(object): def test_basic_circular_aperture_photometry(self): aperture = CircularAperture(self.position, self.radius) table = aperture_photometry(self.data, aperture, method='exact', unit='adu') assert_allclose(table['aperture_sum'].value, self.true_flux) assert table['aperture_sum'].unit, self.fluxunit assert np.all(table['xcenter'].value == np.transpose(self.position)[0]) assert np.all(table['ycenter'].value == np.transpose(self.position)[1]) class TestInputPrimaryHDU(BaseTestDifferentData): def setup_class(self): data = np.ones((40, 40), dtype=np.float) self.data = fits.ImageHDU(data=data) self.data.header['BUNIT'] = 'adu' self.radius = 3 self.position = (20, 20) self.true_flux = np.pi * self.radius * self.radius self.fluxunit = u.adu class TestInputHDUList(BaseTestDifferentData): def setup_class(self): data0 = np.ones((40, 40), dtype=np.float) data1 = np.empty((40, 40), dtype=np.float) data1.fill(2) self.data = fits.HDUList([fits.ImageHDU(data=data0), fits.ImageHDU(data=data1)]) self.radius = 3 self.position = (20, 20) # It should stop at the first extension self.true_flux = np.pi * self.radius * self.radius class TestInputHDUDifferentBUNIT(BaseTestDifferentData): def setup_class(self): data = np.ones((40, 40), dtype=np.float) self.data = fits.ImageHDU(data=data) self.data.header['BUNIT'] = 'Jy' self.radius = 3 self.position = (20, 20) self.true_flux = np.pi * self.radius * self.radius self.fluxunit = u.adu class TestInputNDData(BaseTestDifferentData): def setup_class(self): data = np.ones((40, 40), dtype=np.float) self.data = NDData(data, unit=u.adu) self.radius = 3 self.position = [(20, 20), (30, 30)] self.true_flux = np.pi * self.radius * self.radius self.fluxunit = u.adu @remote_data def test_wcs_based_photometry_to_catalogue(): pathcat = get_path('spitzer_example_catalog.xml', location='remote') pathhdu = get_path('spitzer_example_image.fits', location='remote') hdu = fits.open(pathhdu) scale = hdu[0].header['PIXSCAL1'] catalog = Table.read(pathcat) pos_skycoord = SkyCoord(catalog['l'], catalog['b'], 
frame='galactic') photometry_skycoord = aperture_photometry( hdu, SkyCircularAperture(pos_skycoord, 4 * u.arcsec)) photometry_skycoord_pix = aperture_photometry( hdu, SkyCircularAperture(pos_skycoord, 4. / scale * u.pixel)) assert_allclose(photometry_skycoord['aperture_sum'], photometry_skycoord_pix['aperture_sum']) # Photometric unit conversion is needed to match the catalogue factor = (1.2 * u.arcsec) ** 2 / u.pixel converted_aperture_sum = (photometry_skycoord['aperture_sum'] * factor).to(u.mJy / u.pixel) fluxes_catalog = catalog['f4_5'].filled() # There shouldn't be large outliers, but some differences is OK, as # fluxes_catalog is based on PSF photometry, etc. assert_allclose(fluxes_catalog, converted_aperture_sum.value, rtol=1e0) assert(np.mean(np.fabs(((fluxes_catalog - converted_aperture_sum.value) / fluxes_catalog))) < 0.1) def test_wcs_based_photometry(): data = make_4gaussians_image() wcs = make_wcs(data.shape) hdu = make_imagehdu(data, wcs=wcs) # hard wired positions in make_4gaussian_image pos_orig_pixel = u.Quantity(([160., 25., 150., 90.], [70., 40., 25., 60.]), unit=u.pixel) pos_skycoord = pixel_to_skycoord(pos_orig_pixel[0], pos_orig_pixel[1], wcs) pos_skycoord_s = pos_skycoord[2] photometry_skycoord_circ = aperture_photometry( hdu, SkyCircularAperture(pos_skycoord, 3 * u.arcsec)) photometry_skycoord_circ_2 = aperture_photometry( hdu, SkyCircularAperture(pos_skycoord, 2 * u.arcsec)) photometry_skycoord_circ_s = aperture_photometry( hdu, SkyCircularAperture(pos_skycoord_s, 3 * u.arcsec)) assert_allclose(photometry_skycoord_circ['aperture_sum'][2], photometry_skycoord_circ_s['aperture_sum']) photometry_skycoord_circ_ann = aperture_photometry( hdu, SkyCircularAnnulus(pos_skycoord, 2 * u.arcsec, 3 * u.arcsec)) photometry_skycoord_circ_ann_s = aperture_photometry( hdu, SkyCircularAnnulus(pos_skycoord_s, 2 * u.arcsec, 3 * u.arcsec)) assert_allclose(photometry_skycoord_circ_ann['aperture_sum'][2], photometry_skycoord_circ_ann_s['aperture_sum']) assert_allclose(photometry_skycoord_circ_ann['aperture_sum'], photometry_skycoord_circ['aperture_sum'] - photometry_skycoord_circ_2['aperture_sum']) photometry_skycoord_ell = aperture_photometry( hdu, SkyEllipticalAperture(pos_skycoord, 3 * u.arcsec, 3.0001 * u.arcsec, 45 * u.arcsec)) photometry_skycoord_ell_2 = aperture_photometry( hdu, SkyEllipticalAperture(pos_skycoord, 2 * u.arcsec, 2.0001 * u.arcsec, 45 * u.arcsec)) photometry_skycoord_ell_s = aperture_photometry( hdu, SkyEllipticalAperture(pos_skycoord_s, 3 * u.arcsec, 3.0001 * u.arcsec, 45 * u.arcsec)) photometry_skycoord_ell_ann = aperture_photometry( hdu, SkyEllipticalAnnulus(pos_skycoord, 2 * u.arcsec, 3 * u.arcsec, 3.0001 * u.arcsec, 45 * u.arcsec)) photometry_skycoord_ell_ann_s = aperture_photometry( hdu, SkyEllipticalAnnulus(pos_skycoord_s, 2 * u.arcsec, 3 * u.arcsec, 3.0001 * u.arcsec, 45 * u.arcsec)) assert_allclose(photometry_skycoord_ell['aperture_sum'][2], photometry_skycoord_ell_s['aperture_sum']) assert_allclose(photometry_skycoord_ell_ann['aperture_sum'][2], photometry_skycoord_ell_ann_s['aperture_sum']) assert_allclose(photometry_skycoord_ell['aperture_sum'], photometry_skycoord_circ['aperture_sum'], rtol=5e-3) assert_allclose(photometry_skycoord_ell_ann['aperture_sum'], photometry_skycoord_ell['aperture_sum'] - photometry_skycoord_ell_2['aperture_sum'], rtol=1e-4) photometry_skycoord_rec = aperture_photometry( hdu, SkyRectangularAperture(pos_skycoord, 6 * u.arcsec, 6 * u.arcsec, 0 * u.arcsec), method='subpixel', subpixels=20) photometry_skycoord_rec_4 = 
aperture_photometry( hdu, SkyRectangularAperture(pos_skycoord, 4 * u.arcsec, 4 * u.arcsec, 0 * u.arcsec), method='subpixel', subpixels=20) photometry_skycoord_rec_s = aperture_photometry( hdu, SkyRectangularAperture(pos_skycoord_s, 6 * u.arcsec, 6 * u.arcsec, 0 * u.arcsec), method='subpixel', subpixels=20) photometry_skycoord_rec_ann = aperture_photometry( hdu, SkyRectangularAnnulus(pos_skycoord, 4 * u.arcsec, 6 * u.arcsec, 6 * u.arcsec, 0 * u.arcsec), method='subpixel', subpixels=20) photometry_skycoord_rec_ann_s = aperture_photometry( hdu, SkyRectangularAnnulus(pos_skycoord_s, 4 * u.arcsec, 6 * u.arcsec, 6 * u.arcsec, 0 * u.arcsec), method='subpixel', subpixels=20) assert_allclose(photometry_skycoord_rec['aperture_sum'][2], photometry_skycoord_rec_s['aperture_sum']) assert np.all(photometry_skycoord_rec['aperture_sum'] > photometry_skycoord_circ['aperture_sum']) assert_allclose(photometry_skycoord_rec_ann['aperture_sum'][2], photometry_skycoord_rec_ann_s['aperture_sum']) assert_allclose(photometry_skycoord_rec_ann['aperture_sum'], photometry_skycoord_rec['aperture_sum'] - photometry_skycoord_rec_4['aperture_sum'], rtol=1e-4) def test_basic_circular_aperture_photometry_unit(): data1 = np.ones((40, 40), dtype=np.float) data2 = u.Quantity(data1, unit=u.adu) radius = 3 position = (20, 20) true_flux = np.pi * radius * radius unit = u.adu table1 = aperture_photometry(data1, CircularAperture(position, radius), unit=unit) table2 = aperture_photometry(data2, CircularAperture(position, radius), unit=unit) assert_allclose(table1['aperture_sum'].value, true_flux) assert_allclose(table2['aperture_sum'].value, true_flux) assert table1['aperture_sum'].unit == unit assert table2['aperture_sum'].unit == data2.unit == unit def test_aperture_photometry_with_error_units(): """Test aperture_photometry when error has units (see #176).""" data1 = np.ones((40, 40), dtype=np.float) data2 = u.Quantity(data1, unit=u.adu) error = u.Quantity(data1, unit=u.adu) radius = 3 true_flux = np.pi * radius * radius unit = u.adu position = (20, 20) table1 = aperture_photometry(data2, CircularAperture(position, radius), error=error) assert_allclose(table1['aperture_sum'].value, true_flux) assert_allclose(table1['aperture_sum_err'].value, np.sqrt(true_flux)) assert table1['aperture_sum'].unit == unit assert table1['aperture_sum_err'].unit == unit def test_aperture_photometry_inputs_with_mask(): """ Test that aperture_photometry does not modify the input data or error array when a mask is input. """ data = np.ones((5, 5)) aperture = CircularAperture((2, 2), 2.) mask = np.zeros_like(data, dtype=bool) data[2, 2] = 100. # bad pixel mask[2, 2] = True error = np.sqrt(data) data_in = data.copy() error_in = error.copy() t1 = aperture_photometry(data, aperture, error=error, mask=mask) assert_array_equal(data, data_in) assert_array_equal(error, error_in) assert_allclose(t1['aperture_sum'][0], 11.5663706144) t2 = aperture_photometry(data, aperture) assert_allclose(t2['aperture_sum'][0], 111.566370614) TEST_ELLIPSE_EXACT_APERTURES = [(3.469906, 3.923861394, 3.), (0.3834415188257778, 0.3834415188257778, 0.3)] @pytest.mark.parametrize('x,y,r', TEST_ELLIPSE_EXACT_APERTURES) def test_ellipse_exact_grid(x, y, r): """ Test elliptical exact aperture photometry on a grid of pixel positions. This is a regression test for the bug discovered in this issue: https://github.com/astropy/photutils/issues/198 """ data = np.ones((10, 10)) aperture = EllipticalAperture((x, y), r, r, 0.) 
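    # with a == b == r the ellipse is a circle, so the exact aperture sum
    # over an all-ones image should equal pi * r**2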
t = aperture_photometry(data, aperture, method='exact') actual = t['aperture_sum'][0] / (np.pi * r ** 2) assert_allclose(actual, 1) @pytest.mark.parametrize('value', [np.nan, np.inf]) def test_nan_inf_mask(value): """Test that nans and infs are properly masked [267].""" data = np.ones((9, 9)) mask = np.zeros_like(data, dtype=bool) data[4, 4] = value mask[4, 4] = True radius = 2. aper = CircularAperture((4, 4), radius) tbl = aperture_photometry(data, aper, mask=mask) desired = (np.pi * radius**2) - 1 assert_allclose(tbl['aperture_sum'], desired) def test_aperture_partial_overlap(): data = np.ones((20, 20)) error = np.ones((20, 20)) xypos = [(10, 10), (0, 0), (0, 19), (19, 0), (19, 19)] r = 5. aper = CircularAperture(xypos, r=r) tbl = aperture_photometry(data, aper, error=error) assert_allclose(tbl['aperture_sum'][0], np.pi * r ** 2) assert_array_less(tbl['aperture_sum'][1:], np.pi * r ** 2) unit = u.MJy / u.sr tbl = aperture_photometry(data * unit, aper, error=error * unit) assert_allclose(tbl['aperture_sum'][0].value, np.pi * r ** 2) assert_array_less(tbl['aperture_sum'][1:].value, np.pi * r ** 2) assert_array_less(tbl['aperture_sum_err'][1:].value, np.pi * r ** 2) assert tbl['aperture_sum'].unit == unit assert tbl['aperture_sum_err'].unit == unit def test_pixel_aperture_repr(): aper = CircularAperture((10, 20), r=3.0) a_repr = '' a_str = 'Aperture: CircularAperture\npositions: [[10, 20]]\nr: 3.0' assert repr(aper) == a_repr assert str(aper) == a_str aper = CircularAnnulus((10, 20), r_in=3.0, r_out=5.0) a_repr = '' a_str = ('Aperture: CircularAnnulus\npositions: [[10, 20]]\nr_in: 3.0\n' 'r_out: 5.0') assert repr(aper) == a_repr assert str(aper) == a_str aper = EllipticalAperture((10, 20), a=5.0, b=3.0, theta=15.0) a_repr = '' a_str = ('Aperture: EllipticalAperture\npositions: [[10, 20]]\n' 'a: 5.0\nb: 3.0\ntheta: 15.0') assert repr(aper) == a_repr assert str(aper) == a_str aper = EllipticalAnnulus((10, 20), a_in=4.0, a_out=8.0, b_out=4.0, theta=15.0) a_repr = ('') a_str = ('Aperture: EllipticalAnnulus\npositions: [[10, 20]]\na_in: ' '4.0\na_out: 8.0\nb_out: 4.0\ntheta: 15.0') assert repr(aper) == a_repr assert str(aper) == a_str aper = RectangularAperture((10, 20), w=5.0, h=3.0, theta=15.0) a_repr = '' a_str = ('Aperture: RectangularAperture\npositions: [[10, 20]]\n' 'w: 5.0\nh: 3.0\ntheta: 15.0') assert repr(aper) == a_repr assert str(aper) == a_str aper = RectangularAnnulus((10, 20), w_in=4.0, w_out=8.0, h_out=4.0, theta=15.0) a_repr = ('') a_str = ('Aperture: RectangularAnnulus\npositions: [[10, 20]]\n' 'w_in: 4.0\nw_out: 8.0\nh_out: 4.0\ntheta: 15.0') assert repr(aper) == a_repr assert str(aper) == a_str def test_sky_aperture_repr(): s = SkyCoord([1, 2], [3, 4], unit='deg') aper = SkyCircularAperture(s, r=3*u.pix) a_repr = (', r=3.0 pix)>') a_str = ('Aperture: SkyCircularAperture\npositions: \n' 'r: 3.0 pix') assert repr(aper) == a_repr assert str(aper) == a_str aper = SkyCircularAnnulus(s, r_in=3.*u.pix, r_out=5*u.pix) a_repr = (', r_in=3.0 pix, r_out=5.0 pix)>') a_str = ('Aperture: SkyCircularAnnulus\npositions: \n' 'r_in: 3.0 pix\nr_out: 5.0 pix') assert repr(aper) == a_repr assert str(aper) == a_str aper = SkyEllipticalAperture(s, a=3*u.pix, b=5*u.pix, theta=15*u.deg) a_repr = (', a=3.0 pix, b=5.0 pix,' ' theta=15.0 deg)>') a_str = ('Aperture: SkyEllipticalAperture\npositions: \n' 'a: 3.0 pix\nb: 5.0 pix\ntheta: 15.0 deg') assert repr(aper) == a_repr assert str(aper) == a_str aper = SkyEllipticalAnnulus(s, a_in=3*u.pix, a_out=5*u.pix, b_out=3*u.pix, theta=15*u.deg) a_repr = (', 
a_in=3.0 pix, ' 'a_out=5.0 pix, b_out=3.0 pix, theta=15.0 deg)>') a_str = ('Aperture: SkyEllipticalAnnulus\npositions: \n' 'a_in: 3.0 pix\na_out: 5.0 pix\nb_out: 3.0 pix\n' 'theta: 15.0 deg') assert repr(aper) == a_repr assert str(aper) == a_str aper = SkyRectangularAperture(s, w=3*u.pix, h=5*u.pix, theta=15*u.deg) a_repr = (', w=3.0 pix, h=5.0 pix' ', theta=15.0 deg)>') a_str = ('Aperture: SkyRectangularAperture\npositions: \n' 'w: 3.0 pix\nh: 5.0 pix\ntheta: 15.0 deg') assert repr(aper) == a_repr assert str(aper) == a_str aper = SkyRectangularAnnulus(s, w_in=3*u.pix, w_out=3.4*u.pix, h_out=5*u.pix, theta=15*u.deg) a_repr = (', w_in=3.0 pix, ' 'w_out=3.4 pix, h_out=5.0 pix, theta=15.0 deg)>') a_str = ('Aperture: SkyRectangularAnnulus\npositions: \n' 'w_in: 3.0 pix\nw_out: 3.4 pix\nh_out: 5.0 pix\n' 'theta: 15.0 deg') assert repr(aper) == a_repr assert str(aper) == a_str def test_rectangular_bbox(): # odd sizes width = 7 height = 3 a = RectangularAperture((50, 50), w=width, h=height, theta=0) assert a.bounding_boxes[0].shape == (height, width) a = RectangularAperture((50.5, 50.5), w=width, h=height, theta=0) assert a.bounding_boxes[0].shape == (height + 1, width + 1) a = RectangularAperture((50, 50), w=width, h=height, theta=90.*np.pi/180.) assert a.bounding_boxes[0].shape == (width, height) # even sizes width = 8 height = 4 a = RectangularAperture((50, 50), w=width, h=height, theta=0) assert a.bounding_boxes[0].shape == (height + 1, width + 1) a = RectangularAperture((50.5, 50.5), w=width, h=height, theta=0) assert a.bounding_boxes[0].shape == (height, width) a = RectangularAperture((50.5, 50.5), w=width, h=height, theta=90.*np.pi/180.) assert a.bounding_boxes[0].shape == (width, height) def test_elliptical_bbox(): # integer axes a = 7 b = 3 ap = EllipticalAperture((50, 50), a=a, b=b, theta=0) assert ap.bounding_boxes[0].shape == (2*b + 1, 2*a + 1) ap = EllipticalAperture((50.5, 50.5), a=a, b=b, theta=0) assert ap.bounding_boxes[0].shape == (2*b, 2*a) ap = EllipticalAperture((50, 50), a=a, b=b, theta=90.*np.pi/180.) assert ap.bounding_boxes[0].shape == (2*a + 1, 2*b + 1) # fractional axes a = 7.5 b = 4.5 ap = EllipticalAperture((50, 50), a=a, b=b, theta=0) assert ap.bounding_boxes[0].shape == (2*b, 2*a) ap = EllipticalAperture((50.5, 50.5), a=a, b=b, theta=0) assert ap.bounding_boxes[0].shape == (2*b + 1, 2*a + 1) ap = EllipticalAperture((50, 50), a=a, b=b, theta=90.*np.pi/180.) assert ap.bounding_boxes[0].shape == (2*a, 2*b) def test_to_sky_pixel(): data = make_4gaussians_image() wcs = make_wcs(data.shape) ap = CircularAperture(((12.3, 15.7), (48.19, 98.14)), r=3.14) ap2 = ap.to_sky(wcs).to_pixel(wcs) assert_allclose(ap.positions, ap2.positions) assert_allclose(ap.r, ap2.r) ap = CircularAnnulus(((12.3, 15.7), (48.19, 98.14)), r_in=3.14, r_out=5.32) ap2 = ap.to_sky(wcs).to_pixel(wcs) assert_allclose(ap.positions, ap2.positions) assert_allclose(ap.r_in, ap2.r_in) assert_allclose(ap.r_out, ap2.r_out) ap = EllipticalAperture(((12.3, 15.7), (48.19, 98.14)), a=3.14, b=5.32, theta=103.*np.pi/180.) ap2 = ap.to_sky(wcs).to_pixel(wcs) assert_allclose(ap.positions, ap2.positions) assert_allclose(ap.a, ap2.a) assert_allclose(ap.b, ap2.b) assert_allclose(ap.theta, ap2.theta) ap = EllipticalAnnulus(((12.3, 15.7), (48.19, 98.14)), a_in=3.14, a_out=15.32, b_out=4.89, theta=103.*np.pi/180.) 
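    # round-trip the elliptical annulus through sky coordinates and back;
    # the recovered pixel parameters should match the originals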
ap2 = ap.to_sky(wcs).to_pixel(wcs) assert_allclose(ap.positions, ap2.positions) assert_allclose(ap.a_in, ap2.a_in) assert_allclose(ap.a_out, ap2.a_out) assert_allclose(ap.b_out, ap2.b_out) assert_allclose(ap.theta, ap2.theta) ap = RectangularAperture(((12.3, 15.7), (48.19, 98.14)), w=3.14, h=5.32, theta=103.*np.pi/180.) ap2 = ap.to_sky(wcs).to_pixel(wcs) assert_allclose(ap.positions, ap2.positions) assert_allclose(ap.w, ap2.w) assert_allclose(ap.h, ap2.h) assert_allclose(ap.theta, ap2.theta) ap = RectangularAnnulus(((12.3, 15.7), (48.19, 98.14)), w_in=3.14, w_out=15.32, h_out=4.89, theta=103.*np.pi/180.) ap2 = ap.to_sky(wcs).to_pixel(wcs) assert_allclose(ap.positions, ap2.positions) assert_allclose(ap.w_in, ap2.w_in) assert_allclose(ap.w_out, ap2.w_out) assert_allclose(ap.h_out, ap2.h_out) assert_allclose(ap.theta, ap2.theta) photutils-0.4/photutils/aperture/tests/test_bounding_box.py0000644000214200020070000000530613175634532026722 0ustar lbradleySTSCI\science00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst from __future__ import (absolute_import, division, print_function, unicode_literals) from numpy.testing import assert_allclose import pytest from ..bounding_box import BoundingBox try: import matplotlib # noqa HAS_MATPLOTLIB = True except ImportError: HAS_MATPLOTLIB = False def test_bounding_box_init(): bbox = BoundingBox(1, 10, 2, 20) assert bbox.ixmin == 1 assert bbox.ixmax == 10 assert bbox.iymin == 2 assert bbox.iymax == 20 def test_bounding_box_init_minmax(): with pytest.raises(ValueError): BoundingBox(100, 1, 1, 100) with pytest.raises(ValueError): BoundingBox(1, 100, 100, 1) def test_bounding_box_inputs(): with pytest.raises(TypeError): BoundingBox([1], [10], [2], [9]) with pytest.raises(TypeError): BoundingBox([1, 2], 10, 2, 9) with pytest.raises(TypeError): BoundingBox(1.0, 10.0, 2.0, 9.0) with pytest.raises(TypeError): BoundingBox(1.3, 10, 2, 9) with pytest.raises(TypeError): BoundingBox(1, 10.3, 2, 9) with pytest.raises(TypeError): BoundingBox(1, 10, 2.3, 9) with pytest.raises(TypeError): BoundingBox(1, 10, 2, 9.3) def test_bounding_box_from_float(): # This is the example from the method docstring bbox = BoundingBox._from_float(xmin=1.0, xmax=10.0, ymin=2.0, ymax=20.0) assert bbox == BoundingBox(ixmin=1, ixmax=11, iymin=2, iymax=21) bbox = BoundingBox._from_float(xmin=1.4, xmax=10.4, ymin=1.6, ymax=10.6) assert bbox == BoundingBox(ixmin=1, ixmax=11, iymin=2, iymax=12) def test_bounding_box_eq(): bbox = BoundingBox(1, 10, 2, 20) assert bbox == bbox assert bbox != BoundingBox(9, 10, 2, 20) assert bbox != BoundingBox(1, 99, 2, 20) assert bbox != BoundingBox(1, 10, 9, 20) assert bbox != BoundingBox(1, 10, 2, 99) def test_bounding_box_repr(): bbox = BoundingBox(1, 10, 2, 20) assert repr(bbox) == 'BoundingBox(ixmin=1, ixmax=10, iymin=2, iymax=20)' assert eval(repr(bbox)) == bbox def test_bounding_box_shape(): bbox = BoundingBox(1, 10, 2, 20) assert bbox.shape == (18, 9) def test_bounding_box_slices(): bbox = BoundingBox(1, 10, 2, 20) assert bbox.slices == (slice(2, 20), slice(1, 10)) def test_bounding_box_extent(): bbox = BoundingBox(1, 10, 2, 20) assert_allclose(bbox.extent, (0.5, 9.5, 1.5, 19.5)) @pytest.mark.skipif('not HAS_MATPLOTLIB') def test_bounding_box_as_patch(): bbox = BoundingBox(1, 10, 2, 20) patch = bbox.as_patch() assert_allclose(patch.get_xy(), (0.5, 1.5)) assert_allclose(patch.get_width(), 9) assert_allclose(patch.get_height(), 18) photutils-0.4/photutils/background/0000755000214200020070000000000013175654702021757 5ustar 
lbradleySTSCI\science00000000000000photutils-0.4/photutils/background/__init__.py0000644000214200020070000000036313175634532024071 0ustar lbradleySTSCI\science00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst """ This subpackage contains modules and packages for background and background rms estimation. """ from .core import * # noqa from .background_2d import * # noqa photutils-0.4/photutils/background/background_2d.py0000644000214200020070000007503213175634532025043 0ustar lbradleySTSCI\science00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst """ This module defines background classes to estimate the 2D background and background RMS in a 2D image. """ from __future__ import (absolute_import, division, print_function, unicode_literals) from itertools import product import numpy as np from numpy.lib.index_tricks import index_exp from astropy.stats import SigmaClip from astropy.utils import lazyproperty from .core import SExtractorBackground, StdBackgroundRMS from ..utils import ShepardIDWInterpolator __all__ = ['BkgZoomInterpolator', 'BkgIDWInterpolator', 'Background2D'] __doctest_requires__ = {('BkgZoomInterpolator', 'Background2D'): ['scipy']} class BkgZoomInterpolator(object): """ This class generates full-sized background and background RMS images from lower-resolution mesh images using the `~scipy.ndimage.zoom` (spline) interpolator. This class must be used in concert with the `Background2D` class. Parameters ---------- order : int, optional The order of the spline interpolation used to resize the low-resolution background and background RMS mesh images. The value must be an integer in the range 0-5. The default is 3 (bicubic interpolation). mode : {'reflect', 'constant', 'nearest', 'wrap'}, optional Points outside the boundaries of the input are filled according to the given mode. Default is 'reflect'. cval : float, optional The value used for points outside the boundaries of the input if ``mode='constant'``. Default is 0.0 """ def __init__(self, order=3, mode='reflect', cval=0.0): self.order = order self.mode = mode self.cval = cval def __call__(self, mesh, bkg2d_obj): """ Resize the 2D mesh array. Parameters ---------- mesh : 2D `~numpy.ndarray` The low-resolution 2D mesh array. bkg2d_obj : `Background2D` object The `Background2D` object that prepared the ``mesh`` array. Returns ------- result : 2D `~numpy.ndarray` The resized background or background RMS image. """ mesh = np.asanyarray(mesh) if np.ptp(mesh) == 0: return np.zeros_like(bkg2d_obj.data) + np.min(mesh) from scipy.ndimage import zoom if bkg2d_obj.edge_method == 'pad': # The mesh is first resized to the larger padded-data size # (i.e. zoom_factor should be an integer) and then cropped # back to the final data size. zoom_factor = (int(bkg2d_obj.nyboxes * bkg2d_obj.box_size[0] / mesh.shape[0]), int(bkg2d_obj.nxboxes * bkg2d_obj.box_size[1] / mesh.shape[1])) result = zoom(mesh, zoom_factor, order=self.order, mode=self.mode, cval=self.cval) return result[0:bkg2d_obj.data.shape[0], 0:bkg2d_obj.data.shape[1]] else: # The mesh is resized directly to the final data size. 
zoom_factor = (float(bkg2d_obj.data.shape[0] / mesh.shape[0]), float(bkg2d_obj.data.shape[1] / mesh.shape[1])) return zoom(mesh, zoom_factor, order=self.order, mode=self.mode, cval=self.cval) class BkgIDWInterpolator(object): """ This class generates full-sized background and background RMS images from lower-resolution mesh images using inverse-distance weighting (IDW) interpolation (`~photutils.utils.ShepardIDWInterpolator`). This class must be used in concert with the `Background2D` class. Parameters ---------- leafsize : float, optional The number of points at which the k-d tree algorithm switches over to brute-force. ``leafsize`` must be positive. See `scipy.spatial.cKDTree` for further information. n_neighbors : int, optional The maximum number of nearest neighbors to use during the interpolation. power : float, optional The power of the inverse distance used for the interpolation weights. reg : float, optional The regularization parameter. It may be used to control the smoothness of the interpolator. """ def __init__(self, leafsize=10, n_neighbors=10, power=1.0, reg=0.0): self.leafsize = leafsize self.n_neighbors = n_neighbors self.power = power self.reg = reg def __call__(self, mesh, bkg2d_obj): """ Resize the 2D mesh array. Parameters ---------- mesh : 2D `~numpy.ndarray` The low-resolution 2D mesh array. bkg2d_obj : `Background2D` object The `Background2D` object that prepared the ``mesh`` array. Returns ------- result : 2D `~numpy.ndarray` The resized background or background RMS image. """ mesh = np.asanyarray(mesh) if np.ptp(mesh) == 0: return np.zeros_like(bkg2d_obj.data) + np.min(mesh) mesh1d = mesh[bkg2d_obj.mesh_yidx, bkg2d_obj.mesh_xidx] f = ShepardIDWInterpolator(bkg2d_obj.yx, mesh1d, leafsize=self.leafsize) data = f(bkg2d_obj.data_coords, n_neighbors=self.n_neighbors, power=self.power, reg=self.reg) return data.reshape(bkg2d_obj.data.shape) class Background2D(object): """ Class to estimate a 2D background and background RMS noise in an image. The background is estimated using sigma-clipped statistics in each mesh of a grid that covers the input ``data`` to create a low-resolution, and possibly irregularly-gridded, background map. The final background map is calculated by interpolating the low-resolution background map. Parameters ---------- data : array_like The 2D array from which to estimate the background and/or background RMS map. box_size : int or array_like (int) The box size along each axis. If ``box_size`` is a scalar then a square box of size ``box_size`` will be used. If ``box_size`` has two elements, they should be in ``(ny, nx)`` order. For best results, the box shape should be chosen such that the ``data`` are covered by an integer number of boxes in both dimensions. When this is not the case, see the ``edge_method`` keyword for more options. mask : array_like (bool), optional A boolean mask, with the same shape as ``data``, where a `True` value indicates the corresponding element of ``data`` is masked. Masked data are excluded from calculations. exclude_percentile : float in the range of [0, 100], optional The percentage of masked pixels in a mesh, used as a threshold for determining if the mesh is excluded. If a mesh has more than ``exclude_percentile`` percent of its pixels masked then it will be excluded from the low-resolution map. Masked pixels include those from the input ``mask``, those resulting from the data padding (i.e. if ``edge_method='pad'``), and those resulting from any sigma clipping (i.e. if ``sigma_clip`` is used). 
Setting ``exclude_percentile=0`` will exclude meshes that have any masked pixels. Setting ``exclude_percentile=100`` will only exclude meshes that are completely masked. Note that completely masked meshes are *always* excluded. For best results, ``exclude_percentile`` should be kept as low as possible (as long as there are sufficient pixels for reasonable statistical estimates). The default is 10. filter_size : int or array_like (int), optional The window size of the 2D median filter to apply to the low-resolution background map. If ``filter_size`` is a scalar then a square box of size ``filter_size`` will be used. If ``filter_size`` has two elements, they should be in ``(ny, nx)`` order. A filter size of ``1`` (or ``(1, 1)``) means no filtering. filter_threshold : int, optional The threshold value for used for selective median filtering of the low-resolution 2D background map. The median filter will be applied to only the background meshes with values larger than ``filter_threshold``. Set to `None` to filter all meshes (default). edge_method : {'pad', 'crop'}, optional The method used to determine how to handle the case where the image size is not an integer multiple of the ``box_size`` in either dimension. Both options will resize the image to give an exact multiple of ``box_size`` in both dimensions. * ``'pad'``: pad the image along the top and/or right edges. This is the default and recommended method. * ``'crop'``: crop the image along the top and/or right edges. sigma_clip : `astropy.stats.SigmaClip` instance, optional A `~astropy.stats.SigmaClip` object that defines the sigma clipping parameters. If `None` then no sigma clipping will be performed. The default is to perform sigma clipping with ``sigma=3.`` and ``iters=10``. bkg_estimator : callable, optional A callable object (a function or e.g., an instance of any `~photutils.background.BackgroundBase` subclass) used to estimate the background in each of the meshes. The callable object must take in a 2D `~numpy.ndarray` or `~numpy.ma.MaskedArray` and have an ``axis`` keyword (internally, the background will be calculated along ``axis=1``). The callable object must return a 1D `~numpy.ma.MaskedArray`. If ``bkg_estimator`` includes sigma clipping, it will be ignored (use the ``sigma_clip`` keyword to define sigma clipping). The default is an instance of `~photutils.background.SExtractorBackground`. bkgrms_estimator : callable, optional A callable object (a function or e.g., an instance of any `~photutils.background.BackgroundRMSBase` subclass) used to estimate the background RMS in each of the meshes. The callable object must take in a 2D `~numpy.ndarray` or `~numpy.ma.MaskedArray` and have an ``axis`` keyword (internally, the background RMS will be calculated along ``axis=1``). The callable object must return a 1D `~numpy.ma.MaskedArray`. If ``bkgrms_estimator`` includes sigma clipping, it will be ignored (use the ``sigma_clip`` keyword to define sigma clipping). The default is an instance of `~photutils.background.StdBackgroundRMS`. interpolator : callable, optional A callable object (a function or object) used to interpolate the low-resolution background or background RMS mesh to the full-size background or background RMS maps. The default is an instance of `BkgZoomInterpolator`. Notes ----- If there is only one background mesh element (i.e., ``box_size`` is the same size as the ``data``), then the background map will simply be a constant image. 
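
    Examples
    --------
    A minimal illustrative sketch (the ``background`` and
    ``background_rms`` attributes accessed below are assumed from the
    public `Background2D` interface rather than shown in this excerpt):

    >>> import numpy as np
    >>> from photutils import Background2D
    >>> data = np.ones((100, 100))
    >>> bkg = Background2D(data, (50, 50), filter_size=(3, 3))
    >>> bkg_image = bkg.background          # full-size 2D background map
    >>> bkgrms_image = bkg.background_rms   # full-size 2D background RMS map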
""" def __init__(self, data, box_size, mask=None, exclude_percentile=10., filter_size=(3, 3), filter_threshold=None, edge_method='pad', sigma_clip=SigmaClip(sigma=3., iters=10), bkg_estimator=SExtractorBackground(sigma_clip=None), bkgrms_estimator=StdBackgroundRMS(sigma_clip=None), interpolator=BkgZoomInterpolator()): data = np.asanyarray(data) box_size = np.atleast_1d(box_size) if len(box_size) == 1: box_size = np.repeat(box_size, 2) self.box_size = (min(box_size[0], data.shape[0]), min(box_size[1], data.shape[1])) self.box_npixels = self.box_size[0] * self.box_size[1] if mask is not None: mask = np.asanyarray(mask) if mask.shape != data.shape: raise ValueError('mask and data must have the same shape') if exclude_percentile < 0 or exclude_percentile > 100: raise ValueError('exclude_percentile must be between 0 and 100 ' '(inclusive).') self.data = data self.mask = mask self.exclude_percentile = exclude_percentile filter_size = np.atleast_1d(filter_size) if len(filter_size) == 1: filter_size = np.repeat(filter_size, 2) self.filter_size = filter_size self.filter_threshold = filter_threshold self.edge_method = edge_method self.sigma_clip = sigma_clip bkg_estimator.sigma_clip = None bkgrms_estimator.sigma_clip = None self.bkg_estimator = bkg_estimator self.bkgrms_estimator = bkgrms_estimator self.interpolator = interpolator self._prepare_data() self._calc_bkg_bkgrms() self._calc_coordinates() def _pad_data(self, yextra, xextra): """ Pad the ``data`` and ``mask`` to have an integer number of background meshes of size ``box_size`` in both dimensions. The padding is added on the top and/or right edges (this is the best option for the "zoom" interpolator). Parameters ---------- yextra, xextra : int The modulus of the data size and the box size in both the ``y`` and ``x`` dimensions. This is the number of extra pixels beyond a multiple of the box size in the ``y`` and ``x`` dimensions. Returns ------- result : `~numpy.ma.MaskedArray` The padded data and mask as a masked array. """ ypad = 0 xpad = 0 if yextra > 0: ypad = self.box_size[0] - yextra if xextra > 0: xpad = self.box_size[1] - xextra pad_width = ((0, ypad), (0, xpad)) # mode must be a string for numpy < 0.11 # (see https://github.com/numpy/numpy/issues/7112) mode = str('constant') data = np.pad(self.data, pad_width, mode=mode, constant_values=[1.e10]) # mask the padded regions pad_mask = np.zeros_like(data) y0 = data.shape[0] - ypad x0 = data.shape[1] - xpad pad_mask[y0:, :] = True pad_mask[:, x0:] = True # pad the input mask separately (there is no np.ma.pad function) if self.mask is not None: mask = np.pad(self.mask, pad_width, mode=mode, constant_values=[True]) mask = np.logical_or(mask, pad_mask) else: mask = pad_mask return np.ma.masked_array(data, mask=mask) def _crop_data(self): """ Crop the ``data`` and ``mask`` to have an integer number of background meshes of size ``box_size`` in both dimensions. The data are cropped on the top and/or right edges (this is the best option for the "zoom" interpolator). Returns ------- result : `~numpy.ma.MaskedArray` The cropped data and mask as a masked array. """ ny_crop = self.nyboxes * self.box_size[1] nx_crop = self.nxboxes * self.box_size[0] crop_slc = index_exp[0:ny_crop, 0:nx_crop] if self.mask is not None: mask = self.mask[crop_slc] else: mask = False return np.ma.masked_array(self.data[crop_slc], mask=mask) def _select_meshes(self, data): """ Define the x and y indices with respect to the low-resolution mesh image of the meshes to use for the background interpolation. 
The ``exclude_percentile`` keyword determines which meshes are not used for the background interpolation. Parameters ---------- data : 2D `~numpy.ma.MaskedArray` A 2D array where the y dimension represents each mesh and the x dimension represents the data in each mesh. Returns ------- mesh_idx : 1D `~numpy.ndarray` The 1D mesh indices. """ # the number of masked pixels in each mesh nmasked = np.ma.count_masked(data, axis=1) # meshes that contain more than ``exclude_percentile`` percent # masked pixels are excluded: # - for exclude_percentile=0, good meshes will be only where # nmasked=0 # - meshes where nmasked=self.box_npixels are *always* excluded # (second conditional needed for exclude_percentile=100) threshold_npixels = self.exclude_percentile / 100. * self.box_npixels mesh_idx = np.where((nmasked <= threshold_npixels) & (nmasked != self.box_npixels))[0] # good meshes if len(mesh_idx) == 0: raise ValueError('All meshes contain > {0} ({1} percent per ' 'mesh) masked pixels. Please check your data ' 'or decrease "exclude_percentile".' .format(threshold_npixels, self.exclude_percentile)) return mesh_idx def _prepare_data(self): """ Prepare the data. First, pad or crop the 2D data array so that there are an integer number of meshes in both dimensions, creating a masked array. Then reshape into a different 2D masked array where each row represents the data in a single mesh. This method also performs a first cut at rejecting certain meshes as specified by the input keywords. """ self.nyboxes = self.data.shape[0] // self.box_size[0] self.nxboxes = self.data.shape[1] // self.box_size[1] yextra = self.data.shape[0] % self.box_size[0] xextra = self.data.shape[1] % self.box_size[1] if (xextra + yextra) == 0: # no resizing of the data is necessary data_ma = np.ma.masked_array(self.data, mask=self.mask) else: # pad or crop the data if self.edge_method == 'pad': data_ma = self._pad_data(yextra, xextra) self.nyboxes = data_ma.shape[0] // self.box_size[0] self.nxboxes = data_ma.shape[1] // self.box_size[1] elif self.edge_method == 'crop': data_ma = self._crop_data() else: raise ValueError('edge_method must be "pad" or "crop"') self.nboxes = self.nxboxes * self.nyboxes # a reshaped 2D masked array with mesh data along the x axis mesh_data = np.ma.swapaxes(data_ma.reshape( self.nyboxes, self.box_size[0], self.nxboxes, self.box_size[1]), 1, 2).reshape(self.nyboxes * self.nxboxes, self.box_npixels) # first cut on rejecting meshes self.mesh_idx = self._select_meshes(mesh_data) self._mesh_data = mesh_data[self.mesh_idx, :] return def _make_2d_array(self, data): """ Convert a 1D array of mesh values to a masked 2D mesh array given the 1D mesh indices ``mesh_idx``. Parameters ---------- data : 1D `~numpy.ndarray` A 1D array of mesh values. Returns ------- result : 2D `~numpy.ma.MaskedArray` A 2D masked array. Pixels not defined in ``mesh_idx`` are masked. 
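Notes
-----
A standalone sketch of the mapping this method performs (illustrative
only; ``vals``, ``yidx`` and ``xidx`` are hypothetical stand-ins for
``data``, ``mesh_yidx`` and ``mesh_xidx``):

>>> import numpy as np
>>> vals = np.array([10., 20., 30.])    # 1D mesh values
>>> yidx, xidx = [0, 0, 1], [0, 1, 1]   # low-resolution mesh positions
>>> grid = np.zeros((2, 2))
>>> grid[yidx, xidx] = vals
>>> mask = np.ones((2, 2), dtype=bool)
>>> mask[yidx, xidx] = False            # unmask only the defined meshes
>>> result = np.ma.masked_array(grid, mask=mask)

Here ``result`` holds the mesh values at their 2D grid positions and is
masked at the single undefined position.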
""" if data.shape != self.mesh_idx.shape: raise ValueError('data and mesh_idx must have the same shape') if np.ma.is_masked(data): raise ValueError('data must not be a masked array') data2d = np.zeros(self._mesh_shape).astype(data.dtype) data2d[self.mesh_yidx, self.mesh_xidx] = data if len(self.mesh_idx) == self.nboxes: # no meshes were masked return data2d else: # some meshes were masked mask2d = np.ones(data2d.shape).astype(np.bool) mask2d[self.mesh_yidx, self.mesh_xidx] = False return np.ma.masked_array(data2d, mask=mask2d) def _interpolate_meshes(self, data, n_neighbors=10, eps=0., power=1., reg=0.): """ Use IDW interpolation to fill in any masked pixels in the low-resolution 2D mesh background and background RMS images. This is required to use a regular-grid interpolator to expand the low-resolution image to the full size image. Parameters ---------- data : 1D `~numpy.ndarray` A 1D array of mesh values. n_neighbors : int, optional The maximum number of nearest neighbors to use during the interpolation. eps : float, optional Set to use approximate nearest neighbors; the kth neighbor is guaranteed to be no further than (1 + ``eps``) times the distance to the real *k*-th nearest neighbor. See `scipy.spatial.cKDTree.query` for further information. power : float, optional The power of the inverse distance used for the interpolation weights. See the Notes section for more details. reg : float, optional The regularization parameter. It may be used to control the smoothness of the interpolator. See the Notes section for more details. Returns ------- result : 2D `~numpy.ndarray` A 2D array of the mesh values where masked pixels have been filled by IDW interpolation. """ yx = np.column_stack([self.mesh_yidx, self.mesh_xidx]) coords = np.array(list(product(range(self.nyboxes), range(self.nxboxes)))) f = ShepardIDWInterpolator(yx, data) img1d = f(coords, n_neighbors=n_neighbors, power=power, eps=eps, reg=reg) return img1d.reshape(self._mesh_shape) def _selective_filter(self, data, indices): """ Selectively filter only pixels above ``filter_threshold`` in the background mesh. The same pixels are filtered in both the background and background RMS meshes. Parameters ---------- data : 2D `~numpy.ndarray` A 2D array of mesh values. indices : 2 tuple of int A tuple of the ``y`` and ``x`` indices of the pixels to filter. Returns ------- filtered_data : 2D `~numpy.ndarray` The filtered 2D array of mesh values. """ data_out = np.copy(data) for i, j in zip(*indices): yfs, xfs = self.filter_size hyfs, hxfs = yfs // 2, xfs // 2 y0, y1 = max(i - hyfs, 0), min(i - hyfs + yfs, data.shape[0]) x0, x1 = max(j - hxfs, 0), min(j - hxfs + xfs, data.shape[1]) data_out[i, j] = np.median(data[y0:y1, x0:x1]) return data_out def _filter_meshes(self): """ Apply a 2D median filter to the low-resolution 2D mesh, including only pixels inside the image at the borders. 
""" from scipy.ndimage import generic_filter try: nanmedian_func = np.nanmedian # numpy >= 1.9 except AttributeError: # pragma: no cover from scipy.stats import nanmedian nanmedian_func = nanmedian if self.filter_threshold is None: # filter the entire arrays self.background_mesh = generic_filter( self.background_mesh, nanmedian_func, size=self.filter_size, mode='constant', cval=np.nan) self.background_rms_mesh = generic_filter( self.background_rms_mesh, nanmedian_func, size=self.filter_size, mode='constant', cval=np.nan) else: # selectively filter indices = np.nonzero(self.background_mesh > self.filter_threshold) self.background_mesh = self._selective_filter( self.background_mesh, indices) self.background_rms_mesh = self._selective_filter( self.background_rms_mesh, indices) return def _calc_bkg_bkgrms(self): """ Calculate the background and background RMS estimate in each of the meshes. Both meshes are computed at the same time here method because the filtering of both depends on the background mesh. The ``background_mesh`` and ``background_rms_mesh`` images are equivalent to the low-resolution "MINIBACKGROUND" and "MINIBACK_RMS" background maps in SExtractor, respectively. """ if self.sigma_clip is not None: data_sigclip = self.sigma_clip(self._mesh_data, axis=1) else: data_sigclip = self._mesh_data del self._mesh_data # preform mesh rejection on sigma-clipped data (i.e. for any # newly-masked pixels) idx = self._select_meshes(data_sigclip) self.mesh_idx = self.mesh_idx[idx] # indices for the output mesh self._data_sigclip = data_sigclip[idx] # always a 2D masked array self._mesh_shape = (self.nyboxes, self.nxboxes) self.mesh_yidx, self.mesh_xidx = np.unravel_index(self.mesh_idx, self._mesh_shape) # These properties are needed later to calculate # background_mesh_ma and background_rms_mesh_ma. Note that _bkg1d # and _bkgrms1d are masked arrays, but the mask should always be # False. self._bkg1d = self.bkg_estimator(self._data_sigclip, axis=1) self._bkgrms1d = self.bkgrms_estimator(self._data_sigclip, axis=1) # make the unfiltered 2D mesh arrays (these are not masked) if len(self._bkg1d) == self.nboxes: bkg = self._make_2d_array(self._bkg1d) bkgrms = self._make_2d_array(self._bkgrms1d) else: bkg = self._interpolate_meshes(self._bkg1d) bkgrms = self._interpolate_meshes(self._bkgrms1d) self._background_mesh_unfiltered = bkg self._background_rms_mesh_unfiltered = bkgrms self.background_mesh = bkg self.background_rms_mesh = bkgrms # filter the 2D mesh arrays if not np.array_equal(self.filter_size, [1, 1]): self._filter_meshes() return def _calc_coordinates(self): """ Calculate the coordinates to use when calling an interpolator. These are needed for `Background2D` and `BackgroundIDW2D`. Regular-grid interpolators require a 2D array of values. Some require a 2D meshgrid of x and y. Other require a strictly increasing 1D array of the x and y ranges. """ # the position coordinates used to initialize an interpolation self.y = (self.mesh_yidx * self.box_size[0] + (self.box_size[0] - 1) / 2.) self.x = (self.mesh_xidx * self.box_size[1] + (self.box_size[1] - 1) / 2.) self.yx = np.column_stack([self.y, self.x]) # the position coordinates used when calling an interpolator nx, ny = self.data.shape self.data_coords = np.array(list(product(range(ny), range(nx)))) @lazyproperty def mesh_nmasked(self): """ A 2D (masked) array of the number of masked pixels in each mesh. Only meshes included in the background estimation are included. The array is masked only if meshes were excluded. 
""" return self._make_2d_array( np.ma.count_masked(self._data_sigclip, axis=1)) @lazyproperty def background_mesh_ma(self): """ The background 2D (masked) array mesh prior to any interpolation. The array is masked only if meshes were excluded. """ if len(self._bkg1d) == self.nboxes: return self.background_mesh else: return self._make_2d_array(self._bkg1d) # masked array @lazyproperty def background_rms_mesh_ma(self): """ The background RMS 2D (masked) array mesh prior to any interpolation. The array is masked only if meshes were excluded. """ if len(self._bkgrms1d) == self.nboxes: return self.background_rms_mesh else: return self._make_2d_array(self._bkgrms1d) # masked array @lazyproperty def background_median(self): """ The median value of the 2D low-resolution background map. This is equivalent to the value SExtractor prints to stdout (i.e., "(M+D) Background: "). """ return np.median(self.background_mesh) @lazyproperty def background_rms_median(self): """ The median value of the low-resolution background RMS map. This is equivalent to the value SExtractor prints to stdout (i.e., "(M+D) RMS: "). """ return np.median(self.background_rms_mesh) @lazyproperty def background(self): """A 2D `~numpy.ndarray` containing the background image.""" return self.interpolator(self.background_mesh, self) @lazyproperty def background_rms(self): """A 2D `~numpy.ndarray` containing the background RMS image.""" return self.interpolator(self.background_rms_mesh, self) def plot_meshes(self, ax=None, marker='+', color='blue', outlines=False, **kwargs): """ Plot the low-resolution mesh boxes on a matplotlib Axes instance. Parameters ---------- ax : `matplotlib.axes.Axes` instance, optional If `None`, then the current ``Axes`` instance is used. marker : str, optional The marker to use to mark the center of the boxes. Default is '+'. color : str, optional The color for the markers and the box outlines. Default is 'blue'. outlines : bool, optional Whether or not to plot the box outlines in addition to the box centers. kwargs Any keyword arguments accepted by `matplotlib.patches.Patch`. Used only if ``outlines`` is True. """ import matplotlib.pyplot as plt kwargs['color'] = color if ax is None: ax = plt.gca() ax.scatter(self.x, self.y, marker=marker, color=color) if outlines: from ..aperture import RectangularAperture xy = np.column_stack([self.x, self.y]) apers = RectangularAperture(xy, self.box_size[1], self.box_size[0], 0.) apers.plot(ax=ax, **kwargs) return photutils-0.4/photutils/background/core.py0000644000214200020070000004663313175634532023274 0ustar lbradleySTSCI\science00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst """ This module defines background classes to estimate a scalar background and background RMS from an array (which may be masked) of any dimension. These classes were designed as part of an object-oriented interface for the tools in the PSF subpackage. """ from __future__ import (absolute_import, division, print_function, unicode_literals) import abc import six import numpy as np from astropy.stats import (SigmaClip, biweight_location, biweight_scale, mad_std) from astropy.utils.misc import InheritDocstrings __all__ = ['BackgroundBase', 'BackgroundRMSBase', 'MeanBackground', 'MedianBackground', 'ModeEstimatorBackground', 'MMMBackground', 'SExtractorBackground', 'BiweightLocationBackground', 'StdBackgroundRMS', 'MADStdBackgroundRMS', 'BiweightScaleBackgroundRMS'] def _masked_median(data, axis=None): """ Calculate the median of a (masked) array. 
This function is necessary for a consistent interface across all numpy versions. A bug was introduced in numpy v1.10 where `numpy.ma.median` (with ``axis=None``) returns a single-valued `~numpy.ma.MaskedArray` if the input data is a `~numpy.ndarray` or if the data is a `~numpy.ma.MaskedArray`, but the mask is `False` everywhere. Parameters ---------- data : array-like The input data. axis : int or `None`, optional The array axis along which the median is calculated. If `None`, then the entire array is used. Returns ------- result : float or `~numpy.ma.MaskedArray` The resulting median. If ``axis`` is `None`, then a float is returned, otherwise a `~numpy.ma.MaskedArray` is returned. """ _median = np.ma.median(data, axis=axis) if axis is None and np.ma.isMaskedArray(_median): _median = _median.item() return _median class _ABCMetaAndInheritDocstrings(InheritDocstrings, abc.ABCMeta): pass @six.add_metaclass(_ABCMetaAndInheritDocstrings) class BackgroundBase(object): """ Base class for classes that estimate scalar background values. Parameters ---------- sigma_clip : `astropy.stats.SigmaClip` object, optional A `~astropy.stats.SigmaClip` object that defines the sigma clipping parameters. If `None` then no sigma clipping will be performed. The default is to perform sigma clipping with ``sigma=3.`` and ``iters=5``. """ def __init__(self, sigma_clip=SigmaClip(sigma=3., iters=5)): self.sigma_clip = sigma_clip def __call__(self, data, axis=None): return self.calc_background(data, axis=axis) @abc.abstractmethod def calc_background(self, data, axis=None): """ Calculate the background value. Parameters ---------- data : array_like or `~numpy.ma.MaskedArray` The array for which to calculate the background value. axis : int or `None`, optional The array axis along which the background is calculated. If `None`, then the entire array is used. Returns ------- result : float or `~numpy.ma.MaskedArray` The calculated background value. If ``axis`` is `None` then a scalar will be returned, otherwise a `~numpy.ma.MaskedArray` will be returned. """ @six.add_metaclass(_ABCMetaAndInheritDocstrings) class BackgroundRMSBase(object): """ Base class for classes that estimate scalar background RMS values. Parameters ---------- sigma_clip : `astropy.stats.SigmaClip` object, optional A `~astropy.stats.SigmaClip` object that defines the sigma clipping parameters. If `None` then no sigma clipping will be performed. The default is to perform sigma clipping with ``sigma=3.`` and ``iters=5``. """ def __init__(self, sigma_clip=SigmaClip(sigma=3., iters=5)): self.sigma_clip = sigma_clip def __call__(self, data, axis=None): return self.calc_background_rms(data, axis=axis) @abc.abstractmethod def calc_background_rms(self, data, axis=None): """ Calculate the background RMS value. Parameters ---------- data : array_like or `~numpy.ma.MaskedArray` The array for which to calculate the background RMS value. axis : int or `None`, optional The array axis along which the background RMS is calculated. If `None`, then the entire array is used. Returns ------- result : float or `~numpy.ma.MaskedArray` The calculated background RMS value. If ``axis`` is `None` then a scalar will be returned, otherwise a `~numpy.ma.MaskedArray` will be returned. """ class MeanBackground(BackgroundBase): """ Class to calculate the background in an array as the (sigma-clipped) mean. Parameters ---------- sigma_clip : `astropy.stats.SigmaClip` object, optional A `~astropy.stats.SigmaClip` object that defines the sigma clipping parameters. 
If `None` then no sigma clipping will be performed. The default is to perform sigma clipping with ``sigma=3.`` and ``iters=5``. Examples -------- >>> from astropy.stats import SigmaClip >>> from photutils import MeanBackground >>> data = np.arange(100) >>> sigma_clip = SigmaClip(sigma=3.) >>> bkg = MeanBackground(sigma_clip) The background value can be calculated by using the `calc_background` method, e.g.: >>> bkg_value = bkg.calc_background(data) >>> print(bkg_value) # doctest: +FLOAT_CMP 49.5 Alternatively, the background value can be calculated by calling the class instance as a function, e.g.: >>> bkg_value = bkg(data) >>> print(bkg_value) # doctest: +FLOAT_CMP 49.5 """ def calc_background(self, data, axis=None): if self.sigma_clip is not None: data = self.sigma_clip(data, axis=axis) return np.ma.mean(data, axis=axis) class MedianBackground(BackgroundBase): """ Class to calculate the background in an array as the (sigma-clipped) median. Parameters ---------- sigma_clip : `astropy.stats.SigmaClip` object, optional A `~astropy.stats.SigmaClip` object that defines the sigma clipping parameters. If `None` then no sigma clipping will be performed. The default is to perform sigma clipping with ``sigma=3.`` and ``iters=5``. Examples -------- >>> from astropy.stats import SigmaClip >>> from photutils import MedianBackground >>> data = np.arange(100) >>> sigma_clip = SigmaClip(sigma=3.) >>> bkg = MedianBackground(sigma_clip) The background value can be calculated by using the `calc_background` method, e.g.: >>> bkg_value = bkg.calc_background(data) >>> print(bkg_value) # doctest: +FLOAT_CMP 49.5 Alternatively, the background value can be calculated by calling the class instance as a function, e.g.: >>> bkg_value = bkg(data) >>> print(bkg_value) # doctest: +FLOAT_CMP 49.5 """ def calc_background(self, data, axis=None): if self.sigma_clip is not None: data = self.sigma_clip(data, axis=axis) return _masked_median(data, axis=axis) class ModeEstimatorBackground(BackgroundBase): """ Class to calculate the background in an array using a mode estimator of the form ``(median_factor * median) - (mean_factor * mean)``. Parameters ---------- median_factor : float, optional The multiplicative factor for the data median. Defaults to 3. mean_factor : float, optional The multiplicative factor for the data mean. Defaults to 2. sigma_clip : `astropy.stats.SigmaClip` object, optional A `~astropy.stats.SigmaClip` object that defines the sigma clipping parameters. If `None` then no sigma clipping will be performed. The default is to perform sigma clipping with ``sigma=3.`` and ``iters=5``. Examples -------- >>> from astropy.stats import SigmaClip >>> from photutils import ModeEstimatorBackground >>> data = np.arange(100) >>> sigma_clip = SigmaClip(sigma=3.) >>> bkg = ModeEstimatorBackground(median_factor=3., mean_factor=2., ... 
sigma_clip=sigma_clip) The background value can be calculated by using the `calc_background` method, e.g.: >>> bkg_value = bkg.calc_background(data) >>> print(bkg_value) # doctest: +FLOAT_CMP 49.5 Alternatively, the background value can be calculated by calling the class instance as a function, e.g.: >>> bkg_value = bkg(data) >>> print(bkg_value) # doctest: +FLOAT_CMP 49.5 """ def __init__(self, median_factor=3., mean_factor=2., **kwargs): super(ModeEstimatorBackground, self).__init__(**kwargs) self.median_factor = median_factor self.mean_factor = mean_factor def calc_background(self, data, axis=None): if self.sigma_clip is not None: data = self.sigma_clip(data, axis=axis) return ((self.median_factor * _masked_median(data, axis=axis)) - (self.mean_factor * np.ma.mean(data, axis=axis))) class MMMBackground(ModeEstimatorBackground): """ Class to calculate the background in an array using the DAOPHOT MMM algorithm. The background is calculated using a mode estimator of the form ``(3 * median) - (2 * mean)``. Parameters ---------- sigma_clip : `astropy.stats.SigmaClip` object, optional A `~astropy.stats.SigmaClip` object that defines the sigma clipping parameters. If `None` then no sigma clipping will be performed. The default is to perform sigma clipping with ``sigma=3.`` and ``iters=5``. Examples -------- >>> from astropy.stats import SigmaClip >>> from photutils import MMMBackground >>> data = np.arange(100) >>> sigma_clip = SigmaClip(sigma=3.) >>> bkg = MMMBackground(sigma_clip=sigma_clip) The background value can be calculated by using the `~photutils.background.core.ModeEstimatorBackground.calc_background` method, e.g.: >>> bkg_value = bkg.calc_background(data) >>> print(bkg_value) # doctest: +FLOAT_CMP 49.5 Alternatively, the background value can be calculated by calling the class instance as a function, e.g.: >>> bkg_value = bkg(data) >>> print(bkg_value) # doctest: +FLOAT_CMP 49.5 """ def __init__(self, **kwargs): kwargs['median_factor'] = 3. kwargs['mean_factor'] = 2. super(MMMBackground, self).__init__(**kwargs) class SExtractorBackground(BackgroundBase): """ Class to calculate the background in an array using the SExtractor algorithm. The background is calculated using a mode estimator of the form ``(2.5 * median) - (1.5 * mean)``. If ``(mean - median) / std > 0.3`` then the median is used instead. Despite what the `SExtractor`_ User's Manual says, this is the method it *always* uses. .. _SExtractor: http://www.astromatic.net/software/sextractor Parameters ---------- sigma_clip : `astropy.stats.SigmaClip` object, optional A `~astropy.stats.SigmaClip` object that defines the sigma clipping parameters. If `None` then no sigma clipping will be performed. The default is to perform sigma clipping with ``sigma=3.`` and ``iters=5``. Examples -------- >>> from astropy.stats import SigmaClip >>> from photutils import SExtractorBackground >>> data = np.arange(100) >>> sigma_clip = SigmaClip(sigma=3.) 
>>> bkg = SExtractorBackground(sigma_clip) The background value can be calculated by using the `calc_background` method, e.g.: >>> bkg_value = bkg.calc_background(data) >>> print(bkg_value) # doctest: +FLOAT_CMP 49.5 Alternatively, the background value can be calculated by calling the class instance as a function, e.g.: >>> bkg_value = bkg(data) >>> print(bkg_value) # doctest: +FLOAT_CMP 49.5 """ def calc_background(self, data, axis=None): if self.sigma_clip is not None: data = self.sigma_clip(data, axis=axis) _median = np.atleast_1d(_masked_median(data, axis=axis)) _mean = np.atleast_1d(np.ma.mean(data, axis=axis)) _std = np.atleast_1d(np.ma.std(data, axis=axis)) bkg = np.atleast_1d((2.5 * _median) - (1.5 * _mean)) bkg = np.ma.where(_std == 0, _mean, bkg) idx = np.ma.where(_std != 0) condition = (np.abs(_mean[idx] - _median[idx]) / _std[idx]) < 0.3 bkg[idx] = np.ma.where(condition, bkg[idx], _median[idx]) # np.ma.where always returns a masked array if axis is None and np.ma.isMaskedArray(bkg): bkg = bkg.item() return bkg class BiweightLocationBackground(BackgroundBase): """ Class to calculate the background in an array using the biweight location. Parameters ---------- c : float, optional Tuning constant for the biweight estimator. Default value is 6.0. M : float, optional Initial guess for the biweight location. Default value is `None`. sigma_clip : `astropy.stats.SigmaClip` object, optional A `~astropy.stats.SigmaClip` object that defines the sigma clipping parameters. If `None` then no sigma clipping will be performed. The default is to perform sigma clipping with ``sigma=3.`` and ``iters=5``. Examples -------- >>> from astropy.stats import SigmaClip >>> from photutils import BiweightLocationBackground >>> data = np.arange(100) >>> sigma_clip = SigmaClip(sigma=3.) >>> bkg = BiweightLocationBackground(sigma_clip=sigma_clip) The background value can be calculated by using the `calc_background` method, e.g.: >>> bkg_value = bkg.calc_background(data) >>> print(bkg_value) # doctest: +FLOAT_CMP 49.5 Alternatively, the background value can be calculated by calling the class instance as a function, e.g.: >>> bkg_value = bkg(data) >>> print(bkg_value) # doctest: +FLOAT_CMP 49.5 """ def __init__(self, c=6, M=None, **kwargs): super(BiweightLocationBackground, self).__init__(**kwargs) self.c = c self.M = M def calc_background(self, data, axis=None): if self.sigma_clip is not None: data = self.sigma_clip(data, axis=axis) return biweight_location(data, c=self.c, M=self.M, axis=axis) class StdBackgroundRMS(BackgroundRMSBase): """ Class to calculate the background RMS in an array as the (sigma-clipped) standard deviation. Parameters ---------- sigma_clip : `astropy.stats.SigmaClip` object, optional A `~astropy.stats.SigmaClip` object that defines the sigma clipping parameters. If `None` then no sigma clipping will be performed. The default is to perform sigma clipping with ``sigma=3.`` and ``iters=5``. Examples -------- >>> from astropy.stats import SigmaClip >>> from photutils import StdBackgroundRMS >>> data = np.arange(100) >>> sigma_clip = SigmaClip(sigma=3.) 
>>> bkgrms = StdBackgroundRMS(sigma_clip) The background RMS value can be calculated by using the `calc_background_rms` method, e.g.: >>> bkgrms_value = bkgrms.calc_background_rms(data) >>> print(bkgrms_value) # doctest: +FLOAT_CMP 28.866070047722118 Alternatively, the background RMS value can be calculated by calling the class instance as a function, e.g.: >>> bkgrms_value = bkgrms(data) >>> print(bkgrms_value) # doctest: +FLOAT_CMP 28.866070047722118 """ def calc_background_rms(self, data, axis=None): if self.sigma_clip is not None: data = self.sigma_clip(data, axis=axis) return np.ma.std(data, axis=axis) class MADStdBackgroundRMS(BackgroundRMSBase): """ Class to calculate the background RMS in an array as using the `median absolute deviation (MAD) `_. The standard deviation estimator is given by: .. math:: \\sigma \\approx \\frac{{\\textrm{{MAD}}}}{{\\Phi^{{-1}}(3/4)}} \\approx 1.4826 \\ \\textrm{{MAD}} where :math:`\\Phi^{{-1}}(P)` is the normal inverse cumulative distribution function evaluated at probability :math:`P = 3/4`. Parameters ---------- sigma_clip : `astropy.stats.SigmaClip` object, optional A `~astropy.stats.SigmaClip` object that defines the sigma clipping parameters. If `None` then no sigma clipping will be performed. The default is to perform sigma clipping with ``sigma=3.`` and ``iters=5``. Examples -------- >>> from astropy.stats import SigmaClip >>> from photutils import MADStdBackgroundRMS >>> data = np.arange(100) >>> sigma_clip = SigmaClip(sigma=3.) >>> bkgrms = MADStdBackgroundRMS(sigma_clip) The background RMS value can be calculated by using the `calc_background_rms` method, e.g.: >>> bkgrms_value = bkgrms.calc_background_rms(data) >>> print(bkgrms_value) # doctest: +FLOAT_CMP 37.065055462640053 Alternatively, the background RMS value can be calculated by calling the class instance as a function, e.g.: >>> bkgrms_value = bkgrms(data) >>> print(bkgrms_value) # doctest: +FLOAT_CMP 37.065055462640053 """ def calc_background_rms(self, data, axis=None): if self.sigma_clip is not None: data = self.sigma_clip(data, axis=axis) return mad_std(data, axis=axis) class BiweightScaleBackgroundRMS(BackgroundRMSBase): """ Class to calculate the background RMS in an array as the (sigma-clipped) biweight scale. Parameters ---------- c : float, optional Tuning constant for the biweight estimator. Default value is 9.0. M : float, optional Initial guess for the biweight location. Default value is `None`. sigma_clip : `astropy.stats.SigmaClip` object, optional A `~astropy.stats.SigmaClip` object that defines the sigma clipping parameters. If `None` then no sigma clipping will be performed. The default is to perform sigma clipping with ``sigma=3.`` and ``iters=5``. Examples -------- >>> from astropy.stats import SigmaClip >>> from photutils import BiweightScaleBackgroundRMS >>> data = np.arange(100) >>> sigma_clip = SigmaClip(sigma=3.) 
>>> bkgrms = BiweightScaleBackgroundRMS(sigma_clip=sigma_clip) The background RMS value can be calculated by using the `calc_background_rms` method, e.g.: >>> bkgrms_value = bkgrms.calc_background_rms(data) >>> print(bkgrms_value) # doctest: +FLOAT_CMP 30.094338485893392 Alternatively, the background RMS value can be calculated by calling the class instance as a function, e.g.: >>> bkgrms_value = bkgrms(data) >>> print(bkgrms_value) # doctest: +FLOAT_CMP 30.094338485893392 """ def __init__(self, c=9.0, M=None, **kwargs): super(BiweightScaleBackgroundRMS, self).__init__(**kwargs) self.c = c self.M = M def calc_background_rms(self, data, axis=None): if self.sigma_clip is not None: data = self.sigma_clip(data, axis=axis) return biweight_scale(data, c=self.c, M=self.M, axis=axis) photutils-0.4/photutils/background/tests/0000755000214200020070000000000013175654702023121 5ustar lbradleySTSCI\science00000000000000photutils-0.4/photutils/background/tests/__init__.py0000644000214200020070000000017013055576313025226 0ustar lbradleySTSCI\science00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst """ This package contains affiliated package tests. """ photutils-0.4/photutils/background/tests/test_background_2d.py0000644000214200020070000001774113175634532027247 0ustar lbradleySTSCI\science00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst from __future__ import (absolute_import, division, print_function, unicode_literals) import itertools import numpy as np from numpy.testing import assert_allclose, assert_equal import pytest from ..core import MeanBackground from ..background_2d import (BkgZoomInterpolator, BkgIDWInterpolator, Background2D) try: import scipy # noqa HAS_SCIPY = True except ImportError: HAS_SCIPY = False try: import matplotlib # noqa HAS_MATPLOTLIB = True except ImportError: HAS_MATPLOTLIB = False DATA = np.ones((100, 100)) BKG_RMS = np.zeros((100, 100)) BKG_MESH = np.ones((4, 4)) BKG_RMS_MESH = np.zeros((4, 4)) PADBKG_MESH = np.ones((5, 5)) PADBKG_RMS_MESH = np.zeros((5, 5)) FILTER_SIZES = [(1, 1), (3, 3)] INTERPOLATORS = [BkgZoomInterpolator(), BkgIDWInterpolator()] @pytest.mark.skipif('not HAS_SCIPY') class TestBackground2D(object): @pytest.mark.parametrize(('filter_size', 'interpolator'), list(itertools.product(FILTER_SIZES, INTERPOLATORS))) def test_background(self, filter_size, interpolator): b = Background2D(DATA, (25, 25), filter_size=filter_size, interpolator=interpolator) assert_allclose(b.background, DATA) assert_allclose(b.background_rms, BKG_RMS) assert_allclose(b.background_mesh, BKG_MESH) assert_allclose(b.background_rms_mesh, BKG_RMS_MESH) assert b.background_median == 1.0 assert b.background_rms_median == 0.0 @pytest.mark.parametrize('interpolator', INTERPOLATORS) def test_background_nonconstant(self, interpolator): data = np.copy(DATA) data[25:50, 50:75] = 10. bkg_low_res = np.copy(BKG_MESH) bkg_low_res[1, 2] = 10. b1 = Background2D(data, (25, 25), filter_size=(1, 1), interpolator=interpolator) assert_allclose(b1.background_mesh, bkg_low_res) assert b1.background.shape == data.shape b2 = Background2D(data, (25, 25), filter_size=(1, 1), edge_method='pad', interpolator=interpolator) assert_allclose(b2.background_mesh, bkg_low_res) assert b2.background.shape == data.shape def test_no_sigma_clipping(self): data = np.copy(DATA) data[10, 10] = 100. 
b1 = Background2D(data, (25, 25), filter_size=(1, 1), bkg_estimator=MeanBackground()) b2 = Background2D(data, (25, 25), filter_size=(1, 1), sigma_clip=None, bkg_estimator=MeanBackground()) assert b2.background_mesh[0, 0] > b1.background_mesh[0, 0] @pytest.mark.parametrize('filter_size', FILTER_SIZES) def test_resizing(self, filter_size): b1 = Background2D(DATA, (23, 22), filter_size=filter_size, bkg_estimator=MeanBackground(), edge_method='crop') b2 = Background2D(DATA, (23, 22), filter_size=filter_size, bkg_estimator=MeanBackground(), edge_method='pad') assert_allclose(b1.background, b2.background) assert_allclose(b1.background_rms, b2.background_rms) @pytest.mark.parametrize('box_size', ([(25, 25), (23, 22)])) def test_background_mask(self, box_size): """ Test with an input mask. Note that box_size=(23, 22) tests the resizing of the image and mask. """ data = np.copy(DATA) data[25:50, 25:50] = 100. mask = np.zeros_like(DATA, dtype=np.bool) mask[25:50, 25:50] = True b = Background2D(data, box_size, filter_size=(1, 1), mask=mask, bkg_estimator=MeanBackground()) assert_allclose(b.background, DATA) assert_allclose(b.background_rms, BKG_RMS) # test edge crop with b2 = Background2D(data, box_size, filter_size=(1, 1), mask=mask, bkg_estimator=MeanBackground(), edge_method='crop') assert_allclose(b2.background, DATA) def test_mask(self): data = np.copy(DATA) data[25:50, 25:50] = 100. mask = np.zeros_like(DATA, dtype=np.bool) mask[25:50, 25:50] = True b1 = Background2D(data, (25, 25), filter_size=(1, 1), mask=None, bkg_estimator=MeanBackground()) assert_equal(b1.background_mesh, b1.background_mesh_ma) assert_equal(b1.background_rms_mesh, b1.background_rms_mesh_ma) assert not np.ma.is_masked(b1.mesh_nmasked) b2 = Background2D(data, (25, 25), filter_size=(1, 1), mask=mask, bkg_estimator=MeanBackground()) assert np.ma.count(b2.background_mesh_ma) < b2.nboxes assert np.ma.count(b2.background_rms_mesh_ma) < b2.nboxes assert np.ma.is_masked(b2.mesh_nmasked) def test_completely_masked(self): with pytest.raises(ValueError): mask = np.ones_like(DATA, dtype=np.bool) Background2D(DATA, (25, 25), mask=mask) def test_zero_padding(self): """Test case where padding is added only on one axis.""" b = Background2D(DATA, (25, 22), filter_size=(1, 1)) assert_allclose(b.background, DATA) assert_allclose(b.background_rms, BKG_RMS) assert b.background_median == 1.0 assert b.background_rms_median == 0.0 def test_filter_threshold(self): """Only meshes greater than filter_threshold are filtered.""" data = np.copy(DATA) data[25:50, 50:75] = 10. b = Background2D(data, (25, 25), filter_size=(3, 3), filter_threshold=9.) assert_allclose(b.background, DATA) assert_allclose(b.background_mesh, BKG_MESH) b2 = Background2D(data, (25, 25), filter_size=(3, 3), filter_threshold=11.) # no filtering assert b2.background_mesh[1, 2] == 10 def test_filter_threshold_high(self): """No filtering because filter_threshold is too large.""" data = np.copy(DATA) data[25:50, 50:75] = 10. ref_data = np.copy(BKG_MESH) ref_data[1, 2] = 10. b = Background2D(data, (25, 25), filter_size=(3, 3), filter_threshold=100.) assert_allclose(b.background_mesh, ref_data) def test_filter_threshold_nofilter(self): """No filtering because filter_size is (1, 1).""" data = np.copy(DATA) data[25:50, 50:75] = 10. ref_data = np.copy(BKG_MESH) ref_data[1, 2] = 10. b = Background2D(data, (25, 25), filter_size=(1, 1), filter_threshold=1.) 
assert_allclose(b.background_mesh, ref_data) def test_scalar_sizes(self): b1 = Background2D(DATA, (25, 25), filter_size=(3, 3)) b2 = Background2D(DATA, 25, filter_size=3) assert_allclose(b1.background, b2.background) assert_allclose(b1.background_rms, b2.background_rms) def test_exclude_percentile(self): with pytest.raises(ValueError): Background2D(DATA, (5, 5), exclude_percentile=-1) with pytest.raises(ValueError): Background2D(DATA, (5, 5), exclude_percentile=101) def test_mask_badshape(self): with pytest.raises(ValueError): Background2D(DATA, (25, 25), filter_size=(1, 1), mask=np.zeros((2, 2))) def test_invalid_edge_method(self): with pytest.raises(ValueError): Background2D(DATA, (23, 22), filter_size=(1, 1), edge_method='not_valid') def test_invalid_mesh_idx_len(self): with pytest.raises(ValueError): bkg = Background2D(DATA, (25, 25), filter_size=(1, 1)) bkg._make_2d_array(np.arange(3)) @pytest.mark.skipif('not HAS_MATPLOTLIB') def test_plot_meshes(self): """ This test should run without any errors, but there is no return value. """ b = Background2D(DATA, (25, 25)) b.plot_meshes(outlines=True) photutils-0.4/photutils/background/tests/test_core.py0000644000214200020070000000571613175634532025472 0ustar lbradleySTSCI\science00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst from __future__ import (absolute_import, division, print_function, unicode_literals) import pytest import numpy as np from numpy.testing import assert_allclose from astropy.stats import SigmaClip from ...datasets.make import make_noise_image from ..core import (MeanBackground, MedianBackground, ModeEstimatorBackground, MMMBackground, SExtractorBackground, BiweightLocationBackground, StdBackgroundRMS, MADStdBackgroundRMS, BiweightScaleBackgroundRMS) BKG = 0.0 STD = 0.5 DATA = make_noise_image((100, 100), type='gaussian', mean=BKG, stddev=STD, random_state=12345) BKG_CLASS0 = [MeanBackground, MedianBackground, ModeEstimatorBackground, MMMBackground, SExtractorBackground] # BiweightLocationBackground cannot handle a constant background # (astropy.stats.biweight_location needs to be fixed) BKG_CLASS = BKG_CLASS0 + [BiweightLocationBackground] RMS_CLASS = [StdBackgroundRMS, MADStdBackgroundRMS, BiweightScaleBackgroundRMS] SIGMA_CLIP = SigmaClip(sigma=3.) 
@pytest.mark.parametrize('bkg_class', BKG_CLASS0) def test_constant_background(bkg_class): data = np.ones((100, 100)) bkg = bkg_class(sigma_clip=SIGMA_CLIP) bkgval = bkg.calc_background(data) assert not np.ma.isMaskedArray(bkgval) assert_allclose(bkgval, 1.0) assert_allclose(bkg(data), bkg.calc_background(data)) @pytest.mark.parametrize('bkg_class', BKG_CLASS) def test_background(bkg_class): bkg = bkg_class(sigma_clip=SIGMA_CLIP) bkgval = bkg.calc_background(DATA) assert not np.ma.isMaskedArray(bkgval) assert_allclose(bkgval, BKG, atol=1.e-2) assert_allclose(bkg(DATA), bkg.calc_background(DATA)) @pytest.mark.parametrize('bkg_class', BKG_CLASS) def test_background_axis(bkg_class): bkg = bkg_class(sigma_clip=SIGMA_CLIP) bkg_arr = bkg.calc_background(DATA, axis=0) bkgi = [] for i in range(100): bkgi.append(bkg.calc_background(DATA[:, i])) bkgi = np.array(bkgi) assert_allclose(bkg_arr, bkgi) bkg_arr = bkg.calc_background(DATA, axis=1) bkgi = [] for i in range(100): bkgi.append(bkg.calc_background(DATA[i, :])) bkgi = np.array(bkgi) assert_allclose(bkg_arr, bkgi) def test_sextrator_background_zero_std(): data = np.ones((100, 100)) bkg = SExtractorBackground(sigma_clip=None) assert_allclose(bkg.calc_background(data), 1.0) def test_sextrator_background_skew(): data = np.arange(100) data[70:] = 1.e7 bkg = SExtractorBackground(sigma_clip=None) assert_allclose(bkg.calc_background(data), np.median(data)) @pytest.mark.parametrize('rms_class', RMS_CLASS) def test_background_rms(rms_class): bkgrms = rms_class(sigma_clip=SIGMA_CLIP) assert_allclose(bkgrms.calc_background_rms(DATA), STD, atol=1.e-2) assert_allclose(bkgrms(DATA), bkgrms.calc_background_rms(DATA)) photutils-0.4/photutils/centroids/0000755000214200020070000000000013175654702021632 5ustar lbradleySTSCI\science00000000000000photutils-0.4/photutils/centroids/__init__.py0000644000214200020070000000027013175634532023741 0ustar lbradleySTSCI\science00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst """ This subpackage contains tools for centroiding objects in an astronomical image. """ from .core import * # noqa photutils-0.4/photutils/centroids/core.py0000644000214200020070000002726213055576313023143 0ustar lbradleySTSCI\science00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst """ Functions for centroiding sources and measuring their morphological properties. """ from __future__ import (absolute_import, division, print_function, unicode_literals) import warnings import numpy as np from astropy.modeling import Fittable2DModel, Parameter from astropy.modeling.models import (Gaussian1D, Gaussian2D, Const1D, Const2D, CONSTRAINTS_DOC) from astropy.modeling.fitting import LevMarLSQFitter from astropy.utils.exceptions import AstropyUserWarning from ..morphology import data_properties __all__ = ['GaussianConst2D', 'centroid_com', 'gaussian1d_moments', 'fit_2dgaussian', 'centroid_1dg', 'centroid_2dg'] class _GaussianConst1D(Const1D + Gaussian1D): """A model for a 1D Gaussian plus a constant.""" class GaussianConst2D(Fittable2DModel): """ A model for a 2D Gaussian plus a constant. Parameters ---------- constant : float Value of the constant. amplitude : float Amplitude of the Gaussian. x_mean : float Mean of the Gaussian in x. y_mean : float Mean of the Gaussian in y. x_stddev : float Standard deviation of the Gaussian in x. ``x_stddev`` and ``y_stddev`` must be specified unless a covariance matrix (``cov_matrix``) is input. y_stddev : float Standard deviation of the Gaussian in y. 
``x_stddev`` and ``y_stddev`` must be specified unless a covariance matrix (``cov_matrix``) is input. theta : float, optional Rotation angle in radians. The rotation angle increases counterclockwise. """ constant = Parameter(default=1) amplitude = Parameter(default=1) x_mean = Parameter(default=0) y_mean = Parameter(default=0) x_stddev = Parameter(default=1) y_stddev = Parameter(default=1) theta = Parameter(default=0) @staticmethod def evaluate(x, y, constant, amplitude, x_mean, y_mean, x_stddev, y_stddev, theta): """Two dimensional Gaussian plus constant function.""" model = Const2D(constant)(x, y) + Gaussian2D(amplitude, x_mean, y_mean, x_stddev, y_stddev, theta)(x, y) return model GaussianConst2D.__doc__ += CONSTRAINTS_DOC def centroid_com(data, mask=None): """ Calculate the centroid of a 2D array as its "center of mass" determined from image moments. Invalid values (e.g. NaNs or infs) in the ``data`` array are automatically masked. Parameters ---------- data : array_like The 2D array of the image. mask : array_like (bool), optional A boolean mask, with the same shape as ``data``, where a `True` value indicates the corresponding element of ``data`` is masked. Returns ------- centroid : `~numpy.ndarray` The ``x, y`` coordinates of the centroid. """ from skimage.measure import moments data = np.ma.asanyarray(data) if mask is not None and mask is not np.ma.nomask: mask = np.asanyarray(mask) if data.shape != mask.shape: raise ValueError('data and mask must have the same shape.') data.mask |= mask if np.any(~np.isfinite(data)): data = np.ma.masked_invalid(data) warnings.warn('Input data contains input values (e.g. NaNs or infs), ' 'which were automatically masked.', AstropyUserWarning) # Convert the data to a float64 (double) `numpy.ndarray`, # which is required for input to `skimage.measure.moments`. # Masked values are set to zero. data = data.astype(np.float) data.fill_value = 0. data = data.filled() m = moments(data, 1) xcen = m[1, 0] / m[0, 0] ycen = m[0, 1] / m[0, 0] return np.array([xcen, ycen]) def gaussian1d_moments(data, mask=None): """ Estimate 1D Gaussian parameters from the moments of 1D data. This function can be useful for providing initial parameter values when fitting a 1D Gaussian to the ``data``. Parameters ---------- data : array_like (1D) The 1D array. mask : array_like (1D bool), optional A boolean mask, with the same shape as ``data``, where a `True` value indicates the corresponding element of ``data`` is masked. Returns ------- amplitude, mean, stddev : float The estimated parameters of a 1D Gaussian. """ if np.any(~np.isfinite(data)): data = np.ma.masked_invalid(data) warnings.warn('Input data contains input values (e.g. NaNs or infs), ' 'which were automatically masked.', AstropyUserWarning) else: data = np.ma.array(data) if mask is not None and mask is not np.ma.nomask: mask = np.asanyarray(mask) if data.shape != mask.shape: raise ValueError('data and mask must have the same shape.') data.mask |= mask data.fill_value = 0. data = data.filled() x = np.arange(data.size) x_mean = np.sum(x * data) / np.sum(data) x_stddev = np.sqrt(abs(np.sum(data * (x - x_mean)**2) / np.sum(data))) amplitude = np.ptp(data) return amplitude, x_mean, x_stddev def fit_2dgaussian(data, error=None, mask=None): """ Fit a 2D Gaussian plus a constant to a 2D image. Invalid values (e.g. NaNs or infs) in the ``data`` or ``error`` arrays are automatically masked. The mask for invalid values represents the combination of the invalid-value masks for the ``data`` and ``error`` arrays. 
Parameters ---------- data : array_like The 2D array of the image. error : array_like, optional The 2D array of the 1-sigma errors of the input ``data``. mask : array_like (bool), optional A boolean mask, with the same shape as ``data``, where a `True` value indicates the corresponding element of ``data`` is masked. Returns ------- result : A `GaussianConst2D` model instance. The best-fitting Gaussian 2D model. """ data = np.ma.asanyarray(data) if mask is not None and mask is not np.ma.nomask: mask = np.asanyarray(mask) if data.shape != mask.shape: raise ValueError('data and mask must have the same shape.') data.mask |= mask if np.any(~np.isfinite(data)): data = np.ma.masked_invalid(data) warnings.warn('Input data contains input values (e.g. NaNs or infs), ' 'which were automatically masked.', AstropyUserWarning) if error is not None: error = np.ma.masked_invalid(error) if data.shape != error.shape: raise ValueError('data and error must have the same shape.') data.mask |= error.mask weights = 1.0 / error.clip(min=1.e-30) else: weights = np.ones(data.shape) if np.ma.count(data) < 7: raise ValueError('Input data must have a least 7 unmasked values to ' 'fit a 2D Gaussian plus a constant.') # assign zero weight to masked pixels if data.mask is not np.ma.nomask: weights[data.mask] = 0. mask = data.mask data.fill_value = 0.0 data = data.filled() # Subtract the minimum of the data as a crude background estimate. # This will also make the data values positive, preventing issues with # the moment estimation in data_properties (moments from negative data # values can yield undefined Gaussian parameters, e.g. x/y_stddev). props = data_properties(data - np.min(data), mask=mask) init_const = 0. # subtracted data minimum above init_amplitude = np.ptp(data) g_init = GaussianConst2D(constant=init_const, amplitude=init_amplitude, x_mean=props.xcentroid.value, y_mean=props.ycentroid.value, x_stddev=props.semimajor_axis_sigma.value, y_stddev=props.semiminor_axis_sigma.value, theta=props.orientation.value) fitter = LevMarLSQFitter() y, x = np.indices(data.shape) gfit = fitter(g_init, x, y, data, weights=weights) return gfit def centroid_1dg(data, error=None, mask=None): """ Calculate the centroid of a 2D array by fitting 1D Gaussians to the marginal ``x`` and ``y`` distributions of the array. Invalid values (e.g. NaNs or infs) in the ``data`` or ``error`` arrays are automatically masked. The mask for invalid values represents the combination of the invalid-value masks for the ``data`` and ``error`` arrays. Parameters ---------- data : array_like The 2D data array. error : array_like, optional The 2D array of the 1-sigma errors of the input ``data``. mask : array_like (bool), optional A boolean mask, with the same shape as ``data``, where a `True` value indicates the corresponding element of ``data`` is masked. Returns ------- centroid : `~numpy.ndarray` The ``x, y`` coordinates of the centroid. """ data = np.ma.asanyarray(data) if mask is not None and mask is not np.ma.nomask: mask = np.asanyarray(mask) if data.shape != mask.shape: raise ValueError('data and mask must have the same shape.') data.mask |= mask if np.any(~np.isfinite(data)): data = np.ma.masked_invalid(data) warnings.warn('Input data contains input values (e.g. 
NaNs or infs), ' 'which were automatically masked.', AstropyUserWarning) if error is not None: error = np.ma.masked_invalid(error) if data.shape != error.shape: raise ValueError('data and error must have the same shape.') data.mask |= error.mask error.mask = data.mask xy_error = np.array([np.sqrt(np.ma.sum(error**2, axis=i)) for i in [0, 1]]) xy_weights = [(1.0 / xy_error[i].clip(min=1.e-30)) for i in [0, 1]] else: xy_weights = [np.ones(data.shape[i]) for i in [1, 0]] # assign zero weight to masked pixels if data.mask is not np.ma.nomask: bad_idx = [np.all(data.mask, axis=i) for i in [0, 1]] for i in [0, 1]: xy_weights[i][bad_idx[i]] = 0. xy_data = np.array([np.ma.sum(data, axis=i) for i in [0, 1]]) constant_init = np.ma.min(data) centroid = [] for (data_i, weights_i) in zip(xy_data, xy_weights): params_init = gaussian1d_moments(data_i) g_init = _GaussianConst1D(constant_init, *params_init) fitter = LevMarLSQFitter() x = np.arange(data_i.size) g_fit = fitter(g_init, x, data_i, weights=weights_i) centroid.append(g_fit.mean_1.value) return np.array(centroid) def centroid_2dg(data, error=None, mask=None): """ Calculate the centroid of a 2D array by fitting a 2D Gaussian (plus a constant) to the array. Invalid values (e.g. NaNs or infs) in the ``data`` or ``error`` arrays are automatically masked. The mask for invalid values represents the combination of the invalid-value masks for the ``data`` and ``error`` arrays. Parameters ---------- data : array_like The 2D data array. error : array_like, optional The 2D array of the 1-sigma errors of the input ``data``. mask : array_like (bool), optional A boolean mask, with the same shape as ``data``, where a `True` value indicates the corresponding element of ``data`` is masked. Returns ------- centroid : `~numpy.ndarray` The ``x, y`` coordinates of the centroid. """ gfit = fit_2dgaussian(data, error=error, mask=mask) return np.array([gfit.x_mean.value, gfit.y_mean.value]) photutils-0.4/photutils/centroids/tests/0000755000214200020070000000000013175654702022774 5ustar lbradleySTSCI\science00000000000000photutils-0.4/photutils/centroids/tests/__init__.py0000644000214200020070000000017113055576313025102 0ustar lbradleySTSCI\science00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst """ This packages contains affiliated package tests. """ photutils-0.4/photutils/centroids/tests/test_core.py0000644000214200020070000001313613175634532025340 0ustar lbradleySTSCI\science00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst from __future__ import (absolute_import, division, print_function, unicode_literals) import itertools import numpy as np from numpy.testing import assert_allclose from astropy.modeling.models import Gaussian1D, Gaussian2D import pytest from ..core import (centroid_com, centroid_1dg, centroid_2dg, gaussian1d_moments, fit_2dgaussian) try: import skimage # noqa HAS_SKIMAGE = True except ImportError: HAS_SKIMAGE = False XCS = [25.7] YCS = [26.2] XSTDDEVS = [3.2, 4.0] YSTDDEVS = [5.7, 4.1] THETAS = np.array([30., 45.]) * np.pi / 180. DATA = np.zeros((3, 3)) DATA[0:2, 1] = 1. DATA[1, 0:2] = 1. DATA[1, 1] = 2. 
@pytest.mark.parametrize( ('xc_ref', 'yc_ref', 'x_stddev', 'y_stddev', 'theta'), list(itertools.product(XCS, YCS, XSTDDEVS, YSTDDEVS, THETAS))) @pytest.mark.skipif('not HAS_SKIMAGE') def test_centroids(xc_ref, yc_ref, x_stddev, y_stddev, theta): model = Gaussian2D(2.4, xc_ref, yc_ref, x_stddev=x_stddev, y_stddev=y_stddev, theta=theta) y, x = np.mgrid[0:50, 0:47] data = model(x, y) xc, yc = centroid_com(data) assert_allclose([xc_ref, yc_ref], [xc, yc], rtol=0, atol=1.e-3) xc2, yc2 = centroid_1dg(data) assert_allclose([xc_ref, yc_ref], [xc2, yc2], rtol=0, atol=1.e-3) xc3, yc3 = centroid_2dg(data) assert_allclose([xc_ref, yc_ref], [xc3, yc3], rtol=0, atol=1.e-3) @pytest.mark.parametrize( ('xc_ref', 'yc_ref', 'x_stddev', 'y_stddev', 'theta'), list(itertools.product(XCS, YCS, XSTDDEVS, YSTDDEVS, THETAS))) @pytest.mark.skipif('not HAS_SKIMAGE') def test_centroids_witherror(xc_ref, yc_ref, x_stddev, y_stddev, theta): model = Gaussian2D(2.4, xc_ref, yc_ref, x_stddev=x_stddev, y_stddev=y_stddev, theta=theta) y, x = np.mgrid[0:50, 0:50] data = model(x, y) error = np.sqrt(data) xc2, yc2 = centroid_1dg(data, error=error) assert_allclose([xc_ref, yc_ref], [xc2, yc2], rtol=0, atol=1.e-3) xc3, yc3 = centroid_2dg(data, error=error) assert_allclose([xc_ref, yc_ref], [xc3, yc3], rtol=0, atol=1.e-3) @pytest.mark.skipif('not HAS_SKIMAGE') def test_centroids_withmask(): xc_ref, yc_ref = 24.7, 25.2 model = Gaussian2D(2.4, xc_ref, yc_ref, x_stddev=5.0, y_stddev=5.0) y, x = np.mgrid[0:50, 0:50] data = model(x, y) mask = np.zeros_like(data, dtype=bool) data[10, 10] = 1.e5 mask[10, 10] = True xc, yc = centroid_com(data, mask=mask) assert_allclose([xc, yc], [xc_ref, yc_ref], rtol=0, atol=1.e-3) xc2, yc2 = centroid_1dg(data, mask=mask) assert_allclose([xc2, yc2], [xc_ref, yc_ref], rtol=0, atol=1.e-3) xc3, yc3 = centroid_2dg(data, mask=mask) assert_allclose([xc3, yc3], [xc_ref, yc_ref], rtol=0, atol=1.e-3) @pytest.mark.skipif('not HAS_SKIMAGE') @pytest.mark.parametrize('use_mask', [True, False]) def test_centroids_nan_withmask(use_mask): xc_ref, yc_ref = 24.7, 25.2 model = Gaussian2D(2.4, xc_ref, yc_ref, x_stddev=5.0, y_stddev=5.0) y, x = np.mgrid[0:50, 0:50] data = model(x, y) data[20, :] = np.nan if use_mask: mask = np.zeros_like(data, dtype=bool) mask[20, :] = True else: mask = None xc, yc = centroid_com(data, mask=mask) assert_allclose(xc, xc_ref, rtol=0, atol=1.e-3) assert yc > yc_ref xc2, yc2 = centroid_1dg(data, mask=mask) assert_allclose([xc2, yc2], [xc_ref, yc_ref], rtol=0, atol=1.e-3) xc3, yc3 = centroid_2dg(data, mask=mask) assert_allclose([xc3, yc3], [xc_ref, yc_ref], rtol=0, atol=1.e-3) @pytest.mark.skipif('not HAS_SKIMAGE') def test_centroid_com_mask(): """Test centroid_com with and without an image_mask.""" data = np.ones((2, 2)).astype(np.float) mask = [[False, False], [True, True]] centroid = centroid_com(data, mask=None) centroid_mask = centroid_com(data, mask=mask) assert_allclose([0.5, 0.5], centroid, rtol=0, atol=1.e-6) assert_allclose([0.5, 0.0], centroid_mask, rtol=0, atol=1.e-6) @pytest.mark.skipif('not HAS_SKIMAGE') def test_invalid_mask_shape(): """ Test if ValueError raises if mask shape doesn't match data shape. 
""" data = np.zeros((4, 4)) mask = np.zeros((2, 2), dtype=bool) with pytest.raises(ValueError): centroid_com(data, mask=mask) with pytest.raises(ValueError): centroid_1dg(data, mask=mask) with pytest.raises(ValueError): centroid_2dg(data, mask=mask) with pytest.raises(ValueError): gaussian1d_moments(data, mask=mask) @pytest.mark.skipif('not HAS_SKIMAGE') def test_invalid_error_shape(): """ Test if ValueError raises if error shape doesn't match data shape. """ error = np.zeros((2, 2), dtype=bool) with pytest.raises(ValueError): centroid_1dg(np.zeros((4, 4)), error=error) with pytest.raises(ValueError): centroid_2dg(np.zeros((4, 4)), error=error) def test_gaussian1d_moments(): x = np.arange(100) desired = (75, 50, 5) g = Gaussian1D(*desired) data = g(x) result = gaussian1d_moments(data) assert_allclose(result, desired, rtol=0, atol=1.e-6) data[0] = 1.e5 mask = np.zeros_like(data).astype(bool) mask[0] = True result = gaussian1d_moments(data, mask=mask) assert_allclose(result, desired, rtol=0, atol=1.e-6) data[0] = np.nan mask = np.zeros_like(data).astype(bool) mask[0] = True result = gaussian1d_moments(data, mask=mask) assert_allclose(result, desired, rtol=0, atol=1.e-6) def test_fit2dgaussian_dof(): data = np.ones((2, 2)) with pytest.raises(ValueError): fit_2dgaussian(data) photutils-0.4/photutils/conftest.py0000644000214200020070000000265213175634532022043 0ustar lbradleySTSCI\science00000000000000# this contains imports plugins that configure py.test for astropy tests. # by importing them here in conftest.py they are discoverable by py.test # no matter how it is invoked within the source tree. from astropy.tests.pytest_plugins import * # noqa # Uncomment the following line to treat all DeprecationWarnings as # exceptions # enable_deprecations_as_exceptions() # noqa # Uncomment and customize the following lines to add/remove entries from # the list of packages for which version numbers are displayed when running # the tests. Making it pass for KeyError is essential in some cases when # the package uses other astropy affiliated packages. try: PYTEST_HEADER_MODULES['Astropy'] = 'astropy' # noqa PYTEST_HEADER_MODULES['scikit-image'] = 'skimage' # noqa del PYTEST_HEADER_MODULES['h5py'] # noqa except (NameError, KeyError): # NameError is needed to support Astropy < 1.0 pass # Uncomment the following lines to display the version number of the # package rather than the version number of Astropy in the top line when # running the tests. import os # This is to figure out the affiliated package version, rather than # using Astropy's try: from .version import version except ImportError: version = 'dev' try: packagename = os.path.basename(os.path.dirname(__file__)) TESTED_VERSIONS[packagename] = version # noqa except NameError: # Needed to support Astropy <= 1.0.0 pass photutils-0.4/photutils/cython_version.py0000644000214200020070000000007213175654701023261 0ustar lbradleySTSCI\science00000000000000# Generated file; do not modify cython_version = '0.27.2' photutils-0.4/photutils/datasets/0000755000214200020070000000000013175654702021450 5ustar lbradleySTSCI\science00000000000000photutils-0.4/photutils/datasets/__init__.py0000644000214200020070000000026313175634532023561 0ustar lbradleySTSCI\science00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst """ Load or make datasets for examples and tests. 
""" from .load import * # noqa from .make import * # noqa photutils-0.4/photutils/datasets/data/0000755000214200020070000000000013175654702022361 5ustar lbradleySTSCI\science00000000000000photutils-0.4/photutils/datasets/data/fermi_counts.fits.gz0000644000214200020070000005066012346164012026360 0ustar lbradleySTSCI\science00000000000000‹{‰Rfermi_counts_gc.fitsí}m\DZÞY.%ù^$äK¾$› øJFL“Ô‹e3qšZË„%JW†å‹€ ©•Ęä2»«·Ÿ™Y¶Tûl½wõ™³2 ¢v¦OwUuuÕSÕ}ÎÌÜ¿ûáÇìïíýn¡ƒ½ë{Žž}qtüôdïôhïwîïœ>|öùÃãϹ{{¿¿{ðñÝ¿ü~³â÷ðøøá÷{Ÿ?<}¸wúýóCžËtïö_îÞßøÝXñ{öõÓ¿ï}Ñ8?~zøìäñѳ–ßþ_öï½'Î7Lf/é%½¤—ô’^ÒKzI/é†Öåäý»ÝûÝÞëw?¼ýþª’Þ{\¾¾wÖzøÝéYUjñÓêç·ÞºwíÎíOÎûË'·¯]»öóŸÿü—{«ÿ­¸??><9|¶ZéÓ¯÷žýßÃG§k?zzxúÕÑç{w>Y¹Ë†ß›¿ºqó„öÿûõrøÅáñá³G‡’‹œ×ï“?ßþ€_ß½¿Úè·÷ðt£Ì±‡ñ÷ö?8`ù]»ñ«µýþrm³¤Ÿ=:Þ8ó¯Õr?ÿêû“Ç>Yí@Wf] }~tòøÎ§÷îžñ{ýóÃ/7ŒÎÛïã6ìëgO×|^æ»^7_¬Ç탋ëñÞþÈ‚lÖcÃïfÝzðñq¶+ý" ²YžßÙz|–X7#ëñ™¾Ÿ|tp[›ïݧ¿\MõèôáÆêo¬¤þB±ß{·Ö˜¼Öo…'o]»yóÚÍwnܸõæ[·n¼ûúŠáŸî=:><㷙ý7>[ѵ?¼öÞ{_}uëéÓ[''{Ÿnøýáîû÷n¸¿â÷·ÇÏž~þàÑÑ×+oxððW_¬ðxÍ0Dûìß¿óÑÇgþwÿà‚ýž=|z¸6Ûéá“ÓGGÏ÷¾<|vx¼ÒöÙ—gxŽîÞ»ðɧýVî¼iâù=~vrzüõÓ•'« ×ö»öÑïïoìwã7×Öÿ½}pó[o¿uë­_ÿjí×ë9Ÿœ><>=3ÞÃgŸï>~¡ôÊ+þvrxüÍ‹ÕútÃoÿÞ{m=Îø­ÖcóßüWLÜîÜýpí‚+~ͧÎÏwív'{_o†oX>Y±þ±Ïùâšß_÷?ùHô¿GOŽý}ÇÇ/âÞXß¿ûŸÝ_ûßÁ§ßæl=¹µf'ߟœ>]iºŠ®UŒ|}rˆÇlk~ŸìÿaÍïƒîÜþà¿Aà‹ãõRoxl¦¿p1ìüéödzùí+~ß~µÂµ<|1óÏqJçÿÕÓ½¿>Û{øüù“Çï|ÿÁGŸðù’ò[u<›ó·«ñ_?{øÍÃÇOþmŠ«©?|öýٵϿ¾÷Þý?íoìÇÐÛ¢ÙEÚÿߟ޽÷Ñ_.ò[y÷f}÷WéüÙÑwë%Y¥ºÝàëÉóÃG¿X¡Øé…sÅUhìòçýOVëññáéj^>~ôÕá““£g›e¹¾·‰êëëXüø®C¿;Ÿìß>øè“õú~yºB˜=\ßûG_œ~ûðø,2¾9<Þ•gðµŠß ˜QúãÝû+vŸ1’>xxzg ]>|~pøôù:$~ùÍ*k½µ6Å;×o¼}ýÆ;{oÞ¼uã×·Þúí*<\Íëûøí÷Ü1Ç|ÿ¸çO÷?ýp5ßÇ_üñèñ_ÿøìñç««¿¯oæûÇ÷>Ý[I~ô÷“¯Ÿî}ýüóMäþå¿>XáøÊó‚ß _n¯Ømðþ×ïÜüÍM´ßá6¹ÈÅõ½û«á¬ÞX•æ>¼}ÿOoìÿyÿÞÁƒ;«…½ÿË·~ùñ»þõ/^g¦ÆÒ{÷WhuÆï½U(oö« pßÍà¿V¯½~óÖYÉ™eõ‚ßj¾gù| 5üVó=ãÇás‚_«‡^?¸ýûp¿•à·BÓ3~·Þ?¸[ ßÊ~omøíßÛÿäýÏ*ìwÆïÃÃ?ï跲߿›ëd{ëæ‹ê4Éu3ß·7üþº¿ÊÃ|pûÞûì§uÜÌ÷m±žLð[Í÷Œßz®ïôòÛÿËÁºü[ó»÷à£{¨ßu²ÓßTZ5<=X'_=yrôíÆ¿z|rztüý&7®j¾Çë~|ôtU´=ÿútò«œûý^Ë ¿kEÔøý~ÿý»÷~ЋêtøÍz7ô`­Ô‹"øß6yÿÿhó­ÖïîÆ2OŸÙçÖÞõ“G«¢öÑW׿|òðäôéóëß|ëÆµ?ܼ~ãúñ7ßúí;ï¼ûöÍ·|sóÍ6u|tÔøæ]„ï?YW«š{µ¾·öÞX鱿6Ü•r'ÿíw7~±÷¯ÿº÷Æþ7§ûO>_•lëòç­võ/ô|wµÉY÷ø!ÿžïù?_ôüÍwohÛ!Q¿;ë{»w?:+OÏ9ãÕׯ¿[o~7Î}ztVt¬Jê•Sì®K·[„Ÿfý/ŽŽÖ~ÒÜäß¾<}|&ã¹0¬ñ#ÿ|òäÁwÏ<_µ<8¿oÝ\ó}°*¡Ÿ¯xþË/ÜÆ|-…0Äoì7~?–×ßþÅÏßÜûÝïönn|äGcqÓsû-bd 衧 9ñ8ÌüÀþÏ¿Ê9̰ù~uôôðúáÑ·‡Ï®¯KÒë«]ÛÓÇןutzôìúªpöòÁ·‡‡òýƒooÜøíÙ ¾¹qãææø!œ¹YÔßœ[ÔMš_ÃÄ‹ªdÕôâÕª­Õ«ÖÆom¥Õ~ÿÆÍõõM%¸±ù»zÿÖoß}ó×ï¾yã­uã†+ÍýëÆöþ‡ùn.lD½“ðâ ?¦Þøtͽñ#6§˜ou¾\?·ò"‹·¤ž¢jý^>Oó’^ÒKzI/é%½¤Ñ4ÍC;3ɹ̱öÝa^_éSG”é¯éY©ƒ&ÓË{'1Ö÷šæŽ%NÞ6ãyþ.$=—¤ÿ:þTâa4Uͧ'ŸxÇTcæòê×;ÏøˆŒÝé,‡_Æyr^U¾±Æ=u’·ÖYÎq6ÎêQUëq¤éÔë'#±c >» áËK¨÷¤÷#äeó¡‡säi\d>Zß‘óFR\-_–¨S£lœfÇrãªjàžs‹¹i ûB«–«ÂÜ%û?¥H½Þÿb_Ôs¾×#wÔØjsíq«xxx{qv[qšñIϹ}ô}Oô´GåÌuΗÅVUÚ´Š.ËYÛ(¬ŽêQ5~é6IÕs§ñU]÷eûõ𯨫2}¥1K¯gFÉßÖùJ5Uï]¤÷sҒψ3´íœ6wmâ‘›¹ÿ²”u½Ì˜ù’Æçó/×z>òì‹{Ïœ":DúY>3 o"{ú¨ŒŠºä²ÅÏeÓwéôS±§7άšhiöµ×Ÿ£ÏÈñ£({v°Ôù¬i®³ß¥ð®ËÕK]ïmÕÆ•µÝ½-žs=G=wñèµ_lzÎ¥Ë6|¤z\”8_ø©ãÍM™{‘þ?Õçó–â7œÿüŒÝó™ÃG×öKð‹Ë®ÿ4꜎‹ÇmÅÑ4*ž¶±ÏXâó5^Zж±k•þÔ“_©|Ïܸ1ǹNïxÏ&*iÔ³=ýG¬I¦ofï¥%âÿÈÜF½ÛÓ?;fmÐýs4-EFÛÔ§úÌ,Z¿djoné=[³t ÿ¬¾‘öŒ.s“5§Þšui1š¥jÿþ)ÐÈóϾ#¯ÖuÜ-Éï3X3k£´­û@ÙõÞÖ3-?Ç×s¯ïèsï|æ”Ý‹i=²G’§ž[¢Þ£)ZS.ÅFÛÈÛžû(ìÔøpÏsD0`d¶-y½Ô9K4;ç:ÿõðÎÆ\Ä_zq·ç{ô8‹F<ãV‘'æ: B²¾¯l[±‘‘—½?µä3õËÆw¹—)E1µbo”é[ͧúüškáÕë•å_5n4/äKkùŠÜ±äX¯8§¬î_ÅϳvK^JÛÀÌ¥Èé¥mŸ•/ ÷¸ó2žÁJ´ÏÐ\±þûÚ”¿'5ÁXÿޏ—²ÔøöèºísÎ%ÄU–.£Îm³ÎÎò‹øÐeX³UAœ«kûÌYTËsÝÃØ6UœÿZ}·] VÒ¶q¡ªfÈì'qMw„×½²zÇÍÍ3J#íÁÆžziäù~æ,³³k›ùbiõ‡–¸·ãdYõˆ·ŸÄ4mkOhñªXÿÞº¥ºÖÔøáþ6#/ëS#~:;†ŽÍâÛ¨Øé‰ályÙq{IgNž¾ÕBûl«nïáU}.•?ªÖÌ—?—æ>Oñ½ŽùÛ”³ ŒòðÛÕ{Ð <äê–¹êÓj;SÞ&£ýUÒÍãw=þ¡µEjÃȘ¹jLj¬9q[㑟ªÞci|ªÏþ2õFÎe¦¥øxOüõʉʛû»€«Ï5(Eμç#=úôRï³÷ÑZ màõ¯ž\7·Ý%yÈ/ò»•™ÞkK«-G¯MvGp·µkß:«ªjÕÑç #Î’$9#ûæƒ4âY-/Uøó¶ÏZztÕöL޹/õPt­çÖsŽü¶-¢9Û¼cé{o-¤ÅKÔïGÿ>aÅ^¡×‡,<ù|¾;,‘³Çˆoxøyè2Çrä\Å{­ò¢G¿ÑÄùotÏYM6Ý­uÒ~ft_e¿Ñ{ìHî‹à¯g-zr“¥Ç÷cŸˆ/Eë— Íu†cшg¡"ø7 «—ˆ¯ÄoçéÓ«gï^;3Ϊ“$ýzýó²Ö^ÊÎ/š»2û‡ˆ]çüþímŸqTÊϬD'î|Ã+÷§FÛð¯ê:ÅÛ18Sÿ"õüݨ³ )ïÌù 
J´w¼ã3g^¬ä}í:öãöm‘3XM®&»Ê3ùµ‚*÷’ej…LŸ¥œ{e® ¶ë3$ k3û|Á—ˆ=*cõÍäð9×rTmŠsÏàu¶¾Ÿ;¤{%Q¿òàµÕ‘×;¦Šw5¶ebidÅgí3u¨5ÎsÝC46Öñó³#uðRñÀ’iím<û#Mfï|«ãGÓ+Ûœ½z0":ßž:4CZLdjßÈûê=‰&#;¾Š*yVåæl Ô#ÛÛ¯çÙ÷l_Í®¯:dTQOþ´ø-*kË^Šìû²¼¼X«ôè“yfãyGàÓCž³ Ϙ¬ÜmÔÓªÀ¾è\­Xõ½}žÚ\;'Ä~Úø Ì‹K‘þV¿,¯ ®Z1‹¥{äRûHÜŽ­Ö‰“Ä}oêÈÜÕ“[³ø¤Åp…îÛÆm¤9ÏL­ªá{óÕ#‹[دz ½ùõŸ•rZ&xæ\‰ÑÞübÉÎ×[KDtÒxpuÅzË/¶±êå«­IE~îáåñïL š‘ã!+ž+ꈌþ#rüˆ>•ã´±­ÀÞLŒö÷žÕs2#­:ùiyÐSiü9=­¾^5Y^š^™8¬Æžl !ýfDÔF=X¤½ÏÖ¢Uq—­×~ T•ó­ö›Y¸ É¹"ôãøàøž|š­·qçÔ™ïqÈÆV¶¿$¯gO õåðÞ““zc8’ó¢øÓ›OFDÚÇX¼*dfpbTèåÕs!b³ªúgî<;rçå­“²ò310⹎ª|›Ñ%"¯ê÷çôiiÏVÉwD^çÆUÛ-ãk¿âž—ïÁÝ©ŸOu^ñðÔpÆÂ Í·zlñßJÊ~—‡zöðÜu\‹lò´U¬…´ÈòÊôíõÕª¹gøZõ\Ïo¨4ýz~+(S‡fûJ~Tµ·ôøI6î*kE­oÍOnŽs`gå>§ºfò®u—ð:=“ââ8òݯ\äãçØÿXýGò‘'¼k®­Ëœµ·ßRîˆðÏê…Tñ[b½ý½ùmD]™sõÞ£’*ðWêŸ]‡[yócÕ÷.е}ÅÑ'"/’ë³þV»¢¦å:i}´Tª·½Ô§Þ5¶ÎÒªœ ‡KÒs†^Š`W«dïgGütgê·¿·Vã®q¹4«OtÞ^õU”oÕ˜ ¼ÉR¤æ˜ Û¢y®J©N•bÑ‹èžÉ¢lž‹Ä¼ÅÛ²ƒôÚsÆÏéåsµOJ{‰~Õ}½kÇ}® ûkŸ}óØAò‹+ðº"†‘oT¯ÈumL/yí™cö·‰´š¨7ï{Ö k‡h½ƒçÌÑ¢jߊ̹'Þ´óæ,O‹8¿ã®Y<¬Ü“©#µAd\DžÔ'ãѸæÆië%æemæý®I«Æñø@U:G¼EeIùEš7gw)Oys^4v«jH¯?D)ãVný,FeóJs°oÆF#êÄ^¼ÐâAòY ã<>¦µizxÆZ„úáë]Òæù,§{iÏ¢röó♥_Åù‚–ü¾$é ýíWož•Ö»:ÿzyD×ÇKÑgq¼~Öc3Mf?¯OFùfÛ. q±Óó]ÞœôððÔØÒ˜lM!®ŽˆþÒ~Ek“ö¹sølE=ì¹7è‘Cç¼KÚ𺛼Ÿ9ñê%õÑê1ªËȵÌäG´e¦fÍ~__V¦fc ë¼X˜_D¬/ôúj¶¯„SÞZÄ«&»‘çÙ',L¶ÖÅ’EßGrw]³Ÿ‹4¾\?ézo|jgNø>â/ø¹¡®GâÂÓ_[o/E1жÎ+žû¿™Z4‹S™œEùxmñÊ;¢ò·ðg޼¡µiö‹ÌG‹MO¼j~`ñôÜg­¸wèë݃h9gŽú2“oFS÷¥6®Ô/òœRâý~:ìÓƒŸT¶VÇpr-òÖ4VÿhέÐ%zûDñÔÓó•ÈøŒLlóÔkYÙ]¿Òò@Ô×¢ëéyþ9b#O®[SälÑÞÜoQfŒ—·¶ÿµmv…kïö·ac¦¦”Ú)OOâ­g¼ûªjêÁ.'jµ˜ä›žÏÉZTy¶­ñíñ¥¨ÿ¶×^;H‰sq±ÅÅ`¡°ÆÖðÇöâ·ÖNÉÚ—dó”7F$œáâ–’ö]óžûœ^óúsæ;“­þ^Ì¥zJÏOIºà¾`wúÑo­Ü¤Õ\_«žMœãÚ*j+¯ßèØh}›¡ª:¦‚"qÐKÞg+¬ü©óbʉî<˜JõÉø¢FÃ<¿‰ÍsVüIqÒÚ¥{Ä?K^K¼y4"+’ÿw”¸ÒøbÍÃÉAÒÎ’8ŠÔí–N™Z‚ËyIë6â³%‘ÏR{k òÄi†¬ùzr‡…‰·6Öâ!éáåÁÕa´¯´¦QìÈ`M#m?›±KgÇã5ü|c„21Ðó¸ŸlÝnù­)®Nç÷ Ó¤ÛLòi¬›¼¼P_NiœFVÌk2"Ÿaµâœãѯ—0—qkåÕ^[óÕð0#ÏÓ¯çZ¤¶îÉ•ÜXŒ oÌ[6öú>GÒ^€»Ó“°^ã^kñï‰1ÚÆÅ}Õ籪ê&m ¥µÓÚ£²#c×5Þ«/þþó‹W>ž9á²÷FG¬ŸçÚú¯÷w³}"ýpL¶¾”Ú#zdóŽGÎMÊKÚü«b·——uBßWŸé"z}^#í3q½~¬é¥álçõ‰è~‡# ã¼ëoñ¶æ£­µ¯¹±¼÷ºÖýþiõïµÕ¿ÿ²ú÷_§ó9Dª5‘V‹Ò¶Ìó'yê,éó?Üx­ïšv᯴=uj”‡6xë=ë}¤Þìi÷7ßFÖÉ[ kmšœH®ÌèíÅnI®T_y>Áa\‹c,}­:Õ»&ÚùæUAž¥×Þ“W,ü·Æyðѫ׺?æ†5­÷ ?›ÎòÇzñšb=ÅJŸimhS-GEÖMÃI+Ÿrq¡ù¨äŸVN‘øKý¢ù£‘Sž³ø9Î{¹u·ò$Ó’_¹²rmöÙ)zjWoþá®EmÅÝ·öÖ\_N‡¨NÞšË[ažŒÞ_‹ø,’dÓ¨¸³Zé5Öûx­½_ãÞ+/þþ‡él?r•´IzIzÐ6Ï™P&wxûIÏ(OB;Î׊Km^ÌF}¬>š<üç•˽ÏàÇ»g^Ö#zDÚ£}£>ѧw ès–ÈÓËÛz>ª—4 ÓðŽã#ùm—žïÑ0“ÓMºæÍIkí_䬯ŠNg+jµLÓoGxý ù+åïºJÚÆÒçÀ8~ÑüŠzJ|µ:’æÕiâ}Ú+ZßfpE»fa•ã+pAóá(Ž×•‰ÇÁ(?ºö[°“Ö–ÍgÚGr®ÅS:óÏ®}V©¯Õ'ú,&—⠇ІÜ:Yò"íÙœÏùŠ뛼vf…{Žʵ^cÆ6b»%Çz/Å•”S´˜ëÅ(^õÈð>?çyÞyJµl„8ß¶ðNÃ~O­©=¼×¤…këÅ… ü¤|8[zðSò!K_OÞG^Ü}Iïž\¥a8‡Ù\ìÉåQ|¥mZ>÷òõÆ‘d+úk¾hŒzî­Yï38€}9~’?KßMÙ^kX!å?mœ—<¹!sÖïÉ]Œ†­»ÓÅú¥‘§¶èÍ>™|¯­_Ö¦š_¡/Xuo‹sim¤’bžÃ݉iß™.Ön(Çû¯e±õ´î‘Fb?g‘\01:p¹KÃ8‹?^oóÆuo}®NçÏr<þ/a®¥W¦®¥ý´ýµ¦‡ƒQ\Ö0OÃ?íšæKÒœ=<²ùŒÃŽbm—ÚÚ½·Ÿ1¼4 ×¹½¤—‡+¼8fñ´®yó#ʎȳps½GÖš¸8ðø¤$Ûj—| ¯k˜À½·|Púl¶ôW{­µ­ÉºCñ:r®£åBéµ”#=6çäqyÏë'§¤û(Y1låcÚæñ7ª¯ô žØŽÜG“tiã%ýè{+†²Ï2Eqyýwý,ù:‡ÐïÚ±ø´ØÕÎǦ‰^¨çtÒbyFÉë¿ÖkªG'é8<§î½WdµI¼$ý<ã"ùóÎvÖy°ä#‘œÄñ‘ô±ô¶ôàúxI£ùš…Õ‘8ÐÖei:dc…öçjié^­¦‹Ç_¹qT¦ÇÆœ®¹fF)ê^¿ }[ÎxÍËÅ›¦¯$›ÓÅK–ŸÐ÷8îÙéûU-ÜkºDHÂ+®°wf%éŠzZqÄá2§’šLIžg^œ/zb]ãíÅÚfù#Õûx1^³“•c¸÷<•üTòAí¸þvrºZçTZüïÂ{*ã%µI×½ñEÉ:3òø§¦Sï8ßzÖÕ’ÇñÌ<—"­¡ägôºW&cN‘dJ˜‰Äù!ç÷¨ŽÁñ´ýêdÏSÏÉãÎ<ø¢ñ—Ú´sNž´¶ôA»sd}qyG»¯EûrÏ@qxFåàP$‰Gžõó`úxdÏ<ÁxϼQVÔ$¾TwêÿÜ=IIž¥—öž“ý,œñP4' Eîÿp~â­½­}Ÿ–sü±óKoœp:FúröÃ>Ü9«µŽXÿ!qx§Å€4õ¢¼8²Ö•òçÆõÚßÂ0ÌÜžSz–m)Õª»ooÍ6žüý%_ñâ7ןËURÜq:H÷½½ßË*}î1SòkNljé+­þ¥¾ÐÎf88ùÑ{÷RÌr<%ݽ¯%~šj¾¬ùR#\7)¥ó@:&bËG´±/ª'’tÏŒêAÇzqóg®ÊB\‘âÇIë)ÝÇÔâ ãÆsdùö‘Î)¸>”h~¦Mãgw6ŠºKûiiM¸k´4–ãÑtD}¸ë”8L³|Pk—jtN®·žG9ܾSŠiœ§Æåt²æÄéï­a,½<×¹¿HÞs”­A=6£Ïrã$Üy;qk'­§—g#í™//k^\\aɾT/./pŸáÐ|ŽkËæIîšÕÞ®qòÛk »w ‡7ZÌéÉÉ×|ûã3n6îL¼m§P®¦£„×Ú\"{^äëÍG¨¿fŽ„ã´Íûœ®(ïÊt~ §)f_/Žxr“¶þÈOóY‰—Ä1FÃ>ÉîtNÖs¹Üžû£NíÝOs1"í¯¹{#Ó—òÐjWïþVZ 
Ï4l–0†{ÏÅ™D_¶°êèåMùJ6ÐHŠWN†äçÚ\iv”|SãMÛ_}ñÏ€#çéÓÇš—gÜ^K50ö‘tm¯Â{G³‰öÛ{’Or˜Ž}P.¶!YüiüÓ>R?Nõ!̹è\_)N-Ìð’7î9œoqÂÉGÿ—润‹vÅñÔ—®Nç×…Û·îÂ{ ³ZÜÓ=?Õ‰óE«­ýŒ·¶÷ÕÖ–óGÔÍ㟴æçè=ÝÆS«û¿qݤ}‡åãŽpëNeKõ —ÃP'É÷¥—óöçOWåaýƒë¡}o•ƒ¿5„:!iëíÁd‹pmaÞ´tF¬˜È{i߃:j¹Aò )7Ð>ÍæfYÄÅ•”Q7:¦bdkC~Ò~”óuîyX”gù ^£×)î#_œëšÚ÷H^yñš“oá ·VNIï%¡Äù«g\›”s9}$ÙFiúr¿Y&ñåj`idá¾å§†i±ê±“¦+µ=WksÏëXø¦éÌa$âú"/Å7ÎÇÂ(-×IûB-v‘8Œ’®5³bíƒ>„|­8Ôöô\¬hØËùW‡pó r¹ù£¾Ú?Ú_Ó–Ã{i<7OÔ‹“ÅÍ­ý¥ûQÉ9]hÞâpš«©ÐæW oëÏ̓¾·~‹ž«QO_8F›3ö‘j Î’Îô:mÇÜ$a g'-N9ì“âV:סüÖÔâýIÒÑ£¿ÔG‹[©N•tçÖR‹-Úoý÷UÒ_Zc-WHx…ûkÜWÒØA¾\Í€:qížïxÒü–úŠÛœµkønØg®ss§ã8>öÒ÷t¿Á=‡¸!a¦fwOþš9ô5ú'ËÒ‡«uè¼ñž¿Xxµ¦öÝJܺâñzQÛóIõG»ÆÅƼ„ãZm3Á5ÚŽñ)żƃúbg+ÖÑŽœOaÌ!îHAÇKù‚ë+Õ T_)þ¸u•jUJÞﲓÖ1±Žå|ë*Œ§g²‡°Ÿ`\› çS’¿qñÍÍ›«q¹z•Ê@âbžÓ[G ƒ¨´ß¢˜˜>Òo Q=¤ïÓÑ|ýñ…«¸ë¯LmÁ£5Ö«Ðg0~¸9LП¾çüÃÚÿHú#_ÎŽxašÎïéu©l|hMÏ«`®A_¡ü¤X¦óÄxÄëT_¼†uŽãt¡üÐîÜþ‡›—•9µµç®q{m®®‘üUõæIÎ/¸vl“ê.ko#á#õA®?wÿypÏO®‰žÝîÀiÞÔNø·ço8 s9~œoq×iŸÆ“Þãábý‰bú.Þ›§|1–´8˜kèçRüÐyá~Jóu.×j¯¹ç÷®B_J¸7k}¸X±Îg,_àlÊ=‹‹²¸5ELAûI¶ÁñO)&ôK/ÎæØc’Ži÷m¸zrbú£Ýp.šè8ŒõöšÓ›ÊåüŸ«ûè|P_Š C¹}=‡ T? [ék´‡7tîôúUè;Me¶qÔŸ^ëVÝÆá37h3ºÁ³(œ ç¸öt.hoÄ_´+Û“ò—Oå#I¶Ã>ÓtÞR|Ð6¬k%œåæ*õ·ìÓdbŒ`,Jµ^×to6E_¦<¥ûÜ&!hDçˆXù•ú—KÑ–T6Ý?ìNçmÜÚ'2–¾§ça­1»µ¡L)Ss÷̨íðû‡Ð'¨ h_û¸~h_´!ú”vomÄa0çSTW)Ïb}%ùíú_û~ø¶¦-ß¶õÄ<„8Eu£:^1œŽtÜ9cö`ý0Mçqw—Ìk®½2÷ oQß« oœó<´­Ö„Ò¹‡%\ŠÏFJøKe`_*S«M%=h_|ÝüL«»è{ëqNÔ§Ðܽj[íÞ±Trûn¬†IW˜œ©L.h|h±*ÅW/Òv®ÆáôD¼§uçCÈïUr;£@{p6@{Q½Zæ±ÖÛããþ•ÚX’Ïù+ýÛx"Qÿår7‡éœ->í <©^í=bç4?Šwë¾ëµjßñÛd½6ýˆ­ˆ›’­Ðwq 9Œâæ<)ïGqÏ¢~´ÆÁyá=.¿µqt-Zô+ÊŸûìy#®nàæËõÕì†x±#­EÓã›»ïCåJ˜.õå®q<¤34ìŽz`ÞÅsN.‡MÓy»q¾.]Ÿ˜vª'?´…V«I˜Bç#í‰[ûU×ú¬éCy¢PGúwšÎÛú5¢Åºn-¶°Þ£>Ì݃D¿Aœçü‡®úü4ý³Ü3üv§‹q޶h23¨?Ðþ8?¬¯h¦±ÁùêÐæ³;·5ž)qµÍ-íÚzM<š~m½©©ž\¾@ü£r9,äòÆ·ÈWª(:?j›W Ã•W&Y'j{/¦éâhìp„ö¡s–êÄb\Ô“‹»†m¾®S^ȃbæs<¿C|šàºåKÆâ¾—«Û<¿0ÏH¾‰:âz!6Ðkt½q´¾—æÃÉåÖ×ý˜âžAÑ8ÁÕ×󱉴a¾¢1߯`<4Â}#êD÷\^ã| ±ªí蜗Ú|0Néßç¯M?>3òOÓùuâöæèóÔF˜÷(Z—O/d"!~#.bô‡Æk¬f3êë˜7h=ñÏÓy_§¼¨­qÎt]±ÖÄúa"íTíËa'ÖèRþ¢¯ƒ¦é"oäÏÅ5Ö/4Žwa<æ: Oé{ô?är%<áâ´ñàäÒXÆ5E]'¦Oû» ©Ïq¹ëU¦ ç8M×Ã/ÃéƒûN?+¯Lë.ԓÛi=ÜÚ[íú‡>„sCŸ£ÔÖ²½FÝÚ8Z'ÒØÆØÃ<έÑDþJ¹Ú‚ž1M0ž{¾»_ÄÅù4Ï8~ý÷ÕéGœ£ÏÒ5ÆùТø€sçð—¶qµ÷#.LÂ{Œ_Êcš.Ötœï´÷íà4ñëÊéÖÚ(_ÄßѯQ6ú]̘Sðo«±éZ¿FôÆzƒö¥±†µ*êŠygš.Η®áUèÏÙ†Æe³óÕé<>N0Žòk„÷)¨|Ê“Êl5ÆÎ4—‹v¡„˜Ìá;µ)òÇqè T¬%^…¾ë¹ýûÿþóêß$}®>Í_#¨nˆ›4?#Ñ1\ŽjxÖâ…ÚabÚû9¬Ã\EóúZ3Á8J¨ òçr·v\ ÷0'¢_IÏp±DmÄåJÎní:â"õqúL bêÁ7qñï±Fŵ£u"½‡GûKøIýwš.ú=Å`N¦„hG¬—<9AzMm1M÷›¸'Ãó1j'<ì‹×p<[Ógwº8?zނפ¸‘Ö•¶ÑýW¿Oðþ ´Q_§s§òwá/­I¸œ¾ü§éÇøhDÇJkƒsnrp^LÓù¹4MÓù«ÅUÛËýlú±Â=¾–öSô/Î…Ë;4)/Ü×Òµ£9ŸÃHNäóêtž?ú5b‡ P& ÛðœþãpèµéüºP»N0ñyÓÜÍí ¸X×r7Ž®õajcnmv }"í­â3ú>‡EWá:—ç¥=.Ú }uG~ˆ#\ÎáÆcžâæÆá5Ú‰êCåp9Q´]ò—ê…÷‰9Ÿ@}&2†Ú®}û+}þŒòi×¹øä°—âÀ.ü£v³úÞ_âæÚx·¿ Ô'©oM¤þ¥üiÜ¢R}§é¼N˜S)íÀ5®îkúpÿZžù'"‡Ö†»Óùß.à0}ïHþMç€×è9Æ ú2}9”Ο‹äGuB›bNÇ9ruÆ4שéÂíwp[~çâ‚ÃHº^QÛ¢ °m\×zbÆqøCyR´æ¤c±^âêÎ8ì¤~€{¤iºÈ_s¥½&â&ú·œí&f,úÚ׆‹Yinxßʦ÷ª8þ¨âåÕößœšC¨~xÇÑ÷R¼ÓyÒkæà>³uë'Å §Ór)_ÄWií&¸Fý±ûÓxäb…öÅ9MЧ®­†ïÎp˜ÂåF‰·ˆ·hKn^?\;:ïs±Ì­'‡ö•ðŒË[Ôfíî¸õäâÇÑë\ç{¸†8ªî}¸ØãäaŽálŽv™˜ñÒ™…tŽ€úáëiº(SŠn ^Çk¸nqzPûãz4ÜHªýÛˆžQáÜѯµx¤ý^›.êLqk‚þéÃÕž­ýÇå>©ŽòêGç„þ3Mϲñ™3<ǧþ¾Cúp±Ž1xúpþÂÍCÊ·hÒÚp1ƒºáÜׄ÷#¹5Çùp~Ž:ríÓt1hîŒI®‘øJxŽþ„²Ú\‘Ú ûJשÿhºsïQgN')†%Ûr2¯À_j#´9žq¾ˆqÌù4g·ÖÎå^úšâ7ú+òhzrxLy#¯ÖÆñB>»pòÁ3Òv‹%ç˜ß&èÏù ­¤û'í:Úw‚6Îo¹ó< 'íËÙ’³›VsQþÜG‹g Ï´¸§íœÿL~Š$ÕY\å-áô·õEý‘/§Êåüû"¦Ó±xåigûèCX#j¸Ku”öáx®%ÙB«%° õxpk ­òÃsl“ü–êÏp˜ÈÉ‘æ%ù•´.´m=î5rÃÖ ®SÌAÞøÜ-â· v¤|QOªG#ÎQÎDú¡]=úMð^Zw Ë´÷ØÎǽªôšÓ‰ówIIÊ^—ÎÛ{®^àêGŽ$,àôàü…ó.o ÎøL±ÄŸÓm-åoËϧé|,q±§E]¸8ÐúIëOek˜C_ã½InWIúqyqbúHçx”'ÚCZNç¿í5÷y"Ä ®æðè%áæΞܺKµ(mãöê\sº£,Ô}­ÍcwâךÚùHy\óO-ÿp¼¨.œí8%¾Ö{ÔÕCXËqëÃÙ…ú!wÊñÔöÏ~#áÙóÄüm:MÐN× ÏÐñŒKÂWi­¹÷k_kÞGë‹|¤s±ÖOÚ3pü´µ§ò8ûj„}$\“ð_søHýHº_GÇIçi\_ª«d+lC½¨Þ8?ŠÇ!®S8ŸÀzÏÔ¸³]#¹¹JØŒ±lá©Ö†ã%[Jc%¬ vi¹¸åôæø5âîIs¢üð5ò”ÆqçO¨/î×è½Ͼ ¯K±Ïù7Æ3.²Î’žZ^àâEÃgf([ò+)†P/꿜ÍRT|Mûr5¾6_mèXÉO¥õÍwœ4 Wµ9¬IºoÃÙ˜Ó‡òÔb–ëO¯{ð µyî ¾ }9INÛ¹= §Ç ñB:Ç”|JâI_Kkƒ¹ÆÚSã>ûI52ÕuõÄ£EÜ3([ª§‰·/æ\©¶Ú™Î¯!7_ëÕ×ŸÚ 
uôÄ¥ôëF¨;gîs^t¼Eœý9°üCÂxs%–|WÓÓAZi¼GN#i¯/õ—ôÒ°/¢&Óª1i†c$›âûH}!‘´Å>^Lçtôìi¬õãü™;{âò77No­ŽÇùÓqÚwº iñ'=—´#\Ã9xžÁv‹'Îßz-å¡ îùmN7i¿§Í_KÏÎJñ¤í…©¾4/J5„‡¤ùHø"ÕbV OLŒ% Ó¤5¢<½g9Ÿ E1]ó)Ï8mݤ~Üù'[³wÆÂTî:ŽÏ朓ÆOó!ä×^s¸Œ¥Ú»íO4_´Îx°%iß&ù––K¸½ òÅkßÔüÑòEÉæíµtö|¸yI{6ŠÛÔ—$¼°æÄÕø8ÔÊÇkÆRRl7þ8^Ã^‰§„3’–? <¼æ­y%²žgòðÓöO^\çdYþ¯½ÖÖ_Â;mÎ~Xó’â/2ŽÃH‹?¶iö“t²b@ÛKKõ‡Ukb¿Ièká¶£¼æ>wbr׬ë´ÝÚ‡qµ g;mΜI¶œ¦‹±%Õ«–|œ#'¿Éãü ç‹û2ª?÷,Ÿ$OÒŸƒ<=ØÄ½—ôâdFðY›—öI Ë$_ôê$µi¶ÉøµÕ§‘ôYGKô mÝ,ý¼vúKŸoÔb©ýÕâ_cãg¤ñ? ϸXçHÃ'”-á*¾æžßËb…4Î:£ôìMÖ_aúàx_NÓùï³Hª§¡éÑl§}OígéŒï­5ÕtÔæíÍOÞZ¼-~™{—–¾Z?nÆòóúWó4¿Îà=ë&=gãÅCާtÆÂõ—ÖŽê†ØÏÙƒ’ç+ÊäÚ-¬ÔH³½'nPŽõä0É'<û7I~†$½0'xýX‹kË>Ú½s齯³½÷ø3ömï¥3^K¾´–]Qw|ïÅ7.½9‹9Hž=a$†=þAßGìšÁ¯]аtœ†MˆÍÚ}a8ž(“Ó£ý»)ßõ¿«L;÷9Un}µVjãbÏâ£ëü¼Xfá…—´{gžxo¯­¼ÀµsýÑÎQ¼ä®q¾Ëݳ‰ØM#ïz{Ú½q6‡îZ³Î¸1oéºv~®Ù"rFíÁl/I¸­ÉñžƒzH«¹véLÉÃ[²­µf\—·¹ïPãdcÓ°Úš¿–{­½Œû™œ< =û8I'¯ÕŸÊçleñ•òåËÉxP’ö´MËçš-=¹œ#m~Ïy4wFHÿ¡ŸX÷K<ë§µ¡ ½¸kùp…íµqžvϽ&ɇzs”ER¬®_sß%&é—yÀÛÎ]×θ#Äõ®?®¾–~Còu=ß«¦éÊŘÔߛù¾qúdÇc»–k,>‘uF™=ñJùà˜èÚeÈk'ÍϼxÀÕ%˜«q|ë£õóRd="ï5žÞ=¥'§Zí½<1âÍ£¸Ž‘1HÑ{ÐÖñ¦7^{ŽÆÂǨ½9’puŽìSé8‹¤<#á®us#ñcQD¾4ÖŠ¡FÜÙcôŒhÝWú5”cñ¡¹kÒ{ï5/yž‘Åvï&£Ÿ”;µ|êÕAÛçGó¢ô^#­ÎÈðˆ`„×4ì‘Ö"³ïæäYã%LãtÒäHzpm‘û2Ò™ 7Ž;çpÉë#˜o¸ñYŸ±¾óCjËä«­‘tJº®ÉŒ¬-76kcçëZÆæ‘>ÜótŒt¯/³ð`'§‡wŽž:–’öÜG-²¾ç6ŠÛyÎéûèºY¸já{ä^w=ëß.Hkì±{Ã)i÷Yð½?9ûÓ×Ñ3@´Q/þYX̵epŽó-´‹„oÈ;¢«ÖÛ1$ŸÒ°Ð3‡²Ö€ÊõÜWöòò´·k\ªñ¢×¥Üf­ŸÇ75ŸÄ³çu<ùÅËOÃ+oLX|{Ρ½ºS½µ˜Òüųw–j&Êד±/ž yî5[>ì™—Ó$²Ö±Ç_$þŽÈêÕ±±ênL$ιïÅB,Òîé{|ÉsU"/Frã,½¸°ðI{£åîÜQ‹)-V¬|eµ!?©¿‡_fí2ë¦õõ>Ë@ûzÏþ5¹ÙyxÆHµ½¦Õ e|É“«ñLuÕøc—÷´>œž/OÍñåâWúŽø¨ÏDâHÂ?Ú®}Ž<ªö·ÆZ~ç‘]ù,¥fË,ŸÞq¨‡gŸê½.­•çÙ=I?/i1Ê]ó¬³w>)ý4œòÔš<ÔQÓ‹ë#µyúhÏRry0ól¢7ïrkyî š³¹Ü¼¼ë¡É£ÔsvhéÔ“ ‡ÐÆFó¼·¯õ9ò‰<ƒ åGîºÖ¯‚¢ºVñƒ"çá^?·p=ó,Ф„9Úy©%Ǫy²:{ãÊÊmquk„¸Ø“ødy{s9Žáþjü8>–n#(ZJ˜×«£•Ç«ê%¯|nݬø³žYñÆOÖ–^=3Tñl„·Ÿ·¿÷ù`K^eó<®½4/éùĨoKŸQ¤~ÌÍC›[o}á5iŒ4Nš»%#›S¥þ‘³T©†Ç^ŠæÌ^,ª—­,]¢6ˆà’Ͻu”w#6ò>ï\!+ÊÈßÓ~VJ¸§ñÔxEÆôò§ãðþ:òôÌÉò[+GÐ>Z®öÄGE}„y³•$7›Ó35htm¬œ­¬>Öšzô°â#[ÃUÖš’>èãyïs|¸öP¯¢ë8‚zãldŸHަäÉ5²âÝ#ŸŽ“t±ÎÃ<ÏŽeÉSC!~Gòwk¯¾7 ‘õLš7ßkØá£]÷òÒÆgI[ç‘á!ë™@Ižç³Z}y¾ëãÍ‘ÜéµEëŸY³9òÊì©á"¼½|³ç 5biµ³†ñܹU¦Nµ®KñªÅD»¤s:OÔ¾_)B#Æj>áO׌{ÞÜâÙ/D÷ºÕ±%åpÏà3M^ß·úhzjºUåËϼ㣵®§Þ«~VËEÚÚecΪI¥÷™ú•ÒÚn‘{"^]¼ŸýäàµH,TáõnŒ&wD}àñ•žš%Ú?ÂO['î>p7¢úpc#¶³¾ÏÉ’eµ{^Gek¸á“é7'yãÑÂ}J‘Å{ÿ BÑgr+ûFȪٰ֑l„ßÓŽ×=zhü[íæ¡ÌgW=ØÌÕ5\ã­½:Jï›,íúܤÕdR ¥ñ²ê7_´ÖòŒ«Ê¡QYÚó–ÞMâí‰ÿ?‰—÷š…I¾^òú¤uÝŠMO-è‘íå£Õٺ““½ã´v‰¯Çf#bWjëy>/j«¾´Ö¡*'I:U]‹Ê®ä5'Yþå}VŸ»æÙ [¾µIµ ¥ZrkE)#_ª%«eG®k¸Ñóš¶õ®UÆ÷£|GÔÕT™Ó®0¯­1ZNò^«ð…Þqk›ÙcõÆz¦¶âøUc{DvTL­æãÉžºš»9wŠÚÏ[‹{Û£Ôã7ÜÜÑ^ÑïM@Þ–M´ü'áú{ã/ò Ië/QD§¨lnN‘=Œwoáy~_[}%},ž|ÑøTç;K.×··«¬}8ŠÔš}ïmåKoÌj}-}v”kUà‘FÖçü³þ:_8,«¨ÛGÌ%âOY™ÙzËêS9ßQäÑ{y¥WŽ7—Wɯ–1‚¬ZxdD}(»7°Ú¸vo“xõ¬±'ÿhíÖxé3ð_wkÏ~Ö0ÓVõ¹®l®ï­${yäVãGE½#­¯§ö§Ôû]–>ž~=6¶> ÕËy¨!«|À{'wÑg‚¢6‰~dz‡¨m8y;Â?M'IúIû]•g}W þ^¡'xô–Þ[c²µ^ËÆT”<¹VòÇÏ+j©LŸ©½3¼zuék=£êͯ=Ø5—ý8^žšÖ³Þ£âÛs>¦ñöÔÍÞÏÉyëÒªìé[7U¹ SëzõÏøOÏ÷ŒdäZå§é±­|Ðd{çäÙ+XsóÌu®ïcÈ'·ò¹rÄ‚^ÿ“Ö`T¬"ÿ ï;Ïé7˜ñ5®M4oyò]$Æ,ò|-JV ¦õÑÆÐq†{ý*2_§Tx÷UñÍm½¹pi”­O#µñlìY·Œnõ¤÷ZöœÙSc÷ä¼lÜ÷ö‹•/-=¬ÏíFëµõ?ϳQë4M±ïùÏð§ý£µˆ¶&œÞQ{÷Pœˆ¢±Ô«K%ï옞>’/p¾aý7Þâ©é±W¦6P¯ÿö`—ć{ïÕ«"ö½±Âí!¬üìÍß8뻀,²ösa§'ö*åeøEj6Ϻg}9J‘ú®Š,ìlò‘2µž÷û£2*ÉÒ ±£JF´OO²l\Þ1’/nË'4Ûxü×s=²o‰ÔÙ<±u•?Gq®:ÏIµÐ¶)û}¹È#2VùHílɑڪ¨×Ÿ{cÈê7b]=|%,²êhÍÌ]S½õІÓQ~=zDIÚ#H¸&­wdm-òîKz)cÞüX…W;“¼žž±Qþ‘ß/òêßS³aî‰Ì§Êo=ò¢˜à­'{}nDœUðŒÆ[¤6æd ŽEs€ÆÛ’½¦žïfaïÈþ#óÝ–|K·,Eô‘âLó,롹â8ƒcÙ|N×*>â½6JV»Þ;¯lNåÆY±^‘çzÇUä¿êuî?:¶=y”ËWsÆI„‡µïŒê!ÍŽÑ\gÅTðw'"cGP4n%ßœ3ïjDõˆ|µ;‘º·ÊW4œŒ~n7R£oƒªsÖèçm<÷=¢û!麵'ðÈjšÊ:E’˽ÎÈÊÚÛZskœ§¾‰Î)Ã#‹_sÄ|¥Œh¼Js¯Ú›Tçÿß“hßÐSK{rbO]4º†¬¦ŠÜÐ#¯çùí3¨Z®áújäÅ;IŸh¾“â3£gE>¨¢h™oöslÛö¯ «öèÕ±×½u\DŸJþÖÜ"ù,ƒAYŸñPEMé±±«^̪š«vMZ‹‘¹w ¸»d¹Û®—·I=˜R=X©ñÈø}Å~U‹oîŒnIµS…®¾²Ö«bŸÖ›_22=ý<û_iŒG~åïަžøª¦&¯ò{´«r휶ðÆf•NsÆi%Uä©(¯Þ½—µŸ« ϳÜ^=´ë•öߟ*»¢ã¸ÜìñŠzon\Ëø=WÇE0«wSEÙ¼ìÝëfuðú[FŽå5i¯¹xmµ2Êööï•Å­-Wóir³zDj¦ŠÜ¨õÍŽ Yš{ïBåE>ÐHòwš¯9{Æ É‡25~ï^Vºfa~U­ÑkëèøÈï&÷èÞûû–â°¶’¼˜§a×úuö»Ãµ>sû5>Z§Ò÷^UP¤æÊÄCVíºÔó…©<Ôó9oÿŠý 
¶-ý;`«hä>.½¹¾G^tL%.J<=ß)nái&pûÔ±ëzö"ÙOË+=ØÒ“w­Üƒmžï¦çøTÕ}3|GÒÒ0¥ZÞè}¯öm¹™¯ mø'3³W÷ò¶ê‘4zŸ¥ÉÕÚ/½5µÕ®õ«¶†éšÍ=:ØTàÒ?ŠêŸù^ù Y5ÈO<µA´¶óø”UõÔȽc2{Ï ]¬ñ=õ+¾ÏÔ#Uy¬»#ûK®£{(‹sëU1nŽXi—¹ê¤mïu¢Øbý¦÷Þ[㌨s¼|²²{ϸ×kꉟž:e›{ƒ9b![x‹°G‡¬xô™ã·d¥~h³h â%MçÞz`„~]ªeTÈ÷®[oáAûFý*òÝ'Ú8Žzëâ5Z–GÕÞªgœ¶/Û“ÒW“ãÙsxøôR$.¼úYØÖ›{²±,é%Ų'ß i>Ð[?õŒ™ƒ¢þ«}_-öÙ&Í•#GÅ$£sUOX|GaaUáï‘gÙ|Äþ!j—9ÖÊ"ÔÙûí‘ëÙ¾~ZîòèQQ'gø{0>BUû9KÆHz׈êPñ{bÒžFÚ·jµ“·–µt©îëå׋u=²{øUî•GÈñ¶EÉ㯕ò$ÞÿQy¡ýÍždìÒ;ߥî[-Q¿¨NôJt¬ç7 ¤qÑ<ãå]‘+µq\Kóñè9c=vË<³™«Ö•W™KçÀÞ ßž\Ø‹¥VííñÙˆ®KÃÏQ¹_â9W}UÅ_Ò¿×nÕu–Ƴ÷=}Ov¿¡ê5ˆîq¤ö{à Ÿ¯ŠsOÏxîw%\Ûž æˆó‘Q5f†ÅXkßÙÚ²Ÿw‰êB_[¾Y—ž½yÅø¬¼È¹Àˆ}Z¤ÞÂ1sî½Xã;'Gúw-¥xŽÎ#º§Í|Wìœ5[ÕÚ Ÿès}’m£v™«NËúyÔ"ò³}GÔ†RåúôÊöäÎ 9=Ï&VQU]݃ÜÚâ:·g3{¯Q˜TÍoDG|¹šæÚ#6VîÍìÅ¢×zó$Çûý·Ú{K‡ ýz×Ú¯}o–g„"õ{V—lÝáÁù¥7>ªö(wDþÞöiTå?Ñš-:ߪõ¨&/®Î×£iiòzða©5ØHûnâ›Ñgî³%’f—LnÙYªµ¼u˜WfeiLU½Ø3wK~fëáÛK\~ªXGm¾YÌãé»ÓFÊ®ðç9b1ËSï¨_Œðo¯œ¹žQÅÞö沞þ~RnÈÈš«V‰âr†gïü2X‘]é«=ºnƒ¢øÛÃ/šGå±(U×{š « ¯WÄQdl¶Två~oŽõ³(£®oG瘷WOÍg=óÕäfÞW‘5¯%'ÏyíÍ™süælf/õ7<éú6ü`)øPEsÆØy2óÝ9ZÞ™Ó?£µŒ•C¤qÖøê}_Ïø*û:«˜‹*üœëÛkß^_©´sÕÞw‰T=—Þó4 ‡zÖ ‹G^ªä‘¥ÍËÒÞ`t>ŠâNvï:jm§õY$mÍæzþº‡Ï\¾á‘¿>¯%œuPù£ëÎ¥ïFðÉä· ÖeÎizs¢—ßœu:í¿í¸¢„Ï|ŒÐÍû\Éœqˆü­ý`µnKñk^£Ï/zp|)6´¨gŽÚïüFyYä=;ÉêâÑuŽÏÂWМ{%”Ë½Ž¶EeEùôìµé{Ëÿ3ò¬¾™ßè]J~ÈüvWškvÏœåSA‘½K¯Œª~V_nNÛÈ×ÙºÛÝ,57y)ºŸó\‹ö—~“ r?¿í<Í‘7¯USÅž·jO@Û<9·ú÷©ÙsÍ×*ñyÔþÜŠ•‘û¿ŒíæŽåm`‡uŽ^¡ÓÎÄóª²{eýZÉ_ã±ä|ÞûÛ=Ù±‹ø‘GŸ±ï­g22Ökƒù<3‡ê3ù|,;z}¥gÏìù¬ó¶Îg4’rxÔ‘zÁKUg=ÛÀÔ¹ðpô\*u‰®CÅ~o„_fiÄÙˆ5&ËkýþZÏ9ÚœµÁ¶øŽÞ£,!oT¬cOÌWŽÑøpk9VY4çÃ6ÈÒgiºFúEöQQŠœqhzK}¹3´Qg'#öܼ3÷ë*È»–ž9/©õÈ¡C… 2q.±~ÿvnŒÌÔ=]«ööò®×\çDÿÈÔ³©àÕK½2³ço£p+ãß#÷Z’Ž–žÿÈñX}†·„Üíc1Om_9zo·íÚ(Ãc„ΣîåeûÀ)nÝGŸÁE÷ѺÑÚ=ÉÊø­ôͨOfeGöò—…zÖ$º·X¿Ïà‡§™ƒ$[-57ŒàÕó “¹k^žK±ÅTU_ôæ/Oûh½î=þ·«\óò—rÓÜ¿:£´}<ñ0Z¿Ñ{Ïì~Ìë÷#ê·LŸÖoÄ^:sÞš‘3'--îªý§bÍFúôe¡Þ³Ñ(æŒÞÓGú¤¹}k®<6÷ÙWeÏTæðÕjª>ïíáQ}žë=Ÿ“|µ"4w]¹”šii{ÒmQï9_v/ºd;WíW²wm=ú SãFåVå¥jUDñÄÛ¿G–Ô–É—½²GаoY>RµŸ_^ž#~óÚcInä»y4æ&ÏœGગÏ;iûIÚ‰LŽ[¢O¬iIº¬©›¬µí©Uæ"¯o¢o{ÇX1¿„ýØ´´3¼*Zzí3âLinÏWužW…CžxŽRv.™}}¯/Ðó—9êK”ïmÏöë¡j|[Âb)¼æ¦HŒ/ažÕ:hŸEâß\±·:+s®é¡Ê߃Ëô™³&Ñr}fO½4º úÍ]ëõòÑ|£÷¼T?çg¸©æ;¢Ϫz?£ËÒã4BK¨‹·µßïÅ3n:õê<·ÌQ䎥Ÿ»hr²ydäœGå›9È[#fæU]cxe\F²ì\U[õ¬IïÞÌÛ÷§²¦¶9Ÿ‘ùmDŽ™«fž³6÷öûsÝÀ 횇·ÇFÖ=|ß‹ŸHUë8çYÒølCÞÒrÍæ6¢žµxnë|ˆk¯¨u·}Oii~­‘ÇîKˆ nüˆß½Íè1§ÜÔ{~iáËœgSÛ®-ç–×kÛŠûTKöó%ë&‘v<"×i|¬séÈ~‚ãÕ³o}®Øãû™ñÑ{‘þÛ¼gâÝ â{ תϽz¨'–4͵WÐ>'^MsÝ“ÑüÞSCY×¢úD© kªög•5ef_‘[éÏ^âµ¥kJ4'¶y~ÕKÛæÃáÆ6ö@saЬôÿ,á«æphotutils-0.4/photutils/datasets/data/README.rst0000644000214200020070000000025612410036146024036 0ustar lbradleySTSCI\science00000000000000This folder contains data files bundled with `photutils`. fermi_counts.fits.gz -------------------- Fermi LAT counts image created by Christoph Deil (copied from Gammapy). photutils-0.4/photutils/datasets/load.py0000644000214200020070000002023413175634532022741 0ustar lbradleySTSCI\science00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst """ Load example datasets. """ from __future__ import (absolute_import, division, print_function, unicode_literals) try: # python >= 3 from urllib.error import HTTPError, URLError except ImportError: # python 2 from urllib2 import HTTPError, URLError from astropy.io import fits from astropy.table import Table from astropy.utils.data import get_pkg_data_filename, download_file __all__ = ['get_path', 'load_spitzer_image', 'load_spitzer_catalog', 'load_irac_psf', 'load_fermi_image', 'load_star_image'] def get_path(filename, location='local', cache=True, show_progress=False): """ Get path (location on your disk) for a given file. Parameters ---------- filename : str File name in the local or remote data folder. location : {'local', 'remote', 'photutils-datasets'} File location. ``'local'`` means bundled with ``photutils``. 
``'remote'`` means the astropy data server (or the photutils-datasets repo as a backup) or the Astropy cache on your machine. ``'photutils-datasets'`` means the photutils-datasets repo or the Astropy cache on your machine. cache : bool, optional Whether to cache the contents of remote URLs. Default is `True`. show_progress : bool, optional Whether to display a progress bar during the download (default is `False`). Returns ------- path : str Path (location on your disk) of the file. Examples -------- >>> from astropy.io import fits >>> from photutils import datasets >>> hdulist = fits.open(datasets.get_path('fermi_counts.fits.gz')) """ datasets_url = ('https://github.com/astropy/photutils-datasets/raw/' 'master/data/{0}'.format(filename)) if location == 'local': path = get_pkg_data_filename('data/' + filename) elif location == 'remote': # pragma: no cover try: url = 'https://data.astropy.org/photometry/{0}'.format(filename) path = download_file(url, cache=cache, show_progress=show_progress) except (URLError, HTTPError): # timeout or not found path = download_file(datasets_url, cache=cache, show_progress=show_progress) elif location == 'photutils-datasets': # pragma: no cover path = download_file(datasets_url, cache=cache, show_progress=show_progress) else: raise ValueError('Invalid location: {0}'.format(location)) return path def load_spitzer_image(show_progress=False): # pragma: no cover """ Load a 4.5 micron Spitzer image. The catalog for this image is returned by :func:`load_spitzer_catalog`. Parameters ---------- show_progress : bool, optional Whether to display a progress bar during the download (default is `False`). Returns ------- hdu : `~astropy.io.fits.ImageHDU` The 4.5 micron Spitzer image in a FITS image HDU. See Also -------- load_spitzer_catalog Examples -------- .. plot:: :include-source: from photutils import datasets hdu = datasets.load_spitzer_image() plt.imshow(hdu.data, origin='lower', vmax=50) """ path = get_path('spitzer_example_image.fits', location='remote', show_progress=show_progress) hdu = fits.open(path)[0] return hdu def load_spitzer_catalog(show_progress=False): # pragma: no cover """ Load a 4.5 micron Spitzer catalog. The image from which this catalog was derived is returned by :func:`load_spitzer_image`. Parameters ---------- show_progress : bool, optional Whether to display a progress bar during the download (default is `False`). Returns ------- catalog : `~astropy.table.Table` The catalog of sources. See Also -------- load_spitzer_image Examples -------- .. plot:: :include-source: from photutils import datasets catalog = datasets.load_spitzer_catalog() plt.scatter(catalog['l'], catalog['b']) plt.xlabel('Galactic l') plt.ylabel('Galactic b') plt.xlim(18.39, 18.05) plt.ylim(0.13, 0.30) """ path = get_path('spitzer_example_catalog.xml', location='remote', show_progress=show_progress) table = Table.read(path) return table def load_irac_psf(channel, show_progress=False): # pragma: no cover """ Load a Spitzer IRAC PSF image. Parameters ---------- channel : int (1-4) The IRAC channel number: * Channel 1: 3.6 microns * Channel 2: 4.5 microns * Channel 3: 5.8 microns * Channel 4: 8.0 microns show_progress : bool, optional Whether to display a progress bar during the download (default is `False`). Returns ------- hdu : `~astropy.io.fits.ImageHDU` The IRAC PSF in a FITS image HDU. Examples -------- .. 
plot:: :include-source: from astropy.visualization import LogStretch, ImageNormalize from photutils.datasets import load_irac_psf hdu1 = load_irac_psf(1) hdu2 = load_irac_psf(2) hdu3 = load_irac_psf(3) hdu4 = load_irac_psf(4) norm = ImageNormalize(hdu1.data, stretch=LogStretch()) fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2) ax1.imshow(hdu1.data, origin='lower', interpolation='nearest', norm=norm) ax1.set_title('IRAC Ch1 PSF') ax2.imshow(hdu2.data, origin='lower', interpolation='nearest', norm=norm) ax2.set_title('IRAC Ch2 PSF') ax3.imshow(hdu3.data, origin='lower', interpolation='nearest', norm=norm) ax3.set_title('IRAC Ch3 PSF') ax4.imshow(hdu4.data, origin='lower', interpolation='nearest', norm=norm) ax4.set_title('IRAC Ch4 PSF') plt.tight_layout() plt.show() """ channel = int(channel) if channel < 1 or channel > 4: raise ValueError('channel must be 1, 2, 3, or 4') fn = 'irac_ch{0}_flight.fits'.format(channel) path = get_path(fn, location='remote', show_progress=show_progress) hdu = fits.open(path)[0] return hdu def load_fermi_image(show_progress=False): """ Load a Fermi counts image for the Galactic center region. Parameters ---------- show_progress : bool, optional Whether to display a progress bar during the download (default is `False`). Returns ------- hdu : `~astropy.io.fits.ImageHDU` A FITS image HDU. Examples -------- .. plot:: :include-source: from photutils import datasets hdu = datasets.load_fermi_image() plt.imshow(hdu.data, vmax=10, origin='lower', interpolation='nearest') """ path = get_path('fermi_counts.fits.gz', location='local', show_progress=show_progress) hdu = fits.open(path)[1] return hdu def load_star_image(show_progress=False): # pragma: no cover """ Load an optical image of stars. This is an image of M67 from photographic data obtained as part of the National Geographic Society - Palomar Observatory Sky Survey (NGS-POSS). The image was digitized from the POSS-I Red plates as part of the Digitized Sky Survey produced at the Space Telescope Science Institute. Parameters ---------- show_progress : bool, optional Whether to display a progress bar during the download (default is `False`). Returns ------- hdu : `~astropy.io.fits.ImageHDU` The M67 image in a FITS image HDU. Examples -------- .. plot:: :include-source: from photutils import datasets hdu = datasets.load_star_image() plt.imshow(hdu.data, origin='lower', interpolation='nearest') """ path = get_path('M6707HH.fits', location='remote', show_progress=show_progress) hdu = fits.open(path)[0] return hdu photutils-0.4/photutils/datasets/make.py0000644000214200020070000006354313175634532022751 0ustar lbradleySTSCI\science00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst """ Make example datasets. """ from __future__ import (absolute_import, division, print_function, unicode_literals) from collections import OrderedDict import numpy as np from astropy.convolution import discretize_model from astropy.io import fits from astropy.modeling.models import Gaussian2D from astropy.table import Table from astropy.wcs import WCS from ..utils import check_random_state __all__ = ['apply_poisson_noise', 'make_noise_image', 'make_random_models_table', 'make_random_gaussians_table', 'make_model_sources_image', 'make_gaussian_sources_image', 'make_4gaussians_image', 'make_100gaussians_image', 'make_wcs', 'make_imagehdu'] def apply_poisson_noise(data, random_state=None): """ Apply Poisson noise to an array, where the value of each element in the input array represents the expected number of counts. 
Each pixel in the output array is generated by drawing a random sample from a Poisson distribution whose expectation value is given by the pixel value in the input array. Parameters ---------- data : array-like The array on which to apply Poisson noise. Every pixel in the array must have a positive value (i.e. counts). random_state : int or `~numpy.random.RandomState`, optional Pseudo-random number generator state used for random sampling. Returns ------- result : `~numpy.ndarray` The data array after applying Poisson noise. See Also -------- make_noise_image Examples -------- .. plot:: :include-source: from photutils.datasets import make_4gaussians_image from photutils.datasets import apply_poisson_noise data1 = make_4gaussians_image(noise=False) data2 = apply_poisson_noise(data1, random_state=12345) # plot the images import matplotlib.pyplot as plt fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(8, 8)) ax1.imshow(data1, origin='lower', interpolation='nearest') ax1.set_title('Original image') ax2.imshow(data2, origin='lower', interpolation='nearest') ax2.set_title('Original image with Poisson noise applied') """ data = np.asanyarray(data) if np.any(data < 0): raise ValueError('data must not contain any negative values') prng = check_random_state(random_state) return prng.poisson(data) def make_noise_image(shape, type='gaussian', mean=None, stddev=None, random_state=None): """ Make a noise image containing Gaussian or Poisson noise. Parameters ---------- shape : 2-tuple of int The shape of the output 2D image. type : {'gaussian', 'poisson'} The distribution used to generate the random noise: * ``'gaussian'``: Gaussian distributed noise. * ``'poisson'``: Poisson distributed noise. mean : float The mean of the random distribution. Required for both Gaussian and Poisson noise. The default is 0. stddev : float, optional The standard deviation of the Gaussian noise to add to the output image. Required for Gaussian noise and ignored for Poisson noise (the variance of the Poisson distribution is equal to its mean). random_state : int or `~numpy.random.RandomState`, optional Pseudo-random number generator state used for random sampling. Separate function calls with the same noise parameters and ``random_state`` will generate the identical noise image. Returns ------- image : 2D `~numpy.ndarray` Image containing random noise. See Also -------- apply_poisson_noise Examples -------- .. plot:: :include-source: # make Gaussian and Poisson noise images from photutils.datasets import make_noise_image shape = (100, 100) image1 = make_noise_image(shape, type='gaussian', mean=0., stddev=5.) image2 = make_noise_image(shape, type='poisson', mean=5.) # plot the images import matplotlib.pyplot as plt fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 4)) ax1.imshow(image1, origin='lower', interpolation='nearest') ax1.set_title('Gaussian noise ($\\mu=0$, $\\sigma=5.$)') ax2.imshow(image2, origin='lower', interpolation='nearest') ax2.set_title('Poisson noise ($\\mu=5$)') """ if mean is None: raise ValueError('"mean" must be input') prng = check_random_state(random_state) if type == 'gaussian': if stddev is None: raise ValueError('"stddev" must be input for Gaussian noise') image = prng.normal(loc=mean, scale=stddev, size=shape) elif type == 'poisson': image = prng.poisson(lam=mean, size=shape) else: raise ValueError('Invalid type: {0}. 
Use one of ' '{"gaussian", "poisson"}.'.format(type)) return image def make_random_models_table(n_sources, param_ranges, random_state=None): """ Make a `~astropy.table.Table` containing randomly generated parameters for an Astropy model to simulate a set of sources. Each row of the table corresponds to a source whose parameters are defined by the column names. The parameters are drawn from a uniform distribution over the specified input ranges. The output table can be input into :func:`make_model_sources_image` to create an image containing the model sources. Parameters ---------- n_sources : float The number of random model sources to generate. param_ranges : dict The lower and upper boundaries for each of the model parameters as a `dict` mapping the parameter name to its ``(lower, upper)`` bounds. random_state : int or `~numpy.random.RandomState`, optional Pseudo-random number generator state used for random sampling. Returns ------- table : `~astropy.table.Table` A table of parameters for the randomly generated sources. Each row of the table corresponds to a source whose model parameters are defined by the column names. The column names will be the keys of the dictionary ``param_ranges``. See Also -------- make_random_gaussians_table, make_model_sources_image Notes ----- To generate identical parameter values from separate function calls, ``param_ranges`` must be input as an `~collections.OrderedDict` with the same parameter ranges and ``random_state`` must be the same. Examples -------- >>> from collections import OrderedDict >>> from photutils.datasets import make_random_models_table >>> n_sources = 5 >>> param_ranges = [('amplitude', [500, 1000]), ... ('x_mean', [0, 500]), ... ('y_mean', [0, 300]), ... ('x_stddev', [1, 5]), ... ('y_stddev', [1, 5]), ... ('theta', [0, np.pi])] >>> param_ranges = OrderedDict(param_ranges) >>> sources = make_random_models_table(n_sources, param_ranges, ... random_state=12345) >>> print(sources) amplitude x_mean y_mean ... y_stddev theta ------------- ------------- ------------- ... ------------- -------------- 964.808046409 297.77235149 224.314442781 ... 3.56990131158 2.29238586176 658.187777291 482.257259868 288.392020822 ... 3.86981448325 3.12278892062 591.959405839 326.588548436 2.51648938247 ... 2.87039602888 2.12646148032 602.280139277 374.453318767 31.9333130093 ... 2.30233871016 2.48444221236 783.862514541 326.784935426 89.6111141308 ... 2.75857842354 0.536942976674 """ prng = check_random_state(random_state) sources = Table() for param_name, (lower, upper) in param_ranges.items(): # Generate a column for every item in param_ranges, even if it # is not in the model (e.g. flux). However, such columns will # be ignored when rendering the image. sources[param_name] = prng.uniform(lower, upper, n_sources) return sources def make_random_gaussians_table(n_sources, param_ranges, random_state=None): """ Make a `~astropy.table.Table` containing randomly generated parameters for 2D Gaussian sources. Each row of the table corresponds to a Gaussian source whose parameters are defined by the column names. The parameters are drawn from a uniform distribution over the specified input ranges. The output table can be input into :func:`make_gaussian_sources_image` to create an image containing the 2D Gaussian sources. Parameters ---------- n_sources : float The number of random Gaussian sources to generate. 
param_ranges : dict The lower and upper boundaries for each of the `~astropy.modeling.functional_models.Gaussian2D` parameters as a `dict` mapping the parameter name to its ``(lower, upper)`` bounds. The dictionary keys must be valid `~astropy.modeling.functional_models.Gaussian2D` parameter names or ``'flux'``. If ``'flux'`` is specified, but not ``'amplitude'`` then the 2D Gaussian amplitudes will be calculated and placed in the output table. If both ``'flux'`` and ``'amplitude'`` are specified, then ``'flux'`` will be ignored. Model parameters not defined in ``param_ranges`` will be set to the default value. random_state : int or `~numpy.random.RandomState`, optional Pseudo-random number generator state used for random sampling. Returns ------- table : `~astropy.table.Table` A table of parameters for the randomly generated Gaussian sources. Each row of the table corresponds to a Gaussian source whose parameters are defined by the column names. See Also -------- make_random_models_table, make_gaussian_sources_image Notes ----- To generate identical parameter values from separate function calls, ``param_ranges`` must be input as an `~collections.OrderedDict` with the same parameter ranges and ``random_state`` must be the same. Examples -------- >>> from collections import OrderedDict >>> from photutils.datasets import make_random_gaussians_table >>> n_sources = 5 >>> param_ranges = [('amplitude', [500, 1000]), ... ('x_mean', [0, 500]), ... ('y_mean', [0, 300]), ... ('x_stddev', [1, 5]), ... ('y_stddev', [1, 5]), ... ('theta', [0, np.pi])] >>> param_ranges = OrderedDict(param_ranges) >>> sources = make_random_gaussians_table(n_sources, param_ranges, ... random_state=12345) >>> print(sources) amplitude x_mean y_mean ... y_stddev theta ------------- ------------- ------------- ... ------------- -------------- 964.808046409 297.77235149 224.314442781 ... 3.56990131158 2.29238586176 658.187777291 482.257259868 288.392020822 ... 3.86981448325 3.12278892062 591.959405839 326.588548436 2.51648938247 ... 2.87039602888 2.12646148032 602.280139277 374.453318767 31.9333130093 ... 2.30233871016 2.48444221236 783.862514541 326.784935426 89.6111141308 ... 2.75857842354 0.536942976674 To specifying the flux range instead of the amplitude range: >>> param_ranges = [('flux', [500, 1000]), ... ('x_mean', [0, 500]), ... ('y_mean', [0, 300]), ... ('x_stddev', [1, 5]), ... ('y_stddev', [1, 5]), ... ('theta', [0, np.pi])] >>> param_ranges = OrderedDict(param_ranges) >>> sources = make_random_gaussians_table(n_sources, param_ranges, ... random_state=12345) >>> print(sources) flux x_mean y_mean ... theta amplitude ------------- ------------- ------------- ... -------------- ------------- 964.808046409 297.77235149 224.314442781 ... 2.29238586176 11.8636845806 658.187777291 482.257259868 288.392020822 ... 3.12278892062 6.38543882684 591.959405839 326.588548436 2.51648938247 ... 2.12646148032 7.31222089567 602.280139277 374.453318767 31.9333130093 ... 2.48444221236 8.56917814506 783.862514541 326.784935426 89.6111141308 ... 0.536942976674 11.6117069638 Note that in this case the output table contains both a flux and amplitude column. The flux column will be ignored when generating an image of the models using :func:`make_gaussian_sources_image`. 
""" sources = make_random_models_table(n_sources, param_ranges, random_state=random_state) # convert Gaussian2D flux to amplitude if 'flux' in param_ranges and 'amplitude' not in param_ranges: model = Gaussian2D(x_stddev=1, y_stddev=1) if 'x_stddev' in sources.colnames: xstd = sources['x_stddev'] else: xstd = model.x_stddev.value # default if 'y_stddev' in sources.colnames: ystd = sources['y_stddev'] else: ystd = model.y_stddev.value # default sources = sources.copy() sources['amplitude'] = sources['flux'] / (2. * np.pi * xstd * ystd) return sources def make_model_sources_image(shape, model, source_table, oversample=1): """ Make an image containing sources generated from a user-specified model. Parameters ---------- shape : 2-tuple of int The shape of the output 2D image. model : 2D astropy.modeling.models object The model to be used for rendering the sources. source_table : `~astropy.table.Table` Table of parameters for the sources. Each row of the table corresponds to a source whose model parameters are defined by the column names, which must match the model parameter names. Column names that do not match model parameters will be ignored. Model parameters not defined in the table will be set to the ``model`` default value. oversample : float, optional The sampling factor used to discretize the models on a pixel grid. If the value is 1.0 (the default), then the models will be discretized by taking the value at the center of the pixel bin. Note that this method will not preserve the total flux of very small sources. Otherwise, the models will be discretized by taking the average over an oversampled grid. The pixels will be oversampled by the ``oversample`` factor. Returns ------- image : 2D `~numpy.ndarray` Image containing model sources. See Also -------- make_random_models_table, make_gaussian_sources_image Examples -------- .. plot:: :include-source: from collections import OrderedDict from astropy.modeling.models import Moffat2D from photutils.datasets import (make_random_models_table, make_model_sources_image) model = Moffat2D() n_sources = 10 shape = (100, 100) param_ranges = [('amplitude', [100, 200]), ('x_0', [0, shape[1]]), ('y_0', [0, shape[0]]), ('gamma', [5, 10]), ('alpha', [1, 2])] param_ranges = OrderedDict(param_ranges) sources = make_random_models_table(n_sources, param_ranges, random_state=12345) data = make_model_sources_image(shape, model, sources) plt.imshow(data) """ image = np.zeros(shape, dtype=np.float64) y, x = np.indices(shape) params_to_set = [] for param in source_table.colnames: if param in model.param_names: params_to_set.append(param) # Save the initial parameter values so we can set them back when # done with the loop. It's best not to copy a model, because some # models (e.g. PSF models) may have substantial amounts of data in # them. init_params = {param: getattr(model, param) for param in params_to_set} try: for i, source in enumerate(source_table): for param in params_to_set: setattr(model, param, source[param]) if oversample == 1: image += model(x, y) else: image += discretize_model(model, (0, shape[1]), (0, shape[0]), mode='oversample', factor=oversample) finally: for param, value in init_params.items(): setattr(model, param, value) return image def make_gaussian_sources_image(shape, source_table, oversample=1): """ Make an image containing 2D Gaussian sources. Parameters ---------- shape : 2-tuple of int The shape of the output 2D image. source_table : `~astropy.table.Table` Table of parameters for the Gaussian sources. 
Each row of the table corresponds to a Gaussian source whose parameters are defined by the column names. With the exception of ``'flux'``, column names that do not match model parameters will be ignored (flux will be converted to amplitude). If both ``'flux'`` and ``'amplitude'`` are present, then ``'flux'`` will be ignored. Model parameters not defined in the table will be set to the default value. oversample : float, optional The sampling factor used to discretize the models on a pixel grid. If the value is 1.0 (the default), then the models will be discretized by taking the value at the center of the pixel bin. Note that this method will not preserve the total flux of very small sources. Otherwise, the models will be discretized by taking the average over an oversampled grid. The pixels will be oversampled by the ``oversample`` factor. Returns ------- image : 2D `~numpy.ndarray` Image containing 2D Gaussian sources. See Also -------- make_model_sources_image, make_random_gaussians_table Examples -------- .. plot:: :include-source: # make a table of Gaussian sources from astropy.table import Table table = Table() table['amplitude'] = [50, 70, 150, 210] table['x_mean'] = [160, 25, 150, 90] table['y_mean'] = [70, 40, 25, 60] table['x_stddev'] = [15.2, 5.1, 3., 8.1] table['y_stddev'] = [2.6, 2.5, 3., 4.7] table['theta'] = np.array([145., 20., 0., 60.]) * np.pi / 180. # make an image of the sources without noise, with Gaussian # noise, and with Poisson noise from photutils.datasets import make_gaussian_sources_image from photutils.datasets import make_noise_image shape = (100, 200) image1 = make_gaussian_sources_image(shape, table) image2 = image1 + make_noise_image(shape, type='gaussian', mean=5., stddev=5.) image3 = image1 + make_noise_image(shape, type='poisson', mean=5.) # plot the images import matplotlib.pyplot as plt fig, (ax1, ax2, ax3) = plt.subplots(3, 1, figsize=(8, 12)) ax1.imshow(image1, origin='lower', interpolation='nearest') ax1.set_title('Original image') ax2.imshow(image2, origin='lower', interpolation='nearest') ax2.set_title('Original image with added Gaussian noise' ' ($\\mu = 5, \\sigma = 5$)') ax3.imshow(image3, origin='lower', interpolation='nearest') ax3.set_title('Original image with added Poisson noise ($\\mu = 5$)') """ model = Gaussian2D(x_stddev=1, y_stddev=1) if 'x_stddev' in source_table.colnames: xstd = source_table['x_stddev'] else: xstd = model.x_stddev.value # default if 'y_stddev' in source_table.colnames: ystd = source_table['y_stddev'] else: ystd = model.y_stddev.value # default colnames = source_table.colnames if 'flux' in colnames and 'amplitude' not in colnames: source_table = source_table.copy() source_table['amplitude'] = (source_table['flux'] / (2. * np.pi * xstd * ystd)) return make_model_sources_image(shape, model, source_table, oversample=oversample) def make_4gaussians_image(noise=True): """ Make an example image containing four 2D Gaussians plus a constant background. The background has a mean of 5. If ``noise`` is `True`, then Gaussian noise with a mean of 0 and a standard deviation of 5 is added to the output image. Parameters ---------- noise : bool, optional Whether to include noise in the output image (default is `True`). Returns ------- image : 2D `~numpy.ndarray` Image containing four 2D Gaussian sources. See Also -------- make_100gaussians_image Examples -------- .. 
plot:: :include-source: from photutils import datasets image = datasets.make_4gaussians_image() plt.imshow(image, origin='lower', interpolation='nearest') """ table = Table() table['amplitude'] = [50, 70, 150, 210] table['x_mean'] = [160, 25, 150, 90] table['y_mean'] = [70, 40, 25, 60] table['x_stddev'] = [15.2, 5.1, 3., 8.1] table['y_stddev'] = [2.6, 2.5, 3., 4.7] table['theta'] = np.array([145., 20., 0., 60.]) * np.pi / 180. shape = (100, 200) data = make_gaussian_sources_image(shape, table) + 5. if noise: data += make_noise_image(shape, type='gaussian', mean=0., stddev=5., random_state=12345) return data def make_100gaussians_image(noise=True): """ Make an example image containing 100 2D Gaussians plus a constant background. The background has a mean of 5. If ``noise`` is `True`, then Gaussian noise with a mean of 0 and a standard deviation of 2 is added to the output image. Parameters ---------- noise : bool, optional Whether to include noise in the output image (default is `True`). Returns ------- image : 2D `~numpy.ndarray` Image containing 100 2D Gaussian sources. See Also -------- make_4gaussians_image Examples -------- .. plot:: :include-source: from photutils import datasets image = datasets.make_100gaussians_image() plt.imshow(image, origin='lower', interpolation='nearest') """ n_sources = 100 flux_range = [500, 1000] xmean_range = [0, 500] ymean_range = [0, 300] xstddev_range = [1, 5] ystddev_range = [1, 5] params = OrderedDict([('flux', flux_range), ('x_mean', xmean_range), ('y_mean', ymean_range), ('x_stddev', xstddev_range), ('y_stddev', ystddev_range), ('theta', [0, 2*np.pi])]) sources = make_random_gaussians_table(n_sources, params, random_state=12345) shape = (300, 500) data = make_gaussian_sources_image(shape, sources) + 5. if noise: data += make_noise_image(shape, type='gaussian', mean=0., stddev=2., random_state=12345) return data def make_wcs(shape, galactic=False): """ Create a simple celestial WCS object in either the ICRS or Galactic coordinate frame. Parameters ---------- shape : 2-tuple of int The shape of the 2D array to be used with the output `~astropy.wcs.WCS` object. galactic : bool, optional If `True`, then the output WCS will be in the Galactic coordinate frame. If `False` (default), then the output WCS will be in the ICRS coordinate frame. Returns ------- wcs : `~astropy.wcs.WCS` object The world coordinate system (WCS) transformation. See Also -------- make_imagehdu Examples -------- >>> from photutils.datasets import make_wcs >>> shape = (100, 100) >>> wcs = make_wcs(shape) >>> print(wcs.wcs.crpix) [ 50. 50.] >>> print(wcs.wcs.crval) [ 197.8925 -1.36555556] """ wcs = WCS(naxis=2) rho = np.pi / 3. scale = 0.1 / 3600. wcs._naxis1 = shape[1] # nx wcs._naxis2 = shape[0] # ny wcs.wcs.crpix = [shape[1] / 2, shape[0] / 2] # 1-indexed (x, y) wcs.wcs.crval = [197.8925, -1.36555556] wcs.wcs.cunit = ['deg', 'deg'] wcs.wcs.cd = [[-scale * np.cos(rho), scale * np.sin(rho)], [scale * np.sin(rho), scale * np.cos(rho)]] if not galactic: wcs.wcs.radesys = 'ICRS' wcs.wcs.ctype = ['RA---TAN', 'DEC--TAN'] else: wcs.wcs.ctype = ['GLON-CAR', 'GLAT-CAR'] return wcs def make_imagehdu(data, wcs=None): """ Create a FITS `~astropy.io.fits.ImageHDU` containing the input 2D image. Parameters ---------- data : 2D array-like The input 2D data. wcs : `~astropy.wcs.WCS`, optional The world coordinate system (WCS) transformation to include in the output FITS header. Returns ------- image_hdu : `~astropy.io.fits.ImageHDU` The FITS `~astropy.io.fits.ImageHDU`. 
See Also -------- make_wcs Examples -------- >>> from photutils.datasets import make_imagehdu, make_wcs >>> shape = (100, 100) >>> data = np.ones(shape) >>> wcs = make_wcs(shape) >>> hdu = make_imagehdu(data, wcs=wcs) >>> print(hdu.data.shape) (100, 100) """ data = np.asanyarray(data) if data.ndim != 2: raise ValueError('data must be a 2D array') if wcs is not None: header = wcs.to_header() else: header = None return fits.ImageHDU(data, header=header) photutils-0.4/photutils/datasets/setup_package.py0000644000214200020070000000021012721610567024623 0ustar lbradleySTSCI\science00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst def get_package_data(): return {'photutils.datasets': ['data/*']} photutils-0.4/photutils/datasets/tests/0000755000214200020070000000000013175654702022612 5ustar lbradleySTSCI\science00000000000000photutils-0.4/photutils/datasets/tests/__init__.py0000644000214200020070000000017013055576313024717 0ustar lbradleySTSCI\science00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst """ This package contains affiliated package tests. """ photutils-0.4/photutils/datasets/tests/test_load.py0000644000214200020070000000122613175634532025142 0ustar lbradleySTSCI\science00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst from __future__ import (absolute_import, division, print_function, unicode_literals) import pytest from astropy.tests.helper import remote_data from .. import load, get_path def test_get_path(): with pytest.raises(ValueError): get_path('filename', location='invalid') def test_load_fermi_image(): hdu = load.load_fermi_image() assert len(hdu.header) == 81 assert hdu.data.shape == (201, 401) @remote_data def test_load_star_image(): hdu = load.load_star_image() assert len(hdu.header) == 104 assert hdu.data.shape == (1059, 1059) photutils-0.4/photutils/datasets/tests/test_make.py0000644000214200020070000001151313175634532025140 0ustar lbradleySTSCI\science00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst from __future__ import (absolute_import, division, print_function, unicode_literals) import numpy as np from numpy.testing import assert_allclose import pytest from astropy.table import Table from astropy.modeling.models import Moffat2D from .. import (make_noise_image, apply_poisson_noise, make_gaussian_sources_image, make_random_gaussians_table, make_4gaussians_image, make_100gaussians_image, make_random_models_table, make_model_sources_image, make_wcs) TABLE = Table() TABLE['flux'] = [1, 2, 3] TABLE['x_mean'] = [30, 50, 70.5] TABLE['y_mean'] = [50, 50, 50.5] TABLE['x_stddev'] = [1, 2, 3.5] TABLE['y_stddev'] = [2, 1, 3.5] TABLE['theta'] = np.array([0., 30, 50]) * np.pi / 180. def test_make_noise_image(): shape = (100, 100) image = make_noise_image(shape, 'gaussian', mean=0., stddev=2.) assert image.shape == shape assert_allclose(image.mean(), 0., atol=1.) def test_make_noise_image_poisson(): shape = (100, 100) image = make_noise_image(shape, 'poisson', mean=1.) assert image.shape == shape assert_allclose(image.mean(), 1., atol=1.) def test_make_noise_image_nomean(): """Test if ValueError raises if mean is not input.""" with pytest.raises(ValueError): shape = (100, 100) make_noise_image(shape, 'gaussian', stddev=2.) def test_make_noise_image_nostddev(): """ Test if ValueError raises if stddev is not input for Gaussian noise. """ with pytest.raises(ValueError): shape = (100, 100) make_noise_image(shape, 'gaussian', mean=2.) 
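# Illustrative sketch (not part of the original test suite): noise images
# made with the same ``random_state`` seed are reproducible, so two separate
# calls with identical arguments return identical arrays.
def test_make_noise_image_reproducible():
    shape = (10, 10)
    image1 = make_noise_image(shape, 'gaussian', mean=0., stddev=2.,
                              random_state=12345)
    image2 = make_noise_image(shape, 'gaussian', mean=0., stddev=2.,
                              random_state=12345)
    assert_allclose(image1, image2)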
def test_apply_poisson_noise(): shape = (100, 100) data = np.ones(shape) result = apply_poisson_noise(data) assert result.shape == shape assert_allclose(result.mean(), 1., atol=1.) def test_apply_poisson_noise_negative(): """Test if negative image values raises ValueError.""" with pytest.raises(ValueError): shape = (100, 100) data = np.zeros(shape) - 1. apply_poisson_noise(data) def test_make_gaussian_sources_image(): shape = (100, 100) image = make_gaussian_sources_image(shape, TABLE) assert image.shape == shape assert_allclose(image.sum(), TABLE['flux'].sum()) def test_make_gaussian_sources_image_amplitude(): table = TABLE.copy() table.remove_column('flux') table['amplitude'] = [1, 2, 3] shape = (100, 100) image = make_gaussian_sources_image(shape, table) assert image.shape == shape def test_make_gaussian_sources_image_oversample(): shape = (100, 100) image = make_gaussian_sources_image(shape, TABLE, oversample=10) assert image.shape == shape assert_allclose(image.sum(), TABLE['flux'].sum()) def test_make_random_gaussians_table(): n_sources = 5 param_ranges = dict([('amplitude', [500, 1000]), ('x_mean', [0, 500]), ('y_mean', [0, 300]), ('x_stddev', [1, 5]), ('y_stddev', [1, 5]), ('theta', [0, np.pi])]) table = make_random_gaussians_table(n_sources, param_ranges, random_state=12345) assert len(table) == n_sources def test_make_random_gaussians_table_flux(): n_sources = 5 param_ranges = dict([('flux', [500, 1000]), ('x_mean', [0, 500]), ('y_mean', [0, 300]), ('x_stddev', [1, 5]), ('y_stddev', [1, 5]), ('theta', [0, np.pi])]) table = make_random_gaussians_table(n_sources, param_ranges, random_state=12345) assert 'amplitude' in table.colnames assert len(table) == n_sources def test_make_4gaussians_image(): shape = (100, 200) data_sum = 176219.18059091491 image = make_4gaussians_image() assert image.shape == shape assert_allclose(image.sum(), data_sum, rtol=1.e-6) def test_make_100gaussians_image(): shape = (300, 500) data_sum = 826182.24501251709 image = make_100gaussians_image() assert image.shape == shape assert_allclose(image.sum(), data_sum, rtol=1.e-6) def test_make_random_models_table(): model = Moffat2D(amplitude=1) param_ranges = {'x_0': (0, 300), 'y_0': (0, 500), 'gamma': (1, 3), 'alpha': (1.5, 3)} source_table = make_random_models_table(10, param_ranges) # most of the make_model_sources_image options are exercised in the # make_gaussian_sources_image tests image = make_model_sources_image((300, 500), model, source_table) assert image.sum() > 1 def test_make_wcs(): shape = (100, 200) wcs = make_wcs(shape) assert wcs._naxis1 == shape[1] assert wcs._naxis2 == shape[0] assert wcs.wcs.radesys == 'ICRS' wcs = make_wcs(shape, galactic=True) assert wcs.wcs.ctype[0] == 'GLON-CAR' assert wcs.wcs.ctype[1] == 'GLAT-CAR' photutils-0.4/photutils/detection/0000755000214200020070000000000013175654702021616 5ustar lbradleySTSCI\science00000000000000photutils-0.4/photutils/detection/__init__.py0000644000214200020070000000035713175634532023733 0ustar lbradleySTSCI\science00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst """ This subpackage contains modules and packages for identifying sources in an astronomical image. 
""" from .core import * # noqa from .findstars import * # noqa photutils-0.4/photutils/detection/core.py0000644000214200020070000002760113175634532023125 0ustar lbradleySTSCI\science00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst """Functions for detecting sources in an astronomical image.""" from __future__ import (absolute_import, division, print_function, unicode_literals) import numpy as np from astropy.stats import sigma_clipped_stats from astropy.table import Column, Table from ..utils.cutouts import cutout_footprint from ..utils.wcs_helpers import pixel_to_icrs_coords __all__ = ['detect_threshold', 'find_peaks'] def detect_threshold(data, snr, background=None, error=None, mask=None, mask_value=None, sigclip_sigma=3.0, sigclip_iters=None): """ Calculate a pixel-wise threshold image that can be used to detect sources. Parameters ---------- data : array_like The 2D array of the image. snr : float The signal-to-noise ratio per pixel above the ``background`` for which to consider a pixel as possibly being part of a source. background : float or array_like, optional The background value(s) of the input ``data``. ``background`` may either be a scalar value or a 2D image with the same shape as the input ``data``. If the input ``data`` has been background-subtracted, then set ``background`` to ``0.0``. If `None`, then a scalar background value will be estimated using sigma-clipped statistics. error : float or array_like, optional The Gaussian 1-sigma standard deviation of the background noise in ``data``. ``error`` should include all sources of "background" error, but *exclude* the Poisson error of the sources. If ``error`` is a 2D image, then it should represent the 1-sigma background error in each pixel of ``data``. If `None`, then a scalar background rms value will be estimated using sigma-clipped statistics. mask : array_like, bool, optional A boolean mask with the same shape as ``data``, where a `True` value indicates the corresponding element of ``data`` is masked. Masked pixels are ignored when computing the image background statistics. mask_value : float, optional An image data value (e.g., ``0.0``) that is ignored when computing the image background statistics. ``mask_value`` will be ignored if ``mask`` is input. sigclip_sigma : float, optional The number of standard deviations to use as the clipping limit when calculating the image background statistics. sigclip_iters : int, optional The number of iterations to perform sigma clipping, or `None` to clip until convergence is achieved (i.e., continue until the last iteration clips nothing) when calculating the image background statistics. Returns ------- threshold : 2D `~numpy.ndarray` A 2D image with the same shape as ``data`` containing the pixel-wise threshold values. See Also -------- :func:`photutils.segmentation.detect_sources` Notes ----- The ``mask``, ``mask_value``, ``sigclip_sigma``, and ``sigclip_iters`` inputs are used only if it is necessary to estimate ``background`` or ``error`` using sigma-clipped background statistics. If ``background`` and ``error`` are both input, then ``mask``, ``mask_value``, ``sigclip_sigma``, and ``sigclip_iters`` are ignored. 
""" if background is None or error is None: data_mean, data_median, data_std = sigma_clipped_stats( data, mask=mask, mask_value=mask_value, sigma=sigclip_sigma, iters=sigclip_iters) bkgrd_image = np.zeros_like(data) + data_mean bkgrdrms_image = np.zeros_like(data) + data_std if background is None: background = bkgrd_image else: if np.isscalar(background): background = np.zeros_like(data) + background else: if background.shape != data.shape: raise ValueError('If input background is 2D, then it ' 'must have the same shape as the input ' 'data.') if error is None: error = bkgrdrms_image else: if np.isscalar(error): error = np.zeros_like(data) + error else: if error.shape != data.shape: raise ValueError('If input error is 2D, then it ' 'must have the same shape as the input ' 'data.') return background + (error * snr) def find_peaks(data, threshold, box_size=3, footprint=None, mask=None, border_width=None, npeaks=np.inf, subpixel=False, error=None, wcs=None): """ Find local peaks in an image that are above above a specified threshold value. Peaks are the maxima above the ``threshold`` within a local region. The regions are defined by either the ``box_size`` or ``footprint`` parameters. ``box_size`` defines the local region around each pixel as a square box. ``footprint`` is a boolean array where `True` values specify the region shape. If multiple pixels within a local region have identical intensities, then the coordinates of all such pixels are returned. Otherwise, there will be only one peak pixel per local region. Thus, the defined region effectively imposes a minimum separation between peaks (unless there are identical peaks within the region). When using subpixel precision (``subpixel=True``), then a cutout of the specified ``box_size`` or ``footprint`` will be taken centered on each peak and fit with a 2D Gaussian (plus a constant). In this case, the fitted local centroid and peak value (the Gaussian amplitude plus the background constant) will also be returned in the output table. Parameters ---------- data : array_like The 2D array of the image. threshold : float or array-like The data value or pixel-wise data values to be used for the detection threshold. A 2D ``threshold`` must have the same shape as ``data``. See `detect_threshold` for one way to create a ``threshold`` image. box_size : scalar or tuple, optional The size of the local region to search for peaks at every point in ``data``. If ``box_size`` is a scalar, then the region shape will be ``(box_size, box_size)``. Either ``box_size`` or ``footprint`` must be defined. If they are both defined, then ``footprint`` overrides ``box_size``. footprint : `~numpy.ndarray` of bools, optional A boolean array where `True` values describe the local footprint region within which to search for peaks at every point in ``data``. ``box_size=(n, m)`` is equivalent to ``footprint=np.ones((n, m))``. Either ``box_size`` or ``footprint`` must be defined. If they are both defined, then ``footprint`` overrides ``box_size``. mask : array_like, bool, optional A boolean mask with the same shape as ``data``, where a `True` value indicates the corresponding element of ``data`` is masked. border_width : bool, optional The width in pixels to exclude around the border of the ``data``. npeaks : int, optional The maximum number of peaks to return. When the number of detected peaks exceeds ``npeaks``, the peaks with the highest peak intensities will be returned. 
subpixel : bool, optional If `True`, then a cutout of the specified ``box_size`` or ``footprint`` will be taken centered on each peak and fit with a 2D Gaussian (plus a constant). In this case, the fitted local centroid and peak value (the Gaussian amplitude plus the background constant) will also be returned in the output table. error : array_like, optional The 2D array of the 1-sigma errors of the input ``data``. ``error`` is used only to weight the 2D Gaussian fit performed when ``subpixel=True``. wcs : `~astropy.wcs.WCS` The WCS transformation to use to convert from pixel coordinates to ICRS world coordinates. If `None`, then the world coordinates will not be returned in the output `~astropy.table.Table`. Returns ------- output : `~astropy.table.Table` A table containing the x and y pixel location of the peaks and their values. If ``subpixel=True``, then the table will also contain the local centroid and fitted peak value. """ from scipy import ndimage if np.all(data == data.flat[0]): return [] if footprint is not None: data_max = ndimage.maximum_filter(data, footprint=footprint, mode='constant', cval=0.0) else: data_max = ndimage.maximum_filter(data, size=box_size, mode='constant', cval=0.0) peak_goodmask = (data == data_max) # good pixels are True if mask is not None: mask = np.asanyarray(mask) if data.shape != mask.shape: raise ValueError('data and mask must have the same shape') peak_goodmask = np.logical_and(peak_goodmask, ~mask) if border_width is not None: for i in range(peak_goodmask.ndim): peak_goodmask = peak_goodmask.swapaxes(0, i) peak_goodmask[:border_width] = False peak_goodmask[-border_width:] = False peak_goodmask = peak_goodmask.swapaxes(0, i) peak_goodmask = np.logical_and(peak_goodmask, (data > threshold)) y_peaks, x_peaks = peak_goodmask.nonzero() peak_values = data[y_peaks, x_peaks] if len(x_peaks) > npeaks: idx = np.argsort(peak_values)[::-1][:npeaks] x_peaks = x_peaks[idx] y_peaks = y_peaks[idx] peak_values = peak_values[idx] if subpixel: from ..centroids import fit_2dgaussian # prevents circular import x_centroid, y_centroid = [], [] fit_peak_values = [] for (y_peak, x_peak) in zip(y_peaks, x_peaks): rdata, rmask, rerror, slc = cutout_footprint( data, (x_peak, y_peak), box_size=box_size, footprint=footprint, mask=mask, error=error) gaussian_fit = fit_2dgaussian(rdata, mask=rmask, error=rerror) if gaussian_fit is None: x_cen, y_cen, fit_peak_value = np.nan, np.nan, np.nan else: x_cen = slc[1].start + gaussian_fit.x_mean.value y_cen = slc[0].start + gaussian_fit.y_mean.value fit_peak_value = (gaussian_fit.constant.value + gaussian_fit.amplitude.value) x_centroid.append(x_cen) y_centroid.append(y_cen) fit_peak_values.append(fit_peak_value) columns = (x_peaks, y_peaks, peak_values, x_centroid, y_centroid, fit_peak_values) names = ('x_peak', 'y_peak', 'peak_value', 'x_centroid', 'y_centroid', 'fit_peak_value') else: columns = (x_peaks, y_peaks, peak_values) names = ('x_peak', 'y_peak', 'peak_value') table = Table(columns, names=names) if wcs is not None: icrs_ra_peak, icrs_dec_peak = pixel_to_icrs_coords(x_peaks, y_peaks, wcs) table.add_column(Column(icrs_ra_peak, name='icrs_ra_peak'), index=2) table.add_column(Column(icrs_dec_peak, name='icrs_dec_peak'), index=3) if subpixel: icrs_ra_centroid, icrs_dec_centroid = pixel_to_icrs_coords( x_centroid, y_centroid, wcs) idx = table.colnames.index('y_centroid') table.add_column(Column(icrs_ra_centroid, name='icrs_ra_centroid'), index=idx+1) table.add_column(Column(icrs_dec_centroid, name='icrs_dec_centroid'), index=idx+2) 
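    # Note: the output table columns are x_peak, y_peak, peak_value, with
    # icrs_ra_peak/icrs_dec_peak inserted when a ``wcs`` is given, and
    # x_centroid, y_centroid, fit_peak_value (plus their ICRS equivalents)
    # appended when ``subpixel=True``.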
return table photutils-0.4/photutils/detection/findstars.py0000644000214200020070000010223013175634532024162 0ustar lbradleySTSCI\science00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst """ This module implements classes, called Finders, for detecting stars in an astronomical image. The convention is that all Finders are subclasses of an abstract class called ``StarFinderBase``. Each Finder class should define a method called ``find_stars`` that finds stars in an image. """ from __future__ import (absolute_import, division, print_function, unicode_literals) from collections import defaultdict import warnings import math import abc import six import numpy as np from astropy.table import Column, Table from astropy.utils.exceptions import AstropyUserWarning from astropy.utils.misc import InheritDocstrings from astropy.stats import gaussian_fwhm_to_sigma from .core import find_peaks from ..utils.convolution import filter_data __all__ = ['DAOStarFinder', 'IRAFStarFinder', 'StarFinderBase'] class _ABCMetaAndInheritDocstrings(InheritDocstrings, abc.ABCMeta): pass @six.add_metaclass(_ABCMetaAndInheritDocstrings) class StarFinderBase(object): """ Abstract base class for Star Finders. """ def __call__(self, data): return self.find_stars(data) @abc.abstractmethod def find_stars(self, data): """ Find stars in an astronomical image. Parameters ---------- data : array_like The 2D image array. Returns ------- table : `~astropy.table.Table` A table of found objects with the following parameters: * ``id``: unique object identification number. * ``xcentroid, ycentroid``: object centroid. * ``sharpness``: object sharpness. * ``roundness1``: object roundness based on symmetry. * ``roundness2``: object roundness based on marginal Gaussian fits. * ``npix``: number of pixels in the Gaussian kernel. * ``sky``: the input ``sky`` parameter. * ``peak``: the peak, sky-subtracted, pixel value of the object. * ``flux``: the object flux calculated as the peak density in the convolved image divided by the detection threshold. This derivation matches that of `DAOFIND`_ if ``sky`` is 0.0. * ``mag``: the object instrumental magnitude calculated as ``-2.5 * log10(flux)``. The derivation matches that of `DAOFIND`_ if ``sky`` is 0.0. Notes ----- For the convolution step, this routine sets pixels beyond the image borders to 0.0. The equivalent parameters in IRAF's `starfind`_ are ``boundary='constant'`` and ``constant=0.0``. IRAF's `starfind`_ uses ``hwhmpsf``, ``fradius``, and ``sepmin`` as input parameters. The equivalent input values for `~photutils.detection.IRAFStarFinder` are: * ``fwhm = hwhmpsf * 2`` * ``sigma_radius = fradius * sqrt(2.0*log(2.0))`` * ``minsep_fwhm = 0.5 * sepmin`` The main differences between `~photutils.detection.DAOStarFinder` and `~photutils.detection.IRAFStarFinder` are: * `~photutils.detection.IRAFStarFinder` always uses a 2D circular Gaussian kernel, while `~photutils.detection.DAOStarFinder` can use an elliptical Gaussian kernel. * `~photutils.detection.IRAFStarFinder` calculates the objects' centroid, roundness, and sharpness using image moments. .. _DAOFIND: http://stsdas.stsci.edu/cgi-bin/gethelp.cgi?daofind .. _starfind: http://stsdas.stsci.edu/cgi-bin/gethelp.cgi?starfind """ raise NotImplementedError class DAOStarFinder(StarFinderBase): """ Detect stars in an image using the DAOFIND (`Stetson 1987 `_) algorithm. 
DAOFIND (`Stetson 1987; PASP 99, 191 <http://adsabs.harvard.edu/abs/1987PASP...99..191S>`_) searches images for local density maxima that have a peak amplitude greater than ``threshold`` (approximately; ``threshold`` is applied to a convolved image) and have a size and shape similar to the defined 2D Gaussian kernel. The Gaussian kernel is defined by the ``fwhm``, ``ratio``, ``theta``, and ``sigma_radius`` input parameters. ``DAOStarFinder`` finds the object centroid by fitting the marginal x and y 1D distributions of the Gaussian kernel to the marginal x and y distributions of the input (unconvolved) ``data`` image. ``DAOStarFinder`` calculates the object roundness using two methods. The ``roundlo`` and ``roundhi`` bounds are applied to both measures of roundness. The first method (``roundness1``; called ``SROUND`` in `DAOFIND`_) is based on the source symmetry and is the ratio of a measure of the object's bilateral (2-fold) to four-fold symmetry. The second roundness statistic (``roundness2``; called ``GROUND`` in `DAOFIND`_) measures the ratio of the difference in the height of the best fitting Gaussian function in x minus the best fitting Gaussian function in y, divided by the average of the best fitting Gaussian functions in x and y. A circular source will have a zero roundness. A source extended in x or y will have a negative or positive roundness, respectively. The sharpness statistic measures the ratio of the difference between the height of the central pixel and the mean of the surrounding non-bad pixels in the convolved image, to the height of the best fitting Gaussian function at that point. Parameters ---------- threshold : float The absolute image value above which to select sources. fwhm : float The full-width half-maximum (FWHM) of the major axis of the Gaussian kernel in units of pixels. ratio : float, optional The ratio of the minor to major axis standard deviations of the Gaussian kernel. ``ratio`` must be strictly positive and less than or equal to 1.0. The default is 1.0 (i.e., a circular Gaussian kernel). theta : float, optional The position angle (in degrees) of the major axis of the Gaussian kernel measured counter-clockwise from the positive x axis. sigma_radius : float, optional The truncation radius of the Gaussian kernel in units of sigma (standard deviation) [``1 sigma = FWHM / (2.0*sqrt(2.0*log(2.0)))``]. sharplo : float, optional The lower bound on sharpness for object detection. sharphi : float, optional The upper bound on sharpness for object detection. roundlo : float, optional The lower bound on roundness for object detection. roundhi : float, optional The upper bound on roundness for object detection. sky : float, optional The background sky level of the image. Setting ``sky`` affects only the output values of the object ``peak``, ``flux``, and ``mag``. The default is 0.0, which should be used to replicate the results from `DAOFIND`_. exclude_border : bool, optional Set to `True` to exclude sources found within half the size of the convolution kernel from the image borders. The default is `False`, which is the mode used by `DAOFIND`_. See Also -------- IRAFStarFinder Notes ----- For the convolution step, this routine sets pixels beyond the image borders to 0.0. The equivalent parameters in `DAOFIND`_ are ``boundary='constant'`` and ``constant=0.0``. References ---------- .. [1] Stetson, P. 1987; PASP 99, 191 (http://adsabs.harvard.edu/abs/1987PASP...99..191S) .. [2] http://stsdas.stsci.edu/cgi-bin/gethelp.cgi?daofind ..
_DAOFIND: http://stsdas.stsci.edu/cgi-bin/gethelp.cgi?daofind """ def __init__(self, threshold, fwhm, ratio=1.0, theta=0.0, sigma_radius=1.5, sharplo=0.2, sharphi=1.0, roundlo=-1.0, roundhi=1.0, sky=0.0, exclude_border=False): self.threshold = threshold self.fwhm = fwhm self.ratio = ratio self.theta = theta self.sigma_radius = sigma_radius self.sharplo = sharplo self.sharphi = sharphi self.roundlo = roundlo self.roundhi = roundhi self.sky = sky self.exclude_border = exclude_border def find_stars(self, data): daofind_kernel = _FindObjKernel(self.fwhm, self.ratio, self.theta, self.sigma_radius) self.threshold *= daofind_kernel.relerr objs = _findobjs(data, self.threshold, daofind_kernel, exclude_border=self.exclude_border) tbl = _daofind_properties(objs, self.threshold, daofind_kernel, self.sky) if len(objs) == 0: warnings.warn('No sources were found.', AstropyUserWarning) return tbl # empty table table_mask = ((tbl['sharpness'] > self.sharplo) & (tbl['sharpness'] < self.sharphi) & (tbl['roundness1'] > self.roundlo) & (tbl['roundness1'] < self.roundhi) & (tbl['roundness2'] > self.roundlo) & (tbl['roundness2'] < self.roundhi)) tbl = tbl[table_mask] idcol = Column(name='id', data=np.arange(len(tbl)) + 1) tbl.add_column(idcol, 0) if len(tbl) == 0: warnings.warn('Sources were found, but none pass the sharpness ' 'and roundness criteria.', AstropyUserWarning) return tbl class IRAFStarFinder(StarFinderBase): """ Detect stars in an image using IRAF's "starfind" algorithm. `starfind`_ searches images for local density maxima that have a peak amplitude greater than ``threshold`` above the local background and have a PSF full-width half-maximum similar to the input ``fwhm``. The objects' centroid, roundness (ellipticity), and sharpness are calculated using image moments. Parameters ---------- threshold : float The absolute image value above which to select sources. fwhm : float The full-width half-maximum (FWHM) of the 2D circular Gaussian kernel in units of pixels. minsep_fwhm : float, optional The minimum separation for detected objects in units of ``fwhm``. sigma_radius : float, optional The truncation radius of the Gaussian kernel in units of sigma (standard deviation) [``1 sigma = FWHM / (2.0*sqrt(2.0*log(2.0)))``]. sharplo : float, optional The lower bound on sharpness for object detection. sharphi : float, optional The upper bound on sharpness for object detection. roundlo : float, optional The lower bound on roundness for object detection. roundhi : float, optional The upper bound on roundness for object detection. sky : float, optional The background sky level of the image. Inputting a ``sky`` value will override the background sky estimate. Setting ``sky`` affects only the output values of the object ``peak``, ``flux``, and ``mag``. The default is ``None``, which means the sky value will be estimated using the `starfind`_ method. exclude_border : bool, optional Set to `True` to exclude sources found within half the size of the convolution kernel from the image borders. The default is `False`, which is the mode used by `starfind`_. See Also -------- DAOStarFinder References ---------- .. [1] http://stsdas.stsci.edu/cgi-bin/gethelp.cgi?starfind ..
_starfind: http://stsdas.stsci.edu/cgi-bin/gethelp.cgi?starfind """ def __init__(self, threshold, fwhm, sigma_radius=1.5, minsep_fwhm=2.5, sharplo=0.5, sharphi=2.0, roundlo=0.0, roundhi=0.2, sky=None, exclude_border=False): self.threshold = threshold self.fwhm = fwhm self.sigma_radius = sigma_radius self.minsep_fwhm = minsep_fwhm self.sharplo = sharplo self.sharphi = sharphi self.roundlo = roundlo self.roundhi = roundhi self.sky = sky self.exclude_border = exclude_border def find_stars(self, data): starfind_kernel = _FindObjKernel(self.fwhm, ratio=1.0, theta=0.0, sigma_radius=self.sigma_radius) min_separation = max(2, int((self.fwhm * self.minsep_fwhm) + 0.5)) objs = _findobjs(data, self.threshold, starfind_kernel, min_separation=min_separation, exclude_border=self.exclude_border) tbl = _irafstarfind_properties(objs, starfind_kernel, self.sky) if len(objs) == 0: warnings.warn('No sources were found.', AstropyUserWarning) return tbl # empty table table_mask = ((tbl['sharpness'] > self.sharplo) & (tbl['sharpness'] < self.sharphi) & (tbl['roundness'] > self.roundlo) & (tbl['roundness'] < self.roundhi)) tbl = tbl[table_mask] idcol = Column(name='id', data=np.arange(len(tbl)) + 1) tbl.add_column(idcol, 0) if len(tbl) == 0: warnings.warn('Sources were found, but none pass the sharpness ' 'and roundness criteria.', AstropyUserWarning) return tbl def _findobjs(data, threshold, kernel, min_separation=None, exclude_border=False, local_peaks=True): """ Find sources in an image by convolving the image with the input kernel and selecting connected pixels above a given threshold. Parameters ---------- data : array_like The 2D array of the image. threshold : float The absolute image value above which to select sources. Note that this threshold is not the same threshold input to ``daofind`` or ``irafstarfind``. It should be multiplied by the kernel relerr. kernel : `_FindObjKernel` The convolution kernel. The dimensions should match those of the cutouts. The kernel should be normalized to zero sum. exclude_border : bool, optional Set to `True` to exclude sources found within half the size of the convolution kernel from the image borders. The default is `False`, which is the mode used by `DAOFIND`_ and `starfind`_. local_peaks : bool, optional Set to `True` to exactly match the `DAOFIND`_ method of finding local peaks. If `False`, then only one peak per thresholded segment will be used. Returns ------- objects : list of `_ImgCutout` A list of `_ImgCutout` objects containing the image cutout for each source. .. _DAOFIND: http://stsdas.stsci.edu/cgi-bin/gethelp.cgi?daofind .. _starfind: http://stsdas.stsci.edu/cgi-bin/gethelp.cgi?starfind """ from scipy import ndimage x_kernradius = kernel.kern.shape[1] // 2 y_kernradius = kernel.kern.shape[0] // 2 if not exclude_border: # create a larger image padded by zeros ysize = int(data.shape[0] + (2. * y_kernradius)) xsize = int(data.shape[1] + (2. * x_kernradius)) data_padded = np.zeros((ysize, xsize)) data_padded[y_kernradius:y_kernradius + data.shape[0], x_kernradius:x_kernradius + data.shape[1]] = data data = data_padded convolved_data = filter_data(data, kernel.kern, mode='constant', fill_value=0.0, check_normalization=False) if not exclude_border: # keep border=0 in convolved data convolved_data[:y_kernradius, :] = 0. convolved_data[-y_kernradius:, :] = 0. convolved_data[:, :x_kernradius] = 0. convolved_data[:, -x_kernradius:] = 0. 
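    # Segment the thresholded, convolved image: label connected groups of
    # pixels above ``threshold`` using an 8-connected structuring element,
    # so diagonally adjacent pixels are assigned to the same source.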
selem = ndimage.generate_binary_structure(2, 2) object_labels, nobjects = ndimage.label(convolved_data > threshold, structure=selem) objects = [] if nobjects == 0: return objects # find object peaks in the convolved data if local_peaks: # footprint overrides min_separation in find_peaks if min_separation is None: # daofind footprint = kernel.mask.astype(np.bool) else: from skimage.morphology import disk footprint = disk(min_separation) tbl = find_peaks(convolved_data, threshold, footprint=footprint) coords = np.transpose([tbl['y_peak'], tbl['x_peak']]) else: object_slices = ndimage.find_objects(object_labels) coords = [] for object_slice in object_slices: # thresholded_object is not the same size as the kernel thresholded_object = convolved_data[object_slice] ypeak, xpeak = np.unravel_index(thresholded_object.argmax(), thresholded_object.shape) xpeak += object_slice[1].start ypeak += object_slice[0].start coords.append((ypeak, xpeak)) for (ypeak, xpeak) in coords: # now extract the object from the data, centered on the peak # pixel in the convolved image, with the same size as the kernel x0 = xpeak - x_kernradius x1 = xpeak + x_kernradius + 1 y0 = ypeak - y_kernradius y1 = ypeak + y_kernradius + 1 if x0 < 0 or x1 > data.shape[1]: continue # pragma: no cover (isolated continue is never tested) if y0 < 0 or y1 > data.shape[0]: continue # pragma: no cover (isolated continue is never tested) object_data = data[y0:y1, x0:x1] object_convolved_data = convolved_data[y0:y1, x0:x1].copy() if not exclude_border: # correct for image padding x0 -= x_kernradius y0 -= y_kernradius imgcutout = _ImgCutout(object_data, object_convolved_data, x0, y0) objects.append(imgcutout) return objects def _irafstarfind_properties(imgcutouts, kernel, sky=None): """ Find the properties of each detected source, as defined by IRAF's ``starfind``. Parameters ---------- imgcutouts : list of `_ImgCutout` A list of `_ImgCutout` objects containing the image cutout for each source. kernel : `_FindObjKernel` The convolution kernel. The dimensions should match those of the cutouts. ``kernel.gkernel`` should have a peak pixel value of 1.0 and not contain any masked pixels. sky : float, optional The absolute sky level. If sky is ``None``, then a local sky level will be estimated (in a crude fashion). Returns ------- table : `~astropy.table.Table` A table of the objects' properties. """ result = defaultdict(list) for imgcutout in imgcutouts: if sky is None: skymask = ~kernel.mask.astype(np.bool) # 1=sky, 0=obj nsky = np.count_nonzero(skymask) if nsky == 0: meansky = imgcutout.data.max() - imgcutout.convdata.max() else: meansky = (imgcutout.data * skymask).sum() / nsky else: meansky = sky objvals = _irafstarfind_moments(imgcutout, kernel, meansky) for key, val in objvals.items(): result[key].append(val) names = ['xcentroid', 'ycentroid', 'fwhm', 'sharpness', 'roundness', 'pa', 'npix', 'sky', 'peak', 'flux', 'mag'] if len(result) == 0: for name in names: result[name] = [] table = Table(result, names=names) return table def _irafstarfind_moments(imgcutout, kernel, sky): """ Find the properties of each detected source, as defined by IRAF's ``starfind``. Parameters ---------- imgcutout : `_ImgCutout` The image cutout for a single detected source. kernel : `_FindObjKernel` The convolution kernel. The dimensions should match those of ``imgcutout``. ``kernel.gkernel`` should have a peak pixel value of 1.0 and not contain any masked pixels. sky : float The local sky level around the source. 
Returns ------- result : dict A dictionary of the object parameters. """ from skimage.measure import moments, moments_central result = defaultdict(list) img = np.array((imgcutout.data - sky) * kernel.mask) img = np.where(img > 0, img, 0) # starfind discards negative pixels if np.count_nonzero(img) <= 1: return {} m = moments(img, 1) result['xcentroid'] = m[1, 0] / m[0, 0] result['ycentroid'] = m[0, 1] / m[0, 0] result['npix'] = float(np.count_nonzero(img)) # float for easier testing result['sky'] = sky result['peak'] = np.max(img) flux = img.sum() result['flux'] = flux result['mag'] = -2.5 * np.log10(flux) mu = moments_central( img, result['ycentroid'], result['xcentroid'], 2) / m[0, 0] musum = mu[2, 0] + mu[0, 2] mudiff = mu[2, 0] - mu[0, 2] result['fwhm'] = 2.0 * np.sqrt(np.log(2.0) * musum) result['sharpness'] = result['fwhm'] / kernel.fwhm result['roundness'] = np.sqrt(mudiff**2 + 4.0*mu[1, 1]**2) / musum pa = 0.5 * np.arctan2(2.0 * mu[1, 1], mudiff) * (180.0 / np.pi) if pa < 0.0: pa += 180.0 result['pa'] = pa result['xcentroid'] += imgcutout.x0 result['ycentroid'] += imgcutout.y0 return result def _daofind_properties(imgcutouts, threshold, kernel, sky=0.0): """ Find the properties of each detected source, as defined by `DAOFIND`_. Parameters ---------- imgcutouts : list of `_ImgCutout` A list of `_ImgCutout` objects containing the image cutout for each source. threshold : float The absolute image value above which to select sources. kernel : `_FindObjKernel` The convolution kernel. The dimensions should match those of the objects in ``imgcutouts``. ``kernel.gkernel`` should have a peak pixel value of 1.0 and not contain any masked pixels. sky : float, optional The local sky level around the source. ``sky`` is used only to calculate the source peak value and flux. The default is 0.0. Returns ------- table : `~astropy.table.Table` A table of the object parameters. .. _DAOFIND: http://stsdas.stsci.edu/cgi-bin/gethelp.cgi?daofind """ result = defaultdict(list) ykcen, xkcen = kernel.center for imgcutout in imgcutouts: convobj = imgcutout.convdata.copy() convobj[ykcen, xkcen] = 0.0 q1 = convobj[0:ykcen+1, xkcen+1:] q2 = convobj[0:ykcen, 0:xkcen+1] q3 = convobj[ykcen:, 0:xkcen] q4 = convobj[ykcen+1:, xkcen:] sum2 = -q1.sum() + q2.sum() - q3.sum() + q4.sum() sum4 = np.abs(convobj).sum() result['roundness1'].append(2.0 * sum2 / sum4) obj = imgcutout.data objpeak = obj[ykcen, xkcen] convpeak = imgcutout.convdata[ykcen, xkcen] npts = kernel.mask.sum() obj_masked = obj * kernel.mask objmean = (obj_masked.sum() - objpeak) / (npts - 1) # exclude peak sharp = (objpeak - objmean) / convpeak result['sharpness'].append(sharp) dx, dy, g_roundness = _daofind_centroid_roundness(obj, kernel) yc, xc = imgcutout.center result['xcentroid'].append(xc + dx) result['ycentroid'].append(yc + dy) result['roundness2'].append(g_roundness) result['sky'].append(sky) # DAOFIND uses sky=0 result['npix'].append(float(obj.size)) result['peak'].append(objpeak - sky) flux = (convpeak / threshold) - (sky * obj.size) result['flux'].append(flux) if flux <= 0: mag = np.nan else: mag = -2.5 * np.log10(flux) result['mag'].append(mag) names = ['xcentroid', 'ycentroid', 'sharpness', 'roundness1', 'roundness2', 'npix', 'sky', 'peak', 'flux', 'mag'] if len(result) == 0: for name in names: result[name] = [] table = Table(result, names=names) return table def _daofind_centroid_roundness(obj, kernel): """ Calculate the source (x, y) centroid and `DAOFIND`_ "GROUND" roundness statistic. 
`DAOFIND`_ finds the centroid by fitting 1D Gaussians (marginal x/y distributions of the kernel) to the marginal x/y distributions of the original (unconvolved) image. The roundness statistic measures the ratio of the difference in the height of the best fitting Gaussian function in x minus the best fitting Gaussian function in y, divided by the average of the best fitting Gaussian functions in x and y. A circular source will have a zero roundness. An source extended in x (y) will have a negative (positive) roundness. Parameters ---------- obj : array_like The 2D array of the source cutout. kernel : `_FindObjKernel` The convolution kernel. The dimensions should match those of ``obj``. ``kernel.gkernel`` should have a peak pixel value of 1.0 and not contain any masked pixels. Returns ------- dx, dy : float Fractional shift in x and y of the image centroid relative to the maximum pixel. g_roundness : float `DAOFIND`_ roundness (GROUND) statistic. .. _DAOFIND: http://stsdas.stsci.edu/cgi-bin/gethelp.cgi?daofind """ dx, hx = _daofind_centroidfit(obj, kernel, axis=0) dy, hy = _daofind_centroidfit(obj, kernel, axis=1) g_roundness = 2.0 * (hx - hy) / (hx + hy) return dx, dy, g_roundness def _daofind_centroidfit(obj, kernel, axis): """ Find the source centroid along one axis by fitting a 1D Gaussian to the marginal x or y distribution of the unconvolved source data. Parameters ---------- obj : array_like The 2D array of the source cutout. kernel : `_FindObjKernel` The convolution kernel. The dimensions should match those of ``obj``. ``kernel.gkernel`` should have a peak pixel value of 1.0 and not contain any masked pixels. axis : {0, 1} The axis for which the centroid is computed: * 0: for the x axis * 1: for the y axis Returns ------- dx : float Fractional shift in x or y (depending on ``axis`` value) of the image centroid relative to the maximum pixel. hx : float Height of the best-fitting Gaussian to the marginal x or y (depending on ``axis`` value) distribution of the unconvolved source data. """ # define a triangular weighting function, peaked in the middle # and equal to one at the edge nyk, nxk = kernel.shape ykrad, xkrad = kernel.center ywtd, xwtd = np.mgrid[0:nyk, 0:nxk] xwt = xkrad - abs(xwtd - xkrad) + 1.0 ywt = ykrad - abs(ywtd - ykrad) + 1.0 if axis == 0: wt = xwt[0] wts = ywt ksize = nxk kernel_sigma = kernel.xsigma krad = ksize // 2 sumdx_vec = krad - np.arange(ksize) elif axis == 1: wt = ywt.T[0] wts = xwt ksize = nyk kernel_sigma = kernel.ysigma krad = ksize // 2 sumdx_vec = np.arange(ksize) - krad n = wt.sum() sg = (kernel.gkernel * wts).sum(axis) sumg = (wt * sg).sum() sumg2 = (wt * sg**2).sum() vec = krad - np.arange(ksize) dgdx = sg * vec sdgdx = (wt * dgdx).sum() sdgdx2 = (wt * dgdx**2).sum() sgdgdx = (wt * sg * dgdx).sum() sd = (obj * wts).sum(axis) sumd = (wt * sd).sum() sumgd = (wt * sg * sd).sum() sddgdx = (wt * sd * dgdx).sum() sumdx = (wt * sd * sumdx_vec).sum() # linear least-squares fit (data = sky + hx*gkernel) to find amplitudes denom = (n*sumg2 - sumg**2) hx = (n*sumgd - sumg*sumd) / denom # sky = (sumg2*sumd - sumg*sumgd) / denom dx = (sgdgdx - (sddgdx - sdgdx*sumd)) / (hx * sdgdx2 / kernel_sigma**2) hsize = (ksize / 2.) if abs(dx) > hsize: dx = 0 if sumd == 0: dx = 0.0 else: dx = float(sumdx / sumd) if abs(dx) > hsize: dx = 0.0 return dx, hx class _ImgCutout(object): """Class to hold image cutouts.""" def __init__(self, data, convdata, x0, y0): """ Parameters ---------- data : array_like The cutout 2D image from the input unconvolved 2D image. 
convdata : array_like The cutout 2D image from the convolved 2D image. x0, y0 : float Image coordinates of the lower left pixel of the cutout region. The pixel origin is (0, 0). """ self.data = data self.convdata = convdata self.x0 = x0 self.y0 = y0 @property def radius(self): return [size // 2 for size in self.data.shape] @property def center(self): yr, xr = self.radius return yr + self.y0, xr + self.x0 class _FindObjKernel(object): """ Calculate a 2D Gaussian density enhancement kernel. This kernel has negative wings and sums to zero. It is used by both `DAOStarFinder` and `IRAFStarFinder`. Parameters ---------- fwhm : float The full-width half-maximum (FWHM) of the major axis of the Gaussian kernel in units of pixels. ratio : float, optional The ratio of the minor to major axis standard deviations of the Gaussian kernel. ``ratio`` must be strictly positive and less than or equal to 1.0. The default is 1.0 (i.e., a circular Gaussian kernel). theta : float, optional The position angle (in degrees) of the major axis of the Gaussian kernel measured counter-clockwise from the positive x axis. sigma_radius : float, optional The truncation radius of the Gaussian kernel in units of sigma (standard deviation) [``1 sigma = FWHM / 2.0*sqrt(2.0*log(2.0))``]. The default is 1.5. Notes ----- The object attributes include the dimensions of the elliptical kernel and the coefficients of a 2D elliptical Gaussian function expressed as: ``f(x,y) = A * exp(-g(x,y))`` where ``g(x,y) = a*(x-x0)**2 + 2*b*(x-x0)*(y-y0) + c*(y-y0)**2`` References ---------- .. [1] http://en.wikipedia.org/wiki/Gaussian_function """ def __init__(self, fwhm, ratio=1.0, theta=0.0, sigma_radius=1.5): if fwhm < 0: raise ValueError('fwhm must be positive, ' 'got fwhm={0}'.format(fwhm)) if ratio <= 0 or ratio > 1: raise ValueError('ratio must be positive and less or equal ' 'than 1, got ratio={0}'.format(ratio)) if sigma_radius <= 0: raise ValueError('sigma_radius must be positive, got ' 'sigma_radius={0}'.format(sigma_radius)) self.fwhm = fwhm self.sigma_radius = sigma_radius self.ratio = ratio self.theta = theta self.theta_radians = np.deg2rad(self.theta) self.xsigma = self.fwhm * gaussian_fwhm_to_sigma self.ysigma = self.xsigma * self.ratio self.a = None self.b = None self.c = None self.f = None self.nx = None self.ny = None self.xc = None self.yc = None self.circrad = None self.ellrad = None self.gkernel = None self.mask = None self.npts = None self.kern = None self.relerr = None self.set_gausspars() self.mk_kern() @property def shape(self): return self.kern.shape @property def center(self): """Index of the kernel center.""" return [size // 2 for size in self.kern.shape] def set_gausspars(self): xsigma2 = self.xsigma**2 ysigma2 = self.ysigma**2 cost = np.cos(self.theta_radians) sint = np.sin(self.theta_radians) self.a = (cost**2 / (2.0 * xsigma2)) + (sint**2 / (2.0 * ysigma2)) self.b = 0.5 * cost * sint * (1.0/xsigma2 - 1.0/ysigma2) # CCW self.c = (sint**2 / (2.0 * xsigma2)) + (cost**2 / (2.0 * ysigma2)) # find the extent of an ellipse with radius = sigma_radius*sigma; # solve for the horizontal and vertical tangents of an ellipse # defined by g(x,y) = f self.f = self.sigma_radius**2 / 2.0 denom = self.a*self.c - self.b**2 self.nx = 2 * int(max(2, math.sqrt(self.c*self.f / denom))) + 1 self.ny = 2 * int(max(2, math.sqrt(self.a*self.f / denom))) + 1 return def mk_kern(self): yy, xx = np.mgrid[0:self.ny, 0:self.nx] self.xc = self.nx // 2 self.yc = self.ny // 2 self.circrad = np.sqrt((xx-self.xc)**2 + (yy-self.yc)**2) self.ellrad = 
(self.a*(xx-self.xc)**2 + 2.0*self.b*(xx-self.xc)*(yy-self.yc) + self.c*(yy-self.yc)**2) self.gkernel = np.exp(-self.ellrad) self.mask = np.where((self.ellrad <= self.f) | (self.circrad <= 2.0), 1, 0).astype(np.int16) self.npts = self.mask.sum() self.kern = self.gkernel * self.mask # normalize the kernel to zero sum (denom = variance * npts) denom = ((self.kern**2).sum() - (self.kern.sum()**2 / self.npts)) self.relerr = 1.0 / np.sqrt(denom) self.kern = (((self.kern - (self.kern.sum() / self.npts)) / denom) * self.mask) return photutils-0.4/photutils/detection/setup_package.py0000644000214200020070000000022312444404542024771 0ustar lbradleySTSCI\science00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst def get_package_data(): return {'photutils.detection.tests': ['data/*.txt']} photutils-0.4/photutils/detection/tests/0000755000214200020070000000000013175654702022760 5ustar lbradleySTSCI\science00000000000000photutils-0.4/photutils/detection/tests/__init__.py0000644000214200020070000000017112345377273025073 0ustar lbradleySTSCI\science00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst """ This packages contains affiliated package tests. """ photutils-0.4/photutils/detection/tests/data/0000755000214200020070000000000013175654702023671 5ustar lbradleySTSCI\science00000000000000photutils-0.4/photutils/detection/tests/data/daofind_test_thresh08.0_fwhm01.0.txt0000644000214200020070000000646412444404542032303 0ustar lbradleySTSCI\science00000000000000id xcentroid ycentroid sharpness roundness1 roundness2 npix sky peak flux mag 1 88.46990059776188 58.48886418244315 0.8762686366344555 0.5208952612806851 -0.4122721184034913 25.0 0.0 41.219046927862664 1.7835016184670074 -0.6281837694550259 2 327.4237440417888 65.44679142919296 0.9157937804266859 0.04525792010641404 0.7509409883712366 25.0 0.0 22.751918266079162 1.0446321071558946 -0.04740842477087964 3 35.3030291035496 100.99885418235492 0.9642784405023673 0.8997709899005626 0.4262749271277977 25.0 0.0 20.102985322609683 1.0144135507640197 -0.015537604949063939 4 207.35081374580741 114.17862428357554 0.8924335237573308 0.9346385083631819 -0.518807699686835 25.0 0.0 29.754644643579955 1.3969918280271438 -0.3629846640809128 5 290.43063459156474 113.06404861735642 0.898082603591199 -0.05099656208774482 -0.23165223978862307 25.0 0.0 46.446092373907746 1.900514493186413 -0.6971779646253444 6 200.92861884647542 131.13339385033788 0.9484922230198581 0.22387509766367475 -0.27402570026586753 25.0 0.0 35.98220832083827 1.0238148922854393 -0.025553606135659993 7 314.47263346346375 129.3969056629089 0.8887239985499058 -0.415497488077957 -0.981667389821615 25.0 0.0 37.2058611011273 1.462461712936029 -0.41271126282013204 8 10.651479210211352 141.89200443111378 0.8874321043149441 0.1038427584432066 0.6135710025317431 25.0 0.0 31.413937832962418 1.033701663235285 -0.035988037644573874 9 125.38583354043459 147.12287931014475 0.9045758881095933 0.018740208509969953 0.9822435679992049 25.0 0.0 47.21336918933935 1.6322194134762291 -0.5319463475288915 10 145.14914494499064 169.8101867232563 0.8849770368050671 0.15624574204422784 0.17519144635671952 25.0 0.0 83.38951276487094 5.207804548890224 -1.7916366915254869 11 394.08757117974244 186.3394174191881 0.8802050226751206 0.35607460226075754 0.09146988324132699 25.0 0.0 110.35005452953492 7.319441130206205 -2.161194805332651 12 206.70671685848117 198.52606581209523 0.8981872955879281 -0.914696580134408 0.03140476242070839 25.0 0.0 34.44956649698919 1.1330527665066235 
-0.13562533880024377 13 48.107054682499836 199.00601737015663 0.8865292982659505 -0.25299384635714167 -0.29924860045592516 25.0 0.0 46.62575574744383 1.6718429529072263 -0.5579886972773125 14 426.0377480701111 211.01291166877795 0.8767982886712559 -0.18983407274095807 0.46718907337348503 25.0 0.0 76.36050307691691 4.157089774588257 -1.5469735085756775 15 256.89065375229984 218.8702651417756 0.8919076919119704 -0.6574612471815833 0.9250674332112004 25.0 0.0 31.145480617595414 1.3162412640878085 -0.29833875446097147 16 256.72686483357546 220.39592428590072 0.8657852930536363 -0.6868946128505259 0.7806774957564329 25.0 0.0 35.92343258547273 1.3961484705669713 -0.36232901226396025 17 10.748287200873916 224.20057800236486 0.8530832269725744 -0.6801043879386687 0.5439043607509917 25.0 0.0 55.76846840338447 2.9686701641588544 -1.1814048693326866 18 355.7777559665791 251.24144545680184 0.9076091250435318 0.03794133476381934 0.3071068705575514 25.0 0.0 34.568381338063475 1.194628367857395 -0.19308205841850107 19 140.49990182298185 275.4562630541407 0.8956136505852358 -0.12260502762650799 0.28058974799126774 25.0 0.0 29.39237884586493 1.0140636761486634 -0.015163066319055899 20 434.4873254828115 287.5722770696709 0.9358461422138549 0.7057077340015893 -0.2758037043682071 25.0 0.0 33.411944319652875 1.0657776539076416 -0.06916652543885461 photutils-0.4/photutils/detection/tests/data/daofind_test_thresh08.0_fwhm01.5.txt0000644000214200020070000001112212444404542032273 0ustar lbradleySTSCI\science00000000000000id xcentroid ycentroid sharpness roundness1 roundness2 npix sky peak flux mag 1 441.25133516377946 31.44957631443119 0.7184774674544862 0.2540826457659647 0.33820558007054197 25.0 0.0 30.239994137265167 1.0192109043077802 -0.020660153587305028 2 88.79813107802212 58.81911572547198 0.6412388176817487 0.5199389854407557 -0.44797314831347357 25.0 0.0 41.219046927862664 2.4129090339570833 -0.9563523735710178 3 327.7440399874977 64.66175083365357 0.8223868740403482 -0.11894585756005756 0.8073553959462378 25.0 0.0 22.751918266079162 1.1516880702259864 -0.15333717054005433 4 230.91695759765258 65.9894379571403 0.7813631775972321 0.4774209234454751 -0.05879164786084709 25.0 0.0 10.922343382751748 1.022964317973155 -0.02465121336733659 5 99.95188614111407 98.10665397029018 0.9127911170174968 0.5594087143983953 0.8968738122629868 25.0 0.0 24.984494810798402 1.0915006340968902 -0.09505998086117816 6 71.4226160551628 111.66081527853272 0.619266520607775 -0.05983418788263739 0.4400254302666273 25.0 0.0 35.0913969385426 1.2593500685848913 -0.2503661753928418 7 206.54934787497413 114.06617385814583 0.7025595284696313 0.9767349175473904 -0.5834171877869196 25.0 0.0 29.754644643579955 1.7568577222085846 -0.6118414797745659 8 290.7911753889566 113.64828526813636 0.7060267926521758 -0.07996579234428655 -0.22858041274721436 25.0 0.0 46.446092373907746 2.393405398069706 -0.9475406654982715 9 341.27356655729886 114.4587775240903 0.5829404636239032 -0.7700415392818774 -0.3323734952946145 25.0 0.0 28.65755220774481 1.1798300357956324 -0.17954862031485538 10 200.33443823023921 130.4136477492795 0.8372831521724797 0.22155345712781147 -0.23896573945028038 25.0 0.0 35.98220832083827 1.1482404524644518 -0.1500821074451444 11 313.52800888962287 129.7741880324062 0.6842530424213573 -0.3959581050304958 -0.9378705237493801 25.0 0.0 37.2058611011273 1.88054900173178 -0.6857116359467628 12 10.867021165882411 142.54063824155378 0.6947552024609119 0.11878849281679056 0.6710643184520064 25.0 0.0 31.413937832962418 1.3072195133461557 
-0.29087130527521743 13 125.13873545366265 148.07110612241445 0.7149614754541866 -0.019729049737502204 0.9618448688309467 25.0 0.0 47.21336918933935 2.04451774222952 -0.776477209234867 14 344.82714268313856 166.27735480015838 0.5551520317059323 0.4640472588827457 -0.18830878258989417 25.0 0.0 27.631382749951754 1.025656792366861 -0.027505151167814216 15 145.04161669340027 168.43979551421813 0.6418746565917449 -0.02736696618181218 0.1153966295770878 25.0 0.0 82.74848117792747 6.884763660996454 -2.0947225911530913 16 394.65502401524253 187.37357173892647 0.6692365367195389 0.3413113809170677 0.08770438935586308 25.0 0.0 110.35005452953492 9.530860217374974 -2.44783025023155 17 480.1442854950754 188.34294135511698 0.9105534140545337 0.8093219184197902 0.9481584847725933 25.0 0.0 23.07234835743072 1.0771739794755455 -0.08071463480942372 18 48.67675424075205 200.4511294253465 0.637320895047539 -0.571948558370518 -0.3831872251686798 25.0 0.0 47.2892911443403 2.2190254912470717 -0.865405728140129 19 426.64088605663267 211.0035331399752 0.6532673887534252 -0.22682231797770286 0.43515782187184765 25.0 0.0 76.36050307691691 5.523929114370442 -1.8556202421835335 20 305.6288921959932 216.1599509336425 0.5855765105365481 -0.007370981867631505 0.3512345640013347 25.0 0.0 26.565020985170975 1.1775541535016305 -0.17745222161727764 21 257.6002296718719 217.77146625690213 0.6907143669914155 -0.6586594988741606 0.9959103449675526 25.0 0.0 31.145480617595414 1.6827007962410163 -0.5650172505850533 22 256.265775002328 220.7587474377063 0.6330394123373777 -0.6815183858246547 0.824192111519761 25.0 0.0 35.92343258547273 1.8904319045703657 -0.691402595542391 23 10.906589762292864 224.07169129851232 0.5915881373471015 -0.7302006348846717 0.4954582336704235 25.0 0.0 55.76846840338447 4.238223669541717 -1.5679596814822927 24 355.91280082770714 252.31749264525953 0.7520228837688127 0.008366802620332457 0.31569853010198345 25.0 0.0 34.568381338063475 1.4274161278306714 -0.38637649844105226 25 139.59065348749309 275.1884233884024 0.7154853585505654 -0.18441687745205232 0.2548543643003899 25.0 0.0 29.39237884586493 1.2567102535141665 -0.24808789627317712 26 121.14388458069425 284.8118762076604 0.7574664205028767 0.18170258067684428 0.16985955244315368 25.0 0.0 12.415425631454825 1.0092893140555754 -0.010039187860971983 27 433.5551160149825 288.4794012861741 0.8097201173279633 0.6995511520935703 -0.27322026456220144 25.0 0.0 33.411944319652875 1.2195121002893914 -0.21546528461092726 28 471.001033909076 297.8111400615592 0.7934292830937713 0.6600959397482591 0.23721748404523008 25.0 0.0 10.524420303596958 1.030199275938046 -0.032303100766317684 photutils-0.4/photutils/detection/tests/data/daofind_test_thresh08.0_fwhm02.0.txt0000644000214200020070000001163712415340320032271 0ustar lbradleySTSCI\science00000000000000id xcentroid ycentroid sharpness roundness1 roundness2 npix sky peak flux mag 1 441.3499172083869 31.402878654917814 0.6332823251229039 0.30085528401312445 0.3680354928019628 25.0 0.0 30.239994137265167 1.1572788429613632 -0.15859503387229676 2 0.8496908619052633 40.04305508436788 0.45619405869493956 -0.22810071231008808 0.7232872696244564 25.0 0.0 35.54828630019193 2.6662543983339755 -1.064753962208504 3 88.79956140550843 58.84568931520913 0.5582944988778225 0.5136344718362899 -0.4745844416283111 25.0 0.0 41.219046927862664 2.773675000415222 -1.1076389304199645 4 14.009742492766007 62.28240400026239 0.593724897815523 0.11641944621802239 0.15287632672275359 25.0 0.0 21.481924963251483 1.0271337239470224 
-0.029067471679799464 5 327.70218373934637 64.7320262123344 0.8815643782278983 -0.25650986945379883 0.8672581850871744 25.0 0.0 22.751918266079162 1.0752641632256226 -0.07878792932136536 6 230.95864015413127 65.96089819494524 0.7532093773427389 0.5841683591471404 0.017434633001189193 25.0 0.0 10.922343382751748 1.062076683475109 -0.06538968642057963 7 7.328434526380554 69.5156220324513 0.5970223288209795 -0.9902229999019926 -0.36777328827511824 25.0 0.0 33.2880647048335 1.7553590999611413 -0.6109149375862932 8 99.90338602734467 97.94590143760601 0.9369113497949474 0.374463505034526 0.6482107118468855 25.0 0.0 24.984494810798402 1.0642779488426795 -0.0676376598520223 9 34.271919795938956 97.94540042765118 0.43062202850526665 0.9672893343791893 -0.1317036542420994 25.0 0.0 19.27625317566386 1.008392639521492 -0.009074167513830694 10 71.39201494648567 111.67855457865032 0.5196724941219817 -0.07293019182386253 0.3886579935129361 25.0 0.0 35.0913969385426 1.501939736999388 -0.44163126906439054 11 206.5524841594021 114.05764949550421 0.65064321348857 0.9917528207729931 -0.6371226317458737 25.0 0.0 29.754644643579955 1.8986065838796236 -0.696087456404153 12 290.81980079648145 113.6713740964111 0.6430087933779546 -0.09541554335946509 -0.2261751623126485 25.0 0.0 46.446092373907746 2.630139011027921 -1.0499467572765857 13 341.22590232689697 114.47439289227742 0.5191441535865722 -0.8605565678486781 -0.3979504981737904 25.0 0.0 28.65755220774481 1.325909399182182 -0.30628462318182176 14 313.4809249369467 129.79531153878077 0.6196365438299545 -0.36416701339821556 -0.9040072517542573 25.0 0.0 37.2058611011273 2.078368421780885 -0.7943063380385879 15 200.50844425205247 130.3249145251344 0.36839322322757534 0.09087190631722986 -0.22452517495084987 25.0 0.0 31.0622608687501 1.3149161534868317 -0.2972451515721313 16 10.872821371928692 142.50648812470342 0.6466298462154676 0.13850521836332988 0.718945864012764 25.0 0.0 31.413937832962418 1.4056679733123312 -0.36970687498996746 17 124.98844282543168 147.82993328621993 0.4556966042329859 -0.04138895629887515 0.67091498778985 25.0 0.0 45.57926969566492 2.325230959735237 -0.9161652420508276 18 344.7017528266213 166.2683042839413 0.4663098831416211 0.48104307824902254 -0.1252798314146538 25.0 0.0 27.631382749951754 1.2220741014411993 -0.21774385124610166 19 145.03763842687044 168.50637255629331 0.5701020092842325 -0.029731702887011314 0.10804684626541045 25.0 0.0 82.74848117792747 7.757912197426944 -2.2243621500740023 20 394.70347564634125 187.545695732136 0.6053888128969426 -0.17663263158596582 0.15140081222163074 25.0 0.0 111.07213962758318 10.54663671636893 -2.5577849669998876 21 480.1438082712454 188.24825828577048 0.961962354511558 0.7093991969108856 0.8500100573287711 25.0 0.0 23.07234835743072 1.0204491563081264 -0.021978427370778375 22 48.70622094028462 200.38639624622746 0.5563443241052142 -0.5730567456777012 -0.38716010778330917 25.0 0.0 47.2892911443403 2.544104543607104 -1.0138373839688648 23 426.68129895275075 210.99941585113444 0.5819568229039869 -0.25649780804993694 0.4101751816817609 25.0 0.0 76.36050307691691 6.205924359380613 -1.9820161947559602 24 305.66418356557824 216.1095259373655 0.49589375430169474 0.018404255168539715 0.40299126313766576 25.0 0.0 26.565020985170975 1.3916629465991646 -0.35883516046196595 25 10.914912580605002 224.06603573687934 0.5057191364084963 -0.7830489024133768 0.45914226256028107 25.0 0.0 55.76846840338447 4.961946854725386 -1.7391302710501153 26 355.83177174460354 252.09033401412367 0.5247423718988266 
-0.01542369728380577 0.03998195526685211 25.0 0.0 35.77616395203726 1.6536995591431092 -0.5461415265566998 27 139.57526527495273 275.22358348387263 0.6714573499096007 -0.2016681835803816 0.23336192730652747 25.0 0.0 29.39237884586493 1.3402186094935478 -0.3179391100037833 28 121.14216985229885 284.808001888565 0.756547266010023 0.1816100396332748 0.29688354539695194 25.0 0.0 12.415425631454825 1.0113492584189694 -0.012252900872686396 29 433.3024759192166 288.60871367718977 0.4437378655853536 0.4050695181704022 -0.2599032109478057 25.0 0.0 29.897454380832716 1.3995809443498977 -0.36499505205356075 30 470.994349464043 297.7565141346699 0.7661467132938748 0.5285364269442019 0.10769026748619974 25.0 0.0 10.524420303596958 1.0677650194503476 -0.07118922259211052 photutils-0.4/photutils/detection/tests/data/daofind_test_thresh10.0_fwhm01.0.txt0000644000214200020070000000400612444404542032262 0ustar lbradleySTSCI\science00000000000000id xcentroid ycentroid sharpness roundness1 roundness2 npix sky peak flux mag 1 88.46990059776188 58.48886418244315 0.8762686366344555 0.5208952612806851 -0.4122721184034913 25.0 0.0 41.219046927862664 1.426801294773606 -0.38590873693488487 2 207.35081374580741 114.17862428357554 0.8924335237573308 0.9346385083631819 -0.518807699686835 25.0 0.0 29.754644643579955 1.117593462421715 -0.12070963156077162 3 290.43063459156474 113.06404861735642 0.898082603591199 -0.05099656208774482 -0.23165223978862307 25.0 0.0 46.446092373907746 1.5204115945491306 -0.45490293210520344 4 314.47263346346375 129.3969056629089 0.8887239985499058 -0.415497488077957 -0.981667389821615 25.0 0.0 37.2058611011273 1.169969370348823 -0.17043623029999083 5 125.38583354043459 147.12287931014475 0.9045758881095933 0.018740208509969953 0.9822435679992049 25.0 0.0 47.21336918933935 1.3057755307809833 -0.2896713150087505 6 145.14914494499064 169.8101867232563 0.8849770368050671 0.15624574204422784 0.17519144635671952 25.0 0.0 83.38951276487094 4.166243639112179 -1.5493616590053458 7 394.08757117974244 186.3394174191881 0.8802050226751206 0.35607460226075754 0.09146988324132699 25.0 0.0 110.35005452953492 5.855552904164964 -1.9189197728125098 8 48.107054682499836 199.00601737015663 0.8865292982659505 -0.25299384635714167 -0.29924860045592516 25.0 0.0 46.62575574744383 1.337474362325781 -0.31571366475717144 9 426.0377480701111 211.01291166877795 0.8767982886712559 -0.18983407274095807 0.46718907337348503 25.0 0.0 76.36050307691691 3.3256718196706054 -1.3046984760555367 10 256.89065375229984 218.8702651417756 0.8919076919119704 -0.6574612471815833 0.9250674332112004 25.0 0.0 31.145480617595414 1.0529930112702468 -0.05606372194083043 11 256.72686483357546 220.39592428590072 0.8657852930536363 -0.6868946128505259 0.7806774957564329 25.0 0.0 35.92343258547273 1.116918776453577 -0.12005397974381918 12 10.748287200873916 224.20057800236486 0.8530832269725744 -0.6801043879386687 0.5439043607509917 25.0 0.0 55.76846840338447 2.3749361313270834 -0.9391298368125456 photutils-0.4/photutils/detection/tests/data/daofind_test_thresh10.0_fwhm01.5.txt0000644000214200020070000000522312444404542032271 0ustar lbradleySTSCI\science00000000000000id xcentroid ycentroid sharpness roundness1 roundness2 npix sky peak flux mag 1 88.79813107802212 58.81911572547198 0.6412388176817487 0.5199389854407557 -0.44797314831347357 25.0 0.0 41.219046927862664 1.9303272271656666 -0.7140773410508767 2 71.4226160551628 111.66081527853272 0.619266520607775 -0.05983418788263739 0.4400254302666273 25.0 0.0 35.0913969385426 1.007480054867913 
-0.008091142872700756 3 206.54934787497413 114.06617385814583 0.7025595284696313 0.9767349175473904 -0.5834171877869196 25.0 0.0 29.754644643579955 1.405486177766868 -0.3695664472544251 4 290.7911753889566 113.64828526813636 0.7060267926521758 -0.07996579234428655 -0.22858041274721436 25.0 0.0 46.446092373907746 1.914724318455765 -0.7052656329781305 5 313.52800888962287 129.7741880324062 0.6842530424213573 -0.3959581050304958 -0.9378705237493801 25.0 0.0 37.2058611011273 1.504439201385424 -0.4434366034266216 6 10.867021165882411 142.54063824155378 0.6947552024609119 0.11878849281679056 0.6710643184520064 25.0 0.0 31.413937832962418 1.0457756106769245 -0.04859627275507633 7 125.13873545366265 148.07110612241445 0.7149614754541866 -0.019729049737502204 0.9618448688309467 25.0 0.0 47.21336918933935 1.6356141937836162 -0.5342021767147259 8 145.04161669340027 168.43979551421813 0.6418746565917449 -0.02736696618181218 0.1153966295770878 25.0 0.0 82.74848117792747 5.507810928797164 -1.8524475586329505 9 394.65502401524253 187.37357173892647 0.6692365367195389 0.3413113809170677 0.08770438935586308 25.0 0.0 110.35005452953492 7.62468817389998 -2.2055552177114084 10 48.67675424075205 200.4511294253465 0.637320895047539 -0.571948558370518 -0.3831872251686798 25.0 0.0 47.2892911443403 1.7752203929976573 -0.6231306956199879 11 426.64088605663267 211.0035331399752 0.6532673887534252 -0.22682231797770286 0.43515782187184765 25.0 0.0 76.36050307691691 4.419143291496353 -1.6133452096633925 12 257.6002296718719 217.77146625690213 0.6907143669914155 -0.6586594988741606 0.9959103449675526 25.0 0.0 31.145480617595414 1.346160636992813 -0.32274221806491205 13 256.265775002328 220.7587474377063 0.6330394123373777 -0.6815183858246547 0.824192111519761 25.0 0.0 35.92343258547273 1.5123455236562926 -0.4491275630222501 14 10.906589762292864 224.07169129851232 0.5915881373471015 -0.7302006348846717 0.4954582336704235 25.0 0.0 55.76846840338447 3.3905789356333735 -1.3256846489621514 15 355.91280082770714 252.31749264525953 0.7520228837688127 0.008366802620332457 0.31569853010198345 25.0 0.0 34.568381338063475 1.1419329022645373 -0.14410146592091136 16 139.59065348749309 275.1884233884024 0.7154853585505654 -0.18441687745205232 0.2548543643003899 25.0 0.0 29.39237884586493 1.0053682028113333 -0.00581286375303618 photutils-0.4/photutils/detection/tests/data/daofind_test_thresh10.0_fwhm02.0.txt0000644000214200020070000000645712444404542032277 0ustar lbradleySTSCI\science00000000000000id xcentroid ycentroid sharpness roundness1 roundness2 npix sky peak flux mag 1 0.8496908619052633 40.04305508436788 0.45619405869493956 -0.22810071231008808 0.7232872696244564 25.0 0.0 35.54828630019193 2.1330035186671803 -0.8224789296883628 2 88.79956140550843 58.84568931520913 0.5582944988778225 0.5136344718362899 -0.4745844416283111 25.0 0.0 41.219046927862664 2.2189400003321778 -0.8653638978998236 3 7.328434526380554 69.5156220324513 0.5970223288209795 -0.9902229999019926 -0.36777328827511824 25.0 0.0 33.2880647048335 1.404287279968913 -0.3686399050661522 4 71.39201494648567 111.67855457865032 0.5196724941219817 -0.07293019182386253 0.3886579935129361 25.0 0.0 35.0913969385426 1.2015517895995103 -0.19935623654424928 5 206.5524841594021 114.05764949550421 0.65064321348857 0.9917528207729931 -0.6371226317458737 25.0 0.0 29.754644643579955 1.5188852671036988 -0.45381242388401194 6 290.81980079648145 113.6713740964111 0.6430087933779546 -0.09541554335946509 -0.2261751623126485 25.0 0.0 46.446092373907746 2.1041112088223364 
-0.8076717247564446 7 341.22590232689697 114.47439289227742 0.5191441535865722 -0.8605565678486781 -0.3979504981737904 25.0 0.0 28.65755220774481 1.0607275193457455 -0.0640095906616806 8 313.4809249369467 129.79531153878077 0.6196365438299545 -0.36416701339821556 -0.9040072517542573 25.0 0.0 37.2058611011273 1.662694737424708 -0.5520313055184468 9 200.50844425205247 130.3249145251344 0.36839322322757534 0.09087190631722986 -0.22452517495084987 25.0 0.0 31.0622608687501 1.0519329227894652 -0.0549701190519901 10 10.872821371928692 142.50648812470342 0.6466298462154676 0.13850521836332988 0.718945864012764 25.0 0.0 31.413937832962418 1.124534378649865 -0.12743184246982645 11 124.98844282543168 147.82993328621993 0.4556966042329859 -0.04138895629887515 0.67091498778985 25.0 0.0 45.57926969566492 1.8601847677881895 -0.6738902095306866 12 145.03763842687044 168.50637255629331 0.5701020092842325 -0.029731702887011314 0.10804684626541045 25.0 0.0 82.74848117792747 6.206329757941555 -1.9820871175538612 13 394.70347564634125 187.545695732136 0.6053888128969426 -0.17663263158596582 0.15140081222163074 25.0 0.0 111.07213962758318 8.437309373095143 -2.315509934479746 14 48.70622094028462 200.38639624622746 0.5563443241052142 -0.5730567456777012 -0.38716010778330917 25.0 0.0 47.2892911443403 2.035283634885683 -0.7715623514487234 15 426.68129895275075 210.99941585113444 0.5819568229039869 -0.25649780804993694 0.4101751816817609 25.0 0.0 76.36050307691691 4.96473948750449 -1.7397411622358192 16 305.66418356557824 216.1095259373655 0.49589375430169474 0.018404255168539715 0.40299126313766576 25.0 0.0 26.565020985170975 1.1133303572793316 -0.11656012794182484 17 10.914912580605002 224.06603573687934 0.5057191364084963 -0.7830489024133768 0.45914226256028107 25.0 0.0 55.76846840338447 3.9695574837803087 -1.4968552385299745 18 355.83177174460354 252.09033401412367 0.5247423718988266 -0.01542369728380577 0.03998195526685211 25.0 0.0 35.77616395203726 1.3229596473144873 -0.3038664940365588 19 139.57526527495273 275.22358348387263 0.6714573499096007 -0.2016681835803816 0.23336192730652747 25.0 0.0 29.39237884586493 1.072174887594838 -0.07566407748364207 20 433.3024759192166 288.60871367718977 0.4437378655853536 0.4050695181704022 -0.2599032109478057 25.0 0.0 29.897454380832716 1.1196647554799182 -0.12272001953341974 photutils-0.4/photutils/detection/tests/data/irafstarfind_test_thresh08.0_fwhm01.0.txt0000644000214200020070000000232312415340320033330 0ustar lbradleySTSCI\science00000000000000id xcentroid ycentroid fwhm sharpness roundness pa npix sky peak flux mag 1 333.32967620268926 0.4922715732671872 1.2389649115388133 1.2389649115388133 0.1632730683139897 26.751332591232657 5.0 2.757612577414644 7.470385598813738 17.87026271806645 -3.1303273432349914 2 145.0334216330022 168.39799687136576 1.8918921328609848 1.8918921328609848 0.028604480029024524 118.25194333308633 12.0 16.46507005975501 66.92444270511594 406.86074671794165 -6.523614479572233 3 394.7628492905675 187.59049488469253 1.8259269954390203 1.8259269954390203 0.10712610861905057 131.1948064263936 11.0 20.151710346274722 90.92042928130846 527.6471044360048 -6.80585889802106 4 89.21399519459558 198.23805598325862 1.9699369976778611 1.9699369976778611 0.140512496831343 139.49061946497739 10.0 5.046355096294733 8.916265520602554 20.380914697547418 -3.2730591782220264 5 355.940683491333 251.70235184396 1.7522952194564725 1.7522952194564725 0.11598331430783908 52.09576852830077 10.0 19.585761501344383 16.190402450692876 88.75108703293594 -4.870434202614015 
6 378.5365504434283 273.08553439796117 1.9282124502623856 1.9282124502623856 0.09786276609110564 14.700858135363946 8.0 4.744381064470213 8.45586154664661 18.53113999199971 -3.1697553422808005 photutils-0.4/photutils/detection/tests/data/irafstarfind_test_thresh08.0_fwhm01.5.txt0000644000214200020070000000504312415340320033337 0ustar lbradleySTSCI\science00000000000000id xcentroid ycentroid fwhm sharpness roundness pa npix sky peak flux mag 1 333.32967620268926 0.4922715732671872 1.2389649115388133 0.8259766076925422 0.1632730683139897 26.751332591232657 5.0 2.757612577414644 7.470385598813738 17.87026271806645 -3.1303273432349914 2 230.98520543324977 65.96062094855215 1.1045257887737336 0.7363505258491557 0.08474087768196263 175.16563683708682 7.0 4.906490485436556 6.015852897315193 10.355696896573836 -2.5379483261935247 3 71.21785006913522 111.76487474565829 2.041287275833218 1.360858183888812 0.13679659319094278 77.80260478241559 11.0 21.001696161595063 14.089700776947538 107.15742741630842 -5.075055696823615 4 290.88594008616707 113.76868287298815 2.0132511884469353 1.3421674589646235 0.09921593553744701 172.63151133250068 12.0 21.19225795812126 25.253834415786486 157.2638285640319 -5.491572110730435 5 200.23683161129125 130.21911528245474 2.068730352200946 1.3791535681339642 0.0772733520073215 8.31574194223727 13.0 20.931309472117615 15.050898848720653 96.98092311348633 -4.966715784084392 6 344.3860440796261 166.22517276873882 1.8853557992763632 1.2569038661842422 0.11363886565891375 142.91674058093935 11.0 18.685078458637985 11.30249791672627 62.9878292683161 -4.498141604100731 7 145.04228403850357 168.6571618278717 1.9066993974327817 1.2711329316218545 0.01132060172865575 34.75701626174368 12.0 16.35083410366516 67.03867866120578 413.9858186416701 -6.5424636608266695 8 394.7628492905675 187.59049488469253 1.8259269954390203 1.2172846636260135 0.10712610861905057 131.1948064263936 11.0 20.151710346274722 90.92042928130846 527.6471044360048 -6.80585889802106 9 243.47516596181347 197.2493890490569 1.7827428108977648 1.1884952072651764 0.17856028127225676 98.90230482066524 8.0 14.345012293985468 8.2293190831917 34.45785068493779 -3.843220461887511 10 439.9142185199256 197.98835995057703 1.431190377976779 0.9541269186511859 0.14568617232313683 165.69248581691983 8.0 5.521741711694806 6.475686441345677 12.410831724229434 -2.734502217876459 11 48.84191877412477 200.23603953697418 2.044906279964065 1.3632708533093767 0.15397555058991969 8.669913148933764 13.0 21.891667190520494 25.397623953819803 185.00884809430943 -5.667981247854074 12 305.8226567441267 215.56678011460988 1.8118994096837677 1.2079329397891785 0.15302459233581697 71.90351161795856 10.0 16.561617853554058 12.39941531540315 70.11439579638966 -4.614517989747707 13 355.940683491333 251.70235184396 1.7522952194564725 1.1681968129709817 0.11598331430783908 52.09576852830077 10.0 19.585761501344383 16.190402450692876 88.75108703293594 -4.870434202614015 photutils-0.4/photutils/detection/tests/data/irafstarfind_test_thresh08.0_fwhm02.0.txt0000644000214200020070000000616512415340320033341 0ustar lbradleySTSCI\science00000000000000id xcentroid ycentroid fwhm sharpness roundness pa npix sky peak flux mag 1 0.9463803041037677 40.05930171275073 2.1172882811901297 1.0586441405950648 0.15700998967332774 103.73294282811226 12.0 12.639904993835687 22.908381306356247 185.7046678317134 -5.672057050519047 2 13.990504104618 62.55334108277186 1.9112198263358675 0.9556099131679338 0.15520530817401776 50.54366391030623 11.0 13.70300905281337 
7.8569227383370634 45.67009375413011 -4.14907975787997 3 230.98520543324977 65.96062094855215 1.1045257887737336 0.5522628943868668 0.08474087768196263 175.16563683708682 7.0 4.906490485436556 6.015852897315193 10.355696896573836 -2.5379483261935247 4 71.21785006913522 111.76487474565829 2.041287275833218 1.020643637916609 0.13679659319094278 77.80260478241559 11.0 21.001696161595063 14.089700776947538 107.15742741630842 -5.075055696823615 5 290.88594008616707 113.76868287298815 2.0132511884469353 1.0066255942234676 0.09921593553744701 172.63151133250068 12.0 21.19225795812126 25.253834415786486 157.2638285640319 -5.491572110730435 6 476.3854019214201 123.17381910435793 1.9095864585977254 0.9547932292988627 0.049460563907413366 141.18558248492255 10.0 13.976425258573249 7.392939939729255 49.30462300038852 -4.232219105973428 7 344.3860440796261 166.22517276873882 1.8853557992763632 0.9426778996381816 0.11363886565891375 142.91674058093935 11.0 18.685078458637985 11.30249791672627 62.9878292683161 -4.498141604100731 8 145.04228403850357 168.6571618278717 1.9066993974327817 0.9533496987163909 0.01132060172865575 34.75701626174368 12.0 16.35083410366516 67.03867866120578 413.9858186416701 -6.5424636608266695 9 433.96150057644644 172.11618975119973 1.5605837424622429 0.7802918712311214 0.09193780106796648 95.87470166499466 8.0 4.626744929765067 5.595633376366347 17.514050923880667 -3.1084665203219473 10 394.77243654370204 187.3653258347458 1.8382851657518942 0.9191425828759471 0.07664537864845197 57.84086299955225 11.0 20.06339188622815 91.00874774135504 535.3679088447971 -6.821630837610465 11 243.47516596181347 197.2493890490569 1.7827428108977648 0.8913714054488824 0.17856028127225676 98.90230482066524 8.0 14.345012293985468 8.2293190831917 34.45785068493779 -3.843220461887511 12 439.9142185199256 197.98835995057703 1.431190377976779 0.7155951889883895 0.14568617232313683 165.69248581691983 8.0 5.521741711694806 6.475686441345677 12.410831724229434 -2.734502217876459 13 48.84191877412477 200.23603953697418 2.044906279964065 1.0224531399820325 0.15397555058991969 8.669913148933764 13.0 21.891667190520494 25.397623953819803 185.00884809430943 -5.667981247854074 14 305.8226567441267 215.56678011460988 1.8118994096837677 0.9059497048418839 0.15302459233581697 71.90351161795856 10.0 16.561617853554058 12.39941531540315 70.11439579638966 -4.614517989747707 15 292.1680865716507 245.66007865305278 1.9132639276510426 0.9566319638255213 0.13981297941973697 131.64176988617768 12.0 17.382922811106205 26.281111155294354 151.39790010868757 -5.450299628835927 16 355.815359095706 252.07315473269793 2.040873269492875 1.0204366347464375 0.04014555235557765 166.92243604044648 13.0 20.266399267107513 15.509764684929745 112.63065620256117 -5.129141535958461 photutils-0.4/photutils/detection/tests/data/irafstarfind_test_thresh10.0_fwhm01.0.txt0000644000214200020070000000121612415340320033321 0ustar lbradleySTSCI\science00000000000000id xcentroid ycentroid fwhm sharpness roundness pa npix sky peak flux mag 1 145.0334216330022 168.39799687136576 1.8918921328609848 1.8918921328609848 0.028604480029024524 118.25194333308633 12.0 16.46507005975501 66.92444270511594 406.86074671794165 -6.523614479572233 2 394.7628492905675 187.59049488469253 1.8259269954390203 1.8259269954390203 0.10712610861905057 131.1948064263936 11.0 20.151710346274722 90.92042928130846 527.6471044360048 -6.80585889802106 3 355.940683491333 251.70235184396 1.7522952194564725 1.7522952194564725 0.11598331430783908 52.09576852830077 10.0 
19.585761501344383 16.190402450692876 88.75108703293594 -4.870434202614015 photutils-0.4/photutils/detection/tests/data/irafstarfind_test_thresh10.0_fwhm01.5.txt0000644000214200020070000000262712415340320033335 0ustar lbradleySTSCI\science00000000000000id xcentroid ycentroid fwhm sharpness roundness pa npix sky peak flux mag 1 71.21785006913522 111.76487474565829 2.041287275833218 1.360858183888812 0.13679659319094278 77.80260478241559 11.0 21.001696161595063 14.089700776947538 107.15742741630842 -5.075055696823615 2 290.88594008616707 113.76868287298815 2.0132511884469353 1.3421674589646235 0.09921593553744701 172.63151133250068 12.0 21.19225795812126 25.253834415786486 157.2638285640319 -5.491572110730435 3 145.04228403850357 168.6571618278717 1.9066993974327817 1.2711329316218545 0.01132060172865575 34.75701626174368 12.0 16.35083410366516 67.03867866120578 413.9858186416701 -6.5424636608266695 4 394.7628492905675 187.59049488469253 1.8259269954390203 1.2172846636260135 0.10712610861905057 131.1948064263936 11.0 20.151710346274722 90.92042928130846 527.6471044360048 -6.80585889802106 5 48.84191877412477 200.23603953697418 2.044906279964065 1.3632708533093767 0.15397555058991969 8.669913148933764 13.0 21.891667190520494 25.397623953819803 185.00884809430943 -5.667981247854074 6 305.8226567441267 215.56678011460988 1.8118994096837677 1.2079329397891785 0.15302459233581697 71.90351161795856 10.0 16.561617853554058 12.39941531540315 70.11439579638966 -4.614517989747707 7 355.940683491333 251.70235184396 1.7522952194564725 1.1681968129709817 0.11598331430783908 52.09576852830077 10.0 19.585761501344383 16.190402450692876 88.75108703293594 -4.870434202614015 photutils-0.4/photutils/detection/tests/data/irafstarfind_test_thresh10.0_fwhm02.0.txt0000644000214200020070000000374712415340321033336 0ustar lbradleySTSCI\science00000000000000id xcentroid ycentroid fwhm sharpness roundness pa npix sky peak flux mag 1 0.9463803041037677 40.05930171275073 2.1172882811901297 1.0586441405950648 0.15700998967332774 103.73294282811226 12.0 12.639904993835687 22.908381306356247 185.7046678317134 -5.672057050519047 2 71.21785006913522 111.76487474565829 2.041287275833218 1.020643637916609 0.13679659319094278 77.80260478241559 11.0 21.001696161595063 14.089700776947538 107.15742741630842 -5.075055696823615 3 290.88594008616707 113.76868287298815 2.0132511884469353 1.0066255942234676 0.09921593553744701 172.63151133250068 12.0 21.19225795812126 25.253834415786486 157.2638285640319 -5.491572110730435 4 344.3860440796261 166.22517276873882 1.8853557992763632 0.9426778996381816 0.11363886565891375 142.91674058093935 11.0 18.685078458637985 11.30249791672627 62.9878292683161 -4.498141604100731 5 145.04228403850357 168.6571618278717 1.9066993974327817 0.9533496987163909 0.01132060172865575 34.75701626174368 12.0 16.35083410366516 67.03867866120578 413.9858186416701 -6.5424636608266695 6 394.77243654370204 187.3653258347458 1.8382851657518942 0.9191425828759471 0.07664537864845197 57.84086299955225 11.0 20.06339188622815 91.00874774135504 535.3679088447971 -6.821630837610465 7 48.84191877412477 200.23603953697418 2.044906279964065 1.0224531399820325 0.15397555058991969 8.669913148933764 13.0 21.891667190520494 25.397623953819803 185.00884809430943 -5.667981247854074 8 305.8226567441267 215.56678011460988 1.8118994096837677 0.9059497048418839 0.15302459233581697 71.90351161795856 10.0 16.561617853554058 12.39941531540315 70.11439579638966 -4.614517989747707 9 292.1680865716507 245.66007865305278 1.9132639276510426 
0.9566319638255213 0.13981297941973697 131.64176988617768 12.0 17.382922811106205 26.281111155294354 151.39790010868757 -5.450299628835927 10 355.815359095706 252.07315473269793 2.040873269492875 1.0204366347464375 0.04014555235557765 166.92243604044648 13.0 20.266399267107513 15.509764684929745 112.63065620256117 -5.129141535958461 photutils-0.4/photutils/detection/tests/test_core.py0000644000214200020070000001527613175634532025333 0ustar lbradleySTSCI\science00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst from __future__ import (absolute_import, division, print_function, unicode_literals) import numpy as np from numpy.testing import assert_array_equal, assert_allclose import pytest from ..core import detect_threshold, find_peaks from ...datasets import make_4gaussians_image, make_wcs try: import scipy # noqa HAS_SCIPY = True except ImportError: HAS_SCIPY = False try: import skimage # noqa HAS_SKIMAGE = True except ImportError: HAS_SKIMAGE = False DATA = np.array([[0, 1, 0], [0, 2, 0], [0, 0, 0]]).astype(np.float) REF1 = np.array([[0, 0, 0], [0, 1, 0], [0, 0, 0]]) PEAKDATA = np.array([[1, 0, 0], [0, 0, 0], [0, 0, 1]]).astype(np.float) PEAKREF1 = np.array([[0, 0], [2, 2]]) @pytest.mark.skipif('not HAS_SCIPY') class TestDetectThreshold(object): def test_snr(self): """Test basic snr.""" threshold = detect_threshold(DATA, snr=0.1) ref = 0.4 * np.ones((3, 3)) assert_allclose(threshold, ref) def test_snr_zero(self): """Test snr=0.""" threshold = detect_threshold(DATA, snr=0.0) ref = (1. / 3.) * np.ones((3, 3)) assert_allclose(threshold, ref) def test_background(self): threshold = detect_threshold(DATA, snr=1.0, background=1) ref = (5. / 3.) * np.ones((3, 3)) assert_allclose(threshold, ref) def test_background_image(self): background = np.ones((3, 3)) threshold = detect_threshold(DATA, snr=1.0, background=background) ref = (5. / 3.) * np.ones((3, 3)) assert_allclose(threshold, ref) def test_background_badshape(self): wrong_shape = np.zeros((2, 2)) with pytest.raises(ValueError): detect_threshold(DATA, snr=2., background=wrong_shape) def test_error(self): threshold = detect_threshold(DATA, snr=1.0, error=1) ref = (4. / 3.) * np.ones((3, 3)) assert_allclose(threshold, ref) def test_error_image(self): error = np.ones((3, 3)) threshold = detect_threshold(DATA, snr=1.0, error=error) ref = (4. / 3.) * np.ones((3, 3)) assert_allclose(threshold, ref) def test_error_badshape(self): wrong_shape = np.zeros((2, 2)) with pytest.raises(ValueError): detect_threshold(DATA, snr=2., error=wrong_shape) def test_background_error(self): threshold = detect_threshold(DATA, snr=2.0, background=10., error=1.) ref = 12. * np.ones((3, 3)) assert_allclose(threshold, ref) def test_background_error_images(self): background = np.ones((3, 3)) * 10. error = np.ones((3, 3)) threshold = detect_threshold(DATA, snr=2.0, background=background, error=error) ref = 12. * np.ones((3, 3)) assert_allclose(threshold, ref) def test_mask_value(self): """Test detection with mask_value.""" threshold = detect_threshold(DATA, snr=1.0, mask_value=0.0) ref = 2. * np.ones((3, 3)) assert_array_equal(threshold, ref) def test_image_mask(self): """ Test detection with image_mask. sig=10 and iters=1 to prevent sigma clipping after applying the mask. """ mask = REF1.astype(np.bool) threshold = detect_threshold(DATA, snr=1., error=0, mask=mask, sigclip_sigma=10, sigclip_iters=1) ref = (1. / 8.) 
* np.ones((3, 3)) assert_array_equal(threshold, ref) def test_image_mask_override(self): """Test that image_mask overrides mask_value.""" mask = REF1.astype(np.bool) threshold = detect_threshold(DATA, snr=0.1, error=0, mask_value=0.0, mask=mask, sigclip_sigma=10, sigclip_iters=1) ref = np.ones((3, 3)) assert_array_equal(threshold, ref) @pytest.mark.skipif('not HAS_SCIPY') @pytest.mark.skipif('not HAS_SKIMAGE') class TestFindPeaks(object): def test_box_size(self): """Test with box_size.""" tbl = find_peaks(PEAKDATA, 0.1, box_size=3) assert_array_equal(tbl['x_peak'], PEAKREF1[:, 1]) assert_array_equal(tbl['y_peak'], PEAKREF1[:, 0]) assert_array_equal(tbl['peak_value'], [1., 1.]) def test_footprint(self): """Test with footprint.""" tbl = find_peaks(PEAKDATA, 0.1, footprint=np.ones((3, 3))) assert_array_equal(tbl['x_peak'], PEAKREF1[:, 1]) assert_array_equal(tbl['y_peak'], PEAKREF1[:, 0]) assert_array_equal(tbl['peak_value'], [1., 1.]) def test_subpixel_regionsize(self): """Test that data cutout has at least 6 values.""" with pytest.raises(ValueError): find_peaks(PEAKDATA, 0.1, box_size=2, subpixel=True) def test_mask(self): """Test with mask.""" mask = np.zeros_like(PEAKDATA, dtype=bool) mask[0, 0] = True tbl = find_peaks(PEAKDATA, 0.1, box_size=3, mask=mask) assert len(tbl) == 1 assert_array_equal(tbl['x_peak'], PEAKREF1[1, 0]) assert_array_equal(tbl['y_peak'], PEAKREF1[1, 1]) assert_array_equal(tbl['peak_value'], 1.0) def test_maskshape(self): """Test if make shape doesn't match data shape.""" with pytest.raises(ValueError): find_peaks(PEAKDATA, 0.1, mask=np.ones((5, 5))) def test_npeaks(self): """Test npeaks.""" tbl = find_peaks(PEAKDATA, 0.1, box_size=3, npeaks=1) assert_array_equal(tbl['x_peak'], PEAKREF1[1, 1]) assert_array_equal(tbl['y_peak'], PEAKREF1[1, 0]) def test_border_width(self): """Test border exclusion.""" tbl = find_peaks(PEAKDATA, 0.1, box_size=3, border_width=3) assert_array_equal(len(tbl), 0) def test_zerodet(self): """Test with large threshold giving no sources.""" tbl = find_peaks(PEAKDATA, 5., box_size=3, border_width=3) assert_array_equal(len(tbl), 0) def test_constant_data(self): """Test constant data.""" tbl = find_peaks(np.ones((5, 5)), 0.1, box_size=3.) assert_array_equal(len(tbl), 0) def test_box_size_int(self): """Test non-integer box_size.""" tbl1 = find_peaks(PEAKDATA, 0.1, box_size=5.) 
tbl2 = find_peaks(PEAKDATA, 0.1, box_size=5.5) assert_array_equal(tbl1, tbl2) def test_wcs(self): """Test with WCS.""" data = make_4gaussians_image() wcs = make_wcs(data.shape) tbl = find_peaks(data, 100, wcs=wcs, subpixel=True) cols = ['icrs_ra_peak', 'icrs_dec_peak', 'icrs_ra_centroid', 'icrs_dec_centroid'] for col in cols: assert col in tbl.colnames photutils-0.4/photutils/detection/tests/test_findstars.py0000644000214200020070000001117313175634532026370 0ustar lbradleySTSCI\science00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst from __future__ import (absolute_import, division, print_function, unicode_literals) import os.path as op import itertools import warnings import numpy as np from numpy.testing import assert_allclose import pytest from astropy.table import Table from astropy.utils.exceptions import AstropyUserWarning from ..findstars import DAOStarFinder, IRAFStarFinder from ...datasets import make_100gaussians_image try: import scipy # noqa HAS_SCIPY = True except ImportError: HAS_SCIPY = False try: import skimage # noqa HAS_SKIMAGE = True except ImportError: HAS_SKIMAGE = False DATA = make_100gaussians_image() THRESHOLDS = [8.0, 10.0] FWHMS = [1.0, 1.5, 2.0] warnings.simplefilter('always', AstropyUserWarning) @pytest.mark.skipif('not HAS_SCIPY') @pytest.mark.skipif('not HAS_SKIMAGE') class TestDAOStarFinder(object): @pytest.mark.parametrize(('threshold', 'fwhm'), list(itertools.product(THRESHOLDS, FWHMS))) def test_daofind(self, threshold, fwhm): starfinder = DAOStarFinder(threshold, fwhm, sigma_radius=1.5) t = starfinder(DATA) datafn = ('daofind_test_thresh{0:04.1f}_fwhm{1:04.1f}' '.txt'.format(threshold, fwhm)) datafn = op.join(op.dirname(op.abspath(__file__)), 'data', datafn) t_ref = Table.read(datafn, format='ascii') assert_allclose(np.array(t).astype(np.float), np.array(t_ref).astype(np.float)) def test_daofind_include_border(self): starfinder = DAOStarFinder(threshold=10, fwhm=2, sigma_radius=1.5, exclude_border=False) t = starfinder(DATA) assert len(t) == 20 def test_daofind_exclude_border(self): starfinder = DAOStarFinder(threshold=10, fwhm=2, sigma_radius=1.5, exclude_border=True) t = starfinder(DATA) assert len(t) == 19 def test_daofind_nosources(self): data = np.ones((3, 3)) starfinder = DAOStarFinder(threshold=10, fwhm=1) t = starfinder(data) assert len(t) == 0 def test_daofind_sharpness(self): """Sources found, but none pass the sharpness criteria.""" starfinder = DAOStarFinder(threshold=50, fwhm=1.0, sharplo=1.) t = starfinder(DATA) assert len(t) == 0 def test_daofind_roundness(self): """Sources found, but none pass the roundness criteria.""" starfinder = DAOStarFinder(threshold=50, fwhm=1.0, roundlo=1.) t = starfinder(DATA) assert len(t) == 0 def test_daofind_flux_negative(self): """Test handling of negative flux (here created by large sky).""" data = np.ones((5, 5)) data[2, 2] = 10. 
starfinder = DAOStarFinder(threshold=0.1, fwhm=1.0, sky=10) t = starfinder(data) assert not np.isfinite(t['mag']) @pytest.mark.skipif('not HAS_SCIPY') @pytest.mark.skipif('not HAS_SKIMAGE') class TestIRAFStarFinder(object): @pytest.mark.parametrize(('threshold', 'fwhm'), list(itertools.product(THRESHOLDS, FWHMS))) def test_irafstarfind(self, threshold, fwhm): starfinder = IRAFStarFinder(threshold, fwhm, sigma_radius=1.5) t = starfinder(DATA) datafn = ('irafstarfind_test_thresh{0:04.1f}_fwhm{1:04.1f}' '.txt'.format(threshold, fwhm)) datafn = op.join(op.dirname(op.abspath(__file__)), 'data', datafn) t_ref = Table.read(datafn, format='ascii') assert_allclose(np.array(t).astype(np.float), np.array(t_ref).astype(np.float)) def test_irafstarfind_nosources(self): data = np.ones((3, 3)) starfinder = IRAFStarFinder(threshold=10, fwhm=1) t = starfinder(data) assert len(t) == 0 def test_irafstarfind_sharpness(self): """Sources found, but none pass the sharpness criteria.""" starfinder = IRAFStarFinder(threshold=50, fwhm=1.0, sharplo=2.) t = starfinder(DATA) assert len(t) == 0 def test_irafstarfind_roundness(self): """Sources found, but none pass the roundness criteria.""" starfinder = IRAFStarFinder(threshold=50, fwhm=1.0, roundlo=1.) t = starfinder(DATA) assert len(t) == 0 def test_irafstarfind_sky(self): starfinder = IRAFStarFinder(threshold=25.0, fwhm=2.0, sky=10.) t = starfinder(DATA) assert len(t) == 4 def test_irafstarfind_largesky(self): starfinder = IRAFStarFinder(threshold=25.0, fwhm=2.0, sky=100.) t = starfinder(DATA) assert len(t) == 0 photutils-0.4/photutils/extern/0000755000214200020070000000000013175654702021145 5ustar lbradleySTSCI\science00000000000000photutils-0.4/photutils/extern/__init__.py0000644000214200020070000000061512345377273023263 0ustar lbradleySTSCI\science00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst """ This packages contains python packages that are bundled with the affiliated package but are external to the affiliated package, and hence are developed in a separate source tree. Note that this package is distinct from the /cextern directory of the source code distribution, as that directory only contains C extension code. """ photutils-0.4/photutils/geometry/0000755000214200020070000000000013175654702021473 5ustar lbradleySTSCI\science00000000000000photutils-0.4/photutils/geometry/__init__.py0000644000214200020070000000055613175634532023611 0ustar lbradleySTSCI\science00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst """ Geometry subpackage for low-level geometry functions. """ from .circular_overlap import * # noqa from .elliptical_overlap import * # noqa from .rectangular_overlap import * # noqa __all__ = ['circular_overlap_grid', 'elliptical_overlap_grid', 'rectangular_overlap_grid'] photutils-0.4/photutils/geometry/circular_overlap.c0000644000214200020070000133425713175654677025225 0ustar lbradleySTSCI\science00000000000000/* Generated by Cython 0.27.2 */ #define PY_SSIZE_T_CLEAN #include "Python.h" #ifndef Py_PYTHON_H #error Python headers needed to compile C extensions, please install development version of Python. #elif PY_VERSION_HEX < 0x02060000 || (0x03000000 <= PY_VERSION_HEX && PY_VERSION_HEX < 0x03030000) #error Cython requires Python 2.6+ or Python 3.3+. 
#else #define CYTHON_ABI "0_27_2" #define CYTHON_FUTURE_DIVISION 1 #include #ifndef offsetof #define offsetof(type, member) ( (size_t) & ((type*)0) -> member ) #endif #if !defined(WIN32) && !defined(MS_WINDOWS) #ifndef __stdcall #define __stdcall #endif #ifndef __cdecl #define __cdecl #endif #ifndef __fastcall #define __fastcall #endif #endif #ifndef DL_IMPORT #define DL_IMPORT(t) t #endif #ifndef DL_EXPORT #define DL_EXPORT(t) t #endif #define __PYX_COMMA , #ifndef HAVE_LONG_LONG #if PY_VERSION_HEX >= 0x02070000 #define HAVE_LONG_LONG #endif #endif #ifndef PY_LONG_LONG #define PY_LONG_LONG LONG_LONG #endif #ifndef Py_HUGE_VAL #define Py_HUGE_VAL HUGE_VAL #endif #ifdef PYPY_VERSION #define CYTHON_COMPILING_IN_PYPY 1 #define CYTHON_COMPILING_IN_PYSTON 0 #define CYTHON_COMPILING_IN_CPYTHON 0 #undef CYTHON_USE_TYPE_SLOTS #define CYTHON_USE_TYPE_SLOTS 0 #undef CYTHON_USE_PYTYPE_LOOKUP #define CYTHON_USE_PYTYPE_LOOKUP 0 #if PY_VERSION_HEX < 0x03050000 #undef CYTHON_USE_ASYNC_SLOTS #define CYTHON_USE_ASYNC_SLOTS 0 #elif !defined(CYTHON_USE_ASYNC_SLOTS) #define CYTHON_USE_ASYNC_SLOTS 1 #endif #undef CYTHON_USE_PYLIST_INTERNALS #define CYTHON_USE_PYLIST_INTERNALS 0 #undef CYTHON_USE_UNICODE_INTERNALS #define CYTHON_USE_UNICODE_INTERNALS 0 #undef CYTHON_USE_UNICODE_WRITER #define CYTHON_USE_UNICODE_WRITER 0 #undef CYTHON_USE_PYLONG_INTERNALS #define CYTHON_USE_PYLONG_INTERNALS 0 #undef CYTHON_AVOID_BORROWED_REFS #define CYTHON_AVOID_BORROWED_REFS 1 #undef CYTHON_ASSUME_SAFE_MACROS #define CYTHON_ASSUME_SAFE_MACROS 0 #undef CYTHON_UNPACK_METHODS #define CYTHON_UNPACK_METHODS 0 #undef CYTHON_FAST_THREAD_STATE #define CYTHON_FAST_THREAD_STATE 0 #undef CYTHON_FAST_PYCALL #define CYTHON_FAST_PYCALL 0 #undef CYTHON_PEP489_MULTI_PHASE_INIT #define CYTHON_PEP489_MULTI_PHASE_INIT 0 #undef CYTHON_USE_TP_FINALIZE #define CYTHON_USE_TP_FINALIZE 0 #elif defined(PYSTON_VERSION) #define CYTHON_COMPILING_IN_PYPY 0 #define CYTHON_COMPILING_IN_PYSTON 1 #define CYTHON_COMPILING_IN_CPYTHON 0 #ifndef CYTHON_USE_TYPE_SLOTS #define CYTHON_USE_TYPE_SLOTS 1 #endif #undef CYTHON_USE_PYTYPE_LOOKUP #define CYTHON_USE_PYTYPE_LOOKUP 0 #undef CYTHON_USE_ASYNC_SLOTS #define CYTHON_USE_ASYNC_SLOTS 0 #undef CYTHON_USE_PYLIST_INTERNALS #define CYTHON_USE_PYLIST_INTERNALS 0 #ifndef CYTHON_USE_UNICODE_INTERNALS #define CYTHON_USE_UNICODE_INTERNALS 1 #endif #undef CYTHON_USE_UNICODE_WRITER #define CYTHON_USE_UNICODE_WRITER 0 #undef CYTHON_USE_PYLONG_INTERNALS #define CYTHON_USE_PYLONG_INTERNALS 0 #ifndef CYTHON_AVOID_BORROWED_REFS #define CYTHON_AVOID_BORROWED_REFS 0 #endif #ifndef CYTHON_ASSUME_SAFE_MACROS #define CYTHON_ASSUME_SAFE_MACROS 1 #endif #ifndef CYTHON_UNPACK_METHODS #define CYTHON_UNPACK_METHODS 1 #endif #undef CYTHON_FAST_THREAD_STATE #define CYTHON_FAST_THREAD_STATE 0 #undef CYTHON_FAST_PYCALL #define CYTHON_FAST_PYCALL 0 #undef CYTHON_PEP489_MULTI_PHASE_INIT #define CYTHON_PEP489_MULTI_PHASE_INIT 0 #undef CYTHON_USE_TP_FINALIZE #define CYTHON_USE_TP_FINALIZE 0 #else #define CYTHON_COMPILING_IN_PYPY 0 #define CYTHON_COMPILING_IN_PYSTON 0 #define CYTHON_COMPILING_IN_CPYTHON 1 #ifndef CYTHON_USE_TYPE_SLOTS #define CYTHON_USE_TYPE_SLOTS 1 #endif #if PY_VERSION_HEX < 0x02070000 #undef CYTHON_USE_PYTYPE_LOOKUP #define CYTHON_USE_PYTYPE_LOOKUP 0 #elif !defined(CYTHON_USE_PYTYPE_LOOKUP) #define CYTHON_USE_PYTYPE_LOOKUP 1 #endif #if PY_MAJOR_VERSION < 3 #undef CYTHON_USE_ASYNC_SLOTS #define CYTHON_USE_ASYNC_SLOTS 0 #elif !defined(CYTHON_USE_ASYNC_SLOTS) #define CYTHON_USE_ASYNC_SLOTS 1 #endif #if PY_VERSION_HEX < 0x02070000 
#undef CYTHON_USE_PYLONG_INTERNALS #define CYTHON_USE_PYLONG_INTERNALS 0 #elif !defined(CYTHON_USE_PYLONG_INTERNALS) #define CYTHON_USE_PYLONG_INTERNALS 1 #endif #ifndef CYTHON_USE_PYLIST_INTERNALS #define CYTHON_USE_PYLIST_INTERNALS 1 #endif #ifndef CYTHON_USE_UNICODE_INTERNALS #define CYTHON_USE_UNICODE_INTERNALS 1 #endif #if PY_VERSION_HEX < 0x030300F0 #undef CYTHON_USE_UNICODE_WRITER #define CYTHON_USE_UNICODE_WRITER 0 #elif !defined(CYTHON_USE_UNICODE_WRITER) #define CYTHON_USE_UNICODE_WRITER 1 #endif #ifndef CYTHON_AVOID_BORROWED_REFS #define CYTHON_AVOID_BORROWED_REFS 0 #endif #ifndef CYTHON_ASSUME_SAFE_MACROS #define CYTHON_ASSUME_SAFE_MACROS 1 #endif #ifndef CYTHON_UNPACK_METHODS #define CYTHON_UNPACK_METHODS 1 #endif #ifndef CYTHON_FAST_THREAD_STATE #define CYTHON_FAST_THREAD_STATE 1 #endif #ifndef CYTHON_FAST_PYCALL #define CYTHON_FAST_PYCALL 1 #endif #ifndef CYTHON_PEP489_MULTI_PHASE_INIT #define CYTHON_PEP489_MULTI_PHASE_INIT (0 && PY_VERSION_HEX >= 0x03050000) #endif #ifndef CYTHON_USE_TP_FINALIZE #define CYTHON_USE_TP_FINALIZE (PY_VERSION_HEX >= 0x030400a1) #endif #endif #if !defined(CYTHON_FAST_PYCCALL) #define CYTHON_FAST_PYCCALL (CYTHON_FAST_PYCALL && PY_VERSION_HEX >= 0x030600B1) #endif #if CYTHON_USE_PYLONG_INTERNALS #include "longintrepr.h" #undef SHIFT #undef BASE #undef MASK #endif #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX < 0x02070600 && !defined(Py_OptimizeFlag) #define Py_OptimizeFlag 0 #endif #define __PYX_BUILD_PY_SSIZE_T "n" #define CYTHON_FORMAT_SSIZE_T "z" #if PY_MAJOR_VERSION < 3 #define __Pyx_BUILTIN_MODULE_NAME "__builtin__" #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ PyCode_New(a+k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) #define __Pyx_DefaultClassType PyClass_Type #else #define __Pyx_BUILTIN_MODULE_NAME "builtins" #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) #define __Pyx_DefaultClassType PyType_Type #endif #ifndef Py_TPFLAGS_CHECKTYPES #define Py_TPFLAGS_CHECKTYPES 0 #endif #ifndef Py_TPFLAGS_HAVE_INDEX #define Py_TPFLAGS_HAVE_INDEX 0 #endif #ifndef Py_TPFLAGS_HAVE_NEWBUFFER #define Py_TPFLAGS_HAVE_NEWBUFFER 0 #endif #ifndef Py_TPFLAGS_HAVE_FINALIZE #define Py_TPFLAGS_HAVE_FINALIZE 0 #endif #if PY_VERSION_HEX < 0x030700A0 || !defined(METH_FASTCALL) #ifndef METH_FASTCALL #define METH_FASTCALL 0x80 #endif typedef PyObject *(*__Pyx_PyCFunctionFast) (PyObject *self, PyObject **args, Py_ssize_t nargs); typedef PyObject *(*__Pyx_PyCFunctionFastWithKeywords) (PyObject *self, PyObject **args, Py_ssize_t nargs, PyObject *kwnames); #else #define __Pyx_PyCFunctionFast _PyCFunctionFast #define __Pyx_PyCFunctionFastWithKeywords _PyCFunctionFastWithKeywords #endif #if CYTHON_FAST_PYCCALL #define __Pyx_PyFastCFunction_Check(func)\ ((PyCFunction_Check(func) && (METH_FASTCALL == (PyCFunction_GET_FLAGS(func) & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS))))) #else #define __Pyx_PyFastCFunction_Check(func) 0 #endif #if !CYTHON_FAST_THREAD_STATE || PY_VERSION_HEX < 0x02070000 #define __Pyx_PyThreadState_Current PyThreadState_GET() #elif PY_VERSION_HEX >= 0x03060000 #define __Pyx_PyThreadState_Current _PyThreadState_UncheckedGet() #elif PY_VERSION_HEX >= 0x03000000 #define __Pyx_PyThreadState_Current PyThreadState_GET() #else #define __Pyx_PyThreadState_Current _PyThreadState_Current #endif #if CYTHON_COMPILING_IN_CPYTHON || defined(_PyDict_NewPresized) #define 
__Pyx_PyDict_NewPresized(n) ((n <= 8) ? PyDict_New() : _PyDict_NewPresized(n)) #else #define __Pyx_PyDict_NewPresized(n) PyDict_New() #endif #if PY_MAJOR_VERSION >= 3 || CYTHON_FUTURE_DIVISION #define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y) #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y) #else #define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y) #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y) #endif #if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND) #define CYTHON_PEP393_ENABLED 1 #define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ?\ 0 : _PyUnicode_Ready((PyObject *)(op))) #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_LENGTH(u) #define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i) #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) PyUnicode_MAX_CHAR_VALUE(u) #define __Pyx_PyUnicode_KIND(u) PyUnicode_KIND(u) #define __Pyx_PyUnicode_DATA(u) PyUnicode_DATA(u) #define __Pyx_PyUnicode_READ(k, d, i) PyUnicode_READ(k, d, i) #define __Pyx_PyUnicode_WRITE(k, d, i, ch) PyUnicode_WRITE(k, d, i, ch) #define __Pyx_PyUnicode_IS_TRUE(u) (0 != (likely(PyUnicode_IS_READY(u)) ? PyUnicode_GET_LENGTH(u) : PyUnicode_GET_SIZE(u))) #else #define CYTHON_PEP393_ENABLED 0 #define PyUnicode_1BYTE_KIND 1 #define PyUnicode_2BYTE_KIND 2 #define PyUnicode_4BYTE_KIND 4 #define __Pyx_PyUnicode_READY(op) (0) #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_SIZE(u) #define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i])) #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) ((sizeof(Py_UNICODE) == 2) ? 65535 : 1114111) #define __Pyx_PyUnicode_KIND(u) (sizeof(Py_UNICODE)) #define __Pyx_PyUnicode_DATA(u) ((void*)PyUnicode_AS_UNICODE(u)) #define __Pyx_PyUnicode_READ(k, d, i) ((void)(k), (Py_UCS4)(((Py_UNICODE*)d)[i])) #define __Pyx_PyUnicode_WRITE(k, d, i, ch) (((void)(k)), ((Py_UNICODE*)d)[i] = ch) #define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_SIZE(u)) #endif #if CYTHON_COMPILING_IN_PYPY #define __Pyx_PyUnicode_Concat(a, b) PyNumber_Add(a, b) #define __Pyx_PyUnicode_ConcatSafe(a, b) PyNumber_Add(a, b) #else #define __Pyx_PyUnicode_Concat(a, b) PyUnicode_Concat(a, b) #define __Pyx_PyUnicode_ConcatSafe(a, b) ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ?\ PyNumber_Add(a, b) : __Pyx_PyUnicode_Concat(a, b)) #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyUnicode_Contains) #define PyUnicode_Contains(u, s) PySequence_Contains(u, s) #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyByteArray_Check) #define PyByteArray_Check(obj) PyObject_TypeCheck(obj, &PyByteArray_Type) #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Format) #define PyObject_Format(obj, fmt) PyObject_CallMethod(obj, "__format__", "O", fmt) #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Malloc) #define PyObject_Malloc(s) PyMem_Malloc(s) #define PyObject_Free(p) PyMem_Free(p) #define PyObject_Realloc(p) PyMem_Realloc(p) #endif #if CYTHON_COMPILING_IN_PYSTON #define __Pyx_PyCode_HasFreeVars(co) PyCode_HasFreeVars(co) #define __Pyx_PyFrame_SetLineNumber(frame, lineno) PyFrame_SetLineNumber(frame, lineno) #else #define __Pyx_PyCode_HasFreeVars(co) (PyCode_GetNumFree(co) > 0) #define __Pyx_PyFrame_SetLineNumber(frame, lineno) (frame)->f_lineno = (lineno) #endif #define __Pyx_PyString_FormatSafe(a, b) ((unlikely((a) == Py_None)) ? PyNumber_Remainder(a, b) : __Pyx_PyString_Format(a, b)) #define __Pyx_PyUnicode_FormatSafe(a, b) ((unlikely((a) == Py_None)) ? 
PyNumber_Remainder(a, b) : PyUnicode_Format(a, b)) #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyString_Format(a, b) PyUnicode_Format(a, b) #else #define __Pyx_PyString_Format(a, b) PyString_Format(a, b) #endif #if PY_MAJOR_VERSION < 3 && !defined(PyObject_ASCII) #define PyObject_ASCII(o) PyObject_Repr(o) #endif #if PY_MAJOR_VERSION >= 3 #define PyBaseString_Type PyUnicode_Type #define PyStringObject PyUnicodeObject #define PyString_Type PyUnicode_Type #define PyString_Check PyUnicode_Check #define PyString_CheckExact PyUnicode_CheckExact #endif #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyBaseString_Check(obj) PyUnicode_Check(obj) #define __Pyx_PyBaseString_CheckExact(obj) PyUnicode_CheckExact(obj) #else #define __Pyx_PyBaseString_Check(obj) (PyString_Check(obj) || PyUnicode_Check(obj)) #define __Pyx_PyBaseString_CheckExact(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj)) #endif #ifndef PySet_CheckExact #define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type) #endif #define __Pyx_PyException_Check(obj) __Pyx_TypeCheck(obj, PyExc_Exception) #if PY_MAJOR_VERSION >= 3 #define PyIntObject PyLongObject #define PyInt_Type PyLong_Type #define PyInt_Check(op) PyLong_Check(op) #define PyInt_CheckExact(op) PyLong_CheckExact(op) #define PyInt_FromString PyLong_FromString #define PyInt_FromUnicode PyLong_FromUnicode #define PyInt_FromLong PyLong_FromLong #define PyInt_FromSize_t PyLong_FromSize_t #define PyInt_FromSsize_t PyLong_FromSsize_t #define PyInt_AsLong PyLong_AsLong #define PyInt_AS_LONG PyLong_AS_LONG #define PyInt_AsSsize_t PyLong_AsSsize_t #define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask #define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask #define PyNumber_Int PyNumber_Long #endif #if PY_MAJOR_VERSION >= 3 #define PyBoolObject PyLongObject #endif #if PY_MAJOR_VERSION >= 3 && CYTHON_COMPILING_IN_PYPY #ifndef PyUnicode_InternFromString #define PyUnicode_InternFromString(s) PyUnicode_FromString(s) #endif #endif #if PY_VERSION_HEX < 0x030200A4 typedef long Py_hash_t; #define __Pyx_PyInt_FromHash_t PyInt_FromLong #define __Pyx_PyInt_AsHash_t PyInt_AsLong #else #define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t #define __Pyx_PyInt_AsHash_t PyInt_AsSsize_t #endif #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyMethod_New(func, self, klass) ((self) ? 
PyMethod_New(func, self) : PyInstanceMethod_New(func)) #else #define __Pyx_PyMethod_New(func, self, klass) PyMethod_New(func, self, klass) #endif #ifndef __has_attribute #define __has_attribute(x) 0 #endif #ifndef __has_cpp_attribute #define __has_cpp_attribute(x) 0 #endif #if CYTHON_USE_ASYNC_SLOTS #if PY_VERSION_HEX >= 0x030500B1 #define __Pyx_PyAsyncMethodsStruct PyAsyncMethods #define __Pyx_PyType_AsAsync(obj) (Py_TYPE(obj)->tp_as_async) #else #define __Pyx_PyType_AsAsync(obj) ((__Pyx_PyAsyncMethodsStruct*) (Py_TYPE(obj)->tp_reserved)) #endif #else #define __Pyx_PyType_AsAsync(obj) NULL #endif #ifndef __Pyx_PyAsyncMethodsStruct typedef struct { unaryfunc am_await; unaryfunc am_aiter; unaryfunc am_anext; } __Pyx_PyAsyncMethodsStruct; #endif #ifndef CYTHON_RESTRICT #if defined(__GNUC__) #define CYTHON_RESTRICT __restrict__ #elif defined(_MSC_VER) && _MSC_VER >= 1400 #define CYTHON_RESTRICT __restrict #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define CYTHON_RESTRICT restrict #else #define CYTHON_RESTRICT #endif #endif #ifndef CYTHON_UNUSED # if defined(__GNUC__) # if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) # define CYTHON_UNUSED __attribute__ ((__unused__)) # else # define CYTHON_UNUSED # endif # elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER)) # define CYTHON_UNUSED __attribute__ ((__unused__)) # else # define CYTHON_UNUSED # endif #endif #ifndef CYTHON_MAYBE_UNUSED_VAR # if defined(__cplusplus) template void CYTHON_MAYBE_UNUSED_VAR( const T& ) { } # else # define CYTHON_MAYBE_UNUSED_VAR(x) (void)(x) # endif #endif #ifndef CYTHON_NCP_UNUSED # if CYTHON_COMPILING_IN_CPYTHON # define CYTHON_NCP_UNUSED # else # define CYTHON_NCP_UNUSED CYTHON_UNUSED # endif #endif #define __Pyx_void_to_None(void_result) ((void)(void_result), Py_INCREF(Py_None), Py_None) #ifdef _MSC_VER #ifndef _MSC_STDINT_H_ #if _MSC_VER < 1300 typedef unsigned char uint8_t; typedef unsigned int uint32_t; #else typedef unsigned __int8 uint8_t; typedef unsigned __int32 uint32_t; #endif #endif #else #include #endif #ifndef CYTHON_FALLTHROUGH #if defined(__cplusplus) && __cplusplus >= 201103L #if __has_cpp_attribute(fallthrough) #define CYTHON_FALLTHROUGH [[fallthrough]] #elif __has_cpp_attribute(clang::fallthrough) #define CYTHON_FALLTHROUGH [[clang::fallthrough]] #elif __has_cpp_attribute(gnu::fallthrough) #define CYTHON_FALLTHROUGH [[gnu::fallthrough]] #endif #endif #ifndef CYTHON_FALLTHROUGH #if __has_attribute(fallthrough) #define CYTHON_FALLTHROUGH __attribute__((fallthrough)) #else #define CYTHON_FALLTHROUGH #endif #endif #if defined(__clang__ ) && defined(__apple_build_version__) #if __apple_build_version__ < 7000000 #undef CYTHON_FALLTHROUGH #define CYTHON_FALLTHROUGH #endif #endif #endif #ifndef CYTHON_INLINE #if defined(__clang__) #define CYTHON_INLINE __inline__ __attribute__ ((__unused__)) #elif defined(__GNUC__) #define CYTHON_INLINE __inline__ #elif defined(_MSC_VER) #define CYTHON_INLINE __inline #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define CYTHON_INLINE inline #else #define CYTHON_INLINE #endif #endif #if defined(WIN32) || defined(MS_WINDOWS) #define _USE_MATH_DEFINES #endif #include #ifdef NAN #define __PYX_NAN() ((float) NAN) #else static CYTHON_INLINE float __PYX_NAN() { float value; memset(&value, 0xFF, sizeof(value)); return value; } #endif #if defined(__CYGWIN__) && defined(_LDBL_EQ_DBL) #define __Pyx_truncl trunc #else #define __Pyx_truncl truncl #endif #define __PYX_ERR(f_index, lineno, 
Ln_error) \ { \ __pyx_filename = __pyx_f[f_index]; __pyx_lineno = lineno; __pyx_clineno = __LINE__; goto Ln_error; \ } #ifndef __PYX_EXTERN_C #ifdef __cplusplus #define __PYX_EXTERN_C extern "C" #else #define __PYX_EXTERN_C extern #endif #endif #define __PYX_HAVE__photutils__geometry__circular_overlap #define __PYX_HAVE_API__photutils__geometry__circular_overlap #include #include #include "numpy/arrayobject.h" #include "numpy/ufuncobject.h" #include "math.h" #ifdef _OPENMP #include #endif /* _OPENMP */ #if defined(PYREX_WITHOUT_ASSERTIONS) && !defined(CYTHON_WITHOUT_ASSERTIONS) #define CYTHON_WITHOUT_ASSERTIONS #endif typedef struct {PyObject **p; const char *s; const Py_ssize_t n; const char* encoding; const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry; #define __PYX_DEFAULT_STRING_ENCODING_IS_ASCII 0 #define __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT 0 #define __PYX_DEFAULT_STRING_ENCODING "" #define __Pyx_PyObject_FromString __Pyx_PyBytes_FromString #define __Pyx_PyObject_FromStringAndSize __Pyx_PyBytes_FromStringAndSize #define __Pyx_uchar_cast(c) ((unsigned char)c) #define __Pyx_long_cast(x) ((long)x) #define __Pyx_fits_Py_ssize_t(v, type, is_signed) (\ (sizeof(type) < sizeof(Py_ssize_t)) ||\ (sizeof(type) > sizeof(Py_ssize_t) &&\ likely(v < (type)PY_SSIZE_T_MAX ||\ v == (type)PY_SSIZE_T_MAX) &&\ (!is_signed || likely(v > (type)PY_SSIZE_T_MIN ||\ v == (type)PY_SSIZE_T_MIN))) ||\ (sizeof(type) == sizeof(Py_ssize_t) &&\ (is_signed || likely(v < (type)PY_SSIZE_T_MAX ||\ v == (type)PY_SSIZE_T_MAX))) ) #if defined (__cplusplus) && __cplusplus >= 201103L #include #define __Pyx_sst_abs(value) std::abs(value) #elif SIZEOF_INT >= SIZEOF_SIZE_T #define __Pyx_sst_abs(value) abs(value) #elif SIZEOF_LONG >= SIZEOF_SIZE_T #define __Pyx_sst_abs(value) labs(value) #elif defined (_MSC_VER) #define __Pyx_sst_abs(value) ((Py_ssize_t)_abs64(value)) #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define __Pyx_sst_abs(value) llabs(value) #elif defined (__GNUC__) #define __Pyx_sst_abs(value) __builtin_llabs(value) #else #define __Pyx_sst_abs(value) ((value<0) ? 
-value : value) #endif static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject*); static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject*, Py_ssize_t* length); #define __Pyx_PyByteArray_FromString(s) PyByteArray_FromStringAndSize((const char*)s, strlen((const char*)s)) #define __Pyx_PyByteArray_FromStringAndSize(s, l) PyByteArray_FromStringAndSize((const char*)s, l) #define __Pyx_PyBytes_FromString PyBytes_FromString #define __Pyx_PyBytes_FromStringAndSize PyBytes_FromStringAndSize static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char*); #if PY_MAJOR_VERSION < 3 #define __Pyx_PyStr_FromString __Pyx_PyBytes_FromString #define __Pyx_PyStr_FromStringAndSize __Pyx_PyBytes_FromStringAndSize #else #define __Pyx_PyStr_FromString __Pyx_PyUnicode_FromString #define __Pyx_PyStr_FromStringAndSize __Pyx_PyUnicode_FromStringAndSize #endif #define __Pyx_PyBytes_AsWritableString(s) ((char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsWritableSString(s) ((signed char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsWritableUString(s) ((unsigned char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsString(s) ((const char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsSString(s) ((const signed char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsUString(s) ((const unsigned char*) PyBytes_AS_STRING(s)) #define __Pyx_PyObject_AsWritableString(s) ((char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_AsWritableSString(s) ((signed char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_AsWritableUString(s) ((unsigned char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_AsSString(s) ((const signed char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_AsUString(s) ((const unsigned char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_FromCString(s) __Pyx_PyObject_FromString((const char*)s) #define __Pyx_PyBytes_FromCString(s) __Pyx_PyBytes_FromString((const char*)s) #define __Pyx_PyByteArray_FromCString(s) __Pyx_PyByteArray_FromString((const char*)s) #define __Pyx_PyStr_FromCString(s) __Pyx_PyStr_FromString((const char*)s) #define __Pyx_PyUnicode_FromCString(s) __Pyx_PyUnicode_FromString((const char*)s) static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const Py_UNICODE *u) { const Py_UNICODE *u_end = u; while (*u_end++) ; return (size_t)(u_end - u - 1); } #define __Pyx_PyUnicode_FromUnicode(u) PyUnicode_FromUnicode(u, __Pyx_Py_UNICODE_strlen(u)) #define __Pyx_PyUnicode_FromUnicodeAndLength PyUnicode_FromUnicode #define __Pyx_PyUnicode_AsUnicode PyUnicode_AsUnicode #define __Pyx_NewRef(obj) (Py_INCREF(obj), obj) #define __Pyx_Owned_Py_None(b) __Pyx_NewRef(Py_None) #define __Pyx_PyBool_FromLong(b) ((b) ? __Pyx_NewRef(Py_True) : __Pyx_NewRef(Py_False)) static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*); static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x); #define __Pyx_PySequence_Tuple(obj)\ (likely(PyTuple_CheckExact(obj)) ? __Pyx_NewRef(obj) : PySequence_Tuple(obj)) static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*); static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t); #if CYTHON_ASSUME_SAFE_MACROS #define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x)) #else #define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x) #endif #define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x)) #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyNumber_Int(x) (PyLong_CheckExact(x) ? 
__Pyx_NewRef(x) : PyNumber_Long(x)) #else #define __Pyx_PyNumber_Int(x) (PyInt_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Int(x)) #endif #define __Pyx_PyNumber_Float(x) (PyFloat_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Float(x)) #if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII static int __Pyx_sys_getdefaultencoding_not_ascii; static int __Pyx_init_sys_getdefaultencoding_params(void) { PyObject* sys; PyObject* default_encoding = NULL; PyObject* ascii_chars_u = NULL; PyObject* ascii_chars_b = NULL; const char* default_encoding_c; sys = PyImport_ImportModule("sys"); if (!sys) goto bad; default_encoding = PyObject_CallMethod(sys, (char*) "getdefaultencoding", NULL); Py_DECREF(sys); if (!default_encoding) goto bad; default_encoding_c = PyBytes_AsString(default_encoding); if (!default_encoding_c) goto bad; if (strcmp(default_encoding_c, "ascii") == 0) { __Pyx_sys_getdefaultencoding_not_ascii = 0; } else { char ascii_chars[128]; int c; for (c = 0; c < 128; c++) { ascii_chars[c] = c; } __Pyx_sys_getdefaultencoding_not_ascii = 1; ascii_chars_u = PyUnicode_DecodeASCII(ascii_chars, 128, NULL); if (!ascii_chars_u) goto bad; ascii_chars_b = PyUnicode_AsEncodedString(ascii_chars_u, default_encoding_c, NULL); if (!ascii_chars_b || !PyBytes_Check(ascii_chars_b) || memcmp(ascii_chars, PyBytes_AS_STRING(ascii_chars_b), 128) != 0) { PyErr_Format( PyExc_ValueError, "This module compiled with c_string_encoding=ascii, but default encoding '%.200s' is not a superset of ascii.", default_encoding_c); goto bad; } Py_DECREF(ascii_chars_u); Py_DECREF(ascii_chars_b); } Py_DECREF(default_encoding); return 0; bad: Py_XDECREF(default_encoding); Py_XDECREF(ascii_chars_u); Py_XDECREF(ascii_chars_b); return -1; } #endif #if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT && PY_MAJOR_VERSION >= 3 #define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_DecodeUTF8(c_str, size, NULL) #else #define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_Decode(c_str, size, __PYX_DEFAULT_STRING_ENCODING, NULL) #if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT static char* __PYX_DEFAULT_STRING_ENCODING; static int __Pyx_init_sys_getdefaultencoding_params(void) { PyObject* sys; PyObject* default_encoding = NULL; char* default_encoding_c; sys = PyImport_ImportModule("sys"); if (!sys) goto bad; default_encoding = PyObject_CallMethod(sys, (char*) (const char*) "getdefaultencoding", NULL); Py_DECREF(sys); if (!default_encoding) goto bad; default_encoding_c = PyBytes_AsString(default_encoding); if (!default_encoding_c) goto bad; __PYX_DEFAULT_STRING_ENCODING = (char*) malloc(strlen(default_encoding_c)); if (!__PYX_DEFAULT_STRING_ENCODING) goto bad; strcpy(__PYX_DEFAULT_STRING_ENCODING, default_encoding_c); Py_DECREF(default_encoding); return 0; bad: Py_XDECREF(default_encoding); return -1; } #endif #endif /* Test for GCC > 2.95 */ #if defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))) #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #else /* !__GNUC__ or GCC < 2.95 */ #define likely(x) (x) #define unlikely(x) (x) #endif /* __GNUC__ */ static CYTHON_INLINE void __Pyx_pretend_to_initialize(void* ptr) { (void)ptr; } static PyObject *__pyx_m = NULL; static PyObject *__pyx_d; static PyObject *__pyx_b; static PyObject *__pyx_cython_runtime; static PyObject *__pyx_empty_tuple; static PyObject *__pyx_empty_bytes; static PyObject *__pyx_empty_unicode; static int __pyx_lineno; static int __pyx_clineno = 0; static const char * __pyx_cfilenm= __FILE__; static 
const char *__pyx_filename; /* Header.proto */ #if !defined(CYTHON_CCOMPLEX) #if defined(__cplusplus) #define CYTHON_CCOMPLEX 1 #elif defined(_Complex_I) #define CYTHON_CCOMPLEX 1 #else #define CYTHON_CCOMPLEX 0 #endif #endif #if CYTHON_CCOMPLEX #ifdef __cplusplus #include #else #include #endif #endif #if CYTHON_CCOMPLEX && !defined(__cplusplus) && defined(__sun__) && defined(__GNUC__) #undef _Complex_I #define _Complex_I 1.0fj #endif static const char *__pyx_f[] = { "photutils/geometry/circular_overlap.pyx", "__init__.pxd", "type.pxd", }; /* BufferFormatStructs.proto */ #define IS_UNSIGNED(type) (((type) -1) > 0) struct __Pyx_StructField_; #define __PYX_BUF_FLAGS_PACKED_STRUCT (1 << 0) typedef struct { const char* name; struct __Pyx_StructField_* fields; size_t size; size_t arraysize[8]; int ndim; char typegroup; char is_unsigned; int flags; } __Pyx_TypeInfo; typedef struct __Pyx_StructField_ { __Pyx_TypeInfo* type; const char* name; size_t offset; } __Pyx_StructField; typedef struct { __Pyx_StructField* field; size_t parent_offset; } __Pyx_BufFmt_StackElem; typedef struct { __Pyx_StructField root; __Pyx_BufFmt_StackElem* head; size_t fmt_offset; size_t new_count, enc_count; size_t struct_alignment; int is_complex; char enc_type; char new_packmode; char enc_packmode; char is_valid_array; } __Pyx_BufFmt_Context; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":743 * # in Cython to enable them only on the right systems. * * ctypedef npy_int8 int8_t # <<<<<<<<<<<<<< * ctypedef npy_int16 int16_t * ctypedef npy_int32 int32_t */ typedef npy_int8 __pyx_t_5numpy_int8_t; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":744 * * ctypedef npy_int8 int8_t * ctypedef npy_int16 int16_t # <<<<<<<<<<<<<< * ctypedef npy_int32 int32_t * ctypedef npy_int64 int64_t */ typedef npy_int16 __pyx_t_5numpy_int16_t; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":745 * ctypedef npy_int8 int8_t * ctypedef npy_int16 int16_t * ctypedef npy_int32 int32_t # <<<<<<<<<<<<<< * ctypedef npy_int64 int64_t * #ctypedef npy_int96 int96_t */ typedef npy_int32 __pyx_t_5numpy_int32_t; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":746 * ctypedef npy_int16 int16_t * ctypedef npy_int32 int32_t * ctypedef npy_int64 int64_t # <<<<<<<<<<<<<< * #ctypedef npy_int96 int96_t * #ctypedef npy_int128 int128_t */ typedef npy_int64 __pyx_t_5numpy_int64_t; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":750 * #ctypedef npy_int128 int128_t * * ctypedef npy_uint8 uint8_t # <<<<<<<<<<<<<< * ctypedef npy_uint16 uint16_t * ctypedef npy_uint32 uint32_t */ typedef npy_uint8 __pyx_t_5numpy_uint8_t; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":751 * * ctypedef npy_uint8 uint8_t * ctypedef npy_uint16 uint16_t # <<<<<<<<<<<<<< * ctypedef npy_uint32 uint32_t * ctypedef npy_uint64 uint64_t */ typedef npy_uint16 __pyx_t_5numpy_uint16_t; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":752 * ctypedef npy_uint8 uint8_t * ctypedef npy_uint16 uint16_t * ctypedef npy_uint32 uint32_t # <<<<<<<<<<<<<< * ctypedef npy_uint64 uint64_t * #ctypedef npy_uint96 uint96_t */ typedef npy_uint32 __pyx_t_5numpy_uint32_t; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":753 * 
ctypedef npy_uint16 uint16_t * ctypedef npy_uint32 uint32_t * ctypedef npy_uint64 uint64_t # <<<<<<<<<<<<<< * #ctypedef npy_uint96 uint96_t * #ctypedef npy_uint128 uint128_t */ typedef npy_uint64 __pyx_t_5numpy_uint64_t; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":757 * #ctypedef npy_uint128 uint128_t * * ctypedef npy_float32 float32_t # <<<<<<<<<<<<<< * ctypedef npy_float64 float64_t * #ctypedef npy_float80 float80_t */ typedef npy_float32 __pyx_t_5numpy_float32_t; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":758 * * ctypedef npy_float32 float32_t * ctypedef npy_float64 float64_t # <<<<<<<<<<<<<< * #ctypedef npy_float80 float80_t * #ctypedef npy_float128 float128_t */ typedef npy_float64 __pyx_t_5numpy_float64_t; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":767 * # The int types are mapped a bit surprising -- * # numpy.int corresponds to 'l' and numpy.long to 'q' * ctypedef npy_long int_t # <<<<<<<<<<<<<< * ctypedef npy_longlong long_t * ctypedef npy_longlong longlong_t */ typedef npy_long __pyx_t_5numpy_int_t; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":768 * # numpy.int corresponds to 'l' and numpy.long to 'q' * ctypedef npy_long int_t * ctypedef npy_longlong long_t # <<<<<<<<<<<<<< * ctypedef npy_longlong longlong_t * */ typedef npy_longlong __pyx_t_5numpy_long_t; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":769 * ctypedef npy_long int_t * ctypedef npy_longlong long_t * ctypedef npy_longlong longlong_t # <<<<<<<<<<<<<< * * ctypedef npy_ulong uint_t */ typedef npy_longlong __pyx_t_5numpy_longlong_t; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":771 * ctypedef npy_longlong longlong_t * * ctypedef npy_ulong uint_t # <<<<<<<<<<<<<< * ctypedef npy_ulonglong ulong_t * ctypedef npy_ulonglong ulonglong_t */ typedef npy_ulong __pyx_t_5numpy_uint_t; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":772 * * ctypedef npy_ulong uint_t * ctypedef npy_ulonglong ulong_t # <<<<<<<<<<<<<< * ctypedef npy_ulonglong ulonglong_t * */ typedef npy_ulonglong __pyx_t_5numpy_ulong_t; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":773 * ctypedef npy_ulong uint_t * ctypedef npy_ulonglong ulong_t * ctypedef npy_ulonglong ulonglong_t # <<<<<<<<<<<<<< * * ctypedef npy_intp intp_t */ typedef npy_ulonglong __pyx_t_5numpy_ulonglong_t; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":775 * ctypedef npy_ulonglong ulonglong_t * * ctypedef npy_intp intp_t # <<<<<<<<<<<<<< * ctypedef npy_uintp uintp_t * */ typedef npy_intp __pyx_t_5numpy_intp_t; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":776 * * ctypedef npy_intp intp_t * ctypedef npy_uintp uintp_t # <<<<<<<<<<<<<< * * ctypedef npy_double float_t */ typedef npy_uintp __pyx_t_5numpy_uintp_t; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":778 * ctypedef npy_uintp uintp_t * * ctypedef npy_double float_t # <<<<<<<<<<<<<< * ctypedef npy_double double_t * ctypedef npy_longdouble longdouble_t */ typedef npy_double __pyx_t_5numpy_float_t; /* 
"../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":779 * * ctypedef npy_double float_t * ctypedef npy_double double_t # <<<<<<<<<<<<<< * ctypedef npy_longdouble longdouble_t * */ typedef npy_double __pyx_t_5numpy_double_t; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":780 * ctypedef npy_double float_t * ctypedef npy_double double_t * ctypedef npy_longdouble longdouble_t # <<<<<<<<<<<<<< * * ctypedef npy_cfloat cfloat_t */ typedef npy_longdouble __pyx_t_5numpy_longdouble_t; /* "photutils/geometry/circular_overlap.pyx":25 * * DTYPE = np.float64 * ctypedef np.float64_t DTYPE_t # <<<<<<<<<<<<<< * * # NOTE: Here we need to make sure we use cimport to import the C functions from */ typedef __pyx_t_5numpy_float64_t __pyx_t_9photutils_8geometry_16circular_overlap_DTYPE_t; /* Declarations.proto */ #if CYTHON_CCOMPLEX #ifdef __cplusplus typedef ::std::complex< float > __pyx_t_float_complex; #else typedef float _Complex __pyx_t_float_complex; #endif #else typedef struct { float real, imag; } __pyx_t_float_complex; #endif static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float, float); /* Declarations.proto */ #if CYTHON_CCOMPLEX #ifdef __cplusplus typedef ::std::complex< double > __pyx_t_double_complex; #else typedef double _Complex __pyx_t_double_complex; #endif #else typedef struct { double real, imag; } __pyx_t_double_complex; #endif static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double, double); /*--- Type declarations ---*/ /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":782 * ctypedef npy_longdouble longdouble_t * * ctypedef npy_cfloat cfloat_t # <<<<<<<<<<<<<< * ctypedef npy_cdouble cdouble_t * ctypedef npy_clongdouble clongdouble_t */ typedef npy_cfloat __pyx_t_5numpy_cfloat_t; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":783 * * ctypedef npy_cfloat cfloat_t * ctypedef npy_cdouble cdouble_t # <<<<<<<<<<<<<< * ctypedef npy_clongdouble clongdouble_t * */ typedef npy_cdouble __pyx_t_5numpy_cdouble_t; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":784 * ctypedef npy_cfloat cfloat_t * ctypedef npy_cdouble cdouble_t * ctypedef npy_clongdouble clongdouble_t # <<<<<<<<<<<<<< * * ctypedef npy_cdouble complex_t */ typedef npy_clongdouble __pyx_t_5numpy_clongdouble_t; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":786 * ctypedef npy_clongdouble clongdouble_t * * ctypedef npy_cdouble complex_t # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew1(a): */ typedef npy_cdouble __pyx_t_5numpy_complex_t; /* --- Runtime support code (head) --- */ /* Refnanny.proto */ #ifndef CYTHON_REFNANNY #define CYTHON_REFNANNY 0 #endif #if CYTHON_REFNANNY typedef struct { void (*INCREF)(void*, PyObject*, int); void (*DECREF)(void*, PyObject*, int); void (*GOTREF)(void*, PyObject*, int); void (*GIVEREF)(void*, PyObject*, int); void* (*SetupContext)(const char*, int, const char*); void (*FinishContext)(void**); } __Pyx_RefNannyAPIStruct; static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL; static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname); #define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL; #ifdef WITH_THREAD #define __Pyx_RefNannySetupContext(name, acquire_gil)\ if (acquire_gil) {\ PyGILState_STATE __pyx_gilstate_save = 
PyGILState_Ensure();\ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\ PyGILState_Release(__pyx_gilstate_save);\ } else {\ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\ } #else #define __Pyx_RefNannySetupContext(name, acquire_gil)\ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__) #endif #define __Pyx_RefNannyFinishContext()\ __Pyx_RefNanny->FinishContext(&__pyx_refnanny) #define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0) #define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0) #define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0) #define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0) #else #define __Pyx_RefNannyDeclarations #define __Pyx_RefNannySetupContext(name, acquire_gil) #define __Pyx_RefNannyFinishContext() #define __Pyx_INCREF(r) Py_INCREF(r) #define __Pyx_DECREF(r) Py_DECREF(r) #define __Pyx_GOTREF(r) #define __Pyx_GIVEREF(r) #define __Pyx_XINCREF(r) Py_XINCREF(r) #define __Pyx_XDECREF(r) Py_XDECREF(r) #define __Pyx_XGOTREF(r) #define __Pyx_XGIVEREF(r) #endif #define __Pyx_XDECREF_SET(r, v) do {\ PyObject *tmp = (PyObject *) r;\ r = v; __Pyx_XDECREF(tmp);\ } while (0) #define __Pyx_DECREF_SET(r, v) do {\ PyObject *tmp = (PyObject *) r;\ r = v; __Pyx_DECREF(tmp);\ } while (0) #define __Pyx_CLEAR(r) do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0) #define __Pyx_XCLEAR(r) do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0) /* PyObjectGetAttrStr.proto */ #if CYTHON_USE_TYPE_SLOTS static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name) { PyTypeObject* tp = Py_TYPE(obj); if (likely(tp->tp_getattro)) return tp->tp_getattro(obj, attr_name); #if PY_MAJOR_VERSION < 3 if (likely(tp->tp_getattr)) return tp->tp_getattr(obj, PyString_AS_STRING(attr_name)); #endif return PyObject_GetAttr(obj, attr_name); } #else #define __Pyx_PyObject_GetAttrStr(o,n) PyObject_GetAttr(o,n) #endif /* GetBuiltinName.proto */ static PyObject *__Pyx_GetBuiltinName(PyObject *name); /* RaiseArgTupleInvalid.proto */ static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact, Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found); /* RaiseDoubleKeywords.proto */ static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name); /* ParseKeywords.proto */ static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[],\ PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args,\ const char* function_name); /* GetModuleGlobalName.proto */ static CYTHON_INLINE PyObject *__Pyx_GetModuleGlobalName(PyObject *name); /* PyObjectCall.proto */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw); #else #define __Pyx_PyObject_Call(func, arg, kw) PyObject_Call(func, arg, kw) #endif /* ExtTypeTest.proto */ static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type); /* IsLittleEndian.proto */ static CYTHON_INLINE int __Pyx_Is_Little_Endian(void); /* 
BufferFormatCheck.proto */ static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts); static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx, __Pyx_BufFmt_StackElem* stack, __Pyx_TypeInfo* type); /* BufferGetAndValidate.proto */ #define __Pyx_GetBufferAndValidate(buf, obj, dtype, flags, nd, cast, stack)\ ((obj == Py_None || obj == NULL) ?\ (__Pyx_ZeroBuffer(buf), 0) :\ __Pyx__GetBufferAndValidate(buf, obj, dtype, flags, nd, cast, stack)) static int __Pyx__GetBufferAndValidate(Py_buffer* buf, PyObject* obj, __Pyx_TypeInfo* dtype, int flags, int nd, int cast, __Pyx_BufFmt_StackElem* stack); static void __Pyx_ZeroBuffer(Py_buffer* buf); static CYTHON_INLINE void __Pyx_SafeReleaseBuffer(Py_buffer* info); static Py_ssize_t __Pyx_minusones[] = { -1, -1, -1, -1, -1, -1, -1, -1 }; static Py_ssize_t __Pyx_zeros[] = { 0, 0, 0, 0, 0, 0, 0, 0 }; /* BufferIndexError.proto */ static void __Pyx_RaiseBufferIndexError(int axis); #define __Pyx_BufPtrStrided2d(type, buf, i0, s0, i1, s1) (type)((char*)buf + i0 * s0 + i1 * s1) /* PyThreadStateGet.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_PyThreadState_declare PyThreadState *__pyx_tstate; #define __Pyx_PyThreadState_assign __pyx_tstate = __Pyx_PyThreadState_Current; #define __Pyx_PyErr_Occurred() __pyx_tstate->curexc_type #else #define __Pyx_PyThreadState_declare #define __Pyx_PyThreadState_assign #define __Pyx_PyErr_Occurred() PyErr_Occurred() #endif /* PyErrFetchRestore.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_PyErr_Clear() __Pyx_ErrRestore(NULL, NULL, NULL) #define __Pyx_ErrRestoreWithState(type, value, tb) __Pyx_ErrRestoreInState(PyThreadState_GET(), type, value, tb) #define __Pyx_ErrFetchWithState(type, value, tb) __Pyx_ErrFetchInState(PyThreadState_GET(), type, value, tb) #define __Pyx_ErrRestore(type, value, tb) __Pyx_ErrRestoreInState(__pyx_tstate, type, value, tb) #define __Pyx_ErrFetch(type, value, tb) __Pyx_ErrFetchInState(__pyx_tstate, type, value, tb) static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb); static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); #if CYTHON_COMPILING_IN_CPYTHON #define __Pyx_PyErr_SetNone(exc) (Py_INCREF(exc), __Pyx_ErrRestore((exc), NULL, NULL)) #else #define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc) #endif #else #define __Pyx_PyErr_Clear() PyErr_Clear() #define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc) #define __Pyx_ErrRestoreWithState(type, value, tb) PyErr_Restore(type, value, tb) #define __Pyx_ErrFetchWithState(type, value, tb) PyErr_Fetch(type, value, tb) #define __Pyx_ErrRestoreInState(tstate, type, value, tb) PyErr_Restore(type, value, tb) #define __Pyx_ErrFetchInState(tstate, type, value, tb) PyErr_Fetch(type, value, tb) #define __Pyx_ErrRestore(type, value, tb) PyErr_Restore(type, value, tb) #define __Pyx_ErrFetch(type, value, tb) PyErr_Fetch(type, value, tb) #endif /* WriteUnraisableException.proto */ static void __Pyx_WriteUnraisable(const char *name, int clineno, int lineno, const char *filename, int full_traceback, int nogil); /* RaiseException.proto */ static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause); /* DictGetItem.proto */ #if PY_MAJOR_VERSION >= 3 && !CYTHON_COMPILING_IN_PYPY static PyObject *__Pyx_PyDict_GetItem(PyObject *d, PyObject* key) { PyObject *value; value = PyDict_GetItemWithError(d, key); if (unlikely(!value)) { if (!PyErr_Occurred()) { PyObject* args = 
PyTuple_Pack(1, key); if (likely(args)) PyErr_SetObject(PyExc_KeyError, args); Py_XDECREF(args); } return NULL; } Py_INCREF(value); return value; } #else #define __Pyx_PyDict_GetItem(d, key) PyObject_GetItem(d, key) #endif /* RaiseTooManyValuesToUnpack.proto */ static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected); /* RaiseNeedMoreValuesToUnpack.proto */ static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index); /* RaiseNoneIterError.proto */ static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void); /* SaveResetException.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_ExceptionSave(type, value, tb) __Pyx__ExceptionSave(__pyx_tstate, type, value, tb) static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); #define __Pyx_ExceptionReset(type, value, tb) __Pyx__ExceptionReset(__pyx_tstate, type, value, tb) static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb); #else #define __Pyx_ExceptionSave(type, value, tb) PyErr_GetExcInfo(type, value, tb) #define __Pyx_ExceptionReset(type, value, tb) PyErr_SetExcInfo(type, value, tb) #endif /* PyErrExceptionMatches.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_PyErr_ExceptionMatches(err) __Pyx_PyErr_ExceptionMatchesInState(__pyx_tstate, err) static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err); #else #define __Pyx_PyErr_ExceptionMatches(err) PyErr_ExceptionMatches(err) #endif /* GetException.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_GetException(type, value, tb) __Pyx__GetException(__pyx_tstate, type, value, tb) static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); #else static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb); #endif /* Import.proto */ static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level); /* CLineInTraceback.proto */ #ifdef CYTHON_CLINE_IN_TRACEBACK #define __Pyx_CLineForTraceback(tstate, c_line) (((CYTHON_CLINE_IN_TRACEBACK)) ? 
c_line : 0) #else static int __Pyx_CLineForTraceback(PyThreadState *tstate, int c_line); #endif /* CodeObjectCache.proto */ typedef struct { PyCodeObject* code_object; int code_line; } __Pyx_CodeObjectCacheEntry; struct __Pyx_CodeObjectCache { int count; int max_count; __Pyx_CodeObjectCacheEntry* entries; }; static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL}; static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line); static PyCodeObject *__pyx_find_code_object(int code_line); static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object); /* AddTraceback.proto */ static void __Pyx_AddTraceback(const char *funcname, int c_line, int py_line, const char *filename); /* BufferStructDeclare.proto */ typedef struct { Py_ssize_t shape, strides, suboffsets; } __Pyx_Buf_DimInfo; typedef struct { size_t refcount; Py_buffer pybuffer; } __Pyx_Buffer; typedef struct { __Pyx_Buffer *rcbuffer; char *data; __Pyx_Buf_DimInfo diminfo[8]; } __Pyx_LocalBuf_ND; #if PY_MAJOR_VERSION < 3 static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags); static void __Pyx_ReleaseBuffer(Py_buffer *view); #else #define __Pyx_GetBuffer PyObject_GetBuffer #define __Pyx_ReleaseBuffer PyBuffer_Release #endif /* CIntToPy.proto */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value); /* RealImag.proto */ #if CYTHON_CCOMPLEX #ifdef __cplusplus #define __Pyx_CREAL(z) ((z).real()) #define __Pyx_CIMAG(z) ((z).imag()) #else #define __Pyx_CREAL(z) (__real__(z)) #define __Pyx_CIMAG(z) (__imag__(z)) #endif #else #define __Pyx_CREAL(z) ((z).real) #define __Pyx_CIMAG(z) ((z).imag) #endif #if defined(__cplusplus) && CYTHON_CCOMPLEX\ && (defined(_WIN32) || defined(__clang__) || (defined(__GNUC__) && (__GNUC__ >= 5 || __GNUC__ == 4 && __GNUC_MINOR__ >= 4 )) || __cplusplus >= 201103) #define __Pyx_SET_CREAL(z,x) ((z).real(x)) #define __Pyx_SET_CIMAG(z,y) ((z).imag(y)) #else #define __Pyx_SET_CREAL(z,x) __Pyx_CREAL(z) = (x) #define __Pyx_SET_CIMAG(z,y) __Pyx_CIMAG(z) = (y) #endif /* Arithmetic.proto */ #if CYTHON_CCOMPLEX #define __Pyx_c_eq_float(a, b) ((a)==(b)) #define __Pyx_c_sum_float(a, b) ((a)+(b)) #define __Pyx_c_diff_float(a, b) ((a)-(b)) #define __Pyx_c_prod_float(a, b) ((a)*(b)) #define __Pyx_c_quot_float(a, b) ((a)/(b)) #define __Pyx_c_neg_float(a) (-(a)) #ifdef __cplusplus #define __Pyx_c_is_zero_float(z) ((z)==(float)0) #define __Pyx_c_conj_float(z) (::std::conj(z)) #if 1 #define __Pyx_c_abs_float(z) (::std::abs(z)) #define __Pyx_c_pow_float(a, b) (::std::pow(a, b)) #endif #else #define __Pyx_c_is_zero_float(z) ((z)==0) #define __Pyx_c_conj_float(z) (conjf(z)) #if 1 #define __Pyx_c_abs_float(z) (cabsf(z)) #define __Pyx_c_pow_float(a, b) (cpowf(a, b)) #endif #endif #else static CYTHON_INLINE int __Pyx_c_eq_float(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sum_float(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_diff_float(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prod_float(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quot_float(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_neg_float(__pyx_t_float_complex); static CYTHON_INLINE int __Pyx_c_is_zero_float(__pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conj_float(__pyx_t_float_complex); #if 1 static 
CYTHON_INLINE float __Pyx_c_abs_float(__pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_pow_float(__pyx_t_float_complex, __pyx_t_float_complex); #endif #endif /* Arithmetic.proto */ #if CYTHON_CCOMPLEX #define __Pyx_c_eq_double(a, b) ((a)==(b)) #define __Pyx_c_sum_double(a, b) ((a)+(b)) #define __Pyx_c_diff_double(a, b) ((a)-(b)) #define __Pyx_c_prod_double(a, b) ((a)*(b)) #define __Pyx_c_quot_double(a, b) ((a)/(b)) #define __Pyx_c_neg_double(a) (-(a)) #ifdef __cplusplus #define __Pyx_c_is_zero_double(z) ((z)==(double)0) #define __Pyx_c_conj_double(z) (::std::conj(z)) #if 1 #define __Pyx_c_abs_double(z) (::std::abs(z)) #define __Pyx_c_pow_double(a, b) (::std::pow(a, b)) #endif #else #define __Pyx_c_is_zero_double(z) ((z)==0) #define __Pyx_c_conj_double(z) (conj(z)) #if 1 #define __Pyx_c_abs_double(z) (cabs(z)) #define __Pyx_c_pow_double(a, b) (cpow(a, b)) #endif #endif #else static CYTHON_INLINE int __Pyx_c_eq_double(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum_double(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff_double(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod_double(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot_double(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg_double(__pyx_t_double_complex); static CYTHON_INLINE int __Pyx_c_is_zero_double(__pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj_double(__pyx_t_double_complex); #if 1 static CYTHON_INLINE double __Pyx_c_abs_double(__pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow_double(__pyx_t_double_complex, __pyx_t_double_complex); #endif #endif /* CIntToPy.proto */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_enum__NPY_TYPES(enum NPY_TYPES value); /* CIntFromPy.proto */ static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *); /* CIntFromPy.proto */ static CYTHON_INLINE unsigned int __Pyx_PyInt_As_unsigned_int(PyObject *); /* CIntToPy.proto */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value); /* CIntFromPy.proto */ static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *); /* FastTypeChecks.proto */ #if CYTHON_COMPILING_IN_CPYTHON #define __Pyx_TypeCheck(obj, type) __Pyx_IsSubtype(Py_TYPE(obj), (PyTypeObject *)type) static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b); static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject *type); static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *type1, PyObject *type2); #else #define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type) #define __Pyx_PyErr_GivenExceptionMatches(err, type) PyErr_GivenExceptionMatches(err, type) #define __Pyx_PyErr_GivenExceptionMatches2(err, type1, type2) (PyErr_GivenExceptionMatches(err, type1) || PyErr_GivenExceptionMatches(err, type2)) #endif /* CheckBinaryVersion.proto */ static int __Pyx_check_binary_version(void); /* PyIdentifierFromString.proto */ #if !defined(__Pyx_PyIdentifier_FromString) #if PY_MAJOR_VERSION < 3 #define __Pyx_PyIdentifier_FromString(s) PyString_FromString(s) #else #define __Pyx_PyIdentifier_FromString(s) PyUnicode_FromString(s) #endif #endif /* ModuleImport.proto */ static PyObject *__Pyx_ImportModule(const char *name); /* TypeImport.proto */ 
static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class_name, size_t size, int strict); /* FunctionImport.proto */ static int __Pyx_ImportFunction(PyObject *module, const char *funcname, void (**f)(void), const char *sig); /* InitStrings.proto */ static int __Pyx_InitStrings(__Pyx_StringTabEntry *t); /* Module declarations from 'cpython.buffer' */ /* Module declarations from 'libc.string' */ /* Module declarations from 'libc.stdio' */ /* Module declarations from '__builtin__' */ /* Module declarations from 'cpython.type' */ static PyTypeObject *__pyx_ptype_7cpython_4type_type = 0; /* Module declarations from 'cpython' */ /* Module declarations from 'cpython.object' */ /* Module declarations from 'cpython.ref' */ /* Module declarations from 'cpython.mem' */ /* Module declarations from 'numpy' */ /* Module declarations from 'numpy' */ static PyTypeObject *__pyx_ptype_5numpy_dtype = 0; static PyTypeObject *__pyx_ptype_5numpy_flatiter = 0; static PyTypeObject *__pyx_ptype_5numpy_broadcast = 0; static PyTypeObject *__pyx_ptype_5numpy_ndarray = 0; static PyTypeObject *__pyx_ptype_5numpy_ufunc = 0; static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *, char *, char *, int *); /*proto*/ /* Module declarations from 'photutils.geometry.core' */ static double (*__pyx_f_9photutils_8geometry_4core_area_arc)(double, double, double, double, double); /*proto*/ static double (*__pyx_f_9photutils_8geometry_4core_area_triangle)(double, double, double, double, double, double); /*proto*/ static double (*__pyx_f_9photutils_8geometry_4core_floor_sqrt)(double); /*proto*/ /* Module declarations from 'photutils.geometry.circular_overlap' */ static double __pyx_f_9photutils_8geometry_16circular_overlap_circular_overlap_single_subpixel(double, double, double, double, double, int); /*proto*/ static double __pyx_f_9photutils_8geometry_16circular_overlap_circular_overlap_single_exact(double, double, double, double, double); /*proto*/ static double __pyx_f_9photutils_8geometry_16circular_overlap_circular_overlap_core(double, double, double, double, double); /*proto*/ static __Pyx_TypeInfo __Pyx_TypeInfo_nn___pyx_t_9photutils_8geometry_16circular_overlap_DTYPE_t = { "DTYPE_t", NULL, sizeof(__pyx_t_9photutils_8geometry_16circular_overlap_DTYPE_t), { 0 }, 0, 'R', 0, 0 }; #define __Pyx_MODULE_NAME "photutils.geometry.circular_overlap" int __pyx_module_is_main_photutils__geometry__circular_overlap = 0; /* Implementation of 'photutils.geometry.circular_overlap' */ static PyObject *__pyx_builtin_range; static PyObject *__pyx_builtin_ValueError; static PyObject *__pyx_builtin_RuntimeError; static PyObject *__pyx_builtin_ImportError; static const char __pyx_k_d[] = "d"; static const char __pyx_k_i[] = "i"; static const char __pyx_k_j[] = "j"; static const char __pyx_k_r[] = "r"; static const char __pyx_k_x[] = "x"; static const char __pyx_k_y[] = "y"; static const char __pyx_k_dx[] = "dx"; static const char __pyx_k_dy[] = "dy"; static const char __pyx_k_np[] = "np"; static const char __pyx_k_nx[] = "nx"; static const char __pyx_k_ny[] = "ny"; static const char __pyx_k_all[] = "__all__"; static const char __pyx_k_frac[] = "frac"; static const char __pyx_k_main[] = "__main__"; static const char __pyx_k_test[] = "__test__"; static const char __pyx_k_xmax[] = "xmax"; static const char __pyx_k_xmin[] = "xmin"; static const char __pyx_k_ymax[] = "ymax"; static const char __pyx_k_ymin[] = "ymin"; static const char __pyx_k_DTYPE[] = "DTYPE"; static const char __pyx_k_bxmax[] = "bxmax"; 
static const char __pyx_k_bxmin[] = "bxmin"; static const char __pyx_k_bymax[] = "bymax"; static const char __pyx_k_bymin[] = "bymin"; static const char __pyx_k_dtype[] = "dtype"; static const char __pyx_k_numpy[] = "numpy"; static const char __pyx_k_pxcen[] = "pxcen"; static const char __pyx_k_pxmax[] = "pxmax"; static const char __pyx_k_pxmin[] = "pxmin"; static const char __pyx_k_pycen[] = "pycen"; static const char __pyx_k_pymax[] = "pymax"; static const char __pyx_k_pymin[] = "pymin"; static const char __pyx_k_range[] = "range"; static const char __pyx_k_zeros[] = "zeros"; static const char __pyx_k_import[] = "__import__"; static const char __pyx_k_float64[] = "float64"; static const char __pyx_k_subpixels[] = "subpixels"; static const char __pyx_k_use_exact[] = "use_exact"; static const char __pyx_k_ValueError[] = "ValueError"; static const char __pyx_k_ImportError[] = "ImportError"; static const char __pyx_k_RuntimeError[] = "RuntimeError"; static const char __pyx_k_pixel_radius[] = "pixel_radius"; static const char __pyx_k_cline_in_traceback[] = "cline_in_traceback"; static const char __pyx_k_circular_overlap_grid[] = "circular_overlap_grid"; static const char __pyx_k_ndarray_is_not_C_contiguous[] = "ndarray is not C contiguous"; static const char __pyx_k_The_functions_defined_here_allo[] = "\nThe functions defined here allow one to determine the exact area of\noverlap of a rectangle and a circle (written by Thomas Robitaille).\n"; static const char __pyx_k_numpy_core_multiarray_failed_to[] = "numpy.core.multiarray failed to import"; static const char __pyx_k_unknown_dtype_code_in_numpy_pxd[] = "unknown dtype code in numpy.pxd (%d)"; static const char __pyx_k_Format_string_allocated_too_shor[] = "Format string allocated too short, see comment in numpy.pxd"; static const char __pyx_k_Non_native_byte_order_not_suppor[] = "Non-native byte order not supported"; static const char __pyx_k_ndarray_is_not_Fortran_contiguou[] = "ndarray is not Fortran contiguous"; static const char __pyx_k_numpy_core_umath_failed_to_impor[] = "numpy.core.umath failed to import"; static const char __pyx_k_photutils_geometry_circular_over[] = "photutils/geometry/circular_overlap.pyx"; static const char __pyx_k_Format_string_allocated_too_shor_2[] = "Format string allocated too short."; static const char __pyx_k_photutils_geometry_circular_over_2[] = "photutils.geometry.circular_overlap"; static PyObject *__pyx_n_s_DTYPE; static PyObject *__pyx_kp_u_Format_string_allocated_too_shor; static PyObject *__pyx_kp_u_Format_string_allocated_too_shor_2; static PyObject *__pyx_n_s_ImportError; static PyObject *__pyx_kp_u_Non_native_byte_order_not_suppor; static PyObject *__pyx_n_s_RuntimeError; static PyObject *__pyx_n_s_ValueError; static PyObject *__pyx_n_s_all; static PyObject *__pyx_n_s_bxmax; static PyObject *__pyx_n_s_bxmin; static PyObject *__pyx_n_s_bymax; static PyObject *__pyx_n_s_bymin; static PyObject *__pyx_n_s_circular_overlap_grid; static PyObject *__pyx_n_u_circular_overlap_grid; static PyObject *__pyx_n_s_cline_in_traceback; static PyObject *__pyx_n_s_d; static PyObject *__pyx_n_s_dtype; static PyObject *__pyx_n_s_dx; static PyObject *__pyx_n_s_dy; static PyObject *__pyx_n_s_float64; static PyObject *__pyx_n_s_frac; static PyObject *__pyx_n_s_i; static PyObject *__pyx_n_s_import; static PyObject *__pyx_n_s_j; static PyObject *__pyx_n_s_main; static PyObject *__pyx_kp_u_ndarray_is_not_C_contiguous; static PyObject *__pyx_kp_u_ndarray_is_not_Fortran_contiguou; static PyObject *__pyx_n_s_np; static 
PyObject *__pyx_n_s_numpy; static PyObject *__pyx_kp_u_numpy_core_multiarray_failed_to; static PyObject *__pyx_kp_u_numpy_core_umath_failed_to_impor; static PyObject *__pyx_n_s_nx; static PyObject *__pyx_n_s_ny; static PyObject *__pyx_kp_s_photutils_geometry_circular_over; static PyObject *__pyx_n_s_photutils_geometry_circular_over_2; static PyObject *__pyx_n_s_pixel_radius; static PyObject *__pyx_n_s_pxcen; static PyObject *__pyx_n_s_pxmax; static PyObject *__pyx_n_s_pxmin; static PyObject *__pyx_n_s_pycen; static PyObject *__pyx_n_s_pymax; static PyObject *__pyx_n_s_pymin; static PyObject *__pyx_n_s_r; static PyObject *__pyx_n_s_range; static PyObject *__pyx_n_s_subpixels; static PyObject *__pyx_n_s_test; static PyObject *__pyx_kp_u_unknown_dtype_code_in_numpy_pxd; static PyObject *__pyx_n_s_use_exact; static PyObject *__pyx_n_s_x; static PyObject *__pyx_n_s_xmax; static PyObject *__pyx_n_s_xmin; static PyObject *__pyx_n_s_y; static PyObject *__pyx_n_s_ymax; static PyObject *__pyx_n_s_ymin; static PyObject *__pyx_n_s_zeros; static PyObject *__pyx_pf_9photutils_8geometry_16circular_overlap_circular_overlap_grid(CYTHON_UNUSED PyObject *__pyx_self, double __pyx_v_xmin, double __pyx_v_xmax, double __pyx_v_ymin, double __pyx_v_ymax, int __pyx_v_nx, int __pyx_v_ny, double __pyx_v_r, int __pyx_v_use_exact, int __pyx_v_subpixels); /* proto */ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */ static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info); /* proto */ static PyObject *__pyx_tuple_; static PyObject *__pyx_tuple__2; static PyObject *__pyx_tuple__3; static PyObject *__pyx_tuple__4; static PyObject *__pyx_tuple__5; static PyObject *__pyx_tuple__6; static PyObject *__pyx_tuple__7; static PyObject *__pyx_tuple__8; static PyObject *__pyx_tuple__9; static PyObject *__pyx_tuple__10; static PyObject *__pyx_codeobj__11; /* "photutils/geometry/circular_overlap.pyx":33 * * * def circular_overlap_grid(double xmin, double xmax, double ymin, double ymax, # <<<<<<<<<<<<<< * int nx, int ny, double r, int use_exact, * int subpixels): */ /* Python wrapper */ static PyObject *__pyx_pw_9photutils_8geometry_16circular_overlap_1circular_overlap_grid(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_9photutils_8geometry_16circular_overlap_circular_overlap_grid[] = "\n circular_overlap_grid(xmin, xmax, ymin, ymax, nx, ny, r,\n use_exact, subpixels)\n\n Area of overlap between a circle and a pixel grid. 
The circle is centered\n on the origin.\n\n Parameters\n ----------\n xmin, xmax, ymin, ymax : float\n Extent of the grid in the x and y direction.\n nx, ny : int\n Grid dimensions.\n r : float\n The radius of the circle.\n use_exact : 0 or 1\n If ``1`` calculates exact overlap, if ``0`` uses ``subpixel`` number\n of subpixels to calculate the overlap.\n subpixels : int\n Each pixel resampled by this factor in each dimension, thus each\n pixel is divided into ``subpixels ** 2`` subpixels.\n\n Returns\n -------\n frac : `~numpy.ndarray` (float)\n 2-d array of shape (ny, nx) giving the fraction of the overlap.\n "; static PyMethodDef __pyx_mdef_9photutils_8geometry_16circular_overlap_1circular_overlap_grid = {"circular_overlap_grid", (PyCFunction)__pyx_pw_9photutils_8geometry_16circular_overlap_1circular_overlap_grid, METH_VARARGS|METH_KEYWORDS, __pyx_doc_9photutils_8geometry_16circular_overlap_circular_overlap_grid}; static PyObject *__pyx_pw_9photutils_8geometry_16circular_overlap_1circular_overlap_grid(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { double __pyx_v_xmin; double __pyx_v_xmax; double __pyx_v_ymin; double __pyx_v_ymax; int __pyx_v_nx; int __pyx_v_ny; double __pyx_v_r; int __pyx_v_use_exact; int __pyx_v_subpixels; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("circular_overlap_grid (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_xmin,&__pyx_n_s_xmax,&__pyx_n_s_ymin,&__pyx_n_s_ymax,&__pyx_n_s_nx,&__pyx_n_s_ny,&__pyx_n_s_r,&__pyx_n_s_use_exact,&__pyx_n_s_subpixels,0}; PyObject* values[9] = {0,0,0,0,0,0,0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 9: values[8] = PyTuple_GET_ITEM(__pyx_args, 8); CYTHON_FALLTHROUGH; case 8: values[7] = PyTuple_GET_ITEM(__pyx_args, 7); CYTHON_FALLTHROUGH; case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6); CYTHON_FALLTHROUGH; case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); CYTHON_FALLTHROUGH; case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); CYTHON_FALLTHROUGH; case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_xmin)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_xmax)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("circular_overlap_grid", 1, 9, 9, 1); __PYX_ERR(0, 33, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_ymin)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("circular_overlap_grid", 1, 9, 9, 2); __PYX_ERR(0, 33, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 3: if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_ymax)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("circular_overlap_grid", 1, 9, 9, 3); __PYX_ERR(0, 33, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 4: if (likely((values[4] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_nx)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("circular_overlap_grid", 1, 9, 9, 4); __PYX_ERR(0, 33, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 5: if 
(likely((values[5] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_ny)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("circular_overlap_grid", 1, 9, 9, 5); __PYX_ERR(0, 33, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 6: if (likely((values[6] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_r)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("circular_overlap_grid", 1, 9, 9, 6); __PYX_ERR(0, 33, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 7: if (likely((values[7] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_use_exact)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("circular_overlap_grid", 1, 9, 9, 7); __PYX_ERR(0, 33, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 8: if (likely((values[8] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_subpixels)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("circular_overlap_grid", 1, 9, 9, 8); __PYX_ERR(0, 33, __pyx_L3_error) } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "circular_overlap_grid") < 0)) __PYX_ERR(0, 33, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 9) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[3] = PyTuple_GET_ITEM(__pyx_args, 3); values[4] = PyTuple_GET_ITEM(__pyx_args, 4); values[5] = PyTuple_GET_ITEM(__pyx_args, 5); values[6] = PyTuple_GET_ITEM(__pyx_args, 6); values[7] = PyTuple_GET_ITEM(__pyx_args, 7); values[8] = PyTuple_GET_ITEM(__pyx_args, 8); } __pyx_v_xmin = __pyx_PyFloat_AsDouble(values[0]); if (unlikely((__pyx_v_xmin == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 33, __pyx_L3_error) __pyx_v_xmax = __pyx_PyFloat_AsDouble(values[1]); if (unlikely((__pyx_v_xmax == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 33, __pyx_L3_error) __pyx_v_ymin = __pyx_PyFloat_AsDouble(values[2]); if (unlikely((__pyx_v_ymin == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 33, __pyx_L3_error) __pyx_v_ymax = __pyx_PyFloat_AsDouble(values[3]); if (unlikely((__pyx_v_ymax == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 33, __pyx_L3_error) __pyx_v_nx = __Pyx_PyInt_As_int(values[4]); if (unlikely((__pyx_v_nx == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 34, __pyx_L3_error) __pyx_v_ny = __Pyx_PyInt_As_int(values[5]); if (unlikely((__pyx_v_ny == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 34, __pyx_L3_error) __pyx_v_r = __pyx_PyFloat_AsDouble(values[6]); if (unlikely((__pyx_v_r == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 34, __pyx_L3_error) __pyx_v_use_exact = __Pyx_PyInt_As_int(values[7]); if (unlikely((__pyx_v_use_exact == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 34, __pyx_L3_error) __pyx_v_subpixels = __Pyx_PyInt_As_int(values[8]); if (unlikely((__pyx_v_subpixels == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 35, __pyx_L3_error) } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("circular_overlap_grid", 1, 9, 9, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 33, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("photutils.geometry.circular_overlap.circular_overlap_grid", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_9photutils_8geometry_16circular_overlap_circular_overlap_grid(__pyx_self, __pyx_v_xmin, __pyx_v_xmax, __pyx_v_ymin, __pyx_v_ymax, __pyx_v_nx, __pyx_v_ny, __pyx_v_r, __pyx_v_use_exact, __pyx_v_subpixels); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } 
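/* The wrapper above exposes ``circular_overlap_grid`` to Python with the
   signature and behaviour described in its docstring. A minimal usage sketch
   in Python, assuming the compiled extension is importable under the module
   name declared in this file, ``photutils.geometry.circular_overlap``; the
   grid extent, radius and subpixel count below are illustrative values only:

       from photutils.geometry.circular_overlap import circular_overlap_grid

       # Fraction of each pixel of a 5 x 5 grid spanning [-1, 1] x [-1, 1]
       # that is covered by a circle of radius 0.5 centered on the origin.
       # With use_exact=1 the exact geometric overlap is computed and the
       # trailing subpixels argument (here 5) is not used.
       frac = circular_overlap_grid(-1., 1., -1., 1., 5, 5, 0.5, 1, 5)

       assert frac.shape == (5, 5)         # returned array has shape (ny, nx)
       area = frac.sum() * (2. / 5) ** 2   # pixel area is dx * dy = 0.4 * 0.4
       # area is approximately pi * 0.5**2 = 0.785..., the circle's area,
       # since the circle lies entirely within the grid.
*/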
static PyObject *__pyx_pf_9photutils_8geometry_16circular_overlap_circular_overlap_grid(CYTHON_UNUSED PyObject *__pyx_self, double __pyx_v_xmin, double __pyx_v_xmax, double __pyx_v_ymin, double __pyx_v_ymax, int __pyx_v_nx, int __pyx_v_ny, double __pyx_v_r, int __pyx_v_use_exact, int __pyx_v_subpixels) { unsigned int __pyx_v_i; unsigned int __pyx_v_j; double __pyx_v_dx; double __pyx_v_dy; double __pyx_v_d; double __pyx_v_pixel_radius; double __pyx_v_bxmin; double __pyx_v_bxmax; double __pyx_v_bymin; double __pyx_v_bymax; double __pyx_v_pxmin; double __pyx_v_pxcen; double __pyx_v_pxmax; double __pyx_v_pymin; double __pyx_v_pycen; double __pyx_v_pymax; PyArrayObject *__pyx_v_frac = 0; __Pyx_LocalBuf_ND __pyx_pybuffernd_frac; __Pyx_Buffer __pyx_pybuffer_frac; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyArrayObject *__pyx_t_5 = NULL; double __pyx_t_6; int __pyx_t_7; unsigned int __pyx_t_8; int __pyx_t_9; int __pyx_t_10; int __pyx_t_11; unsigned int __pyx_t_12; size_t __pyx_t_13; size_t __pyx_t_14; int __pyx_t_15; double __pyx_t_16; size_t __pyx_t_17; size_t __pyx_t_18; size_t __pyx_t_19; size_t __pyx_t_20; __Pyx_RefNannySetupContext("circular_overlap_grid", 0); __pyx_pybuffer_frac.pybuffer.buf = NULL; __pyx_pybuffer_frac.refcount = 0; __pyx_pybuffernd_frac.data = NULL; __pyx_pybuffernd_frac.rcbuffer = &__pyx_pybuffer_frac; /* "photutils/geometry/circular_overlap.pyx":70 * * # Define output array * cdef np.ndarray[DTYPE_t, ndim=2] frac = np.zeros([ny, nx], dtype=DTYPE) # <<<<<<<<<<<<<< * * # Find the width of each element in x and y */ __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 70, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_zeros); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 70, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_ny); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 70, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_nx); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 70, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyList_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 70, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_1); PyList_SET_ITEM(__pyx_t_4, 0, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_3); PyList_SET_ITEM(__pyx_t_4, 1, __pyx_t_3); __pyx_t_1 = 0; __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 70, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 70, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s_DTYPE); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 70, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_dtype, __pyx_t_1) < 0) __PYX_ERR(0, 70, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_3, __pyx_t_4); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 70, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 70, __pyx_L1_error) 
__pyx_t_5 = ((PyArrayObject *)__pyx_t_1); { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_frac.rcbuffer->pybuffer, (PyObject*)__pyx_t_5, &__Pyx_TypeInfo_nn___pyx_t_9photutils_8geometry_16circular_overlap_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 2, 0, __pyx_stack) == -1)) { __pyx_v_frac = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_pybuffernd_frac.rcbuffer->pybuffer.buf = NULL; __PYX_ERR(0, 70, __pyx_L1_error) } else {__pyx_pybuffernd_frac.diminfo[0].strides = __pyx_pybuffernd_frac.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_frac.diminfo[0].shape = __pyx_pybuffernd_frac.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_frac.diminfo[1].strides = __pyx_pybuffernd_frac.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_frac.diminfo[1].shape = __pyx_pybuffernd_frac.rcbuffer->pybuffer.shape[1]; } } __pyx_t_5 = 0; __pyx_v_frac = ((PyArrayObject *)__pyx_t_1); __pyx_t_1 = 0; /* "photutils/geometry/circular_overlap.pyx":73 * * # Find the width of each element in x and y * dx = (xmax - xmin) / nx # <<<<<<<<<<<<<< * dy = (ymax - ymin) / ny * */ __pyx_t_6 = (__pyx_v_xmax - __pyx_v_xmin); if (unlikely(__pyx_v_nx == 0)) { PyErr_SetString(PyExc_ZeroDivisionError, "float division"); __PYX_ERR(0, 73, __pyx_L1_error) } __pyx_v_dx = (__pyx_t_6 / ((double)__pyx_v_nx)); /* "photutils/geometry/circular_overlap.pyx":74 * # Find the width of each element in x and y * dx = (xmax - xmin) / nx * dy = (ymax - ymin) / ny # <<<<<<<<<<<<<< * * # Find the radius of a single pixel */ __pyx_t_6 = (__pyx_v_ymax - __pyx_v_ymin); if (unlikely(__pyx_v_ny == 0)) { PyErr_SetString(PyExc_ZeroDivisionError, "float division"); __PYX_ERR(0, 74, __pyx_L1_error) } __pyx_v_dy = (__pyx_t_6 / ((double)__pyx_v_ny)); /* "photutils/geometry/circular_overlap.pyx":77 * * # Find the radius of a single pixel * pixel_radius = 0.5 * sqrt(dx * dx + dy * dy) # <<<<<<<<<<<<<< * * # Define bounding box */ __pyx_v_pixel_radius = (0.5 * sqrt(((__pyx_v_dx * __pyx_v_dx) + (__pyx_v_dy * __pyx_v_dy)))); /* "photutils/geometry/circular_overlap.pyx":80 * * # Define bounding box * bxmin = -r - 0.5 * dx # <<<<<<<<<<<<<< * bxmax = +r + 0.5 * dx * bymin = -r - 0.5 * dy */ __pyx_v_bxmin = ((-__pyx_v_r) - (0.5 * __pyx_v_dx)); /* "photutils/geometry/circular_overlap.pyx":81 * # Define bounding box * bxmin = -r - 0.5 * dx * bxmax = +r + 0.5 * dx # <<<<<<<<<<<<<< * bymin = -r - 0.5 * dy * bymax = +r + 0.5 * dy */ __pyx_v_bxmax = (__pyx_v_r + (0.5 * __pyx_v_dx)); /* "photutils/geometry/circular_overlap.pyx":82 * bxmin = -r - 0.5 * dx * bxmax = +r + 0.5 * dx * bymin = -r - 0.5 * dy # <<<<<<<<<<<<<< * bymax = +r + 0.5 * dy * */ __pyx_v_bymin = ((-__pyx_v_r) - (0.5 * __pyx_v_dy)); /* "photutils/geometry/circular_overlap.pyx":83 * bxmax = +r + 0.5 * dx * bymin = -r - 0.5 * dy * bymax = +r + 0.5 * dy # <<<<<<<<<<<<<< * * for i in range(nx): */ __pyx_v_bymax = (__pyx_v_r + (0.5 * __pyx_v_dy)); /* "photutils/geometry/circular_overlap.pyx":85 * bymax = +r + 0.5 * dy * * for i in range(nx): # <<<<<<<<<<<<<< * pxmin = xmin + i * dx # lower end of pixel * pxcen = pxmin + dx * 0.5 */ __pyx_t_7 = __pyx_v_nx; for (__pyx_t_8 = 0; __pyx_t_8 < __pyx_t_7; __pyx_t_8+=1) { __pyx_v_i = __pyx_t_8; /* "photutils/geometry/circular_overlap.pyx":86 * * for i in range(nx): * pxmin = xmin + i * dx # lower end of pixel # <<<<<<<<<<<<<< * pxcen = pxmin + dx * 0.5 * pxmax = pxmin + dx # upper end of pixel */ __pyx_v_pxmin = (__pyx_v_xmin + (__pyx_v_i * __pyx_v_dx)); /* "photutils/geometry/circular_overlap.pyx":87 * for i in 
range(nx): * pxmin = xmin + i * dx # lower end of pixel * pxcen = pxmin + dx * 0.5 # <<<<<<<<<<<<<< * pxmax = pxmin + dx # upper end of pixel * if pxmax > bxmin and pxmin < bxmax: */ __pyx_v_pxcen = (__pyx_v_pxmin + (__pyx_v_dx * 0.5)); /* "photutils/geometry/circular_overlap.pyx":88 * pxmin = xmin + i * dx # lower end of pixel * pxcen = pxmin + dx * 0.5 * pxmax = pxmin + dx # upper end of pixel # <<<<<<<<<<<<<< * if pxmax > bxmin and pxmin < bxmax: * for j in range(ny): */ __pyx_v_pxmax = (__pyx_v_pxmin + __pyx_v_dx); /* "photutils/geometry/circular_overlap.pyx":89 * pxcen = pxmin + dx * 0.5 * pxmax = pxmin + dx # upper end of pixel * if pxmax > bxmin and pxmin < bxmax: # <<<<<<<<<<<<<< * for j in range(ny): * pymin = ymin + j * dy */ __pyx_t_10 = ((__pyx_v_pxmax > __pyx_v_bxmin) != 0); if (__pyx_t_10) { } else { __pyx_t_9 = __pyx_t_10; goto __pyx_L6_bool_binop_done; } __pyx_t_10 = ((__pyx_v_pxmin < __pyx_v_bxmax) != 0); __pyx_t_9 = __pyx_t_10; __pyx_L6_bool_binop_done:; if (__pyx_t_9) { /* "photutils/geometry/circular_overlap.pyx":90 * pxmax = pxmin + dx # upper end of pixel * if pxmax > bxmin and pxmin < bxmax: * for j in range(ny): # <<<<<<<<<<<<<< * pymin = ymin + j * dy * pycen = pymin + dy * 0.5 */ __pyx_t_11 = __pyx_v_ny; for (__pyx_t_12 = 0; __pyx_t_12 < __pyx_t_11; __pyx_t_12+=1) { __pyx_v_j = __pyx_t_12; /* "photutils/geometry/circular_overlap.pyx":91 * if pxmax > bxmin and pxmin < bxmax: * for j in range(ny): * pymin = ymin + j * dy # <<<<<<<<<<<<<< * pycen = pymin + dy * 0.5 * pymax = pymin + dy */ __pyx_v_pymin = (__pyx_v_ymin + (__pyx_v_j * __pyx_v_dy)); /* "photutils/geometry/circular_overlap.pyx":92 * for j in range(ny): * pymin = ymin + j * dy * pycen = pymin + dy * 0.5 # <<<<<<<<<<<<<< * pymax = pymin + dy * if pymax > bymin and pymin < bymax: */ __pyx_v_pycen = (__pyx_v_pymin + (__pyx_v_dy * 0.5)); /* "photutils/geometry/circular_overlap.pyx":93 * pymin = ymin + j * dy * pycen = pymin + dy * 0.5 * pymax = pymin + dy # <<<<<<<<<<<<<< * if pymax > bymin and pymin < bymax: * */ __pyx_v_pymax = (__pyx_v_pymin + __pyx_v_dy); /* "photutils/geometry/circular_overlap.pyx":94 * pycen = pymin + dy * 0.5 * pymax = pymin + dy * if pymax > bymin and pymin < bymax: # <<<<<<<<<<<<<< * * # Distance from circle center to pixel center. */ __pyx_t_10 = ((__pyx_v_pymax > __pyx_v_bymin) != 0); if (__pyx_t_10) { } else { __pyx_t_9 = __pyx_t_10; goto __pyx_L11_bool_binop_done; } __pyx_t_10 = ((__pyx_v_pymin < __pyx_v_bymax) != 0); __pyx_t_9 = __pyx_t_10; __pyx_L11_bool_binop_done:; if (__pyx_t_9) { /* "photutils/geometry/circular_overlap.pyx":97 * * # Distance from circle center to pixel center. * d = sqrt(pxcen * pxcen + pycen * pycen) # <<<<<<<<<<<<<< * * # If pixel center is "well within" circle, count full */ __pyx_v_d = sqrt(((__pyx_v_pxcen * __pyx_v_pxcen) + (__pyx_v_pycen * __pyx_v_pycen))); /* "photutils/geometry/circular_overlap.pyx":101 * # If pixel center is "well within" circle, count full * # pixel. * if d < r - pixel_radius: # <<<<<<<<<<<<<< * frac[j, i] = 1. * */ __pyx_t_9 = ((__pyx_v_d < (__pyx_v_r - __pyx_v_pixel_radius)) != 0); if (__pyx_t_9) { /* "photutils/geometry/circular_overlap.pyx":102 * # pixel. * if d < r - pixel_radius: * frac[j, i] = 1. 
# <<<<<<<<<<<<<< * * # If pixel center is "close" to circle border, find */ __pyx_t_13 = __pyx_v_j; __pyx_t_14 = __pyx_v_i; __pyx_t_15 = -1; if (unlikely(__pyx_t_13 >= (size_t)__pyx_pybuffernd_frac.diminfo[0].shape)) __pyx_t_15 = 0; if (unlikely(__pyx_t_14 >= (size_t)__pyx_pybuffernd_frac.diminfo[1].shape)) __pyx_t_15 = 1; if (unlikely(__pyx_t_15 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_15); __PYX_ERR(0, 102, __pyx_L1_error) } *__Pyx_BufPtrStrided2d(__pyx_t_9photutils_8geometry_16circular_overlap_DTYPE_t *, __pyx_pybuffernd_frac.rcbuffer->pybuffer.buf, __pyx_t_13, __pyx_pybuffernd_frac.diminfo[0].strides, __pyx_t_14, __pyx_pybuffernd_frac.diminfo[1].strides) = 1.; /* "photutils/geometry/circular_overlap.pyx":101 * # If pixel center is "well within" circle, count full * # pixel. * if d < r - pixel_radius: # <<<<<<<<<<<<<< * frac[j, i] = 1. * */ goto __pyx_L13; } /* "photutils/geometry/circular_overlap.pyx":106 * # If pixel center is "close" to circle border, find * # overlap. * elif d < r + pixel_radius: # <<<<<<<<<<<<<< * * # Either do exact calculation or use subpixel */ __pyx_t_9 = ((__pyx_v_d < (__pyx_v_r + __pyx_v_pixel_radius)) != 0); if (__pyx_t_9) { /* "photutils/geometry/circular_overlap.pyx":110 * # Either do exact calculation or use subpixel * # sampling: * if use_exact: # <<<<<<<<<<<<<< * frac[j, i] = circular_overlap_single_exact( * pxmin, pymin, pxmax, pymax, r) / (dx * dy) */ __pyx_t_9 = (__pyx_v_use_exact != 0); if (__pyx_t_9) { /* "photutils/geometry/circular_overlap.pyx":111 * # sampling: * if use_exact: * frac[j, i] = circular_overlap_single_exact( # <<<<<<<<<<<<<< * pxmin, pymin, pxmax, pymax, r) / (dx * dy) * else: */ __pyx_t_6 = __pyx_f_9photutils_8geometry_16circular_overlap_circular_overlap_single_exact(__pyx_v_pxmin, __pyx_v_pymin, __pyx_v_pxmax, __pyx_v_pymax, __pyx_v_r); /* "photutils/geometry/circular_overlap.pyx":112 * if use_exact: * frac[j, i] = circular_overlap_single_exact( * pxmin, pymin, pxmax, pymax, r) / (dx * dy) # <<<<<<<<<<<<<< * else: * frac[j, i] = circular_overlap_single_subpixel( */ __pyx_t_16 = (__pyx_v_dx * __pyx_v_dy); if (unlikely(__pyx_t_16 == 0)) { PyErr_SetString(PyExc_ZeroDivisionError, "float division"); __PYX_ERR(0, 112, __pyx_L1_error) } /* "photutils/geometry/circular_overlap.pyx":111 * # sampling: * if use_exact: * frac[j, i] = circular_overlap_single_exact( # <<<<<<<<<<<<<< * pxmin, pymin, pxmax, pymax, r) / (dx * dy) * else: */ __pyx_t_17 = __pyx_v_j; __pyx_t_18 = __pyx_v_i; __pyx_t_15 = -1; if (unlikely(__pyx_t_17 >= (size_t)__pyx_pybuffernd_frac.diminfo[0].shape)) __pyx_t_15 = 0; if (unlikely(__pyx_t_18 >= (size_t)__pyx_pybuffernd_frac.diminfo[1].shape)) __pyx_t_15 = 1; if (unlikely(__pyx_t_15 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_15); __PYX_ERR(0, 111, __pyx_L1_error) } *__Pyx_BufPtrStrided2d(__pyx_t_9photutils_8geometry_16circular_overlap_DTYPE_t *, __pyx_pybuffernd_frac.rcbuffer->pybuffer.buf, __pyx_t_17, __pyx_pybuffernd_frac.diminfo[0].strides, __pyx_t_18, __pyx_pybuffernd_frac.diminfo[1].strides) = (__pyx_t_6 / __pyx_t_16); /* "photutils/geometry/circular_overlap.pyx":110 * # Either do exact calculation or use subpixel * # sampling: * if use_exact: # <<<<<<<<<<<<<< * frac[j, i] = circular_overlap_single_exact( * pxmin, pymin, pxmax, pymax, r) / (dx * dy) */ goto __pyx_L14; } /* "photutils/geometry/circular_overlap.pyx":114 * pxmin, pymin, pxmax, pymax, r) / (dx * dy) * else: * frac[j, i] = circular_overlap_single_subpixel( # <<<<<<<<<<<<<< * pxmin, pymin, pxmax, pymax, r, subpixels) * */ /*else*/ { /* 
"photutils/geometry/circular_overlap.pyx":115 * else: * frac[j, i] = circular_overlap_single_subpixel( * pxmin, pymin, pxmax, pymax, r, subpixels) # <<<<<<<<<<<<<< * * # Otherwise, it is fully outside circle. */ __pyx_t_19 = __pyx_v_j; __pyx_t_20 = __pyx_v_i; __pyx_t_15 = -1; if (unlikely(__pyx_t_19 >= (size_t)__pyx_pybuffernd_frac.diminfo[0].shape)) __pyx_t_15 = 0; if (unlikely(__pyx_t_20 >= (size_t)__pyx_pybuffernd_frac.diminfo[1].shape)) __pyx_t_15 = 1; if (unlikely(__pyx_t_15 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_15); __PYX_ERR(0, 114, __pyx_L1_error) } *__Pyx_BufPtrStrided2d(__pyx_t_9photutils_8geometry_16circular_overlap_DTYPE_t *, __pyx_pybuffernd_frac.rcbuffer->pybuffer.buf, __pyx_t_19, __pyx_pybuffernd_frac.diminfo[0].strides, __pyx_t_20, __pyx_pybuffernd_frac.diminfo[1].strides) = __pyx_f_9photutils_8geometry_16circular_overlap_circular_overlap_single_subpixel(__pyx_v_pxmin, __pyx_v_pymin, __pyx_v_pxmax, __pyx_v_pymax, __pyx_v_r, __pyx_v_subpixels); } __pyx_L14:; /* "photutils/geometry/circular_overlap.pyx":106 * # If pixel center is "close" to circle border, find * # overlap. * elif d < r + pixel_radius: # <<<<<<<<<<<<<< * * # Either do exact calculation or use subpixel */ } __pyx_L13:; /* "photutils/geometry/circular_overlap.pyx":94 * pycen = pymin + dy * 0.5 * pymax = pymin + dy * if pymax > bymin and pymin < bymax: # <<<<<<<<<<<<<< * * # Distance from circle center to pixel center. */ } } /* "photutils/geometry/circular_overlap.pyx":89 * pxcen = pxmin + dx * 0.5 * pxmax = pxmin + dx # upper end of pixel * if pxmax > bxmin and pxmin < bxmax: # <<<<<<<<<<<<<< * for j in range(ny): * pymin = ymin + j * dy */ } } /* "photutils/geometry/circular_overlap.pyx":120 * # No action needed. * * return frac # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_frac)); __pyx_r = ((PyObject *)__pyx_v_frac); goto __pyx_L0; /* "photutils/geometry/circular_overlap.pyx":33 * * * def circular_overlap_grid(double xmin, double xmax, double ymin, double ymax, # <<<<<<<<<<<<<< * int nx, int ny, double r, int use_exact, * int subpixels): */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); { PyObject *__pyx_type, *__pyx_value, *__pyx_tb; __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_frac.rcbuffer->pybuffer); __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);} __Pyx_AddTraceback("photutils.geometry.circular_overlap.circular_overlap_grid", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; goto __pyx_L2; __pyx_L0:; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_frac.rcbuffer->pybuffer); __pyx_L2:; __Pyx_XDECREF((PyObject *)__pyx_v_frac); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "photutils/geometry/circular_overlap.pyx":129 * * * cdef double circular_overlap_single_subpixel(double x0, double y0, # <<<<<<<<<<<<<< * double x1, double y1, * double r, int subpixels): */ static double __pyx_f_9photutils_8geometry_16circular_overlap_circular_overlap_single_subpixel(double __pyx_v_x0, double __pyx_v_y0, double __pyx_v_x1, double __pyx_v_y1, double __pyx_v_r, int __pyx_v_subpixels) { CYTHON_UNUSED unsigned int __pyx_v_i; CYTHON_UNUSED unsigned int __pyx_v_j; double __pyx_v_x; double __pyx_v_y; double __pyx_v_dx; double __pyx_v_dy; double __pyx_v_r_squared; double __pyx_v_frac; double __pyx_r; __Pyx_RefNannyDeclarations double 
__pyx_t_1; int __pyx_t_2; unsigned int __pyx_t_3; int __pyx_t_4; unsigned int __pyx_t_5; int __pyx_t_6; __Pyx_RefNannySetupContext("circular_overlap_single_subpixel", 0); /* "photutils/geometry/circular_overlap.pyx":137 * cdef unsigned int i, j * cdef double x, y, dx, dy, r_squared * cdef double frac = 0. # Accumulator. # <<<<<<<<<<<<<< * * dx = (x1 - x0) / subpixels */ __pyx_v_frac = 0.; /* "photutils/geometry/circular_overlap.pyx":139 * cdef double frac = 0. # Accumulator. * * dx = (x1 - x0) / subpixels # <<<<<<<<<<<<<< * dy = (y1 - y0) / subpixels * r_squared = r ** 2 */ __pyx_t_1 = (__pyx_v_x1 - __pyx_v_x0); if (unlikely(__pyx_v_subpixels == 0)) { PyErr_SetString(PyExc_ZeroDivisionError, "float division"); __PYX_ERR(0, 139, __pyx_L1_error) } __pyx_v_dx = (__pyx_t_1 / ((double)__pyx_v_subpixels)); /* "photutils/geometry/circular_overlap.pyx":140 * * dx = (x1 - x0) / subpixels * dy = (y1 - y0) / subpixels # <<<<<<<<<<<<<< * r_squared = r ** 2 * */ __pyx_t_1 = (__pyx_v_y1 - __pyx_v_y0); if (unlikely(__pyx_v_subpixels == 0)) { PyErr_SetString(PyExc_ZeroDivisionError, "float division"); __PYX_ERR(0, 140, __pyx_L1_error) } __pyx_v_dy = (__pyx_t_1 / ((double)__pyx_v_subpixels)); /* "photutils/geometry/circular_overlap.pyx":141 * dx = (x1 - x0) / subpixels * dy = (y1 - y0) / subpixels * r_squared = r ** 2 # <<<<<<<<<<<<<< * * x = x0 - 0.5 * dx */ __pyx_v_r_squared = pow(__pyx_v_r, 2.0); /* "photutils/geometry/circular_overlap.pyx":143 * r_squared = r ** 2 * * x = x0 - 0.5 * dx # <<<<<<<<<<<<<< * for i in range(subpixels): * x += dx */ __pyx_v_x = (__pyx_v_x0 - (0.5 * __pyx_v_dx)); /* "photutils/geometry/circular_overlap.pyx":144 * * x = x0 - 0.5 * dx * for i in range(subpixels): # <<<<<<<<<<<<<< * x += dx * y = y0 - 0.5 * dy */ __pyx_t_2 = __pyx_v_subpixels; for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { __pyx_v_i = __pyx_t_3; /* "photutils/geometry/circular_overlap.pyx":145 * x = x0 - 0.5 * dx * for i in range(subpixels): * x += dx # <<<<<<<<<<<<<< * y = y0 - 0.5 * dy * for j in range(subpixels): */ __pyx_v_x = (__pyx_v_x + __pyx_v_dx); /* "photutils/geometry/circular_overlap.pyx":146 * for i in range(subpixels): * x += dx * y = y0 - 0.5 * dy # <<<<<<<<<<<<<< * for j in range(subpixels): * y += dy */ __pyx_v_y = (__pyx_v_y0 - (0.5 * __pyx_v_dy)); /* "photutils/geometry/circular_overlap.pyx":147 * x += dx * y = y0 - 0.5 * dy * for j in range(subpixels): # <<<<<<<<<<<<<< * y += dy * if x * x + y * y < r_squared: */ __pyx_t_4 = __pyx_v_subpixels; for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_4; __pyx_t_5+=1) { __pyx_v_j = __pyx_t_5; /* "photutils/geometry/circular_overlap.pyx":148 * y = y0 - 0.5 * dy * for j in range(subpixels): * y += dy # <<<<<<<<<<<<<< * if x * x + y * y < r_squared: * frac += 1. */ __pyx_v_y = (__pyx_v_y + __pyx_v_dy); /* "photutils/geometry/circular_overlap.pyx":149 * for j in range(subpixels): * y += dy * if x * x + y * y < r_squared: # <<<<<<<<<<<<<< * frac += 1. * */ __pyx_t_6 = ((((__pyx_v_x * __pyx_v_x) + (__pyx_v_y * __pyx_v_y)) < __pyx_v_r_squared) != 0); if (__pyx_t_6) { /* "photutils/geometry/circular_overlap.pyx":150 * y += dy * if x * x + y * y < r_squared: * frac += 1. # <<<<<<<<<<<<<< * * return frac / (subpixels * subpixels) */ __pyx_v_frac = (__pyx_v_frac + 1.); /* "photutils/geometry/circular_overlap.pyx":149 * for j in range(subpixels): * y += dy * if x * x + y * y < r_squared: # <<<<<<<<<<<<<< * frac += 1. * */ } } } /* "photutils/geometry/circular_overlap.pyx":152 * frac += 1. 
* * return frac / (subpixels * subpixels) # <<<<<<<<<<<<<< * * */ __pyx_t_2 = (__pyx_v_subpixels * __pyx_v_subpixels); if (unlikely(__pyx_t_2 == 0)) { PyErr_SetString(PyExc_ZeroDivisionError, "float division"); __PYX_ERR(0, 152, __pyx_L1_error) } __pyx_r = (__pyx_v_frac / ((double)__pyx_t_2)); goto __pyx_L0; /* "photutils/geometry/circular_overlap.pyx":129 * * * cdef double circular_overlap_single_subpixel(double x0, double y0, # <<<<<<<<<<<<<< * double x1, double y1, * double r, int subpixels): */ /* function exit code */ __pyx_L1_error:; __Pyx_WriteUnraisable("photutils.geometry.circular_overlap.circular_overlap_single_subpixel", __pyx_clineno, __pyx_lineno, __pyx_filename, 1, 0); __pyx_r = 0; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "photutils/geometry/circular_overlap.pyx":155 * * * cdef double circular_overlap_single_exact(double xmin, double ymin, # <<<<<<<<<<<<<< * double xmax, double ymax, * double r): */ static double __pyx_f_9photutils_8geometry_16circular_overlap_circular_overlap_single_exact(double __pyx_v_xmin, double __pyx_v_ymin, double __pyx_v_xmax, double __pyx_v_ymax, double __pyx_v_r) { double __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("circular_overlap_single_exact", 0); /* "photutils/geometry/circular_overlap.pyx":161 * Area of overlap of a rectangle and a circle * """ * if 0. <= xmin: # <<<<<<<<<<<<<< * if 0. <= ymin: * return circular_overlap_core(xmin, ymin, xmax, ymax, r) */ __pyx_t_1 = ((0. <= __pyx_v_xmin) != 0); if (__pyx_t_1) { /* "photutils/geometry/circular_overlap.pyx":162 * """ * if 0. <= xmin: * if 0. <= ymin: # <<<<<<<<<<<<<< * return circular_overlap_core(xmin, ymin, xmax, ymax, r) * elif 0. >= ymax: */ __pyx_t_1 = ((0. <= __pyx_v_ymin) != 0); if (__pyx_t_1) { /* "photutils/geometry/circular_overlap.pyx":163 * if 0. <= xmin: * if 0. <= ymin: * return circular_overlap_core(xmin, ymin, xmax, ymax, r) # <<<<<<<<<<<<<< * elif 0. >= ymax: * return circular_overlap_core(-ymax, xmin, -ymin, xmax, r) */ __pyx_r = __pyx_f_9photutils_8geometry_16circular_overlap_circular_overlap_core(__pyx_v_xmin, __pyx_v_ymin, __pyx_v_xmax, __pyx_v_ymax, __pyx_v_r); goto __pyx_L0; /* "photutils/geometry/circular_overlap.pyx":162 * """ * if 0. <= xmin: * if 0. <= ymin: # <<<<<<<<<<<<<< * return circular_overlap_core(xmin, ymin, xmax, ymax, r) * elif 0. >= ymax: */ } /* "photutils/geometry/circular_overlap.pyx":164 * if 0. <= ymin: * return circular_overlap_core(xmin, ymin, xmax, ymax, r) * elif 0. >= ymax: # <<<<<<<<<<<<<< * return circular_overlap_core(-ymax, xmin, -ymin, xmax, r) * else: */ __pyx_t_1 = ((0. >= __pyx_v_ymax) != 0); if (__pyx_t_1) { /* "photutils/geometry/circular_overlap.pyx":165 * return circular_overlap_core(xmin, ymin, xmax, ymax, r) * elif 0. >= ymax: * return circular_overlap_core(-ymax, xmin, -ymin, xmax, r) # <<<<<<<<<<<<<< * else: * return circular_overlap_single_exact(xmin, ymin, xmax, 0., r) \ */ __pyx_r = __pyx_f_9photutils_8geometry_16circular_overlap_circular_overlap_core((-__pyx_v_ymax), __pyx_v_xmin, (-__pyx_v_ymin), __pyx_v_xmax, __pyx_v_r); goto __pyx_L0; /* "photutils/geometry/circular_overlap.pyx":164 * if 0. <= ymin: * return circular_overlap_core(xmin, ymin, xmax, ymax, r) * elif 0. 
>= ymax: # <<<<<<<<<<<<<< * return circular_overlap_core(-ymax, xmin, -ymin, xmax, r) * else: */ } /* "photutils/geometry/circular_overlap.pyx":167 * return circular_overlap_core(-ymax, xmin, -ymin, xmax, r) * else: * return circular_overlap_single_exact(xmin, ymin, xmax, 0., r) \ # <<<<<<<<<<<<<< * + circular_overlap_single_exact(xmin, 0., xmax, ymax, r) * elif 0. >= xmax: */ /*else*/ { /* "photutils/geometry/circular_overlap.pyx":168 * else: * return circular_overlap_single_exact(xmin, ymin, xmax, 0., r) \ * + circular_overlap_single_exact(xmin, 0., xmax, ymax, r) # <<<<<<<<<<<<<< * elif 0. >= xmax: * if 0. <= ymin: */ __pyx_r = (__pyx_f_9photutils_8geometry_16circular_overlap_circular_overlap_single_exact(__pyx_v_xmin, __pyx_v_ymin, __pyx_v_xmax, 0., __pyx_v_r) + __pyx_f_9photutils_8geometry_16circular_overlap_circular_overlap_single_exact(__pyx_v_xmin, 0., __pyx_v_xmax, __pyx_v_ymax, __pyx_v_r)); goto __pyx_L0; } /* "photutils/geometry/circular_overlap.pyx":161 * Area of overlap of a rectangle and a circle * """ * if 0. <= xmin: # <<<<<<<<<<<<<< * if 0. <= ymin: * return circular_overlap_core(xmin, ymin, xmax, ymax, r) */ } /* "photutils/geometry/circular_overlap.pyx":169 * return circular_overlap_single_exact(xmin, ymin, xmax, 0., r) \ * + circular_overlap_single_exact(xmin, 0., xmax, ymax, r) * elif 0. >= xmax: # <<<<<<<<<<<<<< * if 0. <= ymin: * return circular_overlap_core(-xmax, ymin, -xmin, ymax, r) */ __pyx_t_1 = ((0. >= __pyx_v_xmax) != 0); if (__pyx_t_1) { /* "photutils/geometry/circular_overlap.pyx":170 * + circular_overlap_single_exact(xmin, 0., xmax, ymax, r) * elif 0. >= xmax: * if 0. <= ymin: # <<<<<<<<<<<<<< * return circular_overlap_core(-xmax, ymin, -xmin, ymax, r) * elif 0. >= ymax: */ __pyx_t_1 = ((0. <= __pyx_v_ymin) != 0); if (__pyx_t_1) { /* "photutils/geometry/circular_overlap.pyx":171 * elif 0. >= xmax: * if 0. <= ymin: * return circular_overlap_core(-xmax, ymin, -xmin, ymax, r) # <<<<<<<<<<<<<< * elif 0. >= ymax: * return circular_overlap_core(-xmax, -ymax, -xmin, -ymin, r) */ __pyx_r = __pyx_f_9photutils_8geometry_16circular_overlap_circular_overlap_core((-__pyx_v_xmax), __pyx_v_ymin, (-__pyx_v_xmin), __pyx_v_ymax, __pyx_v_r); goto __pyx_L0; /* "photutils/geometry/circular_overlap.pyx":170 * + circular_overlap_single_exact(xmin, 0., xmax, ymax, r) * elif 0. >= xmax: * if 0. <= ymin: # <<<<<<<<<<<<<< * return circular_overlap_core(-xmax, ymin, -xmin, ymax, r) * elif 0. >= ymax: */ } /* "photutils/geometry/circular_overlap.pyx":172 * if 0. <= ymin: * return circular_overlap_core(-xmax, ymin, -xmin, ymax, r) * elif 0. >= ymax: # <<<<<<<<<<<<<< * return circular_overlap_core(-xmax, -ymax, -xmin, -ymin, r) * else: */ __pyx_t_1 = ((0. >= __pyx_v_ymax) != 0); if (__pyx_t_1) { /* "photutils/geometry/circular_overlap.pyx":173 * return circular_overlap_core(-xmax, ymin, -xmin, ymax, r) * elif 0. >= ymax: * return circular_overlap_core(-xmax, -ymax, -xmin, -ymin, r) # <<<<<<<<<<<<<< * else: * return circular_overlap_single_exact(xmin, ymin, xmax, 0., r) \ */ __pyx_r = __pyx_f_9photutils_8geometry_16circular_overlap_circular_overlap_core((-__pyx_v_xmax), (-__pyx_v_ymax), (-__pyx_v_xmin), (-__pyx_v_ymin), __pyx_v_r); goto __pyx_L0; /* "photutils/geometry/circular_overlap.pyx":172 * if 0. <= ymin: * return circular_overlap_core(-xmax, ymin, -xmin, ymax, r) * elif 0. 
>= ymax: # <<<<<<<<<<<<<< * return circular_overlap_core(-xmax, -ymax, -xmin, -ymin, r) * else: */ } /* "photutils/geometry/circular_overlap.pyx":175 * return circular_overlap_core(-xmax, -ymax, -xmin, -ymin, r) * else: * return circular_overlap_single_exact(xmin, ymin, xmax, 0., r) \ # <<<<<<<<<<<<<< * + circular_overlap_single_exact(xmin, 0., xmax, ymax, r) * else: */ /*else*/ { /* "photutils/geometry/circular_overlap.pyx":176 * else: * return circular_overlap_single_exact(xmin, ymin, xmax, 0., r) \ * + circular_overlap_single_exact(xmin, 0., xmax, ymax, r) # <<<<<<<<<<<<<< * else: * if 0. <= ymin: */ __pyx_r = (__pyx_f_9photutils_8geometry_16circular_overlap_circular_overlap_single_exact(__pyx_v_xmin, __pyx_v_ymin, __pyx_v_xmax, 0., __pyx_v_r) + __pyx_f_9photutils_8geometry_16circular_overlap_circular_overlap_single_exact(__pyx_v_xmin, 0., __pyx_v_xmax, __pyx_v_ymax, __pyx_v_r)); goto __pyx_L0; } /* "photutils/geometry/circular_overlap.pyx":169 * return circular_overlap_single_exact(xmin, ymin, xmax, 0., r) \ * + circular_overlap_single_exact(xmin, 0., xmax, ymax, r) * elif 0. >= xmax: # <<<<<<<<<<<<<< * if 0. <= ymin: * return circular_overlap_core(-xmax, ymin, -xmin, ymax, r) */ } /* "photutils/geometry/circular_overlap.pyx":178 * + circular_overlap_single_exact(xmin, 0., xmax, ymax, r) * else: * if 0. <= ymin: # <<<<<<<<<<<<<< * return circular_overlap_single_exact(xmin, ymin, 0., ymax, r) \ * + circular_overlap_single_exact(0., ymin, xmax, ymax, r) */ /*else*/ { __pyx_t_1 = ((0. <= __pyx_v_ymin) != 0); if (__pyx_t_1) { /* "photutils/geometry/circular_overlap.pyx":180 * if 0. <= ymin: * return circular_overlap_single_exact(xmin, ymin, 0., ymax, r) \ * + circular_overlap_single_exact(0., ymin, xmax, ymax, r) # <<<<<<<<<<<<<< * if 0. >= ymax: * return circular_overlap_single_exact(xmin, ymin, 0., ymax, r) \ */ __pyx_r = (__pyx_f_9photutils_8geometry_16circular_overlap_circular_overlap_single_exact(__pyx_v_xmin, __pyx_v_ymin, 0., __pyx_v_ymax, __pyx_v_r) + __pyx_f_9photutils_8geometry_16circular_overlap_circular_overlap_single_exact(0., __pyx_v_ymin, __pyx_v_xmax, __pyx_v_ymax, __pyx_v_r)); goto __pyx_L0; /* "photutils/geometry/circular_overlap.pyx":178 * + circular_overlap_single_exact(xmin, 0., xmax, ymax, r) * else: * if 0. <= ymin: # <<<<<<<<<<<<<< * return circular_overlap_single_exact(xmin, ymin, 0., ymax, r) \ * + circular_overlap_single_exact(0., ymin, xmax, ymax, r) */ } /* "photutils/geometry/circular_overlap.pyx":181 * return circular_overlap_single_exact(xmin, ymin, 0., ymax, r) \ * + circular_overlap_single_exact(0., ymin, xmax, ymax, r) * if 0. >= ymax: # <<<<<<<<<<<<<< * return circular_overlap_single_exact(xmin, ymin, 0., ymax, r) \ * + circular_overlap_single_exact(0., ymin, xmax, ymax, r) */ __pyx_t_1 = ((0. >= __pyx_v_ymax) != 0); if (__pyx_t_1) { /* "photutils/geometry/circular_overlap.pyx":183 * if 0. 
>= ymax: * return circular_overlap_single_exact(xmin, ymin, 0., ymax, r) \ * + circular_overlap_single_exact(0., ymin, xmax, ymax, r) # <<<<<<<<<<<<<< * else: * return circular_overlap_single_exact(xmin, ymin, 0., 0., r) \ */ __pyx_r = (__pyx_f_9photutils_8geometry_16circular_overlap_circular_overlap_single_exact(__pyx_v_xmin, __pyx_v_ymin, 0., __pyx_v_ymax, __pyx_v_r) + __pyx_f_9photutils_8geometry_16circular_overlap_circular_overlap_single_exact(0., __pyx_v_ymin, __pyx_v_xmax, __pyx_v_ymax, __pyx_v_r)); goto __pyx_L0; /* "photutils/geometry/circular_overlap.pyx":181 * return circular_overlap_single_exact(xmin, ymin, 0., ymax, r) \ * + circular_overlap_single_exact(0., ymin, xmax, ymax, r) * if 0. >= ymax: # <<<<<<<<<<<<<< * return circular_overlap_single_exact(xmin, ymin, 0., ymax, r) \ * + circular_overlap_single_exact(0., ymin, xmax, ymax, r) */ } /* "photutils/geometry/circular_overlap.pyx":185 * + circular_overlap_single_exact(0., ymin, xmax, ymax, r) * else: * return circular_overlap_single_exact(xmin, ymin, 0., 0., r) \ # <<<<<<<<<<<<<< * + circular_overlap_single_exact(0., ymin, xmax, 0., r) \ * + circular_overlap_single_exact(xmin, 0., 0., ymax, r) \ */ /*else*/ { /* "photutils/geometry/circular_overlap.pyx":188 * + circular_overlap_single_exact(0., ymin, xmax, 0., r) \ * + circular_overlap_single_exact(xmin, 0., 0., ymax, r) \ * + circular_overlap_single_exact(0., 0., xmax, ymax, r) # <<<<<<<<<<<<<< * * */ __pyx_r = (((__pyx_f_9photutils_8geometry_16circular_overlap_circular_overlap_single_exact(__pyx_v_xmin, __pyx_v_ymin, 0., 0., __pyx_v_r) + __pyx_f_9photutils_8geometry_16circular_overlap_circular_overlap_single_exact(0., __pyx_v_ymin, __pyx_v_xmax, 0., __pyx_v_r)) + __pyx_f_9photutils_8geometry_16circular_overlap_circular_overlap_single_exact(__pyx_v_xmin, 0., 0., __pyx_v_ymax, __pyx_v_r)) + __pyx_f_9photutils_8geometry_16circular_overlap_circular_overlap_single_exact(0., 0., __pyx_v_xmax, __pyx_v_ymax, __pyx_v_r)); goto __pyx_L0; } } /* "photutils/geometry/circular_overlap.pyx":155 * * * cdef double circular_overlap_single_exact(double xmin, double ymin, # <<<<<<<<<<<<<< * double xmax, double ymax, * double r): */ /* function exit code */ __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "photutils/geometry/circular_overlap.pyx":191 * * * cdef double circular_overlap_core(double xmin, double ymin, double xmax, double ymax, # <<<<<<<<<<<<<< * double r): * """ */ static double __pyx_f_9photutils_8geometry_16circular_overlap_circular_overlap_core(double __pyx_v_xmin, double __pyx_v_ymin, double __pyx_v_xmax, double __pyx_v_ymax, double __pyx_v_r) { double __pyx_v_area; double __pyx_v_d1; double __pyx_v_d2; double __pyx_v_x1; double __pyx_v_x2; double __pyx_v_y1; double __pyx_v_y2; double __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; double __pyx_t_3; double __pyx_t_4; __Pyx_RefNannySetupContext("circular_overlap_core", 0); /* "photutils/geometry/circular_overlap.pyx":200 * cdef double area, d1, d2, x1, x2, y1, y2 * * if xmin * xmin + ymin * ymin > r * r: # <<<<<<<<<<<<<< * area = 0. * elif xmax * xmax + ymax * ymax < r * r: */ __pyx_t_1 = ((((__pyx_v_xmin * __pyx_v_xmin) + (__pyx_v_ymin * __pyx_v_ymin)) > (__pyx_v_r * __pyx_v_r)) != 0); if (__pyx_t_1) { /* "photutils/geometry/circular_overlap.pyx":201 * * if xmin * xmin + ymin * ymin > r * r: * area = 0. 
# <<<<<<<<<<<<<< * elif xmax * xmax + ymax * ymax < r * r: * area = (xmax - xmin) * (ymax - ymin) */ __pyx_v_area = 0.; /* "photutils/geometry/circular_overlap.pyx":200 * cdef double area, d1, d2, x1, x2, y1, y2 * * if xmin * xmin + ymin * ymin > r * r: # <<<<<<<<<<<<<< * area = 0. * elif xmax * xmax + ymax * ymax < r * r: */ goto __pyx_L3; } /* "photutils/geometry/circular_overlap.pyx":202 * if xmin * xmin + ymin * ymin > r * r: * area = 0. * elif xmax * xmax + ymax * ymax < r * r: # <<<<<<<<<<<<<< * area = (xmax - xmin) * (ymax - ymin) * else: */ __pyx_t_1 = ((((__pyx_v_xmax * __pyx_v_xmax) + (__pyx_v_ymax * __pyx_v_ymax)) < (__pyx_v_r * __pyx_v_r)) != 0); if (__pyx_t_1) { /* "photutils/geometry/circular_overlap.pyx":203 * area = 0. * elif xmax * xmax + ymax * ymax < r * r: * area = (xmax - xmin) * (ymax - ymin) # <<<<<<<<<<<<<< * else: * area = 0. */ __pyx_v_area = ((__pyx_v_xmax - __pyx_v_xmin) * (__pyx_v_ymax - __pyx_v_ymin)); /* "photutils/geometry/circular_overlap.pyx":202 * if xmin * xmin + ymin * ymin > r * r: * area = 0. * elif xmax * xmax + ymax * ymax < r * r: # <<<<<<<<<<<<<< * area = (xmax - xmin) * (ymax - ymin) * else: */ goto __pyx_L3; } /* "photutils/geometry/circular_overlap.pyx":205 * area = (xmax - xmin) * (ymax - ymin) * else: * area = 0. # <<<<<<<<<<<<<< * d1 = floor_sqrt(xmax * xmax + ymin * ymin) * d2 = floor_sqrt(xmin * xmin + ymax * ymax) */ /*else*/ { __pyx_v_area = 0.; /* "photutils/geometry/circular_overlap.pyx":206 * else: * area = 0. * d1 = floor_sqrt(xmax * xmax + ymin * ymin) # <<<<<<<<<<<<<< * d2 = floor_sqrt(xmin * xmin + ymax * ymax) * if d1 < r and d2 < r: */ __pyx_v_d1 = __pyx_f_9photutils_8geometry_4core_floor_sqrt(((__pyx_v_xmax * __pyx_v_xmax) + (__pyx_v_ymin * __pyx_v_ymin))); /* "photutils/geometry/circular_overlap.pyx":207 * area = 0. 
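 * NOTE on the partial-overlap branch generated below: d1 and d2 are the
 * distances from the circle centre (the origin) to the rectangle corners
 * (xmax, ymin) and (xmin, ymax); the four cases that follow compare them
 * with the radius r and assemble the overlap area from the rectangle area,
 * area_triangle() and area_arc() terms provided by photutils.geometry.core.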
* d1 = floor_sqrt(xmax * xmax + ymin * ymin) * d2 = floor_sqrt(xmin * xmin + ymax * ymax) # <<<<<<<<<<<<<< * if d1 < r and d2 < r: * x1, y1 = floor_sqrt(r * r - ymax * ymax), ymax */ __pyx_v_d2 = __pyx_f_9photutils_8geometry_4core_floor_sqrt(((__pyx_v_xmin * __pyx_v_xmin) + (__pyx_v_ymax * __pyx_v_ymax))); /* "photutils/geometry/circular_overlap.pyx":208 * d1 = floor_sqrt(xmax * xmax + ymin * ymin) * d2 = floor_sqrt(xmin * xmin + ymax * ymax) * if d1 < r and d2 < r: # <<<<<<<<<<<<<< * x1, y1 = floor_sqrt(r * r - ymax * ymax), ymax * x2, y2 = xmax, floor_sqrt(r * r - xmax * xmax) */ __pyx_t_2 = ((__pyx_v_d1 < __pyx_v_r) != 0); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L5_bool_binop_done; } __pyx_t_2 = ((__pyx_v_d2 < __pyx_v_r) != 0); __pyx_t_1 = __pyx_t_2; __pyx_L5_bool_binop_done:; if (__pyx_t_1) { /* "photutils/geometry/circular_overlap.pyx":209 * d2 = floor_sqrt(xmin * xmin + ymax * ymax) * if d1 < r and d2 < r: * x1, y1 = floor_sqrt(r * r - ymax * ymax), ymax # <<<<<<<<<<<<<< * x2, y2 = xmax, floor_sqrt(r * r - xmax * xmax) * area = ((xmax - xmin) * (ymax - ymin) - */ __pyx_t_3 = __pyx_f_9photutils_8geometry_4core_floor_sqrt(((__pyx_v_r * __pyx_v_r) - (__pyx_v_ymax * __pyx_v_ymax))); __pyx_t_4 = __pyx_v_ymax; __pyx_v_x1 = __pyx_t_3; __pyx_v_y1 = __pyx_t_4; /* "photutils/geometry/circular_overlap.pyx":210 * if d1 < r and d2 < r: * x1, y1 = floor_sqrt(r * r - ymax * ymax), ymax * x2, y2 = xmax, floor_sqrt(r * r - xmax * xmax) # <<<<<<<<<<<<<< * area = ((xmax - xmin) * (ymax - ymin) - * area_triangle(x1, y1, x2, y2, xmax, ymax) + */ __pyx_t_4 = __pyx_v_xmax; __pyx_t_3 = __pyx_f_9photutils_8geometry_4core_floor_sqrt(((__pyx_v_r * __pyx_v_r) - (__pyx_v_xmax * __pyx_v_xmax))); __pyx_v_x2 = __pyx_t_4; __pyx_v_y2 = __pyx_t_3; /* "photutils/geometry/circular_overlap.pyx":212 * x2, y2 = xmax, floor_sqrt(r * r - xmax * xmax) * area = ((xmax - xmin) * (ymax - ymin) - * area_triangle(x1, y1, x2, y2, xmax, ymax) + # <<<<<<<<<<<<<< * area_arc(x1, y1, x2, y2, r)) * elif d1 < r: */ __pyx_v_area = ((((__pyx_v_xmax - __pyx_v_xmin) * (__pyx_v_ymax - __pyx_v_ymin)) - __pyx_f_9photutils_8geometry_4core_area_triangle(__pyx_v_x1, __pyx_v_y1, __pyx_v_x2, __pyx_v_y2, __pyx_v_xmax, __pyx_v_ymax)) + __pyx_f_9photutils_8geometry_4core_area_arc(__pyx_v_x1, __pyx_v_y1, __pyx_v_x2, __pyx_v_y2, __pyx_v_r)); /* "photutils/geometry/circular_overlap.pyx":208 * d1 = floor_sqrt(xmax * xmax + ymin * ymin) * d2 = floor_sqrt(xmin * xmin + ymax * ymax) * if d1 < r and d2 < r: # <<<<<<<<<<<<<< * x1, y1 = floor_sqrt(r * r - ymax * ymax), ymax * x2, y2 = xmax, floor_sqrt(r * r - xmax * xmax) */ goto __pyx_L4; } /* "photutils/geometry/circular_overlap.pyx":214 * area_triangle(x1, y1, x2, y2, xmax, ymax) + * area_arc(x1, y1, x2, y2, r)) * elif d1 < r: # <<<<<<<<<<<<<< * x1, y1 = xmin, floor_sqrt(r * r - xmin * xmin) * x2, y2 = xmax, floor_sqrt(r * r - xmax * xmax) */ __pyx_t_1 = ((__pyx_v_d1 < __pyx_v_r) != 0); if (__pyx_t_1) { /* "photutils/geometry/circular_overlap.pyx":215 * area_arc(x1, y1, x2, y2, r)) * elif d1 < r: * x1, y1 = xmin, floor_sqrt(r * r - xmin * xmin) # <<<<<<<<<<<<<< * x2, y2 = xmax, floor_sqrt(r * r - xmax * xmax) * area = (area_arc(x1, y1, x2, y2, r) + */ __pyx_t_3 = __pyx_v_xmin; __pyx_t_4 = __pyx_f_9photutils_8geometry_4core_floor_sqrt(((__pyx_v_r * __pyx_v_r) - (__pyx_v_xmin * __pyx_v_xmin))); __pyx_v_x1 = __pyx_t_3; __pyx_v_y1 = __pyx_t_4; /* "photutils/geometry/circular_overlap.pyx":216 * elif d1 < r: * x1, y1 = xmin, floor_sqrt(r * r - xmin * xmin) * x2, y2 = xmax, floor_sqrt(r * r - xmax 
* xmax) # <<<<<<<<<<<<<< * area = (area_arc(x1, y1, x2, y2, r) + * area_triangle(x1, y1, x1, ymin, xmax, ymin) + */ __pyx_t_4 = __pyx_v_xmax; __pyx_t_3 = __pyx_f_9photutils_8geometry_4core_floor_sqrt(((__pyx_v_r * __pyx_v_r) - (__pyx_v_xmax * __pyx_v_xmax))); __pyx_v_x2 = __pyx_t_4; __pyx_v_y2 = __pyx_t_3; /* "photutils/geometry/circular_overlap.pyx":218 * x2, y2 = xmax, floor_sqrt(r * r - xmax * xmax) * area = (area_arc(x1, y1, x2, y2, r) + * area_triangle(x1, y1, x1, ymin, xmax, ymin) + # <<<<<<<<<<<<<< * area_triangle(x1, y1, x2, ymin, x2, y2)) * elif d2 < r: */ __pyx_v_area = ((__pyx_f_9photutils_8geometry_4core_area_arc(__pyx_v_x1, __pyx_v_y1, __pyx_v_x2, __pyx_v_y2, __pyx_v_r) + __pyx_f_9photutils_8geometry_4core_area_triangle(__pyx_v_x1, __pyx_v_y1, __pyx_v_x1, __pyx_v_ymin, __pyx_v_xmax, __pyx_v_ymin)) + __pyx_f_9photutils_8geometry_4core_area_triangle(__pyx_v_x1, __pyx_v_y1, __pyx_v_x2, __pyx_v_ymin, __pyx_v_x2, __pyx_v_y2)); /* "photutils/geometry/circular_overlap.pyx":214 * area_triangle(x1, y1, x2, y2, xmax, ymax) + * area_arc(x1, y1, x2, y2, r)) * elif d1 < r: # <<<<<<<<<<<<<< * x1, y1 = xmin, floor_sqrt(r * r - xmin * xmin) * x2, y2 = xmax, floor_sqrt(r * r - xmax * xmax) */ goto __pyx_L4; } /* "photutils/geometry/circular_overlap.pyx":220 * area_triangle(x1, y1, x1, ymin, xmax, ymin) + * area_triangle(x1, y1, x2, ymin, x2, y2)) * elif d2 < r: # <<<<<<<<<<<<<< * x1, y1 = floor_sqrt(r * r - ymin * ymin), ymin * x2, y2 = floor_sqrt(r * r - ymax * ymax), ymax */ __pyx_t_1 = ((__pyx_v_d2 < __pyx_v_r) != 0); if (__pyx_t_1) { /* "photutils/geometry/circular_overlap.pyx":221 * area_triangle(x1, y1, x2, ymin, x2, y2)) * elif d2 < r: * x1, y1 = floor_sqrt(r * r - ymin * ymin), ymin # <<<<<<<<<<<<<< * x2, y2 = floor_sqrt(r * r - ymax * ymax), ymax * area = (area_arc(x1, y1, x2, y2, r) + */ __pyx_t_3 = __pyx_f_9photutils_8geometry_4core_floor_sqrt(((__pyx_v_r * __pyx_v_r) - (__pyx_v_ymin * __pyx_v_ymin))); __pyx_t_4 = __pyx_v_ymin; __pyx_v_x1 = __pyx_t_3; __pyx_v_y1 = __pyx_t_4; /* "photutils/geometry/circular_overlap.pyx":222 * elif d2 < r: * x1, y1 = floor_sqrt(r * r - ymin * ymin), ymin * x2, y2 = floor_sqrt(r * r - ymax * ymax), ymax # <<<<<<<<<<<<<< * area = (area_arc(x1, y1, x2, y2, r) + * area_triangle(x1, y1, xmin, y1, xmin, ymax) + */ __pyx_t_4 = __pyx_f_9photutils_8geometry_4core_floor_sqrt(((__pyx_v_r * __pyx_v_r) - (__pyx_v_ymax * __pyx_v_ymax))); __pyx_t_3 = __pyx_v_ymax; __pyx_v_x2 = __pyx_t_4; __pyx_v_y2 = __pyx_t_3; /* "photutils/geometry/circular_overlap.pyx":224 * x2, y2 = floor_sqrt(r * r - ymax * ymax), ymax * area = (area_arc(x1, y1, x2, y2, r) + * area_triangle(x1, y1, xmin, y1, xmin, ymax) + # <<<<<<<<<<<<<< * area_triangle(x1, y1, xmin, y2, x2, y2)) * else: */ __pyx_v_area = ((__pyx_f_9photutils_8geometry_4core_area_arc(__pyx_v_x1, __pyx_v_y1, __pyx_v_x2, __pyx_v_y2, __pyx_v_r) + __pyx_f_9photutils_8geometry_4core_area_triangle(__pyx_v_x1, __pyx_v_y1, __pyx_v_xmin, __pyx_v_y1, __pyx_v_xmin, __pyx_v_ymax)) + __pyx_f_9photutils_8geometry_4core_area_triangle(__pyx_v_x1, __pyx_v_y1, __pyx_v_xmin, __pyx_v_y2, __pyx_v_x2, __pyx_v_y2)); /* "photutils/geometry/circular_overlap.pyx":220 * area_triangle(x1, y1, x1, ymin, xmax, ymin) + * area_triangle(x1, y1, x2, ymin, x2, y2)) * elif d2 < r: # <<<<<<<<<<<<<< * x1, y1 = floor_sqrt(r * r - ymin * ymin), ymin * x2, y2 = floor_sqrt(r * r - ymax * ymax), ymax */ goto __pyx_L4; } /* "photutils/geometry/circular_overlap.pyx":227 * area_triangle(x1, y1, xmin, y2, x2, y2)) * else: * x1, y1 = floor_sqrt(r * r - ymin * ymin), ymin # 
<<<<<<<<<<<<<< * x2, y2 = xmin, floor_sqrt(r * r - xmin * xmin) * area = (area_arc(x1, y1, x2, y2, r) + */ /*else*/ { __pyx_t_3 = __pyx_f_9photutils_8geometry_4core_floor_sqrt(((__pyx_v_r * __pyx_v_r) - (__pyx_v_ymin * __pyx_v_ymin))); __pyx_t_4 = __pyx_v_ymin; __pyx_v_x1 = __pyx_t_3; __pyx_v_y1 = __pyx_t_4; /* "photutils/geometry/circular_overlap.pyx":228 * else: * x1, y1 = floor_sqrt(r * r - ymin * ymin), ymin * x2, y2 = xmin, floor_sqrt(r * r - xmin * xmin) # <<<<<<<<<<<<<< * area = (area_arc(x1, y1, x2, y2, r) + * area_triangle(x1, y1, x2, y2, xmin, ymin)) */ __pyx_t_4 = __pyx_v_xmin; __pyx_t_3 = __pyx_f_9photutils_8geometry_4core_floor_sqrt(((__pyx_v_r * __pyx_v_r) - (__pyx_v_xmin * __pyx_v_xmin))); __pyx_v_x2 = __pyx_t_4; __pyx_v_y2 = __pyx_t_3; /* "photutils/geometry/circular_overlap.pyx":229 * x1, y1 = floor_sqrt(r * r - ymin * ymin), ymin * x2, y2 = xmin, floor_sqrt(r * r - xmin * xmin) * area = (area_arc(x1, y1, x2, y2, r) + # <<<<<<<<<<<<<< * area_triangle(x1, y1, x2, y2, xmin, ymin)) * */ __pyx_v_area = (__pyx_f_9photutils_8geometry_4core_area_arc(__pyx_v_x1, __pyx_v_y1, __pyx_v_x2, __pyx_v_y2, __pyx_v_r) + __pyx_f_9photutils_8geometry_4core_area_triangle(__pyx_v_x1, __pyx_v_y1, __pyx_v_x2, __pyx_v_y2, __pyx_v_xmin, __pyx_v_ymin)); } __pyx_L4:; } __pyx_L3:; /* "photutils/geometry/circular_overlap.pyx":232 * area_triangle(x1, y1, x2, y2, xmin, ymin)) * * return area # <<<<<<<<<<<<<< */ __pyx_r = __pyx_v_area; goto __pyx_L0; /* "photutils/geometry/circular_overlap.pyx":191 * * * cdef double circular_overlap_core(double xmin, double ymin, double xmax, double ymax, # <<<<<<<<<<<<<< * double r): * """ */ /* function exit code */ __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":214 * # experimental exception made for __getbuffer__ and __releasebuffer__ * # -- the details of this may change. * def __getbuffer__(ndarray self, Py_buffer* info, int flags): # <<<<<<<<<<<<<< * # This implementation of getbuffer is geared towards Cython * # requirements, and does not yet fullfill the PEP. 
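 * NOTE: a short summary of the generated implementation that follows: it
 * validates the requested PyBUF_C_CONTIGUOUS / PyBUF_F_CONTIGUOUS flags
 * against the array's own flags, fills info.buf, info.ndim, info.shape,
 * info.strides, info.itemsize and info.readonly, and then writes a
 * buffer-protocol format string -- a single code for simple dtypes
 * (e.g. NPY_DOUBLE -> "d", NPY_CDOUBLE -> "Zd") or a '^'-prefixed string
 * built recursively by _util_dtypestring() for structured dtypes.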
*/ /* Python wrapper */ static CYTHON_UNUSED int __pyx_pw_5numpy_7ndarray_1__getbuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ static CYTHON_UNUSED int __pyx_pw_5numpy_7ndarray_1__getbuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0); __pyx_r = __pyx_pf_5numpy_7ndarray___getbuffer__(((PyArrayObject *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_v_copy_shape; int __pyx_v_i; int __pyx_v_ndim; int __pyx_v_endian_detector; int __pyx_v_little_endian; int __pyx_v_t; char *__pyx_v_f; PyArray_Descr *__pyx_v_descr = 0; int __pyx_v_offset; int __pyx_v_hasfields; int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; int __pyx_t_4; int __pyx_t_5; PyObject *__pyx_t_6 = NULL; char *__pyx_t_7; __Pyx_RefNannySetupContext("__getbuffer__", 0); if (__pyx_v_info != NULL) { __pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None); __Pyx_GIVEREF(__pyx_v_info->obj); } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":220 * # of flags * * if info == NULL: return # <<<<<<<<<<<<<< * * cdef int copy_shape, i, ndim */ __pyx_t_1 = ((__pyx_v_info == NULL) != 0); if (__pyx_t_1) { __pyx_r = 0; goto __pyx_L0; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":223 * * cdef int copy_shape, i, ndim * cdef int endian_detector = 1 # <<<<<<<<<<<<<< * cdef bint little_endian = ((&endian_detector)[0] != 0) * */ __pyx_v_endian_detector = 1; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":224 * cdef int copy_shape, i, ndim * cdef int endian_detector = 1 * cdef bint little_endian = ((&endian_detector)[0] != 0) # <<<<<<<<<<<<<< * * ndim = PyArray_NDIM(self) */ __pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":226 * cdef bint little_endian = ((&endian_detector)[0] != 0) * * ndim = PyArray_NDIM(self) # <<<<<<<<<<<<<< * * if sizeof(npy_intp) != sizeof(Py_ssize_t): */ __pyx_v_ndim = PyArray_NDIM(__pyx_v_self); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":228 * ndim = PyArray_NDIM(self) * * if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<< * copy_shape = 1 * else: */ __pyx_t_1 = (((sizeof(npy_intp)) != (sizeof(Py_ssize_t))) != 0); if (__pyx_t_1) { /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":229 * * if sizeof(npy_intp) != sizeof(Py_ssize_t): * copy_shape = 1 # <<<<<<<<<<<<<< * else: * copy_shape = 0 */ __pyx_v_copy_shape = 1; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":228 * ndim = PyArray_NDIM(self) * * if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<< * copy_shape = 1 * else: */ goto __pyx_L4; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":231 * copy_shape = 1 * else: * copy_shape = 0 # <<<<<<<<<<<<<< * * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) */ /*else*/ { __pyx_v_copy_shape = 0; } 
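/* NOTE on the copy_shape flag set above: when sizeof(npy_intp) differs from
   sizeof(Py_ssize_t) the array's own dimension/stride arrays cannot be exposed
   directly, so the code below allocates a single PyObject_Malloc block of
   2 * ndim Py_ssize_t values (strides first, shape after) and copies them
   element by element; otherwise PyArray_STRIDES()/PyArray_DIMS() are aliased
   without copying.  __releasebuffer__, further below, frees the copied block. */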
__pyx_L4:; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":233 * copy_shape = 0 * * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) # <<<<<<<<<<<<<< * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): * raise ValueError(u"ndarray is not C contiguous") */ __pyx_t_2 = (((__pyx_v_flags & PyBUF_C_CONTIGUOUS) == PyBUF_C_CONTIGUOUS) != 0); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L6_bool_binop_done; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":234 * * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): # <<<<<<<<<<<<<< * raise ValueError(u"ndarray is not C contiguous") * */ __pyx_t_2 = ((!(PyArray_CHKFLAGS(__pyx_v_self, NPY_C_CONTIGUOUS) != 0)) != 0); __pyx_t_1 = __pyx_t_2; __pyx_L6_bool_binop_done:; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":233 * copy_shape = 0 * * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) # <<<<<<<<<<<<<< * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): * raise ValueError(u"ndarray is not C contiguous") */ if (__pyx_t_1) { /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":235 * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): * raise ValueError(u"ndarray is not C contiguous") # <<<<<<<<<<<<<< * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple_, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 235, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 235, __pyx_L1_error) /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":233 * copy_shape = 0 * * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) # <<<<<<<<<<<<<< * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): * raise ValueError(u"ndarray is not C contiguous") */ } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":237 * raise ValueError(u"ndarray is not C contiguous") * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) # <<<<<<<<<<<<<< * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): * raise ValueError(u"ndarray is not Fortran contiguous") */ __pyx_t_2 = (((__pyx_v_flags & PyBUF_F_CONTIGUOUS) == PyBUF_F_CONTIGUOUS) != 0); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L9_bool_binop_done; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":238 * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): # <<<<<<<<<<<<<< * raise ValueError(u"ndarray is not Fortran contiguous") * */ __pyx_t_2 = ((!(PyArray_CHKFLAGS(__pyx_v_self, NPY_F_CONTIGUOUS) != 0)) != 0); __pyx_t_1 = __pyx_t_2; __pyx_L9_bool_binop_done:; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":237 * raise ValueError(u"ndarray is not C contiguous") * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) # <<<<<<<<<<<<<< * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): * raise ValueError(u"ndarray is not Fortran contiguous") */ if (__pyx_t_1) { /* 
"../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":239 * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): * raise ValueError(u"ndarray is not Fortran contiguous") # <<<<<<<<<<<<<< * * info.buf = PyArray_DATA(self) */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__2, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 239, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 239, __pyx_L1_error) /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":237 * raise ValueError(u"ndarray is not C contiguous") * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) # <<<<<<<<<<<<<< * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): * raise ValueError(u"ndarray is not Fortran contiguous") */ } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":241 * raise ValueError(u"ndarray is not Fortran contiguous") * * info.buf = PyArray_DATA(self) # <<<<<<<<<<<<<< * info.ndim = ndim * if copy_shape: */ __pyx_v_info->buf = PyArray_DATA(__pyx_v_self); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":242 * * info.buf = PyArray_DATA(self) * info.ndim = ndim # <<<<<<<<<<<<<< * if copy_shape: * # Allocate new buffer for strides and shape info. */ __pyx_v_info->ndim = __pyx_v_ndim; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":243 * info.buf = PyArray_DATA(self) * info.ndim = ndim * if copy_shape: # <<<<<<<<<<<<<< * # Allocate new buffer for strides and shape info. * # This is allocated as one block, strides first. */ __pyx_t_1 = (__pyx_v_copy_shape != 0); if (__pyx_t_1) { /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":246 * # Allocate new buffer for strides and shape info. * # This is allocated as one block, strides first. * info.strides = PyObject_Malloc(sizeof(Py_ssize_t) * 2 * ndim) # <<<<<<<<<<<<<< * info.shape = info.strides + ndim * for i in range(ndim): */ __pyx_v_info->strides = ((Py_ssize_t *)PyObject_Malloc((((sizeof(Py_ssize_t)) * 2) * ((size_t)__pyx_v_ndim)))); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":247 * # This is allocated as one block, strides first. 
* info.strides = PyObject_Malloc(sizeof(Py_ssize_t) * 2 * ndim) * info.shape = info.strides + ndim # <<<<<<<<<<<<<< * for i in range(ndim): * info.strides[i] = PyArray_STRIDES(self)[i] */ __pyx_v_info->shape = (__pyx_v_info->strides + __pyx_v_ndim); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":248 * info.strides = PyObject_Malloc(sizeof(Py_ssize_t) * 2 * ndim) * info.shape = info.strides + ndim * for i in range(ndim): # <<<<<<<<<<<<<< * info.strides[i] = PyArray_STRIDES(self)[i] * info.shape[i] = PyArray_DIMS(self)[i] */ __pyx_t_4 = __pyx_v_ndim; for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_4; __pyx_t_5+=1) { __pyx_v_i = __pyx_t_5; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":249 * info.shape = info.strides + ndim * for i in range(ndim): * info.strides[i] = PyArray_STRIDES(self)[i] # <<<<<<<<<<<<<< * info.shape[i] = PyArray_DIMS(self)[i] * else: */ (__pyx_v_info->strides[__pyx_v_i]) = (PyArray_STRIDES(__pyx_v_self)[__pyx_v_i]); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":250 * for i in range(ndim): * info.strides[i] = PyArray_STRIDES(self)[i] * info.shape[i] = PyArray_DIMS(self)[i] # <<<<<<<<<<<<<< * else: * info.strides = PyArray_STRIDES(self) */ (__pyx_v_info->shape[__pyx_v_i]) = (PyArray_DIMS(__pyx_v_self)[__pyx_v_i]); } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":243 * info.buf = PyArray_DATA(self) * info.ndim = ndim * if copy_shape: # <<<<<<<<<<<<<< * # Allocate new buffer for strides and shape info. * # This is allocated as one block, strides first. */ goto __pyx_L11; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":252 * info.shape[i] = PyArray_DIMS(self)[i] * else: * info.strides = PyArray_STRIDES(self) # <<<<<<<<<<<<<< * info.shape = PyArray_DIMS(self) * info.suboffsets = NULL */ /*else*/ { __pyx_v_info->strides = ((Py_ssize_t *)PyArray_STRIDES(__pyx_v_self)); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":253 * else: * info.strides = PyArray_STRIDES(self) * info.shape = PyArray_DIMS(self) # <<<<<<<<<<<<<< * info.suboffsets = NULL * info.itemsize = PyArray_ITEMSIZE(self) */ __pyx_v_info->shape = ((Py_ssize_t *)PyArray_DIMS(__pyx_v_self)); } __pyx_L11:; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":254 * info.strides = PyArray_STRIDES(self) * info.shape = PyArray_DIMS(self) * info.suboffsets = NULL # <<<<<<<<<<<<<< * info.itemsize = PyArray_ITEMSIZE(self) * info.readonly = not PyArray_ISWRITEABLE(self) */ __pyx_v_info->suboffsets = NULL; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":255 * info.shape = PyArray_DIMS(self) * info.suboffsets = NULL * info.itemsize = PyArray_ITEMSIZE(self) # <<<<<<<<<<<<<< * info.readonly = not PyArray_ISWRITEABLE(self) * */ __pyx_v_info->itemsize = PyArray_ITEMSIZE(__pyx_v_self); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":256 * info.suboffsets = NULL * info.itemsize = PyArray_ITEMSIZE(self) * info.readonly = not PyArray_ISWRITEABLE(self) # <<<<<<<<<<<<<< * * cdef int t */ __pyx_v_info->readonly = (!(PyArray_ISWRITEABLE(__pyx_v_self) != 0)); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":259 * * cdef int t * cdef char* f = NULL # 
<<<<<<<<<<<<<< * cdef dtype descr = self.descr * cdef int offset */ __pyx_v_f = NULL; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":260 * cdef int t * cdef char* f = NULL * cdef dtype descr = self.descr # <<<<<<<<<<<<<< * cdef int offset * */ __pyx_t_3 = ((PyObject *)__pyx_v_self->descr); __Pyx_INCREF(__pyx_t_3); __pyx_v_descr = ((PyArray_Descr *)__pyx_t_3); __pyx_t_3 = 0; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":263 * cdef int offset * * cdef bint hasfields = PyDataType_HASFIELDS(descr) # <<<<<<<<<<<<<< * * if not hasfields and not copy_shape: */ __pyx_v_hasfields = PyDataType_HASFIELDS(__pyx_v_descr); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":265 * cdef bint hasfields = PyDataType_HASFIELDS(descr) * * if not hasfields and not copy_shape: # <<<<<<<<<<<<<< * # do not call releasebuffer * info.obj = None */ __pyx_t_2 = ((!(__pyx_v_hasfields != 0)) != 0); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L15_bool_binop_done; } __pyx_t_2 = ((!(__pyx_v_copy_shape != 0)) != 0); __pyx_t_1 = __pyx_t_2; __pyx_L15_bool_binop_done:; if (__pyx_t_1) { /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":267 * if not hasfields and not copy_shape: * # do not call releasebuffer * info.obj = None # <<<<<<<<<<<<<< * else: * # need to call releasebuffer */ __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = Py_None; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":265 * cdef bint hasfields = PyDataType_HASFIELDS(descr) * * if not hasfields and not copy_shape: # <<<<<<<<<<<<<< * # do not call releasebuffer * info.obj = None */ goto __pyx_L14; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":270 * else: * # need to call releasebuffer * info.obj = self # <<<<<<<<<<<<<< * * if not hasfields: */ /*else*/ { __Pyx_INCREF(((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = ((PyObject *)__pyx_v_self); } __pyx_L14:; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":272 * info.obj = self * * if not hasfields: # <<<<<<<<<<<<<< * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or */ __pyx_t_1 = ((!(__pyx_v_hasfields != 0)) != 0); if (__pyx_t_1) { /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":273 * * if not hasfields: * t = descr.type_num # <<<<<<<<<<<<<< * if ((descr.byteorder == c'>' and little_endian) or * (descr.byteorder == c'<' and not little_endian)): */ __pyx_t_4 = __pyx_v_descr->type_num; __pyx_v_t = __pyx_t_4; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":274 * if not hasfields: * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") */ __pyx_t_2 = ((__pyx_v_descr->byteorder == '>') != 0); if (!__pyx_t_2) { goto __pyx_L20_next_or; } else { } __pyx_t_2 = (__pyx_v_little_endian != 0); if (!__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L19_bool_binop_done; } __pyx_L20_next_or:; /* 
"../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":275 * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or * (descr.byteorder == c'<' and not little_endian)): # <<<<<<<<<<<<<< * raise ValueError(u"Non-native byte order not supported") * if t == NPY_BYTE: f = "b" */ __pyx_t_2 = ((__pyx_v_descr->byteorder == '<') != 0); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L19_bool_binop_done; } __pyx_t_2 = ((!(__pyx_v_little_endian != 0)) != 0); __pyx_t_1 = __pyx_t_2; __pyx_L19_bool_binop_done:; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":274 * if not hasfields: * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") */ if (__pyx_t_1) { /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":276 * if ((descr.byteorder == c'>' and little_endian) or * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__3, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 276, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 276, __pyx_L1_error) /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":274 * if not hasfields: * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") */ } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":277 * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") * if t == NPY_BYTE: f = "b" # <<<<<<<<<<<<<< * elif t == NPY_UBYTE: f = "B" * elif t == NPY_SHORT: f = "h" */ switch (__pyx_v_t) { case NPY_BYTE: __pyx_v_f = ((char *)"b"); break; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":278 * raise ValueError(u"Non-native byte order not supported") * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" # <<<<<<<<<<<<<< * elif t == NPY_SHORT: f = "h" * elif t == NPY_USHORT: f = "H" */ case NPY_UBYTE: __pyx_v_f = ((char *)"B"); break; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":279 * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" * elif t == NPY_SHORT: f = "h" # <<<<<<<<<<<<<< * elif t == NPY_USHORT: f = "H" * elif t == NPY_INT: f = "i" */ case NPY_SHORT: __pyx_v_f = ((char *)"h"); break; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":280 * elif t == NPY_UBYTE: f = "B" * elif t == NPY_SHORT: f = "h" * elif t == NPY_USHORT: f = "H" # <<<<<<<<<<<<<< * elif t == NPY_INT: f = "i" * elif t == NPY_UINT: f = "I" */ case NPY_USHORT: __pyx_v_f = ((char *)"H"); break; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":281 * elif t == NPY_SHORT: f = "h" * elif t == NPY_USHORT: f = "H" * elif t == NPY_INT: f = "i" # <<<<<<<<<<<<<< * elif t == NPY_UINT: f = "I" * elif t == NPY_LONG: f = "l" */ case NPY_INT: 
__pyx_v_f = ((char *)"i"); break; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":282 * elif t == NPY_USHORT: f = "H" * elif t == NPY_INT: f = "i" * elif t == NPY_UINT: f = "I" # <<<<<<<<<<<<<< * elif t == NPY_LONG: f = "l" * elif t == NPY_ULONG: f = "L" */ case NPY_UINT: __pyx_v_f = ((char *)"I"); break; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":283 * elif t == NPY_INT: f = "i" * elif t == NPY_UINT: f = "I" * elif t == NPY_LONG: f = "l" # <<<<<<<<<<<<<< * elif t == NPY_ULONG: f = "L" * elif t == NPY_LONGLONG: f = "q" */ case NPY_LONG: __pyx_v_f = ((char *)"l"); break; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":284 * elif t == NPY_UINT: f = "I" * elif t == NPY_LONG: f = "l" * elif t == NPY_ULONG: f = "L" # <<<<<<<<<<<<<< * elif t == NPY_LONGLONG: f = "q" * elif t == NPY_ULONGLONG: f = "Q" */ case NPY_ULONG: __pyx_v_f = ((char *)"L"); break; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":285 * elif t == NPY_LONG: f = "l" * elif t == NPY_ULONG: f = "L" * elif t == NPY_LONGLONG: f = "q" # <<<<<<<<<<<<<< * elif t == NPY_ULONGLONG: f = "Q" * elif t == NPY_FLOAT: f = "f" */ case NPY_LONGLONG: __pyx_v_f = ((char *)"q"); break; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":286 * elif t == NPY_ULONG: f = "L" * elif t == NPY_LONGLONG: f = "q" * elif t == NPY_ULONGLONG: f = "Q" # <<<<<<<<<<<<<< * elif t == NPY_FLOAT: f = "f" * elif t == NPY_DOUBLE: f = "d" */ case NPY_ULONGLONG: __pyx_v_f = ((char *)"Q"); break; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":287 * elif t == NPY_LONGLONG: f = "q" * elif t == NPY_ULONGLONG: f = "Q" * elif t == NPY_FLOAT: f = "f" # <<<<<<<<<<<<<< * elif t == NPY_DOUBLE: f = "d" * elif t == NPY_LONGDOUBLE: f = "g" */ case NPY_FLOAT: __pyx_v_f = ((char *)"f"); break; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":288 * elif t == NPY_ULONGLONG: f = "Q" * elif t == NPY_FLOAT: f = "f" * elif t == NPY_DOUBLE: f = "d" # <<<<<<<<<<<<<< * elif t == NPY_LONGDOUBLE: f = "g" * elif t == NPY_CFLOAT: f = "Zf" */ case NPY_DOUBLE: __pyx_v_f = ((char *)"d"); break; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":289 * elif t == NPY_FLOAT: f = "f" * elif t == NPY_DOUBLE: f = "d" * elif t == NPY_LONGDOUBLE: f = "g" # <<<<<<<<<<<<<< * elif t == NPY_CFLOAT: f = "Zf" * elif t == NPY_CDOUBLE: f = "Zd" */ case NPY_LONGDOUBLE: __pyx_v_f = ((char *)"g"); break; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":290 * elif t == NPY_DOUBLE: f = "d" * elif t == NPY_LONGDOUBLE: f = "g" * elif t == NPY_CFLOAT: f = "Zf" # <<<<<<<<<<<<<< * elif t == NPY_CDOUBLE: f = "Zd" * elif t == NPY_CLONGDOUBLE: f = "Zg" */ case NPY_CFLOAT: __pyx_v_f = ((char *)"Zf"); break; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":291 * elif t == NPY_LONGDOUBLE: f = "g" * elif t == NPY_CFLOAT: f = "Zf" * elif t == NPY_CDOUBLE: f = "Zd" # <<<<<<<<<<<<<< * elif t == NPY_CLONGDOUBLE: f = "Zg" * elif t == NPY_OBJECT: f = "O" */ case NPY_CDOUBLE: __pyx_v_f = ((char *)"Zd"); break; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":292 * elif t == NPY_CFLOAT: f = "Zf" * 
elif t == NPY_CDOUBLE: f = "Zd" * elif t == NPY_CLONGDOUBLE: f = "Zg" # <<<<<<<<<<<<<< * elif t == NPY_OBJECT: f = "O" * else: */ case NPY_CLONGDOUBLE: __pyx_v_f = ((char *)"Zg"); break; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":293 * elif t == NPY_CDOUBLE: f = "Zd" * elif t == NPY_CLONGDOUBLE: f = "Zg" * elif t == NPY_OBJECT: f = "O" # <<<<<<<<<<<<<< * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) */ case NPY_OBJECT: __pyx_v_f = ((char *)"O"); break; default: /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":295 * elif t == NPY_OBJECT: f = "O" * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) # <<<<<<<<<<<<<< * info.format = f * return */ __pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_t); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 295, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_6 = PyUnicode_Format(__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, __pyx_t_3); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 295, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 295, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_3, NULL); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 295, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_Raise(__pyx_t_6, 0, 0, 0); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __PYX_ERR(1, 295, __pyx_L1_error) break; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":296 * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) * info.format = f # <<<<<<<<<<<<<< * return * else: */ __pyx_v_info->format = __pyx_v_f; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":297 * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) * info.format = f * return # <<<<<<<<<<<<<< * else: * info.format = PyObject_Malloc(_buffer_format_string_len) */ __pyx_r = 0; goto __pyx_L0; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":272 * info.obj = self * * if not hasfields: # <<<<<<<<<<<<<< * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or */ } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":299 * return * else: * info.format = PyObject_Malloc(_buffer_format_string_len) # <<<<<<<<<<<<<< * info.format[0] = c'^' # Native data types, manual alignment * offset = 0 */ /*else*/ { __pyx_v_info->format = ((char *)PyObject_Malloc(0xFF)); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":300 * else: * info.format = PyObject_Malloc(_buffer_format_string_len) * info.format[0] = c'^' # Native data types, manual alignment # <<<<<<<<<<<<<< * offset = 0 * f = _util_dtypestring(descr, info.format + 1, */ (__pyx_v_info->format[0]) = '^'; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":301 * info.format = PyObject_Malloc(_buffer_format_string_len) * info.format[0] = c'^' # Native data types, manual alignment * offset = 0 # <<<<<<<<<<<<<< * f = _util_dtypestring(descr, info.format + 1, * info.format + _buffer_format_string_len, */ __pyx_v_offset = 0; /* 
"../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":302 * info.format[0] = c'^' # Native data types, manual alignment * offset = 0 * f = _util_dtypestring(descr, info.format + 1, # <<<<<<<<<<<<<< * info.format + _buffer_format_string_len, * &offset) */ __pyx_t_7 = __pyx_f_5numpy__util_dtypestring(__pyx_v_descr, (__pyx_v_info->format + 1), (__pyx_v_info->format + 0xFF), (&__pyx_v_offset)); if (unlikely(__pyx_t_7 == ((char *)NULL))) __PYX_ERR(1, 302, __pyx_L1_error) __pyx_v_f = __pyx_t_7; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":305 * info.format + _buffer_format_string_len, * &offset) * f[0] = c'\0' # Terminate format string # <<<<<<<<<<<<<< * * def __releasebuffer__(ndarray self, Py_buffer* info): */ (__pyx_v_f[0]) = '\x00'; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":214 * # experimental exception made for __getbuffer__ and __releasebuffer__ * # -- the details of this may change. * def __getbuffer__(ndarray self, Py_buffer* info, int flags): # <<<<<<<<<<<<<< * # This implementation of getbuffer is geared towards Cython * # requirements, and does not yet fullfill the PEP. */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_6); __Pyx_AddTraceback("numpy.ndarray.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; if (__pyx_v_info != NULL && __pyx_v_info->obj != NULL) { __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = NULL; } goto __pyx_L2; __pyx_L0:; if (__pyx_v_info != NULL && __pyx_v_info->obj == Py_None) { __Pyx_GOTREF(Py_None); __Pyx_DECREF(Py_None); __pyx_v_info->obj = NULL; } __pyx_L2:; __Pyx_XDECREF((PyObject *)__pyx_v_descr); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":307 * f[0] = c'\0' # Terminate format string * * def __releasebuffer__(ndarray self, Py_buffer* info): # <<<<<<<<<<<<<< * if PyArray_HASFIELDS(self): * PyObject_Free(info.format) */ /* Python wrapper */ static CYTHON_UNUSED void __pyx_pw_5numpy_7ndarray_3__releasebuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info); /*proto*/ static CYTHON_UNUSED void __pyx_pw_5numpy_7ndarray_3__releasebuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__releasebuffer__ (wrapper)", 0); __pyx_pf_5numpy_7ndarray_2__releasebuffer__(((PyArrayObject *)__pyx_v_self), ((Py_buffer *)__pyx_v_info)); /* function exit code */ __Pyx_RefNannyFinishContext(); } static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info) { __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("__releasebuffer__", 0); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":308 * * def __releasebuffer__(ndarray self, Py_buffer* info): * if PyArray_HASFIELDS(self): # <<<<<<<<<<<<<< * PyObject_Free(info.format) * if sizeof(npy_intp) != sizeof(Py_ssize_t): */ __pyx_t_1 = (PyArray_HASFIELDS(__pyx_v_self) != 0); if (__pyx_t_1) { /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":309 * def __releasebuffer__(ndarray self, Py_buffer* info): * if PyArray_HASFIELDS(self): * PyObject_Free(info.format) # <<<<<<<<<<<<<< * if sizeof(npy_intp) != sizeof(Py_ssize_t): * 
PyObject_Free(info.strides) */ PyObject_Free(__pyx_v_info->format); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":308 * * def __releasebuffer__(ndarray self, Py_buffer* info): * if PyArray_HASFIELDS(self): # <<<<<<<<<<<<<< * PyObject_Free(info.format) * if sizeof(npy_intp) != sizeof(Py_ssize_t): */ } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":310 * if PyArray_HASFIELDS(self): * PyObject_Free(info.format) * if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<< * PyObject_Free(info.strides) * # info.shape was stored after info.strides in the same block */ __pyx_t_1 = (((sizeof(npy_intp)) != (sizeof(Py_ssize_t))) != 0); if (__pyx_t_1) { /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":311 * PyObject_Free(info.format) * if sizeof(npy_intp) != sizeof(Py_ssize_t): * PyObject_Free(info.strides) # <<<<<<<<<<<<<< * # info.shape was stored after info.strides in the same block * */ PyObject_Free(__pyx_v_info->strides); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":310 * if PyArray_HASFIELDS(self): * PyObject_Free(info.format) * if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<< * PyObject_Free(info.strides) * # info.shape was stored after info.strides in the same block */ } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":307 * f[0] = c'\0' # Terminate format string * * def __releasebuffer__(ndarray self, Py_buffer* info): # <<<<<<<<<<<<<< * if PyArray_HASFIELDS(self): * PyObject_Free(info.format) */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":788 * ctypedef npy_cdouble complex_t * * cdef inline object PyArray_MultiIterNew1(a): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(1, a) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew1(PyObject *__pyx_v_a) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("PyArray_MultiIterNew1", 0); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":789 * * cdef inline object PyArray_MultiIterNew1(a): * return PyArray_MultiIterNew(1, a) # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew2(a, b): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(1, ((void *)__pyx_v_a)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 789, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":788 * ctypedef npy_cdouble complex_t * * cdef inline object PyArray_MultiIterNew1(a): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(1, a) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew1", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":791 * return PyArray_MultiIterNew(1, a) * * cdef inline object PyArray_MultiIterNew2(a, b): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(2, a, b) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew2(PyObject *__pyx_v_a, PyObject 
*__pyx_v_b) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("PyArray_MultiIterNew2", 0); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":792 * * cdef inline object PyArray_MultiIterNew2(a, b): * return PyArray_MultiIterNew(2, a, b) # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew3(a, b, c): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(2, ((void *)__pyx_v_a), ((void *)__pyx_v_b)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 792, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":791 * return PyArray_MultiIterNew(1, a) * * cdef inline object PyArray_MultiIterNew2(a, b): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(2, a, b) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew2", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":794 * return PyArray_MultiIterNew(2, a, b) * * cdef inline object PyArray_MultiIterNew3(a, b, c): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(3, a, b, c) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew3(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("PyArray_MultiIterNew3", 0); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":795 * * cdef inline object PyArray_MultiIterNew3(a, b, c): * return PyArray_MultiIterNew(3, a, b, c) # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew4(a, b, c, d): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(3, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 795, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":794 * return PyArray_MultiIterNew(2, a, b) * * cdef inline object PyArray_MultiIterNew3(a, b, c): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(3, a, b, c) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew3", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":797 * return PyArray_MultiIterNew(3, a, b, c) * * cdef inline object PyArray_MultiIterNew4(a, b, c, d): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(4, a, b, c, d) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew4(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("PyArray_MultiIterNew4", 0); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":798 * * cdef inline object PyArray_MultiIterNew4(a, b, c, d): * return PyArray_MultiIterNew(4, a, b, c, d) # <<<<<<<<<<<<<< * * 
cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(4, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 798, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":797 * return PyArray_MultiIterNew(3, a, b, c) * * cdef inline object PyArray_MultiIterNew4(a, b, c, d): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(4, a, b, c, d) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew4", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":800 * return PyArray_MultiIterNew(4, a, b, c, d) * * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(5, a, b, c, d, e) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew5(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d, PyObject *__pyx_v_e) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("PyArray_MultiIterNew5", 0); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":801 * * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): * return PyArray_MultiIterNew(5, a, b, c, d, e) # <<<<<<<<<<<<<< * * cdef inline tuple PyDataType_SHAPE(dtype d): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(5, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d), ((void *)__pyx_v_e)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 801, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":800 * return PyArray_MultiIterNew(4, a, b, c, d) * * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(5, a, b, c, d, e) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew5", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":803 * return PyArray_MultiIterNew(5, a, b, c, d, e) * * cdef inline tuple PyDataType_SHAPE(dtype d): # <<<<<<<<<<<<<< * if PyDataType_HASSUBARRAY(d): * return d.subarray.shape */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyDataType_SHAPE(PyArray_Descr *__pyx_v_d) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("PyDataType_SHAPE", 0); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":804 * * cdef inline tuple PyDataType_SHAPE(dtype d): * if PyDataType_HASSUBARRAY(d): # <<<<<<<<<<<<<< * return d.subarray.shape * else: */ __pyx_t_1 = (PyDataType_HASSUBARRAY(__pyx_v_d) != 0); if (__pyx_t_1) { /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":805 * cdef inline tuple PyDataType_SHAPE(dtype d): * if 
PyDataType_HASSUBARRAY(d): * return d.subarray.shape # <<<<<<<<<<<<<< * else: * return () */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject*)__pyx_v_d->subarray->shape)); __pyx_r = ((PyObject*)__pyx_v_d->subarray->shape); goto __pyx_L0; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":804 * * cdef inline tuple PyDataType_SHAPE(dtype d): * if PyDataType_HASSUBARRAY(d): # <<<<<<<<<<<<<< * return d.subarray.shape * else: */ } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":807 * return d.subarray.shape * else: * return () # <<<<<<<<<<<<<< * * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: */ /*else*/ { __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_empty_tuple); __pyx_r = __pyx_empty_tuple; goto __pyx_L0; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":803 * return PyArray_MultiIterNew(5, a, b, c, d, e) * * cdef inline tuple PyDataType_SHAPE(dtype d): # <<<<<<<<<<<<<< * if PyDataType_HASSUBARRAY(d): * return d.subarray.shape */ /* function exit code */ __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":809 * return () * * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: # <<<<<<<<<<<<<< * # Recursive utility function used in __getbuffer__ to get format * # string. The new location in the format string is returned. */ static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx_v_descr, char *__pyx_v_f, char *__pyx_v_end, int *__pyx_v_offset) { PyArray_Descr *__pyx_v_child = 0; int __pyx_v_endian_detector; int __pyx_v_little_endian; PyObject *__pyx_v_fields = 0; PyObject *__pyx_v_childname = NULL; PyObject *__pyx_v_new_offset = NULL; PyObject *__pyx_v_t = NULL; char *__pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; Py_ssize_t __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; int __pyx_t_5; int __pyx_t_6; int __pyx_t_7; long __pyx_t_8; char *__pyx_t_9; __Pyx_RefNannySetupContext("_util_dtypestring", 0); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":814 * * cdef dtype child * cdef int endian_detector = 1 # <<<<<<<<<<<<<< * cdef bint little_endian = ((&endian_detector)[0] != 0) * cdef tuple fields */ __pyx_v_endian_detector = 1; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":815 * cdef dtype child * cdef int endian_detector = 1 * cdef bint little_endian = ((&endian_detector)[0] != 0) # <<<<<<<<<<<<<< * cdef tuple fields * */ __pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":818 * cdef tuple fields * * for childname in descr.names: # <<<<<<<<<<<<<< * fields = descr.fields[childname] * child, new_offset = fields */ if (unlikely(__pyx_v_descr->names == Py_None)) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable"); __PYX_ERR(1, 818, __pyx_L1_error) } __pyx_t_1 = __pyx_v_descr->names; __Pyx_INCREF(__pyx_t_1); __pyx_t_2 = 0; for (;;) { if (__pyx_t_2 >= PyTuple_GET_SIZE(__pyx_t_1)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_3 = PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_2); __Pyx_INCREF(__pyx_t_3); __pyx_t_2++; 
if (unlikely(0 < 0)) __PYX_ERR(1, 818, __pyx_L1_error) #else __pyx_t_3 = PySequence_ITEM(__pyx_t_1, __pyx_t_2); __pyx_t_2++; if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 818, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); #endif __Pyx_XDECREF_SET(__pyx_v_childname, __pyx_t_3); __pyx_t_3 = 0; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":819 * * for childname in descr.names: * fields = descr.fields[childname] # <<<<<<<<<<<<<< * child, new_offset = fields * */ if (unlikely(__pyx_v_descr->fields == Py_None)) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable"); __PYX_ERR(1, 819, __pyx_L1_error) } __pyx_t_3 = __Pyx_PyDict_GetItem(__pyx_v_descr->fields, __pyx_v_childname); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 819, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); if (!(likely(PyTuple_CheckExact(__pyx_t_3))||((__pyx_t_3) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_t_3)->tp_name), 0))) __PYX_ERR(1, 819, __pyx_L1_error) __Pyx_XDECREF_SET(__pyx_v_fields, ((PyObject*)__pyx_t_3)); __pyx_t_3 = 0; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":820 * for childname in descr.names: * fields = descr.fields[childname] * child, new_offset = fields # <<<<<<<<<<<<<< * * if (end - f) - (new_offset - offset[0]) < 15: */ if (likely(__pyx_v_fields != Py_None)) { PyObject* sequence = __pyx_v_fields; #if !CYTHON_COMPILING_IN_PYPY Py_ssize_t size = Py_SIZE(sequence); #else Py_ssize_t size = PySequence_Size(sequence); #endif if (unlikely(size != 2)) { if (size > 2) __Pyx_RaiseTooManyValuesError(2); else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); __PYX_ERR(1, 820, __pyx_L1_error) } #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_3 = PyTuple_GET_ITEM(sequence, 0); __pyx_t_4 = PyTuple_GET_ITEM(sequence, 1); __Pyx_INCREF(__pyx_t_3); __Pyx_INCREF(__pyx_t_4); #else __pyx_t_3 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 820, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 820, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); #endif } else { __Pyx_RaiseNoneNotIterableError(); __PYX_ERR(1, 820, __pyx_L1_error) } if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_dtype))))) __PYX_ERR(1, 820, __pyx_L1_error) __Pyx_XDECREF_SET(__pyx_v_child, ((PyArray_Descr *)__pyx_t_3)); __pyx_t_3 = 0; __Pyx_XDECREF_SET(__pyx_v_new_offset, __pyx_t_4); __pyx_t_4 = 0; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":822 * child, new_offset = fields * * if (end - f) - (new_offset - offset[0]) < 15: # <<<<<<<<<<<<<< * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") * */ __pyx_t_4 = __Pyx_PyInt_From_int((__pyx_v_offset[0])); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 822, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyNumber_Subtract(__pyx_v_new_offset, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 822, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_5 = __Pyx_PyInt_As_int(__pyx_t_3); if (unlikely((__pyx_t_5 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 822, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = ((((__pyx_v_end - __pyx_v_f) - ((int)__pyx_t_5)) < 15) != 0); if (__pyx_t_6) { /* 
"../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":823 * * if (end - f) - (new_offset - offset[0]) < 15: * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") # <<<<<<<<<<<<<< * * if ((child.byteorder == c'>' and little_endian) or */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_RuntimeError, __pyx_tuple__4, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 823, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 823, __pyx_L1_error) /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":822 * child, new_offset = fields * * if (end - f) - (new_offset - offset[0]) < 15: # <<<<<<<<<<<<<< * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") * */ } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":825 * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") * * if ((child.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< * (child.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") */ __pyx_t_7 = ((__pyx_v_child->byteorder == '>') != 0); if (!__pyx_t_7) { goto __pyx_L8_next_or; } else { } __pyx_t_7 = (__pyx_v_little_endian != 0); if (!__pyx_t_7) { } else { __pyx_t_6 = __pyx_t_7; goto __pyx_L7_bool_binop_done; } __pyx_L8_next_or:; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":826 * * if ((child.byteorder == c'>' and little_endian) or * (child.byteorder == c'<' and not little_endian)): # <<<<<<<<<<<<<< * raise ValueError(u"Non-native byte order not supported") * # One could encode it in the format string and have Cython */ __pyx_t_7 = ((__pyx_v_child->byteorder == '<') != 0); if (__pyx_t_7) { } else { __pyx_t_6 = __pyx_t_7; goto __pyx_L7_bool_binop_done; } __pyx_t_7 = ((!(__pyx_v_little_endian != 0)) != 0); __pyx_t_6 = __pyx_t_7; __pyx_L7_bool_binop_done:; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":825 * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") * * if ((child.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< * (child.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") */ if (__pyx_t_6) { /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":827 * if ((child.byteorder == c'>' and little_endian) or * (child.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< * # One could encode it in the format string and have Cython * # complain instead, BUT: < and > in format strings also imply */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__5, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 827, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 827, __pyx_L1_error) /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":825 * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") * * if ((child.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< * (child.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") */ } /* 
"../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":837 * * # Output padding bytes * while offset[0] < new_offset: # <<<<<<<<<<<<<< * f[0] = 120 # "x"; pad byte * f += 1 */ while (1) { __pyx_t_3 = __Pyx_PyInt_From_int((__pyx_v_offset[0])); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 837, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_t_3, __pyx_v_new_offset, Py_LT); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 837, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 837, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (!__pyx_t_6) break; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":838 * # Output padding bytes * while offset[0] < new_offset: * f[0] = 120 # "x"; pad byte # <<<<<<<<<<<<<< * f += 1 * offset[0] += 1 */ (__pyx_v_f[0]) = 0x78; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":839 * while offset[0] < new_offset: * f[0] = 120 # "x"; pad byte * f += 1 # <<<<<<<<<<<<<< * offset[0] += 1 * */ __pyx_v_f = (__pyx_v_f + 1); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":840 * f[0] = 120 # "x"; pad byte * f += 1 * offset[0] += 1 # <<<<<<<<<<<<<< * * offset[0] += child.itemsize */ __pyx_t_8 = 0; (__pyx_v_offset[__pyx_t_8]) = ((__pyx_v_offset[__pyx_t_8]) + 1); } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":842 * offset[0] += 1 * * offset[0] += child.itemsize # <<<<<<<<<<<<<< * * if not PyDataType_HASFIELDS(child): */ __pyx_t_8 = 0; (__pyx_v_offset[__pyx_t_8]) = ((__pyx_v_offset[__pyx_t_8]) + __pyx_v_child->elsize); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":844 * offset[0] += child.itemsize * * if not PyDataType_HASFIELDS(child): # <<<<<<<<<<<<<< * t = child.type_num * if end - f < 5: */ __pyx_t_6 = ((!(PyDataType_HASFIELDS(__pyx_v_child) != 0)) != 0); if (__pyx_t_6) { /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":845 * * if not PyDataType_HASFIELDS(child): * t = child.type_num # <<<<<<<<<<<<<< * if end - f < 5: * raise RuntimeError(u"Format string allocated too short.") */ __pyx_t_4 = __Pyx_PyInt_From_int(__pyx_v_child->type_num); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 845, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_XDECREF_SET(__pyx_v_t, __pyx_t_4); __pyx_t_4 = 0; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":846 * if not PyDataType_HASFIELDS(child): * t = child.type_num * if end - f < 5: # <<<<<<<<<<<<<< * raise RuntimeError(u"Format string allocated too short.") * */ __pyx_t_6 = (((__pyx_v_end - __pyx_v_f) < 5) != 0); if (__pyx_t_6) { /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":847 * t = child.type_num * if end - f < 5: * raise RuntimeError(u"Format string allocated too short.") # <<<<<<<<<<<<<< * * # Until ticket #99 is fixed, use integers to avoid warnings */ __pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin_RuntimeError, __pyx_tuple__6, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 847, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __PYX_ERR(1, 847, __pyx_L1_error) /* 
"../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":846 * if not PyDataType_HASFIELDS(child): * t = child.type_num * if end - f < 5: # <<<<<<<<<<<<<< * raise RuntimeError(u"Format string allocated too short.") * */ } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":850 * * # Until ticket #99 is fixed, use integers to avoid warnings * if t == NPY_BYTE: f[0] = 98 #"b" # <<<<<<<<<<<<<< * elif t == NPY_UBYTE: f[0] = 66 #"B" * elif t == NPY_SHORT: f[0] = 104 #"h" */ __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_BYTE); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 850, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 850, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 850, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 98; goto __pyx_L15; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":851 * # Until ticket #99 is fixed, use integers to avoid warnings * if t == NPY_BYTE: f[0] = 98 #"b" * elif t == NPY_UBYTE: f[0] = 66 #"B" # <<<<<<<<<<<<<< * elif t == NPY_SHORT: f[0] = 104 #"h" * elif t == NPY_USHORT: f[0] = 72 #"H" */ __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_UBYTE); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 851, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 851, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 851, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 66; goto __pyx_L15; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":852 * if t == NPY_BYTE: f[0] = 98 #"b" * elif t == NPY_UBYTE: f[0] = 66 #"B" * elif t == NPY_SHORT: f[0] = 104 #"h" # <<<<<<<<<<<<<< * elif t == NPY_USHORT: f[0] = 72 #"H" * elif t == NPY_INT: f[0] = 105 #"i" */ __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_SHORT); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 852, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 852, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 852, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 0x68; goto __pyx_L15; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":853 * elif t == NPY_UBYTE: f[0] = 66 #"B" * elif t == NPY_SHORT: f[0] = 104 #"h" * elif t == NPY_USHORT: f[0] = 72 #"H" # <<<<<<<<<<<<<< * elif t == NPY_INT: f[0] = 105 #"i" * elif t == NPY_UINT: f[0] = 73 #"I" */ __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_USHORT); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 853, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 853, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 853, 
__pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 72; goto __pyx_L15; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":854 * elif t == NPY_SHORT: f[0] = 104 #"h" * elif t == NPY_USHORT: f[0] = 72 #"H" * elif t == NPY_INT: f[0] = 105 #"i" # <<<<<<<<<<<<<< * elif t == NPY_UINT: f[0] = 73 #"I" * elif t == NPY_LONG: f[0] = 108 #"l" */ __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_INT); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 854, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 854, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 854, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 0x69; goto __pyx_L15; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":855 * elif t == NPY_USHORT: f[0] = 72 #"H" * elif t == NPY_INT: f[0] = 105 #"i" * elif t == NPY_UINT: f[0] = 73 #"I" # <<<<<<<<<<<<<< * elif t == NPY_LONG: f[0] = 108 #"l" * elif t == NPY_ULONG: f[0] = 76 #"L" */ __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_UINT); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 855, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 855, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 855, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 73; goto __pyx_L15; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":856 * elif t == NPY_INT: f[0] = 105 #"i" * elif t == NPY_UINT: f[0] = 73 #"I" * elif t == NPY_LONG: f[0] = 108 #"l" # <<<<<<<<<<<<<< * elif t == NPY_ULONG: f[0] = 76 #"L" * elif t == NPY_LONGLONG: f[0] = 113 #"q" */ __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_LONG); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 856, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 856, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 856, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 0x6C; goto __pyx_L15; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":857 * elif t == NPY_UINT: f[0] = 73 #"I" * elif t == NPY_LONG: f[0] = 108 #"l" * elif t == NPY_ULONG: f[0] = 76 #"L" # <<<<<<<<<<<<<< * elif t == NPY_LONGLONG: f[0] = 113 #"q" * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" */ __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_ULONG); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 857, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 857, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 857, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 76; goto __pyx_L15; } /* 
"../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":858 * elif t == NPY_LONG: f[0] = 108 #"l" * elif t == NPY_ULONG: f[0] = 76 #"L" * elif t == NPY_LONGLONG: f[0] = 113 #"q" # <<<<<<<<<<<<<< * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" * elif t == NPY_FLOAT: f[0] = 102 #"f" */ __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_LONGLONG); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 858, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 858, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 858, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 0x71; goto __pyx_L15; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":859 * elif t == NPY_ULONG: f[0] = 76 #"L" * elif t == NPY_LONGLONG: f[0] = 113 #"q" * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" # <<<<<<<<<<<<<< * elif t == NPY_FLOAT: f[0] = 102 #"f" * elif t == NPY_DOUBLE: f[0] = 100 #"d" */ __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_ULONGLONG); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 859, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 859, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 859, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 81; goto __pyx_L15; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":860 * elif t == NPY_LONGLONG: f[0] = 113 #"q" * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" * elif t == NPY_FLOAT: f[0] = 102 #"f" # <<<<<<<<<<<<<< * elif t == NPY_DOUBLE: f[0] = 100 #"d" * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" */ __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_FLOAT); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 860, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 860, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 860, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 0x66; goto __pyx_L15; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":861 * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" * elif t == NPY_FLOAT: f[0] = 102 #"f" * elif t == NPY_DOUBLE: f[0] = 100 #"d" # <<<<<<<<<<<<<< * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf */ __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_DOUBLE); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 861, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 861, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 861, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 0x64; goto __pyx_L15; } /* 
"../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":862 * elif t == NPY_FLOAT: f[0] = 102 #"f" * elif t == NPY_DOUBLE: f[0] = 100 #"d" * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" # <<<<<<<<<<<<<< * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd */ __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_LONGDOUBLE); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 862, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 862, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 862, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 0x67; goto __pyx_L15; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":863 * elif t == NPY_DOUBLE: f[0] = 100 #"d" * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf # <<<<<<<<<<<<<< * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg */ __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_CFLOAT); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 863, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 863, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 863, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 90; (__pyx_v_f[1]) = 0x66; __pyx_v_f = (__pyx_v_f + 1); goto __pyx_L15; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":864 * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd # <<<<<<<<<<<<<< * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg * elif t == NPY_OBJECT: f[0] = 79 #"O" */ __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_CDOUBLE); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 864, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 864, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 864, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 90; (__pyx_v_f[1]) = 0x64; __pyx_v_f = (__pyx_v_f + 1); goto __pyx_L15; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":865 * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg # <<<<<<<<<<<<<< * elif t == NPY_OBJECT: f[0] = 79 #"O" * else: */ __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_CLONGDOUBLE); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 865, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 865, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; 
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 865, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 90; (__pyx_v_f[1]) = 0x67; __pyx_v_f = (__pyx_v_f + 1); goto __pyx_L15; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":866 * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg * elif t == NPY_OBJECT: f[0] = 79 #"O" # <<<<<<<<<<<<<< * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) */ __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_OBJECT); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 866, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 866, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 866, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 79; goto __pyx_L15; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":868 * elif t == NPY_OBJECT: f[0] = 79 #"O" * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) # <<<<<<<<<<<<<< * f += 1 * else: */ /*else*/ { __pyx_t_3 = PyUnicode_Format(__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, __pyx_v_t); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 868, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 868, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_4, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 868, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 868, __pyx_L1_error) } __pyx_L15:; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":869 * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) * f += 1 # <<<<<<<<<<<<<< * else: * # Cython ignores struct boundary information ("T{...}"), */ __pyx_v_f = (__pyx_v_f + 1); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":844 * offset[0] += child.itemsize * * if not PyDataType_HASFIELDS(child): # <<<<<<<<<<<<<< * t = child.type_num * if end - f < 5: */ goto __pyx_L13; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":873 * # Cython ignores struct boundary information ("T{...}"), * # so don't output it * f = _util_dtypestring(child, f, end, offset) # <<<<<<<<<<<<<< * return f * */ /*else*/ { __pyx_t_9 = __pyx_f_5numpy__util_dtypestring(__pyx_v_child, __pyx_v_f, __pyx_v_end, __pyx_v_offset); if (unlikely(__pyx_t_9 == ((char *)NULL))) __PYX_ERR(1, 873, __pyx_L1_error) __pyx_v_f = __pyx_t_9; } __pyx_L13:; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":818 * cdef tuple fields * * for childname in descr.names: # <<<<<<<<<<<<<< * fields = descr.fields[childname] * child, new_offset = fields */ } __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":874 * # so don't output it * 
f = _util_dtypestring(child, f, end, offset) * return f # <<<<<<<<<<<<<< * * */ __pyx_r = __pyx_v_f; goto __pyx_L0; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":809 * return () * * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: # <<<<<<<<<<<<<< * # Recursive utility function used in __getbuffer__ to get format * # string. The new location in the format string is returned. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("numpy._util_dtypestring", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_child); __Pyx_XDECREF(__pyx_v_fields); __Pyx_XDECREF(__pyx_v_childname); __Pyx_XDECREF(__pyx_v_new_offset); __Pyx_XDECREF(__pyx_v_t); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":990 * * * cdef inline void set_array_base(ndarray arr, object base): # <<<<<<<<<<<<<< * cdef PyObject* baseptr * if base is None: */ static CYTHON_INLINE void __pyx_f_5numpy_set_array_base(PyArrayObject *__pyx_v_arr, PyObject *__pyx_v_base) { PyObject *__pyx_v_baseptr; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; __Pyx_RefNannySetupContext("set_array_base", 0); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":992 * cdef inline void set_array_base(ndarray arr, object base): * cdef PyObject* baseptr * if base is None: # <<<<<<<<<<<<<< * baseptr = NULL * else: */ __pyx_t_1 = (__pyx_v_base == Py_None); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":993 * cdef PyObject* baseptr * if base is None: * baseptr = NULL # <<<<<<<<<<<<<< * else: * Py_INCREF(base) # important to do this before decref below! */ __pyx_v_baseptr = NULL; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":992 * cdef inline void set_array_base(ndarray arr, object base): * cdef PyObject* baseptr * if base is None: # <<<<<<<<<<<<<< * baseptr = NULL * else: */ goto __pyx_L3; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":995 * baseptr = NULL * else: * Py_INCREF(base) # important to do this before decref below! # <<<<<<<<<<<<<< * baseptr = base * Py_XDECREF(arr.base) */ /*else*/ { Py_INCREF(__pyx_v_base); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":996 * else: * Py_INCREF(base) # important to do this before decref below! * baseptr = base # <<<<<<<<<<<<<< * Py_XDECREF(arr.base) * arr.base = baseptr */ __pyx_v_baseptr = ((PyObject *)__pyx_v_base); } __pyx_L3:; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":997 * Py_INCREF(base) # important to do this before decref below! 
* baseptr = base * Py_XDECREF(arr.base) # <<<<<<<<<<<<<< * arr.base = baseptr * */ Py_XDECREF(__pyx_v_arr->base); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":998 * baseptr = base * Py_XDECREF(arr.base) * arr.base = baseptr # <<<<<<<<<<<<<< * * cdef inline object get_array_base(ndarray arr): */ __pyx_v_arr->base = __pyx_v_baseptr; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":990 * * * cdef inline void set_array_base(ndarray arr, object base): # <<<<<<<<<<<<<< * cdef PyObject* baseptr * if base is None: */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1000 * arr.base = baseptr * * cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<< * if arr.base is NULL: * return None */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_get_array_base(PyArrayObject *__pyx_v_arr) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("get_array_base", 0); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1001 * * cdef inline object get_array_base(ndarray arr): * if arr.base is NULL: # <<<<<<<<<<<<<< * return None * else: */ __pyx_t_1 = ((__pyx_v_arr->base == NULL) != 0); if (__pyx_t_1) { /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1002 * cdef inline object get_array_base(ndarray arr): * if arr.base is NULL: * return None # <<<<<<<<<<<<<< * else: * return arr.base */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(Py_None); __pyx_r = Py_None; goto __pyx_L0; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1001 * * cdef inline object get_array_base(ndarray arr): * if arr.base is NULL: # <<<<<<<<<<<<<< * return None * else: */ } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1004 * return None * else: * return arr.base # <<<<<<<<<<<<<< * * */ /*else*/ { __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_arr->base)); __pyx_r = ((PyObject *)__pyx_v_arr->base); goto __pyx_L0; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1000 * arr.base = baseptr * * cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<< * if arr.base is NULL: * return None */ /* function exit code */ __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1009 * # Versions of the import_* functions which are more suitable for * # Cython code. * cdef inline int import_array() except -1: # <<<<<<<<<<<<<< * try: * _import_array() */ static CYTHON_INLINE int __pyx_f_5numpy_import_array(void) { int __pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; int __pyx_t_4; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; PyObject *__pyx_t_8 = NULL; __Pyx_RefNannySetupContext("import_array", 0); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1010 * # Cython code. 
* cdef inline int import_array() except -1: * try: # <<<<<<<<<<<<<< * _import_array() * except Exception: */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_1, &__pyx_t_2, &__pyx_t_3); __Pyx_XGOTREF(__pyx_t_1); __Pyx_XGOTREF(__pyx_t_2); __Pyx_XGOTREF(__pyx_t_3); /*try:*/ { /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1011 * cdef inline int import_array() except -1: * try: * _import_array() # <<<<<<<<<<<<<< * except Exception: * raise ImportError("numpy.core.multiarray failed to import") */ __pyx_t_4 = _import_array(); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 1011, __pyx_L3_error) /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1010 * # Cython code. * cdef inline int import_array() except -1: * try: # <<<<<<<<<<<<<< * _import_array() * except Exception: */ } __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; goto __pyx_L8_try_end; __pyx_L3_error:; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1012 * try: * _import_array() * except Exception: # <<<<<<<<<<<<<< * raise ImportError("numpy.core.multiarray failed to import") * */ __pyx_t_4 = __Pyx_PyErr_ExceptionMatches(((PyObject *)(&((PyTypeObject*)PyExc_Exception)[0]))); if (__pyx_t_4) { __Pyx_AddTraceback("numpy.import_array", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_5, &__pyx_t_6, &__pyx_t_7) < 0) __PYX_ERR(1, 1012, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GOTREF(__pyx_t_6); __Pyx_GOTREF(__pyx_t_7); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1013 * _import_array() * except Exception: * raise ImportError("numpy.core.multiarray failed to import") # <<<<<<<<<<<<<< * * cdef inline int import_umath() except -1: */ __pyx_t_8 = __Pyx_PyObject_Call(__pyx_builtin_ImportError, __pyx_tuple__7, NULL); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 1013, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_Raise(__pyx_t_8, 0, 0, 0); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __PYX_ERR(1, 1013, __pyx_L5_except_error) } goto __pyx_L5_except_error; __pyx_L5_except_error:; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1010 * # Cython code. * cdef inline int import_array() except -1: * try: # <<<<<<<<<<<<<< * _import_array() * except Exception: */ __Pyx_XGIVEREF(__pyx_t_1); __Pyx_XGIVEREF(__pyx_t_2); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_ExceptionReset(__pyx_t_1, __pyx_t_2, __pyx_t_3); goto __pyx_L1_error; __pyx_L8_try_end:; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1009 * # Versions of the import_* functions which are more suitable for * # Cython code. 
* cdef inline int import_array() except -1: # <<<<<<<<<<<<<< * try: * _import_array() */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_8); __Pyx_AddTraceback("numpy.import_array", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1015 * raise ImportError("numpy.core.multiarray failed to import") * * cdef inline int import_umath() except -1: # <<<<<<<<<<<<<< * try: * _import_umath() */ static CYTHON_INLINE int __pyx_f_5numpy_import_umath(void) { int __pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; int __pyx_t_4; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; PyObject *__pyx_t_8 = NULL; __Pyx_RefNannySetupContext("import_umath", 0); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1016 * * cdef inline int import_umath() except -1: * try: # <<<<<<<<<<<<<< * _import_umath() * except Exception: */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_1, &__pyx_t_2, &__pyx_t_3); __Pyx_XGOTREF(__pyx_t_1); __Pyx_XGOTREF(__pyx_t_2); __Pyx_XGOTREF(__pyx_t_3); /*try:*/ { /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1017 * cdef inline int import_umath() except -1: * try: * _import_umath() # <<<<<<<<<<<<<< * except Exception: * raise ImportError("numpy.core.umath failed to import") */ __pyx_t_4 = _import_umath(); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 1017, __pyx_L3_error) /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1016 * * cdef inline int import_umath() except -1: * try: # <<<<<<<<<<<<<< * _import_umath() * except Exception: */ } __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; goto __pyx_L8_try_end; __pyx_L3_error:; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1018 * try: * _import_umath() * except Exception: # <<<<<<<<<<<<<< * raise ImportError("numpy.core.umath failed to import") * */ __pyx_t_4 = __Pyx_PyErr_ExceptionMatches(((PyObject *)(&((PyTypeObject*)PyExc_Exception)[0]))); if (__pyx_t_4) { __Pyx_AddTraceback("numpy.import_umath", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_5, &__pyx_t_6, &__pyx_t_7) < 0) __PYX_ERR(1, 1018, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GOTREF(__pyx_t_6); __Pyx_GOTREF(__pyx_t_7); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1019 * _import_umath() * except Exception: * raise ImportError("numpy.core.umath failed to import") # <<<<<<<<<<<<<< * * cdef inline int import_ufunc() except -1: */ __pyx_t_8 = __Pyx_PyObject_Call(__pyx_builtin_ImportError, __pyx_tuple__8, NULL); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 1019, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_Raise(__pyx_t_8, 0, 0, 0); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __PYX_ERR(1, 1019, __pyx_L5_except_error) } goto __pyx_L5_except_error; __pyx_L5_except_error:; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1016 * * cdef inline int 
import_umath() except -1: * try: # <<<<<<<<<<<<<< * _import_umath() * except Exception: */ __Pyx_XGIVEREF(__pyx_t_1); __Pyx_XGIVEREF(__pyx_t_2); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_ExceptionReset(__pyx_t_1, __pyx_t_2, __pyx_t_3); goto __pyx_L1_error; __pyx_L8_try_end:; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1015 * raise ImportError("numpy.core.multiarray failed to import") * * cdef inline int import_umath() except -1: # <<<<<<<<<<<<<< * try: * _import_umath() */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_8); __Pyx_AddTraceback("numpy.import_umath", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1021 * raise ImportError("numpy.core.umath failed to import") * * cdef inline int import_ufunc() except -1: # <<<<<<<<<<<<<< * try: * _import_umath() */ static CYTHON_INLINE int __pyx_f_5numpy_import_ufunc(void) { int __pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; int __pyx_t_4; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; PyObject *__pyx_t_8 = NULL; __Pyx_RefNannySetupContext("import_ufunc", 0); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1022 * * cdef inline int import_ufunc() except -1: * try: # <<<<<<<<<<<<<< * _import_umath() * except Exception: */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_1, &__pyx_t_2, &__pyx_t_3); __Pyx_XGOTREF(__pyx_t_1); __Pyx_XGOTREF(__pyx_t_2); __Pyx_XGOTREF(__pyx_t_3); /*try:*/ { /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1023 * cdef inline int import_ufunc() except -1: * try: * _import_umath() # <<<<<<<<<<<<<< * except Exception: * raise ImportError("numpy.core.umath failed to import") */ __pyx_t_4 = _import_umath(); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 1023, __pyx_L3_error) /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1022 * * cdef inline int import_ufunc() except -1: * try: # <<<<<<<<<<<<<< * _import_umath() * except Exception: */ } __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; goto __pyx_L8_try_end; __pyx_L3_error:; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1024 * try: * _import_umath() * except Exception: # <<<<<<<<<<<<<< * raise ImportError("numpy.core.umath failed to import") */ __pyx_t_4 = __Pyx_PyErr_ExceptionMatches(((PyObject *)(&((PyTypeObject*)PyExc_Exception)[0]))); if (__pyx_t_4) { __Pyx_AddTraceback("numpy.import_ufunc", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_5, &__pyx_t_6, &__pyx_t_7) < 0) __PYX_ERR(1, 1024, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GOTREF(__pyx_t_6); __Pyx_GOTREF(__pyx_t_7); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1025 * _import_umath() * except Exception: * raise ImportError("numpy.core.umath failed to import") # <<<<<<<<<<<<<< */ __pyx_t_8 = __Pyx_PyObject_Call(__pyx_builtin_ImportError, __pyx_tuple__9, NULL); if 
(unlikely(!__pyx_t_8)) __PYX_ERR(1, 1025, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_Raise(__pyx_t_8, 0, 0, 0); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __PYX_ERR(1, 1025, __pyx_L5_except_error) } goto __pyx_L5_except_error; __pyx_L5_except_error:; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1022 * * cdef inline int import_ufunc() except -1: * try: # <<<<<<<<<<<<<< * _import_umath() * except Exception: */ __Pyx_XGIVEREF(__pyx_t_1); __Pyx_XGIVEREF(__pyx_t_2); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_ExceptionReset(__pyx_t_1, __pyx_t_2, __pyx_t_3); goto __pyx_L1_error; __pyx_L8_try_end:; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1021 * raise ImportError("numpy.core.umath failed to import") * * cdef inline int import_ufunc() except -1: # <<<<<<<<<<<<<< * try: * _import_umath() */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_8); __Pyx_AddTraceback("numpy.import_ufunc", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyMethodDef __pyx_methods[] = { {0, 0, 0, 0} }; #if PY_MAJOR_VERSION >= 3 #if CYTHON_PEP489_MULTI_PHASE_INIT static PyObject* __pyx_pymod_create(PyObject *spec, PyModuleDef *def); /*proto*/ static int __pyx_pymod_exec_circular_overlap(PyObject* module); /*proto*/ static PyModuleDef_Slot __pyx_moduledef_slots[] = { {Py_mod_create, (void*)__pyx_pymod_create}, {Py_mod_exec, (void*)__pyx_pymod_exec_circular_overlap}, {0, NULL} }; #endif static struct PyModuleDef __pyx_moduledef = { PyModuleDef_HEAD_INIT, "circular_overlap", __pyx_k_The_functions_defined_here_allo, /* m_doc */ #if CYTHON_PEP489_MULTI_PHASE_INIT 0, /* m_size */ #else -1, /* m_size */ #endif __pyx_methods /* m_methods */, #if CYTHON_PEP489_MULTI_PHASE_INIT __pyx_moduledef_slots, /* m_slots */ #else NULL, /* m_reload */ #endif NULL, /* m_traverse */ NULL, /* m_clear */ NULL /* m_free */ }; #endif static __Pyx_StringTabEntry __pyx_string_tab[] = { {&__pyx_n_s_DTYPE, __pyx_k_DTYPE, sizeof(__pyx_k_DTYPE), 0, 0, 1, 1}, {&__pyx_kp_u_Format_string_allocated_too_shor, __pyx_k_Format_string_allocated_too_shor, sizeof(__pyx_k_Format_string_allocated_too_shor), 0, 1, 0, 0}, {&__pyx_kp_u_Format_string_allocated_too_shor_2, __pyx_k_Format_string_allocated_too_shor_2, sizeof(__pyx_k_Format_string_allocated_too_shor_2), 0, 1, 0, 0}, {&__pyx_n_s_ImportError, __pyx_k_ImportError, sizeof(__pyx_k_ImportError), 0, 0, 1, 1}, {&__pyx_kp_u_Non_native_byte_order_not_suppor, __pyx_k_Non_native_byte_order_not_suppor, sizeof(__pyx_k_Non_native_byte_order_not_suppor), 0, 1, 0, 0}, {&__pyx_n_s_RuntimeError, __pyx_k_RuntimeError, sizeof(__pyx_k_RuntimeError), 0, 0, 1, 1}, {&__pyx_n_s_ValueError, __pyx_k_ValueError, sizeof(__pyx_k_ValueError), 0, 0, 1, 1}, {&__pyx_n_s_all, __pyx_k_all, sizeof(__pyx_k_all), 0, 0, 1, 1}, {&__pyx_n_s_bxmax, __pyx_k_bxmax, sizeof(__pyx_k_bxmax), 0, 0, 1, 1}, {&__pyx_n_s_bxmin, __pyx_k_bxmin, sizeof(__pyx_k_bxmin), 0, 0, 1, 1}, {&__pyx_n_s_bymax, __pyx_k_bymax, sizeof(__pyx_k_bymax), 0, 0, 1, 1}, {&__pyx_n_s_bymin, __pyx_k_bymin, sizeof(__pyx_k_bymin), 0, 0, 1, 1}, {&__pyx_n_s_circular_overlap_grid, __pyx_k_circular_overlap_grid, sizeof(__pyx_k_circular_overlap_grid), 0, 0, 1, 1}, {&__pyx_n_u_circular_overlap_grid, __pyx_k_circular_overlap_grid, sizeof(__pyx_k_circular_overlap_grid), 0, 1, 0, 1}, 
{&__pyx_n_s_cline_in_traceback, __pyx_k_cline_in_traceback, sizeof(__pyx_k_cline_in_traceback), 0, 0, 1, 1}, {&__pyx_n_s_d, __pyx_k_d, sizeof(__pyx_k_d), 0, 0, 1, 1}, {&__pyx_n_s_dtype, __pyx_k_dtype, sizeof(__pyx_k_dtype), 0, 0, 1, 1}, {&__pyx_n_s_dx, __pyx_k_dx, sizeof(__pyx_k_dx), 0, 0, 1, 1}, {&__pyx_n_s_dy, __pyx_k_dy, sizeof(__pyx_k_dy), 0, 0, 1, 1}, {&__pyx_n_s_float64, __pyx_k_float64, sizeof(__pyx_k_float64), 0, 0, 1, 1}, {&__pyx_n_s_frac, __pyx_k_frac, sizeof(__pyx_k_frac), 0, 0, 1, 1}, {&__pyx_n_s_i, __pyx_k_i, sizeof(__pyx_k_i), 0, 0, 1, 1}, {&__pyx_n_s_import, __pyx_k_import, sizeof(__pyx_k_import), 0, 0, 1, 1}, {&__pyx_n_s_j, __pyx_k_j, sizeof(__pyx_k_j), 0, 0, 1, 1}, {&__pyx_n_s_main, __pyx_k_main, sizeof(__pyx_k_main), 0, 0, 1, 1}, {&__pyx_kp_u_ndarray_is_not_C_contiguous, __pyx_k_ndarray_is_not_C_contiguous, sizeof(__pyx_k_ndarray_is_not_C_contiguous), 0, 1, 0, 0}, {&__pyx_kp_u_ndarray_is_not_Fortran_contiguou, __pyx_k_ndarray_is_not_Fortran_contiguou, sizeof(__pyx_k_ndarray_is_not_Fortran_contiguou), 0, 1, 0, 0}, {&__pyx_n_s_np, __pyx_k_np, sizeof(__pyx_k_np), 0, 0, 1, 1}, {&__pyx_n_s_numpy, __pyx_k_numpy, sizeof(__pyx_k_numpy), 0, 0, 1, 1}, {&__pyx_kp_u_numpy_core_multiarray_failed_to, __pyx_k_numpy_core_multiarray_failed_to, sizeof(__pyx_k_numpy_core_multiarray_failed_to), 0, 1, 0, 0}, {&__pyx_kp_u_numpy_core_umath_failed_to_impor, __pyx_k_numpy_core_umath_failed_to_impor, sizeof(__pyx_k_numpy_core_umath_failed_to_impor), 0, 1, 0, 0}, {&__pyx_n_s_nx, __pyx_k_nx, sizeof(__pyx_k_nx), 0, 0, 1, 1}, {&__pyx_n_s_ny, __pyx_k_ny, sizeof(__pyx_k_ny), 0, 0, 1, 1}, {&__pyx_kp_s_photutils_geometry_circular_over, __pyx_k_photutils_geometry_circular_over, sizeof(__pyx_k_photutils_geometry_circular_over), 0, 0, 1, 0}, {&__pyx_n_s_photutils_geometry_circular_over_2, __pyx_k_photutils_geometry_circular_over_2, sizeof(__pyx_k_photutils_geometry_circular_over_2), 0, 0, 1, 1}, {&__pyx_n_s_pixel_radius, __pyx_k_pixel_radius, sizeof(__pyx_k_pixel_radius), 0, 0, 1, 1}, {&__pyx_n_s_pxcen, __pyx_k_pxcen, sizeof(__pyx_k_pxcen), 0, 0, 1, 1}, {&__pyx_n_s_pxmax, __pyx_k_pxmax, sizeof(__pyx_k_pxmax), 0, 0, 1, 1}, {&__pyx_n_s_pxmin, __pyx_k_pxmin, sizeof(__pyx_k_pxmin), 0, 0, 1, 1}, {&__pyx_n_s_pycen, __pyx_k_pycen, sizeof(__pyx_k_pycen), 0, 0, 1, 1}, {&__pyx_n_s_pymax, __pyx_k_pymax, sizeof(__pyx_k_pymax), 0, 0, 1, 1}, {&__pyx_n_s_pymin, __pyx_k_pymin, sizeof(__pyx_k_pymin), 0, 0, 1, 1}, {&__pyx_n_s_r, __pyx_k_r, sizeof(__pyx_k_r), 0, 0, 1, 1}, {&__pyx_n_s_range, __pyx_k_range, sizeof(__pyx_k_range), 0, 0, 1, 1}, {&__pyx_n_s_subpixels, __pyx_k_subpixels, sizeof(__pyx_k_subpixels), 0, 0, 1, 1}, {&__pyx_n_s_test, __pyx_k_test, sizeof(__pyx_k_test), 0, 0, 1, 1}, {&__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, __pyx_k_unknown_dtype_code_in_numpy_pxd, sizeof(__pyx_k_unknown_dtype_code_in_numpy_pxd), 0, 1, 0, 0}, {&__pyx_n_s_use_exact, __pyx_k_use_exact, sizeof(__pyx_k_use_exact), 0, 0, 1, 1}, {&__pyx_n_s_x, __pyx_k_x, sizeof(__pyx_k_x), 0, 0, 1, 1}, {&__pyx_n_s_xmax, __pyx_k_xmax, sizeof(__pyx_k_xmax), 0, 0, 1, 1}, {&__pyx_n_s_xmin, __pyx_k_xmin, sizeof(__pyx_k_xmin), 0, 0, 1, 1}, {&__pyx_n_s_y, __pyx_k_y, sizeof(__pyx_k_y), 0, 0, 1, 1}, {&__pyx_n_s_ymax, __pyx_k_ymax, sizeof(__pyx_k_ymax), 0, 0, 1, 1}, {&__pyx_n_s_ymin, __pyx_k_ymin, sizeof(__pyx_k_ymin), 0, 0, 1, 1}, {&__pyx_n_s_zeros, __pyx_k_zeros, sizeof(__pyx_k_zeros), 0, 0, 1, 1}, {0, 0, 0, 0, 0, 0, 0} }; static int __Pyx_InitCachedBuiltins(void) { __pyx_builtin_range = __Pyx_GetBuiltinName(__pyx_n_s_range); if (!__pyx_builtin_range) __PYX_ERR(0, 
85, __pyx_L1_error) __pyx_builtin_ValueError = __Pyx_GetBuiltinName(__pyx_n_s_ValueError); if (!__pyx_builtin_ValueError) __PYX_ERR(1, 235, __pyx_L1_error) __pyx_builtin_RuntimeError = __Pyx_GetBuiltinName(__pyx_n_s_RuntimeError); if (!__pyx_builtin_RuntimeError) __PYX_ERR(1, 823, __pyx_L1_error) __pyx_builtin_ImportError = __Pyx_GetBuiltinName(__pyx_n_s_ImportError); if (!__pyx_builtin_ImportError) __PYX_ERR(1, 1013, __pyx_L1_error) return 0; __pyx_L1_error:; return -1; } static int __Pyx_InitCachedConstants(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":235 * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): * raise ValueError(u"ndarray is not C contiguous") # <<<<<<<<<<<<<< * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) */ __pyx_tuple_ = PyTuple_Pack(1, __pyx_kp_u_ndarray_is_not_C_contiguous); if (unlikely(!__pyx_tuple_)) __PYX_ERR(1, 235, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple_); __Pyx_GIVEREF(__pyx_tuple_); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":239 * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): * raise ValueError(u"ndarray is not Fortran contiguous") # <<<<<<<<<<<<<< * * info.buf = PyArray_DATA(self) */ __pyx_tuple__2 = PyTuple_Pack(1, __pyx_kp_u_ndarray_is_not_Fortran_contiguou); if (unlikely(!__pyx_tuple__2)) __PYX_ERR(1, 239, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__2); __Pyx_GIVEREF(__pyx_tuple__2); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":276 * if ((descr.byteorder == c'>' and little_endian) or * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" */ __pyx_tuple__3 = PyTuple_Pack(1, __pyx_kp_u_Non_native_byte_order_not_suppor); if (unlikely(!__pyx_tuple__3)) __PYX_ERR(1, 276, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__3); __Pyx_GIVEREF(__pyx_tuple__3); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":823 * * if (end - f) - (new_offset - offset[0]) < 15: * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") # <<<<<<<<<<<<<< * * if ((child.byteorder == c'>' and little_endian) or */ __pyx_tuple__4 = PyTuple_Pack(1, __pyx_kp_u_Format_string_allocated_too_shor); if (unlikely(!__pyx_tuple__4)) __PYX_ERR(1, 823, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__4); __Pyx_GIVEREF(__pyx_tuple__4); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":827 * if ((child.byteorder == c'>' and little_endian) or * (child.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< * # One could encode it in the format string and have Cython * # complain instead, BUT: < and > in format strings also imply */ __pyx_tuple__5 = PyTuple_Pack(1, __pyx_kp_u_Non_native_byte_order_not_suppor); if (unlikely(!__pyx_tuple__5)) __PYX_ERR(1, 827, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__5); __Pyx_GIVEREF(__pyx_tuple__5); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":847 * t = child.type_num * if end - f < 5: * raise 
RuntimeError(u"Format string allocated too short.") # <<<<<<<<<<<<<< * * # Until ticket #99 is fixed, use integers to avoid warnings */ __pyx_tuple__6 = PyTuple_Pack(1, __pyx_kp_u_Format_string_allocated_too_shor_2); if (unlikely(!__pyx_tuple__6)) __PYX_ERR(1, 847, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__6); __Pyx_GIVEREF(__pyx_tuple__6); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1013 * _import_array() * except Exception: * raise ImportError("numpy.core.multiarray failed to import") # <<<<<<<<<<<<<< * * cdef inline int import_umath() except -1: */ __pyx_tuple__7 = PyTuple_Pack(1, __pyx_kp_u_numpy_core_multiarray_failed_to); if (unlikely(!__pyx_tuple__7)) __PYX_ERR(1, 1013, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__7); __Pyx_GIVEREF(__pyx_tuple__7); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1019 * _import_umath() * except Exception: * raise ImportError("numpy.core.umath failed to import") # <<<<<<<<<<<<<< * * cdef inline int import_ufunc() except -1: */ __pyx_tuple__8 = PyTuple_Pack(1, __pyx_kp_u_numpy_core_umath_failed_to_impor); if (unlikely(!__pyx_tuple__8)) __PYX_ERR(1, 1019, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__8); __Pyx_GIVEREF(__pyx_tuple__8); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1025 * _import_umath() * except Exception: * raise ImportError("numpy.core.umath failed to import") # <<<<<<<<<<<<<< */ __pyx_tuple__9 = PyTuple_Pack(1, __pyx_kp_u_numpy_core_umath_failed_to_impor); if (unlikely(!__pyx_tuple__9)) __PYX_ERR(1, 1025, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__9); __Pyx_GIVEREF(__pyx_tuple__9); /* "photutils/geometry/circular_overlap.pyx":33 * * * def circular_overlap_grid(double xmin, double xmax, double ymin, double ymax, # <<<<<<<<<<<<<< * int nx, int ny, double r, int use_exact, * int subpixels): */ __pyx_tuple__10 = PyTuple_Pack(28, __pyx_n_s_xmin, __pyx_n_s_xmax, __pyx_n_s_ymin, __pyx_n_s_ymax, __pyx_n_s_nx, __pyx_n_s_ny, __pyx_n_s_r, __pyx_n_s_use_exact, __pyx_n_s_subpixels, __pyx_n_s_i, __pyx_n_s_j, __pyx_n_s_x, __pyx_n_s_y, __pyx_n_s_dx, __pyx_n_s_dy, __pyx_n_s_d, __pyx_n_s_pixel_radius, __pyx_n_s_bxmin, __pyx_n_s_bxmax, __pyx_n_s_bymin, __pyx_n_s_bymax, __pyx_n_s_pxmin, __pyx_n_s_pxcen, __pyx_n_s_pxmax, __pyx_n_s_pymin, __pyx_n_s_pycen, __pyx_n_s_pymax, __pyx_n_s_frac); if (unlikely(!__pyx_tuple__10)) __PYX_ERR(0, 33, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__10); __Pyx_GIVEREF(__pyx_tuple__10); __pyx_codeobj__11 = (PyObject*)__Pyx_PyCode_New(9, 0, 28, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__10, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_photutils_geometry_circular_over, __pyx_n_s_circular_overlap_grid, 33, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__11)) __PYX_ERR(0, 33, __pyx_L1_error) __Pyx_RefNannyFinishContext(); return 0; __pyx_L1_error:; __Pyx_RefNannyFinishContext(); return -1; } static int __Pyx_InitGlobals(void) { if (__Pyx_InitStrings(__pyx_string_tab) < 0) __PYX_ERR(0, 1, __pyx_L1_error); return 0; __pyx_L1_error:; return -1; } #if PY_MAJOR_VERSION < 3 PyMODINIT_FUNC initcircular_overlap(void); /*proto*/ PyMODINIT_FUNC initcircular_overlap(void) #else PyMODINIT_FUNC PyInit_circular_overlap(void); /*proto*/ PyMODINIT_FUNC PyInit_circular_overlap(void) #if CYTHON_PEP489_MULTI_PHASE_INIT { return PyModuleDef_Init(&__pyx_moduledef); } static int __Pyx_copy_spec_to_module(PyObject *spec, PyObject *moddict, const 
char* from_name, const char* to_name) { PyObject *value = PyObject_GetAttrString(spec, from_name); int result = 0; if (likely(value)) { result = PyDict_SetItemString(moddict, to_name, value); Py_DECREF(value); } else if (PyErr_ExceptionMatches(PyExc_AttributeError)) { PyErr_Clear(); } else { result = -1; } return result; } static PyObject* __pyx_pymod_create(PyObject *spec, CYTHON_UNUSED PyModuleDef *def) { PyObject *module = NULL, *moddict, *modname; if (__pyx_m) return __Pyx_NewRef(__pyx_m); modname = PyObject_GetAttrString(spec, "name"); if (unlikely(!modname)) goto bad; module = PyModule_NewObject(modname); Py_DECREF(modname); if (unlikely(!module)) goto bad; moddict = PyModule_GetDict(module); if (unlikely(!moddict)) goto bad; if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "loader", "__loader__") < 0)) goto bad; if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "origin", "__file__") < 0)) goto bad; if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "parent", "__package__") < 0)) goto bad; if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "submodule_search_locations", "__path__") < 0)) goto bad; return module; bad: Py_XDECREF(module); return NULL; } static int __pyx_pymod_exec_circular_overlap(PyObject *__pyx_pyinit_module) #endif #endif { PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; __Pyx_RefNannyDeclarations #if CYTHON_PEP489_MULTI_PHASE_INIT if (__pyx_m && __pyx_m == __pyx_pyinit_module) return 0; #endif #if CYTHON_REFNANNY __Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny"); if (!__Pyx_RefNanny) { PyErr_Clear(); __Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny"); if (!__Pyx_RefNanny) Py_FatalError("failed to import 'refnanny' module"); } #endif __Pyx_RefNannySetupContext("PyMODINIT_FUNC PyInit_circular_overlap(void)", 0); if (__Pyx_check_binary_version() < 0) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_empty_unicode = PyUnicode_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_unicode)) __PYX_ERR(0, 1, __pyx_L1_error) #ifdef __Pyx_CyFunction_USED if (__pyx_CyFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_FusedFunction_USED if (__pyx_FusedFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_Coroutine_USED if (__pyx_Coroutine_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_Generator_USED if (__pyx_Generator_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_AsyncGen_USED if (__pyx_AsyncGen_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_StopAsyncIteration_USED if (__pyx_StopAsyncIteration_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif /*--- Library function declarations ---*/ /*--- Threads initialization code ---*/ #if defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS #ifdef WITH_THREAD /* Python build with threading support? 
*/ PyEval_InitThreads(); #endif #endif /*--- Module creation code ---*/ #if CYTHON_PEP489_MULTI_PHASE_INIT __pyx_m = __pyx_pyinit_module; Py_INCREF(__pyx_m); #else #if PY_MAJOR_VERSION < 3 __pyx_m = Py_InitModule4("circular_overlap", __pyx_methods, __pyx_k_The_functions_defined_here_allo, 0, PYTHON_API_VERSION); Py_XINCREF(__pyx_m); #else __pyx_m = PyModule_Create(&__pyx_moduledef); #endif if (unlikely(!__pyx_m)) __PYX_ERR(0, 1, __pyx_L1_error) #endif __pyx_d = PyModule_GetDict(__pyx_m); if (unlikely(!__pyx_d)) __PYX_ERR(0, 1, __pyx_L1_error) Py_INCREF(__pyx_d); __pyx_b = PyImport_AddModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_b)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_cython_runtime = PyImport_AddModule((char *) "cython_runtime"); if (unlikely(!__pyx_cython_runtime)) __PYX_ERR(0, 1, __pyx_L1_error) #if CYTHON_COMPILING_IN_PYPY Py_INCREF(__pyx_b); #endif if (PyObject_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) __PYX_ERR(0, 1, __pyx_L1_error); /*--- Initialize various global constants etc. ---*/ if (__Pyx_InitGlobals() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #if PY_MAJOR_VERSION < 3 && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT) if (__Pyx_init_sys_getdefaultencoding_params() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif if (__pyx_module_is_main_photutils__geometry__circular_overlap) { if (PyObject_SetAttrString(__pyx_m, "__name__", __pyx_n_s_main) < 0) __PYX_ERR(0, 1, __pyx_L1_error) } #if PY_MAJOR_VERSION >= 3 { PyObject *modules = PyImport_GetModuleDict(); if (unlikely(!modules)) __PYX_ERR(0, 1, __pyx_L1_error) if (!PyDict_GetItemString(modules, "photutils.geometry.circular_overlap")) { if (unlikely(PyDict_SetItemString(modules, "photutils.geometry.circular_overlap", __pyx_m) < 0)) __PYX_ERR(0, 1, __pyx_L1_error) } } #endif /*--- Builtin init code ---*/ if (__Pyx_InitCachedBuiltins() < 0) __PYX_ERR(0, 1, __pyx_L1_error) /*--- Constants init code ---*/ if (__Pyx_InitCachedConstants() < 0) __PYX_ERR(0, 1, __pyx_L1_error) /*--- Global init code ---*/ /*--- Variable export code ---*/ /*--- Function export code ---*/ /*--- Type init code ---*/ /*--- Type import code ---*/ __pyx_ptype_7cpython_4type_type = __Pyx_ImportType(__Pyx_BUILTIN_MODULE_NAME, "type", #if CYTHON_COMPILING_IN_PYPY sizeof(PyTypeObject), #else sizeof(PyHeapTypeObject), #endif 0); if (unlikely(!__pyx_ptype_7cpython_4type_type)) __PYX_ERR(2, 9, __pyx_L1_error) __pyx_ptype_5numpy_dtype = __Pyx_ImportType("numpy", "dtype", sizeof(PyArray_Descr), 0); if (unlikely(!__pyx_ptype_5numpy_dtype)) __PYX_ERR(1, 163, __pyx_L1_error) __pyx_ptype_5numpy_flatiter = __Pyx_ImportType("numpy", "flatiter", sizeof(PyArrayIterObject), 0); if (unlikely(!__pyx_ptype_5numpy_flatiter)) __PYX_ERR(1, 185, __pyx_L1_error) __pyx_ptype_5numpy_broadcast = __Pyx_ImportType("numpy", "broadcast", sizeof(PyArrayMultiIterObject), 0); if (unlikely(!__pyx_ptype_5numpy_broadcast)) __PYX_ERR(1, 189, __pyx_L1_error) __pyx_ptype_5numpy_ndarray = __Pyx_ImportType("numpy", "ndarray", sizeof(PyArrayObject), 0); if (unlikely(!__pyx_ptype_5numpy_ndarray)) __PYX_ERR(1, 198, __pyx_L1_error) __pyx_ptype_5numpy_ufunc = __Pyx_ImportType("numpy", "ufunc", sizeof(PyUFuncObject), 0); if (unlikely(!__pyx_ptype_5numpy_ufunc)) __PYX_ERR(1, 885, __pyx_L1_error) /*--- Variable import code ---*/ /*--- Function import code ---*/ __pyx_t_1 = __Pyx_ImportModule("photutils.geometry.core"); if (!__pyx_t_1) __PYX_ERR(0, 1, __pyx_L1_error) if (__Pyx_ImportFunction(__pyx_t_1, "area_arc", (void 
(**)(void))&__pyx_f_9photutils_8geometry_4core_area_arc, "double (double, double, double, double, double)") < 0) __PYX_ERR(0, 1, __pyx_L1_error) if (__Pyx_ImportFunction(__pyx_t_1, "area_triangle", (void (**)(void))&__pyx_f_9photutils_8geometry_4core_area_triangle, "double (double, double, double, double, double, double)") < 0) __PYX_ERR(0, 1, __pyx_L1_error) if (__Pyx_ImportFunction(__pyx_t_1, "floor_sqrt", (void (**)(void))&__pyx_f_9photutils_8geometry_4core_floor_sqrt, "double (double)") < 0) __PYX_ERR(0, 1, __pyx_L1_error) Py_DECREF(__pyx_t_1); __pyx_t_1 = 0; /*--- Execution code ---*/ #if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED) if (__Pyx_patch_abc() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif /* "photutils/geometry/circular_overlap.pyx":10 * unicode_literals) * * import numpy as np # <<<<<<<<<<<<<< * cimport numpy as np * */ __pyx_t_2 = __Pyx_Import(__pyx_n_s_numpy, 0, 0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 10, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_d, __pyx_n_s_np, __pyx_t_2) < 0) __PYX_ERR(0, 10, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "photutils/geometry/circular_overlap.pyx":14 * * * __all__ = ['circular_overlap_grid'] # <<<<<<<<<<<<<< * * */ __pyx_t_2 = PyList_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 14, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_INCREF(__pyx_n_u_circular_overlap_grid); __Pyx_GIVEREF(__pyx_n_u_circular_overlap_grid); PyList_SET_ITEM(__pyx_t_2, 0, __pyx_n_u_circular_overlap_grid); if (PyDict_SetItem(__pyx_d, __pyx_n_s_all, __pyx_t_2) < 0) __PYX_ERR(0, 14, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "photutils/geometry/circular_overlap.pyx":24 * * * DTYPE = np.float64 # <<<<<<<<<<<<<< * ctypedef np.float64_t DTYPE_t * */ __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 24, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_float64); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 24, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; if (PyDict_SetItem(__pyx_d, __pyx_n_s_DTYPE, __pyx_t_3) < 0) __PYX_ERR(0, 24, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "photutils/geometry/circular_overlap.pyx":33 * * * def circular_overlap_grid(double xmin, double xmax, double ymin, double ymax, # <<<<<<<<<<<<<< * int nx, int ny, double r, int use_exact, * int subpixels): */ __pyx_t_3 = PyCFunction_NewEx(&__pyx_mdef_9photutils_8geometry_16circular_overlap_1circular_overlap_grid, NULL, __pyx_n_s_photutils_geometry_circular_over_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 33, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); if (PyDict_SetItem(__pyx_d, __pyx_n_s_circular_overlap_grid, __pyx_t_3) < 0) __PYX_ERR(0, 33, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "photutils/geometry/circular_overlap.pyx":1 * # Licensed under a 3-clause BSD style license - see LICENSE.rst # <<<<<<<<<<<<<< * """ * The functions defined here allow one to determine the exact area of */ __pyx_t_3 = __Pyx_PyDict_NewPresized(0); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); if (PyDict_SetItem(__pyx_d, __pyx_n_s_test, __pyx_t_3) < 0) __PYX_ERR(0, 1, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1021 * raise ImportError("numpy.core.umath failed to import") * * cdef inline int import_ufunc() except -1: # <<<<<<<<<<<<<< * try: * 
_import_umath() */ /*--- Wrapped vars code ---*/ goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); if (__pyx_m) { if (__pyx_d) { __Pyx_AddTraceback("init photutils.geometry.circular_overlap", 0, __pyx_lineno, __pyx_filename); } Py_DECREF(__pyx_m); __pyx_m = 0; } else if (!PyErr_Occurred()) { PyErr_SetString(PyExc_ImportError, "init photutils.geometry.circular_overlap"); } __pyx_L0:; __Pyx_RefNannyFinishContext(); #if CYTHON_PEP489_MULTI_PHASE_INIT return (__pyx_m != NULL) ? 0 : -1; #elif PY_MAJOR_VERSION >= 3 return __pyx_m; #else return; #endif } /* --- Runtime support code --- */ /* Refnanny */ #if CYTHON_REFNANNY static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) { PyObject *m = NULL, *p = NULL; void *r = NULL; m = PyImport_ImportModule((char *)modname); if (!m) goto end; p = PyObject_GetAttrString(m, (char *)"RefNannyAPI"); if (!p) goto end; r = PyLong_AsVoidPtr(p); end: Py_XDECREF(p); Py_XDECREF(m); return (__Pyx_RefNannyAPIStruct *)r; } #endif /* GetBuiltinName */ static PyObject *__Pyx_GetBuiltinName(PyObject *name) { PyObject* result = __Pyx_PyObject_GetAttrStr(__pyx_b, name); if (unlikely(!result)) { PyErr_Format(PyExc_NameError, #if PY_MAJOR_VERSION >= 3 "name '%U' is not defined", name); #else "name '%.200s' is not defined", PyString_AS_STRING(name)); #endif } return result; } /* RaiseArgTupleInvalid */ static void __Pyx_RaiseArgtupleInvalid( const char* func_name, int exact, Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found) { Py_ssize_t num_expected; const char *more_or_less; if (num_found < num_min) { num_expected = num_min; more_or_less = "at least"; } else { num_expected = num_max; more_or_less = "at most"; } if (exact) { more_or_less = "exactly"; } PyErr_Format(PyExc_TypeError, "%.200s() takes %.8s %" CYTHON_FORMAT_SSIZE_T "d positional argument%.1s (%" CYTHON_FORMAT_SSIZE_T "d given)", func_name, more_or_less, num_expected, (num_expected == 1) ? "" : "s", num_found); } /* RaiseDoubleKeywords */ static void __Pyx_RaiseDoubleKeywordsError( const char* func_name, PyObject* kw_name) { PyErr_Format(PyExc_TypeError, #if PY_MAJOR_VERSION >= 3 "%s() got multiple values for keyword argument '%U'", func_name, kw_name); #else "%s() got multiple values for keyword argument '%s'", func_name, PyString_AsString(kw_name)); #endif } /* ParseKeywords */ static int __Pyx_ParseOptionalKeywords( PyObject *kwds, PyObject **argnames[], PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args, const char* function_name) { PyObject *key = 0, *value = 0; Py_ssize_t pos = 0; PyObject*** name; PyObject*** first_kw_arg = argnames + num_pos_args; while (PyDict_Next(kwds, &pos, &key, &value)) { name = first_kw_arg; while (*name && (**name != key)) name++; if (*name) { values[name-argnames] = value; continue; } name = first_kw_arg; #if PY_MAJOR_VERSION < 3 if (likely(PyString_CheckExact(key)) || likely(PyString_Check(key))) { while (*name) { if ((CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**name) == PyString_GET_SIZE(key)) && _PyString_Eq(**name, key)) { values[name-argnames] = value; break; } name++; } if (*name) continue; else { PyObject*** argname = argnames; while (argname != first_kw_arg) { if ((**argname == key) || ( (CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**argname) == PyString_GET_SIZE(key)) && _PyString_Eq(**argname, key))) { goto arg_passed_twice; } argname++; } } } else #endif if (likely(PyUnicode_Check(key))) { while (*name) { int cmp = (**name == key) ? 
0 : #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 (PyUnicode_GET_SIZE(**name) != PyUnicode_GET_SIZE(key)) ? 1 : #endif PyUnicode_Compare(**name, key); if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; if (cmp == 0) { values[name-argnames] = value; break; } name++; } if (*name) continue; else { PyObject*** argname = argnames; while (argname != first_kw_arg) { int cmp = (**argname == key) ? 0 : #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 (PyUnicode_GET_SIZE(**argname) != PyUnicode_GET_SIZE(key)) ? 1 : #endif PyUnicode_Compare(**argname, key); if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; if (cmp == 0) goto arg_passed_twice; argname++; } } } else goto invalid_keyword_type; if (kwds2) { if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad; } else { goto invalid_keyword; } } return 0; arg_passed_twice: __Pyx_RaiseDoubleKeywordsError(function_name, key); goto bad; invalid_keyword_type: PyErr_Format(PyExc_TypeError, "%.200s() keywords must be strings", function_name); goto bad; invalid_keyword: PyErr_Format(PyExc_TypeError, #if PY_MAJOR_VERSION < 3 "%.200s() got an unexpected keyword argument '%.200s'", function_name, PyString_AsString(key)); #else "%s() got an unexpected keyword argument '%U'", function_name, key); #endif bad: return -1; } /* GetModuleGlobalName */ static CYTHON_INLINE PyObject *__Pyx_GetModuleGlobalName(PyObject *name) { PyObject *result; #if !CYTHON_AVOID_BORROWED_REFS result = PyDict_GetItem(__pyx_d, name); if (likely(result)) { Py_INCREF(result); } else { #else result = PyObject_GetItem(__pyx_d, name); if (!result) { PyErr_Clear(); #endif result = __Pyx_GetBuiltinName(name); } return result; } /* PyObjectCall */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw) { PyObject *result; ternaryfunc call = func->ob_type->tp_call; if (unlikely(!call)) return PyObject_Call(func, arg, kw); if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) return NULL; result = (*call)(func, arg, kw); Py_LeaveRecursiveCall(); if (unlikely(!result) && unlikely(!PyErr_Occurred())) { PyErr_SetString( PyExc_SystemError, "NULL result without error in PyObject_Call"); } return result; } #endif /* ExtTypeTest */ static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type) { if (unlikely(!type)) { PyErr_SetString(PyExc_SystemError, "Missing type object"); return 0; } if (likely(__Pyx_TypeCheck(obj, type))) return 1; PyErr_Format(PyExc_TypeError, "Cannot convert %.200s to %.200s", Py_TYPE(obj)->tp_name, type->tp_name); return 0; } /* IsLittleEndian */ static CYTHON_INLINE int __Pyx_Is_Little_Endian(void) { union { uint32_t u32; uint8_t u8[4]; } S; S.u32 = 0x01020304; return S.u8[0] == 4; } /* BufferFormatCheck */ static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx, __Pyx_BufFmt_StackElem* stack, __Pyx_TypeInfo* type) { stack[0].field = &ctx->root; stack[0].parent_offset = 0; ctx->root.type = type; ctx->root.name = "buffer dtype"; ctx->root.offset = 0; ctx->head = stack; ctx->head->field = &ctx->root; ctx->fmt_offset = 0; ctx->head->parent_offset = 0; ctx->new_packmode = '@'; ctx->enc_packmode = '@'; ctx->new_count = 1; ctx->enc_count = 0; ctx->enc_type = 0; ctx->is_complex = 0; ctx->is_valid_array = 0; ctx->struct_alignment = 0; while (type->typegroup == 'S') { ++ctx->head; ctx->head->field = type->fields; ctx->head->parent_offset = 0; type = type->fields->type; } } static int __Pyx_BufFmt_ParseNumber(const char** ts) { int count; const char* t = *ts; if 
(*t < '0' || *t > '9') { return -1; } else { count = *t++ - '0'; while (*t >= '0' && *t < '9') { count *= 10; count += *t++ - '0'; } } *ts = t; return count; } static int __Pyx_BufFmt_ExpectNumber(const char **ts) { int number = __Pyx_BufFmt_ParseNumber(ts); if (number == -1) PyErr_Format(PyExc_ValueError,\ "Does not understand character buffer dtype format string ('%c')", **ts); return number; } static void __Pyx_BufFmt_RaiseUnexpectedChar(char ch) { PyErr_Format(PyExc_ValueError, "Unexpected format string character: '%c'", ch); } static const char* __Pyx_BufFmt_DescribeTypeChar(char ch, int is_complex) { switch (ch) { case 'c': return "'char'"; case 'b': return "'signed char'"; case 'B': return "'unsigned char'"; case 'h': return "'short'"; case 'H': return "'unsigned short'"; case 'i': return "'int'"; case 'I': return "'unsigned int'"; case 'l': return "'long'"; case 'L': return "'unsigned long'"; case 'q': return "'long long'"; case 'Q': return "'unsigned long long'"; case 'f': return (is_complex ? "'complex float'" : "'float'"); case 'd': return (is_complex ? "'complex double'" : "'double'"); case 'g': return (is_complex ? "'complex long double'" : "'long double'"); case 'T': return "a struct"; case 'O': return "Python object"; case 'P': return "a pointer"; case 's': case 'p': return "a string"; case 0: return "end"; default: return "unparseable format string"; } } static size_t __Pyx_BufFmt_TypeCharToStandardSize(char ch, int is_complex) { switch (ch) { case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return 2; case 'i': case 'I': case 'l': case 'L': return 4; case 'q': case 'Q': return 8; case 'f': return (is_complex ? 8 : 4); case 'd': return (is_complex ? 16 : 8); case 'g': { PyErr_SetString(PyExc_ValueError, "Python does not define a standard format string size for long double ('g').."); return 0; } case 'O': case 'P': return sizeof(void*); default: __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } static size_t __Pyx_BufFmt_TypeCharToNativeSize(char ch, int is_complex) { switch (ch) { case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return sizeof(short); case 'i': case 'I': return sizeof(int); case 'l': case 'L': return sizeof(long); #ifdef HAVE_LONG_LONG case 'q': case 'Q': return sizeof(PY_LONG_LONG); #endif case 'f': return sizeof(float) * (is_complex ? 2 : 1); case 'd': return sizeof(double) * (is_complex ? 2 : 1); case 'g': return sizeof(long double) * (is_complex ? 
2 : 1); case 'O': case 'P': return sizeof(void*); default: { __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } } typedef struct { char c; short x; } __Pyx_st_short; typedef struct { char c; int x; } __Pyx_st_int; typedef struct { char c; long x; } __Pyx_st_long; typedef struct { char c; float x; } __Pyx_st_float; typedef struct { char c; double x; } __Pyx_st_double; typedef struct { char c; long double x; } __Pyx_st_longdouble; typedef struct { char c; void *x; } __Pyx_st_void_p; #ifdef HAVE_LONG_LONG typedef struct { char c; PY_LONG_LONG x; } __Pyx_st_longlong; #endif static size_t __Pyx_BufFmt_TypeCharToAlignment(char ch, CYTHON_UNUSED int is_complex) { switch (ch) { case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return sizeof(__Pyx_st_short) - sizeof(short); case 'i': case 'I': return sizeof(__Pyx_st_int) - sizeof(int); case 'l': case 'L': return sizeof(__Pyx_st_long) - sizeof(long); #ifdef HAVE_LONG_LONG case 'q': case 'Q': return sizeof(__Pyx_st_longlong) - sizeof(PY_LONG_LONG); #endif case 'f': return sizeof(__Pyx_st_float) - sizeof(float); case 'd': return sizeof(__Pyx_st_double) - sizeof(double); case 'g': return sizeof(__Pyx_st_longdouble) - sizeof(long double); case 'P': case 'O': return sizeof(__Pyx_st_void_p) - sizeof(void*); default: __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } /* These are for computing the padding at the end of the struct to align on the first member of the struct. This will probably the same as above, but we don't have any guarantees. */ typedef struct { short x; char c; } __Pyx_pad_short; typedef struct { int x; char c; } __Pyx_pad_int; typedef struct { long x; char c; } __Pyx_pad_long; typedef struct { float x; char c; } __Pyx_pad_float; typedef struct { double x; char c; } __Pyx_pad_double; typedef struct { long double x; char c; } __Pyx_pad_longdouble; typedef struct { void *x; char c; } __Pyx_pad_void_p; #ifdef HAVE_LONG_LONG typedef struct { PY_LONG_LONG x; char c; } __Pyx_pad_longlong; #endif static size_t __Pyx_BufFmt_TypeCharToPadding(char ch, CYTHON_UNUSED int is_complex) { switch (ch) { case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return sizeof(__Pyx_pad_short) - sizeof(short); case 'i': case 'I': return sizeof(__Pyx_pad_int) - sizeof(int); case 'l': case 'L': return sizeof(__Pyx_pad_long) - sizeof(long); #ifdef HAVE_LONG_LONG case 'q': case 'Q': return sizeof(__Pyx_pad_longlong) - sizeof(PY_LONG_LONG); #endif case 'f': return sizeof(__Pyx_pad_float) - sizeof(float); case 'd': return sizeof(__Pyx_pad_double) - sizeof(double); case 'g': return sizeof(__Pyx_pad_longdouble) - sizeof(long double); case 'P': case 'O': return sizeof(__Pyx_pad_void_p) - sizeof(void*); default: __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } static char __Pyx_BufFmt_TypeCharToGroup(char ch, int is_complex) { switch (ch) { case 'c': return 'H'; case 'b': case 'h': case 'i': case 'l': case 'q': case 's': case 'p': return 'I'; case 'B': case 'H': case 'I': case 'L': case 'Q': return 'U'; case 'f': case 'd': case 'g': return (is_complex ? 
'C' : 'R'); case 'O': return 'O'; case 'P': return 'P'; default: { __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } } static void __Pyx_BufFmt_RaiseExpected(__Pyx_BufFmt_Context* ctx) { if (ctx->head == NULL || ctx->head->field == &ctx->root) { const char* expected; const char* quote; if (ctx->head == NULL) { expected = "end"; quote = ""; } else { expected = ctx->head->field->type->name; quote = "'"; } PyErr_Format(PyExc_ValueError, "Buffer dtype mismatch, expected %s%s%s but got %s", quote, expected, quote, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex)); } else { __Pyx_StructField* field = ctx->head->field; __Pyx_StructField* parent = (ctx->head - 1)->field; PyErr_Format(PyExc_ValueError, "Buffer dtype mismatch, expected '%s' but got %s in '%s.%s'", field->type->name, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex), parent->type->name, field->name); } } static int __Pyx_BufFmt_ProcessTypeChunk(__Pyx_BufFmt_Context* ctx) { char group; size_t size, offset, arraysize = 1; if (ctx->enc_type == 0) return 0; if (ctx->head->field->type->arraysize[0]) { int i, ndim = 0; if (ctx->enc_type == 's' || ctx->enc_type == 'p') { ctx->is_valid_array = ctx->head->field->type->ndim == 1; ndim = 1; if (ctx->enc_count != ctx->head->field->type->arraysize[0]) { PyErr_Format(PyExc_ValueError, "Expected a dimension of size %zu, got %zu", ctx->head->field->type->arraysize[0], ctx->enc_count); return -1; } } if (!ctx->is_valid_array) { PyErr_Format(PyExc_ValueError, "Expected %d dimensions, got %d", ctx->head->field->type->ndim, ndim); return -1; } for (i = 0; i < ctx->head->field->type->ndim; i++) { arraysize *= ctx->head->field->type->arraysize[i]; } ctx->is_valid_array = 0; ctx->enc_count = 1; } group = __Pyx_BufFmt_TypeCharToGroup(ctx->enc_type, ctx->is_complex); do { __Pyx_StructField* field = ctx->head->field; __Pyx_TypeInfo* type = field->type; if (ctx->enc_packmode == '@' || ctx->enc_packmode == '^') { size = __Pyx_BufFmt_TypeCharToNativeSize(ctx->enc_type, ctx->is_complex); } else { size = __Pyx_BufFmt_TypeCharToStandardSize(ctx->enc_type, ctx->is_complex); } if (ctx->enc_packmode == '@') { size_t align_at = __Pyx_BufFmt_TypeCharToAlignment(ctx->enc_type, ctx->is_complex); size_t align_mod_offset; if (align_at == 0) return -1; align_mod_offset = ctx->fmt_offset % align_at; if (align_mod_offset > 0) ctx->fmt_offset += align_at - align_mod_offset; if (ctx->struct_alignment == 0) ctx->struct_alignment = __Pyx_BufFmt_TypeCharToPadding(ctx->enc_type, ctx->is_complex); } if (type->size != size || type->typegroup != group) { if (type->typegroup == 'C' && type->fields != NULL) { size_t parent_offset = ctx->head->parent_offset + field->offset; ++ctx->head; ctx->head->field = type->fields; ctx->head->parent_offset = parent_offset; continue; } if ((type->typegroup == 'H' || group == 'H') && type->size == size) { } else { __Pyx_BufFmt_RaiseExpected(ctx); return -1; } } offset = ctx->head->parent_offset + field->offset; if (ctx->fmt_offset != offset) { PyErr_Format(PyExc_ValueError, "Buffer dtype mismatch; next field is at offset %" CYTHON_FORMAT_SSIZE_T "d but %" CYTHON_FORMAT_SSIZE_T "d expected", (Py_ssize_t)ctx->fmt_offset, (Py_ssize_t)offset); return -1; } ctx->fmt_offset += size; if (arraysize) ctx->fmt_offset += (arraysize - 1) * size; --ctx->enc_count; while (1) { if (field == &ctx->root) { ctx->head = NULL; if (ctx->enc_count != 0) { __Pyx_BufFmt_RaiseExpected(ctx); return -1; } break; } ctx->head->field = ++field; if (field->type == NULL) { --ctx->head; field = 
ctx->head->field; continue; } else if (field->type->typegroup == 'S') { size_t parent_offset = ctx->head->parent_offset + field->offset; if (field->type->fields->type == NULL) continue; field = field->type->fields; ++ctx->head; ctx->head->field = field; ctx->head->parent_offset = parent_offset; break; } else { break; } } } while (ctx->enc_count); ctx->enc_type = 0; ctx->is_complex = 0; return 0; } static PyObject * __pyx_buffmt_parse_array(__Pyx_BufFmt_Context* ctx, const char** tsp) { const char *ts = *tsp; int i = 0, number; int ndim = ctx->head->field->type->ndim; ; ++ts; if (ctx->new_count != 1) { PyErr_SetString(PyExc_ValueError, "Cannot handle repeated arrays in format string"); return NULL; } if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; while (*ts && *ts != ')') { switch (*ts) { case ' ': case '\f': case '\r': case '\n': case '\t': case '\v': continue; default: break; } number = __Pyx_BufFmt_ExpectNumber(&ts); if (number == -1) return NULL; if (i < ndim && (size_t) number != ctx->head->field->type->arraysize[i]) return PyErr_Format(PyExc_ValueError, "Expected a dimension of size %zu, got %d", ctx->head->field->type->arraysize[i], number); if (*ts != ',' && *ts != ')') return PyErr_Format(PyExc_ValueError, "Expected a comma in format string, got '%c'", *ts); if (*ts == ',') ts++; i++; } if (i != ndim) return PyErr_Format(PyExc_ValueError, "Expected %d dimension(s), got %d", ctx->head->field->type->ndim, i); if (!*ts) { PyErr_SetString(PyExc_ValueError, "Unexpected end of format string, expected ')'"); return NULL; } ctx->is_valid_array = 1; ctx->new_count = 1; *tsp = ++ts; return Py_None; } static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts) { int got_Z = 0; while (1) { switch(*ts) { case 0: if (ctx->enc_type != 0 && ctx->head == NULL) { __Pyx_BufFmt_RaiseExpected(ctx); return NULL; } if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; if (ctx->head != NULL) { __Pyx_BufFmt_RaiseExpected(ctx); return NULL; } return ts; case ' ': case '\r': case '\n': ++ts; break; case '<': if (!__Pyx_Is_Little_Endian()) { PyErr_SetString(PyExc_ValueError, "Little-endian buffer not supported on big-endian compiler"); return NULL; } ctx->new_packmode = '='; ++ts; break; case '>': case '!': if (__Pyx_Is_Little_Endian()) { PyErr_SetString(PyExc_ValueError, "Big-endian buffer not supported on little-endian compiler"); return NULL; } ctx->new_packmode = '='; ++ts; break; case '=': case '@': case '^': ctx->new_packmode = *ts++; break; case 'T': { const char* ts_after_sub; size_t i, struct_count = ctx->new_count; size_t struct_alignment = ctx->struct_alignment; ctx->new_count = 1; ++ts; if (*ts != '{') { PyErr_SetString(PyExc_ValueError, "Buffer acquisition: Expected '{' after 'T'"); return NULL; } if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->enc_type = 0; ctx->enc_count = 0; ctx->struct_alignment = 0; ++ts; ts_after_sub = ts; for (i = 0; i != struct_count; ++i) { ts_after_sub = __Pyx_BufFmt_CheckString(ctx, ts); if (!ts_after_sub) return NULL; } ts = ts_after_sub; if (struct_alignment) ctx->struct_alignment = struct_alignment; } break; case '}': { size_t alignment = ctx->struct_alignment; ++ts; if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->enc_type = 0; if (alignment && ctx->fmt_offset % alignment) { ctx->fmt_offset += alignment - (ctx->fmt_offset % alignment); } } return ts; case 'x': if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->fmt_offset += ctx->new_count; ctx->new_count = 1; ctx->enc_count = 0; 
ctx->enc_type = 0; ctx->enc_packmode = ctx->new_packmode; ++ts; break; case 'Z': got_Z = 1; ++ts; if (*ts != 'f' && *ts != 'd' && *ts != 'g') { __Pyx_BufFmt_RaiseUnexpectedChar('Z'); return NULL; } case 'c': case 'b': case 'B': case 'h': case 'H': case 'i': case 'I': case 'l': case 'L': case 'q': case 'Q': case 'f': case 'd': case 'g': case 'O': case 'p': if (ctx->enc_type == *ts && got_Z == ctx->is_complex && ctx->enc_packmode == ctx->new_packmode) { ctx->enc_count += ctx->new_count; ctx->new_count = 1; got_Z = 0; ++ts; break; } case 's': if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->enc_count = ctx->new_count; ctx->enc_packmode = ctx->new_packmode; ctx->enc_type = *ts; ctx->is_complex = got_Z; ++ts; ctx->new_count = 1; got_Z = 0; break; case ':': ++ts; while(*ts != ':') ++ts; ++ts; break; case '(': if (!__pyx_buffmt_parse_array(ctx, &ts)) return NULL; break; default: { int number = __Pyx_BufFmt_ExpectNumber(&ts); if (number == -1) return NULL; ctx->new_count = (size_t)number; } } } } /* BufferGetAndValidate */ static CYTHON_INLINE void __Pyx_SafeReleaseBuffer(Py_buffer* info) { if (unlikely(info->buf == NULL)) return; if (info->suboffsets == __Pyx_minusones) info->suboffsets = NULL; __Pyx_ReleaseBuffer(info); } static void __Pyx_ZeroBuffer(Py_buffer* buf) { buf->buf = NULL; buf->obj = NULL; buf->strides = __Pyx_zeros; buf->shape = __Pyx_zeros; buf->suboffsets = __Pyx_minusones; } static int __Pyx__GetBufferAndValidate( Py_buffer* buf, PyObject* obj, __Pyx_TypeInfo* dtype, int flags, int nd, int cast, __Pyx_BufFmt_StackElem* stack) { buf->buf = NULL; if (unlikely(__Pyx_GetBuffer(obj, buf, flags) == -1)) { __Pyx_ZeroBuffer(buf); return -1; } if (unlikely(buf->ndim != nd)) { PyErr_Format(PyExc_ValueError, "Buffer has wrong number of dimensions (expected %d, got %d)", nd, buf->ndim); goto fail; } if (!cast) { __Pyx_BufFmt_Context ctx; __Pyx_BufFmt_Init(&ctx, stack, dtype); if (!__Pyx_BufFmt_CheckString(&ctx, buf->format)) goto fail; } if (unlikely((unsigned)buf->itemsize != dtype->size)) { PyErr_Format(PyExc_ValueError, "Item size of buffer (%" CYTHON_FORMAT_SSIZE_T "d byte%s) does not match size of '%s' (%" CYTHON_FORMAT_SSIZE_T "d byte%s)", buf->itemsize, (buf->itemsize > 1) ? "s" : "", dtype->name, (Py_ssize_t)dtype->size, (dtype->size > 1) ? 
"s" : ""); goto fail; } if (buf->suboffsets == NULL) buf->suboffsets = __Pyx_minusones; return 0; fail:; __Pyx_SafeReleaseBuffer(buf); return -1; } /* BufferIndexError */ static void __Pyx_RaiseBufferIndexError(int axis) { PyErr_Format(PyExc_IndexError, "Out of bounds on buffer access (axis %d)", axis); } /* PyErrFetchRestore */ #if CYTHON_FAST_THREAD_STATE static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) { PyObject *tmp_type, *tmp_value, *tmp_tb; tmp_type = tstate->curexc_type; tmp_value = tstate->curexc_value; tmp_tb = tstate->curexc_traceback; tstate->curexc_type = type; tstate->curexc_value = value; tstate->curexc_traceback = tb; Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); } static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { *type = tstate->curexc_type; *value = tstate->curexc_value; *tb = tstate->curexc_traceback; tstate->curexc_type = 0; tstate->curexc_value = 0; tstate->curexc_traceback = 0; } #endif /* WriteUnraisableException */ static void __Pyx_WriteUnraisable(const char *name, CYTHON_UNUSED int clineno, CYTHON_UNUSED int lineno, CYTHON_UNUSED const char *filename, int full_traceback, CYTHON_UNUSED int nogil) { PyObject *old_exc, *old_val, *old_tb; PyObject *ctx; __Pyx_PyThreadState_declare #ifdef WITH_THREAD PyGILState_STATE state; if (nogil) state = PyGILState_Ensure(); #ifdef _MSC_VER else state = (PyGILState_STATE)-1; #endif #endif __Pyx_PyThreadState_assign __Pyx_ErrFetch(&old_exc, &old_val, &old_tb); if (full_traceback) { Py_XINCREF(old_exc); Py_XINCREF(old_val); Py_XINCREF(old_tb); __Pyx_ErrRestore(old_exc, old_val, old_tb); PyErr_PrintEx(1); } #if PY_MAJOR_VERSION < 3 ctx = PyString_FromString(name); #else ctx = PyUnicode_FromString(name); #endif __Pyx_ErrRestore(old_exc, old_val, old_tb); if (!ctx) { PyErr_WriteUnraisable(Py_None); } else { PyErr_WriteUnraisable(ctx); Py_DECREF(ctx); } #ifdef WITH_THREAD if (nogil) PyGILState_Release(state); #endif } /* RaiseException */ #if PY_MAJOR_VERSION < 3 static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, CYTHON_UNUSED PyObject *cause) { __Pyx_PyThreadState_declare Py_XINCREF(type); if (!value || value == Py_None) value = NULL; else Py_INCREF(value); if (!tb || tb == Py_None) tb = NULL; else { Py_INCREF(tb); if (!PyTraceBack_Check(tb)) { PyErr_SetString(PyExc_TypeError, "raise: arg 3 must be a traceback or None"); goto raise_error; } } if (PyType_Check(type)) { #if CYTHON_COMPILING_IN_PYPY if (!value) { Py_INCREF(Py_None); value = Py_None; } #endif PyErr_NormalizeException(&type, &value, &tb); } else { if (value) { PyErr_SetString(PyExc_TypeError, "instance exception may not have a separate value"); goto raise_error; } value = type; type = (PyObject*) Py_TYPE(type); Py_INCREF(type); if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) { PyErr_SetString(PyExc_TypeError, "raise: exception class must be a subclass of BaseException"); goto raise_error; } } __Pyx_PyThreadState_assign __Pyx_ErrRestore(type, value, tb); return; raise_error: Py_XDECREF(value); Py_XDECREF(type); Py_XDECREF(tb); return; } #else static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) { PyObject* owned_instance = NULL; if (tb == Py_None) { tb = 0; } else if (tb && !PyTraceBack_Check(tb)) { PyErr_SetString(PyExc_TypeError, "raise: arg 3 must be a traceback or None"); goto bad; } if (value == Py_None) value = 0; if 
(PyExceptionInstance_Check(type)) { if (value) { PyErr_SetString(PyExc_TypeError, "instance exception may not have a separate value"); goto bad; } value = type; type = (PyObject*) Py_TYPE(value); } else if (PyExceptionClass_Check(type)) { PyObject *instance_class = NULL; if (value && PyExceptionInstance_Check(value)) { instance_class = (PyObject*) Py_TYPE(value); if (instance_class != type) { int is_subclass = PyObject_IsSubclass(instance_class, type); if (!is_subclass) { instance_class = NULL; } else if (unlikely(is_subclass == -1)) { goto bad; } else { type = instance_class; } } } if (!instance_class) { PyObject *args; if (!value) args = PyTuple_New(0); else if (PyTuple_Check(value)) { Py_INCREF(value); args = value; } else args = PyTuple_Pack(1, value); if (!args) goto bad; owned_instance = PyObject_Call(type, args, NULL); Py_DECREF(args); if (!owned_instance) goto bad; value = owned_instance; if (!PyExceptionInstance_Check(value)) { PyErr_Format(PyExc_TypeError, "calling %R should have returned an instance of " "BaseException, not %R", type, Py_TYPE(value)); goto bad; } } } else { PyErr_SetString(PyExc_TypeError, "raise: exception class must be a subclass of BaseException"); goto bad; } if (cause) { PyObject *fixed_cause; if (cause == Py_None) { fixed_cause = NULL; } else if (PyExceptionClass_Check(cause)) { fixed_cause = PyObject_CallObject(cause, NULL); if (fixed_cause == NULL) goto bad; } else if (PyExceptionInstance_Check(cause)) { fixed_cause = cause; Py_INCREF(fixed_cause); } else { PyErr_SetString(PyExc_TypeError, "exception causes must derive from " "BaseException"); goto bad; } PyException_SetCause(value, fixed_cause); } PyErr_SetObject(type, value); if (tb) { #if CYTHON_COMPILING_IN_PYPY PyObject *tmp_type, *tmp_value, *tmp_tb; PyErr_Fetch(&tmp_type, &tmp_value, &tmp_tb); Py_INCREF(tb); PyErr_Restore(tmp_type, tmp_value, tb); Py_XDECREF(tmp_tb); #else PyThreadState *tstate = __Pyx_PyThreadState_Current; PyObject* tmp_tb = tstate->curexc_traceback; if (tb != tmp_tb) { Py_INCREF(tb); tstate->curexc_traceback = tb; Py_XDECREF(tmp_tb); } #endif } bad: Py_XDECREF(owned_instance); return; } #endif /* RaiseTooManyValuesToUnpack */ static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected) { PyErr_Format(PyExc_ValueError, "too many values to unpack (expected %" CYTHON_FORMAT_SSIZE_T "d)", expected); } /* RaiseNeedMoreValuesToUnpack */ static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index) { PyErr_Format(PyExc_ValueError, "need more than %" CYTHON_FORMAT_SSIZE_T "d value%.1s to unpack", index, (index == 1) ? 
"" : "s"); } /* RaiseNoneIterError */ static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable"); } /* SaveResetException */ #if CYTHON_FAST_THREAD_STATE static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { *type = tstate->exc_type; *value = tstate->exc_value; *tb = tstate->exc_traceback; Py_XINCREF(*type); Py_XINCREF(*value); Py_XINCREF(*tb); } static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) { PyObject *tmp_type, *tmp_value, *tmp_tb; tmp_type = tstate->exc_type; tmp_value = tstate->exc_value; tmp_tb = tstate->exc_traceback; tstate->exc_type = type; tstate->exc_value = value; tstate->exc_traceback = tb; Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); } #endif /* PyErrExceptionMatches */ #if CYTHON_FAST_THREAD_STATE static int __Pyx_PyErr_ExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) { Py_ssize_t i, n; n = PyTuple_GET_SIZE(tuple); #if PY_MAJOR_VERSION >= 3 for (i=0; icurexc_type; if (exc_type == err) return 1; if (unlikely(!exc_type)) return 0; if (unlikely(PyTuple_Check(err))) return __Pyx_PyErr_ExceptionMatchesTuple(exc_type, err); return __Pyx_PyErr_GivenExceptionMatches(exc_type, err); } #endif /* GetException */ #if CYTHON_FAST_THREAD_STATE static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { #else static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb) { #endif PyObject *local_type, *local_value, *local_tb; #if CYTHON_FAST_THREAD_STATE PyObject *tmp_type, *tmp_value, *tmp_tb; local_type = tstate->curexc_type; local_value = tstate->curexc_value; local_tb = tstate->curexc_traceback; tstate->curexc_type = 0; tstate->curexc_value = 0; tstate->curexc_traceback = 0; #else PyErr_Fetch(&local_type, &local_value, &local_tb); #endif PyErr_NormalizeException(&local_type, &local_value, &local_tb); #if CYTHON_FAST_THREAD_STATE if (unlikely(tstate->curexc_type)) #else if (unlikely(PyErr_Occurred())) #endif goto bad; #if PY_MAJOR_VERSION >= 3 if (local_tb) { if (unlikely(PyException_SetTraceback(local_value, local_tb) < 0)) goto bad; } #endif Py_XINCREF(local_tb); Py_XINCREF(local_type); Py_XINCREF(local_value); *type = local_type; *value = local_value; *tb = local_tb; #if CYTHON_FAST_THREAD_STATE tmp_type = tstate->exc_type; tmp_value = tstate->exc_value; tmp_tb = tstate->exc_traceback; tstate->exc_type = local_type; tstate->exc_value = local_value; tstate->exc_traceback = local_tb; Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); #else PyErr_SetExcInfo(local_type, local_value, local_tb); #endif return 0; bad: *type = 0; *value = 0; *tb = 0; Py_XDECREF(local_type); Py_XDECREF(local_value); Py_XDECREF(local_tb); return -1; } /* Import */ static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level) { PyObject *empty_list = 0; PyObject *module = 0; PyObject *global_dict = 0; PyObject *empty_dict = 0; PyObject *list; #if PY_MAJOR_VERSION < 3 PyObject *py_import; py_import = __Pyx_PyObject_GetAttrStr(__pyx_b, __pyx_n_s_import); if (!py_import) goto bad; #endif if (from_list) list = from_list; else { empty_list = PyList_New(0); if (!empty_list) goto bad; list = empty_list; } global_dict = PyModule_GetDict(__pyx_m); if (!global_dict) goto bad; empty_dict = PyDict_New(); if (!empty_dict) goto bad; { #if PY_MAJOR_VERSION >= 3 if (level == -1) { if 
(strchr(__Pyx_MODULE_NAME, '.')) { module = PyImport_ImportModuleLevelObject( name, global_dict, empty_dict, list, 1); if (!module) { if (!PyErr_ExceptionMatches(PyExc_ImportError)) goto bad; PyErr_Clear(); } } level = 0; } #endif if (!module) { #if PY_MAJOR_VERSION < 3 PyObject *py_level = PyInt_FromLong(level); if (!py_level) goto bad; module = PyObject_CallFunctionObjArgs(py_import, name, global_dict, empty_dict, list, py_level, NULL); Py_DECREF(py_level); #else module = PyImport_ImportModuleLevelObject( name, global_dict, empty_dict, list, level); #endif } } bad: #if PY_MAJOR_VERSION < 3 Py_XDECREF(py_import); #endif Py_XDECREF(empty_list); Py_XDECREF(empty_dict); return module; } /* CLineInTraceback */ #ifndef CYTHON_CLINE_IN_TRACEBACK static int __Pyx_CLineForTraceback(CYTHON_UNUSED PyThreadState *tstate, int c_line) { PyObject *use_cline; PyObject *ptype, *pvalue, *ptraceback; #if CYTHON_COMPILING_IN_CPYTHON PyObject **cython_runtime_dict; #endif __Pyx_ErrFetchInState(tstate, &ptype, &pvalue, &ptraceback); #if CYTHON_COMPILING_IN_CPYTHON cython_runtime_dict = _PyObject_GetDictPtr(__pyx_cython_runtime); if (likely(cython_runtime_dict)) { use_cline = PyDict_GetItem(*cython_runtime_dict, __pyx_n_s_cline_in_traceback); } else #endif { PyObject *use_cline_obj = __Pyx_PyObject_GetAttrStr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback); if (use_cline_obj) { use_cline = PyObject_Not(use_cline_obj) ? Py_False : Py_True; Py_DECREF(use_cline_obj); } else { PyErr_Clear(); use_cline = NULL; } } if (!use_cline) { c_line = 0; PyObject_SetAttr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback, Py_False); } else if (PyObject_Not(use_cline) != 0) { c_line = 0; } __Pyx_ErrRestoreInState(tstate, ptype, pvalue, ptraceback); return c_line; } #endif /* CodeObjectCache */ static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) { int start = 0, mid = 0, end = count - 1; if (end >= 0 && code_line > entries[end].code_line) { return count; } while (start < end) { mid = start + (end - start) / 2; if (code_line < entries[mid].code_line) { end = mid; } else if (code_line > entries[mid].code_line) { start = mid + 1; } else { return mid; } } if (code_line <= entries[mid].code_line) { return mid; } else { return mid + 1; } } static PyCodeObject *__pyx_find_code_object(int code_line) { PyCodeObject* code_object; int pos; if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) { return NULL; } pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); if (unlikely(pos >= __pyx_code_cache.count) || unlikely(__pyx_code_cache.entries[pos].code_line != code_line)) { return NULL; } code_object = __pyx_code_cache.entries[pos].code_object; Py_INCREF(code_object); return code_object; } static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) { int pos, i; __Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries; if (unlikely(!code_line)) { return; } if (unlikely(!entries)) { entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry)); if (likely(entries)) { __pyx_code_cache.entries = entries; __pyx_code_cache.max_count = 64; __pyx_code_cache.count = 1; entries[0].code_line = code_line; entries[0].code_object = code_object; Py_INCREF(code_object); } return; } pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) { PyCodeObject* tmp = 
entries[pos].code_object; entries[pos].code_object = code_object; Py_DECREF(tmp); return; } if (__pyx_code_cache.count == __pyx_code_cache.max_count) { int new_max = __pyx_code_cache.max_count + 64; entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc( __pyx_code_cache.entries, (size_t)new_max*sizeof(__Pyx_CodeObjectCacheEntry)); if (unlikely(!entries)) { return; } __pyx_code_cache.entries = entries; __pyx_code_cache.max_count = new_max; } for (i=__pyx_code_cache.count; i>pos; i--) { entries[i] = entries[i-1]; } entries[pos].code_line = code_line; entries[pos].code_object = code_object; __pyx_code_cache.count++; Py_INCREF(code_object); } /* AddTraceback */ #include "compile.h" #include "frameobject.h" #include "traceback.h" static PyCodeObject* __Pyx_CreateCodeObjectForTraceback( const char *funcname, int c_line, int py_line, const char *filename) { PyCodeObject *py_code = 0; PyObject *py_srcfile = 0; PyObject *py_funcname = 0; #if PY_MAJOR_VERSION < 3 py_srcfile = PyString_FromString(filename); #else py_srcfile = PyUnicode_FromString(filename); #endif if (!py_srcfile) goto bad; if (c_line) { #if PY_MAJOR_VERSION < 3 py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); #else py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); #endif } else { #if PY_MAJOR_VERSION < 3 py_funcname = PyString_FromString(funcname); #else py_funcname = PyUnicode_FromString(funcname); #endif } if (!py_funcname) goto bad; py_code = __Pyx_PyCode_New( 0, 0, 0, 0, 0, __pyx_empty_bytes, /*PyObject *code,*/ __pyx_empty_tuple, /*PyObject *consts,*/ __pyx_empty_tuple, /*PyObject *names,*/ __pyx_empty_tuple, /*PyObject *varnames,*/ __pyx_empty_tuple, /*PyObject *freevars,*/ __pyx_empty_tuple, /*PyObject *cellvars,*/ py_srcfile, /*PyObject *filename,*/ py_funcname, /*PyObject *name,*/ py_line, __pyx_empty_bytes /*PyObject *lnotab*/ ); Py_DECREF(py_srcfile); Py_DECREF(py_funcname); return py_code; bad: Py_XDECREF(py_srcfile); Py_XDECREF(py_funcname); return NULL; } static void __Pyx_AddTraceback(const char *funcname, int c_line, int py_line, const char *filename) { PyCodeObject *py_code = 0; PyFrameObject *py_frame = 0; PyThreadState *tstate = __Pyx_PyThreadState_Current; if (c_line) { c_line = __Pyx_CLineForTraceback(tstate, c_line); } py_code = __pyx_find_code_object(c_line ? -c_line : py_line); if (!py_code) { py_code = __Pyx_CreateCodeObjectForTraceback( funcname, c_line, py_line, filename); if (!py_code) goto bad; __pyx_insert_code_object(c_line ? 
-c_line : py_line, py_code); } py_frame = PyFrame_New( tstate, /*PyThreadState *tstate,*/ py_code, /*PyCodeObject *code,*/ __pyx_d, /*PyObject *globals,*/ 0 /*PyObject *locals*/ ); if (!py_frame) goto bad; __Pyx_PyFrame_SetLineNumber(py_frame, py_line); PyTraceBack_Here(py_frame); bad: Py_XDECREF(py_code); Py_XDECREF(py_frame); } #if PY_MAJOR_VERSION < 3 static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags) { if (PyObject_CheckBuffer(obj)) return PyObject_GetBuffer(obj, view, flags); if (__Pyx_TypeCheck(obj, __pyx_ptype_5numpy_ndarray)) return __pyx_pw_5numpy_7ndarray_1__getbuffer__(obj, view, flags); PyErr_Format(PyExc_TypeError, "'%.200s' does not have the buffer interface", Py_TYPE(obj)->tp_name); return -1; } static void __Pyx_ReleaseBuffer(Py_buffer *view) { PyObject *obj = view->obj; if (!obj) return; if (PyObject_CheckBuffer(obj)) { PyBuffer_Release(view); return; } if ((0)) {} else if (__Pyx_TypeCheck(obj, __pyx_ptype_5numpy_ndarray)) __pyx_pw_5numpy_7ndarray_3__releasebuffer__(obj, view); view->obj = NULL; Py_DECREF(obj); } #endif /* CIntFromPyVerify */ #define __PYX_VERIFY_RETURN_INT(target_type, func_type, func_value)\ __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 0) #define __PYX_VERIFY_RETURN_INT_EXC(target_type, func_type, func_value)\ __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 1) #define __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, exc)\ {\ func_type value = func_value;\ if (sizeof(target_type) < sizeof(func_type)) {\ if (unlikely(value != (func_type) (target_type) value)) {\ func_type zero = 0;\ if (exc && unlikely(value == (func_type)-1 && PyErr_Occurred()))\ return (target_type) -1;\ if (is_unsigned && unlikely(value < zero))\ goto raise_neg_overflow;\ else\ goto raise_overflow;\ }\ }\ return (target_type) value;\ } /* CIntToPy */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value) { const int neg_one = (int) -1, const_zero = (int) 0; const int is_unsigned = neg_one > const_zero; if (is_unsigned) { if (sizeof(int) < sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(int) <= sizeof(unsigned long)) { return PyLong_FromUnsignedLong((unsigned long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) { return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); #endif } } else { if (sizeof(int) <= sizeof(long)) { return PyInt_FromLong((long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) { return PyLong_FromLongLong((PY_LONG_LONG) value); #endif } } { int one = 1; int little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&value; return _PyLong_FromByteArray(bytes, sizeof(int), little, !is_unsigned); } } /* Declarations */ #if CYTHON_CCOMPLEX #ifdef __cplusplus static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { return ::std::complex< float >(x, y); } #else static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { return x + y*(__pyx_t_float_complex)_Complex_I; } #endif #else static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { __pyx_t_float_complex z; z.real = x; z.imag = y; return z; } #endif /* Arithmetic */ #if CYTHON_CCOMPLEX #else static CYTHON_INLINE int __Pyx_c_eq_float(__pyx_t_float_complex a, __pyx_t_float_complex b) { return (a.real == b.real) && (a.imag == b.imag); } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sum_float(__pyx_t_float_complex a, 
__pyx_t_float_complex b) { __pyx_t_float_complex z; z.real = a.real + b.real; z.imag = a.imag + b.imag; return z; } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_diff_float(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; z.real = a.real - b.real; z.imag = a.imag - b.imag; return z; } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prod_float(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; z.real = a.real * b.real - a.imag * b.imag; z.imag = a.real * b.imag + a.imag * b.real; return z; } #if 1 static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quot_float(__pyx_t_float_complex a, __pyx_t_float_complex b) { if (b.imag == 0) { return __pyx_t_float_complex_from_parts(a.real / b.real, a.imag / b.real); } else if (fabsf(b.real) >= fabsf(b.imag)) { if (b.real == 0 && b.imag == 0) { return __pyx_t_float_complex_from_parts(a.real / b.real, a.imag / b.imag); } else { float r = b.imag / b.real; float s = 1.0 / (b.real + b.imag * r); return __pyx_t_float_complex_from_parts( (a.real + a.imag * r) * s, (a.imag - a.real * r) * s); } } else { float r = b.real / b.imag; float s = 1.0 / (b.imag + b.real * r); return __pyx_t_float_complex_from_parts( (a.real * r + a.imag) * s, (a.imag * r - a.real) * s); } } #else static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quot_float(__pyx_t_float_complex a, __pyx_t_float_complex b) { if (b.imag == 0) { return __pyx_t_float_complex_from_parts(a.real / b.real, a.imag / b.real); } else { float denom = b.real * b.real + b.imag * b.imag; return __pyx_t_float_complex_from_parts( (a.real * b.real + a.imag * b.imag) / denom, (a.imag * b.real - a.real * b.imag) / denom); } } #endif static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_neg_float(__pyx_t_float_complex a) { __pyx_t_float_complex z; z.real = -a.real; z.imag = -a.imag; return z; } static CYTHON_INLINE int __Pyx_c_is_zero_float(__pyx_t_float_complex a) { return (a.real == 0) && (a.imag == 0); } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conj_float(__pyx_t_float_complex a) { __pyx_t_float_complex z; z.real = a.real; z.imag = -a.imag; return z; } #if 1 static CYTHON_INLINE float __Pyx_c_abs_float(__pyx_t_float_complex z) { #if !defined(HAVE_HYPOT) || defined(_MSC_VER) return sqrtf(z.real*z.real + z.imag*z.imag); #else return hypotf(z.real, z.imag); #endif } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_pow_float(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; float r, lnr, theta, z_r, z_theta; if (b.imag == 0 && b.real == (int)b.real) { if (b.real < 0) { float denom = a.real * a.real + a.imag * a.imag; a.real = a.real / denom; a.imag = -a.imag / denom; b.real = -b.real; } switch ((int)b.real) { case 0: z.real = 1; z.imag = 0; return z; case 1: return a; case 2: z = __Pyx_c_prod_float(a, a); return __Pyx_c_prod_float(a, a); case 3: z = __Pyx_c_prod_float(a, a); return __Pyx_c_prod_float(z, a); case 4: z = __Pyx_c_prod_float(a, a); return __Pyx_c_prod_float(z, z); } } if (a.imag == 0) { if (a.real == 0) { return a; } else if (b.imag == 0) { z.real = powf(a.real, b.real); z.imag = 0; return z; } else if (a.real > 0) { r = a.real; theta = 0; } else { r = -a.real; theta = atan2f(0, -1); } } else { r = __Pyx_c_abs_float(a); theta = atan2f(a.imag, a.real); } lnr = logf(r); z_r = expf(lnr * b.real - theta * b.imag); z_theta = theta * b.real + lnr * b.imag; z.real = z_r * cosf(z_theta); z.imag = z_r * sinf(z_theta); return z; } #endif #endif /* Declarations */ #if CYTHON_CCOMPLEX #ifdef __cplusplus static CYTHON_INLINE 
__pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { return ::std::complex< double >(x, y); } #else static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { return x + y*(__pyx_t_double_complex)_Complex_I; } #endif #else static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { __pyx_t_double_complex z; z.real = x; z.imag = y; return z; } #endif /* Arithmetic */ #if CYTHON_CCOMPLEX #else static CYTHON_INLINE int __Pyx_c_eq_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { return (a.real == b.real) && (a.imag == b.imag); } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; z.real = a.real + b.real; z.imag = a.imag + b.imag; return z; } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; z.real = a.real - b.real; z.imag = a.imag - b.imag; return z; } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; z.real = a.real * b.real - a.imag * b.imag; z.imag = a.real * b.imag + a.imag * b.real; return z; } #if 1 static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { if (b.imag == 0) { return __pyx_t_double_complex_from_parts(a.real / b.real, a.imag / b.real); } else if (fabs(b.real) >= fabs(b.imag)) { if (b.real == 0 && b.imag == 0) { return __pyx_t_double_complex_from_parts(a.real / b.real, a.imag / b.imag); } else { double r = b.imag / b.real; double s = 1.0 / (b.real + b.imag * r); return __pyx_t_double_complex_from_parts( (a.real + a.imag * r) * s, (a.imag - a.real * r) * s); } } else { double r = b.real / b.imag; double s = 1.0 / (b.imag + b.real * r); return __pyx_t_double_complex_from_parts( (a.real * r + a.imag) * s, (a.imag * r - a.real) * s); } } #else static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { if (b.imag == 0) { return __pyx_t_double_complex_from_parts(a.real / b.real, a.imag / b.real); } else { double denom = b.real * b.real + b.imag * b.imag; return __pyx_t_double_complex_from_parts( (a.real * b.real + a.imag * b.imag) / denom, (a.imag * b.real - a.real * b.imag) / denom); } } #endif static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg_double(__pyx_t_double_complex a) { __pyx_t_double_complex z; z.real = -a.real; z.imag = -a.imag; return z; } static CYTHON_INLINE int __Pyx_c_is_zero_double(__pyx_t_double_complex a) { return (a.real == 0) && (a.imag == 0); } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj_double(__pyx_t_double_complex a) { __pyx_t_double_complex z; z.real = a.real; z.imag = -a.imag; return z; } #if 1 static CYTHON_INLINE double __Pyx_c_abs_double(__pyx_t_double_complex z) { #if !defined(HAVE_HYPOT) || defined(_MSC_VER) return sqrt(z.real*z.real + z.imag*z.imag); #else return hypot(z.real, z.imag); #endif } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; double r, lnr, theta, z_r, z_theta; if (b.imag == 0 && b.real == (int)b.real) { if (b.real < 0) { double denom = a.real * a.real + a.imag * a.imag; a.real = a.real / denom; a.imag = -a.imag / denom; b.real = -b.real; } switch ((int)b.real) { case 0: z.real = 1; z.imag = 0; return z; case 1: 
return a; case 2: z = __Pyx_c_prod_double(a, a); return __Pyx_c_prod_double(a, a); case 3: z = __Pyx_c_prod_double(a, a); return __Pyx_c_prod_double(z, a); case 4: z = __Pyx_c_prod_double(a, a); return __Pyx_c_prod_double(z, z); } } if (a.imag == 0) { if (a.real == 0) { return a; } else if (b.imag == 0) { z.real = pow(a.real, b.real); z.imag = 0; return z; } else if (a.real > 0) { r = a.real; theta = 0; } else { r = -a.real; theta = atan2(0, -1); } } else { r = __Pyx_c_abs_double(a); theta = atan2(a.imag, a.real); } lnr = log(r); z_r = exp(lnr * b.real - theta * b.imag); z_theta = theta * b.real + lnr * b.imag; z.real = z_r * cos(z_theta); z.imag = z_r * sin(z_theta); return z; } #endif #endif /* CIntToPy */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_enum__NPY_TYPES(enum NPY_TYPES value) { const enum NPY_TYPES neg_one = (enum NPY_TYPES) -1, const_zero = (enum NPY_TYPES) 0; const int is_unsigned = neg_one > const_zero; if (is_unsigned) { if (sizeof(enum NPY_TYPES) < sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(enum NPY_TYPES) <= sizeof(unsigned long)) { return PyLong_FromUnsignedLong((unsigned long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(enum NPY_TYPES) <= sizeof(unsigned PY_LONG_LONG)) { return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); #endif } } else { if (sizeof(enum NPY_TYPES) <= sizeof(long)) { return PyInt_FromLong((long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(enum NPY_TYPES) <= sizeof(PY_LONG_LONG)) { return PyLong_FromLongLong((PY_LONG_LONG) value); #endif } } { int one = 1; int little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&value; return _PyLong_FromByteArray(bytes, sizeof(enum NPY_TYPES), little, !is_unsigned); } } /* CIntFromPy */ static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *x) { const int neg_one = (int) -1, const_zero = (int) 0; const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(int) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(int, long, PyInt_AS_LONG(x)) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { goto raise_neg_overflow; } return (int) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (int) 0; case 1: __PYX_VERIFY_RETURN_INT(int, digit, digits[0]) case 2: if (8 * sizeof(int) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) >= 2 * PyLong_SHIFT) { return (int) (((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); } } break; case 3: if (8 * sizeof(int) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) >= 3 * PyLong_SHIFT) { return (int) (((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); } } break; case 4: if (8 * sizeof(int) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } 
else if (8 * sizeof(int) >= 4 * PyLong_SHIFT) { return (int) (((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); } } break; } #endif #if CYTHON_COMPILING_IN_CPYTHON if (unlikely(Py_SIZE(x) < 0)) { goto raise_neg_overflow; } #else { int result = PyObject_RichCompareBool(x, Py_False, Py_LT); if (unlikely(result < 0)) return (int) -1; if (unlikely(result == 1)) goto raise_neg_overflow; } #endif if (sizeof(int) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT_EXC(int, unsigned long, PyLong_AsUnsignedLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(int, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) #endif } } else { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (int) 0; case -1: __PYX_VERIFY_RETURN_INT(int, sdigit, (sdigit) (-(sdigit)digits[0])) case 1: __PYX_VERIFY_RETURN_INT(int, digit, +digits[0]) case -2: if (8 * sizeof(int) - 1 > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { return (int) (((int)-1)*(((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case 2: if (8 * sizeof(int) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { return (int) ((((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case -3: if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { return (int) (((int)-1)*(((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case 3: if (8 * sizeof(int) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { return (int) ((((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case -4: if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) { return (int) (((int)-1)*(((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case 4: if (8 * sizeof(int) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * 
sizeof(int) - 1 > 4 * PyLong_SHIFT) { return (int) ((((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; } #endif if (sizeof(int) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT_EXC(int, long, PyLong_AsLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(int, PY_LONG_LONG, PyLong_AsLongLong(x)) #endif } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else int val; PyObject *v = __Pyx_PyNumber_IntOrLong(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (int) -1; } } else { int val; PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); if (!tmp) return (int) -1; val = __Pyx_PyInt_As_int(tmp); Py_DECREF(tmp); return val; } raise_overflow: PyErr_SetString(PyExc_OverflowError, "value too large to convert to int"); return (int) -1; raise_neg_overflow: PyErr_SetString(PyExc_OverflowError, "can't convert negative value to int"); return (int) -1; } /* CIntFromPy */ static CYTHON_INLINE unsigned int __Pyx_PyInt_As_unsigned_int(PyObject *x) { const unsigned int neg_one = (unsigned int) -1, const_zero = (unsigned int) 0; const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(unsigned int) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(unsigned int, long, PyInt_AS_LONG(x)) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { goto raise_neg_overflow; } return (unsigned int) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (unsigned int) 0; case 1: __PYX_VERIFY_RETURN_INT(unsigned int, digit, digits[0]) case 2: if (8 * sizeof(unsigned int) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(unsigned int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(unsigned int) >= 2 * PyLong_SHIFT) { return (unsigned int) (((((unsigned int)digits[1]) << PyLong_SHIFT) | (unsigned int)digits[0])); } } break; case 3: if (8 * sizeof(unsigned int) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(unsigned int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(unsigned int) >= 3 * PyLong_SHIFT) { return (unsigned int) (((((((unsigned int)digits[2]) << PyLong_SHIFT) | (unsigned int)digits[1]) << PyLong_SHIFT) | (unsigned int)digits[0])); } } break; case 4: if (8 * sizeof(unsigned int) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(unsigned int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(unsigned int) >= 4 * 
PyLong_SHIFT) { return (unsigned int) (((((((((unsigned int)digits[3]) << PyLong_SHIFT) | (unsigned int)digits[2]) << PyLong_SHIFT) | (unsigned int)digits[1]) << PyLong_SHIFT) | (unsigned int)digits[0])); } } break; } #endif #if CYTHON_COMPILING_IN_CPYTHON if (unlikely(Py_SIZE(x) < 0)) { goto raise_neg_overflow; } #else { int result = PyObject_RichCompareBool(x, Py_False, Py_LT); if (unlikely(result < 0)) return (unsigned int) -1; if (unlikely(result == 1)) goto raise_neg_overflow; } #endif if (sizeof(unsigned int) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT_EXC(unsigned int, unsigned long, PyLong_AsUnsignedLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(unsigned int) <= sizeof(unsigned PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(unsigned int, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) #endif } } else { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (unsigned int) 0; case -1: __PYX_VERIFY_RETURN_INT(unsigned int, sdigit, (sdigit) (-(sdigit)digits[0])) case 1: __PYX_VERIFY_RETURN_INT(unsigned int, digit, +digits[0]) case -2: if (8 * sizeof(unsigned int) - 1 > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(unsigned int, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(unsigned int) - 1 > 2 * PyLong_SHIFT) { return (unsigned int) (((unsigned int)-1)*(((((unsigned int)digits[1]) << PyLong_SHIFT) | (unsigned int)digits[0]))); } } break; case 2: if (8 * sizeof(unsigned int) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(unsigned int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(unsigned int) - 1 > 2 * PyLong_SHIFT) { return (unsigned int) ((((((unsigned int)digits[1]) << PyLong_SHIFT) | (unsigned int)digits[0]))); } } break; case -3: if (8 * sizeof(unsigned int) - 1 > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(unsigned int, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(unsigned int) - 1 > 3 * PyLong_SHIFT) { return (unsigned int) (((unsigned int)-1)*(((((((unsigned int)digits[2]) << PyLong_SHIFT) | (unsigned int)digits[1]) << PyLong_SHIFT) | (unsigned int)digits[0]))); } } break; case 3: if (8 * sizeof(unsigned int) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(unsigned int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(unsigned int) - 1 > 3 * PyLong_SHIFT) { return (unsigned int) ((((((((unsigned int)digits[2]) << PyLong_SHIFT) | (unsigned int)digits[1]) << PyLong_SHIFT) | (unsigned int)digits[0]))); } } break; case -4: if (8 * sizeof(unsigned int) - 1 > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(unsigned int, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(unsigned int) - 1 > 4 * PyLong_SHIFT) { return (unsigned int) (((unsigned int)-1)*(((((((((unsigned int)digits[3]) << PyLong_SHIFT) | (unsigned int)digits[2]) << PyLong_SHIFT) | 
(unsigned int)digits[1]) << PyLong_SHIFT) | (unsigned int)digits[0]))); } } break; case 4: if (8 * sizeof(unsigned int) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(unsigned int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(unsigned int) - 1 > 4 * PyLong_SHIFT) { return (unsigned int) ((((((((((unsigned int)digits[3]) << PyLong_SHIFT) | (unsigned int)digits[2]) << PyLong_SHIFT) | (unsigned int)digits[1]) << PyLong_SHIFT) | (unsigned int)digits[0]))); } } break; } #endif if (sizeof(unsigned int) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT_EXC(unsigned int, long, PyLong_AsLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(unsigned int) <= sizeof(PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(unsigned int, PY_LONG_LONG, PyLong_AsLongLong(x)) #endif } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else unsigned int val; PyObject *v = __Pyx_PyNumber_IntOrLong(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (unsigned int) -1; } } else { unsigned int val; PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); if (!tmp) return (unsigned int) -1; val = __Pyx_PyInt_As_unsigned_int(tmp); Py_DECREF(tmp); return val; } raise_overflow: PyErr_SetString(PyExc_OverflowError, "value too large to convert to unsigned int"); return (unsigned int) -1; raise_neg_overflow: PyErr_SetString(PyExc_OverflowError, "can't convert negative value to unsigned int"); return (unsigned int) -1; } /* CIntToPy */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value) { const long neg_one = (long) -1, const_zero = (long) 0; const int is_unsigned = neg_one > const_zero; if (is_unsigned) { if (sizeof(long) < sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(long) <= sizeof(unsigned long)) { return PyLong_FromUnsignedLong((unsigned long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) { return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); #endif } } else { if (sizeof(long) <= sizeof(long)) { return PyInt_FromLong((long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) { return PyLong_FromLongLong((PY_LONG_LONG) value); #endif } } { int one = 1; int little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&value; return _PyLong_FromByteArray(bytes, sizeof(long), little, !is_unsigned); } } /* CIntFromPy */ static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *x) { const long neg_one = (long) -1, const_zero = (long) 0; const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(long) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(long, long, PyInt_AS_LONG(x)) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { goto raise_neg_overflow; } return (long) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if 
CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (long) 0; case 1: __PYX_VERIFY_RETURN_INT(long, digit, digits[0]) case 2: if (8 * sizeof(long) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) >= 2 * PyLong_SHIFT) { return (long) (((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); } } break; case 3: if (8 * sizeof(long) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) >= 3 * PyLong_SHIFT) { return (long) (((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); } } break; case 4: if (8 * sizeof(long) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) >= 4 * PyLong_SHIFT) { return (long) (((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); } } break; } #endif #if CYTHON_COMPILING_IN_CPYTHON if (unlikely(Py_SIZE(x) < 0)) { goto raise_neg_overflow; } #else { int result = PyObject_RichCompareBool(x, Py_False, Py_LT); if (unlikely(result < 0)) return (long) -1; if (unlikely(result == 1)) goto raise_neg_overflow; } #endif if (sizeof(long) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT_EXC(long, unsigned long, PyLong_AsUnsignedLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(long, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) #endif } } else { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (long) 0; case -1: __PYX_VERIFY_RETURN_INT(long, sdigit, (sdigit) (-(sdigit)digits[0])) case 1: __PYX_VERIFY_RETURN_INT(long, digit, +digits[0]) case -2: if (8 * sizeof(long) - 1 > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { return (long) (((long)-1)*(((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case 2: if (8 * sizeof(long) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { return (long) ((((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case -3: if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { return (long) (((long)-1)*(((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | 
(long)digits[0]))); } } break; case 3: if (8 * sizeof(long) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { return (long) ((((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case -4: if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { return (long) (((long)-1)*(((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case 4: if (8 * sizeof(long) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { return (long) ((((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; } #endif if (sizeof(long) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT_EXC(long, long, PyLong_AsLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(long, PY_LONG_LONG, PyLong_AsLongLong(x)) #endif } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else long val; PyObject *v = __Pyx_PyNumber_IntOrLong(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (long) -1; } } else { long val; PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); if (!tmp) return (long) -1; val = __Pyx_PyInt_As_long(tmp); Py_DECREF(tmp); return val; } raise_overflow: PyErr_SetString(PyExc_OverflowError, "value too large to convert to long"); return (long) -1; raise_neg_overflow: PyErr_SetString(PyExc_OverflowError, "can't convert negative value to long"); return (long) -1; } /* FastTypeChecks */ #if CYTHON_COMPILING_IN_CPYTHON static int __Pyx_InBases(PyTypeObject *a, PyTypeObject *b) { while (a) { a = a->tp_base; if (a == b) return 1; } return b == &PyBaseObject_Type; } static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b) { PyObject *mro; if (a == b) return 1; mro = a->tp_mro; if (likely(mro)) { Py_ssize_t i, n; n = PyTuple_GET_SIZE(mro); for (i = 0; i < n; i++) { if (PyTuple_GET_ITEM(mro, i) == (PyObject *)b) return 1; } return 0; } return __Pyx_InBases(a, b); } #if PY_MAJOR_VERSION == 2 static int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject* exc_type2) { PyObject *exception, *value, *tb; 
int res; __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ErrFetch(&exception, &value, &tb); res = exc_type1 ? PyObject_IsSubclass(err, exc_type1) : 0; if (unlikely(res == -1)) { PyErr_WriteUnraisable(err); res = 0; } if (!res) { res = PyObject_IsSubclass(err, exc_type2); if (unlikely(res == -1)) { PyErr_WriteUnraisable(err); res = 0; } } __Pyx_ErrRestore(exception, value, tb); return res; } #else static CYTHON_INLINE int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject *exc_type2) { int res = exc_type1 ? __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type1) : 0; if (!res) { res = __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type2); } return res; } #endif static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject* exc_type) { if (likely(err == exc_type)) return 1; if (likely(PyExceptionClass_Check(err))) { return __Pyx_inner_PyErr_GivenExceptionMatches2(err, NULL, exc_type); } return PyErr_GivenExceptionMatches(err, exc_type); } static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *exc_type1, PyObject *exc_type2) { if (likely(err == exc_type1 || err == exc_type2)) return 1; if (likely(PyExceptionClass_Check(err))) { return __Pyx_inner_PyErr_GivenExceptionMatches2(err, exc_type1, exc_type2); } return (PyErr_GivenExceptionMatches(err, exc_type1) || PyErr_GivenExceptionMatches(err, exc_type2)); } #endif /* CheckBinaryVersion */ static int __Pyx_check_binary_version(void) { char ctversion[4], rtversion[4]; PyOS_snprintf(ctversion, 4, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION); PyOS_snprintf(rtversion, 4, "%s", Py_GetVersion()); if (ctversion[0] != rtversion[0] || ctversion[2] != rtversion[2]) { char message[200]; PyOS_snprintf(message, sizeof(message), "compiletime version %s of module '%.100s' " "does not match runtime version %s", ctversion, __Pyx_MODULE_NAME, rtversion); return PyErr_WarnEx(NULL, message, 1); } return 0; } /* ModuleImport */ #ifndef __PYX_HAVE_RT_ImportModule #define __PYX_HAVE_RT_ImportModule static PyObject *__Pyx_ImportModule(const char *name) { PyObject *py_name = 0; PyObject *py_module = 0; py_name = __Pyx_PyIdentifier_FromString(name); if (!py_name) goto bad; py_module = PyImport_Import(py_name); Py_DECREF(py_name); return py_module; bad: Py_XDECREF(py_name); return 0; } #endif /* TypeImport */ #ifndef __PYX_HAVE_RT_ImportType #define __PYX_HAVE_RT_ImportType static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class_name, size_t size, int strict) { PyObject *py_module = 0; PyObject *result = 0; PyObject *py_name = 0; char warning[200]; Py_ssize_t basicsize; #ifdef Py_LIMITED_API PyObject *py_basicsize; #endif py_module = __Pyx_ImportModule(module_name); if (!py_module) goto bad; py_name = __Pyx_PyIdentifier_FromString(class_name); if (!py_name) goto bad; result = PyObject_GetAttr(py_module, py_name); Py_DECREF(py_name); py_name = 0; Py_DECREF(py_module); py_module = 0; if (!result) goto bad; if (!PyType_Check(result)) { PyErr_Format(PyExc_TypeError, "%.200s.%.200s is not a type object", module_name, class_name); goto bad; } #ifndef Py_LIMITED_API basicsize = ((PyTypeObject *)result)->tp_basicsize; #else py_basicsize = PyObject_GetAttrString(result, "__basicsize__"); if (!py_basicsize) goto bad; basicsize = PyLong_AsSsize_t(py_basicsize); Py_DECREF(py_basicsize); py_basicsize = 0; if (basicsize == (Py_ssize_t)-1 && PyErr_Occurred()) goto bad; #endif if (!strict && (size_t)basicsize > size) { PyOS_snprintf(warning, 
sizeof(warning), "%s.%s size changed, may indicate binary incompatibility. Expected %zd, got %zd", module_name, class_name, basicsize, size); if (PyErr_WarnEx(NULL, warning, 0) < 0) goto bad; } else if ((size_t)basicsize != size) { PyErr_Format(PyExc_ValueError, "%.200s.%.200s has the wrong size, try recompiling. Expected %zd, got %zd", module_name, class_name, basicsize, size); goto bad; } return (PyTypeObject *)result; bad: Py_XDECREF(py_module); Py_XDECREF(result); return NULL; } #endif /* FunctionImport */ #ifndef __PYX_HAVE_RT_ImportFunction #define __PYX_HAVE_RT_ImportFunction static int __Pyx_ImportFunction(PyObject *module, const char *funcname, void (**f)(void), const char *sig) { PyObject *d = 0; PyObject *cobj = 0; union { void (*fp)(void); void *p; } tmp; d = PyObject_GetAttrString(module, (char *)"__pyx_capi__"); if (!d) goto bad; cobj = PyDict_GetItemString(d, funcname); if (!cobj) { PyErr_Format(PyExc_ImportError, "%.200s does not export expected C function %.200s", PyModule_GetName(module), funcname); goto bad; } #if PY_VERSION_HEX >= 0x02070000 if (!PyCapsule_IsValid(cobj, sig)) { PyErr_Format(PyExc_TypeError, "C function %.200s.%.200s has wrong signature (expected %.500s, got %.500s)", PyModule_GetName(module), funcname, sig, PyCapsule_GetName(cobj)); goto bad; } tmp.p = PyCapsule_GetPointer(cobj, sig); #else {const char *desc, *s1, *s2; desc = (const char *)PyCObject_GetDesc(cobj); if (!desc) goto bad; s1 = desc; s2 = sig; while (*s1 != '\0' && *s1 == *s2) { s1++; s2++; } if (*s1 != *s2) { PyErr_Format(PyExc_TypeError, "C function %.200s.%.200s has wrong signature (expected %.500s, got %.500s)", PyModule_GetName(module), funcname, sig, desc); goto bad; } tmp.p = PyCObject_AsVoidPtr(cobj);} #endif *f = tmp.fp; if (!(*f)) goto bad; Py_DECREF(d); return 0; bad: Py_XDECREF(d); return -1; } #endif /* InitStrings */ static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) { while (t->p) { #if PY_MAJOR_VERSION < 3 if (t->is_unicode) { *t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL); } else if (t->intern) { *t->p = PyString_InternFromString(t->s); } else { *t->p = PyString_FromStringAndSize(t->s, t->n - 1); } #else if (t->is_unicode | t->is_str) { if (t->intern) { *t->p = PyUnicode_InternFromString(t->s); } else if (t->encoding) { *t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL); } else { *t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1); } } else { *t->p = PyBytes_FromStringAndSize(t->s, t->n - 1); } #endif if (!*t->p) return -1; if (PyObject_Hash(*t->p) == -1) PyErr_Clear(); ++t; } return 0; } static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char* c_str) { return __Pyx_PyUnicode_FromStringAndSize(c_str, (Py_ssize_t)strlen(c_str)); } static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject* o) { Py_ssize_t ignore; return __Pyx_PyObject_AsStringAndSize(o, &ignore); } #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT #if !CYTHON_PEP393_ENABLED static const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) { char* defenc_c; PyObject* defenc = _PyUnicode_AsDefaultEncodedString(o, NULL); if (!defenc) return NULL; defenc_c = PyBytes_AS_STRING(defenc); #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII { char* end = defenc_c + PyBytes_GET_SIZE(defenc); char* c; for (c = defenc_c; c < end; c++) { if ((unsigned char) (*c) >= 128) { PyUnicode_AsASCIIString(o); return NULL; } } } #endif *length = PyBytes_GET_SIZE(defenc); return defenc_c; } #else static CYTHON_INLINE const char* 
__Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) { if (unlikely(__Pyx_PyUnicode_READY(o) == -1)) return NULL; #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII if (likely(PyUnicode_IS_ASCII(o))) { *length = PyUnicode_GET_LENGTH(o); return PyUnicode_AsUTF8(o); } else { PyUnicode_AsASCIIString(o); return NULL; } #else return PyUnicode_AsUTF8AndSize(o, length); #endif } #endif #endif static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject* o, Py_ssize_t *length) { #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT if ( #if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII __Pyx_sys_getdefaultencoding_not_ascii && #endif PyUnicode_Check(o)) { return __Pyx_PyUnicode_AsStringAndSize(o, length); } else #endif #if (!CYTHON_COMPILING_IN_PYPY) || (defined(PyByteArray_AS_STRING) && defined(PyByteArray_GET_SIZE)) if (PyByteArray_Check(o)) { *length = PyByteArray_GET_SIZE(o); return PyByteArray_AS_STRING(o); } else #endif { char* result; int r = PyBytes_AsStringAndSize(o, &result, length); if (unlikely(r < 0)) { return NULL; } else { return result; } } } static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) { int is_true = x == Py_True; if (is_true | (x == Py_False) | (x == Py_None)) return is_true; else return PyObject_IsTrue(x); } static PyObject* __Pyx_PyNumber_IntOrLongWrongResultType(PyObject* result, const char* type_name) { #if PY_MAJOR_VERSION >= 3 if (PyLong_Check(result)) { if (PyErr_WarnFormat(PyExc_DeprecationWarning, 1, "__int__ returned non-int (type %.200s). " "The ability to return an instance of a strict subclass of int " "is deprecated, and may be removed in a future version of Python.", Py_TYPE(result)->tp_name)) { Py_DECREF(result); return NULL; } return result; } #endif PyErr_Format(PyExc_TypeError, "__%.4s__ returned non-%.4s (type %.200s)", type_name, type_name, Py_TYPE(result)->tp_name); Py_DECREF(result); return NULL; } static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x) { #if CYTHON_USE_TYPE_SLOTS PyNumberMethods *m; #endif const char *name = NULL; PyObject *res = NULL; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x) || PyLong_Check(x))) #else if (likely(PyLong_Check(x))) #endif return __Pyx_NewRef(x); #if CYTHON_USE_TYPE_SLOTS m = Py_TYPE(x)->tp_as_number; #if PY_MAJOR_VERSION < 3 if (m && m->nb_int) { name = "int"; res = m->nb_int(x); } else if (m && m->nb_long) { name = "long"; res = m->nb_long(x); } #else if (likely(m && m->nb_int)) { name = "int"; res = m->nb_int(x); } #endif #else if (!PyBytes_CheckExact(x) && !PyUnicode_CheckExact(x)) { res = PyNumber_Int(x); } #endif if (likely(res)) { #if PY_MAJOR_VERSION < 3 if (unlikely(!PyInt_Check(res) && !PyLong_Check(res))) { #else if (unlikely(!PyLong_CheckExact(res))) { #endif return __Pyx_PyNumber_IntOrLongWrongResultType(res, name); } } else if (!PyErr_Occurred()) { PyErr_SetString(PyExc_TypeError, "an integer is required"); } return res; } static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) { Py_ssize_t ival; PyObject *x; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_CheckExact(b))) { if (sizeof(Py_ssize_t) >= sizeof(long)) return PyInt_AS_LONG(b); else return PyInt_AsSsize_t(x); } #endif if (likely(PyLong_CheckExact(b))) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)b)->ob_digit; const Py_ssize_t size = Py_SIZE(b); if (likely(__Pyx_sst_abs(size) <= 1)) { ival = likely(size) ? 
digits[0] : 0; if (size == -1) ival = -ival; return ival; } else { switch (size) { case 2: if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) { return (Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case -2: if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) { return -(Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case 3: if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) { return (Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case -3: if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) { return -(Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case 4: if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) { return (Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case -4: if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) { return -(Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; } } #endif return PyLong_AsSsize_t(b); } x = PyNumber_Index(b); if (!x) return -1; ival = PyInt_AsSsize_t(x); Py_DECREF(x); return ival; } static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) { return PyInt_FromSize_t(ival); } #endif /* Py_PYTHON_H */ photutils-0.4/photutils/geometry/circular_overlap.pyx0000644000214200020070000002040213063003335025551 0ustar lbradleySTSCI\science00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst """ The functions defined here allow one to determine the exact area of overlap of a rectangle and a circle (written by Thomas Robitaille). """ from __future__ import (absolute_import, division, print_function, unicode_literals) import numpy as np cimport numpy as np __all__ = ['circular_overlap_grid'] cdef extern from "math.h": double asin(double x) double sin(double x) double sqrt(double x) DTYPE = np.float64 ctypedef np.float64_t DTYPE_t # NOTE: Here we need to make sure we use cimport to import the C functions from # core (since these were defined with cdef). This also requires the core.pxd # file to exist with the function signatures. from .core cimport area_arc, area_triangle, floor_sqrt def circular_overlap_grid(double xmin, double xmax, double ymin, double ymax, int nx, int ny, double r, int use_exact, int subpixels): """ circular_overlap_grid(xmin, xmax, ymin, ymax, nx, ny, r, use_exact, subpixels) Area of overlap between a circle and a pixel grid. The circle is centered on the origin. Parameters ---------- xmin, xmax, ymin, ymax : float Extent of the grid in the x and y direction. nx, ny : int Grid dimensions. r : float The radius of the circle. use_exact : 0 or 1 If ``1`` calculates exact overlap, if ``0`` uses ``subpixel`` number of subpixels to calculate the overlap. subpixels : int Each pixel resampled by this factor in each dimension, thus each pixel is divided into ``subpixels ** 2`` subpixels. Returns ------- frac : `~numpy.ndarray` (float) 2-d array of shape (ny, nx) giving the fraction of the overlap. 
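    Examples
    --------
    A minimal usage sketch (the import path and the numerical values below are
    illustrative assumptions, not taken from this module; the compiled
    extension is assumed to be importable as ``photutils.geometry``)::

        from photutils.geometry import circular_overlap_grid

        # 4 x 4 pixel grid spanning [-1, 1] in x and y, circle of radius 0.5
        # centered on the origin; use_exact=1 requests the exact geometric
        # overlap, so the subpixels argument (here 5) is not used.
        frac = circular_overlap_grid(-1., 1., -1., 1., 4, 4, 0.5, 1, 5)
        # frac is a (ny, nx) = (4, 4) float array of per-pixel overlap fractions.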
""" cdef unsigned int i, j cdef double x, y, dx, dy, d, pixel_radius cdef double bxmin, bxmax, bymin, bymax cdef double pxmin, pxcen, pxmax, pymin, pycen, pymax # Define output array cdef np.ndarray[DTYPE_t, ndim=2] frac = np.zeros([ny, nx], dtype=DTYPE) # Find the width of each element in x and y dx = (xmax - xmin) / nx dy = (ymax - ymin) / ny # Find the radius of a single pixel pixel_radius = 0.5 * sqrt(dx * dx + dy * dy) # Define bounding box bxmin = -r - 0.5 * dx bxmax = +r + 0.5 * dx bymin = -r - 0.5 * dy bymax = +r + 0.5 * dy for i in range(nx): pxmin = xmin + i * dx # lower end of pixel pxcen = pxmin + dx * 0.5 pxmax = pxmin + dx # upper end of pixel if pxmax > bxmin and pxmin < bxmax: for j in range(ny): pymin = ymin + j * dy pycen = pymin + dy * 0.5 pymax = pymin + dy if pymax > bymin and pymin < bymax: # Distance from circle center to pixel center. d = sqrt(pxcen * pxcen + pycen * pycen) # If pixel center is "well within" circle, count full # pixel. if d < r - pixel_radius: frac[j, i] = 1. # If pixel center is "close" to circle border, find # overlap. elif d < r + pixel_radius: # Either do exact calculation or use subpixel # sampling: if use_exact: frac[j, i] = circular_overlap_single_exact( pxmin, pymin, pxmax, pymax, r) / (dx * dy) else: frac[j, i] = circular_overlap_single_subpixel( pxmin, pymin, pxmax, pymax, r, subpixels) # Otherwise, it is fully outside circle. # No action needed. return frac # NOTE: The following two functions use cdef because they are not # intended to be called from the Python code. Using def makes them # callable from outside, but also slower. In any case, these aren't useful # to call from outside because they only operate on a single pixel. cdef double circular_overlap_single_subpixel(double x0, double y0, double x1, double y1, double r, int subpixels): """Return the fraction of overlap between a circle and a single pixel with given extent, using a sub-pixel sampling method.""" cdef unsigned int i, j cdef double x, y, dx, dy, r_squared cdef double frac = 0. # Accumulator. dx = (x1 - x0) / subpixels dy = (y1 - y0) / subpixels r_squared = r ** 2 x = x0 - 0.5 * dx for i in range(subpixels): x += dx y = y0 - 0.5 * dy for j in range(subpixels): y += dy if x * x + y * y < r_squared: frac += 1. return frac / (subpixels * subpixels) cdef double circular_overlap_single_exact(double xmin, double ymin, double xmax, double ymax, double r): """ Area of overlap of a rectangle and a circle """ if 0. <= xmin: if 0. <= ymin: return circular_overlap_core(xmin, ymin, xmax, ymax, r) elif 0. >= ymax: return circular_overlap_core(-ymax, xmin, -ymin, xmax, r) else: return circular_overlap_single_exact(xmin, ymin, xmax, 0., r) \ + circular_overlap_single_exact(xmin, 0., xmax, ymax, r) elif 0. >= xmax: if 0. <= ymin: return circular_overlap_core(-xmax, ymin, -xmin, ymax, r) elif 0. >= ymax: return circular_overlap_core(-xmax, -ymax, -xmin, -ymin, r) else: return circular_overlap_single_exact(xmin, ymin, xmax, 0., r) \ + circular_overlap_single_exact(xmin, 0., xmax, ymax, r) else: if 0. <= ymin: return circular_overlap_single_exact(xmin, ymin, 0., ymax, r) \ + circular_overlap_single_exact(0., ymin, xmax, ymax, r) if 0. 
>= ymax: return circular_overlap_single_exact(xmin, ymin, 0., ymax, r) \ + circular_overlap_single_exact(0., ymin, xmax, ymax, r) else: return circular_overlap_single_exact(xmin, ymin, 0., 0., r) \ + circular_overlap_single_exact(0., ymin, xmax, 0., r) \ + circular_overlap_single_exact(xmin, 0., 0., ymax, r) \ + circular_overlap_single_exact(0., 0., xmax, ymax, r) cdef double circular_overlap_core(double xmin, double ymin, double xmax, double ymax, double r): """ Assumes that the center of the circle is <= xmin, ymin (can always modify input to conform to this). """ cdef double area, d1, d2, x1, x2, y1, y2 if xmin * xmin + ymin * ymin > r * r: area = 0. elif xmax * xmax + ymax * ymax < r * r: area = (xmax - xmin) * (ymax - ymin) else: area = 0. d1 = floor_sqrt(xmax * xmax + ymin * ymin) d2 = floor_sqrt(xmin * xmin + ymax * ymax) if d1 < r and d2 < r: x1, y1 = floor_sqrt(r * r - ymax * ymax), ymax x2, y2 = xmax, floor_sqrt(r * r - xmax * xmax) area = ((xmax - xmin) * (ymax - ymin) - area_triangle(x1, y1, x2, y2, xmax, ymax) + area_arc(x1, y1, x2, y2, r)) elif d1 < r: x1, y1 = xmin, floor_sqrt(r * r - xmin * xmin) x2, y2 = xmax, floor_sqrt(r * r - xmax * xmax) area = (area_arc(x1, y1, x2, y2, r) + area_triangle(x1, y1, x1, ymin, xmax, ymin) + area_triangle(x1, y1, x2, ymin, x2, y2)) elif d2 < r: x1, y1 = floor_sqrt(r * r - ymin * ymin), ymin x2, y2 = floor_sqrt(r * r - ymax * ymax), ymax area = (area_arc(x1, y1, x2, y2, r) + area_triangle(x1, y1, xmin, y1, xmin, ymax) + area_triangle(x1, y1, xmin, y2, x2, y2)) else: x1, y1 = floor_sqrt(r * r - ymin * ymin), ymin x2, y2 = xmin, floor_sqrt(r * r - xmin * xmin) area = (area_arc(x1, y1, x2, y2, r) + area_triangle(x1, y1, x2, y2, xmin, ymin)) return area photutils-0.4/photutils/geometry/core.c0000644000214200020070000127251313175654677022615 0ustar lbradleySTSCI\science00000000000000/* Generated by Cython 0.27.2 */ #define PY_SSIZE_T_CLEAN #include "Python.h" #ifndef Py_PYTHON_H #error Python headers needed to compile C extensions, please install development version of Python. #elif PY_VERSION_HEX < 0x02060000 || (0x03000000 <= PY_VERSION_HEX && PY_VERSION_HEX < 0x03030000) #error Cython requires Python 2.6+ or Python 3.3+. 
#else #define CYTHON_ABI "0_27_2" #define CYTHON_FUTURE_DIVISION 1 #include <stddef.h> #ifndef offsetof #define offsetof(type, member) ( (size_t) & ((type*)0) -> member ) #endif #if !defined(WIN32) && !defined(MS_WINDOWS) #ifndef __stdcall #define __stdcall #endif #ifndef __cdecl #define __cdecl #endif #ifndef __fastcall #define __fastcall #endif #endif #ifndef DL_IMPORT #define DL_IMPORT(t) t #endif #ifndef DL_EXPORT #define DL_EXPORT(t) t #endif #define __PYX_COMMA , #ifndef HAVE_LONG_LONG #if PY_VERSION_HEX >= 0x02070000 #define HAVE_LONG_LONG #endif #endif #ifndef PY_LONG_LONG #define PY_LONG_LONG LONG_LONG #endif #ifndef Py_HUGE_VAL #define Py_HUGE_VAL HUGE_VAL #endif #ifdef PYPY_VERSION #define CYTHON_COMPILING_IN_PYPY 1 #define CYTHON_COMPILING_IN_PYSTON 0 #define CYTHON_COMPILING_IN_CPYTHON 0 #undef CYTHON_USE_TYPE_SLOTS #define CYTHON_USE_TYPE_SLOTS 0 #undef CYTHON_USE_PYTYPE_LOOKUP #define CYTHON_USE_PYTYPE_LOOKUP 0 #if PY_VERSION_HEX < 0x03050000 #undef CYTHON_USE_ASYNC_SLOTS #define CYTHON_USE_ASYNC_SLOTS 0 #elif !defined(CYTHON_USE_ASYNC_SLOTS) #define CYTHON_USE_ASYNC_SLOTS 1 #endif #undef CYTHON_USE_PYLIST_INTERNALS #define CYTHON_USE_PYLIST_INTERNALS 0 #undef CYTHON_USE_UNICODE_INTERNALS #define CYTHON_USE_UNICODE_INTERNALS 0 #undef CYTHON_USE_UNICODE_WRITER #define CYTHON_USE_UNICODE_WRITER 0 #undef CYTHON_USE_PYLONG_INTERNALS #define CYTHON_USE_PYLONG_INTERNALS 0 #undef CYTHON_AVOID_BORROWED_REFS #define CYTHON_AVOID_BORROWED_REFS 1 #undef CYTHON_ASSUME_SAFE_MACROS #define CYTHON_ASSUME_SAFE_MACROS 0 #undef CYTHON_UNPACK_METHODS #define CYTHON_UNPACK_METHODS 0 #undef CYTHON_FAST_THREAD_STATE #define CYTHON_FAST_THREAD_STATE 0 #undef CYTHON_FAST_PYCALL #define CYTHON_FAST_PYCALL 0 #undef CYTHON_PEP489_MULTI_PHASE_INIT #define CYTHON_PEP489_MULTI_PHASE_INIT 0 #undef CYTHON_USE_TP_FINALIZE #define CYTHON_USE_TP_FINALIZE 0 #elif defined(PYSTON_VERSION) #define CYTHON_COMPILING_IN_PYPY 0 #define CYTHON_COMPILING_IN_PYSTON 1 #define CYTHON_COMPILING_IN_CPYTHON 0 #ifndef CYTHON_USE_TYPE_SLOTS #define CYTHON_USE_TYPE_SLOTS 1 #endif #undef CYTHON_USE_PYTYPE_LOOKUP #define CYTHON_USE_PYTYPE_LOOKUP 0 #undef CYTHON_USE_ASYNC_SLOTS #define CYTHON_USE_ASYNC_SLOTS 0 #undef CYTHON_USE_PYLIST_INTERNALS #define CYTHON_USE_PYLIST_INTERNALS 0 #ifndef CYTHON_USE_UNICODE_INTERNALS #define CYTHON_USE_UNICODE_INTERNALS 1 #endif #undef CYTHON_USE_UNICODE_WRITER #define CYTHON_USE_UNICODE_WRITER 0 #undef CYTHON_USE_PYLONG_INTERNALS #define CYTHON_USE_PYLONG_INTERNALS 0 #ifndef CYTHON_AVOID_BORROWED_REFS #define CYTHON_AVOID_BORROWED_REFS 0 #endif #ifndef CYTHON_ASSUME_SAFE_MACROS #define CYTHON_ASSUME_SAFE_MACROS 1 #endif #ifndef CYTHON_UNPACK_METHODS #define CYTHON_UNPACK_METHODS 1 #endif #undef CYTHON_FAST_THREAD_STATE #define CYTHON_FAST_THREAD_STATE 0 #undef CYTHON_FAST_PYCALL #define CYTHON_FAST_PYCALL 0 #undef CYTHON_PEP489_MULTI_PHASE_INIT #define CYTHON_PEP489_MULTI_PHASE_INIT 0 #undef CYTHON_USE_TP_FINALIZE #define CYTHON_USE_TP_FINALIZE 0 #else #define CYTHON_COMPILING_IN_PYPY 0 #define CYTHON_COMPILING_IN_PYSTON 0 #define CYTHON_COMPILING_IN_CPYTHON 1 #ifndef CYTHON_USE_TYPE_SLOTS #define CYTHON_USE_TYPE_SLOTS 1 #endif #if PY_VERSION_HEX < 0x02070000 #undef CYTHON_USE_PYTYPE_LOOKUP #define CYTHON_USE_PYTYPE_LOOKUP 0 #elif !defined(CYTHON_USE_PYTYPE_LOOKUP) #define CYTHON_USE_PYTYPE_LOOKUP 1 #endif #if PY_MAJOR_VERSION < 3 #undef CYTHON_USE_ASYNC_SLOTS #define CYTHON_USE_ASYNC_SLOTS 0 #elif !defined(CYTHON_USE_ASYNC_SLOTS) #define CYTHON_USE_ASYNC_SLOTS 1 #endif #if PY_VERSION_HEX < 0x02070000
#undef CYTHON_USE_PYLONG_INTERNALS #define CYTHON_USE_PYLONG_INTERNALS 0 #elif !defined(CYTHON_USE_PYLONG_INTERNALS) #define CYTHON_USE_PYLONG_INTERNALS 1 #endif #ifndef CYTHON_USE_PYLIST_INTERNALS #define CYTHON_USE_PYLIST_INTERNALS 1 #endif #ifndef CYTHON_USE_UNICODE_INTERNALS #define CYTHON_USE_UNICODE_INTERNALS 1 #endif #if PY_VERSION_HEX < 0x030300F0 #undef CYTHON_USE_UNICODE_WRITER #define CYTHON_USE_UNICODE_WRITER 0 #elif !defined(CYTHON_USE_UNICODE_WRITER) #define CYTHON_USE_UNICODE_WRITER 1 #endif #ifndef CYTHON_AVOID_BORROWED_REFS #define CYTHON_AVOID_BORROWED_REFS 0 #endif #ifndef CYTHON_ASSUME_SAFE_MACROS #define CYTHON_ASSUME_SAFE_MACROS 1 #endif #ifndef CYTHON_UNPACK_METHODS #define CYTHON_UNPACK_METHODS 1 #endif #ifndef CYTHON_FAST_THREAD_STATE #define CYTHON_FAST_THREAD_STATE 1 #endif #ifndef CYTHON_FAST_PYCALL #define CYTHON_FAST_PYCALL 1 #endif #ifndef CYTHON_PEP489_MULTI_PHASE_INIT #define CYTHON_PEP489_MULTI_PHASE_INIT (0 && PY_VERSION_HEX >= 0x03050000) #endif #ifndef CYTHON_USE_TP_FINALIZE #define CYTHON_USE_TP_FINALIZE (PY_VERSION_HEX >= 0x030400a1) #endif #endif #if !defined(CYTHON_FAST_PYCCALL) #define CYTHON_FAST_PYCCALL (CYTHON_FAST_PYCALL && PY_VERSION_HEX >= 0x030600B1) #endif #if CYTHON_USE_PYLONG_INTERNALS #include "longintrepr.h" #undef SHIFT #undef BASE #undef MASK #endif #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX < 0x02070600 && !defined(Py_OptimizeFlag) #define Py_OptimizeFlag 0 #endif #define __PYX_BUILD_PY_SSIZE_T "n" #define CYTHON_FORMAT_SSIZE_T "z" #if PY_MAJOR_VERSION < 3 #define __Pyx_BUILTIN_MODULE_NAME "__builtin__" #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ PyCode_New(a+k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) #define __Pyx_DefaultClassType PyClass_Type #else #define __Pyx_BUILTIN_MODULE_NAME "builtins" #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) #define __Pyx_DefaultClassType PyType_Type #endif #ifndef Py_TPFLAGS_CHECKTYPES #define Py_TPFLAGS_CHECKTYPES 0 #endif #ifndef Py_TPFLAGS_HAVE_INDEX #define Py_TPFLAGS_HAVE_INDEX 0 #endif #ifndef Py_TPFLAGS_HAVE_NEWBUFFER #define Py_TPFLAGS_HAVE_NEWBUFFER 0 #endif #ifndef Py_TPFLAGS_HAVE_FINALIZE #define Py_TPFLAGS_HAVE_FINALIZE 0 #endif #if PY_VERSION_HEX < 0x030700A0 || !defined(METH_FASTCALL) #ifndef METH_FASTCALL #define METH_FASTCALL 0x80 #endif typedef PyObject *(*__Pyx_PyCFunctionFast) (PyObject *self, PyObject **args, Py_ssize_t nargs); typedef PyObject *(*__Pyx_PyCFunctionFastWithKeywords) (PyObject *self, PyObject **args, Py_ssize_t nargs, PyObject *kwnames); #else #define __Pyx_PyCFunctionFast _PyCFunctionFast #define __Pyx_PyCFunctionFastWithKeywords _PyCFunctionFastWithKeywords #endif #if CYTHON_FAST_PYCCALL #define __Pyx_PyFastCFunction_Check(func)\ ((PyCFunction_Check(func) && (METH_FASTCALL == (PyCFunction_GET_FLAGS(func) & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS))))) #else #define __Pyx_PyFastCFunction_Check(func) 0 #endif #if !CYTHON_FAST_THREAD_STATE || PY_VERSION_HEX < 0x02070000 #define __Pyx_PyThreadState_Current PyThreadState_GET() #elif PY_VERSION_HEX >= 0x03060000 #define __Pyx_PyThreadState_Current _PyThreadState_UncheckedGet() #elif PY_VERSION_HEX >= 0x03000000 #define __Pyx_PyThreadState_Current PyThreadState_GET() #else #define __Pyx_PyThreadState_Current _PyThreadState_Current #endif #if CYTHON_COMPILING_IN_CPYTHON || defined(_PyDict_NewPresized) #define 
__Pyx_PyDict_NewPresized(n) ((n <= 8) ? PyDict_New() : _PyDict_NewPresized(n)) #else #define __Pyx_PyDict_NewPresized(n) PyDict_New() #endif #if PY_MAJOR_VERSION >= 3 || CYTHON_FUTURE_DIVISION #define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y) #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y) #else #define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y) #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y) #endif #if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND) #define CYTHON_PEP393_ENABLED 1 #define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ?\ 0 : _PyUnicode_Ready((PyObject *)(op))) #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_LENGTH(u) #define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i) #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) PyUnicode_MAX_CHAR_VALUE(u) #define __Pyx_PyUnicode_KIND(u) PyUnicode_KIND(u) #define __Pyx_PyUnicode_DATA(u) PyUnicode_DATA(u) #define __Pyx_PyUnicode_READ(k, d, i) PyUnicode_READ(k, d, i) #define __Pyx_PyUnicode_WRITE(k, d, i, ch) PyUnicode_WRITE(k, d, i, ch) #define __Pyx_PyUnicode_IS_TRUE(u) (0 != (likely(PyUnicode_IS_READY(u)) ? PyUnicode_GET_LENGTH(u) : PyUnicode_GET_SIZE(u))) #else #define CYTHON_PEP393_ENABLED 0 #define PyUnicode_1BYTE_KIND 1 #define PyUnicode_2BYTE_KIND 2 #define PyUnicode_4BYTE_KIND 4 #define __Pyx_PyUnicode_READY(op) (0) #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_SIZE(u) #define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i])) #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) ((sizeof(Py_UNICODE) == 2) ? 65535 : 1114111) #define __Pyx_PyUnicode_KIND(u) (sizeof(Py_UNICODE)) #define __Pyx_PyUnicode_DATA(u) ((void*)PyUnicode_AS_UNICODE(u)) #define __Pyx_PyUnicode_READ(k, d, i) ((void)(k), (Py_UCS4)(((Py_UNICODE*)d)[i])) #define __Pyx_PyUnicode_WRITE(k, d, i, ch) (((void)(k)), ((Py_UNICODE*)d)[i] = ch) #define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_SIZE(u)) #endif #if CYTHON_COMPILING_IN_PYPY #define __Pyx_PyUnicode_Concat(a, b) PyNumber_Add(a, b) #define __Pyx_PyUnicode_ConcatSafe(a, b) PyNumber_Add(a, b) #else #define __Pyx_PyUnicode_Concat(a, b) PyUnicode_Concat(a, b) #define __Pyx_PyUnicode_ConcatSafe(a, b) ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ?\ PyNumber_Add(a, b) : __Pyx_PyUnicode_Concat(a, b)) #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyUnicode_Contains) #define PyUnicode_Contains(u, s) PySequence_Contains(u, s) #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyByteArray_Check) #define PyByteArray_Check(obj) PyObject_TypeCheck(obj, &PyByteArray_Type) #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Format) #define PyObject_Format(obj, fmt) PyObject_CallMethod(obj, "__format__", "O", fmt) #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Malloc) #define PyObject_Malloc(s) PyMem_Malloc(s) #define PyObject_Free(p) PyMem_Free(p) #define PyObject_Realloc(p) PyMem_Realloc(p) #endif #if CYTHON_COMPILING_IN_PYSTON #define __Pyx_PyCode_HasFreeVars(co) PyCode_HasFreeVars(co) #define __Pyx_PyFrame_SetLineNumber(frame, lineno) PyFrame_SetLineNumber(frame, lineno) #else #define __Pyx_PyCode_HasFreeVars(co) (PyCode_GetNumFree(co) > 0) #define __Pyx_PyFrame_SetLineNumber(frame, lineno) (frame)->f_lineno = (lineno) #endif #define __Pyx_PyString_FormatSafe(a, b) ((unlikely((a) == Py_None)) ? PyNumber_Remainder(a, b) : __Pyx_PyString_Format(a, b)) #define __Pyx_PyUnicode_FormatSafe(a, b) ((unlikely((a) == Py_None)) ? 
PyNumber_Remainder(a, b) : PyUnicode_Format(a, b)) #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyString_Format(a, b) PyUnicode_Format(a, b) #else #define __Pyx_PyString_Format(a, b) PyString_Format(a, b) #endif #if PY_MAJOR_VERSION < 3 && !defined(PyObject_ASCII) #define PyObject_ASCII(o) PyObject_Repr(o) #endif #if PY_MAJOR_VERSION >= 3 #define PyBaseString_Type PyUnicode_Type #define PyStringObject PyUnicodeObject #define PyString_Type PyUnicode_Type #define PyString_Check PyUnicode_Check #define PyString_CheckExact PyUnicode_CheckExact #endif #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyBaseString_Check(obj) PyUnicode_Check(obj) #define __Pyx_PyBaseString_CheckExact(obj) PyUnicode_CheckExact(obj) #else #define __Pyx_PyBaseString_Check(obj) (PyString_Check(obj) || PyUnicode_Check(obj)) #define __Pyx_PyBaseString_CheckExact(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj)) #endif #ifndef PySet_CheckExact #define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type) #endif #define __Pyx_PyException_Check(obj) __Pyx_TypeCheck(obj, PyExc_Exception) #if PY_MAJOR_VERSION >= 3 #define PyIntObject PyLongObject #define PyInt_Type PyLong_Type #define PyInt_Check(op) PyLong_Check(op) #define PyInt_CheckExact(op) PyLong_CheckExact(op) #define PyInt_FromString PyLong_FromString #define PyInt_FromUnicode PyLong_FromUnicode #define PyInt_FromLong PyLong_FromLong #define PyInt_FromSize_t PyLong_FromSize_t #define PyInt_FromSsize_t PyLong_FromSsize_t #define PyInt_AsLong PyLong_AsLong #define PyInt_AS_LONG PyLong_AS_LONG #define PyInt_AsSsize_t PyLong_AsSsize_t #define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask #define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask #define PyNumber_Int PyNumber_Long #endif #if PY_MAJOR_VERSION >= 3 #define PyBoolObject PyLongObject #endif #if PY_MAJOR_VERSION >= 3 && CYTHON_COMPILING_IN_PYPY #ifndef PyUnicode_InternFromString #define PyUnicode_InternFromString(s) PyUnicode_FromString(s) #endif #endif #if PY_VERSION_HEX < 0x030200A4 typedef long Py_hash_t; #define __Pyx_PyInt_FromHash_t PyInt_FromLong #define __Pyx_PyInt_AsHash_t PyInt_AsLong #else #define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t #define __Pyx_PyInt_AsHash_t PyInt_AsSsize_t #endif #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyMethod_New(func, self, klass) ((self) ? 
PyMethod_New(func, self) : PyInstanceMethod_New(func)) #else #define __Pyx_PyMethod_New(func, self, klass) PyMethod_New(func, self, klass) #endif #ifndef __has_attribute #define __has_attribute(x) 0 #endif #ifndef __has_cpp_attribute #define __has_cpp_attribute(x) 0 #endif #if CYTHON_USE_ASYNC_SLOTS #if PY_VERSION_HEX >= 0x030500B1 #define __Pyx_PyAsyncMethodsStruct PyAsyncMethods #define __Pyx_PyType_AsAsync(obj) (Py_TYPE(obj)->tp_as_async) #else #define __Pyx_PyType_AsAsync(obj) ((__Pyx_PyAsyncMethodsStruct*) (Py_TYPE(obj)->tp_reserved)) #endif #else #define __Pyx_PyType_AsAsync(obj) NULL #endif #ifndef __Pyx_PyAsyncMethodsStruct typedef struct { unaryfunc am_await; unaryfunc am_aiter; unaryfunc am_anext; } __Pyx_PyAsyncMethodsStruct; #endif #ifndef CYTHON_RESTRICT #if defined(__GNUC__) #define CYTHON_RESTRICT __restrict__ #elif defined(_MSC_VER) && _MSC_VER >= 1400 #define CYTHON_RESTRICT __restrict #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define CYTHON_RESTRICT restrict #else #define CYTHON_RESTRICT #endif #endif #ifndef CYTHON_UNUSED # if defined(__GNUC__) # if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) # define CYTHON_UNUSED __attribute__ ((__unused__)) # else # define CYTHON_UNUSED # endif # elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER)) # define CYTHON_UNUSED __attribute__ ((__unused__)) # else # define CYTHON_UNUSED # endif #endif #ifndef CYTHON_MAYBE_UNUSED_VAR # if defined(__cplusplus) template<class T> void CYTHON_MAYBE_UNUSED_VAR( const T& ) { } # else # define CYTHON_MAYBE_UNUSED_VAR(x) (void)(x) # endif #endif #ifndef CYTHON_NCP_UNUSED # if CYTHON_COMPILING_IN_CPYTHON # define CYTHON_NCP_UNUSED # else # define CYTHON_NCP_UNUSED CYTHON_UNUSED # endif #endif #define __Pyx_void_to_None(void_result) ((void)(void_result), Py_INCREF(Py_None), Py_None) #ifdef _MSC_VER #ifndef _MSC_STDINT_H_ #if _MSC_VER < 1300 typedef unsigned char uint8_t; typedef unsigned int uint32_t; #else typedef unsigned __int8 uint8_t; typedef unsigned __int32 uint32_t; #endif #endif #else #include <stdint.h> #endif #ifndef CYTHON_FALLTHROUGH #if defined(__cplusplus) && __cplusplus >= 201103L #if __has_cpp_attribute(fallthrough) #define CYTHON_FALLTHROUGH [[fallthrough]] #elif __has_cpp_attribute(clang::fallthrough) #define CYTHON_FALLTHROUGH [[clang::fallthrough]] #elif __has_cpp_attribute(gnu::fallthrough) #define CYTHON_FALLTHROUGH [[gnu::fallthrough]] #endif #endif #ifndef CYTHON_FALLTHROUGH #if __has_attribute(fallthrough) #define CYTHON_FALLTHROUGH __attribute__((fallthrough)) #else #define CYTHON_FALLTHROUGH #endif #endif #if defined(__clang__ ) && defined(__apple_build_version__) #if __apple_build_version__ < 7000000 #undef CYTHON_FALLTHROUGH #define CYTHON_FALLTHROUGH #endif #endif #endif #ifndef CYTHON_INLINE #if defined(__clang__) #define CYTHON_INLINE __inline__ __attribute__ ((__unused__)) #elif defined(__GNUC__) #define CYTHON_INLINE __inline__ #elif defined(_MSC_VER) #define CYTHON_INLINE __inline #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define CYTHON_INLINE inline #else #define CYTHON_INLINE #endif #endif #if defined(WIN32) || defined(MS_WINDOWS) #define _USE_MATH_DEFINES #endif #include <math.h> #ifdef NAN #define __PYX_NAN() ((float) NAN) #else static CYTHON_INLINE float __PYX_NAN() { float value; memset(&value, 0xFF, sizeof(value)); return value; } #endif #if defined(__CYGWIN__) && defined(_LDBL_EQ_DBL) #define __Pyx_truncl trunc #else #define __Pyx_truncl truncl #endif #define __PYX_ERR(f_index, lineno,
Ln_error) \ { \ __pyx_filename = __pyx_f[f_index]; __pyx_lineno = lineno; __pyx_clineno = __LINE__; goto Ln_error; \ } #ifndef __PYX_EXTERN_C #ifdef __cplusplus #define __PYX_EXTERN_C extern "C" #else #define __PYX_EXTERN_C extern #endif #endif #define __PYX_HAVE__photutils__geometry__core #define __PYX_HAVE_API__photutils__geometry__core #include #include #include "numpy/arrayobject.h" #include "numpy/ufuncobject.h" #include "math.h" #include "pythread.h" #ifdef _OPENMP #include <omp.h> #endif /* _OPENMP */ #if defined(PYREX_WITHOUT_ASSERTIONS) && !defined(CYTHON_WITHOUT_ASSERTIONS) #define CYTHON_WITHOUT_ASSERTIONS #endif typedef struct {PyObject **p; const char *s; const Py_ssize_t n; const char* encoding; const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry; #define __PYX_DEFAULT_STRING_ENCODING_IS_ASCII 0 #define __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT 0 #define __PYX_DEFAULT_STRING_ENCODING "" #define __Pyx_PyObject_FromString __Pyx_PyBytes_FromString #define __Pyx_PyObject_FromStringAndSize __Pyx_PyBytes_FromStringAndSize #define __Pyx_uchar_cast(c) ((unsigned char)c) #define __Pyx_long_cast(x) ((long)x) #define __Pyx_fits_Py_ssize_t(v, type, is_signed) (\ (sizeof(type) < sizeof(Py_ssize_t)) ||\ (sizeof(type) > sizeof(Py_ssize_t) &&\ likely(v < (type)PY_SSIZE_T_MAX ||\ v == (type)PY_SSIZE_T_MAX) &&\ (!is_signed || likely(v > (type)PY_SSIZE_T_MIN ||\ v == (type)PY_SSIZE_T_MIN))) ||\ (sizeof(type) == sizeof(Py_ssize_t) &&\ (is_signed || likely(v < (type)PY_SSIZE_T_MAX ||\ v == (type)PY_SSIZE_T_MAX))) ) #if defined (__cplusplus) && __cplusplus >= 201103L #include <cstdlib> #define __Pyx_sst_abs(value) std::abs(value) #elif SIZEOF_INT >= SIZEOF_SIZE_T #define __Pyx_sst_abs(value) abs(value) #elif SIZEOF_LONG >= SIZEOF_SIZE_T #define __Pyx_sst_abs(value) labs(value) #elif defined (_MSC_VER) #define __Pyx_sst_abs(value) ((Py_ssize_t)_abs64(value)) #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define __Pyx_sst_abs(value) llabs(value) #elif defined (__GNUC__) #define __Pyx_sst_abs(value) __builtin_llabs(value) #else #define __Pyx_sst_abs(value) ((value<0) ?
-value : value) #endif static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject*); static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject*, Py_ssize_t* length); #define __Pyx_PyByteArray_FromString(s) PyByteArray_FromStringAndSize((const char*)s, strlen((const char*)s)) #define __Pyx_PyByteArray_FromStringAndSize(s, l) PyByteArray_FromStringAndSize((const char*)s, l) #define __Pyx_PyBytes_FromString PyBytes_FromString #define __Pyx_PyBytes_FromStringAndSize PyBytes_FromStringAndSize static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char*); #if PY_MAJOR_VERSION < 3 #define __Pyx_PyStr_FromString __Pyx_PyBytes_FromString #define __Pyx_PyStr_FromStringAndSize __Pyx_PyBytes_FromStringAndSize #else #define __Pyx_PyStr_FromString __Pyx_PyUnicode_FromString #define __Pyx_PyStr_FromStringAndSize __Pyx_PyUnicode_FromStringAndSize #endif #define __Pyx_PyBytes_AsWritableString(s) ((char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsWritableSString(s) ((signed char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsWritableUString(s) ((unsigned char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsString(s) ((const char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsSString(s) ((const signed char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsUString(s) ((const unsigned char*) PyBytes_AS_STRING(s)) #define __Pyx_PyObject_AsWritableString(s) ((char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_AsWritableSString(s) ((signed char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_AsWritableUString(s) ((unsigned char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_AsSString(s) ((const signed char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_AsUString(s) ((const unsigned char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_FromCString(s) __Pyx_PyObject_FromString((const char*)s) #define __Pyx_PyBytes_FromCString(s) __Pyx_PyBytes_FromString((const char*)s) #define __Pyx_PyByteArray_FromCString(s) __Pyx_PyByteArray_FromString((const char*)s) #define __Pyx_PyStr_FromCString(s) __Pyx_PyStr_FromString((const char*)s) #define __Pyx_PyUnicode_FromCString(s) __Pyx_PyUnicode_FromString((const char*)s) static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const Py_UNICODE *u) { const Py_UNICODE *u_end = u; while (*u_end++) ; return (size_t)(u_end - u - 1); } #define __Pyx_PyUnicode_FromUnicode(u) PyUnicode_FromUnicode(u, __Pyx_Py_UNICODE_strlen(u)) #define __Pyx_PyUnicode_FromUnicodeAndLength PyUnicode_FromUnicode #define __Pyx_PyUnicode_AsUnicode PyUnicode_AsUnicode #define __Pyx_NewRef(obj) (Py_INCREF(obj), obj) #define __Pyx_Owned_Py_None(b) __Pyx_NewRef(Py_None) #define __Pyx_PyBool_FromLong(b) ((b) ? __Pyx_NewRef(Py_True) : __Pyx_NewRef(Py_False)) static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*); static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x); #define __Pyx_PySequence_Tuple(obj)\ (likely(PyTuple_CheckExact(obj)) ? __Pyx_NewRef(obj) : PySequence_Tuple(obj)) static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*); static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t); #if CYTHON_ASSUME_SAFE_MACROS #define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x)) #else #define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x) #endif #define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x)) #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyNumber_Int(x) (PyLong_CheckExact(x) ? 
__Pyx_NewRef(x) : PyNumber_Long(x)) #else #define __Pyx_PyNumber_Int(x) (PyInt_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Int(x)) #endif #define __Pyx_PyNumber_Float(x) (PyFloat_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Float(x)) #if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII static int __Pyx_sys_getdefaultencoding_not_ascii; static int __Pyx_init_sys_getdefaultencoding_params(void) { PyObject* sys; PyObject* default_encoding = NULL; PyObject* ascii_chars_u = NULL; PyObject* ascii_chars_b = NULL; const char* default_encoding_c; sys = PyImport_ImportModule("sys"); if (!sys) goto bad; default_encoding = PyObject_CallMethod(sys, (char*) "getdefaultencoding", NULL); Py_DECREF(sys); if (!default_encoding) goto bad; default_encoding_c = PyBytes_AsString(default_encoding); if (!default_encoding_c) goto bad; if (strcmp(default_encoding_c, "ascii") == 0) { __Pyx_sys_getdefaultencoding_not_ascii = 0; } else { char ascii_chars[128]; int c; for (c = 0; c < 128; c++) { ascii_chars[c] = c; } __Pyx_sys_getdefaultencoding_not_ascii = 1; ascii_chars_u = PyUnicode_DecodeASCII(ascii_chars, 128, NULL); if (!ascii_chars_u) goto bad; ascii_chars_b = PyUnicode_AsEncodedString(ascii_chars_u, default_encoding_c, NULL); if (!ascii_chars_b || !PyBytes_Check(ascii_chars_b) || memcmp(ascii_chars, PyBytes_AS_STRING(ascii_chars_b), 128) != 0) { PyErr_Format( PyExc_ValueError, "This module compiled with c_string_encoding=ascii, but default encoding '%.200s' is not a superset of ascii.", default_encoding_c); goto bad; } Py_DECREF(ascii_chars_u); Py_DECREF(ascii_chars_b); } Py_DECREF(default_encoding); return 0; bad: Py_XDECREF(default_encoding); Py_XDECREF(ascii_chars_u); Py_XDECREF(ascii_chars_b); return -1; } #endif #if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT && PY_MAJOR_VERSION >= 3 #define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_DecodeUTF8(c_str, size, NULL) #else #define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_Decode(c_str, size, __PYX_DEFAULT_STRING_ENCODING, NULL) #if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT static char* __PYX_DEFAULT_STRING_ENCODING; static int __Pyx_init_sys_getdefaultencoding_params(void) { PyObject* sys; PyObject* default_encoding = NULL; char* default_encoding_c; sys = PyImport_ImportModule("sys"); if (!sys) goto bad; default_encoding = PyObject_CallMethod(sys, (char*) (const char*) "getdefaultencoding", NULL); Py_DECREF(sys); if (!default_encoding) goto bad; default_encoding_c = PyBytes_AsString(default_encoding); if (!default_encoding_c) goto bad; __PYX_DEFAULT_STRING_ENCODING = (char*) malloc(strlen(default_encoding_c)); if (!__PYX_DEFAULT_STRING_ENCODING) goto bad; strcpy(__PYX_DEFAULT_STRING_ENCODING, default_encoding_c); Py_DECREF(default_encoding); return 0; bad: Py_XDECREF(default_encoding); return -1; } #endif #endif /* Test for GCC > 2.95 */ #if defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))) #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #else /* !__GNUC__ or GCC < 2.95 */ #define likely(x) (x) #define unlikely(x) (x) #endif /* __GNUC__ */ static CYTHON_INLINE void __Pyx_pretend_to_initialize(void* ptr) { (void)ptr; } static PyObject *__pyx_m = NULL; static PyObject *__pyx_d; static PyObject *__pyx_b; static PyObject *__pyx_cython_runtime; static PyObject *__pyx_empty_tuple; static PyObject *__pyx_empty_bytes; static PyObject *__pyx_empty_unicode; static int __pyx_lineno; static int __pyx_clineno = 0; static const char * __pyx_cfilenm= __FILE__; static 
const char *__pyx_filename; /* Header.proto */ #if !defined(CYTHON_CCOMPLEX) #if defined(__cplusplus) #define CYTHON_CCOMPLEX 1 #elif defined(_Complex_I) #define CYTHON_CCOMPLEX 1 #else #define CYTHON_CCOMPLEX 0 #endif #endif #if CYTHON_CCOMPLEX #ifdef __cplusplus #include <complex> #else #include <complex.h> #endif #endif #if CYTHON_CCOMPLEX && !defined(__cplusplus) && defined(__sun__) && defined(__GNUC__) #undef _Complex_I #define _Complex_I 1.0fj #endif static const char *__pyx_f[] = { "photutils/geometry/core.pyx", "__init__.pxd", "type.pxd", "bool.pxd", "complex.pxd", }; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":743 * # in Cython to enable them only on the right systems. * * ctypedef npy_int8 int8_t # <<<<<<<<<<<<<< * ctypedef npy_int16 int16_t * ctypedef npy_int32 int32_t */ typedef npy_int8 __pyx_t_5numpy_int8_t; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":744 * * ctypedef npy_int8 int8_t * ctypedef npy_int16 int16_t # <<<<<<<<<<<<<< * ctypedef npy_int32 int32_t * ctypedef npy_int64 int64_t */ typedef npy_int16 __pyx_t_5numpy_int16_t; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":745 * ctypedef npy_int8 int8_t * ctypedef npy_int16 int16_t * ctypedef npy_int32 int32_t # <<<<<<<<<<<<<< * ctypedef npy_int64 int64_t * #ctypedef npy_int96 int96_t */ typedef npy_int32 __pyx_t_5numpy_int32_t; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":746 * ctypedef npy_int16 int16_t * ctypedef npy_int32 int32_t * ctypedef npy_int64 int64_t # <<<<<<<<<<<<<< * #ctypedef npy_int96 int96_t * #ctypedef npy_int128 int128_t */ typedef npy_int64 __pyx_t_5numpy_int64_t; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":750 * #ctypedef npy_int128 int128_t * * ctypedef npy_uint8 uint8_t # <<<<<<<<<<<<<< * ctypedef npy_uint16 uint16_t * ctypedef npy_uint32 uint32_t */ typedef npy_uint8 __pyx_t_5numpy_uint8_t; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":751 * * ctypedef npy_uint8 uint8_t * ctypedef npy_uint16 uint16_t # <<<<<<<<<<<<<< * ctypedef npy_uint32 uint32_t * ctypedef npy_uint64 uint64_t */ typedef npy_uint16 __pyx_t_5numpy_uint16_t; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":752 * ctypedef npy_uint8 uint8_t * ctypedef npy_uint16 uint16_t * ctypedef npy_uint32 uint32_t # <<<<<<<<<<<<<< * ctypedef npy_uint64 uint64_t * #ctypedef npy_uint96 uint96_t */ typedef npy_uint32 __pyx_t_5numpy_uint32_t; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":753 * ctypedef npy_uint16 uint16_t * ctypedef npy_uint32 uint32_t * ctypedef npy_uint64 uint64_t # <<<<<<<<<<<<<< * #ctypedef npy_uint96 uint96_t * #ctypedef npy_uint128 uint128_t */ typedef npy_uint64 __pyx_t_5numpy_uint64_t; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":757 * #ctypedef npy_uint128 uint128_t * * ctypedef npy_float32 float32_t # <<<<<<<<<<<<<< * ctypedef npy_float64 float64_t * #ctypedef npy_float80 float80_t */ typedef npy_float32 __pyx_t_5numpy_float32_t; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":758 * * ctypedef npy_float32 float32_t * ctypedef npy_float64 float64_t # <<<<<<<<<<<<<< * #ctypedef npy_float80 float80_t * #ctypedef npy_float128
float128_t */ typedef npy_float64 __pyx_t_5numpy_float64_t; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":767 * # The int types are mapped a bit surprising -- * # numpy.int corresponds to 'l' and numpy.long to 'q' * ctypedef npy_long int_t # <<<<<<<<<<<<<< * ctypedef npy_longlong long_t * ctypedef npy_longlong longlong_t */ typedef npy_long __pyx_t_5numpy_int_t; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":768 * # numpy.int corresponds to 'l' and numpy.long to 'q' * ctypedef npy_long int_t * ctypedef npy_longlong long_t # <<<<<<<<<<<<<< * ctypedef npy_longlong longlong_t * */ typedef npy_longlong __pyx_t_5numpy_long_t; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":769 * ctypedef npy_long int_t * ctypedef npy_longlong long_t * ctypedef npy_longlong longlong_t # <<<<<<<<<<<<<< * * ctypedef npy_ulong uint_t */ typedef npy_longlong __pyx_t_5numpy_longlong_t; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":771 * ctypedef npy_longlong longlong_t * * ctypedef npy_ulong uint_t # <<<<<<<<<<<<<< * ctypedef npy_ulonglong ulong_t * ctypedef npy_ulonglong ulonglong_t */ typedef npy_ulong __pyx_t_5numpy_uint_t; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":772 * * ctypedef npy_ulong uint_t * ctypedef npy_ulonglong ulong_t # <<<<<<<<<<<<<< * ctypedef npy_ulonglong ulonglong_t * */ typedef npy_ulonglong __pyx_t_5numpy_ulong_t; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":773 * ctypedef npy_ulong uint_t * ctypedef npy_ulonglong ulong_t * ctypedef npy_ulonglong ulonglong_t # <<<<<<<<<<<<<< * * ctypedef npy_intp intp_t */ typedef npy_ulonglong __pyx_t_5numpy_ulonglong_t; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":775 * ctypedef npy_ulonglong ulonglong_t * * ctypedef npy_intp intp_t # <<<<<<<<<<<<<< * ctypedef npy_uintp uintp_t * */ typedef npy_intp __pyx_t_5numpy_intp_t; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":776 * * ctypedef npy_intp intp_t * ctypedef npy_uintp uintp_t # <<<<<<<<<<<<<< * * ctypedef npy_double float_t */ typedef npy_uintp __pyx_t_5numpy_uintp_t; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":778 * ctypedef npy_uintp uintp_t * * ctypedef npy_double float_t # <<<<<<<<<<<<<< * ctypedef npy_double double_t * ctypedef npy_longdouble longdouble_t */ typedef npy_double __pyx_t_5numpy_float_t; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":779 * * ctypedef npy_double float_t * ctypedef npy_double double_t # <<<<<<<<<<<<<< * ctypedef npy_longdouble longdouble_t * */ typedef npy_double __pyx_t_5numpy_double_t; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":780 * ctypedef npy_double float_t * ctypedef npy_double double_t * ctypedef npy_longdouble longdouble_t # <<<<<<<<<<<<<< * * ctypedef npy_cfloat cfloat_t */ typedef npy_longdouble __pyx_t_5numpy_longdouble_t; /* "photutils/geometry/core.pyx":25 * * DTYPE = np.float64 * ctypedef np.float64_t DTYPE_t # <<<<<<<<<<<<<< * * cimport cython */ typedef __pyx_t_5numpy_float64_t __pyx_t_9photutils_8geometry_4core_DTYPE_t; /* Declarations.proto */ #if CYTHON_CCOMPLEX #ifdef 
__cplusplus typedef ::std::complex< float > __pyx_t_float_complex; #else typedef float _Complex __pyx_t_float_complex; #endif #else typedef struct { float real, imag; } __pyx_t_float_complex; #endif static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float, float); /* Declarations.proto */ #if CYTHON_CCOMPLEX #ifdef __cplusplus typedef ::std::complex< double > __pyx_t_double_complex; #else typedef double _Complex __pyx_t_double_complex; #endif #else typedef struct { double real, imag; } __pyx_t_double_complex; #endif static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double, double); /*--- Type declarations ---*/ /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":782 * ctypedef npy_longdouble longdouble_t * * ctypedef npy_cfloat cfloat_t # <<<<<<<<<<<<<< * ctypedef npy_cdouble cdouble_t * ctypedef npy_clongdouble clongdouble_t */ typedef npy_cfloat __pyx_t_5numpy_cfloat_t; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":783 * * ctypedef npy_cfloat cfloat_t * ctypedef npy_cdouble cdouble_t # <<<<<<<<<<<<<< * ctypedef npy_clongdouble clongdouble_t * */ typedef npy_cdouble __pyx_t_5numpy_cdouble_t; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":784 * ctypedef npy_cfloat cfloat_t * ctypedef npy_cdouble cdouble_t * ctypedef npy_clongdouble clongdouble_t # <<<<<<<<<<<<<< * * ctypedef npy_cdouble complex_t */ typedef npy_clongdouble __pyx_t_5numpy_clongdouble_t; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":786 * ctypedef npy_clongdouble clongdouble_t * * ctypedef npy_cdouble complex_t # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew1(a): */ typedef npy_cdouble __pyx_t_5numpy_complex_t; struct __pyx_t_9photutils_8geometry_4core_point; typedef struct __pyx_t_9photutils_8geometry_4core_point __pyx_t_9photutils_8geometry_4core_point; struct __pyx_t_9photutils_8geometry_4core_intersections; typedef struct __pyx_t_9photutils_8geometry_4core_intersections __pyx_t_9photutils_8geometry_4core_intersections; /* "photutils/geometry/core.pyx":30 * * * ctypedef struct point: # <<<<<<<<<<<<<< * double x * double y */ struct __pyx_t_9photutils_8geometry_4core_point { double x; double y; }; /* "photutils/geometry/core.pyx":35 * * * ctypedef struct intersections: # <<<<<<<<<<<<<< * point p1 * point p2 */ struct __pyx_t_9photutils_8geometry_4core_intersections { __pyx_t_9photutils_8geometry_4core_point p1; __pyx_t_9photutils_8geometry_4core_point p2; }; /* --- Runtime support code (head) --- */ /* Refnanny.proto */ #ifndef CYTHON_REFNANNY #define CYTHON_REFNANNY 0 #endif #if CYTHON_REFNANNY typedef struct { void (*INCREF)(void*, PyObject*, int); void (*DECREF)(void*, PyObject*, int); void (*GOTREF)(void*, PyObject*, int); void (*GIVEREF)(void*, PyObject*, int); void* (*SetupContext)(const char*, int, const char*); void (*FinishContext)(void**); } __Pyx_RefNannyAPIStruct; static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL; static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname); #define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL; #ifdef WITH_THREAD #define __Pyx_RefNannySetupContext(name, acquire_gil)\ if (acquire_gil) {\ PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();\ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\ PyGILState_Release(__pyx_gilstate_save);\ } else {\ 
__pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\ } #else #define __Pyx_RefNannySetupContext(name, acquire_gil)\ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__) #endif #define __Pyx_RefNannyFinishContext()\ __Pyx_RefNanny->FinishContext(&__pyx_refnanny) #define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0) #define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0) #define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0) #define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0) #else #define __Pyx_RefNannyDeclarations #define __Pyx_RefNannySetupContext(name, acquire_gil) #define __Pyx_RefNannyFinishContext() #define __Pyx_INCREF(r) Py_INCREF(r) #define __Pyx_DECREF(r) Py_DECREF(r) #define __Pyx_GOTREF(r) #define __Pyx_GIVEREF(r) #define __Pyx_XINCREF(r) Py_XINCREF(r) #define __Pyx_XDECREF(r) Py_XDECREF(r) #define __Pyx_XGOTREF(r) #define __Pyx_XGIVEREF(r) #endif #define __Pyx_XDECREF_SET(r, v) do {\ PyObject *tmp = (PyObject *) r;\ r = v; __Pyx_XDECREF(tmp);\ } while (0) #define __Pyx_DECREF_SET(r, v) do {\ PyObject *tmp = (PyObject *) r;\ r = v; __Pyx_DECREF(tmp);\ } while (0) #define __Pyx_CLEAR(r) do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0) #define __Pyx_XCLEAR(r) do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0) /* PyThreadStateGet.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_PyThreadState_declare PyThreadState *__pyx_tstate; #define __Pyx_PyThreadState_assign __pyx_tstate = __Pyx_PyThreadState_Current; #define __Pyx_PyErr_Occurred() __pyx_tstate->curexc_type #else #define __Pyx_PyThreadState_declare #define __Pyx_PyThreadState_assign #define __Pyx_PyErr_Occurred() PyErr_Occurred() #endif /* PyErrFetchRestore.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_PyErr_Clear() __Pyx_ErrRestore(NULL, NULL, NULL) #define __Pyx_ErrRestoreWithState(type, value, tb) __Pyx_ErrRestoreInState(PyThreadState_GET(), type, value, tb) #define __Pyx_ErrFetchWithState(type, value, tb) __Pyx_ErrFetchInState(PyThreadState_GET(), type, value, tb) #define __Pyx_ErrRestore(type, value, tb) __Pyx_ErrRestoreInState(__pyx_tstate, type, value, tb) #define __Pyx_ErrFetch(type, value, tb) __Pyx_ErrFetchInState(__pyx_tstate, type, value, tb) static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb); static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); #if CYTHON_COMPILING_IN_CPYTHON #define __Pyx_PyErr_SetNone(exc) (Py_INCREF(exc), __Pyx_ErrRestore((exc), NULL, NULL)) #else #define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc) #endif #else #define __Pyx_PyErr_Clear() PyErr_Clear() #define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc) #define __Pyx_ErrRestoreWithState(type, value, tb) PyErr_Restore(type, value, tb) #define __Pyx_ErrFetchWithState(type, value, tb) PyErr_Fetch(type, value, tb) #define __Pyx_ErrRestoreInState(tstate, type, value, tb) PyErr_Restore(type, value, tb) #define __Pyx_ErrFetchInState(tstate, type, 
value, tb) PyErr_Fetch(type, value, tb) #define __Pyx_ErrRestore(type, value, tb) PyErr_Restore(type, value, tb) #define __Pyx_ErrFetch(type, value, tb) PyErr_Fetch(type, value, tb) #endif /* WriteUnraisableException.proto */ static void __Pyx_WriteUnraisable(const char *name, int clineno, int lineno, const char *filename, int full_traceback, int nogil); /* None.proto */ static CYTHON_INLINE long __Pyx_mod_long(long, long); /* PyObjectGetAttrStr.proto */ #if CYTHON_USE_TYPE_SLOTS static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name) { PyTypeObject* tp = Py_TYPE(obj); if (likely(tp->tp_getattro)) return tp->tp_getattro(obj, attr_name); #if PY_MAJOR_VERSION < 3 if (likely(tp->tp_getattr)) return tp->tp_getattr(obj, PyString_AS_STRING(attr_name)); #endif return PyObject_GetAttr(obj, attr_name); } #else #define __Pyx_PyObject_GetAttrStr(o,n) PyObject_GetAttr(o,n) #endif /* GetBuiltinName.proto */ static PyObject *__Pyx_GetBuiltinName(PyObject *name); /* GetModuleGlobalName.proto */ static CYTHON_INLINE PyObject *__Pyx_GetModuleGlobalName(PyObject *name); /* PyObjectCall.proto */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw); #else #define __Pyx_PyObject_Call(func, arg, kw) PyObject_Call(func, arg, kw) #endif /* RaiseException.proto */ static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause); /* ExtTypeTest.proto */ static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type); /* DictGetItem.proto */ #if PY_MAJOR_VERSION >= 3 && !CYTHON_COMPILING_IN_PYPY static PyObject *__Pyx_PyDict_GetItem(PyObject *d, PyObject* key) { PyObject *value; value = PyDict_GetItemWithError(d, key); if (unlikely(!value)) { if (!PyErr_Occurred()) { PyObject* args = PyTuple_Pack(1, key); if (likely(args)) PyErr_SetObject(PyExc_KeyError, args); Py_XDECREF(args); } return NULL; } Py_INCREF(value); return value; } #else #define __Pyx_PyDict_GetItem(d, key) PyObject_GetItem(d, key) #endif /* RaiseTooManyValuesToUnpack.proto */ static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected); /* RaiseNeedMoreValuesToUnpack.proto */ static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index); /* RaiseNoneIterError.proto */ static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void); /* SaveResetException.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_ExceptionSave(type, value, tb) __Pyx__ExceptionSave(__pyx_tstate, type, value, tb) static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); #define __Pyx_ExceptionReset(type, value, tb) __Pyx__ExceptionReset(__pyx_tstate, type, value, tb) static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb); #else #define __Pyx_ExceptionSave(type, value, tb) PyErr_GetExcInfo(type, value, tb) #define __Pyx_ExceptionReset(type, value, tb) PyErr_SetExcInfo(type, value, tb) #endif /* PyErrExceptionMatches.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_PyErr_ExceptionMatches(err) __Pyx_PyErr_ExceptionMatchesInState(__pyx_tstate, err) static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err); #else #define __Pyx_PyErr_ExceptionMatches(err) PyErr_ExceptionMatches(err) #endif /* GetException.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_GetException(type, value, tb) __Pyx__GetException(__pyx_tstate, type, value, tb) static int 
__Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); #else static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb); #endif /* Import.proto */ static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level); /* CLineInTraceback.proto */ #ifdef CYTHON_CLINE_IN_TRACEBACK #define __Pyx_CLineForTraceback(tstate, c_line) (((CYTHON_CLINE_IN_TRACEBACK)) ? c_line : 0) #else static int __Pyx_CLineForTraceback(PyThreadState *tstate, int c_line); #endif /* CodeObjectCache.proto */ typedef struct { PyCodeObject* code_object; int code_line; } __Pyx_CodeObjectCacheEntry; struct __Pyx_CodeObjectCache { int count; int max_count; __Pyx_CodeObjectCacheEntry* entries; }; static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL}; static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line); static PyCodeObject *__pyx_find_code_object(int code_line); static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object); /* AddTraceback.proto */ static void __Pyx_AddTraceback(const char *funcname, int c_line, int py_line, const char *filename); /* RealImag.proto */ #if CYTHON_CCOMPLEX #ifdef __cplusplus #define __Pyx_CREAL(z) ((z).real()) #define __Pyx_CIMAG(z) ((z).imag()) #else #define __Pyx_CREAL(z) (__real__(z)) #define __Pyx_CIMAG(z) (__imag__(z)) #endif #else #define __Pyx_CREAL(z) ((z).real) #define __Pyx_CIMAG(z) ((z).imag) #endif #if defined(__cplusplus) && CYTHON_CCOMPLEX\ && (defined(_WIN32) || defined(__clang__) || (defined(__GNUC__) && (__GNUC__ >= 5 || __GNUC__ == 4 && __GNUC_MINOR__ >= 4 )) || __cplusplus >= 201103) #define __Pyx_SET_CREAL(z,x) ((z).real(x)) #define __Pyx_SET_CIMAG(z,y) ((z).imag(y)) #else #define __Pyx_SET_CREAL(z,x) __Pyx_CREAL(z) = (x) #define __Pyx_SET_CIMAG(z,y) __Pyx_CIMAG(z) = (y) #endif /* Arithmetic.proto */ #if CYTHON_CCOMPLEX #define __Pyx_c_eq_float(a, b) ((a)==(b)) #define __Pyx_c_sum_float(a, b) ((a)+(b)) #define __Pyx_c_diff_float(a, b) ((a)-(b)) #define __Pyx_c_prod_float(a, b) ((a)*(b)) #define __Pyx_c_quot_float(a, b) ((a)/(b)) #define __Pyx_c_neg_float(a) (-(a)) #ifdef __cplusplus #define __Pyx_c_is_zero_float(z) ((z)==(float)0) #define __Pyx_c_conj_float(z) (::std::conj(z)) #if 1 #define __Pyx_c_abs_float(z) (::std::abs(z)) #define __Pyx_c_pow_float(a, b) (::std::pow(a, b)) #endif #else #define __Pyx_c_is_zero_float(z) ((z)==0) #define __Pyx_c_conj_float(z) (conjf(z)) #if 1 #define __Pyx_c_abs_float(z) (cabsf(z)) #define __Pyx_c_pow_float(a, b) (cpowf(a, b)) #endif #endif #else static CYTHON_INLINE int __Pyx_c_eq_float(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sum_float(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_diff_float(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prod_float(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quot_float(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_neg_float(__pyx_t_float_complex); static CYTHON_INLINE int __Pyx_c_is_zero_float(__pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conj_float(__pyx_t_float_complex); #if 1 static CYTHON_INLINE float __Pyx_c_abs_float(__pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_pow_float(__pyx_t_float_complex, __pyx_t_float_complex); #endif #endif /* 
Arithmetic.proto */ #if CYTHON_CCOMPLEX #define __Pyx_c_eq_double(a, b) ((a)==(b)) #define __Pyx_c_sum_double(a, b) ((a)+(b)) #define __Pyx_c_diff_double(a, b) ((a)-(b)) #define __Pyx_c_prod_double(a, b) ((a)*(b)) #define __Pyx_c_quot_double(a, b) ((a)/(b)) #define __Pyx_c_neg_double(a) (-(a)) #ifdef __cplusplus #define __Pyx_c_is_zero_double(z) ((z)==(double)0) #define __Pyx_c_conj_double(z) (::std::conj(z)) #if 1 #define __Pyx_c_abs_double(z) (::std::abs(z)) #define __Pyx_c_pow_double(a, b) (::std::pow(a, b)) #endif #else #define __Pyx_c_is_zero_double(z) ((z)==0) #define __Pyx_c_conj_double(z) (conj(z)) #if 1 #define __Pyx_c_abs_double(z) (cabs(z)) #define __Pyx_c_pow_double(a, b) (cpow(a, b)) #endif #endif #else static CYTHON_INLINE int __Pyx_c_eq_double(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum_double(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff_double(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod_double(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot_double(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg_double(__pyx_t_double_complex); static CYTHON_INLINE int __Pyx_c_is_zero_double(__pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj_double(__pyx_t_double_complex); #if 1 static CYTHON_INLINE double __Pyx_c_abs_double(__pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow_double(__pyx_t_double_complex, __pyx_t_double_complex); #endif #endif /* CIntToPy.proto */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value); /* CIntToPy.proto */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_enum__NPY_TYPES(enum NPY_TYPES value); /* CIntFromPy.proto */ static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *); /* CIntToPy.proto */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value); /* CIntFromPy.proto */ static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *); /* FastTypeChecks.proto */ #if CYTHON_COMPILING_IN_CPYTHON #define __Pyx_TypeCheck(obj, type) __Pyx_IsSubtype(Py_TYPE(obj), (PyTypeObject *)type) static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b); static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject *type); static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *type1, PyObject *type2); #else #define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type) #define __Pyx_PyErr_GivenExceptionMatches(err, type) PyErr_GivenExceptionMatches(err, type) #define __Pyx_PyErr_GivenExceptionMatches2(err, type1, type2) (PyErr_GivenExceptionMatches(err, type1) || PyErr_GivenExceptionMatches(err, type2)) #endif /* CheckBinaryVersion.proto */ static int __Pyx_check_binary_version(void); /* FunctionExport.proto */ static int __Pyx_ExportFunction(const char *name, void (*f)(void), const char *sig); /* PyIdentifierFromString.proto */ #if !defined(__Pyx_PyIdentifier_FromString) #if PY_MAJOR_VERSION < 3 #define __Pyx_PyIdentifier_FromString(s) PyString_FromString(s) #else #define __Pyx_PyIdentifier_FromString(s) PyUnicode_FromString(s) #endif #endif /* ModuleImport.proto */ static PyObject *__Pyx_ImportModule(const char *name); /* TypeImport.proto */ static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class_name, 
size_t size, int strict); /* InitStrings.proto */ static int __Pyx_InitStrings(__Pyx_StringTabEntry *t); /* Module declarations from 'cpython.buffer' */ /* Module declarations from 'libc.string' */ /* Module declarations from 'libc.stdio' */ /* Module declarations from '__builtin__' */ /* Module declarations from 'cpython.type' */ static PyTypeObject *__pyx_ptype_7cpython_4type_type = 0; /* Module declarations from 'cpython.version' */ /* Module declarations from 'cpython.exc' */ /* Module declarations from 'cpython.module' */ /* Module declarations from 'cpython.mem' */ /* Module declarations from 'cpython.tuple' */ /* Module declarations from 'cpython.list' */ /* Module declarations from 'cpython.sequence' */ /* Module declarations from 'cpython.mapping' */ /* Module declarations from 'cpython.iterator' */ /* Module declarations from 'cpython.number' */ /* Module declarations from 'cpython.int' */ /* Module declarations from '__builtin__' */ /* Module declarations from 'cpython.bool' */ static PyTypeObject *__pyx_ptype_7cpython_4bool_bool = 0; /* Module declarations from 'cpython.long' */ /* Module declarations from 'cpython.float' */ /* Module declarations from '__builtin__' */ /* Module declarations from 'cpython.complex' */ static PyTypeObject *__pyx_ptype_7cpython_7complex_complex = 0; /* Module declarations from 'cpython.string' */ /* Module declarations from 'cpython.unicode' */ /* Module declarations from 'cpython.dict' */ /* Module declarations from 'cpython.instance' */ /* Module declarations from 'cpython.function' */ /* Module declarations from 'cpython.method' */ /* Module declarations from 'cpython.weakref' */ /* Module declarations from 'cpython.getargs' */ /* Module declarations from 'cpython.pythread' */ /* Module declarations from 'cpython.pystate' */ /* Module declarations from 'cpython.cobject' */ /* Module declarations from 'cpython.oldbuffer' */ /* Module declarations from 'cpython.set' */ /* Module declarations from 'cpython.bytes' */ /* Module declarations from 'cpython.pycapsule' */ /* Module declarations from 'cpython' */ /* Module declarations from 'cpython.object' */ /* Module declarations from 'cpython.ref' */ /* Module declarations from 'numpy' */ /* Module declarations from 'numpy' */ static PyTypeObject *__pyx_ptype_5numpy_dtype = 0; static PyTypeObject *__pyx_ptype_5numpy_flatiter = 0; static PyTypeObject *__pyx_ptype_5numpy_broadcast = 0; static PyTypeObject *__pyx_ptype_5numpy_ndarray = 0; static PyTypeObject *__pyx_ptype_5numpy_ufunc = 0; static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *, char *, char *, int *); /*proto*/ /* Module declarations from 'cython' */ /* Module declarations from 'photutils.geometry.core' */ static double __pyx_f_9photutils_8geometry_4core_distance(double, double, double, double); /*proto*/ static double __pyx_f_9photutils_8geometry_4core_area_triangle(double, double, double, double, double, double); /*proto*/ static double __pyx_f_9photutils_8geometry_4core_area_arc_unit(double, double, double, double); /*proto*/ static int __pyx_f_9photutils_8geometry_4core_in_triangle(double, double, double, double, double, double, double, double); /*proto*/ static double __pyx_f_9photutils_8geometry_4core_overlap_area_triangle_unit_circle(double, double, double, double, double, double); /*proto*/ static __pyx_t_9photutils_8geometry_4core_intersections __pyx_f_9photutils_8geometry_4core_circle_line(double, double, double, double); /*proto*/ static __pyx_t_9photutils_8geometry_4core_point 
__pyx_f_9photutils_8geometry_4core_circle_segment_single2(double, double, double, double); /*proto*/ static __pyx_t_9photutils_8geometry_4core_intersections __pyx_f_9photutils_8geometry_4core_circle_segment(double, double, double, double); /*proto*/ #define __Pyx_MODULE_NAME "photutils.geometry.core" int __pyx_module_is_main_photutils__geometry__core = 0; /* Implementation of 'photutils.geometry.core' */ static PyObject *__pyx_builtin_ValueError; static PyObject *__pyx_builtin_range; static PyObject *__pyx_builtin_RuntimeError; static PyObject *__pyx_builtin_ImportError; static const char __pyx_k_np[] = "np"; static const char __pyx_k_pi[] = "pi"; static const char __pyx_k_all[] = "__all__"; static const char __pyx_k_main[] = "__main__"; static const char __pyx_k_test[] = "__test__"; static const char __pyx_k_DTYPE[] = "DTYPE"; static const char __pyx_k_numpy[] = "numpy"; static const char __pyx_k_range[] = "range"; static const char __pyx_k_import[] = "__import__"; static const char __pyx_k_float64[] = "float64"; static const char __pyx_k_ValueError[] = "ValueError"; static const char __pyx_k_ImportError[] = "ImportError"; static const char __pyx_k_RuntimeError[] = "RuntimeError"; static const char __pyx_k_cline_in_traceback[] = "cline_in_traceback"; static const char __pyx_k_elliptical_overlap_grid[] = "elliptical_overlap_grid"; static const char __pyx_k_ndarray_is_not_C_contiguous[] = "ndarray is not C contiguous"; static const char __pyx_k_The_functions_here_are_the_core[] = "The functions here are the core geometry functions."; static const char __pyx_k_numpy_core_multiarray_failed_to[] = "numpy.core.multiarray failed to import"; static const char __pyx_k_unknown_dtype_code_in_numpy_pxd[] = "unknown dtype code in numpy.pxd (%d)"; static const char __pyx_k_ERROR_vertices_did_not_sort_corr[] = "ERROR: vertices did not sort correctly"; static const char __pyx_k_Format_string_allocated_too_shor[] = "Format string allocated too short, see comment in numpy.pxd"; static const char __pyx_k_Non_native_byte_order_not_suppor[] = "Non-native byte order not supported"; static const char __pyx_k_ndarray_is_not_Fortran_contiguou[] = "ndarray is not Fortran contiguous"; static const char __pyx_k_numpy_core_umath_failed_to_impor[] = "numpy.core.umath failed to import"; static const char __pyx_k_Format_string_allocated_too_shor_2[] = "Format string allocated too short."; static PyObject *__pyx_n_s_DTYPE; static PyObject *__pyx_kp_u_ERROR_vertices_did_not_sort_corr; static PyObject *__pyx_kp_u_Format_string_allocated_too_shor; static PyObject *__pyx_kp_u_Format_string_allocated_too_shor_2; static PyObject *__pyx_n_s_ImportError; static PyObject *__pyx_kp_u_Non_native_byte_order_not_suppor; static PyObject *__pyx_n_s_RuntimeError; static PyObject *__pyx_n_s_ValueError; static PyObject *__pyx_n_s_all; static PyObject *__pyx_n_s_cline_in_traceback; static PyObject *__pyx_n_u_elliptical_overlap_grid; static PyObject *__pyx_n_s_float64; static PyObject *__pyx_n_s_import; static PyObject *__pyx_n_s_main; static PyObject *__pyx_kp_u_ndarray_is_not_C_contiguous; static PyObject *__pyx_kp_u_ndarray_is_not_Fortran_contiguou; static PyObject *__pyx_n_s_np; static PyObject *__pyx_n_s_numpy; static PyObject *__pyx_kp_u_numpy_core_multiarray_failed_to; static PyObject *__pyx_kp_u_numpy_core_umath_failed_to_impor; static PyObject *__pyx_n_s_pi; static PyObject *__pyx_n_s_range; static PyObject *__pyx_n_s_test; static PyObject *__pyx_kp_u_unknown_dtype_code_in_numpy_pxd; static int 
__pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */ static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info); /* proto */ static PyObject *__pyx_tuple_; static PyObject *__pyx_tuple__2; static PyObject *__pyx_tuple__3; static PyObject *__pyx_tuple__4; static PyObject *__pyx_tuple__5; static PyObject *__pyx_tuple__6; static PyObject *__pyx_tuple__7; static PyObject *__pyx_tuple__8; static PyObject *__pyx_tuple__9; static PyObject *__pyx_tuple__10; /* "photutils/geometry/core.pyx":40 * * * cdef double floor_sqrt(double x): # <<<<<<<<<<<<<< * """ * In some of the geometrical functions, we have to take the sqrt of a number */ static double __pyx_f_9photutils_8geometry_4core_floor_sqrt(double __pyx_v_x) { double __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("floor_sqrt", 0); /* "photutils/geometry/core.pyx":51 * to be positive on paper. * """ * if x > 0: # <<<<<<<<<<<<<< * return sqrt(x) * else: */ __pyx_t_1 = ((__pyx_v_x > 0.0) != 0); if (__pyx_t_1) { /* "photutils/geometry/core.pyx":52 * """ * if x > 0: * return sqrt(x) # <<<<<<<<<<<<<< * else: * return 0 */ __pyx_r = sqrt(__pyx_v_x); goto __pyx_L0; /* "photutils/geometry/core.pyx":51 * to be positive on paper. * """ * if x > 0: # <<<<<<<<<<<<<< * return sqrt(x) * else: */ } /* "photutils/geometry/core.pyx":54 * return sqrt(x) * else: * return 0 # <<<<<<<<<<<<<< * * # NOTE: The following two functions use cdef because they are not intended to be */ /*else*/ { __pyx_r = 0.0; goto __pyx_L0; } /* "photutils/geometry/core.pyx":40 * * * cdef double floor_sqrt(double x): # <<<<<<<<<<<<<< * """ * In some of the geometrical functions, we have to take the sqrt of a number */ /* function exit code */ __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "photutils/geometry/core.pyx":62 * * * cdef double distance(double x1, double y1, double x2, double y2): # <<<<<<<<<<<<<< * """ * Distance between two points in two dimensions. */ static double __pyx_f_9photutils_8geometry_4core_distance(double __pyx_v_x1, double __pyx_v_y1, double __pyx_v_x2, double __pyx_v_y2) { double __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("distance", 0); /* "photutils/geometry/core.pyx":79 * """ * * return sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2) # <<<<<<<<<<<<<< * * */ __pyx_r = sqrt((pow((__pyx_v_x2 - __pyx_v_x1), 2.0) + pow((__pyx_v_y2 - __pyx_v_y1), 2.0))); goto __pyx_L0; /* "photutils/geometry/core.pyx":62 * * * cdef double distance(double x1, double y1, double x2, double y2): # <<<<<<<<<<<<<< * """ * Distance between two points in two dimensions. */ /* function exit code */ __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "photutils/geometry/core.pyx":82 * * * cdef double area_arc(double x1, double y1, double x2, double y2, double r): # <<<<<<<<<<<<<< * """ * Area of a circle arc with radius r between points (x1, y1) and (x2, y2). */ static double __pyx_f_9photutils_8geometry_4core_area_arc(double __pyx_v_x1, double __pyx_v_y1, double __pyx_v_x2, double __pyx_v_y2, double __pyx_v_r) { double __pyx_v_a; double __pyx_v_theta; double __pyx_r; __Pyx_RefNannyDeclarations double __pyx_t_1; __Pyx_RefNannySetupContext("area_arc", 0); /* "photutils/geometry/core.pyx":92 * * cdef double a, theta * a = distance(x1, y1, x2, y2) # <<<<<<<<<<<<<< * theta = 2. 
* asin(0.5 * a / r) * return 0.5 * r * r * (theta - sin(theta)) */ __pyx_v_a = __pyx_f_9photutils_8geometry_4core_distance(__pyx_v_x1, __pyx_v_y1, __pyx_v_x2, __pyx_v_y2); /* "photutils/geometry/core.pyx":93 * cdef double a, theta * a = distance(x1, y1, x2, y2) * theta = 2. * asin(0.5 * a / r) # <<<<<<<<<<<<<< * return 0.5 * r * r * (theta - sin(theta)) * */ __pyx_t_1 = (0.5 * __pyx_v_a); if (unlikely(__pyx_v_r == 0)) { PyErr_SetString(PyExc_ZeroDivisionError, "float division"); __PYX_ERR(0, 93, __pyx_L1_error) } __pyx_v_theta = (2. * asin((__pyx_t_1 / __pyx_v_r))); /* "photutils/geometry/core.pyx":94 * a = distance(x1, y1, x2, y2) * theta = 2. * asin(0.5 * a / r) * return 0.5 * r * r * (theta - sin(theta)) # <<<<<<<<<<<<<< * * */ __pyx_r = (((0.5 * __pyx_v_r) * __pyx_v_r) * (__pyx_v_theta - sin(__pyx_v_theta))); goto __pyx_L0; /* "photutils/geometry/core.pyx":82 * * * cdef double area_arc(double x1, double y1, double x2, double y2, double r): # <<<<<<<<<<<<<< * """ * Area of a circle arc with radius r between points (x1, y1) and (x2, y2). */ /* function exit code */ __pyx_L1_error:; __Pyx_WriteUnraisable("photutils.geometry.core.area_arc", __pyx_clineno, __pyx_lineno, __pyx_filename, 1, 0); __pyx_r = 0; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "photutils/geometry/core.pyx":97 * * * cdef double area_triangle(double x1, double y1, double x2, double y2, double x3, # <<<<<<<<<<<<<< * double y3): * """ */ static double __pyx_f_9photutils_8geometry_4core_area_triangle(double __pyx_v_x1, double __pyx_v_y1, double __pyx_v_x2, double __pyx_v_y2, double __pyx_v_x3, double __pyx_v_y3) { double __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("area_triangle", 0); /* "photutils/geometry/core.pyx":102 * Area of a triangle defined by three vertices. * """ * return 0.5 * abs(x1 * (y2 - y3) + x2 * (y3 - y1) + x3 * (y1 - y2)) # <<<<<<<<<<<<<< * * */ __pyx_r = (0.5 * fabs((((__pyx_v_x1 * (__pyx_v_y2 - __pyx_v_y3)) + (__pyx_v_x2 * (__pyx_v_y3 - __pyx_v_y1))) + (__pyx_v_x3 * (__pyx_v_y1 - __pyx_v_y2))))); goto __pyx_L0; /* "photutils/geometry/core.pyx":97 * * * cdef double area_triangle(double x1, double y1, double x2, double y2, double x3, # <<<<<<<<<<<<<< * double y3): * """ */ /* function exit code */ __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "photutils/geometry/core.pyx":105 * * * cdef double area_arc_unit(double x1, double y1, double x2, double y2): # <<<<<<<<<<<<<< * """ * Area of a circle arc with radius R between points (x1, y1) and (x2, y2) */ static double __pyx_f_9photutils_8geometry_4core_area_arc_unit(double __pyx_v_x1, double __pyx_v_y1, double __pyx_v_x2, double __pyx_v_y2) { double __pyx_v_a; double __pyx_v_theta; double __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("area_arc_unit", 0); /* "photutils/geometry/core.pyx":114 * """ * cdef double a, theta * a = distance(x1, y1, x2, y2) # <<<<<<<<<<<<<< * theta = 2. * asin(0.5 * a) * return 0.5 * (theta - sin(theta)) */ __pyx_v_a = __pyx_f_9photutils_8geometry_4core_distance(__pyx_v_x1, __pyx_v_y1, __pyx_v_x2, __pyx_v_y2); /* "photutils/geometry/core.pyx":115 * cdef double a, theta * a = distance(x1, y1, x2, y2) * theta = 2. * asin(0.5 * a) # <<<<<<<<<<<<<< * return 0.5 * (theta - sin(theta)) * */ __pyx_v_theta = (2. * asin((0.5 * __pyx_v_a))); /* "photutils/geometry/core.pyx":116 * a = distance(x1, y1, x2, y2) * theta = 2. 
* asin(0.5 * a) * return 0.5 * (theta - sin(theta)) # <<<<<<<<<<<<<< * * */ __pyx_r = (0.5 * (__pyx_v_theta - sin(__pyx_v_theta))); goto __pyx_L0; /* "photutils/geometry/core.pyx":105 * * * cdef double area_arc_unit(double x1, double y1, double x2, double y2): # <<<<<<<<<<<<<< * """ * Area of a circle arc with radius R between points (x1, y1) and (x2, y2) */ /* function exit code */ __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "photutils/geometry/core.pyx":119 * * * cdef int in_triangle(double x, double y, double x1, double y1, double x2, double y2, double x3, double y3): # <<<<<<<<<<<<<< * """ * Check if a point (x,y) is inside a triangle */ static int __pyx_f_9photutils_8geometry_4core_in_triangle(double __pyx_v_x, double __pyx_v_y, double __pyx_v_x1, double __pyx_v_y1, double __pyx_v_x2, double __pyx_v_y2, double __pyx_v_x3, double __pyx_v_y3) { int __pyx_v_c; int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; double __pyx_t_3; double __pyx_t_4; __Pyx_RefNannySetupContext("in_triangle", 0); /* "photutils/geometry/core.pyx":123 * Check if a point (x,y) is inside a triangle * """ * cdef int c = 0 # <<<<<<<<<<<<<< * * c += ((y1 > y) != (y2 > y) and x < (x2 - x1) * (y - y1) / (y2 - y1) + x1) */ __pyx_v_c = 0; /* "photutils/geometry/core.pyx":125 * cdef int c = 0 * * c += ((y1 > y) != (y2 > y) and x < (x2 - x1) * (y - y1) / (y2 - y1) + x1) # <<<<<<<<<<<<<< * c += ((y2 > y) != (y3 > y) and x < (x3 - x2) * (y - y2) / (y3 - y2) + x2) * c += ((y3 > y) != (y1 > y) and x < (x1 - x3) * (y - y3) / (y1 - y3) + x3) */ __pyx_t_2 = ((__pyx_v_y1 > __pyx_v_y) != (__pyx_v_y2 > __pyx_v_y)); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L3_bool_binop_done; } __pyx_t_3 = ((__pyx_v_x2 - __pyx_v_x1) * (__pyx_v_y - __pyx_v_y1)); __pyx_t_4 = (__pyx_v_y2 - __pyx_v_y1); if (unlikely(__pyx_t_4 == 0)) { PyErr_SetString(PyExc_ZeroDivisionError, "float division"); __PYX_ERR(0, 125, __pyx_L1_error) } __pyx_t_2 = (__pyx_v_x < ((__pyx_t_3 / __pyx_t_4) + __pyx_v_x1)); __pyx_t_1 = __pyx_t_2; __pyx_L3_bool_binop_done:; __pyx_v_c = (__pyx_v_c + __pyx_t_1); /* "photutils/geometry/core.pyx":126 * * c += ((y1 > y) != (y2 > y) and x < (x2 - x1) * (y - y1) / (y2 - y1) + x1) * c += ((y2 > y) != (y3 > y) and x < (x3 - x2) * (y - y2) / (y3 - y2) + x2) # <<<<<<<<<<<<<< * c += ((y3 > y) != (y1 > y) and x < (x1 - x3) * (y - y3) / (y1 - y3) + x3) * */ __pyx_t_2 = ((__pyx_v_y2 > __pyx_v_y) != (__pyx_v_y3 > __pyx_v_y)); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L5_bool_binop_done; } __pyx_t_4 = ((__pyx_v_x3 - __pyx_v_x2) * (__pyx_v_y - __pyx_v_y2)); __pyx_t_3 = (__pyx_v_y3 - __pyx_v_y2); if (unlikely(__pyx_t_3 == 0)) { PyErr_SetString(PyExc_ZeroDivisionError, "float division"); __PYX_ERR(0, 126, __pyx_L1_error) } __pyx_t_2 = (__pyx_v_x < ((__pyx_t_4 / __pyx_t_3) + __pyx_v_x2)); __pyx_t_1 = __pyx_t_2; __pyx_L5_bool_binop_done:; __pyx_v_c = (__pyx_v_c + __pyx_t_1); /* "photutils/geometry/core.pyx":127 * c += ((y1 > y) != (y2 > y) and x < (x2 - x1) * (y - y1) / (y2 - y1) + x1) * c += ((y2 > y) != (y3 > y) and x < (x3 - x2) * (y - y2) / (y3 - y2) + x2) * c += ((y3 > y) != (y1 > y) and x < (x1 - x3) * (y - y3) / (y1 - y3) + x3) # <<<<<<<<<<<<<< * * return c % 2 == 1 */ __pyx_t_2 = ((__pyx_v_y3 > __pyx_v_y) != (__pyx_v_y1 > __pyx_v_y)); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L7_bool_binop_done; } __pyx_t_3 = ((__pyx_v_x1 - __pyx_v_x3) * (__pyx_v_y - __pyx_v_y3)); __pyx_t_4 = (__pyx_v_y1 - __pyx_v_y3); if (unlikely(__pyx_t_4 == 0)) { 
PyErr_SetString(PyExc_ZeroDivisionError, "float division"); __PYX_ERR(0, 127, __pyx_L1_error) } __pyx_t_2 = (__pyx_v_x < ((__pyx_t_3 / __pyx_t_4) + __pyx_v_x3)); __pyx_t_1 = __pyx_t_2; __pyx_L7_bool_binop_done:; __pyx_v_c = (__pyx_v_c + __pyx_t_1); /* "photutils/geometry/core.pyx":129 * c += ((y3 > y) != (y1 > y) and x < (x1 - x3) * (y - y3) / (y1 - y3) + x3) * * return c % 2 == 1 # <<<<<<<<<<<<<< * * */ __pyx_r = (__Pyx_mod_long(__pyx_v_c, 2) == 1); goto __pyx_L0; /* "photutils/geometry/core.pyx":119 * * * cdef int in_triangle(double x, double y, double x1, double y1, double x2, double y2, double x3, double y3): # <<<<<<<<<<<<<< * """ * Check if a point (x,y) is inside a triangle */ /* function exit code */ __pyx_L1_error:; __Pyx_WriteUnraisable("photutils.geometry.core.in_triangle", __pyx_clineno, __pyx_lineno, __pyx_filename, 1, 0); __pyx_r = 0; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "photutils/geometry/core.pyx":132 * * * cdef intersections circle_line(double x1, double y1, double x2, double y2): # <<<<<<<<<<<<<< * """Intersection of a line defined by two points with a unit circle""" * */ static __pyx_t_9photutils_8geometry_4core_intersections __pyx_f_9photutils_8geometry_4core_circle_line(double __pyx_v_x1, double __pyx_v_y1, double __pyx_v_x2, double __pyx_v_y2) { double __pyx_v_a; double __pyx_v_b; double __pyx_v_delta; double __pyx_v_dx; double __pyx_v_dy; double __pyx_v_tolerance; __pyx_t_9photutils_8geometry_4core_intersections __pyx_v_inter; __pyx_t_9photutils_8geometry_4core_intersections __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; double __pyx_t_3; double __pyx_t_4; __Pyx_RefNannySetupContext("circle_line", 0); /* "photutils/geometry/core.pyx":136 * * cdef double a, b, delta, dx, dy * cdef double tolerance = 1.e-10 # <<<<<<<<<<<<<< * cdef intersections inter * */ __pyx_v_tolerance = 1.e-10; /* "photutils/geometry/core.pyx":139 * cdef intersections inter * * dx = x2 - x1 # <<<<<<<<<<<<<< * dy = y2 - y1 * */ __pyx_v_dx = (__pyx_v_x2 - __pyx_v_x1); /* "photutils/geometry/core.pyx":140 * * dx = x2 - x1 * dy = y2 - y1 # <<<<<<<<<<<<<< * * if fabs(dx) < tolerance and fabs(dy) < tolerance: */ __pyx_v_dy = (__pyx_v_y2 - __pyx_v_y1); /* "photutils/geometry/core.pyx":142 * dy = y2 - y1 * * if fabs(dx) < tolerance and fabs(dy) < tolerance: # <<<<<<<<<<<<<< * inter.p1.x = 2. * inter.p1.y = 2. */ __pyx_t_2 = ((fabs(__pyx_v_dx) < __pyx_v_tolerance) != 0); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L4_bool_binop_done; } __pyx_t_2 = ((fabs(__pyx_v_dy) < __pyx_v_tolerance) != 0); __pyx_t_1 = __pyx_t_2; __pyx_L4_bool_binop_done:; if (__pyx_t_1) { /* "photutils/geometry/core.pyx":143 * * if fabs(dx) < tolerance and fabs(dy) < tolerance: * inter.p1.x = 2. # <<<<<<<<<<<<<< * inter.p1.y = 2. * inter.p2.x = 2. */ __pyx_v_inter.p1.x = 2.; /* "photutils/geometry/core.pyx":144 * if fabs(dx) < tolerance and fabs(dy) < tolerance: * inter.p1.x = 2. * inter.p1.y = 2. # <<<<<<<<<<<<<< * inter.p2.x = 2. * inter.p2.y = 2. */ __pyx_v_inter.p1.y = 2.; /* "photutils/geometry/core.pyx":145 * inter.p1.x = 2. * inter.p1.y = 2. * inter.p2.x = 2. # <<<<<<<<<<<<<< * inter.p2.y = 2. * */ __pyx_v_inter.p2.x = 2.; /* "photutils/geometry/core.pyx":146 * inter.p1.y = 2. * inter.p2.x = 2. * inter.p2.y = 2. # <<<<<<<<<<<<<< * * elif fabs(dx) > fabs(dy): */ __pyx_v_inter.p2.y = 2.; /* "photutils/geometry/core.pyx":142 * dy = y2 - y1 * * if fabs(dx) < tolerance and fabs(dy) < tolerance: # <<<<<<<<<<<<<< * inter.p1.x = 2. * inter.p1.y = 2. 
*/ goto __pyx_L3; } /* "photutils/geometry/core.pyx":148 * inter.p2.y = 2. * * elif fabs(dx) > fabs(dy): # <<<<<<<<<<<<<< * * # Find the slope and intercept of the line */ __pyx_t_1 = ((fabs(__pyx_v_dx) > fabs(__pyx_v_dy)) != 0); if (__pyx_t_1) { /* "photutils/geometry/core.pyx":151 * * # Find the slope and intercept of the line * a = dy / dx # <<<<<<<<<<<<<< * b = y1 - a * x1 * */ if (unlikely(__pyx_v_dx == 0)) { PyErr_SetString(PyExc_ZeroDivisionError, "float division"); __PYX_ERR(0, 151, __pyx_L1_error) } __pyx_v_a = (__pyx_v_dy / __pyx_v_dx); /* "photutils/geometry/core.pyx":152 * # Find the slope and intercept of the line * a = dy / dx * b = y1 - a * x1 # <<<<<<<<<<<<<< * * # Find the determinant of the quadratic equation */ __pyx_v_b = (__pyx_v_y1 - (__pyx_v_a * __pyx_v_x1)); /* "photutils/geometry/core.pyx":155 * * # Find the determinant of the quadratic equation * delta = 1. + a * a - b * b # <<<<<<<<<<<<<< * if delta > 0.: # solutions exist * */ __pyx_v_delta = ((1. + (__pyx_v_a * __pyx_v_a)) - (__pyx_v_b * __pyx_v_b)); /* "photutils/geometry/core.pyx":156 * # Find the determinant of the quadratic equation * delta = 1. + a * a - b * b * if delta > 0.: # solutions exist # <<<<<<<<<<<<<< * * delta = sqrt(delta) */ __pyx_t_1 = ((__pyx_v_delta > 0.) != 0); if (__pyx_t_1) { /* "photutils/geometry/core.pyx":158 * if delta > 0.: # solutions exist * * delta = sqrt(delta) # <<<<<<<<<<<<<< * * inter.p1.x = (- a * b - delta) / (1. + a * a) */ __pyx_v_delta = sqrt(__pyx_v_delta); /* "photutils/geometry/core.pyx":160 * delta = sqrt(delta) * * inter.p1.x = (- a * b - delta) / (1. + a * a) # <<<<<<<<<<<<<< * inter.p1.y = a * inter.p1.x + b * inter.p2.x = (- a * b + delta) / (1. + a * a) */ __pyx_t_3 = (((-__pyx_v_a) * __pyx_v_b) - __pyx_v_delta); __pyx_t_4 = (1. + (__pyx_v_a * __pyx_v_a)); if (unlikely(__pyx_t_4 == 0)) { PyErr_SetString(PyExc_ZeroDivisionError, "float division"); __PYX_ERR(0, 160, __pyx_L1_error) } __pyx_v_inter.p1.x = (__pyx_t_3 / __pyx_t_4); /* "photutils/geometry/core.pyx":161 * * inter.p1.x = (- a * b - delta) / (1. + a * a) * inter.p1.y = a * inter.p1.x + b # <<<<<<<<<<<<<< * inter.p2.x = (- a * b + delta) / (1. + a * a) * inter.p2.y = a * inter.p2.x + b */ __pyx_v_inter.p1.y = ((__pyx_v_a * __pyx_v_inter.p1.x) + __pyx_v_b); /* "photutils/geometry/core.pyx":162 * inter.p1.x = (- a * b - delta) / (1. + a * a) * inter.p1.y = a * inter.p1.x + b * inter.p2.x = (- a * b + delta) / (1. + a * a) # <<<<<<<<<<<<<< * inter.p2.y = a * inter.p2.x + b * */ __pyx_t_4 = (((-__pyx_v_a) * __pyx_v_b) + __pyx_v_delta); __pyx_t_3 = (1. + (__pyx_v_a * __pyx_v_a)); if (unlikely(__pyx_t_3 == 0)) { PyErr_SetString(PyExc_ZeroDivisionError, "float division"); __PYX_ERR(0, 162, __pyx_L1_error) } __pyx_v_inter.p2.x = (__pyx_t_4 / __pyx_t_3); /* "photutils/geometry/core.pyx":163 * inter.p1.y = a * inter.p1.x + b * inter.p2.x = (- a * b + delta) / (1. + a * a) * inter.p2.y = a * inter.p2.x + b # <<<<<<<<<<<<<< * * else: # no solution, return values > 1 */ __pyx_v_inter.p2.y = ((__pyx_v_a * __pyx_v_inter.p2.x) + __pyx_v_b); /* "photutils/geometry/core.pyx":156 * # Find the determinant of the quadratic equation * delta = 1. + a * a - b * b * if delta > 0.: # solutions exist # <<<<<<<<<<<<<< * * delta = sqrt(delta) */ goto __pyx_L6; } /* "photutils/geometry/core.pyx":166 * * else: # no solution, return values > 1 * inter.p1.x = 2. # <<<<<<<<<<<<<< * inter.p1.y = 2. * inter.p2.x = 2. 
*/ /*else*/ { __pyx_v_inter.p1.x = 2.; /* "photutils/geometry/core.pyx":167 * else: # no solution, return values > 1 * inter.p1.x = 2. * inter.p1.y = 2. # <<<<<<<<<<<<<< * inter.p2.x = 2. * inter.p2.y = 2. */ __pyx_v_inter.p1.y = 2.; /* "photutils/geometry/core.pyx":168 * inter.p1.x = 2. * inter.p1.y = 2. * inter.p2.x = 2. # <<<<<<<<<<<<<< * inter.p2.y = 2. * */ __pyx_v_inter.p2.x = 2.; /* "photutils/geometry/core.pyx":169 * inter.p1.y = 2. * inter.p2.x = 2. * inter.p2.y = 2. # <<<<<<<<<<<<<< * * else: */ __pyx_v_inter.p2.y = 2.; } __pyx_L6:; /* "photutils/geometry/core.pyx":148 * inter.p2.y = 2. * * elif fabs(dx) > fabs(dy): # <<<<<<<<<<<<<< * * # Find the slope and intercept of the line */ goto __pyx_L3; } /* "photutils/geometry/core.pyx":174 * * # Find the slope and intercept of the line * a = dx / dy # <<<<<<<<<<<<<< * b = x1 - a * y1 * */ /*else*/ { if (unlikely(__pyx_v_dy == 0)) { PyErr_SetString(PyExc_ZeroDivisionError, "float division"); __PYX_ERR(0, 174, __pyx_L1_error) } __pyx_v_a = (__pyx_v_dx / __pyx_v_dy); /* "photutils/geometry/core.pyx":175 * # Find the slope and intercept of the line * a = dx / dy * b = x1 - a * y1 # <<<<<<<<<<<<<< * * # Find the determinant of the quadratic equation */ __pyx_v_b = (__pyx_v_x1 - (__pyx_v_a * __pyx_v_y1)); /* "photutils/geometry/core.pyx":178 * * # Find the determinant of the quadratic equation * delta = 1. + a * a - b * b # <<<<<<<<<<<<<< * * if delta > 0.: # solutions exist */ __pyx_v_delta = ((1. + (__pyx_v_a * __pyx_v_a)) - (__pyx_v_b * __pyx_v_b)); /* "photutils/geometry/core.pyx":180 * delta = 1. + a * a - b * b * * if delta > 0.: # solutions exist # <<<<<<<<<<<<<< * * delta = sqrt(delta) */ __pyx_t_1 = ((__pyx_v_delta > 0.) != 0); if (__pyx_t_1) { /* "photutils/geometry/core.pyx":182 * if delta > 0.: # solutions exist * * delta = sqrt(delta) # <<<<<<<<<<<<<< * * inter.p1.y = (- a * b - delta) / (1. + a * a) */ __pyx_v_delta = sqrt(__pyx_v_delta); /* "photutils/geometry/core.pyx":184 * delta = sqrt(delta) * * inter.p1.y = (- a * b - delta) / (1. + a * a) # <<<<<<<<<<<<<< * inter.p1.x = a * inter.p1.y + b * inter.p2.y = (- a * b + delta) / (1. + a * a) */ __pyx_t_3 = (((-__pyx_v_a) * __pyx_v_b) - __pyx_v_delta); __pyx_t_4 = (1. + (__pyx_v_a * __pyx_v_a)); if (unlikely(__pyx_t_4 == 0)) { PyErr_SetString(PyExc_ZeroDivisionError, "float division"); __PYX_ERR(0, 184, __pyx_L1_error) } __pyx_v_inter.p1.y = (__pyx_t_3 / __pyx_t_4); /* "photutils/geometry/core.pyx":185 * * inter.p1.y = (- a * b - delta) / (1. + a * a) * inter.p1.x = a * inter.p1.y + b # <<<<<<<<<<<<<< * inter.p2.y = (- a * b + delta) / (1. + a * a) * inter.p2.x = a * inter.p2.y + b */ __pyx_v_inter.p1.x = ((__pyx_v_a * __pyx_v_inter.p1.y) + __pyx_v_b); /* "photutils/geometry/core.pyx":186 * inter.p1.y = (- a * b - delta) / (1. + a * a) * inter.p1.x = a * inter.p1.y + b * inter.p2.y = (- a * b + delta) / (1. + a * a) # <<<<<<<<<<<<<< * inter.p2.x = a * inter.p2.y + b * */ __pyx_t_4 = (((-__pyx_v_a) * __pyx_v_b) + __pyx_v_delta); __pyx_t_3 = (1. + (__pyx_v_a * __pyx_v_a)); if (unlikely(__pyx_t_3 == 0)) { PyErr_SetString(PyExc_ZeroDivisionError, "float division"); __PYX_ERR(0, 186, __pyx_L1_error) } __pyx_v_inter.p2.y = (__pyx_t_4 / __pyx_t_3); /* "photutils/geometry/core.pyx":187 * inter.p1.x = a * inter.p1.y + b * inter.p2.y = (- a * b + delta) / (1. + a * a) * inter.p2.x = a * inter.p2.y + b # <<<<<<<<<<<<<< * * else: # no solution, return values > 1 */ __pyx_v_inter.p2.x = ((__pyx_v_a * __pyx_v_inter.p2.y) + __pyx_v_b); /* "photutils/geometry/core.pyx":180 * delta = 1. 
+ a * a - b * b * * if delta > 0.: # solutions exist # <<<<<<<<<<<<<< * * delta = sqrt(delta) */ goto __pyx_L7; } /* "photutils/geometry/core.pyx":190 * * else: # no solution, return values > 1 * inter.p1.x = 2. # <<<<<<<<<<<<<< * inter.p1.y = 2. * inter.p2.x = 2. */ /*else*/ { __pyx_v_inter.p1.x = 2.; /* "photutils/geometry/core.pyx":191 * else: # no solution, return values > 1 * inter.p1.x = 2. * inter.p1.y = 2. # <<<<<<<<<<<<<< * inter.p2.x = 2. * inter.p2.y = 2. */ __pyx_v_inter.p1.y = 2.; /* "photutils/geometry/core.pyx":192 * inter.p1.x = 2. * inter.p1.y = 2. * inter.p2.x = 2. # <<<<<<<<<<<<<< * inter.p2.y = 2. * */ __pyx_v_inter.p2.x = 2.; /* "photutils/geometry/core.pyx":193 * inter.p1.y = 2. * inter.p2.x = 2. * inter.p2.y = 2. # <<<<<<<<<<<<<< * * return inter */ __pyx_v_inter.p2.y = 2.; } __pyx_L7:; } __pyx_L3:; /* "photutils/geometry/core.pyx":195 * inter.p2.y = 2. * * return inter # <<<<<<<<<<<<<< * * */ __pyx_r = __pyx_v_inter; goto __pyx_L0; /* "photutils/geometry/core.pyx":132 * * * cdef intersections circle_line(double x1, double y1, double x2, double y2): # <<<<<<<<<<<<<< * """Intersection of a line defined by two points with a unit circle""" * */ /* function exit code */ __pyx_L1_error:; __Pyx_WriteUnraisable("photutils.geometry.core.circle_line", __pyx_clineno, __pyx_lineno, __pyx_filename, 1, 0); __Pyx_pretend_to_initialize(&__pyx_r); __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "photutils/geometry/core.pyx":198 * * * cdef point circle_segment_single2(double x1, double y1, double x2, double y2): # <<<<<<<<<<<<<< * """ * The intersection of a line with the unit circle. The intersection the */ static __pyx_t_9photutils_8geometry_4core_point __pyx_f_9photutils_8geometry_4core_circle_segment_single2(double __pyx_v_x1, double __pyx_v_y1, double __pyx_v_x2, double __pyx_v_y2) { double __pyx_v_dx1; double __pyx_v_dy1; double __pyx_v_dx2; double __pyx_v_dy2; __pyx_t_9photutils_8geometry_4core_intersections __pyx_v_inter; __pyx_t_9photutils_8geometry_4core_point __pyx_v_pt1; __pyx_t_9photutils_8geometry_4core_point __pyx_v_pt2; __pyx_t_9photutils_8geometry_4core_point __pyx_v_pt; __pyx_t_9photutils_8geometry_4core_point __pyx_r; __Pyx_RefNannyDeclarations __pyx_t_9photutils_8geometry_4core_point __pyx_t_1; int __pyx_t_2; __Pyx_RefNannySetupContext("circle_segment_single2", 0); /* "photutils/geometry/core.pyx":208 * cdef point pt1, pt2, pt * * inter = circle_line(x1, y1, x2, y2) # <<<<<<<<<<<<<< * * pt1 = inter.p1 */ __pyx_v_inter = __pyx_f_9photutils_8geometry_4core_circle_line(__pyx_v_x1, __pyx_v_y1, __pyx_v_x2, __pyx_v_y2); /* "photutils/geometry/core.pyx":210 * inter = circle_line(x1, y1, x2, y2) * * pt1 = inter.p1 # <<<<<<<<<<<<<< * pt2 = inter.p2 * */ __pyx_t_1 = __pyx_v_inter.p1; __pyx_v_pt1 = __pyx_t_1; /* "photutils/geometry/core.pyx":211 * * pt1 = inter.p1 * pt2 = inter.p2 # <<<<<<<<<<<<<< * * # Can be optimized, but just checking for correctness right now */ __pyx_t_1 = __pyx_v_inter.p2; __pyx_v_pt2 = __pyx_t_1; /* "photutils/geometry/core.pyx":214 * * # Can be optimized, but just checking for correctness right now * dx1 = fabs(pt1.x - x2) # <<<<<<<<<<<<<< * dy1 = fabs(pt1.y - y2) * dx2 = fabs(pt2.x - x2) */ __pyx_v_dx1 = fabs((__pyx_v_pt1.x - __pyx_v_x2)); /* "photutils/geometry/core.pyx":215 * # Can be optimized, but just checking for correctness right now * dx1 = fabs(pt1.x - x2) * dy1 = fabs(pt1.y - y2) # <<<<<<<<<<<<<< * dx2 = fabs(pt2.x - x2) * dy2 = fabs(pt2.y - y2) */ __pyx_v_dy1 = fabs((__pyx_v_pt1.y - __pyx_v_y2)); /* 
"photutils/geometry/core.pyx":216 * dx1 = fabs(pt1.x - x2) * dy1 = fabs(pt1.y - y2) * dx2 = fabs(pt2.x - x2) # <<<<<<<<<<<<<< * dy2 = fabs(pt2.y - y2) * */ __pyx_v_dx2 = fabs((__pyx_v_pt2.x - __pyx_v_x2)); /* "photutils/geometry/core.pyx":217 * dy1 = fabs(pt1.y - y2) * dx2 = fabs(pt2.x - x2) * dy2 = fabs(pt2.y - y2) # <<<<<<<<<<<<<< * * if dx1 > dy1: # compare based on x-axis */ __pyx_v_dy2 = fabs((__pyx_v_pt2.y - __pyx_v_y2)); /* "photutils/geometry/core.pyx":219 * dy2 = fabs(pt2.y - y2) * * if dx1 > dy1: # compare based on x-axis # <<<<<<<<<<<<<< * if dx1 > dx2: * pt = pt2 */ __pyx_t_2 = ((__pyx_v_dx1 > __pyx_v_dy1) != 0); if (__pyx_t_2) { /* "photutils/geometry/core.pyx":220 * * if dx1 > dy1: # compare based on x-axis * if dx1 > dx2: # <<<<<<<<<<<<<< * pt = pt2 * else: */ __pyx_t_2 = ((__pyx_v_dx1 > __pyx_v_dx2) != 0); if (__pyx_t_2) { /* "photutils/geometry/core.pyx":221 * if dx1 > dy1: # compare based on x-axis * if dx1 > dx2: * pt = pt2 # <<<<<<<<<<<<<< * else: * pt = pt1 */ __pyx_v_pt = __pyx_v_pt2; /* "photutils/geometry/core.pyx":220 * * if dx1 > dy1: # compare based on x-axis * if dx1 > dx2: # <<<<<<<<<<<<<< * pt = pt2 * else: */ goto __pyx_L4; } /* "photutils/geometry/core.pyx":223 * pt = pt2 * else: * pt = pt1 # <<<<<<<<<<<<<< * else: * if dy1 > dy2: */ /*else*/ { __pyx_v_pt = __pyx_v_pt1; } __pyx_L4:; /* "photutils/geometry/core.pyx":219 * dy2 = fabs(pt2.y - y2) * * if dx1 > dy1: # compare based on x-axis # <<<<<<<<<<<<<< * if dx1 > dx2: * pt = pt2 */ goto __pyx_L3; } /* "photutils/geometry/core.pyx":225 * pt = pt1 * else: * if dy1 > dy2: # <<<<<<<<<<<<<< * pt = pt2 * else: */ /*else*/ { __pyx_t_2 = ((__pyx_v_dy1 > __pyx_v_dy2) != 0); if (__pyx_t_2) { /* "photutils/geometry/core.pyx":226 * else: * if dy1 > dy2: * pt = pt2 # <<<<<<<<<<<<<< * else: * pt = pt1 */ __pyx_v_pt = __pyx_v_pt2; /* "photutils/geometry/core.pyx":225 * pt = pt1 * else: * if dy1 > dy2: # <<<<<<<<<<<<<< * pt = pt2 * else: */ goto __pyx_L5; } /* "photutils/geometry/core.pyx":228 * pt = pt2 * else: * pt = pt1 # <<<<<<<<<<<<<< * * return pt */ /*else*/ { __pyx_v_pt = __pyx_v_pt1; } __pyx_L5:; } __pyx_L3:; /* "photutils/geometry/core.pyx":230 * pt = pt1 * * return pt # <<<<<<<<<<<<<< * * */ __pyx_r = __pyx_v_pt; goto __pyx_L0; /* "photutils/geometry/core.pyx":198 * * * cdef point circle_segment_single2(double x1, double y1, double x2, double y2): # <<<<<<<<<<<<<< * """ * The intersection of a line with the unit circle. The intersection the */ /* function exit code */ __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "photutils/geometry/core.pyx":233 * * * cdef intersections circle_segment(double x1, double y1, double x2, double y2): # <<<<<<<<<<<<<< * """ * Intersection(s) of a segment with the unit circle. 
Discard any */ static __pyx_t_9photutils_8geometry_4core_intersections __pyx_f_9photutils_8geometry_4core_circle_segment(double __pyx_v_x1, double __pyx_v_y1, double __pyx_v_x2, double __pyx_v_y2) { __pyx_t_9photutils_8geometry_4core_intersections __pyx_v_inter; __pyx_t_9photutils_8geometry_4core_intersections __pyx_v_inter_new; __pyx_t_9photutils_8geometry_4core_point __pyx_v_pt1; __pyx_t_9photutils_8geometry_4core_point __pyx_v_pt2; __pyx_t_9photutils_8geometry_4core_intersections __pyx_r; __Pyx_RefNannyDeclarations __pyx_t_9photutils_8geometry_4core_point __pyx_t_1; int __pyx_t_2; int __pyx_t_3; double __pyx_t_4; double __pyx_t_5; __Pyx_RefNannySetupContext("circle_segment", 0); /* "photutils/geometry/core.pyx":242 * cdef point pt1, pt2 * * inter = circle_line(x1, y1, x2, y2) # <<<<<<<<<<<<<< * * pt1 = inter.p1 */ __pyx_v_inter = __pyx_f_9photutils_8geometry_4core_circle_line(__pyx_v_x1, __pyx_v_y1, __pyx_v_x2, __pyx_v_y2); /* "photutils/geometry/core.pyx":244 * inter = circle_line(x1, y1, x2, y2) * * pt1 = inter.p1 # <<<<<<<<<<<<<< * pt2 = inter.p2 * */ __pyx_t_1 = __pyx_v_inter.p1; __pyx_v_pt1 = __pyx_t_1; /* "photutils/geometry/core.pyx":245 * * pt1 = inter.p1 * pt2 = inter.p2 # <<<<<<<<<<<<<< * * if (pt1.x > x1 and pt1.x > x2) or (pt1.x < x1 and pt1.x < x2) or (pt1.y > y1 and pt1.y > y2) or (pt1.y < y1 and pt1.y < y2): */ __pyx_t_1 = __pyx_v_inter.p2; __pyx_v_pt2 = __pyx_t_1; /* "photutils/geometry/core.pyx":247 * pt2 = inter.p2 * * if (pt1.x > x1 and pt1.x > x2) or (pt1.x < x1 and pt1.x < x2) or (pt1.y > y1 and pt1.y > y2) or (pt1.y < y1 and pt1.y < y2): # <<<<<<<<<<<<<< * pt1.x, pt1.y = 2., 2. * if (pt2.x > x1 and pt2.x > x2) or (pt2.x < x1 and pt2.x < x2) or (pt2.y > y1 and pt2.y > y2) or (pt2.y < y1 and pt2.y < y2): */ __pyx_t_3 = ((__pyx_v_pt1.x > __pyx_v_x1) != 0); if (!__pyx_t_3) { goto __pyx_L5_next_or; } else { } __pyx_t_3 = ((__pyx_v_pt1.x > __pyx_v_x2) != 0); if (!__pyx_t_3) { } else { __pyx_t_2 = __pyx_t_3; goto __pyx_L4_bool_binop_done; } __pyx_L5_next_or:; __pyx_t_3 = ((__pyx_v_pt1.x < __pyx_v_x1) != 0); if (!__pyx_t_3) { goto __pyx_L7_next_or; } else { } __pyx_t_3 = ((__pyx_v_pt1.x < __pyx_v_x2) != 0); if (!__pyx_t_3) { } else { __pyx_t_2 = __pyx_t_3; goto __pyx_L4_bool_binop_done; } __pyx_L7_next_or:; __pyx_t_3 = ((__pyx_v_pt1.y > __pyx_v_y1) != 0); if (!__pyx_t_3) { goto __pyx_L9_next_or; } else { } __pyx_t_3 = ((__pyx_v_pt1.y > __pyx_v_y2) != 0); if (!__pyx_t_3) { } else { __pyx_t_2 = __pyx_t_3; goto __pyx_L4_bool_binop_done; } __pyx_L9_next_or:; __pyx_t_3 = ((__pyx_v_pt1.y < __pyx_v_y1) != 0); if (__pyx_t_3) { } else { __pyx_t_2 = __pyx_t_3; goto __pyx_L4_bool_binop_done; } __pyx_t_3 = ((__pyx_v_pt1.y < __pyx_v_y2) != 0); __pyx_t_2 = __pyx_t_3; __pyx_L4_bool_binop_done:; if (__pyx_t_2) { /* "photutils/geometry/core.pyx":248 * * if (pt1.x > x1 and pt1.x > x2) or (pt1.x < x1 and pt1.x < x2) or (pt1.y > y1 and pt1.y > y2) or (pt1.y < y1 and pt1.y < y2): * pt1.x, pt1.y = 2., 2. # <<<<<<<<<<<<<< * if (pt2.x > x1 and pt2.x > x2) or (pt2.x < x1 and pt2.x < x2) or (pt2.y > y1 and pt2.y > y2) or (pt2.y < y1 and pt2.y < y2): * pt2.x, pt2.y = 2., 2. */ __pyx_t_4 = 2.; __pyx_t_5 = 2.; __pyx_v_pt1.x = __pyx_t_4; __pyx_v_pt1.y = __pyx_t_5; /* "photutils/geometry/core.pyx":247 * pt2 = inter.p2 * * if (pt1.x > x1 and pt1.x > x2) or (pt1.x < x1 and pt1.x < x2) or (pt1.y > y1 and pt1.y > y2) or (pt1.y < y1 and pt1.y < y2): # <<<<<<<<<<<<<< * pt1.x, pt1.y = 2., 2. 
* if (pt2.x > x1 and pt2.x > x2) or (pt2.x < x1 and pt2.x < x2) or (pt2.y > y1 and pt2.y > y2) or (pt2.y < y1 and pt2.y < y2): */ } /* "photutils/geometry/core.pyx":249 * if (pt1.x > x1 and pt1.x > x2) or (pt1.x < x1 and pt1.x < x2) or (pt1.y > y1 and pt1.y > y2) or (pt1.y < y1 and pt1.y < y2): * pt1.x, pt1.y = 2., 2. * if (pt2.x > x1 and pt2.x > x2) or (pt2.x < x1 and pt2.x < x2) or (pt2.y > y1 and pt2.y > y2) or (pt2.y < y1 and pt2.y < y2): # <<<<<<<<<<<<<< * pt2.x, pt2.y = 2., 2. * */ __pyx_t_3 = ((__pyx_v_pt2.x > __pyx_v_x1) != 0); if (!__pyx_t_3) { goto __pyx_L14_next_or; } else { } __pyx_t_3 = ((__pyx_v_pt2.x > __pyx_v_x2) != 0); if (!__pyx_t_3) { } else { __pyx_t_2 = __pyx_t_3; goto __pyx_L13_bool_binop_done; } __pyx_L14_next_or:; __pyx_t_3 = ((__pyx_v_pt2.x < __pyx_v_x1) != 0); if (!__pyx_t_3) { goto __pyx_L16_next_or; } else { } __pyx_t_3 = ((__pyx_v_pt2.x < __pyx_v_x2) != 0); if (!__pyx_t_3) { } else { __pyx_t_2 = __pyx_t_3; goto __pyx_L13_bool_binop_done; } __pyx_L16_next_or:; __pyx_t_3 = ((__pyx_v_pt2.y > __pyx_v_y1) != 0); if (!__pyx_t_3) { goto __pyx_L18_next_or; } else { } __pyx_t_3 = ((__pyx_v_pt2.y > __pyx_v_y2) != 0); if (!__pyx_t_3) { } else { __pyx_t_2 = __pyx_t_3; goto __pyx_L13_bool_binop_done; } __pyx_L18_next_or:; __pyx_t_3 = ((__pyx_v_pt2.y < __pyx_v_y1) != 0); if (__pyx_t_3) { } else { __pyx_t_2 = __pyx_t_3; goto __pyx_L13_bool_binop_done; } __pyx_t_3 = ((__pyx_v_pt2.y < __pyx_v_y2) != 0); __pyx_t_2 = __pyx_t_3; __pyx_L13_bool_binop_done:; if (__pyx_t_2) { /* "photutils/geometry/core.pyx":250 * pt1.x, pt1.y = 2., 2. * if (pt2.x > x1 and pt2.x > x2) or (pt2.x < x1 and pt2.x < x2) or (pt2.y > y1 and pt2.y > y2) or (pt2.y < y1 and pt2.y < y2): * pt2.x, pt2.y = 2., 2. # <<<<<<<<<<<<<< * * if pt1.x > 1. and pt2.x < 2.: */ __pyx_t_5 = 2.; __pyx_t_4 = 2.; __pyx_v_pt2.x = __pyx_t_5; __pyx_v_pt2.y = __pyx_t_4; /* "photutils/geometry/core.pyx":249 * if (pt1.x > x1 and pt1.x > x2) or (pt1.x < x1 and pt1.x < x2) or (pt1.y > y1 and pt1.y > y2) or (pt1.y < y1 and pt1.y < y2): * pt1.x, pt1.y = 2., 2. * if (pt2.x > x1 and pt2.x > x2) or (pt2.x < x1 and pt2.x < x2) or (pt2.y > y1 and pt2.y > y2) or (pt2.y < y1 and pt2.y < y2): # <<<<<<<<<<<<<< * pt2.x, pt2.y = 2., 2. * */ } /* "photutils/geometry/core.pyx":252 * pt2.x, pt2.y = 2., 2. * * if pt1.x > 1. and pt2.x < 2.: # <<<<<<<<<<<<<< * inter_new.p1 = pt1 * inter_new.p2 = pt2 */ __pyx_t_3 = ((__pyx_v_pt1.x > 1.) != 0); if (__pyx_t_3) { } else { __pyx_t_2 = __pyx_t_3; goto __pyx_L22_bool_binop_done; } __pyx_t_3 = ((__pyx_v_pt2.x < 2.) != 0); __pyx_t_2 = __pyx_t_3; __pyx_L22_bool_binop_done:; if (__pyx_t_2) { /* "photutils/geometry/core.pyx":253 * * if pt1.x > 1. and pt2.x < 2.: * inter_new.p1 = pt1 # <<<<<<<<<<<<<< * inter_new.p2 = pt2 * else: */ __pyx_v_inter_new.p1 = __pyx_v_pt1; /* "photutils/geometry/core.pyx":254 * if pt1.x > 1. and pt2.x < 2.: * inter_new.p1 = pt1 * inter_new.p2 = pt2 # <<<<<<<<<<<<<< * else: * inter_new.p1 = pt2 */ __pyx_v_inter_new.p2 = __pyx_v_pt2; /* "photutils/geometry/core.pyx":252 * pt2.x, pt2.y = 2., 2. * * if pt1.x > 1. 
and pt2.x < 2.: # <<<<<<<<<<<<<< * inter_new.p1 = pt1 * inter_new.p2 = pt2 */ goto __pyx_L21; } /* "photutils/geometry/core.pyx":256 * inter_new.p2 = pt2 * else: * inter_new.p1 = pt2 # <<<<<<<<<<<<<< * inter_new.p2 = pt1 * */ /*else*/ { __pyx_v_inter_new.p1 = __pyx_v_pt2; /* "photutils/geometry/core.pyx":257 * else: * inter_new.p1 = pt2 * inter_new.p2 = pt1 # <<<<<<<<<<<<<< * * return inter_new */ __pyx_v_inter_new.p2 = __pyx_v_pt1; } __pyx_L21:; /* "photutils/geometry/core.pyx":259 * inter_new.p2 = pt1 * * return inter_new # <<<<<<<<<<<<<< * * */ __pyx_r = __pyx_v_inter_new; goto __pyx_L0; /* "photutils/geometry/core.pyx":233 * * * cdef intersections circle_segment(double x1, double y1, double x2, double y2): # <<<<<<<<<<<<<< * """ * Intersection(s) of a segment with the unit circle. Discard any */ /* function exit code */ __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "photutils/geometry/core.pyx":262 * * * cdef double overlap_area_triangle_unit_circle(double x1, double y1, double x2, double y2, double x3, double y3): # <<<<<<<<<<<<<< * """ * Given a triangle defined by three points (x1, y1), (x2, y2), and */ static double __pyx_f_9photutils_8geometry_4core_overlap_area_triangle_unit_circle(double __pyx_v_x1, double __pyx_v_y1, double __pyx_v_x2, double __pyx_v_y2, double __pyx_v_x3, double __pyx_v_y3) { double __pyx_v_d1; double __pyx_v_d2; double __pyx_v_d3; PyBoolObject *__pyx_v_in1 = 0; PyBoolObject *__pyx_v_in2 = 0; PyBoolObject *__pyx_v_in3 = 0; PyBoolObject *__pyx_v_on1 = 0; PyBoolObject *__pyx_v_on2 = 0; PyBoolObject *__pyx_v_on3 = 0; double __pyx_v_area; double __pyx_v_PI; __pyx_t_9photutils_8geometry_4core_intersections __pyx_v_inter; __pyx_t_9photutils_8geometry_4core_point __pyx_v_pt1; __pyx_t_9photutils_8geometry_4core_point __pyx_v_pt2; __pyx_t_9photutils_8geometry_4core_point __pyx_v_pt3; __pyx_t_9photutils_8geometry_4core_point __pyx_v_pt4; __pyx_t_9photutils_8geometry_4core_point __pyx_v_pt5; __pyx_t_9photutils_8geometry_4core_point __pyx_v_pt6; PyObject *__pyx_v_intersect13 = NULL; PyObject *__pyx_v_intersect23 = NULL; double __pyx_v_xp; double __pyx_v_yp; double __pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; double __pyx_t_3; int __pyx_t_4; double __pyx_t_5; double __pyx_t_6; double __pyx_t_7; double __pyx_t_8; double __pyx_t_9; double __pyx_t_10; double __pyx_t_11; double __pyx_t_12; int __pyx_t_13; int __pyx_t_14; __pyx_t_9photutils_8geometry_4core_point __pyx_t_15; __pyx_t_9photutils_8geometry_4core_point __pyx_t_16; __Pyx_RefNannySetupContext("overlap_area_triangle_unit_circle", 0); /* "photutils/geometry/core.pyx":272 * cdef bool on1, on2, on3 * cdef double area * cdef double PI = np.pi # <<<<<<<<<<<<<< * cdef intersections inter * cdef point pt1, pt2, pt3, pt4, pt5, pt6, pt_tmp */ __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 272, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_pi); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 272, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_3 = __pyx_PyFloat_AsDouble(__pyx_t_2); if (unlikely((__pyx_t_3 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 272, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_v_PI = __pyx_t_3; /* "photutils/geometry/core.pyx":277 * * # Find distance of all vertices to circle center * d1 = x1 * x1 + y1 * y1 # <<<<<<<<<<<<<< * d2 = x2 * x2 + y2 * y2 * d3 = x3 * x3 + y3 * y3 */ __pyx_v_d1 = 
((__pyx_v_x1 * __pyx_v_x1) + (__pyx_v_y1 * __pyx_v_y1)); /* "photutils/geometry/core.pyx":278 * # Find distance of all vertices to circle center * d1 = x1 * x1 + y1 * y1 * d2 = x2 * x2 + y2 * y2 # <<<<<<<<<<<<<< * d3 = x3 * x3 + y3 * y3 * */ __pyx_v_d2 = ((__pyx_v_x2 * __pyx_v_x2) + (__pyx_v_y2 * __pyx_v_y2)); /* "photutils/geometry/core.pyx":279 * d1 = x1 * x1 + y1 * y1 * d2 = x2 * x2 + y2 * y2 * d3 = x3 * x3 + y3 * y3 # <<<<<<<<<<<<<< * * # Order vertices by distance from origin */ __pyx_v_d3 = ((__pyx_v_x3 * __pyx_v_x3) + (__pyx_v_y3 * __pyx_v_y3)); /* "photutils/geometry/core.pyx":282 * * # Order vertices by distance from origin * if d1 < d2: # <<<<<<<<<<<<<< * if d2 < d3: * pass */ __pyx_t_4 = ((__pyx_v_d1 < __pyx_v_d2) != 0); if (__pyx_t_4) { /* "photutils/geometry/core.pyx":283 * # Order vertices by distance from origin * if d1 < d2: * if d2 < d3: # <<<<<<<<<<<<<< * pass * elif d1 < d3: */ __pyx_t_4 = ((__pyx_v_d2 < __pyx_v_d3) != 0); if (__pyx_t_4) { goto __pyx_L4; } /* "photutils/geometry/core.pyx":285 * if d2 < d3: * pass * elif d1 < d3: # <<<<<<<<<<<<<< * x2, y2, d2, x3, y3, d3 = x3, y3, d3, x2, y2, d2 * else: */ __pyx_t_4 = ((__pyx_v_d1 < __pyx_v_d3) != 0); if (__pyx_t_4) { /* "photutils/geometry/core.pyx":286 * pass * elif d1 < d3: * x2, y2, d2, x3, y3, d3 = x3, y3, d3, x2, y2, d2 # <<<<<<<<<<<<<< * else: * x1, y1, d1, x2, y2, d2, x3, y3, d3 = x3, y3, d3, x1, y1, d1, x2, y2, d2 */ __pyx_t_3 = __pyx_v_x3; __pyx_t_5 = __pyx_v_y3; __pyx_t_6 = __pyx_v_d3; __pyx_t_7 = __pyx_v_x2; __pyx_t_8 = __pyx_v_y2; __pyx_t_9 = __pyx_v_d2; __pyx_v_x2 = __pyx_t_3; __pyx_v_y2 = __pyx_t_5; __pyx_v_d2 = __pyx_t_6; __pyx_v_x3 = __pyx_t_7; __pyx_v_y3 = __pyx_t_8; __pyx_v_d3 = __pyx_t_9; /* "photutils/geometry/core.pyx":285 * if d2 < d3: * pass * elif d1 < d3: # <<<<<<<<<<<<<< * x2, y2, d2, x3, y3, d3 = x3, y3, d3, x2, y2, d2 * else: */ goto __pyx_L4; } /* "photutils/geometry/core.pyx":288 * x2, y2, d2, x3, y3, d3 = x3, y3, d3, x2, y2, d2 * else: * x1, y1, d1, x2, y2, d2, x3, y3, d3 = x3, y3, d3, x1, y1, d1, x2, y2, d2 # <<<<<<<<<<<<<< * * else: */ /*else*/ { __pyx_t_9 = __pyx_v_x3; __pyx_t_8 = __pyx_v_y3; __pyx_t_7 = __pyx_v_d3; __pyx_t_6 = __pyx_v_x1; __pyx_t_5 = __pyx_v_y1; __pyx_t_3 = __pyx_v_d1; __pyx_t_10 = __pyx_v_x2; __pyx_t_11 = __pyx_v_y2; __pyx_t_12 = __pyx_v_d2; __pyx_v_x1 = __pyx_t_9; __pyx_v_y1 = __pyx_t_8; __pyx_v_d1 = __pyx_t_7; __pyx_v_x2 = __pyx_t_6; __pyx_v_y2 = __pyx_t_5; __pyx_v_d2 = __pyx_t_3; __pyx_v_x3 = __pyx_t_10; __pyx_v_y3 = __pyx_t_11; __pyx_v_d3 = __pyx_t_12; } __pyx_L4:; /* "photutils/geometry/core.pyx":282 * * # Order vertices by distance from origin * if d1 < d2: # <<<<<<<<<<<<<< * if d2 < d3: * pass */ goto __pyx_L3; } /* "photutils/geometry/core.pyx":291 * * else: * if d1 < d3: # <<<<<<<<<<<<<< * x1, y1, d1, x2, y2, d2 = x2, y2, d2, x1, y1, d1 * elif d2 < d3: */ /*else*/ { __pyx_t_4 = ((__pyx_v_d1 < __pyx_v_d3) != 0); if (__pyx_t_4) { /* "photutils/geometry/core.pyx":292 * else: * if d1 < d3: * x1, y1, d1, x2, y2, d2 = x2, y2, d2, x1, y1, d1 # <<<<<<<<<<<<<< * elif d2 < d3: * x1, y1, d1, x2, y2, d2, x3, y3, d3 = x2, y2, d2, x3, y3, d3, x1, y1, d1 */ __pyx_t_12 = __pyx_v_x2; __pyx_t_11 = __pyx_v_y2; __pyx_t_10 = __pyx_v_d2; __pyx_t_3 = __pyx_v_x1; __pyx_t_5 = __pyx_v_y1; __pyx_t_6 = __pyx_v_d1; __pyx_v_x1 = __pyx_t_12; __pyx_v_y1 = __pyx_t_11; __pyx_v_d1 = __pyx_t_10; __pyx_v_x2 = __pyx_t_3; __pyx_v_y2 = __pyx_t_5; __pyx_v_d2 = __pyx_t_6; /* "photutils/geometry/core.pyx":291 * * else: * if d1 < d3: # <<<<<<<<<<<<<< * x1, y1, d1, x2, y2, d2 = x2, y2, d2, x1, y1, d1 * 
elif d2 < d3: */ goto __pyx_L5; } /* "photutils/geometry/core.pyx":293 * if d1 < d3: * x1, y1, d1, x2, y2, d2 = x2, y2, d2, x1, y1, d1 * elif d2 < d3: # <<<<<<<<<<<<<< * x1, y1, d1, x2, y2, d2, x3, y3, d3 = x2, y2, d2, x3, y3, d3, x1, y1, d1 * else: */ __pyx_t_4 = ((__pyx_v_d2 < __pyx_v_d3) != 0); if (__pyx_t_4) { /* "photutils/geometry/core.pyx":294 * x1, y1, d1, x2, y2, d2 = x2, y2, d2, x1, y1, d1 * elif d2 < d3: * x1, y1, d1, x2, y2, d2, x3, y3, d3 = x2, y2, d2, x3, y3, d3, x1, y1, d1 # <<<<<<<<<<<<<< * else: * x1, y1, d1, x2, y2, d2, x3, y3, d3 = x3, y3, d3, x2, y2, d2, x1, y1, d1 */ __pyx_t_6 = __pyx_v_x2; __pyx_t_5 = __pyx_v_y2; __pyx_t_3 = __pyx_v_d2; __pyx_t_10 = __pyx_v_x3; __pyx_t_11 = __pyx_v_y3; __pyx_t_12 = __pyx_v_d3; __pyx_t_7 = __pyx_v_x1; __pyx_t_8 = __pyx_v_y1; __pyx_t_9 = __pyx_v_d1; __pyx_v_x1 = __pyx_t_6; __pyx_v_y1 = __pyx_t_5; __pyx_v_d1 = __pyx_t_3; __pyx_v_x2 = __pyx_t_10; __pyx_v_y2 = __pyx_t_11; __pyx_v_d2 = __pyx_t_12; __pyx_v_x3 = __pyx_t_7; __pyx_v_y3 = __pyx_t_8; __pyx_v_d3 = __pyx_t_9; /* "photutils/geometry/core.pyx":293 * if d1 < d3: * x1, y1, d1, x2, y2, d2 = x2, y2, d2, x1, y1, d1 * elif d2 < d3: # <<<<<<<<<<<<<< * x1, y1, d1, x2, y2, d2, x3, y3, d3 = x2, y2, d2, x3, y3, d3, x1, y1, d1 * else: */ goto __pyx_L5; } /* "photutils/geometry/core.pyx":296 * x1, y1, d1, x2, y2, d2, x3, y3, d3 = x2, y2, d2, x3, y3, d3, x1, y1, d1 * else: * x1, y1, d1, x2, y2, d2, x3, y3, d3 = x3, y3, d3, x2, y2, d2, x1, y1, d1 # <<<<<<<<<<<<<< * * if d1 > d2 or d2 > d3 or d1 > d3: */ /*else*/ { __pyx_t_9 = __pyx_v_x3; __pyx_t_8 = __pyx_v_y3; __pyx_t_7 = __pyx_v_d3; __pyx_t_12 = __pyx_v_x2; __pyx_t_11 = __pyx_v_y2; __pyx_t_10 = __pyx_v_d2; __pyx_t_3 = __pyx_v_x1; __pyx_t_5 = __pyx_v_y1; __pyx_t_6 = __pyx_v_d1; __pyx_v_x1 = __pyx_t_9; __pyx_v_y1 = __pyx_t_8; __pyx_v_d1 = __pyx_t_7; __pyx_v_x2 = __pyx_t_12; __pyx_v_y2 = __pyx_t_11; __pyx_v_d2 = __pyx_t_10; __pyx_v_x3 = __pyx_t_3; __pyx_v_y3 = __pyx_t_5; __pyx_v_d3 = __pyx_t_6; } __pyx_L5:; } __pyx_L3:; /* "photutils/geometry/core.pyx":298 * x1, y1, d1, x2, y2, d2, x3, y3, d3 = x3, y3, d3, x2, y2, d2, x1, y1, d1 * * if d1 > d2 or d2 > d3 or d1 > d3: # <<<<<<<<<<<<<< * raise Exception("ERROR: vertices did not sort correctly") * */ __pyx_t_13 = ((__pyx_v_d1 > __pyx_v_d2) != 0); if (!__pyx_t_13) { } else { __pyx_t_4 = __pyx_t_13; goto __pyx_L7_bool_binop_done; } __pyx_t_13 = ((__pyx_v_d2 > __pyx_v_d3) != 0); if (!__pyx_t_13) { } else { __pyx_t_4 = __pyx_t_13; goto __pyx_L7_bool_binop_done; } __pyx_t_13 = ((__pyx_v_d1 > __pyx_v_d3) != 0); __pyx_t_4 = __pyx_t_13; __pyx_L7_bool_binop_done:; if (__pyx_t_4) { /* "photutils/geometry/core.pyx":299 * * if d1 > d2 or d2 > d3 or d1 > d3: * raise Exception("ERROR: vertices did not sort correctly") # <<<<<<<<<<<<<< * * # Determine number of vertices inside circle */ __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)(&((PyTypeObject*)PyExc_Exception)[0])), __pyx_tuple_, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 299, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_Raise(__pyx_t_2, 0, 0, 0); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __PYX_ERR(0, 299, __pyx_L1_error) /* "photutils/geometry/core.pyx":298 * x1, y1, d1, x2, y2, d2, x3, y3, d3 = x3, y3, d3, x2, y2, d2, x1, y1, d1 * * if d1 > d2 or d2 > d3 or d1 > d3: # <<<<<<<<<<<<<< * raise Exception("ERROR: vertices did not sort correctly") * */ } /* "photutils/geometry/core.pyx":302 * * # Determine number of vertices inside circle * in1 = d1 < 1 # <<<<<<<<<<<<<< * in2 = d2 < 1 * in3 = d3 < 1 */ __pyx_t_2 = __Pyx_PyBool_FromLong((__pyx_v_d1 < 1.0)); if 
(unlikely(!__pyx_t_2)) __PYX_ERR(0, 302, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); if (!(likely(__Pyx_TypeTest(__pyx_t_2, __pyx_ptype_7cpython_4bool_bool)))) __PYX_ERR(0, 302, __pyx_L1_error) __pyx_v_in1 = ((PyBoolObject *)__pyx_t_2); __pyx_t_2 = 0; /* "photutils/geometry/core.pyx":303 * # Determine number of vertices inside circle * in1 = d1 < 1 * in2 = d2 < 1 # <<<<<<<<<<<<<< * in3 = d3 < 1 * */ __pyx_t_2 = __Pyx_PyBool_FromLong((__pyx_v_d2 < 1.0)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 303, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); if (!(likely(__Pyx_TypeTest(__pyx_t_2, __pyx_ptype_7cpython_4bool_bool)))) __PYX_ERR(0, 303, __pyx_L1_error) __pyx_v_in2 = ((PyBoolObject *)__pyx_t_2); __pyx_t_2 = 0; /* "photutils/geometry/core.pyx":304 * in1 = d1 < 1 * in2 = d2 < 1 * in3 = d3 < 1 # <<<<<<<<<<<<<< * * # Determine which vertices are on the circle */ __pyx_t_2 = __Pyx_PyBool_FromLong((__pyx_v_d3 < 1.0)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 304, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); if (!(likely(__Pyx_TypeTest(__pyx_t_2, __pyx_ptype_7cpython_4bool_bool)))) __PYX_ERR(0, 304, __pyx_L1_error) __pyx_v_in3 = ((PyBoolObject *)__pyx_t_2); __pyx_t_2 = 0; /* "photutils/geometry/core.pyx":307 * * # Determine which vertices are on the circle * on1 = fabs(d1 - 1) < 1.e-10 # <<<<<<<<<<<<<< * on2 = fabs(d2 - 1) < 1.e-10 * on3 = fabs(d3 - 1) < 1.e-10 */ __pyx_t_2 = __Pyx_PyBool_FromLong((fabs((__pyx_v_d1 - 1.0)) < 1.e-10)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 307, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); if (!(likely(__Pyx_TypeTest(__pyx_t_2, __pyx_ptype_7cpython_4bool_bool)))) __PYX_ERR(0, 307, __pyx_L1_error) __pyx_v_on1 = ((PyBoolObject *)__pyx_t_2); __pyx_t_2 = 0; /* "photutils/geometry/core.pyx":308 * # Determine which vertices are on the circle * on1 = fabs(d1 - 1) < 1.e-10 * on2 = fabs(d2 - 1) < 1.e-10 # <<<<<<<<<<<<<< * on3 = fabs(d3 - 1) < 1.e-10 * */ __pyx_t_2 = __Pyx_PyBool_FromLong((fabs((__pyx_v_d2 - 1.0)) < 1.e-10)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 308, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); if (!(likely(__Pyx_TypeTest(__pyx_t_2, __pyx_ptype_7cpython_4bool_bool)))) __PYX_ERR(0, 308, __pyx_L1_error) __pyx_v_on2 = ((PyBoolObject *)__pyx_t_2); __pyx_t_2 = 0; /* "photutils/geometry/core.pyx":309 * on1 = fabs(d1 - 1) < 1.e-10 * on2 = fabs(d2 - 1) < 1.e-10 * on3 = fabs(d3 - 1) < 1.e-10 # <<<<<<<<<<<<<< * * if on3 or in3: # triangle is completely in circle */ __pyx_t_2 = __Pyx_PyBool_FromLong((fabs((__pyx_v_d3 - 1.0)) < 1.e-10)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 309, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); if (!(likely(__Pyx_TypeTest(__pyx_t_2, __pyx_ptype_7cpython_4bool_bool)))) __PYX_ERR(0, 309, __pyx_L1_error) __pyx_v_on3 = ((PyBoolObject *)__pyx_t_2); __pyx_t_2 = 0; /* "photutils/geometry/core.pyx":311 * on3 = fabs(d3 - 1) < 1.e-10 * * if on3 or in3: # triangle is completely in circle # <<<<<<<<<<<<<< * * area = area_triangle(x1, y1, x2, y2, x3, y3) */ __pyx_t_13 = __Pyx_PyObject_IsTrue(((PyObject *)__pyx_v_on3)); if (unlikely(__pyx_t_13 < 0)) __PYX_ERR(0, 311, __pyx_L1_error) if (!__pyx_t_13) { } else { __pyx_t_4 = __pyx_t_13; goto __pyx_L11_bool_binop_done; } __pyx_t_13 = __Pyx_PyObject_IsTrue(((PyObject *)__pyx_v_in3)); if (unlikely(__pyx_t_13 < 0)) __PYX_ERR(0, 311, __pyx_L1_error) __pyx_t_4 = __pyx_t_13; __pyx_L11_bool_binop_done:; if (__pyx_t_4) { /* "photutils/geometry/core.pyx":313 * if on3 or in3: # triangle is completely in circle * * area = area_triangle(x1, y1, x2, y2, x3, y3) # <<<<<<<<<<<<<< * * elif in2 or on2: */ __pyx_v_area = 
__pyx_f_9photutils_8geometry_4core_area_triangle(__pyx_v_x1, __pyx_v_y1, __pyx_v_x2, __pyx_v_y2, __pyx_v_x3, __pyx_v_y3); /* "photutils/geometry/core.pyx":311 * on3 = fabs(d3 - 1) < 1.e-10 * * if on3 or in3: # triangle is completely in circle # <<<<<<<<<<<<<< * * area = area_triangle(x1, y1, x2, y2, x3, y3) */ goto __pyx_L10; } /* "photutils/geometry/core.pyx":315 * area = area_triangle(x1, y1, x2, y2, x3, y3) * * elif in2 or on2: # <<<<<<<<<<<<<< * # If vertex 1 or 2 are on the edge of the circle, then we use the dot * # product to vertex 3 to determine whether an intersection takes place. */ __pyx_t_13 = __Pyx_PyObject_IsTrue(((PyObject *)__pyx_v_in2)); if (unlikely(__pyx_t_13 < 0)) __PYX_ERR(0, 315, __pyx_L1_error) if (!__pyx_t_13) { } else { __pyx_t_4 = __pyx_t_13; goto __pyx_L13_bool_binop_done; } __pyx_t_13 = __Pyx_PyObject_IsTrue(((PyObject *)__pyx_v_on2)); if (unlikely(__pyx_t_13 < 0)) __PYX_ERR(0, 315, __pyx_L1_error) __pyx_t_4 = __pyx_t_13; __pyx_L13_bool_binop_done:; if (__pyx_t_4) { /* "photutils/geometry/core.pyx":318 * # If vertex 1 or 2 are on the edge of the circle, then we use the dot * # product to vertex 3 to determine whether an intersection takes place. * intersect13 = not on1 or x1 * (x3 - x1) + y1 * (y3 - y1) < 0. # <<<<<<<<<<<<<< * intersect23 = not on2 or x2 * (x3 - x2) + y2 * (y3 - y2) < 0. * if intersect13 and intersect23 and not on2: */ __pyx_t_4 = __Pyx_PyObject_IsTrue(((PyObject *)__pyx_v_on1)); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(0, 318, __pyx_L1_error) __pyx_t_13 = (!__pyx_t_4); if (!__pyx_t_13) { } else { __pyx_t_1 = __Pyx_PyBool_FromLong(__pyx_t_13); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 318, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L15_bool_binop_done; } __pyx_t_13 = (((__pyx_v_x1 * (__pyx_v_x3 - __pyx_v_x1)) + (__pyx_v_y1 * (__pyx_v_y3 - __pyx_v_y1))) < 0.); __pyx_t_1 = __Pyx_PyBool_FromLong(__pyx_t_13); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 318, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __pyx_t_1; __pyx_t_1 = 0; __pyx_L15_bool_binop_done:; __pyx_v_intersect13 = __pyx_t_2; __pyx_t_2 = 0; /* "photutils/geometry/core.pyx":319 * # product to vertex 3 to determine whether an intersection takes place. * intersect13 = not on1 or x1 * (x3 - x1) + y1 * (y3 - y1) < 0. * intersect23 = not on2 or x2 * (x3 - x2) + y2 * (y3 - y2) < 0. # <<<<<<<<<<<<<< * if intersect13 and intersect23 and not on2: * pt1 = circle_segment_single2(x1, y1, x3, y3) */ __pyx_t_13 = __Pyx_PyObject_IsTrue(((PyObject *)__pyx_v_on2)); if (unlikely(__pyx_t_13 < 0)) __PYX_ERR(0, 319, __pyx_L1_error) __pyx_t_4 = (!__pyx_t_13); if (!__pyx_t_4) { } else { __pyx_t_1 = __Pyx_PyBool_FromLong(__pyx_t_4); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 319, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L17_bool_binop_done; } __pyx_t_4 = (((__pyx_v_x2 * (__pyx_v_x3 - __pyx_v_x2)) + (__pyx_v_y2 * (__pyx_v_y3 - __pyx_v_y2))) < 0.); __pyx_t_1 = __Pyx_PyBool_FromLong(__pyx_t_4); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 319, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __pyx_t_1; __pyx_t_1 = 0; __pyx_L17_bool_binop_done:; __pyx_v_intersect23 = __pyx_t_2; __pyx_t_2 = 0; /* "photutils/geometry/core.pyx":320 * intersect13 = not on1 or x1 * (x3 - x1) + y1 * (y3 - y1) < 0. * intersect23 = not on2 or x2 * (x3 - x2) + y2 * (y3 - y2) < 0. 
* if intersect13 and intersect23 and not on2: # <<<<<<<<<<<<<< * pt1 = circle_segment_single2(x1, y1, x3, y3) * pt2 = circle_segment_single2(x2, y2, x3, y3) */ __pyx_t_13 = __Pyx_PyObject_IsTrue(__pyx_v_intersect13); if (unlikely(__pyx_t_13 < 0)) __PYX_ERR(0, 320, __pyx_L1_error) if (__pyx_t_13) { } else { __pyx_t_4 = __pyx_t_13; goto __pyx_L20_bool_binop_done; } __pyx_t_13 = __Pyx_PyObject_IsTrue(__pyx_v_intersect23); if (unlikely(__pyx_t_13 < 0)) __PYX_ERR(0, 320, __pyx_L1_error) if (__pyx_t_13) { } else { __pyx_t_4 = __pyx_t_13; goto __pyx_L20_bool_binop_done; } __pyx_t_13 = __Pyx_PyObject_IsTrue(((PyObject *)__pyx_v_on2)); if (unlikely(__pyx_t_13 < 0)) __PYX_ERR(0, 320, __pyx_L1_error) __pyx_t_14 = ((!__pyx_t_13) != 0); __pyx_t_4 = __pyx_t_14; __pyx_L20_bool_binop_done:; if (__pyx_t_4) { /* "photutils/geometry/core.pyx":321 * intersect23 = not on2 or x2 * (x3 - x2) + y2 * (y3 - y2) < 0. * if intersect13 and intersect23 and not on2: * pt1 = circle_segment_single2(x1, y1, x3, y3) # <<<<<<<<<<<<<< * pt2 = circle_segment_single2(x2, y2, x3, y3) * area = area_triangle(x1, y1, x2, y2, pt1.x, pt1.y) \ */ __pyx_v_pt1 = __pyx_f_9photutils_8geometry_4core_circle_segment_single2(__pyx_v_x1, __pyx_v_y1, __pyx_v_x3, __pyx_v_y3); /* "photutils/geometry/core.pyx":322 * if intersect13 and intersect23 and not on2: * pt1 = circle_segment_single2(x1, y1, x3, y3) * pt2 = circle_segment_single2(x2, y2, x3, y3) # <<<<<<<<<<<<<< * area = area_triangle(x1, y1, x2, y2, pt1.x, pt1.y) \ * + area_triangle(x2, y2, pt1.x, pt1.y, pt2.x, pt2.y) \ */ __pyx_v_pt2 = __pyx_f_9photutils_8geometry_4core_circle_segment_single2(__pyx_v_x2, __pyx_v_y2, __pyx_v_x3, __pyx_v_y3); /* "photutils/geometry/core.pyx":325 * area = area_triangle(x1, y1, x2, y2, pt1.x, pt1.y) \ * + area_triangle(x2, y2, pt1.x, pt1.y, pt2.x, pt2.y) \ * + area_arc_unit(pt1.x, pt1.y, pt2.x, pt2.y) # <<<<<<<<<<<<<< * elif intersect13: * pt1 = circle_segment_single2(x1, y1, x3, y3) */ __pyx_v_area = ((__pyx_f_9photutils_8geometry_4core_area_triangle(__pyx_v_x1, __pyx_v_y1, __pyx_v_x2, __pyx_v_y2, __pyx_v_pt1.x, __pyx_v_pt1.y) + __pyx_f_9photutils_8geometry_4core_area_triangle(__pyx_v_x2, __pyx_v_y2, __pyx_v_pt1.x, __pyx_v_pt1.y, __pyx_v_pt2.x, __pyx_v_pt2.y)) + __pyx_f_9photutils_8geometry_4core_area_arc_unit(__pyx_v_pt1.x, __pyx_v_pt1.y, __pyx_v_pt2.x, __pyx_v_pt2.y)); /* "photutils/geometry/core.pyx":320 * intersect13 = not on1 or x1 * (x3 - x1) + y1 * (y3 - y1) < 0. * intersect23 = not on2 or x2 * (x3 - x2) + y2 * (y3 - y2) < 0. 
* if intersect13 and intersect23 and not on2: # <<<<<<<<<<<<<< * pt1 = circle_segment_single2(x1, y1, x3, y3) * pt2 = circle_segment_single2(x2, y2, x3, y3) */ goto __pyx_L19; } /* "photutils/geometry/core.pyx":326 * + area_triangle(x2, y2, pt1.x, pt1.y, pt2.x, pt2.y) \ * + area_arc_unit(pt1.x, pt1.y, pt2.x, pt2.y) * elif intersect13: # <<<<<<<<<<<<<< * pt1 = circle_segment_single2(x1, y1, x3, y3) * area = area_triangle(x1, y1, x2, y2, pt1.x, pt1.y) \ */ __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_v_intersect13); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(0, 326, __pyx_L1_error) if (__pyx_t_4) { /* "photutils/geometry/core.pyx":327 * + area_arc_unit(pt1.x, pt1.y, pt2.x, pt2.y) * elif intersect13: * pt1 = circle_segment_single2(x1, y1, x3, y3) # <<<<<<<<<<<<<< * area = area_triangle(x1, y1, x2, y2, pt1.x, pt1.y) \ * + area_arc_unit(x2, y2, pt1.x, pt1.y) */ __pyx_v_pt1 = __pyx_f_9photutils_8geometry_4core_circle_segment_single2(__pyx_v_x1, __pyx_v_y1, __pyx_v_x3, __pyx_v_y3); /* "photutils/geometry/core.pyx":329 * pt1 = circle_segment_single2(x1, y1, x3, y3) * area = area_triangle(x1, y1, x2, y2, pt1.x, pt1.y) \ * + area_arc_unit(x2, y2, pt1.x, pt1.y) # <<<<<<<<<<<<<< * elif intersect23: * pt2 = circle_segment_single2(x2, y2, x3, y3) */ __pyx_v_area = (__pyx_f_9photutils_8geometry_4core_area_triangle(__pyx_v_x1, __pyx_v_y1, __pyx_v_x2, __pyx_v_y2, __pyx_v_pt1.x, __pyx_v_pt1.y) + __pyx_f_9photutils_8geometry_4core_area_arc_unit(__pyx_v_x2, __pyx_v_y2, __pyx_v_pt1.x, __pyx_v_pt1.y)); /* "photutils/geometry/core.pyx":326 * + area_triangle(x2, y2, pt1.x, pt1.y, pt2.x, pt2.y) \ * + area_arc_unit(pt1.x, pt1.y, pt2.x, pt2.y) * elif intersect13: # <<<<<<<<<<<<<< * pt1 = circle_segment_single2(x1, y1, x3, y3) * area = area_triangle(x1, y1, x2, y2, pt1.x, pt1.y) \ */ goto __pyx_L19; } /* "photutils/geometry/core.pyx":330 * area = area_triangle(x1, y1, x2, y2, pt1.x, pt1.y) \ * + area_arc_unit(x2, y2, pt1.x, pt1.y) * elif intersect23: # <<<<<<<<<<<<<< * pt2 = circle_segment_single2(x2, y2, x3, y3) * area = area_triangle(x1, y1, x2, y2, pt2.x, pt2.y) \ */ __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_v_intersect23); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(0, 330, __pyx_L1_error) if (__pyx_t_4) { /* "photutils/geometry/core.pyx":331 * + area_arc_unit(x2, y2, pt1.x, pt1.y) * elif intersect23: * pt2 = circle_segment_single2(x2, y2, x3, y3) # <<<<<<<<<<<<<< * area = area_triangle(x1, y1, x2, y2, pt2.x, pt2.y) \ * + area_arc_unit(x1, y1, pt2.x, pt2.y) */ __pyx_v_pt2 = __pyx_f_9photutils_8geometry_4core_circle_segment_single2(__pyx_v_x2, __pyx_v_y2, __pyx_v_x3, __pyx_v_y3); /* "photutils/geometry/core.pyx":333 * pt2 = circle_segment_single2(x2, y2, x3, y3) * area = area_triangle(x1, y1, x2, y2, pt2.x, pt2.y) \ * + area_arc_unit(x1, y1, pt2.x, pt2.y) # <<<<<<<<<<<<<< * else: * area = area_arc_unit(x1, y1, x2, y2) */ __pyx_v_area = (__pyx_f_9photutils_8geometry_4core_area_triangle(__pyx_v_x1, __pyx_v_y1, __pyx_v_x2, __pyx_v_y2, __pyx_v_pt2.x, __pyx_v_pt2.y) + __pyx_f_9photutils_8geometry_4core_area_arc_unit(__pyx_v_x1, __pyx_v_y1, __pyx_v_pt2.x, __pyx_v_pt2.y)); /* "photutils/geometry/core.pyx":330 * area = area_triangle(x1, y1, x2, y2, pt1.x, pt1.y) \ * + area_arc_unit(x2, y2, pt1.x, pt1.y) * elif intersect23: # <<<<<<<<<<<<<< * pt2 = circle_segment_single2(x2, y2, x3, y3) * area = area_triangle(x1, y1, x2, y2, pt2.x, pt2.y) \ */ goto __pyx_L19; } /* "photutils/geometry/core.pyx":335 * + area_arc_unit(x1, y1, pt2.x, pt2.y) * else: * area = area_arc_unit(x1, y1, x2, y2) # <<<<<<<<<<<<<< * * elif on1: */ /*else*/ { 
__pyx_v_area = __pyx_f_9photutils_8geometry_4core_area_arc_unit(__pyx_v_x1, __pyx_v_y1, __pyx_v_x2, __pyx_v_y2); } __pyx_L19:; /* "photutils/geometry/core.pyx":315 * area = area_triangle(x1, y1, x2, y2, x3, y3) * * elif in2 or on2: # <<<<<<<<<<<<<< * # If vertex 1 or 2 are on the edge of the circle, then we use the dot * # product to vertex 3 to determine whether an intersection takes place. */ goto __pyx_L10; } /* "photutils/geometry/core.pyx":337 * area = area_arc_unit(x1, y1, x2, y2) * * elif on1: # <<<<<<<<<<<<<< * # The triangle is outside the circle * area = 0.0 */ __pyx_t_4 = __Pyx_PyObject_IsTrue(((PyObject *)__pyx_v_on1)); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(0, 337, __pyx_L1_error) if (__pyx_t_4) { /* "photutils/geometry/core.pyx":339 * elif on1: * # The triangle is outside the circle * area = 0.0 # <<<<<<<<<<<<<< * elif in1: * # Check for intersections of far side with circle */ __pyx_v_area = 0.0; /* "photutils/geometry/core.pyx":337 * area = area_arc_unit(x1, y1, x2, y2) * * elif on1: # <<<<<<<<<<<<<< * # The triangle is outside the circle * area = 0.0 */ goto __pyx_L10; } /* "photutils/geometry/core.pyx":340 * # The triangle is outside the circle * area = 0.0 * elif in1: # <<<<<<<<<<<<<< * # Check for intersections of far side with circle * inter = circle_segment(x2, y2, x3, y3) */ __pyx_t_4 = __Pyx_PyObject_IsTrue(((PyObject *)__pyx_v_in1)); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(0, 340, __pyx_L1_error) if (__pyx_t_4) { /* "photutils/geometry/core.pyx":342 * elif in1: * # Check for intersections of far side with circle * inter = circle_segment(x2, y2, x3, y3) # <<<<<<<<<<<<<< * pt1 = inter.p1 * pt2 = inter.p2 */ __pyx_v_inter = __pyx_f_9photutils_8geometry_4core_circle_segment(__pyx_v_x2, __pyx_v_y2, __pyx_v_x3, __pyx_v_y3); /* "photutils/geometry/core.pyx":343 * # Check for intersections of far side with circle * inter = circle_segment(x2, y2, x3, y3) * pt1 = inter.p1 # <<<<<<<<<<<<<< * pt2 = inter.p2 * pt3 = circle_segment_single2(x1, y1, x2, y2) */ __pyx_t_15 = __pyx_v_inter.p1; __pyx_v_pt1 = __pyx_t_15; /* "photutils/geometry/core.pyx":344 * inter = circle_segment(x2, y2, x3, y3) * pt1 = inter.p1 * pt2 = inter.p2 # <<<<<<<<<<<<<< * pt3 = circle_segment_single2(x1, y1, x2, y2) * pt4 = circle_segment_single2(x1, y1, x3, y3) */ __pyx_t_15 = __pyx_v_inter.p2; __pyx_v_pt2 = __pyx_t_15; /* "photutils/geometry/core.pyx":345 * pt1 = inter.p1 * pt2 = inter.p2 * pt3 = circle_segment_single2(x1, y1, x2, y2) # <<<<<<<<<<<<<< * pt4 = circle_segment_single2(x1, y1, x3, y3) * if pt1.x > 1.: # indicates no intersection */ __pyx_v_pt3 = __pyx_f_9photutils_8geometry_4core_circle_segment_single2(__pyx_v_x1, __pyx_v_y1, __pyx_v_x2, __pyx_v_y2); /* "photutils/geometry/core.pyx":346 * pt2 = inter.p2 * pt3 = circle_segment_single2(x1, y1, x2, y2) * pt4 = circle_segment_single2(x1, y1, x3, y3) # <<<<<<<<<<<<<< * if pt1.x > 1.: # indicates no intersection * # Code taken from `sep.h`. */ __pyx_v_pt4 = __pyx_f_9photutils_8geometry_4core_circle_segment_single2(__pyx_v_x1, __pyx_v_y1, __pyx_v_x3, __pyx_v_y3); /* "photutils/geometry/core.pyx":347 * pt3 = circle_segment_single2(x1, y1, x2, y2) * pt4 = circle_segment_single2(x1, y1, x3, y3) * if pt1.x > 1.: # indicates no intersection # <<<<<<<<<<<<<< * # Code taken from `sep.h`. * # TODO: use `sep` and get rid of this Cython code. */ __pyx_t_4 = ((__pyx_v_pt1.x > 1.) != 0); if (__pyx_t_4) { /* "photutils/geometry/core.pyx":350 * # Code taken from `sep.h`. * # TODO: use `sep` and get rid of this Cython code. 
* if (((0.-pt3.y) * (pt4.x-pt3.x) > (pt4.y-pt3.y) * (0.-pt3.x)) != # <<<<<<<<<<<<<< * ((y1-pt3.y) * (pt4.x-pt3.x) > (pt4.y-pt3.y) * (x1-pt3.x))): * area = area_triangle(x1, y1, pt3.x, pt3.y, pt4.x, pt4.y) \ */ __pyx_t_4 = (((((0. - __pyx_v_pt3.y) * (__pyx_v_pt4.x - __pyx_v_pt3.x)) > ((__pyx_v_pt4.y - __pyx_v_pt3.y) * (0. - __pyx_v_pt3.x))) != (((__pyx_v_y1 - __pyx_v_pt3.y) * (__pyx_v_pt4.x - __pyx_v_pt3.x)) > ((__pyx_v_pt4.y - __pyx_v_pt3.y) * (__pyx_v_x1 - __pyx_v_pt3.x)))) != 0); if (__pyx_t_4) { /* "photutils/geometry/core.pyx":353 * ((y1-pt3.y) * (pt4.x-pt3.x) > (pt4.y-pt3.y) * (x1-pt3.x))): * area = area_triangle(x1, y1, pt3.x, pt3.y, pt4.x, pt4.y) \ * + (PI - area_arc_unit(pt3.x, pt3.y, pt4.x, pt4.y)) # <<<<<<<<<<<<<< * else: * area = area_triangle(x1, y1, pt3.x, pt3.y, pt4.x, pt4.y) \ */ __pyx_v_area = (__pyx_f_9photutils_8geometry_4core_area_triangle(__pyx_v_x1, __pyx_v_y1, __pyx_v_pt3.x, __pyx_v_pt3.y, __pyx_v_pt4.x, __pyx_v_pt4.y) + (__pyx_v_PI - __pyx_f_9photutils_8geometry_4core_area_arc_unit(__pyx_v_pt3.x, __pyx_v_pt3.y, __pyx_v_pt4.x, __pyx_v_pt4.y))); /* "photutils/geometry/core.pyx":350 * # Code taken from `sep.h`. * # TODO: use `sep` and get rid of this Cython code. * if (((0.-pt3.y) * (pt4.x-pt3.x) > (pt4.y-pt3.y) * (0.-pt3.x)) != # <<<<<<<<<<<<<< * ((y1-pt3.y) * (pt4.x-pt3.x) > (pt4.y-pt3.y) * (x1-pt3.x))): * area = area_triangle(x1, y1, pt3.x, pt3.y, pt4.x, pt4.y) \ */ goto __pyx_L24; } /* "photutils/geometry/core.pyx":356 * else: * area = area_triangle(x1, y1, pt3.x, pt3.y, pt4.x, pt4.y) \ * + area_arc_unit(pt3.x, pt3.y, pt4.x, pt4.y) # <<<<<<<<<<<<<< * else: * if (pt2.x - x2)**2 + (pt2.y - y2)**2 < (pt1.x - x2)**2 + (pt1.y - y2)**2: */ /*else*/ { /* "photutils/geometry/core.pyx":355 * + (PI - area_arc_unit(pt3.x, pt3.y, pt4.x, pt4.y)) * else: * area = area_triangle(x1, y1, pt3.x, pt3.y, pt4.x, pt4.y) \ # <<<<<<<<<<<<<< * + area_arc_unit(pt3.x, pt3.y, pt4.x, pt4.y) * else: */ __pyx_v_area = (__pyx_f_9photutils_8geometry_4core_area_triangle(__pyx_v_x1, __pyx_v_y1, __pyx_v_pt3.x, __pyx_v_pt3.y, __pyx_v_pt4.x, __pyx_v_pt4.y) + __pyx_f_9photutils_8geometry_4core_area_arc_unit(__pyx_v_pt3.x, __pyx_v_pt3.y, __pyx_v_pt4.x, __pyx_v_pt4.y)); } __pyx_L24:; /* "photutils/geometry/core.pyx":347 * pt3 = circle_segment_single2(x1, y1, x2, y2) * pt4 = circle_segment_single2(x1, y1, x3, y3) * if pt1.x > 1.: # indicates no intersection # <<<<<<<<<<<<<< * # Code taken from `sep.h`. * # TODO: use `sep` and get rid of this Cython code. 
*/ goto __pyx_L23; } /* "photutils/geometry/core.pyx":358 * + area_arc_unit(pt3.x, pt3.y, pt4.x, pt4.y) * else: * if (pt2.x - x2)**2 + (pt2.y - y2)**2 < (pt1.x - x2)**2 + (pt1.y - y2)**2: # <<<<<<<<<<<<<< * pt1, pt2 = pt2, pt1 * area = area_triangle(x1, y1, pt3.x, pt3.y, pt1.x, pt1.y) \ */ /*else*/ { __pyx_t_4 = (((pow((__pyx_v_pt2.x - __pyx_v_x2), 2.0) + pow((__pyx_v_pt2.y - __pyx_v_y2), 2.0)) < (pow((__pyx_v_pt1.x - __pyx_v_x2), 2.0) + pow((__pyx_v_pt1.y - __pyx_v_y2), 2.0))) != 0); if (__pyx_t_4) { /* "photutils/geometry/core.pyx":359 * else: * if (pt2.x - x2)**2 + (pt2.y - y2)**2 < (pt1.x - x2)**2 + (pt1.y - y2)**2: * pt1, pt2 = pt2, pt1 # <<<<<<<<<<<<<< * area = area_triangle(x1, y1, pt3.x, pt3.y, pt1.x, pt1.y) \ * + area_triangle(x1, y1, pt1.x, pt1.y, pt2.x, pt2.y) \ */ __pyx_t_15 = __pyx_v_pt2; __pyx_t_16 = __pyx_v_pt1; __pyx_v_pt1 = __pyx_t_15; __pyx_v_pt2 = __pyx_t_16; /* "photutils/geometry/core.pyx":358 * + area_arc_unit(pt3.x, pt3.y, pt4.x, pt4.y) * else: * if (pt2.x - x2)**2 + (pt2.y - y2)**2 < (pt1.x - x2)**2 + (pt1.y - y2)**2: # <<<<<<<<<<<<<< * pt1, pt2 = pt2, pt1 * area = area_triangle(x1, y1, pt3.x, pt3.y, pt1.x, pt1.y) \ */ } /* "photutils/geometry/core.pyx":364 * + area_triangle(x1, y1, pt2.x, pt2.y, pt4.x, pt4.y) \ * + area_arc_unit(pt1.x, pt1.y, pt3.x, pt3.y) \ * + area_arc_unit(pt2.x, pt2.y, pt4.x, pt4.y) # <<<<<<<<<<<<<< * else: * inter = circle_segment(x1, y1, x2, y2) */ __pyx_v_area = ((((__pyx_f_9photutils_8geometry_4core_area_triangle(__pyx_v_x1, __pyx_v_y1, __pyx_v_pt3.x, __pyx_v_pt3.y, __pyx_v_pt1.x, __pyx_v_pt1.y) + __pyx_f_9photutils_8geometry_4core_area_triangle(__pyx_v_x1, __pyx_v_y1, __pyx_v_pt1.x, __pyx_v_pt1.y, __pyx_v_pt2.x, __pyx_v_pt2.y)) + __pyx_f_9photutils_8geometry_4core_area_triangle(__pyx_v_x1, __pyx_v_y1, __pyx_v_pt2.x, __pyx_v_pt2.y, __pyx_v_pt4.x, __pyx_v_pt4.y)) + __pyx_f_9photutils_8geometry_4core_area_arc_unit(__pyx_v_pt1.x, __pyx_v_pt1.y, __pyx_v_pt3.x, __pyx_v_pt3.y)) + __pyx_f_9photutils_8geometry_4core_area_arc_unit(__pyx_v_pt2.x, __pyx_v_pt2.y, __pyx_v_pt4.x, __pyx_v_pt4.y)); } __pyx_L23:; /* "photutils/geometry/core.pyx":340 * # The triangle is outside the circle * area = 0.0 * elif in1: # <<<<<<<<<<<<<< * # Check for intersections of far side with circle * inter = circle_segment(x2, y2, x3, y3) */ goto __pyx_L10; } /* "photutils/geometry/core.pyx":366 * + area_arc_unit(pt2.x, pt2.y, pt4.x, pt4.y) * else: * inter = circle_segment(x1, y1, x2, y2) # <<<<<<<<<<<<<< * pt1 = inter.p1 * pt2 = inter.p2 */ /*else*/ { __pyx_v_inter = __pyx_f_9photutils_8geometry_4core_circle_segment(__pyx_v_x1, __pyx_v_y1, __pyx_v_x2, __pyx_v_y2); /* "photutils/geometry/core.pyx":367 * else: * inter = circle_segment(x1, y1, x2, y2) * pt1 = inter.p1 # <<<<<<<<<<<<<< * pt2 = inter.p2 * inter = circle_segment(x2, y2, x3, y3) */ __pyx_t_16 = __pyx_v_inter.p1; __pyx_v_pt1 = __pyx_t_16; /* "photutils/geometry/core.pyx":368 * inter = circle_segment(x1, y1, x2, y2) * pt1 = inter.p1 * pt2 = inter.p2 # <<<<<<<<<<<<<< * inter = circle_segment(x2, y2, x3, y3) * pt3 = inter.p1 */ __pyx_t_16 = __pyx_v_inter.p2; __pyx_v_pt2 = __pyx_t_16; /* "photutils/geometry/core.pyx":369 * pt1 = inter.p1 * pt2 = inter.p2 * inter = circle_segment(x2, y2, x3, y3) # <<<<<<<<<<<<<< * pt3 = inter.p1 * pt4 = inter.p2 */ __pyx_v_inter = __pyx_f_9photutils_8geometry_4core_circle_segment(__pyx_v_x2, __pyx_v_y2, __pyx_v_x3, __pyx_v_y3); /* "photutils/geometry/core.pyx":370 * pt2 = inter.p2 * inter = circle_segment(x2, y2, x3, y3) * pt3 = inter.p1 # <<<<<<<<<<<<<< * pt4 = inter.p2 * inter = 
circle_segment(x3, y3, x1, y1) */ __pyx_t_16 = __pyx_v_inter.p1; __pyx_v_pt3 = __pyx_t_16; /* "photutils/geometry/core.pyx":371 * inter = circle_segment(x2, y2, x3, y3) * pt3 = inter.p1 * pt4 = inter.p2 # <<<<<<<<<<<<<< * inter = circle_segment(x3, y3, x1, y1) * pt5 = inter.p1 */ __pyx_t_16 = __pyx_v_inter.p2; __pyx_v_pt4 = __pyx_t_16; /* "photutils/geometry/core.pyx":372 * pt3 = inter.p1 * pt4 = inter.p2 * inter = circle_segment(x3, y3, x1, y1) # <<<<<<<<<<<<<< * pt5 = inter.p1 * pt6 = inter.p2 */ __pyx_v_inter = __pyx_f_9photutils_8geometry_4core_circle_segment(__pyx_v_x3, __pyx_v_y3, __pyx_v_x1, __pyx_v_y1); /* "photutils/geometry/core.pyx":373 * pt4 = inter.p2 * inter = circle_segment(x3, y3, x1, y1) * pt5 = inter.p1 # <<<<<<<<<<<<<< * pt6 = inter.p2 * if pt1.x <= 1.: */ __pyx_t_16 = __pyx_v_inter.p1; __pyx_v_pt5 = __pyx_t_16; /* "photutils/geometry/core.pyx":374 * inter = circle_segment(x3, y3, x1, y1) * pt5 = inter.p1 * pt6 = inter.p2 # <<<<<<<<<<<<<< * if pt1.x <= 1.: * xp, yp = 0.5 * (pt1.x + pt2.x), 0.5 * (pt1.y + pt2.y) */ __pyx_t_16 = __pyx_v_inter.p2; __pyx_v_pt6 = __pyx_t_16; /* "photutils/geometry/core.pyx":375 * pt5 = inter.p1 * pt6 = inter.p2 * if pt1.x <= 1.: # <<<<<<<<<<<<<< * xp, yp = 0.5 * (pt1.x + pt2.x), 0.5 * (pt1.y + pt2.y) * area = overlap_area_triangle_unit_circle(x1, y1, x3, y3, xp, yp) \ */ __pyx_t_4 = ((__pyx_v_pt1.x <= 1.) != 0); if (__pyx_t_4) { /* "photutils/geometry/core.pyx":376 * pt6 = inter.p2 * if pt1.x <= 1.: * xp, yp = 0.5 * (pt1.x + pt2.x), 0.5 * (pt1.y + pt2.y) # <<<<<<<<<<<<<< * area = overlap_area_triangle_unit_circle(x1, y1, x3, y3, xp, yp) \ * + overlap_area_triangle_unit_circle(x2, y2, x3, y3, xp, yp) */ __pyx_t_6 = (0.5 * (__pyx_v_pt1.x + __pyx_v_pt2.x)); __pyx_t_5 = (0.5 * (__pyx_v_pt1.y + __pyx_v_pt2.y)); __pyx_v_xp = __pyx_t_6; __pyx_v_yp = __pyx_t_5; /* "photutils/geometry/core.pyx":378 * xp, yp = 0.5 * (pt1.x + pt2.x), 0.5 * (pt1.y + pt2.y) * area = overlap_area_triangle_unit_circle(x1, y1, x3, y3, xp, yp) \ * + overlap_area_triangle_unit_circle(x2, y2, x3, y3, xp, yp) # <<<<<<<<<<<<<< * elif pt3.x <= 1.: * xp, yp = 0.5 * (pt3.x + pt4.x), 0.5 * (pt3.y + pt4.y) */ __pyx_v_area = (__pyx_f_9photutils_8geometry_4core_overlap_area_triangle_unit_circle(__pyx_v_x1, __pyx_v_y1, __pyx_v_x3, __pyx_v_y3, __pyx_v_xp, __pyx_v_yp) + __pyx_f_9photutils_8geometry_4core_overlap_area_triangle_unit_circle(__pyx_v_x2, __pyx_v_y2, __pyx_v_x3, __pyx_v_y3, __pyx_v_xp, __pyx_v_yp)); /* "photutils/geometry/core.pyx":375 * pt5 = inter.p1 * pt6 = inter.p2 * if pt1.x <= 1.: # <<<<<<<<<<<<<< * xp, yp = 0.5 * (pt1.x + pt2.x), 0.5 * (pt1.y + pt2.y) * area = overlap_area_triangle_unit_circle(x1, y1, x3, y3, xp, yp) \ */ goto __pyx_L26; } /* "photutils/geometry/core.pyx":379 * area = overlap_area_triangle_unit_circle(x1, y1, x3, y3, xp, yp) \ * + overlap_area_triangle_unit_circle(x2, y2, x3, y3, xp, yp) * elif pt3.x <= 1.: # <<<<<<<<<<<<<< * xp, yp = 0.5 * (pt3.x + pt4.x), 0.5 * (pt3.y + pt4.y) * area = overlap_area_triangle_unit_circle(x3, y3, x1, y1, xp, yp) \ */ __pyx_t_4 = ((__pyx_v_pt3.x <= 1.) 
!= 0); if (__pyx_t_4) { /* "photutils/geometry/core.pyx":380 * + overlap_area_triangle_unit_circle(x2, y2, x3, y3, xp, yp) * elif pt3.x <= 1.: * xp, yp = 0.5 * (pt3.x + pt4.x), 0.5 * (pt3.y + pt4.y) # <<<<<<<<<<<<<< * area = overlap_area_triangle_unit_circle(x3, y3, x1, y1, xp, yp) \ * + overlap_area_triangle_unit_circle(x2, y2, x1, y1, xp, yp) */ __pyx_t_5 = (0.5 * (__pyx_v_pt3.x + __pyx_v_pt4.x)); __pyx_t_6 = (0.5 * (__pyx_v_pt3.y + __pyx_v_pt4.y)); __pyx_v_xp = __pyx_t_5; __pyx_v_yp = __pyx_t_6; /* "photutils/geometry/core.pyx":382 * xp, yp = 0.5 * (pt3.x + pt4.x), 0.5 * (pt3.y + pt4.y) * area = overlap_area_triangle_unit_circle(x3, y3, x1, y1, xp, yp) \ * + overlap_area_triangle_unit_circle(x2, y2, x1, y1, xp, yp) # <<<<<<<<<<<<<< * elif pt5.x <= 1.: * xp, yp = 0.5 * (pt5.x + pt6.x), 0.5 * (pt5.y + pt6.y) */ __pyx_v_area = (__pyx_f_9photutils_8geometry_4core_overlap_area_triangle_unit_circle(__pyx_v_x3, __pyx_v_y3, __pyx_v_x1, __pyx_v_y1, __pyx_v_xp, __pyx_v_yp) + __pyx_f_9photutils_8geometry_4core_overlap_area_triangle_unit_circle(__pyx_v_x2, __pyx_v_y2, __pyx_v_x1, __pyx_v_y1, __pyx_v_xp, __pyx_v_yp)); /* "photutils/geometry/core.pyx":379 * area = overlap_area_triangle_unit_circle(x1, y1, x3, y3, xp, yp) \ * + overlap_area_triangle_unit_circle(x2, y2, x3, y3, xp, yp) * elif pt3.x <= 1.: # <<<<<<<<<<<<<< * xp, yp = 0.5 * (pt3.x + pt4.x), 0.5 * (pt3.y + pt4.y) * area = overlap_area_triangle_unit_circle(x3, y3, x1, y1, xp, yp) \ */ goto __pyx_L26; } /* "photutils/geometry/core.pyx":383 * area = overlap_area_triangle_unit_circle(x3, y3, x1, y1, xp, yp) \ * + overlap_area_triangle_unit_circle(x2, y2, x1, y1, xp, yp) * elif pt5.x <= 1.: # <<<<<<<<<<<<<< * xp, yp = 0.5 * (pt5.x + pt6.x), 0.5 * (pt5.y + pt6.y) * area = overlap_area_triangle_unit_circle(x1, y1, x2, y2, xp, yp) \ */ __pyx_t_4 = ((__pyx_v_pt5.x <= 1.) 
!= 0); if (__pyx_t_4) { /* "photutils/geometry/core.pyx":384 * + overlap_area_triangle_unit_circle(x2, y2, x1, y1, xp, yp) * elif pt5.x <= 1.: * xp, yp = 0.5 * (pt5.x + pt6.x), 0.5 * (pt5.y + pt6.y) # <<<<<<<<<<<<<< * area = overlap_area_triangle_unit_circle(x1, y1, x2, y2, xp, yp) \ * + overlap_area_triangle_unit_circle(x3, y3, x2, y2, xp, yp) */ __pyx_t_6 = (0.5 * (__pyx_v_pt5.x + __pyx_v_pt6.x)); __pyx_t_5 = (0.5 * (__pyx_v_pt5.y + __pyx_v_pt6.y)); __pyx_v_xp = __pyx_t_6; __pyx_v_yp = __pyx_t_5; /* "photutils/geometry/core.pyx":386 * xp, yp = 0.5 * (pt5.x + pt6.x), 0.5 * (pt5.y + pt6.y) * area = overlap_area_triangle_unit_circle(x1, y1, x2, y2, xp, yp) \ * + overlap_area_triangle_unit_circle(x3, y3, x2, y2, xp, yp) # <<<<<<<<<<<<<< * else: # no intersections * if in_triangle(0., 0., x1, y1, x2, y2, x3, y3): */ __pyx_v_area = (__pyx_f_9photutils_8geometry_4core_overlap_area_triangle_unit_circle(__pyx_v_x1, __pyx_v_y1, __pyx_v_x2, __pyx_v_y2, __pyx_v_xp, __pyx_v_yp) + __pyx_f_9photutils_8geometry_4core_overlap_area_triangle_unit_circle(__pyx_v_x3, __pyx_v_y3, __pyx_v_x2, __pyx_v_y2, __pyx_v_xp, __pyx_v_yp)); /* "photutils/geometry/core.pyx":383 * area = overlap_area_triangle_unit_circle(x3, y3, x1, y1, xp, yp) \ * + overlap_area_triangle_unit_circle(x2, y2, x1, y1, xp, yp) * elif pt5.x <= 1.: # <<<<<<<<<<<<<< * xp, yp = 0.5 * (pt5.x + pt6.x), 0.5 * (pt5.y + pt6.y) * area = overlap_area_triangle_unit_circle(x1, y1, x2, y2, xp, yp) \ */ goto __pyx_L26; } /* "photutils/geometry/core.pyx":388 * + overlap_area_triangle_unit_circle(x3, y3, x2, y2, xp, yp) * else: # no intersections * if in_triangle(0., 0., x1, y1, x2, y2, x3, y3): # <<<<<<<<<<<<<< * return PI * else: */ /*else*/ { __pyx_t_4 = (__pyx_f_9photutils_8geometry_4core_in_triangle(0., 0., __pyx_v_x1, __pyx_v_y1, __pyx_v_x2, __pyx_v_y2, __pyx_v_x3, __pyx_v_y3) != 0); if (__pyx_t_4) { /* "photutils/geometry/core.pyx":389 * else: # no intersections * if in_triangle(0., 0., x1, y1, x2, y2, x3, y3): * return PI # <<<<<<<<<<<<<< * else: * return 0. */ __pyx_r = __pyx_v_PI; goto __pyx_L0; /* "photutils/geometry/core.pyx":388 * + overlap_area_triangle_unit_circle(x3, y3, x2, y2, xp, yp) * else: # no intersections * if in_triangle(0., 0., x1, y1, x2, y2, x3, y3): # <<<<<<<<<<<<<< * return PI * else: */ } /* "photutils/geometry/core.pyx":391 * return PI * else: * return 0. # <<<<<<<<<<<<<< * * return area */ /*else*/ { __pyx_r = 0.; goto __pyx_L0; } } __pyx_L26:; } __pyx_L10:; /* "photutils/geometry/core.pyx":393 * return 0. 
* * return area # <<<<<<<<<<<<<< */ __pyx_r = __pyx_v_area; goto __pyx_L0; /* "photutils/geometry/core.pyx":262 * * * cdef double overlap_area_triangle_unit_circle(double x1, double y1, double x2, double y2, double x3, double y3): # <<<<<<<<<<<<<< * """ * Given a triangle defined by three points (x1, y1), (x2, y2), and */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_WriteUnraisable("photutils.geometry.core.overlap_area_triangle_unit_circle", __pyx_clineno, __pyx_lineno, __pyx_filename, 1, 0); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_in1); __Pyx_XDECREF((PyObject *)__pyx_v_in2); __Pyx_XDECREF((PyObject *)__pyx_v_in3); __Pyx_XDECREF((PyObject *)__pyx_v_on1); __Pyx_XDECREF((PyObject *)__pyx_v_on2); __Pyx_XDECREF((PyObject *)__pyx_v_on3); __Pyx_XDECREF(__pyx_v_intersect13); __Pyx_XDECREF(__pyx_v_intersect23); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":214 * # experimental exception made for __getbuffer__ and __releasebuffer__ * # -- the details of this may change. * def __getbuffer__(ndarray self, Py_buffer* info, int flags): # <<<<<<<<<<<<<< * # This implementation of getbuffer is geared towards Cython * # requirements, and does not yet fullfill the PEP. */ /* Python wrapper */ static CYTHON_UNUSED int __pyx_pw_5numpy_7ndarray_1__getbuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ static CYTHON_UNUSED int __pyx_pw_5numpy_7ndarray_1__getbuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0); __pyx_r = __pyx_pf_5numpy_7ndarray___getbuffer__(((PyArrayObject *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_v_copy_shape; int __pyx_v_i; int __pyx_v_ndim; int __pyx_v_endian_detector; int __pyx_v_little_endian; int __pyx_v_t; char *__pyx_v_f; PyArray_Descr *__pyx_v_descr = 0; int __pyx_v_offset; int __pyx_v_hasfields; int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; int __pyx_t_4; int __pyx_t_5; PyObject *__pyx_t_6 = NULL; char *__pyx_t_7; __Pyx_RefNannySetupContext("__getbuffer__", 0); if (__pyx_v_info != NULL) { __pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None); __Pyx_GIVEREF(__pyx_v_info->obj); } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":220 * # of flags * * if info == NULL: return # <<<<<<<<<<<<<< * * cdef int copy_shape, i, ndim */ __pyx_t_1 = ((__pyx_v_info == NULL) != 0); if (__pyx_t_1) { __pyx_r = 0; goto __pyx_L0; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":223 * * cdef int copy_shape, i, ndim * cdef int endian_detector = 1 # <<<<<<<<<<<<<< * cdef bint little_endian = ((&endian_detector)[0] != 0) * */ __pyx_v_endian_detector = 1; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":224 * cdef int copy_shape, i, ndim * cdef int endian_detector = 1 * cdef bint little_endian = ((&endian_detector)[0] != 0) # <<<<<<<<<<<<<< * * ndim = PyArray_NDIM(self) */ __pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0); /* 
"../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":226 * cdef bint little_endian = ((&endian_detector)[0] != 0) * * ndim = PyArray_NDIM(self) # <<<<<<<<<<<<<< * * if sizeof(npy_intp) != sizeof(Py_ssize_t): */ __pyx_v_ndim = PyArray_NDIM(__pyx_v_self); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":228 * ndim = PyArray_NDIM(self) * * if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<< * copy_shape = 1 * else: */ __pyx_t_1 = (((sizeof(npy_intp)) != (sizeof(Py_ssize_t))) != 0); if (__pyx_t_1) { /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":229 * * if sizeof(npy_intp) != sizeof(Py_ssize_t): * copy_shape = 1 # <<<<<<<<<<<<<< * else: * copy_shape = 0 */ __pyx_v_copy_shape = 1; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":228 * ndim = PyArray_NDIM(self) * * if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<< * copy_shape = 1 * else: */ goto __pyx_L4; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":231 * copy_shape = 1 * else: * copy_shape = 0 # <<<<<<<<<<<<<< * * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) */ /*else*/ { __pyx_v_copy_shape = 0; } __pyx_L4:; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":233 * copy_shape = 0 * * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) # <<<<<<<<<<<<<< * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): * raise ValueError(u"ndarray is not C contiguous") */ __pyx_t_2 = (((__pyx_v_flags & PyBUF_C_CONTIGUOUS) == PyBUF_C_CONTIGUOUS) != 0); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L6_bool_binop_done; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":234 * * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): # <<<<<<<<<<<<<< * raise ValueError(u"ndarray is not C contiguous") * */ __pyx_t_2 = ((!(PyArray_CHKFLAGS(__pyx_v_self, NPY_C_CONTIGUOUS) != 0)) != 0); __pyx_t_1 = __pyx_t_2; __pyx_L6_bool_binop_done:; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":233 * copy_shape = 0 * * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) # <<<<<<<<<<<<<< * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): * raise ValueError(u"ndarray is not C contiguous") */ if (__pyx_t_1) { /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":235 * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): * raise ValueError(u"ndarray is not C contiguous") # <<<<<<<<<<<<<< * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__2, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 235, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 235, __pyx_L1_error) /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":233 * copy_shape = 0 * * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) # <<<<<<<<<<<<<< * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): * raise ValueError(u"ndarray is not C contiguous") 
*/ } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":237 * raise ValueError(u"ndarray is not C contiguous") * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) # <<<<<<<<<<<<<< * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): * raise ValueError(u"ndarray is not Fortran contiguous") */ __pyx_t_2 = (((__pyx_v_flags & PyBUF_F_CONTIGUOUS) == PyBUF_F_CONTIGUOUS) != 0); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L9_bool_binop_done; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":238 * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): # <<<<<<<<<<<<<< * raise ValueError(u"ndarray is not Fortran contiguous") * */ __pyx_t_2 = ((!(PyArray_CHKFLAGS(__pyx_v_self, NPY_F_CONTIGUOUS) != 0)) != 0); __pyx_t_1 = __pyx_t_2; __pyx_L9_bool_binop_done:; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":237 * raise ValueError(u"ndarray is not C contiguous") * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) # <<<<<<<<<<<<<< * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): * raise ValueError(u"ndarray is not Fortran contiguous") */ if (__pyx_t_1) { /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":239 * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): * raise ValueError(u"ndarray is not Fortran contiguous") # <<<<<<<<<<<<<< * * info.buf = PyArray_DATA(self) */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__3, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 239, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 239, __pyx_L1_error) /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":237 * raise ValueError(u"ndarray is not C contiguous") * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) # <<<<<<<<<<<<<< * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): * raise ValueError(u"ndarray is not Fortran contiguous") */ } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":241 * raise ValueError(u"ndarray is not Fortran contiguous") * * info.buf = PyArray_DATA(self) # <<<<<<<<<<<<<< * info.ndim = ndim * if copy_shape: */ __pyx_v_info->buf = PyArray_DATA(__pyx_v_self); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":242 * * info.buf = PyArray_DATA(self) * info.ndim = ndim # <<<<<<<<<<<<<< * if copy_shape: * # Allocate new buffer for strides and shape info. */ __pyx_v_info->ndim = __pyx_v_ndim; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":243 * info.buf = PyArray_DATA(self) * info.ndim = ndim * if copy_shape: # <<<<<<<<<<<<<< * # Allocate new buffer for strides and shape info. * # This is allocated as one block, strides first. */ __pyx_t_1 = (__pyx_v_copy_shape != 0); if (__pyx_t_1) { /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":246 * # Allocate new buffer for strides and shape info. * # This is allocated as one block, strides first. 
* info.strides = PyObject_Malloc(sizeof(Py_ssize_t) * 2 * ndim) # <<<<<<<<<<<<<< * info.shape = info.strides + ndim * for i in range(ndim): */ __pyx_v_info->strides = ((Py_ssize_t *)PyObject_Malloc((((sizeof(Py_ssize_t)) * 2) * ((size_t)__pyx_v_ndim)))); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":247 * # This is allocated as one block, strides first. * info.strides = PyObject_Malloc(sizeof(Py_ssize_t) * 2 * ndim) * info.shape = info.strides + ndim # <<<<<<<<<<<<<< * for i in range(ndim): * info.strides[i] = PyArray_STRIDES(self)[i] */ __pyx_v_info->shape = (__pyx_v_info->strides + __pyx_v_ndim); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":248 * info.strides = PyObject_Malloc(sizeof(Py_ssize_t) * 2 * ndim) * info.shape = info.strides + ndim * for i in range(ndim): # <<<<<<<<<<<<<< * info.strides[i] = PyArray_STRIDES(self)[i] * info.shape[i] = PyArray_DIMS(self)[i] */ __pyx_t_4 = __pyx_v_ndim; for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_4; __pyx_t_5+=1) { __pyx_v_i = __pyx_t_5; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":249 * info.shape = info.strides + ndim * for i in range(ndim): * info.strides[i] = PyArray_STRIDES(self)[i] # <<<<<<<<<<<<<< * info.shape[i] = PyArray_DIMS(self)[i] * else: */ (__pyx_v_info->strides[__pyx_v_i]) = (PyArray_STRIDES(__pyx_v_self)[__pyx_v_i]); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":250 * for i in range(ndim): * info.strides[i] = PyArray_STRIDES(self)[i] * info.shape[i] = PyArray_DIMS(self)[i] # <<<<<<<<<<<<<< * else: * info.strides = PyArray_STRIDES(self) */ (__pyx_v_info->shape[__pyx_v_i]) = (PyArray_DIMS(__pyx_v_self)[__pyx_v_i]); } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":243 * info.buf = PyArray_DATA(self) * info.ndim = ndim * if copy_shape: # <<<<<<<<<<<<<< * # Allocate new buffer for strides and shape info. * # This is allocated as one block, strides first. 
*/ goto __pyx_L11; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":252 * info.shape[i] = PyArray_DIMS(self)[i] * else: * info.strides = PyArray_STRIDES(self) # <<<<<<<<<<<<<< * info.shape = PyArray_DIMS(self) * info.suboffsets = NULL */ /*else*/ { __pyx_v_info->strides = ((Py_ssize_t *)PyArray_STRIDES(__pyx_v_self)); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":253 * else: * info.strides = PyArray_STRIDES(self) * info.shape = PyArray_DIMS(self) # <<<<<<<<<<<<<< * info.suboffsets = NULL * info.itemsize = PyArray_ITEMSIZE(self) */ __pyx_v_info->shape = ((Py_ssize_t *)PyArray_DIMS(__pyx_v_self)); } __pyx_L11:; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":254 * info.strides = PyArray_STRIDES(self) * info.shape = PyArray_DIMS(self) * info.suboffsets = NULL # <<<<<<<<<<<<<< * info.itemsize = PyArray_ITEMSIZE(self) * info.readonly = not PyArray_ISWRITEABLE(self) */ __pyx_v_info->suboffsets = NULL; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":255 * info.shape = PyArray_DIMS(self) * info.suboffsets = NULL * info.itemsize = PyArray_ITEMSIZE(self) # <<<<<<<<<<<<<< * info.readonly = not PyArray_ISWRITEABLE(self) * */ __pyx_v_info->itemsize = PyArray_ITEMSIZE(__pyx_v_self); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":256 * info.suboffsets = NULL * info.itemsize = PyArray_ITEMSIZE(self) * info.readonly = not PyArray_ISWRITEABLE(self) # <<<<<<<<<<<<<< * * cdef int t */ __pyx_v_info->readonly = (!(PyArray_ISWRITEABLE(__pyx_v_self) != 0)); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":259 * * cdef int t * cdef char* f = NULL # <<<<<<<<<<<<<< * cdef dtype descr = self.descr * cdef int offset */ __pyx_v_f = NULL; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":260 * cdef int t * cdef char* f = NULL * cdef dtype descr = self.descr # <<<<<<<<<<<<<< * cdef int offset * */ __pyx_t_3 = ((PyObject *)__pyx_v_self->descr); __Pyx_INCREF(__pyx_t_3); __pyx_v_descr = ((PyArray_Descr *)__pyx_t_3); __pyx_t_3 = 0; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":263 * cdef int offset * * cdef bint hasfields = PyDataType_HASFIELDS(descr) # <<<<<<<<<<<<<< * * if not hasfields and not copy_shape: */ __pyx_v_hasfields = PyDataType_HASFIELDS(__pyx_v_descr); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":265 * cdef bint hasfields = PyDataType_HASFIELDS(descr) * * if not hasfields and not copy_shape: # <<<<<<<<<<<<<< * # do not call releasebuffer * info.obj = None */ __pyx_t_2 = ((!(__pyx_v_hasfields != 0)) != 0); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L15_bool_binop_done; } __pyx_t_2 = ((!(__pyx_v_copy_shape != 0)) != 0); __pyx_t_1 = __pyx_t_2; __pyx_L15_bool_binop_done:; if (__pyx_t_1) { /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":267 * if not hasfields and not copy_shape: * # do not call releasebuffer * info.obj = None # <<<<<<<<<<<<<< * else: * # need to call releasebuffer */ __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = Py_None; /* 
"../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":265 * cdef bint hasfields = PyDataType_HASFIELDS(descr) * * if not hasfields and not copy_shape: # <<<<<<<<<<<<<< * # do not call releasebuffer * info.obj = None */ goto __pyx_L14; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":270 * else: * # need to call releasebuffer * info.obj = self # <<<<<<<<<<<<<< * * if not hasfields: */ /*else*/ { __Pyx_INCREF(((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = ((PyObject *)__pyx_v_self); } __pyx_L14:; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":272 * info.obj = self * * if not hasfields: # <<<<<<<<<<<<<< * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or */ __pyx_t_1 = ((!(__pyx_v_hasfields != 0)) != 0); if (__pyx_t_1) { /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":273 * * if not hasfields: * t = descr.type_num # <<<<<<<<<<<<<< * if ((descr.byteorder == c'>' and little_endian) or * (descr.byteorder == c'<' and not little_endian)): */ __pyx_t_4 = __pyx_v_descr->type_num; __pyx_v_t = __pyx_t_4; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":274 * if not hasfields: * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") */ __pyx_t_2 = ((__pyx_v_descr->byteorder == '>') != 0); if (!__pyx_t_2) { goto __pyx_L20_next_or; } else { } __pyx_t_2 = (__pyx_v_little_endian != 0); if (!__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L19_bool_binop_done; } __pyx_L20_next_or:; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":275 * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or * (descr.byteorder == c'<' and not little_endian)): # <<<<<<<<<<<<<< * raise ValueError(u"Non-native byte order not supported") * if t == NPY_BYTE: f = "b" */ __pyx_t_2 = ((__pyx_v_descr->byteorder == '<') != 0); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L19_bool_binop_done; } __pyx_t_2 = ((!(__pyx_v_little_endian != 0)) != 0); __pyx_t_1 = __pyx_t_2; __pyx_L19_bool_binop_done:; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":274 * if not hasfields: * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") */ if (__pyx_t_1) { /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":276 * if ((descr.byteorder == c'>' and little_endian) or * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__4, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 276, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 276, __pyx_L1_error) /* 
"../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":274 * if not hasfields: * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") */ } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":277 * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") * if t == NPY_BYTE: f = "b" # <<<<<<<<<<<<<< * elif t == NPY_UBYTE: f = "B" * elif t == NPY_SHORT: f = "h" */ switch (__pyx_v_t) { case NPY_BYTE: __pyx_v_f = ((char *)"b"); break; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":278 * raise ValueError(u"Non-native byte order not supported") * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" # <<<<<<<<<<<<<< * elif t == NPY_SHORT: f = "h" * elif t == NPY_USHORT: f = "H" */ case NPY_UBYTE: __pyx_v_f = ((char *)"B"); break; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":279 * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" * elif t == NPY_SHORT: f = "h" # <<<<<<<<<<<<<< * elif t == NPY_USHORT: f = "H" * elif t == NPY_INT: f = "i" */ case NPY_SHORT: __pyx_v_f = ((char *)"h"); break; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":280 * elif t == NPY_UBYTE: f = "B" * elif t == NPY_SHORT: f = "h" * elif t == NPY_USHORT: f = "H" # <<<<<<<<<<<<<< * elif t == NPY_INT: f = "i" * elif t == NPY_UINT: f = "I" */ case NPY_USHORT: __pyx_v_f = ((char *)"H"); break; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":281 * elif t == NPY_SHORT: f = "h" * elif t == NPY_USHORT: f = "H" * elif t == NPY_INT: f = "i" # <<<<<<<<<<<<<< * elif t == NPY_UINT: f = "I" * elif t == NPY_LONG: f = "l" */ case NPY_INT: __pyx_v_f = ((char *)"i"); break; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":282 * elif t == NPY_USHORT: f = "H" * elif t == NPY_INT: f = "i" * elif t == NPY_UINT: f = "I" # <<<<<<<<<<<<<< * elif t == NPY_LONG: f = "l" * elif t == NPY_ULONG: f = "L" */ case NPY_UINT: __pyx_v_f = ((char *)"I"); break; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":283 * elif t == NPY_INT: f = "i" * elif t == NPY_UINT: f = "I" * elif t == NPY_LONG: f = "l" # <<<<<<<<<<<<<< * elif t == NPY_ULONG: f = "L" * elif t == NPY_LONGLONG: f = "q" */ case NPY_LONG: __pyx_v_f = ((char *)"l"); break; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":284 * elif t == NPY_UINT: f = "I" * elif t == NPY_LONG: f = "l" * elif t == NPY_ULONG: f = "L" # <<<<<<<<<<<<<< * elif t == NPY_LONGLONG: f = "q" * elif t == NPY_ULONGLONG: f = "Q" */ case NPY_ULONG: __pyx_v_f = ((char *)"L"); break; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":285 * elif t == NPY_LONG: f = "l" * elif t == NPY_ULONG: f = "L" * elif t == NPY_LONGLONG: f = "q" # <<<<<<<<<<<<<< * elif t == NPY_ULONGLONG: f = "Q" * elif t == NPY_FLOAT: f = "f" */ case NPY_LONGLONG: __pyx_v_f = ((char *)"q"); break; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":286 * elif t == NPY_ULONG: f = "L" * elif t == NPY_LONGLONG: f = "q" * elif t == 
NPY_ULONGLONG: f = "Q" # <<<<<<<<<<<<<< * elif t == NPY_FLOAT: f = "f" * elif t == NPY_DOUBLE: f = "d" */ case NPY_ULONGLONG: __pyx_v_f = ((char *)"Q"); break; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":287 * elif t == NPY_LONGLONG: f = "q" * elif t == NPY_ULONGLONG: f = "Q" * elif t == NPY_FLOAT: f = "f" # <<<<<<<<<<<<<< * elif t == NPY_DOUBLE: f = "d" * elif t == NPY_LONGDOUBLE: f = "g" */ case NPY_FLOAT: __pyx_v_f = ((char *)"f"); break; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":288 * elif t == NPY_ULONGLONG: f = "Q" * elif t == NPY_FLOAT: f = "f" * elif t == NPY_DOUBLE: f = "d" # <<<<<<<<<<<<<< * elif t == NPY_LONGDOUBLE: f = "g" * elif t == NPY_CFLOAT: f = "Zf" */ case NPY_DOUBLE: __pyx_v_f = ((char *)"d"); break; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":289 * elif t == NPY_FLOAT: f = "f" * elif t == NPY_DOUBLE: f = "d" * elif t == NPY_LONGDOUBLE: f = "g" # <<<<<<<<<<<<<< * elif t == NPY_CFLOAT: f = "Zf" * elif t == NPY_CDOUBLE: f = "Zd" */ case NPY_LONGDOUBLE: __pyx_v_f = ((char *)"g"); break; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":290 * elif t == NPY_DOUBLE: f = "d" * elif t == NPY_LONGDOUBLE: f = "g" * elif t == NPY_CFLOAT: f = "Zf" # <<<<<<<<<<<<<< * elif t == NPY_CDOUBLE: f = "Zd" * elif t == NPY_CLONGDOUBLE: f = "Zg" */ case NPY_CFLOAT: __pyx_v_f = ((char *)"Zf"); break; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":291 * elif t == NPY_LONGDOUBLE: f = "g" * elif t == NPY_CFLOAT: f = "Zf" * elif t == NPY_CDOUBLE: f = "Zd" # <<<<<<<<<<<<<< * elif t == NPY_CLONGDOUBLE: f = "Zg" * elif t == NPY_OBJECT: f = "O" */ case NPY_CDOUBLE: __pyx_v_f = ((char *)"Zd"); break; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":292 * elif t == NPY_CFLOAT: f = "Zf" * elif t == NPY_CDOUBLE: f = "Zd" * elif t == NPY_CLONGDOUBLE: f = "Zg" # <<<<<<<<<<<<<< * elif t == NPY_OBJECT: f = "O" * else: */ case NPY_CLONGDOUBLE: __pyx_v_f = ((char *)"Zg"); break; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":293 * elif t == NPY_CDOUBLE: f = "Zd" * elif t == NPY_CLONGDOUBLE: f = "Zg" * elif t == NPY_OBJECT: f = "O" # <<<<<<<<<<<<<< * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) */ case NPY_OBJECT: __pyx_v_f = ((char *)"O"); break; default: /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":295 * elif t == NPY_OBJECT: f = "O" * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) # <<<<<<<<<<<<<< * info.format = f * return */ __pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_t); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 295, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_6 = PyUnicode_Format(__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, __pyx_t_3); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 295, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 295, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_3, NULL); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 295, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 
= 0; __Pyx_Raise(__pyx_t_6, 0, 0, 0); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __PYX_ERR(1, 295, __pyx_L1_error) break; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":296 * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) * info.format = f # <<<<<<<<<<<<<< * return * else: */ __pyx_v_info->format = __pyx_v_f; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":297 * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) * info.format = f * return # <<<<<<<<<<<<<< * else: * info.format = PyObject_Malloc(_buffer_format_string_len) */ __pyx_r = 0; goto __pyx_L0; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":272 * info.obj = self * * if not hasfields: # <<<<<<<<<<<<<< * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or */ } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":299 * return * else: * info.format = PyObject_Malloc(_buffer_format_string_len) # <<<<<<<<<<<<<< * info.format[0] = c'^' # Native data types, manual alignment * offset = 0 */ /*else*/ { __pyx_v_info->format = ((char *)PyObject_Malloc(0xFF)); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":300 * else: * info.format = PyObject_Malloc(_buffer_format_string_len) * info.format[0] = c'^' # Native data types, manual alignment # <<<<<<<<<<<<<< * offset = 0 * f = _util_dtypestring(descr, info.format + 1, */ (__pyx_v_info->format[0]) = '^'; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":301 * info.format = PyObject_Malloc(_buffer_format_string_len) * info.format[0] = c'^' # Native data types, manual alignment * offset = 0 # <<<<<<<<<<<<<< * f = _util_dtypestring(descr, info.format + 1, * info.format + _buffer_format_string_len, */ __pyx_v_offset = 0; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":302 * info.format[0] = c'^' # Native data types, manual alignment * offset = 0 * f = _util_dtypestring(descr, info.format + 1, # <<<<<<<<<<<<<< * info.format + _buffer_format_string_len, * &offset) */ __pyx_t_7 = __pyx_f_5numpy__util_dtypestring(__pyx_v_descr, (__pyx_v_info->format + 1), (__pyx_v_info->format + 0xFF), (&__pyx_v_offset)); if (unlikely(__pyx_t_7 == ((char *)NULL))) __PYX_ERR(1, 302, __pyx_L1_error) __pyx_v_f = __pyx_t_7; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":305 * info.format + _buffer_format_string_len, * &offset) * f[0] = c'\0' # Terminate format string # <<<<<<<<<<<<<< * * def __releasebuffer__(ndarray self, Py_buffer* info): */ (__pyx_v_f[0]) = '\x00'; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":214 * # experimental exception made for __getbuffer__ and __releasebuffer__ * # -- the details of this may change. * def __getbuffer__(ndarray self, Py_buffer* info, int flags): # <<<<<<<<<<<<<< * # This implementation of getbuffer is geared towards Cython * # requirements, and does not yet fullfill the PEP. 
*/ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_6); __Pyx_AddTraceback("numpy.ndarray.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; if (__pyx_v_info != NULL && __pyx_v_info->obj != NULL) { __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = NULL; } goto __pyx_L2; __pyx_L0:; if (__pyx_v_info != NULL && __pyx_v_info->obj == Py_None) { __Pyx_GOTREF(Py_None); __Pyx_DECREF(Py_None); __pyx_v_info->obj = NULL; } __pyx_L2:; __Pyx_XDECREF((PyObject *)__pyx_v_descr); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":307 * f[0] = c'\0' # Terminate format string * * def __releasebuffer__(ndarray self, Py_buffer* info): # <<<<<<<<<<<<<< * if PyArray_HASFIELDS(self): * PyObject_Free(info.format) */ /* Python wrapper */ static CYTHON_UNUSED void __pyx_pw_5numpy_7ndarray_3__releasebuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info); /*proto*/ static CYTHON_UNUSED void __pyx_pw_5numpy_7ndarray_3__releasebuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__releasebuffer__ (wrapper)", 0); __pyx_pf_5numpy_7ndarray_2__releasebuffer__(((PyArrayObject *)__pyx_v_self), ((Py_buffer *)__pyx_v_info)); /* function exit code */ __Pyx_RefNannyFinishContext(); } static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info) { __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("__releasebuffer__", 0); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":308 * * def __releasebuffer__(ndarray self, Py_buffer* info): * if PyArray_HASFIELDS(self): # <<<<<<<<<<<<<< * PyObject_Free(info.format) * if sizeof(npy_intp) != sizeof(Py_ssize_t): */ __pyx_t_1 = (PyArray_HASFIELDS(__pyx_v_self) != 0); if (__pyx_t_1) { /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":309 * def __releasebuffer__(ndarray self, Py_buffer* info): * if PyArray_HASFIELDS(self): * PyObject_Free(info.format) # <<<<<<<<<<<<<< * if sizeof(npy_intp) != sizeof(Py_ssize_t): * PyObject_Free(info.strides) */ PyObject_Free(__pyx_v_info->format); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":308 * * def __releasebuffer__(ndarray self, Py_buffer* info): * if PyArray_HASFIELDS(self): # <<<<<<<<<<<<<< * PyObject_Free(info.format) * if sizeof(npy_intp) != sizeof(Py_ssize_t): */ } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":310 * if PyArray_HASFIELDS(self): * PyObject_Free(info.format) * if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<< * PyObject_Free(info.strides) * # info.shape was stored after info.strides in the same block */ __pyx_t_1 = (((sizeof(npy_intp)) != (sizeof(Py_ssize_t))) != 0); if (__pyx_t_1) { /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":311 * PyObject_Free(info.format) * if sizeof(npy_intp) != sizeof(Py_ssize_t): * PyObject_Free(info.strides) # <<<<<<<<<<<<<< * # info.shape was stored after info.strides in the same block * */ PyObject_Free(__pyx_v_info->strides); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":310 * if PyArray_HASFIELDS(self): * PyObject_Free(info.format) * if 
sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<< * PyObject_Free(info.strides) * # info.shape was stored after info.strides in the same block */ } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":307 * f[0] = c'\0' # Terminate format string * * def __releasebuffer__(ndarray self, Py_buffer* info): # <<<<<<<<<<<<<< * if PyArray_HASFIELDS(self): * PyObject_Free(info.format) */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":788 * ctypedef npy_cdouble complex_t * * cdef inline object PyArray_MultiIterNew1(a): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(1, a) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew1(PyObject *__pyx_v_a) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("PyArray_MultiIterNew1", 0); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":789 * * cdef inline object PyArray_MultiIterNew1(a): * return PyArray_MultiIterNew(1, a) # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew2(a, b): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(1, ((void *)__pyx_v_a)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 789, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":788 * ctypedef npy_cdouble complex_t * * cdef inline object PyArray_MultiIterNew1(a): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(1, a) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew1", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":791 * return PyArray_MultiIterNew(1, a) * * cdef inline object PyArray_MultiIterNew2(a, b): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(2, a, b) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew2(PyObject *__pyx_v_a, PyObject *__pyx_v_b) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("PyArray_MultiIterNew2", 0); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":792 * * cdef inline object PyArray_MultiIterNew2(a, b): * return PyArray_MultiIterNew(2, a, b) # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew3(a, b, c): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(2, ((void *)__pyx_v_a), ((void *)__pyx_v_b)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 792, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":791 * return PyArray_MultiIterNew(1, a) * * cdef inline object PyArray_MultiIterNew2(a, b): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(2, a, b) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew2", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* 
"../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":794 * return PyArray_MultiIterNew(2, a, b) * * cdef inline object PyArray_MultiIterNew3(a, b, c): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(3, a, b, c) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew3(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("PyArray_MultiIterNew3", 0); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":795 * * cdef inline object PyArray_MultiIterNew3(a, b, c): * return PyArray_MultiIterNew(3, a, b, c) # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew4(a, b, c, d): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(3, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 795, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":794 * return PyArray_MultiIterNew(2, a, b) * * cdef inline object PyArray_MultiIterNew3(a, b, c): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(3, a, b, c) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew3", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":797 * return PyArray_MultiIterNew(3, a, b, c) * * cdef inline object PyArray_MultiIterNew4(a, b, c, d): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(4, a, b, c, d) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew4(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("PyArray_MultiIterNew4", 0); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":798 * * cdef inline object PyArray_MultiIterNew4(a, b, c, d): * return PyArray_MultiIterNew(4, a, b, c, d) # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(4, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 798, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":797 * return PyArray_MultiIterNew(3, a, b, c) * * cdef inline object PyArray_MultiIterNew4(a, b, c, d): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(4, a, b, c, d) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew4", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":800 * return PyArray_MultiIterNew(4, a, b, c, d) * * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(5, a, b, c, d, e) * */ static CYTHON_INLINE 
PyObject *__pyx_f_5numpy_PyArray_MultiIterNew5(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d, PyObject *__pyx_v_e) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("PyArray_MultiIterNew5", 0); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":801 * * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): * return PyArray_MultiIterNew(5, a, b, c, d, e) # <<<<<<<<<<<<<< * * cdef inline tuple PyDataType_SHAPE(dtype d): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(5, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d), ((void *)__pyx_v_e)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 801, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":800 * return PyArray_MultiIterNew(4, a, b, c, d) * * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(5, a, b, c, d, e) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew5", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":803 * return PyArray_MultiIterNew(5, a, b, c, d, e) * * cdef inline tuple PyDataType_SHAPE(dtype d): # <<<<<<<<<<<<<< * if PyDataType_HASSUBARRAY(d): * return d.subarray.shape */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyDataType_SHAPE(PyArray_Descr *__pyx_v_d) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("PyDataType_SHAPE", 0); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":804 * * cdef inline tuple PyDataType_SHAPE(dtype d): * if PyDataType_HASSUBARRAY(d): # <<<<<<<<<<<<<< * return d.subarray.shape * else: */ __pyx_t_1 = (PyDataType_HASSUBARRAY(__pyx_v_d) != 0); if (__pyx_t_1) { /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":805 * cdef inline tuple PyDataType_SHAPE(dtype d): * if PyDataType_HASSUBARRAY(d): * return d.subarray.shape # <<<<<<<<<<<<<< * else: * return () */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject*)__pyx_v_d->subarray->shape)); __pyx_r = ((PyObject*)__pyx_v_d->subarray->shape); goto __pyx_L0; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":804 * * cdef inline tuple PyDataType_SHAPE(dtype d): * if PyDataType_HASSUBARRAY(d): # <<<<<<<<<<<<<< * return d.subarray.shape * else: */ } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":807 * return d.subarray.shape * else: * return () # <<<<<<<<<<<<<< * * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: */ /*else*/ { __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_empty_tuple); __pyx_r = __pyx_empty_tuple; goto __pyx_L0; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":803 * return PyArray_MultiIterNew(5, a, b, c, d, e) * * cdef inline tuple PyDataType_SHAPE(dtype d): # <<<<<<<<<<<<<< * if PyDataType_HASSUBARRAY(d): * return d.subarray.shape */ /* function exit code */ __pyx_L0:; 
__Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":809 * return () * * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: # <<<<<<<<<<<<<< * # Recursive utility function used in __getbuffer__ to get format * # string. The new location in the format string is returned. */ static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx_v_descr, char *__pyx_v_f, char *__pyx_v_end, int *__pyx_v_offset) { PyArray_Descr *__pyx_v_child = 0; int __pyx_v_endian_detector; int __pyx_v_little_endian; PyObject *__pyx_v_fields = 0; PyObject *__pyx_v_childname = NULL; PyObject *__pyx_v_new_offset = NULL; PyObject *__pyx_v_t = NULL; char *__pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; Py_ssize_t __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; int __pyx_t_5; int __pyx_t_6; int __pyx_t_7; long __pyx_t_8; char *__pyx_t_9; __Pyx_RefNannySetupContext("_util_dtypestring", 0); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":814 * * cdef dtype child * cdef int endian_detector = 1 # <<<<<<<<<<<<<< * cdef bint little_endian = ((&endian_detector)[0] != 0) * cdef tuple fields */ __pyx_v_endian_detector = 1; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":815 * cdef dtype child * cdef int endian_detector = 1 * cdef bint little_endian = ((&endian_detector)[0] != 0) # <<<<<<<<<<<<<< * cdef tuple fields * */ __pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":818 * cdef tuple fields * * for childname in descr.names: # <<<<<<<<<<<<<< * fields = descr.fields[childname] * child, new_offset = fields */ if (unlikely(__pyx_v_descr->names == Py_None)) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable"); __PYX_ERR(1, 818, __pyx_L1_error) } __pyx_t_1 = __pyx_v_descr->names; __Pyx_INCREF(__pyx_t_1); __pyx_t_2 = 0; for (;;) { if (__pyx_t_2 >= PyTuple_GET_SIZE(__pyx_t_1)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_3 = PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_2); __Pyx_INCREF(__pyx_t_3); __pyx_t_2++; if (unlikely(0 < 0)) __PYX_ERR(1, 818, __pyx_L1_error) #else __pyx_t_3 = PySequence_ITEM(__pyx_t_1, __pyx_t_2); __pyx_t_2++; if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 818, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); #endif __Pyx_XDECREF_SET(__pyx_v_childname, __pyx_t_3); __pyx_t_3 = 0; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":819 * * for childname in descr.names: * fields = descr.fields[childname] # <<<<<<<<<<<<<< * child, new_offset = fields * */ if (unlikely(__pyx_v_descr->fields == Py_None)) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable"); __PYX_ERR(1, 819, __pyx_L1_error) } __pyx_t_3 = __Pyx_PyDict_GetItem(__pyx_v_descr->fields, __pyx_v_childname); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 819, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); if (!(likely(PyTuple_CheckExact(__pyx_t_3))||((__pyx_t_3) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_t_3)->tp_name), 0))) __PYX_ERR(1, 819, __pyx_L1_error) __Pyx_XDECREF_SET(__pyx_v_fields, ((PyObject*)__pyx_t_3)); __pyx_t_3 = 0; /* 
"../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":820 * for childname in descr.names: * fields = descr.fields[childname] * child, new_offset = fields # <<<<<<<<<<<<<< * * if (end - f) - (new_offset - offset[0]) < 15: */ if (likely(__pyx_v_fields != Py_None)) { PyObject* sequence = __pyx_v_fields; #if !CYTHON_COMPILING_IN_PYPY Py_ssize_t size = Py_SIZE(sequence); #else Py_ssize_t size = PySequence_Size(sequence); #endif if (unlikely(size != 2)) { if (size > 2) __Pyx_RaiseTooManyValuesError(2); else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); __PYX_ERR(1, 820, __pyx_L1_error) } #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_3 = PyTuple_GET_ITEM(sequence, 0); __pyx_t_4 = PyTuple_GET_ITEM(sequence, 1); __Pyx_INCREF(__pyx_t_3); __Pyx_INCREF(__pyx_t_4); #else __pyx_t_3 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 820, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 820, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); #endif } else { __Pyx_RaiseNoneNotIterableError(); __PYX_ERR(1, 820, __pyx_L1_error) } if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_dtype))))) __PYX_ERR(1, 820, __pyx_L1_error) __Pyx_XDECREF_SET(__pyx_v_child, ((PyArray_Descr *)__pyx_t_3)); __pyx_t_3 = 0; __Pyx_XDECREF_SET(__pyx_v_new_offset, __pyx_t_4); __pyx_t_4 = 0; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":822 * child, new_offset = fields * * if (end - f) - (new_offset - offset[0]) < 15: # <<<<<<<<<<<<<< * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") * */ __pyx_t_4 = __Pyx_PyInt_From_int((__pyx_v_offset[0])); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 822, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyNumber_Subtract(__pyx_v_new_offset, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 822, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_5 = __Pyx_PyInt_As_int(__pyx_t_3); if (unlikely((__pyx_t_5 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 822, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = ((((__pyx_v_end - __pyx_v_f) - ((int)__pyx_t_5)) < 15) != 0); if (__pyx_t_6) { /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":823 * * if (end - f) - (new_offset - offset[0]) < 15: * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") # <<<<<<<<<<<<<< * * if ((child.byteorder == c'>' and little_endian) or */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_RuntimeError, __pyx_tuple__5, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 823, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 823, __pyx_L1_error) /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":822 * child, new_offset = fields * * if (end - f) - (new_offset - offset[0]) < 15: # <<<<<<<<<<<<<< * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") * */ } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":825 * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") * * if ((child.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< * (child.byteorder == c'<' and not little_endian)): * raise 
ValueError(u"Non-native byte order not supported") */ __pyx_t_7 = ((__pyx_v_child->byteorder == '>') != 0); if (!__pyx_t_7) { goto __pyx_L8_next_or; } else { } __pyx_t_7 = (__pyx_v_little_endian != 0); if (!__pyx_t_7) { } else { __pyx_t_6 = __pyx_t_7; goto __pyx_L7_bool_binop_done; } __pyx_L8_next_or:; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":826 * * if ((child.byteorder == c'>' and little_endian) or * (child.byteorder == c'<' and not little_endian)): # <<<<<<<<<<<<<< * raise ValueError(u"Non-native byte order not supported") * # One could encode it in the format string and have Cython */ __pyx_t_7 = ((__pyx_v_child->byteorder == '<') != 0); if (__pyx_t_7) { } else { __pyx_t_6 = __pyx_t_7; goto __pyx_L7_bool_binop_done; } __pyx_t_7 = ((!(__pyx_v_little_endian != 0)) != 0); __pyx_t_6 = __pyx_t_7; __pyx_L7_bool_binop_done:; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":825 * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") * * if ((child.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< * (child.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") */ if (__pyx_t_6) { /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":827 * if ((child.byteorder == c'>' and little_endian) or * (child.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< * # One could encode it in the format string and have Cython * # complain instead, BUT: < and > in format strings also imply */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__6, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 827, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 827, __pyx_L1_error) /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":825 * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") * * if ((child.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< * (child.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") */ } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":837 * * # Output padding bytes * while offset[0] < new_offset: # <<<<<<<<<<<<<< * f[0] = 120 # "x"; pad byte * f += 1 */ while (1) { __pyx_t_3 = __Pyx_PyInt_From_int((__pyx_v_offset[0])); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 837, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_t_3, __pyx_v_new_offset, Py_LT); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 837, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 837, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (!__pyx_t_6) break; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":838 * # Output padding bytes * while offset[0] < new_offset: * f[0] = 120 # "x"; pad byte # <<<<<<<<<<<<<< * f += 1 * offset[0] += 1 */ (__pyx_v_f[0]) = 0x78; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":839 * while offset[0] < new_offset: * f[0] = 120 # "x"; pad byte * f += 1 # <<<<<<<<<<<<<< * offset[0] += 1 * 
*/ __pyx_v_f = (__pyx_v_f + 1); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":840 * f[0] = 120 # "x"; pad byte * f += 1 * offset[0] += 1 # <<<<<<<<<<<<<< * * offset[0] += child.itemsize */ __pyx_t_8 = 0; (__pyx_v_offset[__pyx_t_8]) = ((__pyx_v_offset[__pyx_t_8]) + 1); } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":842 * offset[0] += 1 * * offset[0] += child.itemsize # <<<<<<<<<<<<<< * * if not PyDataType_HASFIELDS(child): */ __pyx_t_8 = 0; (__pyx_v_offset[__pyx_t_8]) = ((__pyx_v_offset[__pyx_t_8]) + __pyx_v_child->elsize); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":844 * offset[0] += child.itemsize * * if not PyDataType_HASFIELDS(child): # <<<<<<<<<<<<<< * t = child.type_num * if end - f < 5: */ __pyx_t_6 = ((!(PyDataType_HASFIELDS(__pyx_v_child) != 0)) != 0); if (__pyx_t_6) { /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":845 * * if not PyDataType_HASFIELDS(child): * t = child.type_num # <<<<<<<<<<<<<< * if end - f < 5: * raise RuntimeError(u"Format string allocated too short.") */ __pyx_t_4 = __Pyx_PyInt_From_int(__pyx_v_child->type_num); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 845, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_XDECREF_SET(__pyx_v_t, __pyx_t_4); __pyx_t_4 = 0; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":846 * if not PyDataType_HASFIELDS(child): * t = child.type_num * if end - f < 5: # <<<<<<<<<<<<<< * raise RuntimeError(u"Format string allocated too short.") * */ __pyx_t_6 = (((__pyx_v_end - __pyx_v_f) < 5) != 0); if (__pyx_t_6) { /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":847 * t = child.type_num * if end - f < 5: * raise RuntimeError(u"Format string allocated too short.") # <<<<<<<<<<<<<< * * # Until ticket #99 is fixed, use integers to avoid warnings */ __pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin_RuntimeError, __pyx_tuple__7, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 847, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __PYX_ERR(1, 847, __pyx_L1_error) /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":846 * if not PyDataType_HASFIELDS(child): * t = child.type_num * if end - f < 5: # <<<<<<<<<<<<<< * raise RuntimeError(u"Format string allocated too short.") * */ } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":850 * * # Until ticket #99 is fixed, use integers to avoid warnings * if t == NPY_BYTE: f[0] = 98 #"b" # <<<<<<<<<<<<<< * elif t == NPY_UBYTE: f[0] = 66 #"B" * elif t == NPY_SHORT: f[0] = 104 #"h" */ __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_BYTE); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 850, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 850, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 850, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 98; goto __pyx_L15; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":851 * # Until ticket #99 is fixed, use integers to 
avoid warnings * if t == NPY_BYTE: f[0] = 98 #"b" * elif t == NPY_UBYTE: f[0] = 66 #"B" # <<<<<<<<<<<<<< * elif t == NPY_SHORT: f[0] = 104 #"h" * elif t == NPY_USHORT: f[0] = 72 #"H" */ __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_UBYTE); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 851, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 851, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 851, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 66; goto __pyx_L15; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":852 * if t == NPY_BYTE: f[0] = 98 #"b" * elif t == NPY_UBYTE: f[0] = 66 #"B" * elif t == NPY_SHORT: f[0] = 104 #"h" # <<<<<<<<<<<<<< * elif t == NPY_USHORT: f[0] = 72 #"H" * elif t == NPY_INT: f[0] = 105 #"i" */ __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_SHORT); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 852, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 852, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 852, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 0x68; goto __pyx_L15; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":853 * elif t == NPY_UBYTE: f[0] = 66 #"B" * elif t == NPY_SHORT: f[0] = 104 #"h" * elif t == NPY_USHORT: f[0] = 72 #"H" # <<<<<<<<<<<<<< * elif t == NPY_INT: f[0] = 105 #"i" * elif t == NPY_UINT: f[0] = 73 #"I" */ __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_USHORT); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 853, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 853, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 853, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 72; goto __pyx_L15; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":854 * elif t == NPY_SHORT: f[0] = 104 #"h" * elif t == NPY_USHORT: f[0] = 72 #"H" * elif t == NPY_INT: f[0] = 105 #"i" # <<<<<<<<<<<<<< * elif t == NPY_UINT: f[0] = 73 #"I" * elif t == NPY_LONG: f[0] = 108 #"l" */ __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_INT); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 854, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 854, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 854, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 0x69; goto __pyx_L15; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":855 * elif t == NPY_USHORT: f[0] = 72 #"H" * elif t == NPY_INT: f[0] = 105 #"i" * elif t == NPY_UINT: f[0] = 73 #"I" # <<<<<<<<<<<<<< * elif t == NPY_LONG: f[0] = 108 #"l" * elif t == 
NPY_ULONG: f[0] = 76 #"L" */ __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_UINT); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 855, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 855, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 855, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 73; goto __pyx_L15; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":856 * elif t == NPY_INT: f[0] = 105 #"i" * elif t == NPY_UINT: f[0] = 73 #"I" * elif t == NPY_LONG: f[0] = 108 #"l" # <<<<<<<<<<<<<< * elif t == NPY_ULONG: f[0] = 76 #"L" * elif t == NPY_LONGLONG: f[0] = 113 #"q" */ __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_LONG); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 856, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 856, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 856, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 0x6C; goto __pyx_L15; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":857 * elif t == NPY_UINT: f[0] = 73 #"I" * elif t == NPY_LONG: f[0] = 108 #"l" * elif t == NPY_ULONG: f[0] = 76 #"L" # <<<<<<<<<<<<<< * elif t == NPY_LONGLONG: f[0] = 113 #"q" * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" */ __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_ULONG); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 857, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 857, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 857, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 76; goto __pyx_L15; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":858 * elif t == NPY_LONG: f[0] = 108 #"l" * elif t == NPY_ULONG: f[0] = 76 #"L" * elif t == NPY_LONGLONG: f[0] = 113 #"q" # <<<<<<<<<<<<<< * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" * elif t == NPY_FLOAT: f[0] = 102 #"f" */ __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_LONGLONG); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 858, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 858, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 858, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 0x71; goto __pyx_L15; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":859 * elif t == NPY_ULONG: f[0] = 76 #"L" * elif t == NPY_LONGLONG: f[0] = 113 #"q" * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" # <<<<<<<<<<<<<< * elif t == NPY_FLOAT: f[0] = 102 #"f" * elif t == NPY_DOUBLE: f[0] = 100 #"d" */ __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_ULONGLONG); if (unlikely(!__pyx_t_3)) 
__PYX_ERR(1, 859, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 859, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 859, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 81; goto __pyx_L15; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":860 * elif t == NPY_LONGLONG: f[0] = 113 #"q" * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" * elif t == NPY_FLOAT: f[0] = 102 #"f" # <<<<<<<<<<<<<< * elif t == NPY_DOUBLE: f[0] = 100 #"d" * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" */ __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_FLOAT); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 860, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 860, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 860, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 0x66; goto __pyx_L15; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":861 * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" * elif t == NPY_FLOAT: f[0] = 102 #"f" * elif t == NPY_DOUBLE: f[0] = 100 #"d" # <<<<<<<<<<<<<< * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf */ __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_DOUBLE); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 861, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 861, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 861, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 0x64; goto __pyx_L15; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":862 * elif t == NPY_FLOAT: f[0] = 102 #"f" * elif t == NPY_DOUBLE: f[0] = 100 #"d" * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" # <<<<<<<<<<<<<< * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd */ __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_LONGDOUBLE); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 862, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 862, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 862, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 0x67; goto __pyx_L15; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":863 * elif t == NPY_DOUBLE: f[0] = 100 #"d" * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf # <<<<<<<<<<<<<< * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg */ __pyx_t_3 = 
__Pyx_PyInt_From_enum__NPY_TYPES(NPY_CFLOAT); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 863, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 863, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 863, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 90; (__pyx_v_f[1]) = 0x66; __pyx_v_f = (__pyx_v_f + 1); goto __pyx_L15; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":864 * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd # <<<<<<<<<<<<<< * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg * elif t == NPY_OBJECT: f[0] = 79 #"O" */ __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_CDOUBLE); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 864, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 864, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 864, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 90; (__pyx_v_f[1]) = 0x64; __pyx_v_f = (__pyx_v_f + 1); goto __pyx_L15; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":865 * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg # <<<<<<<<<<<<<< * elif t == NPY_OBJECT: f[0] = 79 #"O" * else: */ __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_CLONGDOUBLE); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 865, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 865, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 865, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 90; (__pyx_v_f[1]) = 0x67; __pyx_v_f = (__pyx_v_f + 1); goto __pyx_L15; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":866 * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg * elif t == NPY_OBJECT: f[0] = 79 #"O" # <<<<<<<<<<<<<< * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) */ __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_OBJECT); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 866, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 866, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 866, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 79; goto __pyx_L15; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":868 * elif t == NPY_OBJECT: f[0] = 79 #"O" * else: * 
raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) # <<<<<<<<<<<<<< * f += 1 * else: */ /*else*/ { __pyx_t_3 = PyUnicode_Format(__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, __pyx_v_t); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 868, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 868, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_4, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 868, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 868, __pyx_L1_error) } __pyx_L15:; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":869 * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) * f += 1 # <<<<<<<<<<<<<< * else: * # Cython ignores struct boundary information ("T{...}"), */ __pyx_v_f = (__pyx_v_f + 1); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":844 * offset[0] += child.itemsize * * if not PyDataType_HASFIELDS(child): # <<<<<<<<<<<<<< * t = child.type_num * if end - f < 5: */ goto __pyx_L13; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":873 * # Cython ignores struct boundary information ("T{...}"), * # so don't output it * f = _util_dtypestring(child, f, end, offset) # <<<<<<<<<<<<<< * return f * */ /*else*/ { __pyx_t_9 = __pyx_f_5numpy__util_dtypestring(__pyx_v_child, __pyx_v_f, __pyx_v_end, __pyx_v_offset); if (unlikely(__pyx_t_9 == ((char *)NULL))) __PYX_ERR(1, 873, __pyx_L1_error) __pyx_v_f = __pyx_t_9; } __pyx_L13:; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":818 * cdef tuple fields * * for childname in descr.names: # <<<<<<<<<<<<<< * fields = descr.fields[childname] * child, new_offset = fields */ } __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":874 * # so don't output it * f = _util_dtypestring(child, f, end, offset) * return f # <<<<<<<<<<<<<< * * */ __pyx_r = __pyx_v_f; goto __pyx_L0; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":809 * return () * * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: # <<<<<<<<<<<<<< * # Recursive utility function used in __getbuffer__ to get format * # string. The new location in the format string is returned. 
*/ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("numpy._util_dtypestring", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_child); __Pyx_XDECREF(__pyx_v_fields); __Pyx_XDECREF(__pyx_v_childname); __Pyx_XDECREF(__pyx_v_new_offset); __Pyx_XDECREF(__pyx_v_t); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":990 * * * cdef inline void set_array_base(ndarray arr, object base): # <<<<<<<<<<<<<< * cdef PyObject* baseptr * if base is None: */ static CYTHON_INLINE void __pyx_f_5numpy_set_array_base(PyArrayObject *__pyx_v_arr, PyObject *__pyx_v_base) { PyObject *__pyx_v_baseptr; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; __Pyx_RefNannySetupContext("set_array_base", 0); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":992 * cdef inline void set_array_base(ndarray arr, object base): * cdef PyObject* baseptr * if base is None: # <<<<<<<<<<<<<< * baseptr = NULL * else: */ __pyx_t_1 = (__pyx_v_base == Py_None); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":993 * cdef PyObject* baseptr * if base is None: * baseptr = NULL # <<<<<<<<<<<<<< * else: * Py_INCREF(base) # important to do this before decref below! */ __pyx_v_baseptr = NULL; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":992 * cdef inline void set_array_base(ndarray arr, object base): * cdef PyObject* baseptr * if base is None: # <<<<<<<<<<<<<< * baseptr = NULL * else: */ goto __pyx_L3; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":995 * baseptr = NULL * else: * Py_INCREF(base) # important to do this before decref below! # <<<<<<<<<<<<<< * baseptr = base * Py_XDECREF(arr.base) */ /*else*/ { Py_INCREF(__pyx_v_base); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":996 * else: * Py_INCREF(base) # important to do this before decref below! * baseptr = base # <<<<<<<<<<<<<< * Py_XDECREF(arr.base) * arr.base = baseptr */ __pyx_v_baseptr = ((PyObject *)__pyx_v_base); } __pyx_L3:; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":997 * Py_INCREF(base) # important to do this before decref below! 
* baseptr = base * Py_XDECREF(arr.base) # <<<<<<<<<<<<<< * arr.base = baseptr * */ Py_XDECREF(__pyx_v_arr->base); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":998 * baseptr = base * Py_XDECREF(arr.base) * arr.base = baseptr # <<<<<<<<<<<<<< * * cdef inline object get_array_base(ndarray arr): */ __pyx_v_arr->base = __pyx_v_baseptr; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":990 * * * cdef inline void set_array_base(ndarray arr, object base): # <<<<<<<<<<<<<< * cdef PyObject* baseptr * if base is None: */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1000 * arr.base = baseptr * * cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<< * if arr.base is NULL: * return None */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_get_array_base(PyArrayObject *__pyx_v_arr) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("get_array_base", 0); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1001 * * cdef inline object get_array_base(ndarray arr): * if arr.base is NULL: # <<<<<<<<<<<<<< * return None * else: */ __pyx_t_1 = ((__pyx_v_arr->base == NULL) != 0); if (__pyx_t_1) { /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1002 * cdef inline object get_array_base(ndarray arr): * if arr.base is NULL: * return None # <<<<<<<<<<<<<< * else: * return arr.base */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(Py_None); __pyx_r = Py_None; goto __pyx_L0; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1001 * * cdef inline object get_array_base(ndarray arr): * if arr.base is NULL: # <<<<<<<<<<<<<< * return None * else: */ } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1004 * return None * else: * return arr.base # <<<<<<<<<<<<<< * * */ /*else*/ { __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_arr->base)); __pyx_r = ((PyObject *)__pyx_v_arr->base); goto __pyx_L0; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1000 * arr.base = baseptr * * cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<< * if arr.base is NULL: * return None */ /* function exit code */ __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1009 * # Versions of the import_* functions which are more suitable for * # Cython code. * cdef inline int import_array() except -1: # <<<<<<<<<<<<<< * try: * _import_array() */ static CYTHON_INLINE int __pyx_f_5numpy_import_array(void) { int __pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; int __pyx_t_4; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; PyObject *__pyx_t_8 = NULL; __Pyx_RefNannySetupContext("import_array", 0); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1010 * # Cython code. 
* cdef inline int import_array() except -1: * try: # <<<<<<<<<<<<<< * _import_array() * except Exception: */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_1, &__pyx_t_2, &__pyx_t_3); __Pyx_XGOTREF(__pyx_t_1); __Pyx_XGOTREF(__pyx_t_2); __Pyx_XGOTREF(__pyx_t_3); /*try:*/ { /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1011 * cdef inline int import_array() except -1: * try: * _import_array() # <<<<<<<<<<<<<< * except Exception: * raise ImportError("numpy.core.multiarray failed to import") */ __pyx_t_4 = _import_array(); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 1011, __pyx_L3_error) /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1010 * # Cython code. * cdef inline int import_array() except -1: * try: # <<<<<<<<<<<<<< * _import_array() * except Exception: */ } __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; goto __pyx_L8_try_end; __pyx_L3_error:; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1012 * try: * _import_array() * except Exception: # <<<<<<<<<<<<<< * raise ImportError("numpy.core.multiarray failed to import") * */ __pyx_t_4 = __Pyx_PyErr_ExceptionMatches(((PyObject *)(&((PyTypeObject*)PyExc_Exception)[0]))); if (__pyx_t_4) { __Pyx_AddTraceback("numpy.import_array", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_5, &__pyx_t_6, &__pyx_t_7) < 0) __PYX_ERR(1, 1012, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GOTREF(__pyx_t_6); __Pyx_GOTREF(__pyx_t_7); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1013 * _import_array() * except Exception: * raise ImportError("numpy.core.multiarray failed to import") # <<<<<<<<<<<<<< * * cdef inline int import_umath() except -1: */ __pyx_t_8 = __Pyx_PyObject_Call(__pyx_builtin_ImportError, __pyx_tuple__8, NULL); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 1013, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_Raise(__pyx_t_8, 0, 0, 0); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __PYX_ERR(1, 1013, __pyx_L5_except_error) } goto __pyx_L5_except_error; __pyx_L5_except_error:; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1010 * # Cython code. * cdef inline int import_array() except -1: * try: # <<<<<<<<<<<<<< * _import_array() * except Exception: */ __Pyx_XGIVEREF(__pyx_t_1); __Pyx_XGIVEREF(__pyx_t_2); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_ExceptionReset(__pyx_t_1, __pyx_t_2, __pyx_t_3); goto __pyx_L1_error; __pyx_L8_try_end:; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1009 * # Versions of the import_* functions which are more suitable for * # Cython code. 
* cdef inline int import_array() except -1: # <<<<<<<<<<<<<< * try: * _import_array() */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_8); __Pyx_AddTraceback("numpy.import_array", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1015 * raise ImportError("numpy.core.multiarray failed to import") * * cdef inline int import_umath() except -1: # <<<<<<<<<<<<<< * try: * _import_umath() */ static CYTHON_INLINE int __pyx_f_5numpy_import_umath(void) { int __pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; int __pyx_t_4; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; PyObject *__pyx_t_8 = NULL; __Pyx_RefNannySetupContext("import_umath", 0); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1016 * * cdef inline int import_umath() except -1: * try: # <<<<<<<<<<<<<< * _import_umath() * except Exception: */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_1, &__pyx_t_2, &__pyx_t_3); __Pyx_XGOTREF(__pyx_t_1); __Pyx_XGOTREF(__pyx_t_2); __Pyx_XGOTREF(__pyx_t_3); /*try:*/ { /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1017 * cdef inline int import_umath() except -1: * try: * _import_umath() # <<<<<<<<<<<<<< * except Exception: * raise ImportError("numpy.core.umath failed to import") */ __pyx_t_4 = _import_umath(); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 1017, __pyx_L3_error) /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1016 * * cdef inline int import_umath() except -1: * try: # <<<<<<<<<<<<<< * _import_umath() * except Exception: */ } __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; goto __pyx_L8_try_end; __pyx_L3_error:; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1018 * try: * _import_umath() * except Exception: # <<<<<<<<<<<<<< * raise ImportError("numpy.core.umath failed to import") * */ __pyx_t_4 = __Pyx_PyErr_ExceptionMatches(((PyObject *)(&((PyTypeObject*)PyExc_Exception)[0]))); if (__pyx_t_4) { __Pyx_AddTraceback("numpy.import_umath", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_5, &__pyx_t_6, &__pyx_t_7) < 0) __PYX_ERR(1, 1018, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GOTREF(__pyx_t_6); __Pyx_GOTREF(__pyx_t_7); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1019 * _import_umath() * except Exception: * raise ImportError("numpy.core.umath failed to import") # <<<<<<<<<<<<<< * * cdef inline int import_ufunc() except -1: */ __pyx_t_8 = __Pyx_PyObject_Call(__pyx_builtin_ImportError, __pyx_tuple__9, NULL); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 1019, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_Raise(__pyx_t_8, 0, 0, 0); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __PYX_ERR(1, 1019, __pyx_L5_except_error) } goto __pyx_L5_except_error; __pyx_L5_except_error:; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1016 * * cdef inline int 
import_umath() except -1: * try: # <<<<<<<<<<<<<< * _import_umath() * except Exception: */ __Pyx_XGIVEREF(__pyx_t_1); __Pyx_XGIVEREF(__pyx_t_2); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_ExceptionReset(__pyx_t_1, __pyx_t_2, __pyx_t_3); goto __pyx_L1_error; __pyx_L8_try_end:; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1015 * raise ImportError("numpy.core.multiarray failed to import") * * cdef inline int import_umath() except -1: # <<<<<<<<<<<<<< * try: * _import_umath() */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_8); __Pyx_AddTraceback("numpy.import_umath", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1021 * raise ImportError("numpy.core.umath failed to import") * * cdef inline int import_ufunc() except -1: # <<<<<<<<<<<<<< * try: * _import_umath() */ static CYTHON_INLINE int __pyx_f_5numpy_import_ufunc(void) { int __pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; int __pyx_t_4; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; PyObject *__pyx_t_8 = NULL; __Pyx_RefNannySetupContext("import_ufunc", 0); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1022 * * cdef inline int import_ufunc() except -1: * try: # <<<<<<<<<<<<<< * _import_umath() * except Exception: */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_1, &__pyx_t_2, &__pyx_t_3); __Pyx_XGOTREF(__pyx_t_1); __Pyx_XGOTREF(__pyx_t_2); __Pyx_XGOTREF(__pyx_t_3); /*try:*/ { /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1023 * cdef inline int import_ufunc() except -1: * try: * _import_umath() # <<<<<<<<<<<<<< * except Exception: * raise ImportError("numpy.core.umath failed to import") */ __pyx_t_4 = _import_umath(); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 1023, __pyx_L3_error) /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1022 * * cdef inline int import_ufunc() except -1: * try: # <<<<<<<<<<<<<< * _import_umath() * except Exception: */ } __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; goto __pyx_L8_try_end; __pyx_L3_error:; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1024 * try: * _import_umath() * except Exception: # <<<<<<<<<<<<<< * raise ImportError("numpy.core.umath failed to import") */ __pyx_t_4 = __Pyx_PyErr_ExceptionMatches(((PyObject *)(&((PyTypeObject*)PyExc_Exception)[0]))); if (__pyx_t_4) { __Pyx_AddTraceback("numpy.import_ufunc", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_5, &__pyx_t_6, &__pyx_t_7) < 0) __PYX_ERR(1, 1024, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GOTREF(__pyx_t_6); __Pyx_GOTREF(__pyx_t_7); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1025 * _import_umath() * except Exception: * raise ImportError("numpy.core.umath failed to import") # <<<<<<<<<<<<<< */ __pyx_t_8 = __Pyx_PyObject_Call(__pyx_builtin_ImportError, __pyx_tuple__10, NULL); if 
(unlikely(!__pyx_t_8)) __PYX_ERR(1, 1025, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_Raise(__pyx_t_8, 0, 0, 0); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __PYX_ERR(1, 1025, __pyx_L5_except_error) } goto __pyx_L5_except_error; __pyx_L5_except_error:; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1022 * * cdef inline int import_ufunc() except -1: * try: # <<<<<<<<<<<<<< * _import_umath() * except Exception: */ __Pyx_XGIVEREF(__pyx_t_1); __Pyx_XGIVEREF(__pyx_t_2); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_ExceptionReset(__pyx_t_1, __pyx_t_2, __pyx_t_3); goto __pyx_L1_error; __pyx_L8_try_end:; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1021 * raise ImportError("numpy.core.umath failed to import") * * cdef inline int import_ufunc() except -1: # <<<<<<<<<<<<<< * try: * _import_umath() */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_8); __Pyx_AddTraceback("numpy.import_ufunc", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyMethodDef __pyx_methods[] = { {0, 0, 0, 0} }; #if PY_MAJOR_VERSION >= 3 #if CYTHON_PEP489_MULTI_PHASE_INIT static PyObject* __pyx_pymod_create(PyObject *spec, PyModuleDef *def); /*proto*/ static int __pyx_pymod_exec_core(PyObject* module); /*proto*/ static PyModuleDef_Slot __pyx_moduledef_slots[] = { {Py_mod_create, (void*)__pyx_pymod_create}, {Py_mod_exec, (void*)__pyx_pymod_exec_core}, {0, NULL} }; #endif static struct PyModuleDef __pyx_moduledef = { PyModuleDef_HEAD_INIT, "core", __pyx_k_The_functions_here_are_the_core, /* m_doc */ #if CYTHON_PEP489_MULTI_PHASE_INIT 0, /* m_size */ #else -1, /* m_size */ #endif __pyx_methods /* m_methods */, #if CYTHON_PEP489_MULTI_PHASE_INIT __pyx_moduledef_slots, /* m_slots */ #else NULL, /* m_reload */ #endif NULL, /* m_traverse */ NULL, /* m_clear */ NULL /* m_free */ }; #endif static __Pyx_StringTabEntry __pyx_string_tab[] = { {&__pyx_n_s_DTYPE, __pyx_k_DTYPE, sizeof(__pyx_k_DTYPE), 0, 0, 1, 1}, {&__pyx_kp_u_ERROR_vertices_did_not_sort_corr, __pyx_k_ERROR_vertices_did_not_sort_corr, sizeof(__pyx_k_ERROR_vertices_did_not_sort_corr), 0, 1, 0, 0}, {&__pyx_kp_u_Format_string_allocated_too_shor, __pyx_k_Format_string_allocated_too_shor, sizeof(__pyx_k_Format_string_allocated_too_shor), 0, 1, 0, 0}, {&__pyx_kp_u_Format_string_allocated_too_shor_2, __pyx_k_Format_string_allocated_too_shor_2, sizeof(__pyx_k_Format_string_allocated_too_shor_2), 0, 1, 0, 0}, {&__pyx_n_s_ImportError, __pyx_k_ImportError, sizeof(__pyx_k_ImportError), 0, 0, 1, 1}, {&__pyx_kp_u_Non_native_byte_order_not_suppor, __pyx_k_Non_native_byte_order_not_suppor, sizeof(__pyx_k_Non_native_byte_order_not_suppor), 0, 1, 0, 0}, {&__pyx_n_s_RuntimeError, __pyx_k_RuntimeError, sizeof(__pyx_k_RuntimeError), 0, 0, 1, 1}, {&__pyx_n_s_ValueError, __pyx_k_ValueError, sizeof(__pyx_k_ValueError), 0, 0, 1, 1}, {&__pyx_n_s_all, __pyx_k_all, sizeof(__pyx_k_all), 0, 0, 1, 1}, {&__pyx_n_s_cline_in_traceback, __pyx_k_cline_in_traceback, sizeof(__pyx_k_cline_in_traceback), 0, 0, 1, 1}, {&__pyx_n_u_elliptical_overlap_grid, __pyx_k_elliptical_overlap_grid, sizeof(__pyx_k_elliptical_overlap_grid), 0, 1, 0, 1}, {&__pyx_n_s_float64, __pyx_k_float64, sizeof(__pyx_k_float64), 0, 0, 1, 1}, {&__pyx_n_s_import, __pyx_k_import, sizeof(__pyx_k_import), 0, 0, 1, 1}, {&__pyx_n_s_main, __pyx_k_main, 
sizeof(__pyx_k_main), 0, 0, 1, 1}, {&__pyx_kp_u_ndarray_is_not_C_contiguous, __pyx_k_ndarray_is_not_C_contiguous, sizeof(__pyx_k_ndarray_is_not_C_contiguous), 0, 1, 0, 0}, {&__pyx_kp_u_ndarray_is_not_Fortran_contiguou, __pyx_k_ndarray_is_not_Fortran_contiguou, sizeof(__pyx_k_ndarray_is_not_Fortran_contiguou), 0, 1, 0, 0}, {&__pyx_n_s_np, __pyx_k_np, sizeof(__pyx_k_np), 0, 0, 1, 1}, {&__pyx_n_s_numpy, __pyx_k_numpy, sizeof(__pyx_k_numpy), 0, 0, 1, 1}, {&__pyx_kp_u_numpy_core_multiarray_failed_to, __pyx_k_numpy_core_multiarray_failed_to, sizeof(__pyx_k_numpy_core_multiarray_failed_to), 0, 1, 0, 0}, {&__pyx_kp_u_numpy_core_umath_failed_to_impor, __pyx_k_numpy_core_umath_failed_to_impor, sizeof(__pyx_k_numpy_core_umath_failed_to_impor), 0, 1, 0, 0}, {&__pyx_n_s_pi, __pyx_k_pi, sizeof(__pyx_k_pi), 0, 0, 1, 1}, {&__pyx_n_s_range, __pyx_k_range, sizeof(__pyx_k_range), 0, 0, 1, 1}, {&__pyx_n_s_test, __pyx_k_test, sizeof(__pyx_k_test), 0, 0, 1, 1}, {&__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, __pyx_k_unknown_dtype_code_in_numpy_pxd, sizeof(__pyx_k_unknown_dtype_code_in_numpy_pxd), 0, 1, 0, 0}, {0, 0, 0, 0, 0, 0, 0} }; static int __Pyx_InitCachedBuiltins(void) { __pyx_builtin_ValueError = __Pyx_GetBuiltinName(__pyx_n_s_ValueError); if (!__pyx_builtin_ValueError) __PYX_ERR(1, 235, __pyx_L1_error) __pyx_builtin_range = __Pyx_GetBuiltinName(__pyx_n_s_range); if (!__pyx_builtin_range) __PYX_ERR(1, 248, __pyx_L1_error) __pyx_builtin_RuntimeError = __Pyx_GetBuiltinName(__pyx_n_s_RuntimeError); if (!__pyx_builtin_RuntimeError) __PYX_ERR(1, 823, __pyx_L1_error) __pyx_builtin_ImportError = __Pyx_GetBuiltinName(__pyx_n_s_ImportError); if (!__pyx_builtin_ImportError) __PYX_ERR(1, 1013, __pyx_L1_error) return 0; __pyx_L1_error:; return -1; } static int __Pyx_InitCachedConstants(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0); /* "photutils/geometry/core.pyx":299 * * if d1 > d2 or d2 > d3 or d1 > d3: * raise Exception("ERROR: vertices did not sort correctly") # <<<<<<<<<<<<<< * * # Determine number of vertices inside circle */ __pyx_tuple_ = PyTuple_Pack(1, __pyx_kp_u_ERROR_vertices_did_not_sort_corr); if (unlikely(!__pyx_tuple_)) __PYX_ERR(0, 299, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple_); __Pyx_GIVEREF(__pyx_tuple_); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":235 * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): * raise ValueError(u"ndarray is not C contiguous") # <<<<<<<<<<<<<< * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) */ __pyx_tuple__2 = PyTuple_Pack(1, __pyx_kp_u_ndarray_is_not_C_contiguous); if (unlikely(!__pyx_tuple__2)) __PYX_ERR(1, 235, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__2); __Pyx_GIVEREF(__pyx_tuple__2); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":239 * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): * raise ValueError(u"ndarray is not Fortran contiguous") # <<<<<<<<<<<<<< * * info.buf = PyArray_DATA(self) */ __pyx_tuple__3 = PyTuple_Pack(1, __pyx_kp_u_ndarray_is_not_Fortran_contiguou); if (unlikely(!__pyx_tuple__3)) __PYX_ERR(1, 239, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__3); __Pyx_GIVEREF(__pyx_tuple__3); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":276 * if ((descr.byteorder == c'>' and little_endian) or * 
(descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" */ __pyx_tuple__4 = PyTuple_Pack(1, __pyx_kp_u_Non_native_byte_order_not_suppor); if (unlikely(!__pyx_tuple__4)) __PYX_ERR(1, 276, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__4); __Pyx_GIVEREF(__pyx_tuple__4); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":823 * * if (end - f) - (new_offset - offset[0]) < 15: * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") # <<<<<<<<<<<<<< * * if ((child.byteorder == c'>' and little_endian) or */ __pyx_tuple__5 = PyTuple_Pack(1, __pyx_kp_u_Format_string_allocated_too_shor); if (unlikely(!__pyx_tuple__5)) __PYX_ERR(1, 823, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__5); __Pyx_GIVEREF(__pyx_tuple__5); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":827 * if ((child.byteorder == c'>' and little_endian) or * (child.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< * # One could encode it in the format string and have Cython * # complain instead, BUT: < and > in format strings also imply */ __pyx_tuple__6 = PyTuple_Pack(1, __pyx_kp_u_Non_native_byte_order_not_suppor); if (unlikely(!__pyx_tuple__6)) __PYX_ERR(1, 827, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__6); __Pyx_GIVEREF(__pyx_tuple__6); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":847 * t = child.type_num * if end - f < 5: * raise RuntimeError(u"Format string allocated too short.") # <<<<<<<<<<<<<< * * # Until ticket #99 is fixed, use integers to avoid warnings */ __pyx_tuple__7 = PyTuple_Pack(1, __pyx_kp_u_Format_string_allocated_too_shor_2); if (unlikely(!__pyx_tuple__7)) __PYX_ERR(1, 847, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__7); __Pyx_GIVEREF(__pyx_tuple__7); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1013 * _import_array() * except Exception: * raise ImportError("numpy.core.multiarray failed to import") # <<<<<<<<<<<<<< * * cdef inline int import_umath() except -1: */ __pyx_tuple__8 = PyTuple_Pack(1, __pyx_kp_u_numpy_core_multiarray_failed_to); if (unlikely(!__pyx_tuple__8)) __PYX_ERR(1, 1013, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__8); __Pyx_GIVEREF(__pyx_tuple__8); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1019 * _import_umath() * except Exception: * raise ImportError("numpy.core.umath failed to import") # <<<<<<<<<<<<<< * * cdef inline int import_ufunc() except -1: */ __pyx_tuple__9 = PyTuple_Pack(1, __pyx_kp_u_numpy_core_umath_failed_to_impor); if (unlikely(!__pyx_tuple__9)) __PYX_ERR(1, 1019, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__9); __Pyx_GIVEREF(__pyx_tuple__9); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1025 * _import_umath() * except Exception: * raise ImportError("numpy.core.umath failed to import") # <<<<<<<<<<<<<< */ __pyx_tuple__10 = PyTuple_Pack(1, __pyx_kp_u_numpy_core_umath_failed_to_impor); if (unlikely(!__pyx_tuple__10)) __PYX_ERR(1, 1025, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__10); __Pyx_GIVEREF(__pyx_tuple__10); __Pyx_RefNannyFinishContext(); return 0; __pyx_L1_error:; __Pyx_RefNannyFinishContext(); return -1; } static int __Pyx_InitGlobals(void) { if 
(__Pyx_InitStrings(__pyx_string_tab) < 0) __PYX_ERR(0, 1, __pyx_L1_error); return 0; __pyx_L1_error:; return -1; } #if PY_MAJOR_VERSION < 3 PyMODINIT_FUNC initcore(void); /*proto*/ PyMODINIT_FUNC initcore(void) #else PyMODINIT_FUNC PyInit_core(void); /*proto*/ PyMODINIT_FUNC PyInit_core(void) #if CYTHON_PEP489_MULTI_PHASE_INIT { return PyModuleDef_Init(&__pyx_moduledef); } static int __Pyx_copy_spec_to_module(PyObject *spec, PyObject *moddict, const char* from_name, const char* to_name) { PyObject *value = PyObject_GetAttrString(spec, from_name); int result = 0; if (likely(value)) { result = PyDict_SetItemString(moddict, to_name, value); Py_DECREF(value); } else if (PyErr_ExceptionMatches(PyExc_AttributeError)) { PyErr_Clear(); } else { result = -1; } return result; } static PyObject* __pyx_pymod_create(PyObject *spec, CYTHON_UNUSED PyModuleDef *def) { PyObject *module = NULL, *moddict, *modname; if (__pyx_m) return __Pyx_NewRef(__pyx_m); modname = PyObject_GetAttrString(spec, "name"); if (unlikely(!modname)) goto bad; module = PyModule_NewObject(modname); Py_DECREF(modname); if (unlikely(!module)) goto bad; moddict = PyModule_GetDict(module); if (unlikely(!moddict)) goto bad; if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "loader", "__loader__") < 0)) goto bad; if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "origin", "__file__") < 0)) goto bad; if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "parent", "__package__") < 0)) goto bad; if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "submodule_search_locations", "__path__") < 0)) goto bad; return module; bad: Py_XDECREF(module); return NULL; } static int __pyx_pymod_exec_core(PyObject *__pyx_pyinit_module) #endif #endif { PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; __Pyx_RefNannyDeclarations #if CYTHON_PEP489_MULTI_PHASE_INIT if (__pyx_m && __pyx_m == __pyx_pyinit_module) return 0; #endif #if CYTHON_REFNANNY __Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny"); if (!__Pyx_RefNanny) { PyErr_Clear(); __Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny"); if (!__Pyx_RefNanny) Py_FatalError("failed to import 'refnanny' module"); } #endif __Pyx_RefNannySetupContext("PyMODINIT_FUNC PyInit_core(void)", 0); if (__Pyx_check_binary_version() < 0) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_empty_unicode = PyUnicode_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_unicode)) __PYX_ERR(0, 1, __pyx_L1_error) #ifdef __Pyx_CyFunction_USED if (__pyx_CyFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_FusedFunction_USED if (__pyx_FusedFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_Coroutine_USED if (__pyx_Coroutine_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_Generator_USED if (__pyx_Generator_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_AsyncGen_USED if (__pyx_AsyncGen_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_StopAsyncIteration_USED if (__pyx_StopAsyncIteration_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif /*--- Library function declarations ---*/ /*--- Threads initialization code ---*/ #if defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS #ifdef WITH_THREAD /* Python build with threading support? 
*/ PyEval_InitThreads(); #endif #endif /*--- Module creation code ---*/ #if CYTHON_PEP489_MULTI_PHASE_INIT __pyx_m = __pyx_pyinit_module; Py_INCREF(__pyx_m); #else #if PY_MAJOR_VERSION < 3 __pyx_m = Py_InitModule4("core", __pyx_methods, __pyx_k_The_functions_here_are_the_core, 0, PYTHON_API_VERSION); Py_XINCREF(__pyx_m); #else __pyx_m = PyModule_Create(&__pyx_moduledef); #endif if (unlikely(!__pyx_m)) __PYX_ERR(0, 1, __pyx_L1_error) #endif __pyx_d = PyModule_GetDict(__pyx_m); if (unlikely(!__pyx_d)) __PYX_ERR(0, 1, __pyx_L1_error) Py_INCREF(__pyx_d); __pyx_b = PyImport_AddModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_b)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_cython_runtime = PyImport_AddModule((char *) "cython_runtime"); if (unlikely(!__pyx_cython_runtime)) __PYX_ERR(0, 1, __pyx_L1_error) #if CYTHON_COMPILING_IN_PYPY Py_INCREF(__pyx_b); #endif if (PyObject_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) __PYX_ERR(0, 1, __pyx_L1_error); /*--- Initialize various global constants etc. ---*/ if (__Pyx_InitGlobals() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #if PY_MAJOR_VERSION < 3 && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT) if (__Pyx_init_sys_getdefaultencoding_params() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif if (__pyx_module_is_main_photutils__geometry__core) { if (PyObject_SetAttrString(__pyx_m, "__name__", __pyx_n_s_main) < 0) __PYX_ERR(0, 1, __pyx_L1_error) } #if PY_MAJOR_VERSION >= 3 { PyObject *modules = PyImport_GetModuleDict(); if (unlikely(!modules)) __PYX_ERR(0, 1, __pyx_L1_error) if (!PyDict_GetItemString(modules, "photutils.geometry.core")) { if (unlikely(PyDict_SetItemString(modules, "photutils.geometry.core", __pyx_m) < 0)) __PYX_ERR(0, 1, __pyx_L1_error) } } #endif /*--- Builtin init code ---*/ if (__Pyx_InitCachedBuiltins() < 0) __PYX_ERR(0, 1, __pyx_L1_error) /*--- Constants init code ---*/ if (__Pyx_InitCachedConstants() < 0) __PYX_ERR(0, 1, __pyx_L1_error) /*--- Global init code ---*/ /*--- Variable export code ---*/ /*--- Function export code ---*/ if (__Pyx_ExportFunction("distance", (void (*)(void))__pyx_f_9photutils_8geometry_4core_distance, "double (double, double, double, double)") < 0) __PYX_ERR(0, 1, __pyx_L1_error) if (__Pyx_ExportFunction("area_arc", (void (*)(void))__pyx_f_9photutils_8geometry_4core_area_arc, "double (double, double, double, double, double)") < 0) __PYX_ERR(0, 1, __pyx_L1_error) if (__Pyx_ExportFunction("area_triangle", (void (*)(void))__pyx_f_9photutils_8geometry_4core_area_triangle, "double (double, double, double, double, double, double)") < 0) __PYX_ERR(0, 1, __pyx_L1_error) if (__Pyx_ExportFunction("area_arc_unit", (void (*)(void))__pyx_f_9photutils_8geometry_4core_area_arc_unit, "double (double, double, double, double)") < 0) __PYX_ERR(0, 1, __pyx_L1_error) if (__Pyx_ExportFunction("in_triangle", (void (*)(void))__pyx_f_9photutils_8geometry_4core_in_triangle, "int (double, double, double, double, double, double, double, double)") < 0) __PYX_ERR(0, 1, __pyx_L1_error) if (__Pyx_ExportFunction("overlap_area_triangle_unit_circle", (void (*)(void))__pyx_f_9photutils_8geometry_4core_overlap_area_triangle_unit_circle, "double (double, double, double, double, double, double)") < 0) __PYX_ERR(0, 1, __pyx_L1_error) if (__Pyx_ExportFunction("floor_sqrt", (void (*)(void))__pyx_f_9photutils_8geometry_4core_floor_sqrt, "double (double)") < 0) __PYX_ERR(0, 1, __pyx_L1_error) /*--- Type init code ---*/ /*--- Type import code ---*/ __pyx_ptype_7cpython_4type_type = 
__Pyx_ImportType(__Pyx_BUILTIN_MODULE_NAME, "type", #if CYTHON_COMPILING_IN_PYPY sizeof(PyTypeObject), #else sizeof(PyHeapTypeObject), #endif 0); if (unlikely(!__pyx_ptype_7cpython_4type_type)) __PYX_ERR(2, 9, __pyx_L1_error) __pyx_ptype_7cpython_4bool_bool = __Pyx_ImportType(__Pyx_BUILTIN_MODULE_NAME, "bool", sizeof(PyBoolObject), 0); if (unlikely(!__pyx_ptype_7cpython_4bool_bool)) __PYX_ERR(3, 8, __pyx_L1_error) __pyx_ptype_7cpython_7complex_complex = __Pyx_ImportType(__Pyx_BUILTIN_MODULE_NAME, "complex", sizeof(PyComplexObject), 0); if (unlikely(!__pyx_ptype_7cpython_7complex_complex)) __PYX_ERR(4, 15, __pyx_L1_error) __pyx_ptype_5numpy_dtype = __Pyx_ImportType("numpy", "dtype", sizeof(PyArray_Descr), 0); if (unlikely(!__pyx_ptype_5numpy_dtype)) __PYX_ERR(1, 163, __pyx_L1_error) __pyx_ptype_5numpy_flatiter = __Pyx_ImportType("numpy", "flatiter", sizeof(PyArrayIterObject), 0); if (unlikely(!__pyx_ptype_5numpy_flatiter)) __PYX_ERR(1, 185, __pyx_L1_error) __pyx_ptype_5numpy_broadcast = __Pyx_ImportType("numpy", "broadcast", sizeof(PyArrayMultiIterObject), 0); if (unlikely(!__pyx_ptype_5numpy_broadcast)) __PYX_ERR(1, 189, __pyx_L1_error) __pyx_ptype_5numpy_ndarray = __Pyx_ImportType("numpy", "ndarray", sizeof(PyArrayObject), 0); if (unlikely(!__pyx_ptype_5numpy_ndarray)) __PYX_ERR(1, 198, __pyx_L1_error) __pyx_ptype_5numpy_ufunc = __Pyx_ImportType("numpy", "ufunc", sizeof(PyUFuncObject), 0); if (unlikely(!__pyx_ptype_5numpy_ufunc)) __PYX_ERR(1, 885, __pyx_L1_error) /*--- Variable import code ---*/ /*--- Function import code ---*/ /*--- Execution code ---*/ #if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED) if (__Pyx_patch_abc() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif /* "photutils/geometry/core.pyx":7 * unicode_literals) * * import numpy as np # <<<<<<<<<<<<<< * cimport numpy as np * */ __pyx_t_1 = __Pyx_Import(__pyx_n_s_numpy, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 7, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem(__pyx_d, __pyx_n_s_np, __pyx_t_1) < 0) __PYX_ERR(0, 7, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "photutils/geometry/core.pyx":11 * * * __all__ = ['elliptical_overlap_grid'] # <<<<<<<<<<<<<< * * */ __pyx_t_1 = PyList_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 11, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(__pyx_n_u_elliptical_overlap_grid); __Pyx_GIVEREF(__pyx_n_u_elliptical_overlap_grid); PyList_SET_ITEM(__pyx_t_1, 0, __pyx_n_u_elliptical_overlap_grid); if (PyDict_SetItem(__pyx_d, __pyx_n_s_all, __pyx_t_1) < 0) __PYX_ERR(0, 11, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "photutils/geometry/core.pyx":24 * from cpython cimport bool * * DTYPE = np.float64 # <<<<<<<<<<<<<< * ctypedef np.float64_t DTYPE_t * */ __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 24, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_float64); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 24, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; if (PyDict_SetItem(__pyx_d, __pyx_n_s_DTYPE, __pyx_t_2) < 0) __PYX_ERR(0, 24, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "photutils/geometry/core.pyx":1 * # Licensed under a 3-clause BSD style license - see LICENSE.rst # <<<<<<<<<<<<<< * """The functions here are the core geometry functions.""" * */ __pyx_t_2 = __Pyx_PyDict_NewPresized(0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_d, 
__pyx_n_s_test, __pyx_t_2) < 0) __PYX_ERR(0, 1, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1021 * raise ImportError("numpy.core.umath failed to import") * * cdef inline int import_ufunc() except -1: # <<<<<<<<<<<<<< * try: * _import_umath() */ /*--- Wrapped vars code ---*/ goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); if (__pyx_m) { if (__pyx_d) { __Pyx_AddTraceback("init photutils.geometry.core", 0, __pyx_lineno, __pyx_filename); } Py_DECREF(__pyx_m); __pyx_m = 0; } else if (!PyErr_Occurred()) { PyErr_SetString(PyExc_ImportError, "init photutils.geometry.core"); } __pyx_L0:; __Pyx_RefNannyFinishContext(); #if CYTHON_PEP489_MULTI_PHASE_INIT return (__pyx_m != NULL) ? 0 : -1; #elif PY_MAJOR_VERSION >= 3 return __pyx_m; #else return; #endif } /* --- Runtime support code --- */ /* Refnanny */ #if CYTHON_REFNANNY static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) { PyObject *m = NULL, *p = NULL; void *r = NULL; m = PyImport_ImportModule((char *)modname); if (!m) goto end; p = PyObject_GetAttrString(m, (char *)"RefNannyAPI"); if (!p) goto end; r = PyLong_AsVoidPtr(p); end: Py_XDECREF(p); Py_XDECREF(m); return (__Pyx_RefNannyAPIStruct *)r; } #endif /* PyErrFetchRestore */ #if CYTHON_FAST_THREAD_STATE static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) { PyObject *tmp_type, *tmp_value, *tmp_tb; tmp_type = tstate->curexc_type; tmp_value = tstate->curexc_value; tmp_tb = tstate->curexc_traceback; tstate->curexc_type = type; tstate->curexc_value = value; tstate->curexc_traceback = tb; Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); } static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { *type = tstate->curexc_type; *value = tstate->curexc_value; *tb = tstate->curexc_traceback; tstate->curexc_type = 0; tstate->curexc_value = 0; tstate->curexc_traceback = 0; } #endif /* WriteUnraisableException */ static void __Pyx_WriteUnraisable(const char *name, CYTHON_UNUSED int clineno, CYTHON_UNUSED int lineno, CYTHON_UNUSED const char *filename, int full_traceback, CYTHON_UNUSED int nogil) { PyObject *old_exc, *old_val, *old_tb; PyObject *ctx; __Pyx_PyThreadState_declare #ifdef WITH_THREAD PyGILState_STATE state; if (nogil) state = PyGILState_Ensure(); #ifdef _MSC_VER else state = (PyGILState_STATE)-1; #endif #endif __Pyx_PyThreadState_assign __Pyx_ErrFetch(&old_exc, &old_val, &old_tb); if (full_traceback) { Py_XINCREF(old_exc); Py_XINCREF(old_val); Py_XINCREF(old_tb); __Pyx_ErrRestore(old_exc, old_val, old_tb); PyErr_PrintEx(1); } #if PY_MAJOR_VERSION < 3 ctx = PyString_FromString(name); #else ctx = PyUnicode_FromString(name); #endif __Pyx_ErrRestore(old_exc, old_val, old_tb); if (!ctx) { PyErr_WriteUnraisable(Py_None); } else { PyErr_WriteUnraisable(ctx); Py_DECREF(ctx); } #ifdef WITH_THREAD if (nogil) PyGILState_Release(state); #endif } /* None */ static CYTHON_INLINE long __Pyx_mod_long(long a, long b) { long r = a % b; r += ((r != 0) & ((r ^ b) < 0)) * b; return r; } /* GetBuiltinName */ static PyObject *__Pyx_GetBuiltinName(PyObject *name) { PyObject* result = __Pyx_PyObject_GetAttrStr(__pyx_b, name); if (unlikely(!result)) { PyErr_Format(PyExc_NameError, #if PY_MAJOR_VERSION >= 3 "name '%U' is not defined", name); #else "name '%.200s' is not defined", PyString_AS_STRING(name)); 
#endif } return result; } /* GetModuleGlobalName */ static CYTHON_INLINE PyObject *__Pyx_GetModuleGlobalName(PyObject *name) { PyObject *result; #if !CYTHON_AVOID_BORROWED_REFS result = PyDict_GetItem(__pyx_d, name); if (likely(result)) { Py_INCREF(result); } else { #else result = PyObject_GetItem(__pyx_d, name); if (!result) { PyErr_Clear(); #endif result = __Pyx_GetBuiltinName(name); } return result; } /* PyObjectCall */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw) { PyObject *result; ternaryfunc call = func->ob_type->tp_call; if (unlikely(!call)) return PyObject_Call(func, arg, kw); if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) return NULL; result = (*call)(func, arg, kw); Py_LeaveRecursiveCall(); if (unlikely(!result) && unlikely(!PyErr_Occurred())) { PyErr_SetString( PyExc_SystemError, "NULL result without error in PyObject_Call"); } return result; } #endif /* RaiseException */ #if PY_MAJOR_VERSION < 3 static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, CYTHON_UNUSED PyObject *cause) { __Pyx_PyThreadState_declare Py_XINCREF(type); if (!value || value == Py_None) value = NULL; else Py_INCREF(value); if (!tb || tb == Py_None) tb = NULL; else { Py_INCREF(tb); if (!PyTraceBack_Check(tb)) { PyErr_SetString(PyExc_TypeError, "raise: arg 3 must be a traceback or None"); goto raise_error; } } if (PyType_Check(type)) { #if CYTHON_COMPILING_IN_PYPY if (!value) { Py_INCREF(Py_None); value = Py_None; } #endif PyErr_NormalizeException(&type, &value, &tb); } else { if (value) { PyErr_SetString(PyExc_TypeError, "instance exception may not have a separate value"); goto raise_error; } value = type; type = (PyObject*) Py_TYPE(type); Py_INCREF(type); if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) { PyErr_SetString(PyExc_TypeError, "raise: exception class must be a subclass of BaseException"); goto raise_error; } } __Pyx_PyThreadState_assign __Pyx_ErrRestore(type, value, tb); return; raise_error: Py_XDECREF(value); Py_XDECREF(type); Py_XDECREF(tb); return; } #else static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) { PyObject* owned_instance = NULL; if (tb == Py_None) { tb = 0; } else if (tb && !PyTraceBack_Check(tb)) { PyErr_SetString(PyExc_TypeError, "raise: arg 3 must be a traceback or None"); goto bad; } if (value == Py_None) value = 0; if (PyExceptionInstance_Check(type)) { if (value) { PyErr_SetString(PyExc_TypeError, "instance exception may not have a separate value"); goto bad; } value = type; type = (PyObject*) Py_TYPE(value); } else if (PyExceptionClass_Check(type)) { PyObject *instance_class = NULL; if (value && PyExceptionInstance_Check(value)) { instance_class = (PyObject*) Py_TYPE(value); if (instance_class != type) { int is_subclass = PyObject_IsSubclass(instance_class, type); if (!is_subclass) { instance_class = NULL; } else if (unlikely(is_subclass == -1)) { goto bad; } else { type = instance_class; } } } if (!instance_class) { PyObject *args; if (!value) args = PyTuple_New(0); else if (PyTuple_Check(value)) { Py_INCREF(value); args = value; } else args = PyTuple_Pack(1, value); if (!args) goto bad; owned_instance = PyObject_Call(type, args, NULL); Py_DECREF(args); if (!owned_instance) goto bad; value = owned_instance; if (!PyExceptionInstance_Check(value)) { PyErr_Format(PyExc_TypeError, "calling %R should have returned an instance of " "BaseException, not %R", type, Py_TYPE(value)); 
goto bad; } } } else { PyErr_SetString(PyExc_TypeError, "raise: exception class must be a subclass of BaseException"); goto bad; } if (cause) { PyObject *fixed_cause; if (cause == Py_None) { fixed_cause = NULL; } else if (PyExceptionClass_Check(cause)) { fixed_cause = PyObject_CallObject(cause, NULL); if (fixed_cause == NULL) goto bad; } else if (PyExceptionInstance_Check(cause)) { fixed_cause = cause; Py_INCREF(fixed_cause); } else { PyErr_SetString(PyExc_TypeError, "exception causes must derive from " "BaseException"); goto bad; } PyException_SetCause(value, fixed_cause); } PyErr_SetObject(type, value); if (tb) { #if CYTHON_COMPILING_IN_PYPY PyObject *tmp_type, *tmp_value, *tmp_tb; PyErr_Fetch(&tmp_type, &tmp_value, &tmp_tb); Py_INCREF(tb); PyErr_Restore(tmp_type, tmp_value, tb); Py_XDECREF(tmp_tb); #else PyThreadState *tstate = __Pyx_PyThreadState_Current; PyObject* tmp_tb = tstate->curexc_traceback; if (tb != tmp_tb) { Py_INCREF(tb); tstate->curexc_traceback = tb; Py_XDECREF(tmp_tb); } #endif } bad: Py_XDECREF(owned_instance); return; } #endif /* ExtTypeTest */ static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type) { if (unlikely(!type)) { PyErr_SetString(PyExc_SystemError, "Missing type object"); return 0; } if (likely(__Pyx_TypeCheck(obj, type))) return 1; PyErr_Format(PyExc_TypeError, "Cannot convert %.200s to %.200s", Py_TYPE(obj)->tp_name, type->tp_name); return 0; } /* RaiseTooManyValuesToUnpack */ static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected) { PyErr_Format(PyExc_ValueError, "too many values to unpack (expected %" CYTHON_FORMAT_SSIZE_T "d)", expected); } /* RaiseNeedMoreValuesToUnpack */ static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index) { PyErr_Format(PyExc_ValueError, "need more than %" CYTHON_FORMAT_SSIZE_T "d value%.1s to unpack", index, (index == 1) ? 
"" : "s"); } /* RaiseNoneIterError */ static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable"); } /* SaveResetException */ #if CYTHON_FAST_THREAD_STATE static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { *type = tstate->exc_type; *value = tstate->exc_value; *tb = tstate->exc_traceback; Py_XINCREF(*type); Py_XINCREF(*value); Py_XINCREF(*tb); } static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) { PyObject *tmp_type, *tmp_value, *tmp_tb; tmp_type = tstate->exc_type; tmp_value = tstate->exc_value; tmp_tb = tstate->exc_traceback; tstate->exc_type = type; tstate->exc_value = value; tstate->exc_traceback = tb; Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); } #endif /* PyErrExceptionMatches */ #if CYTHON_FAST_THREAD_STATE static int __Pyx_PyErr_ExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) { Py_ssize_t i, n; n = PyTuple_GET_SIZE(tuple); #if PY_MAJOR_VERSION >= 3 for (i=0; i<n; i++) { if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1; } #endif for (i=0; i<n; i++) { if (__Pyx_PyErr_GivenExceptionMatches(exc_type, PyTuple_GET_ITEM(tuple, i))) return 1; } return 0; } static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err) { PyObject *exc_type = tstate->curexc_type; if (exc_type == err) return 1; if (unlikely(!exc_type)) return 0; if (unlikely(PyTuple_Check(err))) return __Pyx_PyErr_ExceptionMatchesTuple(exc_type, err); return __Pyx_PyErr_GivenExceptionMatches(exc_type, err); } #endif /* GetException */ #if CYTHON_FAST_THREAD_STATE static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { #else static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb) { #endif PyObject *local_type, *local_value, *local_tb; #if CYTHON_FAST_THREAD_STATE PyObject *tmp_type, *tmp_value, *tmp_tb; local_type = tstate->curexc_type; local_value = tstate->curexc_value; local_tb = tstate->curexc_traceback; tstate->curexc_type = 0; tstate->curexc_value = 0; tstate->curexc_traceback = 0; #else PyErr_Fetch(&local_type, &local_value, &local_tb); #endif PyErr_NormalizeException(&local_type, &local_value, &local_tb); #if CYTHON_FAST_THREAD_STATE if (unlikely(tstate->curexc_type)) #else if (unlikely(PyErr_Occurred())) #endif goto bad; #if PY_MAJOR_VERSION >= 3 if (local_tb) { if (unlikely(PyException_SetTraceback(local_value, local_tb) < 0)) goto bad; } #endif Py_XINCREF(local_tb); Py_XINCREF(local_type); Py_XINCREF(local_value); *type = local_type; *value = local_value; *tb = local_tb; #if CYTHON_FAST_THREAD_STATE tmp_type = tstate->exc_type; tmp_value = tstate->exc_value; tmp_tb = tstate->exc_traceback; tstate->exc_type = local_type; tstate->exc_value = local_value; tstate->exc_traceback = local_tb; Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); #else PyErr_SetExcInfo(local_type, local_value, local_tb); #endif return 0; bad: *type = 0; *value = 0; *tb = 0; Py_XDECREF(local_type); Py_XDECREF(local_value); Py_XDECREF(local_tb); return -1; } /* Import */ static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level) { PyObject *empty_list = 0; PyObject *module = 0; PyObject *global_dict = 0; PyObject *empty_dict = 0; PyObject *list; #if PY_MAJOR_VERSION < 3 PyObject *py_import; py_import = __Pyx_PyObject_GetAttrStr(__pyx_b, __pyx_n_s_import); if (!py_import) goto bad; #endif if (from_list) list = from_list; else { empty_list = PyList_New(0); if (!empty_list) goto bad; list = empty_list; } global_dict = PyModule_GetDict(__pyx_m); if (!global_dict) goto bad; empty_dict = PyDict_New(); if (!empty_dict) goto bad; { #if PY_MAJOR_VERSION >= 3 if (level == -1) { if
(strchr(__Pyx_MODULE_NAME, '.')) { module = PyImport_ImportModuleLevelObject( name, global_dict, empty_dict, list, 1); if (!module) { if (!PyErr_ExceptionMatches(PyExc_ImportError)) goto bad; PyErr_Clear(); } } level = 0; } #endif if (!module) { #if PY_MAJOR_VERSION < 3 PyObject *py_level = PyInt_FromLong(level); if (!py_level) goto bad; module = PyObject_CallFunctionObjArgs(py_import, name, global_dict, empty_dict, list, py_level, NULL); Py_DECREF(py_level); #else module = PyImport_ImportModuleLevelObject( name, global_dict, empty_dict, list, level); #endif } } bad: #if PY_MAJOR_VERSION < 3 Py_XDECREF(py_import); #endif Py_XDECREF(empty_list); Py_XDECREF(empty_dict); return module; } /* CLineInTraceback */ #ifndef CYTHON_CLINE_IN_TRACEBACK static int __Pyx_CLineForTraceback(CYTHON_UNUSED PyThreadState *tstate, int c_line) { PyObject *use_cline; PyObject *ptype, *pvalue, *ptraceback; #if CYTHON_COMPILING_IN_CPYTHON PyObject **cython_runtime_dict; #endif __Pyx_ErrFetchInState(tstate, &ptype, &pvalue, &ptraceback); #if CYTHON_COMPILING_IN_CPYTHON cython_runtime_dict = _PyObject_GetDictPtr(__pyx_cython_runtime); if (likely(cython_runtime_dict)) { use_cline = PyDict_GetItem(*cython_runtime_dict, __pyx_n_s_cline_in_traceback); } else #endif { PyObject *use_cline_obj = __Pyx_PyObject_GetAttrStr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback); if (use_cline_obj) { use_cline = PyObject_Not(use_cline_obj) ? Py_False : Py_True; Py_DECREF(use_cline_obj); } else { PyErr_Clear(); use_cline = NULL; } } if (!use_cline) { c_line = 0; PyObject_SetAttr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback, Py_False); } else if (PyObject_Not(use_cline) != 0) { c_line = 0; } __Pyx_ErrRestoreInState(tstate, ptype, pvalue, ptraceback); return c_line; } #endif /* CodeObjectCache */ static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) { int start = 0, mid = 0, end = count - 1; if (end >= 0 && code_line > entries[end].code_line) { return count; } while (start < end) { mid = start + (end - start) / 2; if (code_line < entries[mid].code_line) { end = mid; } else if (code_line > entries[mid].code_line) { start = mid + 1; } else { return mid; } } if (code_line <= entries[mid].code_line) { return mid; } else { return mid + 1; } } static PyCodeObject *__pyx_find_code_object(int code_line) { PyCodeObject* code_object; int pos; if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) { return NULL; } pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); if (unlikely(pos >= __pyx_code_cache.count) || unlikely(__pyx_code_cache.entries[pos].code_line != code_line)) { return NULL; } code_object = __pyx_code_cache.entries[pos].code_object; Py_INCREF(code_object); return code_object; } static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) { int pos, i; __Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries; if (unlikely(!code_line)) { return; } if (unlikely(!entries)) { entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry)); if (likely(entries)) { __pyx_code_cache.entries = entries; __pyx_code_cache.max_count = 64; __pyx_code_cache.count = 1; entries[0].code_line = code_line; entries[0].code_object = code_object; Py_INCREF(code_object); } return; } pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) { PyCodeObject* tmp = 
entries[pos].code_object; entries[pos].code_object = code_object; Py_DECREF(tmp); return; } if (__pyx_code_cache.count == __pyx_code_cache.max_count) { int new_max = __pyx_code_cache.max_count + 64; entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc( __pyx_code_cache.entries, (size_t)new_max*sizeof(__Pyx_CodeObjectCacheEntry)); if (unlikely(!entries)) { return; } __pyx_code_cache.entries = entries; __pyx_code_cache.max_count = new_max; } for (i=__pyx_code_cache.count; i>pos; i--) { entries[i] = entries[i-1]; } entries[pos].code_line = code_line; entries[pos].code_object = code_object; __pyx_code_cache.count++; Py_INCREF(code_object); } /* AddTraceback */ #include "compile.h" #include "frameobject.h" #include "traceback.h" static PyCodeObject* __Pyx_CreateCodeObjectForTraceback( const char *funcname, int c_line, int py_line, const char *filename) { PyCodeObject *py_code = 0; PyObject *py_srcfile = 0; PyObject *py_funcname = 0; #if PY_MAJOR_VERSION < 3 py_srcfile = PyString_FromString(filename); #else py_srcfile = PyUnicode_FromString(filename); #endif if (!py_srcfile) goto bad; if (c_line) { #if PY_MAJOR_VERSION < 3 py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); #else py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); #endif } else { #if PY_MAJOR_VERSION < 3 py_funcname = PyString_FromString(funcname); #else py_funcname = PyUnicode_FromString(funcname); #endif } if (!py_funcname) goto bad; py_code = __Pyx_PyCode_New( 0, 0, 0, 0, 0, __pyx_empty_bytes, /*PyObject *code,*/ __pyx_empty_tuple, /*PyObject *consts,*/ __pyx_empty_tuple, /*PyObject *names,*/ __pyx_empty_tuple, /*PyObject *varnames,*/ __pyx_empty_tuple, /*PyObject *freevars,*/ __pyx_empty_tuple, /*PyObject *cellvars,*/ py_srcfile, /*PyObject *filename,*/ py_funcname, /*PyObject *name,*/ py_line, __pyx_empty_bytes /*PyObject *lnotab*/ ); Py_DECREF(py_srcfile); Py_DECREF(py_funcname); return py_code; bad: Py_XDECREF(py_srcfile); Py_XDECREF(py_funcname); return NULL; } static void __Pyx_AddTraceback(const char *funcname, int c_line, int py_line, const char *filename) { PyCodeObject *py_code = 0; PyFrameObject *py_frame = 0; PyThreadState *tstate = __Pyx_PyThreadState_Current; if (c_line) { c_line = __Pyx_CLineForTraceback(tstate, c_line); } py_code = __pyx_find_code_object(c_line ? -c_line : py_line); if (!py_code) { py_code = __Pyx_CreateCodeObjectForTraceback( funcname, c_line, py_line, filename); if (!py_code) goto bad; __pyx_insert_code_object(c_line ? 
-c_line : py_line, py_code); } py_frame = PyFrame_New( tstate, /*PyThreadState *tstate,*/ py_code, /*PyCodeObject *code,*/ __pyx_d, /*PyObject *globals,*/ 0 /*PyObject *locals*/ ); if (!py_frame) goto bad; __Pyx_PyFrame_SetLineNumber(py_frame, py_line); PyTraceBack_Here(py_frame); bad: Py_XDECREF(py_code); Py_XDECREF(py_frame); } /* Declarations */ #if CYTHON_CCOMPLEX #ifdef __cplusplus static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { return ::std::complex< float >(x, y); } #else static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { return x + y*(__pyx_t_float_complex)_Complex_I; } #endif #else static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { __pyx_t_float_complex z; z.real = x; z.imag = y; return z; } #endif /* Arithmetic */ #if CYTHON_CCOMPLEX #else static CYTHON_INLINE int __Pyx_c_eq_float(__pyx_t_float_complex a, __pyx_t_float_complex b) { return (a.real == b.real) && (a.imag == b.imag); } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sum_float(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; z.real = a.real + b.real; z.imag = a.imag + b.imag; return z; } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_diff_float(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; z.real = a.real - b.real; z.imag = a.imag - b.imag; return z; } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prod_float(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; z.real = a.real * b.real - a.imag * b.imag; z.imag = a.real * b.imag + a.imag * b.real; return z; } #if 1 static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quot_float(__pyx_t_float_complex a, __pyx_t_float_complex b) { if (b.imag == 0) { return __pyx_t_float_complex_from_parts(a.real / b.real, a.imag / b.real); } else if (fabsf(b.real) >= fabsf(b.imag)) { if (b.real == 0 && b.imag == 0) { return __pyx_t_float_complex_from_parts(a.real / b.real, a.imag / b.imag); } else { float r = b.imag / b.real; float s = 1.0 / (b.real + b.imag * r); return __pyx_t_float_complex_from_parts( (a.real + a.imag * r) * s, (a.imag - a.real * r) * s); } } else { float r = b.real / b.imag; float s = 1.0 / (b.imag + b.real * r); return __pyx_t_float_complex_from_parts( (a.real * r + a.imag) * s, (a.imag * r - a.real) * s); } } #else static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quot_float(__pyx_t_float_complex a, __pyx_t_float_complex b) { if (b.imag == 0) { return __pyx_t_float_complex_from_parts(a.real / b.real, a.imag / b.real); } else { float denom = b.real * b.real + b.imag * b.imag; return __pyx_t_float_complex_from_parts( (a.real * b.real + a.imag * b.imag) / denom, (a.imag * b.real - a.real * b.imag) / denom); } } #endif static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_neg_float(__pyx_t_float_complex a) { __pyx_t_float_complex z; z.real = -a.real; z.imag = -a.imag; return z; } static CYTHON_INLINE int __Pyx_c_is_zero_float(__pyx_t_float_complex a) { return (a.real == 0) && (a.imag == 0); } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conj_float(__pyx_t_float_complex a) { __pyx_t_float_complex z; z.real = a.real; z.imag = -a.imag; return z; } #if 1 static CYTHON_INLINE float __Pyx_c_abs_float(__pyx_t_float_complex z) { #if !defined(HAVE_HYPOT) || defined(_MSC_VER) return sqrtf(z.real*z.real + z.imag*z.imag); #else return hypotf(z.real, z.imag); #endif } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_pow_float(__pyx_t_float_complex 
a, __pyx_t_float_complex b) { __pyx_t_float_complex z; float r, lnr, theta, z_r, z_theta; if (b.imag == 0 && b.real == (int)b.real) { if (b.real < 0) { float denom = a.real * a.real + a.imag * a.imag; a.real = a.real / denom; a.imag = -a.imag / denom; b.real = -b.real; } switch ((int)b.real) { case 0: z.real = 1; z.imag = 0; return z; case 1: return a; case 2: z = __Pyx_c_prod_float(a, a); return __Pyx_c_prod_float(a, a); case 3: z = __Pyx_c_prod_float(a, a); return __Pyx_c_prod_float(z, a); case 4: z = __Pyx_c_prod_float(a, a); return __Pyx_c_prod_float(z, z); } } if (a.imag == 0) { if (a.real == 0) { return a; } else if (b.imag == 0) { z.real = powf(a.real, b.real); z.imag = 0; return z; } else if (a.real > 0) { r = a.real; theta = 0; } else { r = -a.real; theta = atan2f(0, -1); } } else { r = __Pyx_c_abs_float(a); theta = atan2f(a.imag, a.real); } lnr = logf(r); z_r = expf(lnr * b.real - theta * b.imag); z_theta = theta * b.real + lnr * b.imag; z.real = z_r * cosf(z_theta); z.imag = z_r * sinf(z_theta); return z; } #endif #endif /* Declarations */ #if CYTHON_CCOMPLEX #ifdef __cplusplus static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { return ::std::complex< double >(x, y); } #else static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { return x + y*(__pyx_t_double_complex)_Complex_I; } #endif #else static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { __pyx_t_double_complex z; z.real = x; z.imag = y; return z; } #endif /* Arithmetic */ #if CYTHON_CCOMPLEX #else static CYTHON_INLINE int __Pyx_c_eq_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { return (a.real == b.real) && (a.imag == b.imag); } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; z.real = a.real + b.real; z.imag = a.imag + b.imag; return z; } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; z.real = a.real - b.real; z.imag = a.imag - b.imag; return z; } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; z.real = a.real * b.real - a.imag * b.imag; z.imag = a.real * b.imag + a.imag * b.real; return z; } #if 1 static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { if (b.imag == 0) { return __pyx_t_double_complex_from_parts(a.real / b.real, a.imag / b.real); } else if (fabs(b.real) >= fabs(b.imag)) { if (b.real == 0 && b.imag == 0) { return __pyx_t_double_complex_from_parts(a.real / b.real, a.imag / b.imag); } else { double r = b.imag / b.real; double s = 1.0 / (b.real + b.imag * r); return __pyx_t_double_complex_from_parts( (a.real + a.imag * r) * s, (a.imag - a.real * r) * s); } } else { double r = b.real / b.imag; double s = 1.0 / (b.imag + b.real * r); return __pyx_t_double_complex_from_parts( (a.real * r + a.imag) * s, (a.imag * r - a.real) * s); } } #else static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { if (b.imag == 0) { return __pyx_t_double_complex_from_parts(a.real / b.real, a.imag / b.real); } else { double denom = b.real * b.real + b.imag * b.imag; return __pyx_t_double_complex_from_parts( (a.real * b.real + a.imag * b.imag) / denom, (a.imag * b.real - a.real * 
b.imag) / denom); } } #endif static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg_double(__pyx_t_double_complex a) { __pyx_t_double_complex z; z.real = -a.real; z.imag = -a.imag; return z; } static CYTHON_INLINE int __Pyx_c_is_zero_double(__pyx_t_double_complex a) { return (a.real == 0) && (a.imag == 0); } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj_double(__pyx_t_double_complex a) { __pyx_t_double_complex z; z.real = a.real; z.imag = -a.imag; return z; } #if 1 static CYTHON_INLINE double __Pyx_c_abs_double(__pyx_t_double_complex z) { #if !defined(HAVE_HYPOT) || defined(_MSC_VER) return sqrt(z.real*z.real + z.imag*z.imag); #else return hypot(z.real, z.imag); #endif } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; double r, lnr, theta, z_r, z_theta; if (b.imag == 0 && b.real == (int)b.real) { if (b.real < 0) { double denom = a.real * a.real + a.imag * a.imag; a.real = a.real / denom; a.imag = -a.imag / denom; b.real = -b.real; } switch ((int)b.real) { case 0: z.real = 1; z.imag = 0; return z; case 1: return a; case 2: z = __Pyx_c_prod_double(a, a); return __Pyx_c_prod_double(a, a); case 3: z = __Pyx_c_prod_double(a, a); return __Pyx_c_prod_double(z, a); case 4: z = __Pyx_c_prod_double(a, a); return __Pyx_c_prod_double(z, z); } } if (a.imag == 0) { if (a.real == 0) { return a; } else if (b.imag == 0) { z.real = pow(a.real, b.real); z.imag = 0; return z; } else if (a.real > 0) { r = a.real; theta = 0; } else { r = -a.real; theta = atan2(0, -1); } } else { r = __Pyx_c_abs_double(a); theta = atan2(a.imag, a.real); } lnr = log(r); z_r = exp(lnr * b.real - theta * b.imag); z_theta = theta * b.real + lnr * b.imag; z.real = z_r * cos(z_theta); z.imag = z_r * sin(z_theta); return z; } #endif #endif /* CIntToPy */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value) { const int neg_one = (int) -1, const_zero = (int) 0; const int is_unsigned = neg_one > const_zero; if (is_unsigned) { if (sizeof(int) < sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(int) <= sizeof(unsigned long)) { return PyLong_FromUnsignedLong((unsigned long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) { return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); #endif } } else { if (sizeof(int) <= sizeof(long)) { return PyInt_FromLong((long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) { return PyLong_FromLongLong((PY_LONG_LONG) value); #endif } } { int one = 1; int little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&value; return _PyLong_FromByteArray(bytes, sizeof(int), little, !is_unsigned); } } /* CIntFromPyVerify */ #define __PYX_VERIFY_RETURN_INT(target_type, func_type, func_value)\ __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 0) #define __PYX_VERIFY_RETURN_INT_EXC(target_type, func_type, func_value)\ __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 1) #define __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, exc)\ {\ func_type value = func_value;\ if (sizeof(target_type) < sizeof(func_type)) {\ if (unlikely(value != (func_type) (target_type) value)) {\ func_type zero = 0;\ if (exc && unlikely(value == (func_type)-1 && PyErr_Occurred()))\ return (target_type) -1;\ if (is_unsigned && unlikely(value < zero))\ goto raise_neg_overflow;\ else\ goto raise_overflow;\ }\ }\ return (target_type) value;\ } /* CIntToPy */ static CYTHON_INLINE 
PyObject* __Pyx_PyInt_From_enum__NPY_TYPES(enum NPY_TYPES value) { const enum NPY_TYPES neg_one = (enum NPY_TYPES) -1, const_zero = (enum NPY_TYPES) 0; const int is_unsigned = neg_one > const_zero; if (is_unsigned) { if (sizeof(enum NPY_TYPES) < sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(enum NPY_TYPES) <= sizeof(unsigned long)) { return PyLong_FromUnsignedLong((unsigned long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(enum NPY_TYPES) <= sizeof(unsigned PY_LONG_LONG)) { return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); #endif } } else { if (sizeof(enum NPY_TYPES) <= sizeof(long)) { return PyInt_FromLong((long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(enum NPY_TYPES) <= sizeof(PY_LONG_LONG)) { return PyLong_FromLongLong((PY_LONG_LONG) value); #endif } } { int one = 1; int little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&value; return _PyLong_FromByteArray(bytes, sizeof(enum NPY_TYPES), little, !is_unsigned); } } /* CIntFromPy */ static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *x) { const int neg_one = (int) -1, const_zero = (int) 0; const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(int) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(int, long, PyInt_AS_LONG(x)) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { goto raise_neg_overflow; } return (int) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (int) 0; case 1: __PYX_VERIFY_RETURN_INT(int, digit, digits[0]) case 2: if (8 * sizeof(int) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) >= 2 * PyLong_SHIFT) { return (int) (((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); } } break; case 3: if (8 * sizeof(int) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) >= 3 * PyLong_SHIFT) { return (int) (((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); } } break; case 4: if (8 * sizeof(int) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) >= 4 * PyLong_SHIFT) { return (int) (((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); } } break; } #endif #if CYTHON_COMPILING_IN_CPYTHON if (unlikely(Py_SIZE(x) < 0)) { goto raise_neg_overflow; } #else { int result = PyObject_RichCompareBool(x, Py_False, Py_LT); if (unlikely(result < 0)) return (int) -1; if (unlikely(result == 1)) goto raise_neg_overflow; } #endif if (sizeof(int) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT_EXC(int, unsigned long, PyLong_AsUnsignedLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(int, unsigned PY_LONG_LONG, 
PyLong_AsUnsignedLongLong(x)) #endif } } else { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (int) 0; case -1: __PYX_VERIFY_RETURN_INT(int, sdigit, (sdigit) (-(sdigit)digits[0])) case 1: __PYX_VERIFY_RETURN_INT(int, digit, +digits[0]) case -2: if (8 * sizeof(int) - 1 > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { return (int) (((int)-1)*(((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case 2: if (8 * sizeof(int) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { return (int) ((((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case -3: if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { return (int) (((int)-1)*(((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case 3: if (8 * sizeof(int) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { return (int) ((((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case -4: if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) { return (int) (((int)-1)*(((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case 4: if (8 * sizeof(int) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) { return (int) ((((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; } #endif if (sizeof(int) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT_EXC(int, long, PyLong_AsLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(int, PY_LONG_LONG, PyLong_AsLongLong(x)) #endif } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else int val; PyObject *v = __Pyx_PyNumber_IntOrLong(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && 
!PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (int) -1; } } else { int val; PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); if (!tmp) return (int) -1; val = __Pyx_PyInt_As_int(tmp); Py_DECREF(tmp); return val; } raise_overflow: PyErr_SetString(PyExc_OverflowError, "value too large to convert to int"); return (int) -1; raise_neg_overflow: PyErr_SetString(PyExc_OverflowError, "can't convert negative value to int"); return (int) -1; } /* CIntToPy */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value) { const long neg_one = (long) -1, const_zero = (long) 0; const int is_unsigned = neg_one > const_zero; if (is_unsigned) { if (sizeof(long) < sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(long) <= sizeof(unsigned long)) { return PyLong_FromUnsignedLong((unsigned long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) { return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); #endif } } else { if (sizeof(long) <= sizeof(long)) { return PyInt_FromLong((long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) { return PyLong_FromLongLong((PY_LONG_LONG) value); #endif } } { int one = 1; int little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&value; return _PyLong_FromByteArray(bytes, sizeof(long), little, !is_unsigned); } } /* CIntFromPy */ static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *x) { const long neg_one = (long) -1, const_zero = (long) 0; const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(long) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(long, long, PyInt_AS_LONG(x)) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { goto raise_neg_overflow; } return (long) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (long) 0; case 1: __PYX_VERIFY_RETURN_INT(long, digit, digits[0]) case 2: if (8 * sizeof(long) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) >= 2 * PyLong_SHIFT) { return (long) (((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); } } break; case 3: if (8 * sizeof(long) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) >= 3 * PyLong_SHIFT) { return (long) (((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); } } break; case 4: if (8 * sizeof(long) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) >= 4 * 
PyLong_SHIFT) { return (long) (((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); } } break; } #endif #if CYTHON_COMPILING_IN_CPYTHON if (unlikely(Py_SIZE(x) < 0)) { goto raise_neg_overflow; } #else { int result = PyObject_RichCompareBool(x, Py_False, Py_LT); if (unlikely(result < 0)) return (long) -1; if (unlikely(result == 1)) goto raise_neg_overflow; } #endif if (sizeof(long) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT_EXC(long, unsigned long, PyLong_AsUnsignedLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(long, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) #endif } } else { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (long) 0; case -1: __PYX_VERIFY_RETURN_INT(long, sdigit, (sdigit) (-(sdigit)digits[0])) case 1: __PYX_VERIFY_RETURN_INT(long, digit, +digits[0]) case -2: if (8 * sizeof(long) - 1 > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { return (long) (((long)-1)*(((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case 2: if (8 * sizeof(long) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { return (long) ((((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case -3: if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { return (long) (((long)-1)*(((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case 3: if (8 * sizeof(long) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { return (long) ((((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case -4: if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { return (long) (((long)-1)*(((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case 4: if (8 * sizeof(long) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } 
else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { return (long) ((((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; } #endif if (sizeof(long) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT_EXC(long, long, PyLong_AsLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(long, PY_LONG_LONG, PyLong_AsLongLong(x)) #endif } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else long val; PyObject *v = __Pyx_PyNumber_IntOrLong(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (long) -1; } } else { long val; PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); if (!tmp) return (long) -1; val = __Pyx_PyInt_As_long(tmp); Py_DECREF(tmp); return val; } raise_overflow: PyErr_SetString(PyExc_OverflowError, "value too large to convert to long"); return (long) -1; raise_neg_overflow: PyErr_SetString(PyExc_OverflowError, "can't convert negative value to long"); return (long) -1; } /* FastTypeChecks */ #if CYTHON_COMPILING_IN_CPYTHON static int __Pyx_InBases(PyTypeObject *a, PyTypeObject *b) { while (a) { a = a->tp_base; if (a == b) return 1; } return b == &PyBaseObject_Type; } static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b) { PyObject *mro; if (a == b) return 1; mro = a->tp_mro; if (likely(mro)) { Py_ssize_t i, n; n = PyTuple_GET_SIZE(mro); for (i = 0; i < n; i++) { if (PyTuple_GET_ITEM(mro, i) == (PyObject *)b) return 1; } return 0; } return __Pyx_InBases(a, b); } #if PY_MAJOR_VERSION == 2 static int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject* exc_type2) { PyObject *exception, *value, *tb; int res; __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ErrFetch(&exception, &value, &tb); res = exc_type1 ? PyObject_IsSubclass(err, exc_type1) : 0; if (unlikely(res == -1)) { PyErr_WriteUnraisable(err); res = 0; } if (!res) { res = PyObject_IsSubclass(err, exc_type2); if (unlikely(res == -1)) { PyErr_WriteUnraisable(err); res = 0; } } __Pyx_ErrRestore(exception, value, tb); return res; } #else static CYTHON_INLINE int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject *exc_type2) { int res = exc_type1 ? 
__Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type1) : 0; if (!res) { res = __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type2); } return res; } #endif static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject* exc_type) { if (likely(err == exc_type)) return 1; if (likely(PyExceptionClass_Check(err))) { return __Pyx_inner_PyErr_GivenExceptionMatches2(err, NULL, exc_type); } return PyErr_GivenExceptionMatches(err, exc_type); } static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *exc_type1, PyObject *exc_type2) { if (likely(err == exc_type1 || err == exc_type2)) return 1; if (likely(PyExceptionClass_Check(err))) { return __Pyx_inner_PyErr_GivenExceptionMatches2(err, exc_type1, exc_type2); } return (PyErr_GivenExceptionMatches(err, exc_type1) || PyErr_GivenExceptionMatches(err, exc_type2)); } #endif /* CheckBinaryVersion */ static int __Pyx_check_binary_version(void) { char ctversion[4], rtversion[4]; PyOS_snprintf(ctversion, 4, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION); PyOS_snprintf(rtversion, 4, "%s", Py_GetVersion()); if (ctversion[0] != rtversion[0] || ctversion[2] != rtversion[2]) { char message[200]; PyOS_snprintf(message, sizeof(message), "compiletime version %s of module '%.100s' " "does not match runtime version %s", ctversion, __Pyx_MODULE_NAME, rtversion); return PyErr_WarnEx(NULL, message, 1); } return 0; } /* FunctionExport */ static int __Pyx_ExportFunction(const char *name, void (*f)(void), const char *sig) { PyObject *d = 0; PyObject *cobj = 0; union { void (*fp)(void); void *p; } tmp; d = PyObject_GetAttrString(__pyx_m, (char *)"__pyx_capi__"); if (!d) { PyErr_Clear(); d = PyDict_New(); if (!d) goto bad; Py_INCREF(d); if (PyModule_AddObject(__pyx_m, (char *)"__pyx_capi__", d) < 0) goto bad; } tmp.fp = f; #if PY_VERSION_HEX >= 0x02070000 cobj = PyCapsule_New(tmp.p, sig, 0); #else cobj = PyCObject_FromVoidPtrAndDesc(tmp.p, (void *)sig, 0); #endif if (!cobj) goto bad; if (PyDict_SetItemString(d, name, cobj) < 0) goto bad; Py_DECREF(cobj); Py_DECREF(d); return 0; bad: Py_XDECREF(cobj); Py_XDECREF(d); return -1; } /* ModuleImport */ #ifndef __PYX_HAVE_RT_ImportModule #define __PYX_HAVE_RT_ImportModule static PyObject *__Pyx_ImportModule(const char *name) { PyObject *py_name = 0; PyObject *py_module = 0; py_name = __Pyx_PyIdentifier_FromString(name); if (!py_name) goto bad; py_module = PyImport_Import(py_name); Py_DECREF(py_name); return py_module; bad: Py_XDECREF(py_name); return 0; } #endif /* TypeImport */ #ifndef __PYX_HAVE_RT_ImportType #define __PYX_HAVE_RT_ImportType static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class_name, size_t size, int strict) { PyObject *py_module = 0; PyObject *result = 0; PyObject *py_name = 0; char warning[200]; Py_ssize_t basicsize; #ifdef Py_LIMITED_API PyObject *py_basicsize; #endif py_module = __Pyx_ImportModule(module_name); if (!py_module) goto bad; py_name = __Pyx_PyIdentifier_FromString(class_name); if (!py_name) goto bad; result = PyObject_GetAttr(py_module, py_name); Py_DECREF(py_name); py_name = 0; Py_DECREF(py_module); py_module = 0; if (!result) goto bad; if (!PyType_Check(result)) { PyErr_Format(PyExc_TypeError, "%.200s.%.200s is not a type object", module_name, class_name); goto bad; } #ifndef Py_LIMITED_API basicsize = ((PyTypeObject *)result)->tp_basicsize; #else py_basicsize = PyObject_GetAttrString(result, "__basicsize__"); if (!py_basicsize) goto bad; basicsize = PyLong_AsSsize_t(py_basicsize); Py_DECREF(py_basicsize); 
py_basicsize = 0; if (basicsize == (Py_ssize_t)-1 && PyErr_Occurred()) goto bad; #endif if (!strict && (size_t)basicsize > size) { PyOS_snprintf(warning, sizeof(warning), "%s.%s size changed, may indicate binary incompatibility. Expected %zd, got %zd", module_name, class_name, basicsize, size); if (PyErr_WarnEx(NULL, warning, 0) < 0) goto bad; } else if ((size_t)basicsize != size) { PyErr_Format(PyExc_ValueError, "%.200s.%.200s has the wrong size, try recompiling. Expected %zd, got %zd", module_name, class_name, basicsize, size); goto bad; } return (PyTypeObject *)result; bad: Py_XDECREF(py_module); Py_XDECREF(result); return NULL; } #endif /* InitStrings */ static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) { while (t->p) { #if PY_MAJOR_VERSION < 3 if (t->is_unicode) { *t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL); } else if (t->intern) { *t->p = PyString_InternFromString(t->s); } else { *t->p = PyString_FromStringAndSize(t->s, t->n - 1); } #else if (t->is_unicode | t->is_str) { if (t->intern) { *t->p = PyUnicode_InternFromString(t->s); } else if (t->encoding) { *t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL); } else { *t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1); } } else { *t->p = PyBytes_FromStringAndSize(t->s, t->n - 1); } #endif if (!*t->p) return -1; if (PyObject_Hash(*t->p) == -1) PyErr_Clear(); ++t; } return 0; } static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char* c_str) { return __Pyx_PyUnicode_FromStringAndSize(c_str, (Py_ssize_t)strlen(c_str)); } static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject* o) { Py_ssize_t ignore; return __Pyx_PyObject_AsStringAndSize(o, &ignore); } #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT #if !CYTHON_PEP393_ENABLED static const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) { char* defenc_c; PyObject* defenc = _PyUnicode_AsDefaultEncodedString(o, NULL); if (!defenc) return NULL; defenc_c = PyBytes_AS_STRING(defenc); #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII { char* end = defenc_c + PyBytes_GET_SIZE(defenc); char* c; for (c = defenc_c; c < end; c++) { if ((unsigned char) (*c) >= 128) { PyUnicode_AsASCIIString(o); return NULL; } } } #endif *length = PyBytes_GET_SIZE(defenc); return defenc_c; } #else static CYTHON_INLINE const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) { if (unlikely(__Pyx_PyUnicode_READY(o) == -1)) return NULL; #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII if (likely(PyUnicode_IS_ASCII(o))) { *length = PyUnicode_GET_LENGTH(o); return PyUnicode_AsUTF8(o); } else { PyUnicode_AsASCIIString(o); return NULL; } #else return PyUnicode_AsUTF8AndSize(o, length); #endif } #endif #endif static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject* o, Py_ssize_t *length) { #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT if ( #if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII __Pyx_sys_getdefaultencoding_not_ascii && #endif PyUnicode_Check(o)) { return __Pyx_PyUnicode_AsStringAndSize(o, length); } else #endif #if (!CYTHON_COMPILING_IN_PYPY) || (defined(PyByteArray_AS_STRING) && defined(PyByteArray_GET_SIZE)) if (PyByteArray_Check(o)) { *length = PyByteArray_GET_SIZE(o); return PyByteArray_AS_STRING(o); } else #endif { char* result; int r = PyBytes_AsStringAndSize(o, &result, length); if (unlikely(r < 0)) { return NULL; } else { return result; } } } static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) { int is_true = x == 
Py_True; if (is_true | (x == Py_False) | (x == Py_None)) return is_true; else return PyObject_IsTrue(x); } static PyObject* __Pyx_PyNumber_IntOrLongWrongResultType(PyObject* result, const char* type_name) { #if PY_MAJOR_VERSION >= 3 if (PyLong_Check(result)) { if (PyErr_WarnFormat(PyExc_DeprecationWarning, 1, "__int__ returned non-int (type %.200s). " "The ability to return an instance of a strict subclass of int " "is deprecated, and may be removed in a future version of Python.", Py_TYPE(result)->tp_name)) { Py_DECREF(result); return NULL; } return result; } #endif PyErr_Format(PyExc_TypeError, "__%.4s__ returned non-%.4s (type %.200s)", type_name, type_name, Py_TYPE(result)->tp_name); Py_DECREF(result); return NULL; } static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x) { #if CYTHON_USE_TYPE_SLOTS PyNumberMethods *m; #endif const char *name = NULL; PyObject *res = NULL; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x) || PyLong_Check(x))) #else if (likely(PyLong_Check(x))) #endif return __Pyx_NewRef(x); #if CYTHON_USE_TYPE_SLOTS m = Py_TYPE(x)->tp_as_number; #if PY_MAJOR_VERSION < 3 if (m && m->nb_int) { name = "int"; res = m->nb_int(x); } else if (m && m->nb_long) { name = "long"; res = m->nb_long(x); } #else if (likely(m && m->nb_int)) { name = "int"; res = m->nb_int(x); } #endif #else if (!PyBytes_CheckExact(x) && !PyUnicode_CheckExact(x)) { res = PyNumber_Int(x); } #endif if (likely(res)) { #if PY_MAJOR_VERSION < 3 if (unlikely(!PyInt_Check(res) && !PyLong_Check(res))) { #else if (unlikely(!PyLong_CheckExact(res))) { #endif return __Pyx_PyNumber_IntOrLongWrongResultType(res, name); } } else if (!PyErr_Occurred()) { PyErr_SetString(PyExc_TypeError, "an integer is required"); } return res; } static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) { Py_ssize_t ival; PyObject *x; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_CheckExact(b))) { if (sizeof(Py_ssize_t) >= sizeof(long)) return PyInt_AS_LONG(b); else return PyInt_AsSsize_t(x); } #endif if (likely(PyLong_CheckExact(b))) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)b)->ob_digit; const Py_ssize_t size = Py_SIZE(b); if (likely(__Pyx_sst_abs(size) <= 1)) { ival = likely(size) ? 
digits[0] : 0; if (size == -1) ival = -ival; return ival; } else { switch (size) { case 2: if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) { return (Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case -2: if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) { return -(Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case 3: if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) { return (Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case -3: if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) { return -(Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case 4: if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) { return (Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case -4: if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) { return -(Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; } } #endif return PyLong_AsSsize_t(b); } x = PyNumber_Index(b); if (!x) return -1; ival = PyInt_AsSsize_t(x); Py_DECREF(x); return ival; } static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) { return PyInt_FromSize_t(ival); } #endif /* Py_PYTHON_H */ photutils-0.4/photutils/geometry/core.pxd0000644000214200020070000000130313063003335023117 0ustar lbradleySTSCI\science00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst # This file is needed in order to be able to cimport functions into other Cython files cdef double distance(double x1, double y1, double x2, double y2) cdef double area_arc(double x1, double y1, double x2, double y2, double R) cdef double area_triangle(double x1, double y1, double x2, double y2, double x3, double y3) cdef double area_arc_unit(double x1, double y1, double x2, double y2) cdef int in_triangle(double x, double y, double x1, double y1, double x2, double y2, double x3, double y3) cdef double overlap_area_triangle_unit_circle(double x1, double y1, double x2, double y2, double x3, double y3) cdef double floor_sqrt(double x) photutils-0.4/photutils/geometry/core.pyx0000644000214200020070000002770313063003335023160 0ustar lbradleySTSCI\science00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst """The functions here are the core geometry functions.""" from __future__ import (absolute_import, division, print_function, unicode_literals) import numpy as np cimport numpy as np __all__ = ['elliptical_overlap_grid'] cdef extern from "math.h": double asin(double x) double sin(double x) double cos(double x) double sqrt(double x) double fabs(double x) from cpython cimport bool DTYPE = np.float64 ctypedef np.float64_t DTYPE_t cimport cython ctypedef struct point: double x double y ctypedef struct intersections: point p1 point p2 cdef double floor_sqrt(double x): """ In some of the geometrical functions, we have to take the sqrt of a number and we know that the number should be >= 0. However, in some cases the value is e.g. -1e-10, but we want to treat it as zero, which is what this function does. Note that this does **not** check whether negative values are close or not to zero, so this should be used only in cases where the value is expected to be positive on paper. 
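For example, floor_sqrt(-1e-10) returns 0 where a plain sqrt would give NaN, while floor_sqrt(4.0) still returns 2.0.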
""" if x > 0: return sqrt(x) else: return 0 # NOTE: The following two functions use cdef because they are not intended to be # called from the Python code. Using def makes them callable from outside, but # also slower. Some functions currently return multiple values, and for those we # still use 'def' for now. cdef double distance(double x1, double y1, double x2, double y2): """ Distance between two points in two dimensions. Parameters ---------- x1, y1 : float The coordinates of the first point x2, y2 : float The coordinates of the second point Returns ------- d : float The Euclidean distance between the two points """ return sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2) cdef double area_arc(double x1, double y1, double x2, double y2, double r): """ Area of a circle arc with radius r between points (x1, y1) and (x2, y2). References ---------- http://mathworld.wolfram.com/CircularSegment.html """ cdef double a, theta a = distance(x1, y1, x2, y2) theta = 2. * asin(0.5 * a / r) return 0.5 * r * r * (theta - sin(theta)) cdef double area_triangle(double x1, double y1, double x2, double y2, double x3, double y3): """ Area of a triangle defined by three vertices. """ return 0.5 * abs(x1 * (y2 - y3) + x2 * (y3 - y1) + x3 * (y1 - y2)) cdef double area_arc_unit(double x1, double y1, double x2, double y2): """ Area of a circle arc with radius R between points (x1, y1) and (x2, y2) References ---------- http://mathworld.wolfram.com/CircularSegment.html """ cdef double a, theta a = distance(x1, y1, x2, y2) theta = 2. * asin(0.5 * a) return 0.5 * (theta - sin(theta)) cdef int in_triangle(double x, double y, double x1, double y1, double x2, double y2, double x3, double y3): """ Check if a point (x,y) is inside a triangle """ cdef int c = 0 c += ((y1 > y) != (y2 > y) and x < (x2 - x1) * (y - y1) / (y2 - y1) + x1) c += ((y2 > y) != (y3 > y) and x < (x3 - x2) * (y - y2) / (y3 - y2) + x2) c += ((y3 > y) != (y1 > y) and x < (x1 - x3) * (y - y3) / (y1 - y3) + x3) return c % 2 == 1 cdef intersections circle_line(double x1, double y1, double x2, double y2): """Intersection of a line defined by two points with a unit circle""" cdef double a, b, delta, dx, dy cdef double tolerance = 1.e-10 cdef intersections inter dx = x2 - x1 dy = y2 - y1 if fabs(dx) < tolerance and fabs(dy) < tolerance: inter.p1.x = 2. inter.p1.y = 2. inter.p2.x = 2. inter.p2.y = 2. elif fabs(dx) > fabs(dy): # Find the slope and intercept of the line a = dy / dx b = y1 - a * x1 # Find the determinant of the quadratic equation delta = 1. + a * a - b * b if delta > 0.: # solutions exist delta = sqrt(delta) inter.p1.x = (- a * b - delta) / (1. + a * a) inter.p1.y = a * inter.p1.x + b inter.p2.x = (- a * b + delta) / (1. + a * a) inter.p2.y = a * inter.p2.x + b else: # no solution, return values > 1 inter.p1.x = 2. inter.p1.y = 2. inter.p2.x = 2. inter.p2.y = 2. else: # Find the slope and intercept of the line a = dx / dy b = x1 - a * y1 # Find the determinant of the quadratic equation delta = 1. + a * a - b * b if delta > 0.: # solutions exist delta = sqrt(delta) inter.p1.y = (- a * b - delta) / (1. + a * a) inter.p1.x = a * inter.p1.y + b inter.p2.y = (- a * b + delta) / (1. + a * a) inter.p2.x = a * inter.p2.y + b else: # no solution, return values > 1 inter.p1.x = 2. inter.p1.y = 2. inter.p2.x = 2. inter.p2.y = 2. return inter cdef point circle_segment_single2(double x1, double y1, double x2, double y2): """ The intersection of a line with the unit circle. The intersection the closest to (x2, y2) is chosen. 
""" cdef double dx1, dy1, dx2, dy2 cdef intersections inter cdef point pt1, pt2, pt inter = circle_line(x1, y1, x2, y2) pt1 = inter.p1 pt2 = inter.p2 # Can be optimized, but just checking for correctness right now dx1 = fabs(pt1.x - x2) dy1 = fabs(pt1.y - y2) dx2 = fabs(pt2.x - x2) dy2 = fabs(pt2.y - y2) if dx1 > dy1: # compare based on x-axis if dx1 > dx2: pt = pt2 else: pt = pt1 else: if dy1 > dy2: pt = pt2 else: pt = pt1 return pt cdef intersections circle_segment(double x1, double y1, double x2, double y2): """ Intersection(s) of a segment with the unit circle. Discard any solution not on the segment. """ cdef intersections inter, inter_new cdef point pt1, pt2 inter = circle_line(x1, y1, x2, y2) pt1 = inter.p1 pt2 = inter.p2 if (pt1.x > x1 and pt1.x > x2) or (pt1.x < x1 and pt1.x < x2) or (pt1.y > y1 and pt1.y > y2) or (pt1.y < y1 and pt1.y < y2): pt1.x, pt1.y = 2., 2. if (pt2.x > x1 and pt2.x > x2) or (pt2.x < x1 and pt2.x < x2) or (pt2.y > y1 and pt2.y > y2) or (pt2.y < y1 and pt2.y < y2): pt2.x, pt2.y = 2., 2. if pt1.x > 1. and pt2.x < 2.: inter_new.p1 = pt1 inter_new.p2 = pt2 else: inter_new.p1 = pt2 inter_new.p2 = pt1 return inter_new cdef double overlap_area_triangle_unit_circle(double x1, double y1, double x2, double y2, double x3, double y3): """ Given a triangle defined by three points (x1, y1), (x2, y2), and (x3, y3), find the area of overlap with the unit circle. """ cdef double d1, d2, d3 cdef bool in1, in2, in3 cdef bool on1, on2, on3 cdef double area cdef double PI = np.pi cdef intersections inter cdef point pt1, pt2, pt3, pt4, pt5, pt6, pt_tmp # Find distance of all vertices to circle center d1 = x1 * x1 + y1 * y1 d2 = x2 * x2 + y2 * y2 d3 = x3 * x3 + y3 * y3 # Order vertices by distance from origin if d1 < d2: if d2 < d3: pass elif d1 < d3: x2, y2, d2, x3, y3, d3 = x3, y3, d3, x2, y2, d2 else: x1, y1, d1, x2, y2, d2, x3, y3, d3 = x3, y3, d3, x1, y1, d1, x2, y2, d2 else: if d1 < d3: x1, y1, d1, x2, y2, d2 = x2, y2, d2, x1, y1, d1 elif d2 < d3: x1, y1, d1, x2, y2, d2, x3, y3, d3 = x2, y2, d2, x3, y3, d3, x1, y1, d1 else: x1, y1, d1, x2, y2, d2, x3, y3, d3 = x3, y3, d3, x2, y2, d2, x1, y1, d1 if d1 > d2 or d2 > d3 or d1 > d3: raise Exception("ERROR: vertices did not sort correctly") # Determine number of vertices inside circle in1 = d1 < 1 in2 = d2 < 1 in3 = d3 < 1 # Determine which vertices are on the circle on1 = fabs(d1 - 1) < 1.e-10 on2 = fabs(d2 - 1) < 1.e-10 on3 = fabs(d3 - 1) < 1.e-10 if on3 or in3: # triangle is completely in circle area = area_triangle(x1, y1, x2, y2, x3, y3) elif in2 or on2: # If vertex 1 or 2 are on the edge of the circle, then we use the dot # product to vertex 3 to determine whether an intersection takes place. intersect13 = not on1 or x1 * (x3 - x1) + y1 * (y3 - y1) < 0. intersect23 = not on2 or x2 * (x3 - x2) + y2 * (y3 - y2) < 0. 
if intersect13 and intersect23 and not on2: pt1 = circle_segment_single2(x1, y1, x3, y3) pt2 = circle_segment_single2(x2, y2, x3, y3) area = area_triangle(x1, y1, x2, y2, pt1.x, pt1.y) \ + area_triangle(x2, y2, pt1.x, pt1.y, pt2.x, pt2.y) \ + area_arc_unit(pt1.x, pt1.y, pt2.x, pt2.y) elif intersect13: pt1 = circle_segment_single2(x1, y1, x3, y3) area = area_triangle(x1, y1, x2, y2, pt1.x, pt1.y) \ + area_arc_unit(x2, y2, pt1.x, pt1.y) elif intersect23: pt2 = circle_segment_single2(x2, y2, x3, y3) area = area_triangle(x1, y1, x2, y2, pt2.x, pt2.y) \ + area_arc_unit(x1, y1, pt2.x, pt2.y) else: area = area_arc_unit(x1, y1, x2, y2) elif on1: # The triangle is outside the circle area = 0.0 elif in1: # Check for intersections of far side with circle inter = circle_segment(x2, y2, x3, y3) pt1 = inter.p1 pt2 = inter.p2 pt3 = circle_segment_single2(x1, y1, x2, y2) pt4 = circle_segment_single2(x1, y1, x3, y3) if pt1.x > 1.: # indicates no intersection # Code taken from `sep.h`. # TODO: use `sep` and get rid of this Cython code. if (((0.-pt3.y) * (pt4.x-pt3.x) > (pt4.y-pt3.y) * (0.-pt3.x)) != ((y1-pt3.y) * (pt4.x-pt3.x) > (pt4.y-pt3.y) * (x1-pt3.x))): area = area_triangle(x1, y1, pt3.x, pt3.y, pt4.x, pt4.y) \ + (PI - area_arc_unit(pt3.x, pt3.y, pt4.x, pt4.y)) else: area = area_triangle(x1, y1, pt3.x, pt3.y, pt4.x, pt4.y) \ + area_arc_unit(pt3.x, pt3.y, pt4.x, pt4.y) else: if (pt2.x - x2)**2 + (pt2.y - y2)**2 < (pt1.x - x2)**2 + (pt1.y - y2)**2: pt1, pt2 = pt2, pt1 area = area_triangle(x1, y1, pt3.x, pt3.y, pt1.x, pt1.y) \ + area_triangle(x1, y1, pt1.x, pt1.y, pt2.x, pt2.y) \ + area_triangle(x1, y1, pt2.x, pt2.y, pt4.x, pt4.y) \ + area_arc_unit(pt1.x, pt1.y, pt3.x, pt3.y) \ + area_arc_unit(pt2.x, pt2.y, pt4.x, pt4.y) else: inter = circle_segment(x1, y1, x2, y2) pt1 = inter.p1 pt2 = inter.p2 inter = circle_segment(x2, y2, x3, y3) pt3 = inter.p1 pt4 = inter.p2 inter = circle_segment(x3, y3, x1, y1) pt5 = inter.p1 pt6 = inter.p2 if pt1.x <= 1.: xp, yp = 0.5 * (pt1.x + pt2.x), 0.5 * (pt1.y + pt2.y) area = overlap_area_triangle_unit_circle(x1, y1, x3, y3, xp, yp) \ + overlap_area_triangle_unit_circle(x2, y2, x3, y3, xp, yp) elif pt3.x <= 1.: xp, yp = 0.5 * (pt3.x + pt4.x), 0.5 * (pt3.y + pt4.y) area = overlap_area_triangle_unit_circle(x3, y3, x1, y1, xp, yp) \ + overlap_area_triangle_unit_circle(x2, y2, x1, y1, xp, yp) elif pt5.x <= 1.: xp, yp = 0.5 * (pt5.x + pt6.x), 0.5 * (pt5.y + pt6.y) area = overlap_area_triangle_unit_circle(x1, y1, x2, y2, xp, yp) \ + overlap_area_triangle_unit_circle(x3, y3, x2, y2, xp, yp) else: # no intersections if in_triangle(0., 0., x1, y1, x2, y2, x3, y3): return PI else: return 0. return area photutils-0.4/photutils/geometry/elliptical_overlap.c0000644000214200020070000127304713175654700025525 0ustar lbradleySTSCI\science00000000000000/* Generated by Cython 0.27.2 */ #define PY_SSIZE_T_CLEAN #include "Python.h" #ifndef Py_PYTHON_H #error Python headers needed to compile C extensions, please install development version of Python. #elif PY_VERSION_HEX < 0x02060000 || (0x03000000 <= PY_VERSION_HEX && PY_VERSION_HEX < 0x03030000) #error Cython requires Python 2.6+ or Python 3.3+. 
#else #define CYTHON_ABI "0_27_2" #define CYTHON_FUTURE_DIVISION 1 #include #ifndef offsetof #define offsetof(type, member) ( (size_t) & ((type*)0) -> member ) #endif #if !defined(WIN32) && !defined(MS_WINDOWS) #ifndef __stdcall #define __stdcall #endif #ifndef __cdecl #define __cdecl #endif #ifndef __fastcall #define __fastcall #endif #endif #ifndef DL_IMPORT #define DL_IMPORT(t) t #endif #ifndef DL_EXPORT #define DL_EXPORT(t) t #endif #define __PYX_COMMA , #ifndef HAVE_LONG_LONG #if PY_VERSION_HEX >= 0x02070000 #define HAVE_LONG_LONG #endif #endif #ifndef PY_LONG_LONG #define PY_LONG_LONG LONG_LONG #endif #ifndef Py_HUGE_VAL #define Py_HUGE_VAL HUGE_VAL #endif #ifdef PYPY_VERSION #define CYTHON_COMPILING_IN_PYPY 1 #define CYTHON_COMPILING_IN_PYSTON 0 #define CYTHON_COMPILING_IN_CPYTHON 0 #undef CYTHON_USE_TYPE_SLOTS #define CYTHON_USE_TYPE_SLOTS 0 #undef CYTHON_USE_PYTYPE_LOOKUP #define CYTHON_USE_PYTYPE_LOOKUP 0 #if PY_VERSION_HEX < 0x03050000 #undef CYTHON_USE_ASYNC_SLOTS #define CYTHON_USE_ASYNC_SLOTS 0 #elif !defined(CYTHON_USE_ASYNC_SLOTS) #define CYTHON_USE_ASYNC_SLOTS 1 #endif #undef CYTHON_USE_PYLIST_INTERNALS #define CYTHON_USE_PYLIST_INTERNALS 0 #undef CYTHON_USE_UNICODE_INTERNALS #define CYTHON_USE_UNICODE_INTERNALS 0 #undef CYTHON_USE_UNICODE_WRITER #define CYTHON_USE_UNICODE_WRITER 0 #undef CYTHON_USE_PYLONG_INTERNALS #define CYTHON_USE_PYLONG_INTERNALS 0 #undef CYTHON_AVOID_BORROWED_REFS #define CYTHON_AVOID_BORROWED_REFS 1 #undef CYTHON_ASSUME_SAFE_MACROS #define CYTHON_ASSUME_SAFE_MACROS 0 #undef CYTHON_UNPACK_METHODS #define CYTHON_UNPACK_METHODS 0 #undef CYTHON_FAST_THREAD_STATE #define CYTHON_FAST_THREAD_STATE 0 #undef CYTHON_FAST_PYCALL #define CYTHON_FAST_PYCALL 0 #undef CYTHON_PEP489_MULTI_PHASE_INIT #define CYTHON_PEP489_MULTI_PHASE_INIT 0 #undef CYTHON_USE_TP_FINALIZE #define CYTHON_USE_TP_FINALIZE 0 #elif defined(PYSTON_VERSION) #define CYTHON_COMPILING_IN_PYPY 0 #define CYTHON_COMPILING_IN_PYSTON 1 #define CYTHON_COMPILING_IN_CPYTHON 0 #ifndef CYTHON_USE_TYPE_SLOTS #define CYTHON_USE_TYPE_SLOTS 1 #endif #undef CYTHON_USE_PYTYPE_LOOKUP #define CYTHON_USE_PYTYPE_LOOKUP 0 #undef CYTHON_USE_ASYNC_SLOTS #define CYTHON_USE_ASYNC_SLOTS 0 #undef CYTHON_USE_PYLIST_INTERNALS #define CYTHON_USE_PYLIST_INTERNALS 0 #ifndef CYTHON_USE_UNICODE_INTERNALS #define CYTHON_USE_UNICODE_INTERNALS 1 #endif #undef CYTHON_USE_UNICODE_WRITER #define CYTHON_USE_UNICODE_WRITER 0 #undef CYTHON_USE_PYLONG_INTERNALS #define CYTHON_USE_PYLONG_INTERNALS 0 #ifndef CYTHON_AVOID_BORROWED_REFS #define CYTHON_AVOID_BORROWED_REFS 0 #endif #ifndef CYTHON_ASSUME_SAFE_MACROS #define CYTHON_ASSUME_SAFE_MACROS 1 #endif #ifndef CYTHON_UNPACK_METHODS #define CYTHON_UNPACK_METHODS 1 #endif #undef CYTHON_FAST_THREAD_STATE #define CYTHON_FAST_THREAD_STATE 0 #undef CYTHON_FAST_PYCALL #define CYTHON_FAST_PYCALL 0 #undef CYTHON_PEP489_MULTI_PHASE_INIT #define CYTHON_PEP489_MULTI_PHASE_INIT 0 #undef CYTHON_USE_TP_FINALIZE #define CYTHON_USE_TP_FINALIZE 0 #else #define CYTHON_COMPILING_IN_PYPY 0 #define CYTHON_COMPILING_IN_PYSTON 0 #define CYTHON_COMPILING_IN_CPYTHON 1 #ifndef CYTHON_USE_TYPE_SLOTS #define CYTHON_USE_TYPE_SLOTS 1 #endif #if PY_VERSION_HEX < 0x02070000 #undef CYTHON_USE_PYTYPE_LOOKUP #define CYTHON_USE_PYTYPE_LOOKUP 0 #elif !defined(CYTHON_USE_PYTYPE_LOOKUP) #define CYTHON_USE_PYTYPE_LOOKUP 1 #endif #if PY_MAJOR_VERSION < 3 #undef CYTHON_USE_ASYNC_SLOTS #define CYTHON_USE_ASYNC_SLOTS 0 #elif !defined(CYTHON_USE_ASYNC_SLOTS) #define CYTHON_USE_ASYNC_SLOTS 1 #endif #if PY_VERSION_HEX < 0x02070000 
#undef CYTHON_USE_PYLONG_INTERNALS #define CYTHON_USE_PYLONG_INTERNALS 0 #elif !defined(CYTHON_USE_PYLONG_INTERNALS) #define CYTHON_USE_PYLONG_INTERNALS 1 #endif #ifndef CYTHON_USE_PYLIST_INTERNALS #define CYTHON_USE_PYLIST_INTERNALS 1 #endif #ifndef CYTHON_USE_UNICODE_INTERNALS #define CYTHON_USE_UNICODE_INTERNALS 1 #endif #if PY_VERSION_HEX < 0x030300F0 #undef CYTHON_USE_UNICODE_WRITER #define CYTHON_USE_UNICODE_WRITER 0 #elif !defined(CYTHON_USE_UNICODE_WRITER) #define CYTHON_USE_UNICODE_WRITER 1 #endif #ifndef CYTHON_AVOID_BORROWED_REFS #define CYTHON_AVOID_BORROWED_REFS 0 #endif #ifndef CYTHON_ASSUME_SAFE_MACROS #define CYTHON_ASSUME_SAFE_MACROS 1 #endif #ifndef CYTHON_UNPACK_METHODS #define CYTHON_UNPACK_METHODS 1 #endif #ifndef CYTHON_FAST_THREAD_STATE #define CYTHON_FAST_THREAD_STATE 1 #endif #ifndef CYTHON_FAST_PYCALL #define CYTHON_FAST_PYCALL 1 #endif #ifndef CYTHON_PEP489_MULTI_PHASE_INIT #define CYTHON_PEP489_MULTI_PHASE_INIT (0 && PY_VERSION_HEX >= 0x03050000) #endif #ifndef CYTHON_USE_TP_FINALIZE #define CYTHON_USE_TP_FINALIZE (PY_VERSION_HEX >= 0x030400a1) #endif #endif #if !defined(CYTHON_FAST_PYCCALL) #define CYTHON_FAST_PYCCALL (CYTHON_FAST_PYCALL && PY_VERSION_HEX >= 0x030600B1) #endif #if CYTHON_USE_PYLONG_INTERNALS #include "longintrepr.h" #undef SHIFT #undef BASE #undef MASK #endif #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX < 0x02070600 && !defined(Py_OptimizeFlag) #define Py_OptimizeFlag 0 #endif #define __PYX_BUILD_PY_SSIZE_T "n" #define CYTHON_FORMAT_SSIZE_T "z" #if PY_MAJOR_VERSION < 3 #define __Pyx_BUILTIN_MODULE_NAME "__builtin__" #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ PyCode_New(a+k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) #define __Pyx_DefaultClassType PyClass_Type #else #define __Pyx_BUILTIN_MODULE_NAME "builtins" #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) #define __Pyx_DefaultClassType PyType_Type #endif #ifndef Py_TPFLAGS_CHECKTYPES #define Py_TPFLAGS_CHECKTYPES 0 #endif #ifndef Py_TPFLAGS_HAVE_INDEX #define Py_TPFLAGS_HAVE_INDEX 0 #endif #ifndef Py_TPFLAGS_HAVE_NEWBUFFER #define Py_TPFLAGS_HAVE_NEWBUFFER 0 #endif #ifndef Py_TPFLAGS_HAVE_FINALIZE #define Py_TPFLAGS_HAVE_FINALIZE 0 #endif #if PY_VERSION_HEX < 0x030700A0 || !defined(METH_FASTCALL) #ifndef METH_FASTCALL #define METH_FASTCALL 0x80 #endif typedef PyObject *(*__Pyx_PyCFunctionFast) (PyObject *self, PyObject **args, Py_ssize_t nargs); typedef PyObject *(*__Pyx_PyCFunctionFastWithKeywords) (PyObject *self, PyObject **args, Py_ssize_t nargs, PyObject *kwnames); #else #define __Pyx_PyCFunctionFast _PyCFunctionFast #define __Pyx_PyCFunctionFastWithKeywords _PyCFunctionFastWithKeywords #endif #if CYTHON_FAST_PYCCALL #define __Pyx_PyFastCFunction_Check(func)\ ((PyCFunction_Check(func) && (METH_FASTCALL == (PyCFunction_GET_FLAGS(func) & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS))))) #else #define __Pyx_PyFastCFunction_Check(func) 0 #endif #if !CYTHON_FAST_THREAD_STATE || PY_VERSION_HEX < 0x02070000 #define __Pyx_PyThreadState_Current PyThreadState_GET() #elif PY_VERSION_HEX >= 0x03060000 #define __Pyx_PyThreadState_Current _PyThreadState_UncheckedGet() #elif PY_VERSION_HEX >= 0x03000000 #define __Pyx_PyThreadState_Current PyThreadState_GET() #else #define __Pyx_PyThreadState_Current _PyThreadState_Current #endif #if CYTHON_COMPILING_IN_CPYTHON || defined(_PyDict_NewPresized) #define 
__Pyx_PyDict_NewPresized(n) ((n <= 8) ? PyDict_New() : _PyDict_NewPresized(n)) #else #define __Pyx_PyDict_NewPresized(n) PyDict_New() #endif #if PY_MAJOR_VERSION >= 3 || CYTHON_FUTURE_DIVISION #define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y) #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y) #else #define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y) #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y) #endif #if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND) #define CYTHON_PEP393_ENABLED 1 #define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ?\ 0 : _PyUnicode_Ready((PyObject *)(op))) #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_LENGTH(u) #define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i) #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) PyUnicode_MAX_CHAR_VALUE(u) #define __Pyx_PyUnicode_KIND(u) PyUnicode_KIND(u) #define __Pyx_PyUnicode_DATA(u) PyUnicode_DATA(u) #define __Pyx_PyUnicode_READ(k, d, i) PyUnicode_READ(k, d, i) #define __Pyx_PyUnicode_WRITE(k, d, i, ch) PyUnicode_WRITE(k, d, i, ch) #define __Pyx_PyUnicode_IS_TRUE(u) (0 != (likely(PyUnicode_IS_READY(u)) ? PyUnicode_GET_LENGTH(u) : PyUnicode_GET_SIZE(u))) #else #define CYTHON_PEP393_ENABLED 0 #define PyUnicode_1BYTE_KIND 1 #define PyUnicode_2BYTE_KIND 2 #define PyUnicode_4BYTE_KIND 4 #define __Pyx_PyUnicode_READY(op) (0) #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_SIZE(u) #define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i])) #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) ((sizeof(Py_UNICODE) == 2) ? 65535 : 1114111) #define __Pyx_PyUnicode_KIND(u) (sizeof(Py_UNICODE)) #define __Pyx_PyUnicode_DATA(u) ((void*)PyUnicode_AS_UNICODE(u)) #define __Pyx_PyUnicode_READ(k, d, i) ((void)(k), (Py_UCS4)(((Py_UNICODE*)d)[i])) #define __Pyx_PyUnicode_WRITE(k, d, i, ch) (((void)(k)), ((Py_UNICODE*)d)[i] = ch) #define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_SIZE(u)) #endif #if CYTHON_COMPILING_IN_PYPY #define __Pyx_PyUnicode_Concat(a, b) PyNumber_Add(a, b) #define __Pyx_PyUnicode_ConcatSafe(a, b) PyNumber_Add(a, b) #else #define __Pyx_PyUnicode_Concat(a, b) PyUnicode_Concat(a, b) #define __Pyx_PyUnicode_ConcatSafe(a, b) ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ?\ PyNumber_Add(a, b) : __Pyx_PyUnicode_Concat(a, b)) #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyUnicode_Contains) #define PyUnicode_Contains(u, s) PySequence_Contains(u, s) #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyByteArray_Check) #define PyByteArray_Check(obj) PyObject_TypeCheck(obj, &PyByteArray_Type) #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Format) #define PyObject_Format(obj, fmt) PyObject_CallMethod(obj, "__format__", "O", fmt) #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Malloc) #define PyObject_Malloc(s) PyMem_Malloc(s) #define PyObject_Free(p) PyMem_Free(p) #define PyObject_Realloc(p) PyMem_Realloc(p) #endif #if CYTHON_COMPILING_IN_PYSTON #define __Pyx_PyCode_HasFreeVars(co) PyCode_HasFreeVars(co) #define __Pyx_PyFrame_SetLineNumber(frame, lineno) PyFrame_SetLineNumber(frame, lineno) #else #define __Pyx_PyCode_HasFreeVars(co) (PyCode_GetNumFree(co) > 0) #define __Pyx_PyFrame_SetLineNumber(frame, lineno) (frame)->f_lineno = (lineno) #endif #define __Pyx_PyString_FormatSafe(a, b) ((unlikely((a) == Py_None)) ? PyNumber_Remainder(a, b) : __Pyx_PyString_Format(a, b)) #define __Pyx_PyUnicode_FormatSafe(a, b) ((unlikely((a) == Py_None)) ? 
PyNumber_Remainder(a, b) : PyUnicode_Format(a, b)) #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyString_Format(a, b) PyUnicode_Format(a, b) #else #define __Pyx_PyString_Format(a, b) PyString_Format(a, b) #endif #if PY_MAJOR_VERSION < 3 && !defined(PyObject_ASCII) #define PyObject_ASCII(o) PyObject_Repr(o) #endif #if PY_MAJOR_VERSION >= 3 #define PyBaseString_Type PyUnicode_Type #define PyStringObject PyUnicodeObject #define PyString_Type PyUnicode_Type #define PyString_Check PyUnicode_Check #define PyString_CheckExact PyUnicode_CheckExact #endif #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyBaseString_Check(obj) PyUnicode_Check(obj) #define __Pyx_PyBaseString_CheckExact(obj) PyUnicode_CheckExact(obj) #else #define __Pyx_PyBaseString_Check(obj) (PyString_Check(obj) || PyUnicode_Check(obj)) #define __Pyx_PyBaseString_CheckExact(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj)) #endif #ifndef PySet_CheckExact #define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type) #endif #define __Pyx_PyException_Check(obj) __Pyx_TypeCheck(obj, PyExc_Exception) #if PY_MAJOR_VERSION >= 3 #define PyIntObject PyLongObject #define PyInt_Type PyLong_Type #define PyInt_Check(op) PyLong_Check(op) #define PyInt_CheckExact(op) PyLong_CheckExact(op) #define PyInt_FromString PyLong_FromString #define PyInt_FromUnicode PyLong_FromUnicode #define PyInt_FromLong PyLong_FromLong #define PyInt_FromSize_t PyLong_FromSize_t #define PyInt_FromSsize_t PyLong_FromSsize_t #define PyInt_AsLong PyLong_AsLong #define PyInt_AS_LONG PyLong_AS_LONG #define PyInt_AsSsize_t PyLong_AsSsize_t #define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask #define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask #define PyNumber_Int PyNumber_Long #endif #if PY_MAJOR_VERSION >= 3 #define PyBoolObject PyLongObject #endif #if PY_MAJOR_VERSION >= 3 && CYTHON_COMPILING_IN_PYPY #ifndef PyUnicode_InternFromString #define PyUnicode_InternFromString(s) PyUnicode_FromString(s) #endif #endif #if PY_VERSION_HEX < 0x030200A4 typedef long Py_hash_t; #define __Pyx_PyInt_FromHash_t PyInt_FromLong #define __Pyx_PyInt_AsHash_t PyInt_AsLong #else #define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t #define __Pyx_PyInt_AsHash_t PyInt_AsSsize_t #endif #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyMethod_New(func, self, klass) ((self) ? 
PyMethod_New(func, self) : PyInstanceMethod_New(func)) #else #define __Pyx_PyMethod_New(func, self, klass) PyMethod_New(func, self, klass) #endif #ifndef __has_attribute #define __has_attribute(x) 0 #endif #ifndef __has_cpp_attribute #define __has_cpp_attribute(x) 0 #endif #if CYTHON_USE_ASYNC_SLOTS #if PY_VERSION_HEX >= 0x030500B1 #define __Pyx_PyAsyncMethodsStruct PyAsyncMethods #define __Pyx_PyType_AsAsync(obj) (Py_TYPE(obj)->tp_as_async) #else #define __Pyx_PyType_AsAsync(obj) ((__Pyx_PyAsyncMethodsStruct*) (Py_TYPE(obj)->tp_reserved)) #endif #else #define __Pyx_PyType_AsAsync(obj) NULL #endif #ifndef __Pyx_PyAsyncMethodsStruct typedef struct { unaryfunc am_await; unaryfunc am_aiter; unaryfunc am_anext; } __Pyx_PyAsyncMethodsStruct; #endif #ifndef CYTHON_RESTRICT #if defined(__GNUC__) #define CYTHON_RESTRICT __restrict__ #elif defined(_MSC_VER) && _MSC_VER >= 1400 #define CYTHON_RESTRICT __restrict #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define CYTHON_RESTRICT restrict #else #define CYTHON_RESTRICT #endif #endif #ifndef CYTHON_UNUSED # if defined(__GNUC__) # if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) # define CYTHON_UNUSED __attribute__ ((__unused__)) # else # define CYTHON_UNUSED # endif # elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER)) # define CYTHON_UNUSED __attribute__ ((__unused__)) # else # define CYTHON_UNUSED # endif #endif #ifndef CYTHON_MAYBE_UNUSED_VAR # if defined(__cplusplus) template void CYTHON_MAYBE_UNUSED_VAR( const T& ) { } # else # define CYTHON_MAYBE_UNUSED_VAR(x) (void)(x) # endif #endif #ifndef CYTHON_NCP_UNUSED # if CYTHON_COMPILING_IN_CPYTHON # define CYTHON_NCP_UNUSED # else # define CYTHON_NCP_UNUSED CYTHON_UNUSED # endif #endif #define __Pyx_void_to_None(void_result) ((void)(void_result), Py_INCREF(Py_None), Py_None) #ifdef _MSC_VER #ifndef _MSC_STDINT_H_ #if _MSC_VER < 1300 typedef unsigned char uint8_t; typedef unsigned int uint32_t; #else typedef unsigned __int8 uint8_t; typedef unsigned __int32 uint32_t; #endif #endif #else #include #endif #ifndef CYTHON_FALLTHROUGH #if defined(__cplusplus) && __cplusplus >= 201103L #if __has_cpp_attribute(fallthrough) #define CYTHON_FALLTHROUGH [[fallthrough]] #elif __has_cpp_attribute(clang::fallthrough) #define CYTHON_FALLTHROUGH [[clang::fallthrough]] #elif __has_cpp_attribute(gnu::fallthrough) #define CYTHON_FALLTHROUGH [[gnu::fallthrough]] #endif #endif #ifndef CYTHON_FALLTHROUGH #if __has_attribute(fallthrough) #define CYTHON_FALLTHROUGH __attribute__((fallthrough)) #else #define CYTHON_FALLTHROUGH #endif #endif #if defined(__clang__ ) && defined(__apple_build_version__) #if __apple_build_version__ < 7000000 #undef CYTHON_FALLTHROUGH #define CYTHON_FALLTHROUGH #endif #endif #endif #ifndef CYTHON_INLINE #if defined(__clang__) #define CYTHON_INLINE __inline__ __attribute__ ((__unused__)) #elif defined(__GNUC__) #define CYTHON_INLINE __inline__ #elif defined(_MSC_VER) #define CYTHON_INLINE __inline #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define CYTHON_INLINE inline #else #define CYTHON_INLINE #endif #endif #if defined(WIN32) || defined(MS_WINDOWS) #define _USE_MATH_DEFINES #endif #include #ifdef NAN #define __PYX_NAN() ((float) NAN) #else static CYTHON_INLINE float __PYX_NAN() { float value; memset(&value, 0xFF, sizeof(value)); return value; } #endif #if defined(__CYGWIN__) && defined(_LDBL_EQ_DBL) #define __Pyx_truncl trunc #else #define __Pyx_truncl truncl #endif #define __PYX_ERR(f_index, lineno, 
Ln_error) \ { \ __pyx_filename = __pyx_f[f_index]; __pyx_lineno = lineno; __pyx_clineno = __LINE__; goto Ln_error; \ } #ifndef __PYX_EXTERN_C #ifdef __cplusplus #define __PYX_EXTERN_C extern "C" #else #define __PYX_EXTERN_C extern #endif #endif #define __PYX_HAVE__photutils__geometry__elliptical_overlap #define __PYX_HAVE_API__photutils__geometry__elliptical_overlap #include #include #include "numpy/arrayobject.h" #include "numpy/ufuncobject.h" #include "math.h" #include "pythread.h" #ifdef _OPENMP #include #endif /* _OPENMP */ #if defined(PYREX_WITHOUT_ASSERTIONS) && !defined(CYTHON_WITHOUT_ASSERTIONS) #define CYTHON_WITHOUT_ASSERTIONS #endif typedef struct {PyObject **p; const char *s; const Py_ssize_t n; const char* encoding; const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry; #define __PYX_DEFAULT_STRING_ENCODING_IS_ASCII 0 #define __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT 0 #define __PYX_DEFAULT_STRING_ENCODING "" #define __Pyx_PyObject_FromString __Pyx_PyBytes_FromString #define __Pyx_PyObject_FromStringAndSize __Pyx_PyBytes_FromStringAndSize #define __Pyx_uchar_cast(c) ((unsigned char)c) #define __Pyx_long_cast(x) ((long)x) #define __Pyx_fits_Py_ssize_t(v, type, is_signed) (\ (sizeof(type) < sizeof(Py_ssize_t)) ||\ (sizeof(type) > sizeof(Py_ssize_t) &&\ likely(v < (type)PY_SSIZE_T_MAX ||\ v == (type)PY_SSIZE_T_MAX) &&\ (!is_signed || likely(v > (type)PY_SSIZE_T_MIN ||\ v == (type)PY_SSIZE_T_MIN))) ||\ (sizeof(type) == sizeof(Py_ssize_t) &&\ (is_signed || likely(v < (type)PY_SSIZE_T_MAX ||\ v == (type)PY_SSIZE_T_MAX))) ) #if defined (__cplusplus) && __cplusplus >= 201103L #include #define __Pyx_sst_abs(value) std::abs(value) #elif SIZEOF_INT >= SIZEOF_SIZE_T #define __Pyx_sst_abs(value) abs(value) #elif SIZEOF_LONG >= SIZEOF_SIZE_T #define __Pyx_sst_abs(value) labs(value) #elif defined (_MSC_VER) #define __Pyx_sst_abs(value) ((Py_ssize_t)_abs64(value)) #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define __Pyx_sst_abs(value) llabs(value) #elif defined (__GNUC__) #define __Pyx_sst_abs(value) __builtin_llabs(value) #else #define __Pyx_sst_abs(value) ((value<0) ? 
-value : value) #endif static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject*); static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject*, Py_ssize_t* length); #define __Pyx_PyByteArray_FromString(s) PyByteArray_FromStringAndSize((const char*)s, strlen((const char*)s)) #define __Pyx_PyByteArray_FromStringAndSize(s, l) PyByteArray_FromStringAndSize((const char*)s, l) #define __Pyx_PyBytes_FromString PyBytes_FromString #define __Pyx_PyBytes_FromStringAndSize PyBytes_FromStringAndSize static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char*); #if PY_MAJOR_VERSION < 3 #define __Pyx_PyStr_FromString __Pyx_PyBytes_FromString #define __Pyx_PyStr_FromStringAndSize __Pyx_PyBytes_FromStringAndSize #else #define __Pyx_PyStr_FromString __Pyx_PyUnicode_FromString #define __Pyx_PyStr_FromStringAndSize __Pyx_PyUnicode_FromStringAndSize #endif #define __Pyx_PyBytes_AsWritableString(s) ((char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsWritableSString(s) ((signed char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsWritableUString(s) ((unsigned char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsString(s) ((const char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsSString(s) ((const signed char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsUString(s) ((const unsigned char*) PyBytes_AS_STRING(s)) #define __Pyx_PyObject_AsWritableString(s) ((char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_AsWritableSString(s) ((signed char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_AsWritableUString(s) ((unsigned char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_AsSString(s) ((const signed char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_AsUString(s) ((const unsigned char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_FromCString(s) __Pyx_PyObject_FromString((const char*)s) #define __Pyx_PyBytes_FromCString(s) __Pyx_PyBytes_FromString((const char*)s) #define __Pyx_PyByteArray_FromCString(s) __Pyx_PyByteArray_FromString((const char*)s) #define __Pyx_PyStr_FromCString(s) __Pyx_PyStr_FromString((const char*)s) #define __Pyx_PyUnicode_FromCString(s) __Pyx_PyUnicode_FromString((const char*)s) static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const Py_UNICODE *u) { const Py_UNICODE *u_end = u; while (*u_end++) ; return (size_t)(u_end - u - 1); } #define __Pyx_PyUnicode_FromUnicode(u) PyUnicode_FromUnicode(u, __Pyx_Py_UNICODE_strlen(u)) #define __Pyx_PyUnicode_FromUnicodeAndLength PyUnicode_FromUnicode #define __Pyx_PyUnicode_AsUnicode PyUnicode_AsUnicode #define __Pyx_NewRef(obj) (Py_INCREF(obj), obj) #define __Pyx_Owned_Py_None(b) __Pyx_NewRef(Py_None) #define __Pyx_PyBool_FromLong(b) ((b) ? __Pyx_NewRef(Py_True) : __Pyx_NewRef(Py_False)) static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*); static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x); #define __Pyx_PySequence_Tuple(obj)\ (likely(PyTuple_CheckExact(obj)) ? __Pyx_NewRef(obj) : PySequence_Tuple(obj)) static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*); static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t); #if CYTHON_ASSUME_SAFE_MACROS #define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x)) #else #define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x) #endif #define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x)) #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyNumber_Int(x) (PyLong_CheckExact(x) ? 
__Pyx_NewRef(x) : PyNumber_Long(x)) #else #define __Pyx_PyNumber_Int(x) (PyInt_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Int(x)) #endif #define __Pyx_PyNumber_Float(x) (PyFloat_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Float(x)) #if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII static int __Pyx_sys_getdefaultencoding_not_ascii; static int __Pyx_init_sys_getdefaultencoding_params(void) { PyObject* sys; PyObject* default_encoding = NULL; PyObject* ascii_chars_u = NULL; PyObject* ascii_chars_b = NULL; const char* default_encoding_c; sys = PyImport_ImportModule("sys"); if (!sys) goto bad; default_encoding = PyObject_CallMethod(sys, (char*) "getdefaultencoding", NULL); Py_DECREF(sys); if (!default_encoding) goto bad; default_encoding_c = PyBytes_AsString(default_encoding); if (!default_encoding_c) goto bad; if (strcmp(default_encoding_c, "ascii") == 0) { __Pyx_sys_getdefaultencoding_not_ascii = 0; } else { char ascii_chars[128]; int c; for (c = 0; c < 128; c++) { ascii_chars[c] = c; } __Pyx_sys_getdefaultencoding_not_ascii = 1; ascii_chars_u = PyUnicode_DecodeASCII(ascii_chars, 128, NULL); if (!ascii_chars_u) goto bad; ascii_chars_b = PyUnicode_AsEncodedString(ascii_chars_u, default_encoding_c, NULL); if (!ascii_chars_b || !PyBytes_Check(ascii_chars_b) || memcmp(ascii_chars, PyBytes_AS_STRING(ascii_chars_b), 128) != 0) { PyErr_Format( PyExc_ValueError, "This module compiled with c_string_encoding=ascii, but default encoding '%.200s' is not a superset of ascii.", default_encoding_c); goto bad; } Py_DECREF(ascii_chars_u); Py_DECREF(ascii_chars_b); } Py_DECREF(default_encoding); return 0; bad: Py_XDECREF(default_encoding); Py_XDECREF(ascii_chars_u); Py_XDECREF(ascii_chars_b); return -1; } #endif #if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT && PY_MAJOR_VERSION >= 3 #define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_DecodeUTF8(c_str, size, NULL) #else #define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_Decode(c_str, size, __PYX_DEFAULT_STRING_ENCODING, NULL) #if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT static char* __PYX_DEFAULT_STRING_ENCODING; static int __Pyx_init_sys_getdefaultencoding_params(void) { PyObject* sys; PyObject* default_encoding = NULL; char* default_encoding_c; sys = PyImport_ImportModule("sys"); if (!sys) goto bad; default_encoding = PyObject_CallMethod(sys, (char*) (const char*) "getdefaultencoding", NULL); Py_DECREF(sys); if (!default_encoding) goto bad; default_encoding_c = PyBytes_AsString(default_encoding); if (!default_encoding_c) goto bad; __PYX_DEFAULT_STRING_ENCODING = (char*) malloc(strlen(default_encoding_c)); if (!__PYX_DEFAULT_STRING_ENCODING) goto bad; strcpy(__PYX_DEFAULT_STRING_ENCODING, default_encoding_c); Py_DECREF(default_encoding); return 0; bad: Py_XDECREF(default_encoding); return -1; } #endif #endif /* Test for GCC > 2.95 */ #if defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))) #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #else /* !__GNUC__ or GCC < 2.95 */ #define likely(x) (x) #define unlikely(x) (x) #endif /* __GNUC__ */ static CYTHON_INLINE void __Pyx_pretend_to_initialize(void* ptr) { (void)ptr; } static PyObject *__pyx_m = NULL; static PyObject *__pyx_d; static PyObject *__pyx_b; static PyObject *__pyx_cython_runtime; static PyObject *__pyx_empty_tuple; static PyObject *__pyx_empty_bytes; static PyObject *__pyx_empty_unicode; static int __pyx_lineno; static int __pyx_clineno = 0; static const char * __pyx_cfilenm= __FILE__; static 
const char *__pyx_filename; /* Header.proto */ #if !defined(CYTHON_CCOMPLEX) #if defined(__cplusplus) #define CYTHON_CCOMPLEX 1 #elif defined(_Complex_I) #define CYTHON_CCOMPLEX 1 #else #define CYTHON_CCOMPLEX 0 #endif #endif #if CYTHON_CCOMPLEX #ifdef __cplusplus #include #else #include #endif #endif #if CYTHON_CCOMPLEX && !defined(__cplusplus) && defined(__sun__) && defined(__GNUC__) #undef _Complex_I #define _Complex_I 1.0fj #endif static const char *__pyx_f[] = { "photutils/geometry/elliptical_overlap.pyx", "__init__.pxd", "type.pxd", "bool.pxd", "complex.pxd", }; /* BufferFormatStructs.proto */ #define IS_UNSIGNED(type) (((type) -1) > 0) struct __Pyx_StructField_; #define __PYX_BUF_FLAGS_PACKED_STRUCT (1 << 0) typedef struct { const char* name; struct __Pyx_StructField_* fields; size_t size; size_t arraysize[8]; int ndim; char typegroup; char is_unsigned; int flags; } __Pyx_TypeInfo; typedef struct __Pyx_StructField_ { __Pyx_TypeInfo* type; const char* name; size_t offset; } __Pyx_StructField; typedef struct { __Pyx_StructField* field; size_t parent_offset; } __Pyx_BufFmt_StackElem; typedef struct { __Pyx_StructField root; __Pyx_BufFmt_StackElem* head; size_t fmt_offset; size_t new_count, enc_count; size_t struct_alignment; int is_complex; char enc_type; char new_packmode; char enc_packmode; char is_valid_array; } __Pyx_BufFmt_Context; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":743 * # in Cython to enable them only on the right systems. * * ctypedef npy_int8 int8_t # <<<<<<<<<<<<<< * ctypedef npy_int16 int16_t * ctypedef npy_int32 int32_t */ typedef npy_int8 __pyx_t_5numpy_int8_t; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":744 * * ctypedef npy_int8 int8_t * ctypedef npy_int16 int16_t # <<<<<<<<<<<<<< * ctypedef npy_int32 int32_t * ctypedef npy_int64 int64_t */ typedef npy_int16 __pyx_t_5numpy_int16_t; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":745 * ctypedef npy_int8 int8_t * ctypedef npy_int16 int16_t * ctypedef npy_int32 int32_t # <<<<<<<<<<<<<< * ctypedef npy_int64 int64_t * #ctypedef npy_int96 int96_t */ typedef npy_int32 __pyx_t_5numpy_int32_t; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":746 * ctypedef npy_int16 int16_t * ctypedef npy_int32 int32_t * ctypedef npy_int64 int64_t # <<<<<<<<<<<<<< * #ctypedef npy_int96 int96_t * #ctypedef npy_int128 int128_t */ typedef npy_int64 __pyx_t_5numpy_int64_t; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":750 * #ctypedef npy_int128 int128_t * * ctypedef npy_uint8 uint8_t # <<<<<<<<<<<<<< * ctypedef npy_uint16 uint16_t * ctypedef npy_uint32 uint32_t */ typedef npy_uint8 __pyx_t_5numpy_uint8_t; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":751 * * ctypedef npy_uint8 uint8_t * ctypedef npy_uint16 uint16_t # <<<<<<<<<<<<<< * ctypedef npy_uint32 uint32_t * ctypedef npy_uint64 uint64_t */ typedef npy_uint16 __pyx_t_5numpy_uint16_t; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":752 * ctypedef npy_uint8 uint8_t * ctypedef npy_uint16 uint16_t * ctypedef npy_uint32 uint32_t # <<<<<<<<<<<<<< * ctypedef npy_uint64 uint64_t * #ctypedef npy_uint96 uint96_t */ typedef npy_uint32 __pyx_t_5numpy_uint32_t; /* 
"../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":753 * ctypedef npy_uint16 uint16_t * ctypedef npy_uint32 uint32_t * ctypedef npy_uint64 uint64_t # <<<<<<<<<<<<<< * #ctypedef npy_uint96 uint96_t * #ctypedef npy_uint128 uint128_t */ typedef npy_uint64 __pyx_t_5numpy_uint64_t; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":757 * #ctypedef npy_uint128 uint128_t * * ctypedef npy_float32 float32_t # <<<<<<<<<<<<<< * ctypedef npy_float64 float64_t * #ctypedef npy_float80 float80_t */ typedef npy_float32 __pyx_t_5numpy_float32_t; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":758 * * ctypedef npy_float32 float32_t * ctypedef npy_float64 float64_t # <<<<<<<<<<<<<< * #ctypedef npy_float80 float80_t * #ctypedef npy_float128 float128_t */ typedef npy_float64 __pyx_t_5numpy_float64_t; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":767 * # The int types are mapped a bit surprising -- * # numpy.int corresponds to 'l' and numpy.long to 'q' * ctypedef npy_long int_t # <<<<<<<<<<<<<< * ctypedef npy_longlong long_t * ctypedef npy_longlong longlong_t */ typedef npy_long __pyx_t_5numpy_int_t; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":768 * # numpy.int corresponds to 'l' and numpy.long to 'q' * ctypedef npy_long int_t * ctypedef npy_longlong long_t # <<<<<<<<<<<<<< * ctypedef npy_longlong longlong_t * */ typedef npy_longlong __pyx_t_5numpy_long_t; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":769 * ctypedef npy_long int_t * ctypedef npy_longlong long_t * ctypedef npy_longlong longlong_t # <<<<<<<<<<<<<< * * ctypedef npy_ulong uint_t */ typedef npy_longlong __pyx_t_5numpy_longlong_t; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":771 * ctypedef npy_longlong longlong_t * * ctypedef npy_ulong uint_t # <<<<<<<<<<<<<< * ctypedef npy_ulonglong ulong_t * ctypedef npy_ulonglong ulonglong_t */ typedef npy_ulong __pyx_t_5numpy_uint_t; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":772 * * ctypedef npy_ulong uint_t * ctypedef npy_ulonglong ulong_t # <<<<<<<<<<<<<< * ctypedef npy_ulonglong ulonglong_t * */ typedef npy_ulonglong __pyx_t_5numpy_ulong_t; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":773 * ctypedef npy_ulong uint_t * ctypedef npy_ulonglong ulong_t * ctypedef npy_ulonglong ulonglong_t # <<<<<<<<<<<<<< * * ctypedef npy_intp intp_t */ typedef npy_ulonglong __pyx_t_5numpy_ulonglong_t; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":775 * ctypedef npy_ulonglong ulonglong_t * * ctypedef npy_intp intp_t # <<<<<<<<<<<<<< * ctypedef npy_uintp uintp_t * */ typedef npy_intp __pyx_t_5numpy_intp_t; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":776 * * ctypedef npy_intp intp_t * ctypedef npy_uintp uintp_t # <<<<<<<<<<<<<< * * ctypedef npy_double float_t */ typedef npy_uintp __pyx_t_5numpy_uintp_t; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":778 * ctypedef npy_uintp uintp_t * * ctypedef npy_double float_t # <<<<<<<<<<<<<< * ctypedef npy_double double_t * ctypedef npy_longdouble longdouble_t */ typedef 
npy_double __pyx_t_5numpy_float_t; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":779 * * ctypedef npy_double float_t * ctypedef npy_double double_t # <<<<<<<<<<<<<< * ctypedef npy_longdouble longdouble_t * */ typedef npy_double __pyx_t_5numpy_double_t; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":780 * ctypedef npy_double float_t * ctypedef npy_double double_t * ctypedef npy_longdouble longdouble_t # <<<<<<<<<<<<<< * * ctypedef npy_cfloat cfloat_t */ typedef npy_longdouble __pyx_t_5numpy_longdouble_t; /* "photutils/geometry/elliptical_overlap.pyx":30 * * DTYPE = np.float64 * ctypedef np.float64_t DTYPE_t # <<<<<<<<<<<<<< * * cimport cython */ typedef __pyx_t_5numpy_float64_t __pyx_t_9photutils_8geometry_18elliptical_overlap_DTYPE_t; /* Declarations.proto */ #if CYTHON_CCOMPLEX #ifdef __cplusplus typedef ::std::complex< float > __pyx_t_float_complex; #else typedef float _Complex __pyx_t_float_complex; #endif #else typedef struct { float real, imag; } __pyx_t_float_complex; #endif static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float, float); /* Declarations.proto */ #if CYTHON_CCOMPLEX #ifdef __cplusplus typedef ::std::complex< double > __pyx_t_double_complex; #else typedef double _Complex __pyx_t_double_complex; #endif #else typedef struct { double real, imag; } __pyx_t_double_complex; #endif static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double, double); /*--- Type declarations ---*/ /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":782 * ctypedef npy_longdouble longdouble_t * * ctypedef npy_cfloat cfloat_t # <<<<<<<<<<<<<< * ctypedef npy_cdouble cdouble_t * ctypedef npy_clongdouble clongdouble_t */ typedef npy_cfloat __pyx_t_5numpy_cfloat_t; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":783 * * ctypedef npy_cfloat cfloat_t * ctypedef npy_cdouble cdouble_t # <<<<<<<<<<<<<< * ctypedef npy_clongdouble clongdouble_t * */ typedef npy_cdouble __pyx_t_5numpy_cdouble_t; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":784 * ctypedef npy_cfloat cfloat_t * ctypedef npy_cdouble cdouble_t * ctypedef npy_clongdouble clongdouble_t # <<<<<<<<<<<<<< * * ctypedef npy_cdouble complex_t */ typedef npy_clongdouble __pyx_t_5numpy_clongdouble_t; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":786 * ctypedef npy_clongdouble clongdouble_t * * ctypedef npy_cdouble complex_t # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew1(a): */ typedef npy_cdouble __pyx_t_5numpy_complex_t; /* --- Runtime support code (head) --- */ /* Refnanny.proto */ #ifndef CYTHON_REFNANNY #define CYTHON_REFNANNY 0 #endif #if CYTHON_REFNANNY typedef struct { void (*INCREF)(void*, PyObject*, int); void (*DECREF)(void*, PyObject*, int); void (*GOTREF)(void*, PyObject*, int); void (*GIVEREF)(void*, PyObject*, int); void* (*SetupContext)(const char*, int, const char*); void (*FinishContext)(void**); } __Pyx_RefNannyAPIStruct; static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL; static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname); #define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL; #ifdef WITH_THREAD #define __Pyx_RefNannySetupContext(name, acquire_gil)\ if (acquire_gil) {\ PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();\ 
__pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\ PyGILState_Release(__pyx_gilstate_save);\ } else {\ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\ } #else #define __Pyx_RefNannySetupContext(name, acquire_gil)\ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__) #endif #define __Pyx_RefNannyFinishContext()\ __Pyx_RefNanny->FinishContext(&__pyx_refnanny) #define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0) #define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0) #define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0) #define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0) #else #define __Pyx_RefNannyDeclarations #define __Pyx_RefNannySetupContext(name, acquire_gil) #define __Pyx_RefNannyFinishContext() #define __Pyx_INCREF(r) Py_INCREF(r) #define __Pyx_DECREF(r) Py_DECREF(r) #define __Pyx_GOTREF(r) #define __Pyx_GIVEREF(r) #define __Pyx_XINCREF(r) Py_XINCREF(r) #define __Pyx_XDECREF(r) Py_XDECREF(r) #define __Pyx_XGOTREF(r) #define __Pyx_XGIVEREF(r) #endif #define __Pyx_XDECREF_SET(r, v) do {\ PyObject *tmp = (PyObject *) r;\ r = v; __Pyx_XDECREF(tmp);\ } while (0) #define __Pyx_DECREF_SET(r, v) do {\ PyObject *tmp = (PyObject *) r;\ r = v; __Pyx_DECREF(tmp);\ } while (0) #define __Pyx_CLEAR(r) do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0) #define __Pyx_XCLEAR(r) do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0) /* PyObjectGetAttrStr.proto */ #if CYTHON_USE_TYPE_SLOTS static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name) { PyTypeObject* tp = Py_TYPE(obj); if (likely(tp->tp_getattro)) return tp->tp_getattro(obj, attr_name); #if PY_MAJOR_VERSION < 3 if (likely(tp->tp_getattr)) return tp->tp_getattr(obj, PyString_AS_STRING(attr_name)); #endif return PyObject_GetAttr(obj, attr_name); } #else #define __Pyx_PyObject_GetAttrStr(o,n) PyObject_GetAttr(o,n) #endif /* GetBuiltinName.proto */ static PyObject *__Pyx_GetBuiltinName(PyObject *name); /* RaiseArgTupleInvalid.proto */ static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact, Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found); /* RaiseDoubleKeywords.proto */ static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name); /* ParseKeywords.proto */ static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[],\ PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args,\ const char* function_name); /* GetModuleGlobalName.proto */ static CYTHON_INLINE PyObject *__Pyx_GetModuleGlobalName(PyObject *name); /* PyObjectCall.proto */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw); #else #define __Pyx_PyObject_Call(func, arg, kw) PyObject_Call(func, arg, kw) #endif /* ExtTypeTest.proto */ static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type); /* IsLittleEndian.proto */ static CYTHON_INLINE int __Pyx_Is_Little_Endian(void); /* BufferFormatCheck.proto */ static 
const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts); static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx, __Pyx_BufFmt_StackElem* stack, __Pyx_TypeInfo* type); /* BufferGetAndValidate.proto */ #define __Pyx_GetBufferAndValidate(buf, obj, dtype, flags, nd, cast, stack)\ ((obj == Py_None || obj == NULL) ?\ (__Pyx_ZeroBuffer(buf), 0) :\ __Pyx__GetBufferAndValidate(buf, obj, dtype, flags, nd, cast, stack)) static int __Pyx__GetBufferAndValidate(Py_buffer* buf, PyObject* obj, __Pyx_TypeInfo* dtype, int flags, int nd, int cast, __Pyx_BufFmt_StackElem* stack); static void __Pyx_ZeroBuffer(Py_buffer* buf); static CYTHON_INLINE void __Pyx_SafeReleaseBuffer(Py_buffer* info); static Py_ssize_t __Pyx_minusones[] = { -1, -1, -1, -1, -1, -1, -1, -1 }; static Py_ssize_t __Pyx_zeros[] = { 0, 0, 0, 0, 0, 0, 0, 0 }; /* BufferIndexError.proto */ static void __Pyx_RaiseBufferIndexError(int axis); #define __Pyx_BufPtrStrided2d(type, buf, i0, s0, i1, s1) (type)((char*)buf + i0 * s0 + i1 * s1) /* PyThreadStateGet.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_PyThreadState_declare PyThreadState *__pyx_tstate; #define __Pyx_PyThreadState_assign __pyx_tstate = __Pyx_PyThreadState_Current; #define __Pyx_PyErr_Occurred() __pyx_tstate->curexc_type #else #define __Pyx_PyThreadState_declare #define __Pyx_PyThreadState_assign #define __Pyx_PyErr_Occurred() PyErr_Occurred() #endif /* PyErrFetchRestore.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_PyErr_Clear() __Pyx_ErrRestore(NULL, NULL, NULL) #define __Pyx_ErrRestoreWithState(type, value, tb) __Pyx_ErrRestoreInState(PyThreadState_GET(), type, value, tb) #define __Pyx_ErrFetchWithState(type, value, tb) __Pyx_ErrFetchInState(PyThreadState_GET(), type, value, tb) #define __Pyx_ErrRestore(type, value, tb) __Pyx_ErrRestoreInState(__pyx_tstate, type, value, tb) #define __Pyx_ErrFetch(type, value, tb) __Pyx_ErrFetchInState(__pyx_tstate, type, value, tb) static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb); static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); #if CYTHON_COMPILING_IN_CPYTHON #define __Pyx_PyErr_SetNone(exc) (Py_INCREF(exc), __Pyx_ErrRestore((exc), NULL, NULL)) #else #define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc) #endif #else #define __Pyx_PyErr_Clear() PyErr_Clear() #define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc) #define __Pyx_ErrRestoreWithState(type, value, tb) PyErr_Restore(type, value, tb) #define __Pyx_ErrFetchWithState(type, value, tb) PyErr_Fetch(type, value, tb) #define __Pyx_ErrRestoreInState(tstate, type, value, tb) PyErr_Restore(type, value, tb) #define __Pyx_ErrFetchInState(tstate, type, value, tb) PyErr_Fetch(type, value, tb) #define __Pyx_ErrRestore(type, value, tb) PyErr_Restore(type, value, tb) #define __Pyx_ErrFetch(type, value, tb) PyErr_Fetch(type, value, tb) #endif /* WriteUnraisableException.proto */ static void __Pyx_WriteUnraisable(const char *name, int clineno, int lineno, const char *filename, int full_traceback, int nogil); /* RaiseException.proto */ static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause); /* DictGetItem.proto */ #if PY_MAJOR_VERSION >= 3 && !CYTHON_COMPILING_IN_PYPY static PyObject *__Pyx_PyDict_GetItem(PyObject *d, PyObject* key) { PyObject *value; value = PyDict_GetItemWithError(d, key); if (unlikely(!value)) { if (!PyErr_Occurred()) { PyObject* args = PyTuple_Pack(1, key); if (likely(args)) 
PyErr_SetObject(PyExc_KeyError, args); Py_XDECREF(args); } return NULL; } Py_INCREF(value); return value; } #else #define __Pyx_PyDict_GetItem(d, key) PyObject_GetItem(d, key) #endif /* RaiseTooManyValuesToUnpack.proto */ static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected); /* RaiseNeedMoreValuesToUnpack.proto */ static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index); /* RaiseNoneIterError.proto */ static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void); /* SaveResetException.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_ExceptionSave(type, value, tb) __Pyx__ExceptionSave(__pyx_tstate, type, value, tb) static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); #define __Pyx_ExceptionReset(type, value, tb) __Pyx__ExceptionReset(__pyx_tstate, type, value, tb) static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb); #else #define __Pyx_ExceptionSave(type, value, tb) PyErr_GetExcInfo(type, value, tb) #define __Pyx_ExceptionReset(type, value, tb) PyErr_SetExcInfo(type, value, tb) #endif /* PyErrExceptionMatches.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_PyErr_ExceptionMatches(err) __Pyx_PyErr_ExceptionMatchesInState(__pyx_tstate, err) static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err); #else #define __Pyx_PyErr_ExceptionMatches(err) PyErr_ExceptionMatches(err) #endif /* GetException.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_GetException(type, value, tb) __Pyx__GetException(__pyx_tstate, type, value, tb) static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); #else static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb); #endif /* Import.proto */ static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level); /* CLineInTraceback.proto */ #ifdef CYTHON_CLINE_IN_TRACEBACK #define __Pyx_CLineForTraceback(tstate, c_line) (((CYTHON_CLINE_IN_TRACEBACK)) ? 
c_line : 0) #else static int __Pyx_CLineForTraceback(PyThreadState *tstate, int c_line); #endif /* CodeObjectCache.proto */ typedef struct { PyCodeObject* code_object; int code_line; } __Pyx_CodeObjectCacheEntry; struct __Pyx_CodeObjectCache { int count; int max_count; __Pyx_CodeObjectCacheEntry* entries; }; static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL}; static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line); static PyCodeObject *__pyx_find_code_object(int code_line); static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object); /* AddTraceback.proto */ static void __Pyx_AddTraceback(const char *funcname, int c_line, int py_line, const char *filename); /* BufferStructDeclare.proto */ typedef struct { Py_ssize_t shape, strides, suboffsets; } __Pyx_Buf_DimInfo; typedef struct { size_t refcount; Py_buffer pybuffer; } __Pyx_Buffer; typedef struct { __Pyx_Buffer *rcbuffer; char *data; __Pyx_Buf_DimInfo diminfo[8]; } __Pyx_LocalBuf_ND; #if PY_MAJOR_VERSION < 3 static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags); static void __Pyx_ReleaseBuffer(Py_buffer *view); #else #define __Pyx_GetBuffer PyObject_GetBuffer #define __Pyx_ReleaseBuffer PyBuffer_Release #endif /* CIntToPy.proto */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value); /* RealImag.proto */ #if CYTHON_CCOMPLEX #ifdef __cplusplus #define __Pyx_CREAL(z) ((z).real()) #define __Pyx_CIMAG(z) ((z).imag()) #else #define __Pyx_CREAL(z) (__real__(z)) #define __Pyx_CIMAG(z) (__imag__(z)) #endif #else #define __Pyx_CREAL(z) ((z).real) #define __Pyx_CIMAG(z) ((z).imag) #endif #if defined(__cplusplus) && CYTHON_CCOMPLEX\ && (defined(_WIN32) || defined(__clang__) || (defined(__GNUC__) && (__GNUC__ >= 5 || __GNUC__ == 4 && __GNUC_MINOR__ >= 4 )) || __cplusplus >= 201103) #define __Pyx_SET_CREAL(z,x) ((z).real(x)) #define __Pyx_SET_CIMAG(z,y) ((z).imag(y)) #else #define __Pyx_SET_CREAL(z,x) __Pyx_CREAL(z) = (x) #define __Pyx_SET_CIMAG(z,y) __Pyx_CIMAG(z) = (y) #endif /* Arithmetic.proto */ #if CYTHON_CCOMPLEX #define __Pyx_c_eq_float(a, b) ((a)==(b)) #define __Pyx_c_sum_float(a, b) ((a)+(b)) #define __Pyx_c_diff_float(a, b) ((a)-(b)) #define __Pyx_c_prod_float(a, b) ((a)*(b)) #define __Pyx_c_quot_float(a, b) ((a)/(b)) #define __Pyx_c_neg_float(a) (-(a)) #ifdef __cplusplus #define __Pyx_c_is_zero_float(z) ((z)==(float)0) #define __Pyx_c_conj_float(z) (::std::conj(z)) #if 1 #define __Pyx_c_abs_float(z) (::std::abs(z)) #define __Pyx_c_pow_float(a, b) (::std::pow(a, b)) #endif #else #define __Pyx_c_is_zero_float(z) ((z)==0) #define __Pyx_c_conj_float(z) (conjf(z)) #if 1 #define __Pyx_c_abs_float(z) (cabsf(z)) #define __Pyx_c_pow_float(a, b) (cpowf(a, b)) #endif #endif #else static CYTHON_INLINE int __Pyx_c_eq_float(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sum_float(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_diff_float(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prod_float(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quot_float(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_neg_float(__pyx_t_float_complex); static CYTHON_INLINE int __Pyx_c_is_zero_float(__pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conj_float(__pyx_t_float_complex); #if 1 static 
CYTHON_INLINE float __Pyx_c_abs_float(__pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_pow_float(__pyx_t_float_complex, __pyx_t_float_complex); #endif #endif /* Arithmetic.proto */ #if CYTHON_CCOMPLEX #define __Pyx_c_eq_double(a, b) ((a)==(b)) #define __Pyx_c_sum_double(a, b) ((a)+(b)) #define __Pyx_c_diff_double(a, b) ((a)-(b)) #define __Pyx_c_prod_double(a, b) ((a)*(b)) #define __Pyx_c_quot_double(a, b) ((a)/(b)) #define __Pyx_c_neg_double(a) (-(a)) #ifdef __cplusplus #define __Pyx_c_is_zero_double(z) ((z)==(double)0) #define __Pyx_c_conj_double(z) (::std::conj(z)) #if 1 #define __Pyx_c_abs_double(z) (::std::abs(z)) #define __Pyx_c_pow_double(a, b) (::std::pow(a, b)) #endif #else #define __Pyx_c_is_zero_double(z) ((z)==0) #define __Pyx_c_conj_double(z) (conj(z)) #if 1 #define __Pyx_c_abs_double(z) (cabs(z)) #define __Pyx_c_pow_double(a, b) (cpow(a, b)) #endif #endif #else static CYTHON_INLINE int __Pyx_c_eq_double(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum_double(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff_double(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod_double(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot_double(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg_double(__pyx_t_double_complex); static CYTHON_INLINE int __Pyx_c_is_zero_double(__pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj_double(__pyx_t_double_complex); #if 1 static CYTHON_INLINE double __Pyx_c_abs_double(__pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow_double(__pyx_t_double_complex, __pyx_t_double_complex); #endif #endif /* CIntToPy.proto */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_enum__NPY_TYPES(enum NPY_TYPES value); /* CIntFromPy.proto */ static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *); /* CIntFromPy.proto */ static CYTHON_INLINE unsigned int __Pyx_PyInt_As_unsigned_int(PyObject *); /* CIntToPy.proto */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value); /* CIntFromPy.proto */ static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *); /* FastTypeChecks.proto */ #if CYTHON_COMPILING_IN_CPYTHON #define __Pyx_TypeCheck(obj, type) __Pyx_IsSubtype(Py_TYPE(obj), (PyTypeObject *)type) static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b); static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject *type); static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *type1, PyObject *type2); #else #define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type) #define __Pyx_PyErr_GivenExceptionMatches(err, type) PyErr_GivenExceptionMatches(err, type) #define __Pyx_PyErr_GivenExceptionMatches2(err, type1, type2) (PyErr_GivenExceptionMatches(err, type1) || PyErr_GivenExceptionMatches(err, type2)) #endif /* CheckBinaryVersion.proto */ static int __Pyx_check_binary_version(void); /* PyIdentifierFromString.proto */ #if !defined(__Pyx_PyIdentifier_FromString) #if PY_MAJOR_VERSION < 3 #define __Pyx_PyIdentifier_FromString(s) PyString_FromString(s) #else #define __Pyx_PyIdentifier_FromString(s) PyUnicode_FromString(s) #endif #endif /* ModuleImport.proto */ static PyObject *__Pyx_ImportModule(const char *name); /* TypeImport.proto */ 
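/*
 * The CodeObjectCache declared a little above keeps a sorted array of
 * (code_line, PyCodeObject*) entries so that __Pyx_AddTraceback can reuse the
 * code objects it synthesizes for tracebacks; __pyx_bisect_code_objects looks an
 * entry up by binary search. A minimal Python sketch of the same idea, using the
 * standard bisect module (the class and names here are illustrative, not the
 * Cython internals):
 *
 *     import bisect
 *
 *     class CodeObjectCache:
 *         """Sorted cache mapping a line number to a cached object."""
 *
 *         def __init__(self):
 *             self.lines = []    # sorted line numbers
 *             self.objects = []  # cached objects, parallel to self.lines
 *
 *         def find(self, code_line):
 *             # Binary search for an exact match (cf. __pyx_bisect_code_objects).
 *             i = bisect.bisect_left(self.lines, code_line)
 *             if i < len(self.lines) and self.lines[i] == code_line:
 *                 return self.objects[i]
 *             return None
 *
 *         def insert(self, code_line, obj):
 *             i = bisect.bisect_left(self.lines, code_line)
 *             if i < len(self.lines) and self.lines[i] == code_line:
 *                 self.objects[i] = obj            # replace an existing entry
 *             else:
 *                 self.lines.insert(i, code_line)  # keep both arrays sorted
 *                 self.objects.insert(i, obj)
 */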
static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class_name, size_t size, int strict); /* FunctionImport.proto */ static int __Pyx_ImportFunction(PyObject *module, const char *funcname, void (**f)(void), const char *sig); /* InitStrings.proto */ static int __Pyx_InitStrings(__Pyx_StringTabEntry *t); /* Module declarations from 'cpython.buffer' */ /* Module declarations from 'libc.string' */ /* Module declarations from 'libc.stdio' */ /* Module declarations from '__builtin__' */ /* Module declarations from 'cpython.type' */ static PyTypeObject *__pyx_ptype_7cpython_4type_type = 0; /* Module declarations from 'cpython.version' */ /* Module declarations from 'cpython.exc' */ /* Module declarations from 'cpython.module' */ /* Module declarations from 'cpython.mem' */ /* Module declarations from 'cpython.tuple' */ /* Module declarations from 'cpython.list' */ /* Module declarations from 'cpython.sequence' */ /* Module declarations from 'cpython.mapping' */ /* Module declarations from 'cpython.iterator' */ /* Module declarations from 'cpython.number' */ /* Module declarations from 'cpython.int' */ /* Module declarations from '__builtin__' */ /* Module declarations from 'cpython.bool' */ static PyTypeObject *__pyx_ptype_7cpython_4bool_bool = 0; /* Module declarations from 'cpython.long' */ /* Module declarations from 'cpython.float' */ /* Module declarations from '__builtin__' */ /* Module declarations from 'cpython.complex' */ static PyTypeObject *__pyx_ptype_7cpython_7complex_complex = 0; /* Module declarations from 'cpython.string' */ /* Module declarations from 'cpython.unicode' */ /* Module declarations from 'cpython.dict' */ /* Module declarations from 'cpython.instance' */ /* Module declarations from 'cpython.function' */ /* Module declarations from 'cpython.method' */ /* Module declarations from 'cpython.weakref' */ /* Module declarations from 'cpython.getargs' */ /* Module declarations from 'cpython.pythread' */ /* Module declarations from 'cpython.pystate' */ /* Module declarations from 'cpython.cobject' */ /* Module declarations from 'cpython.oldbuffer' */ /* Module declarations from 'cpython.set' */ /* Module declarations from 'cpython.bytes' */ /* Module declarations from 'cpython.pycapsule' */ /* Module declarations from 'cpython' */ /* Module declarations from 'cpython.object' */ /* Module declarations from 'cpython.ref' */ /* Module declarations from 'numpy' */ /* Module declarations from 'numpy' */ static PyTypeObject *__pyx_ptype_5numpy_dtype = 0; static PyTypeObject *__pyx_ptype_5numpy_flatiter = 0; static PyTypeObject *__pyx_ptype_5numpy_broadcast = 0; static PyTypeObject *__pyx_ptype_5numpy_ndarray = 0; static PyTypeObject *__pyx_ptype_5numpy_ufunc = 0; static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *, char *, char *, int *); /*proto*/ /* Module declarations from 'cython' */ /* Module declarations from 'photutils.geometry.core' */ static double (*__pyx_f_9photutils_8geometry_4core_distance)(double, double, double, double); /*proto*/ static double (*__pyx_f_9photutils_8geometry_4core_area_triangle)(double, double, double, double, double, double); /*proto*/ static double (*__pyx_f_9photutils_8geometry_4core_overlap_area_triangle_unit_circle)(double, double, double, double, double, double); /*proto*/ /* Module declarations from 'photutils.geometry.elliptical_overlap' */ static double __pyx_f_9photutils_8geometry_18elliptical_overlap_elliptical_overlap_single_subpixel(double, double, double, double, double, double, double, 
int); /*proto*/ static double __pyx_f_9photutils_8geometry_18elliptical_overlap_elliptical_overlap_single_exact(double, double, double, double, double, double, double); /*proto*/ static __Pyx_TypeInfo __Pyx_TypeInfo_nn___pyx_t_9photutils_8geometry_18elliptical_overlap_DTYPE_t = { "DTYPE_t", NULL, sizeof(__pyx_t_9photutils_8geometry_18elliptical_overlap_DTYPE_t), { 0 }, 0, 'R', 0, 0 }; #define __Pyx_MODULE_NAME "photutils.geometry.elliptical_overlap" int __pyx_module_is_main_photutils__geometry__elliptical_overlap = 0; /* Implementation of 'photutils.geometry.elliptical_overlap' */ static PyObject *__pyx_builtin_range; static PyObject *__pyx_builtin_ValueError; static PyObject *__pyx_builtin_RuntimeError; static PyObject *__pyx_builtin_ImportError; static const char __pyx_k_i[] = "i"; static const char __pyx_k_j[] = "j"; static const char __pyx_k_r[] = "r"; static const char __pyx_k_x[] = "x"; static const char __pyx_k_y[] = "y"; static const char __pyx_k_dx[] = "dx"; static const char __pyx_k_dy[] = "dy"; static const char __pyx_k_np[] = "np"; static const char __pyx_k_nx[] = "nx"; static const char __pyx_k_ny[] = "ny"; static const char __pyx_k_rx[] = "rx"; static const char __pyx_k_ry[] = "ry"; static const char __pyx_k_all[] = "__all__"; static const char __pyx_k_frac[] = "frac"; static const char __pyx_k_main[] = "__main__"; static const char __pyx_k_norm[] = "norm"; static const char __pyx_k_test[] = "__test__"; static const char __pyx_k_xmax[] = "xmax"; static const char __pyx_k_xmin[] = "xmin"; static const char __pyx_k_ymax[] = "ymax"; static const char __pyx_k_ymin[] = "ymin"; static const char __pyx_k_DTYPE[] = "DTYPE"; static const char __pyx_k_bxmax[] = "bxmax"; static const char __pyx_k_bxmin[] = "bxmin"; static const char __pyx_k_bymax[] = "bymax"; static const char __pyx_k_bymin[] = "bymin"; static const char __pyx_k_dtype[] = "dtype"; static const char __pyx_k_numpy[] = "numpy"; static const char __pyx_k_pxmax[] = "pxmax"; static const char __pyx_k_pxmin[] = "pxmin"; static const char __pyx_k_pymax[] = "pymax"; static const char __pyx_k_pymin[] = "pymin"; static const char __pyx_k_range[] = "range"; static const char __pyx_k_theta[] = "theta"; static const char __pyx_k_zeros[] = "zeros"; static const char __pyx_k_import[] = "__import__"; static const char __pyx_k_float64[] = "float64"; static const char __pyx_k_subpixels[] = "subpixels"; static const char __pyx_k_use_exact[] = "use_exact"; static const char __pyx_k_ValueError[] = "ValueError"; static const char __pyx_k_ImportError[] = "ImportError"; static const char __pyx_k_RuntimeError[] = "RuntimeError"; static const char __pyx_k_cline_in_traceback[] = "cline_in_traceback"; static const char __pyx_k_elliptical_overlap_grid[] = "elliptical_overlap_grid"; static const char __pyx_k_ndarray_is_not_C_contiguous[] = "ndarray is not C contiguous"; static const char __pyx_k_The_functions_defined_here_allo[] = "\nThe functions defined here allow one to determine the exact area of\noverlap of an ellipse and a triangle (written by Thomas Robitaille).\nThe approach is to divide the rectangle into two triangles, and\nreproject these so that the ellipse is a unit circle, then compute the\nintersection of a triangle with a unit circle.\n"; static const char __pyx_k_numpy_core_multiarray_failed_to[] = "numpy.core.multiarray failed to import"; static const char __pyx_k_unknown_dtype_code_in_numpy_pxd[] = "unknown dtype code in numpy.pxd (%d)"; static const char __pyx_k_Format_string_allocated_too_shor[] = "Format string allocated too 
short, see comment in numpy.pxd"; static const char __pyx_k_Non_native_byte_order_not_suppor[] = "Non-native byte order not supported"; static const char __pyx_k_ndarray_is_not_Fortran_contiguou[] = "ndarray is not Fortran contiguous"; static const char __pyx_k_numpy_core_umath_failed_to_impor[] = "numpy.core.umath failed to import"; static const char __pyx_k_photutils_geometry_elliptical_ov[] = "photutils/geometry/elliptical_overlap.pyx"; static const char __pyx_k_Format_string_allocated_too_shor_2[] = "Format string allocated too short."; static const char __pyx_k_photutils_geometry_elliptical_ov_2[] = "photutils.geometry.elliptical_overlap"; static PyObject *__pyx_n_s_DTYPE; static PyObject *__pyx_kp_u_Format_string_allocated_too_shor; static PyObject *__pyx_kp_u_Format_string_allocated_too_shor_2; static PyObject *__pyx_n_s_ImportError; static PyObject *__pyx_kp_u_Non_native_byte_order_not_suppor; static PyObject *__pyx_n_s_RuntimeError; static PyObject *__pyx_n_s_ValueError; static PyObject *__pyx_n_s_all; static PyObject *__pyx_n_s_bxmax; static PyObject *__pyx_n_s_bxmin; static PyObject *__pyx_n_s_bymax; static PyObject *__pyx_n_s_bymin; static PyObject *__pyx_n_s_cline_in_traceback; static PyObject *__pyx_n_s_dtype; static PyObject *__pyx_n_s_dx; static PyObject *__pyx_n_s_dy; static PyObject *__pyx_n_s_elliptical_overlap_grid; static PyObject *__pyx_n_u_elliptical_overlap_grid; static PyObject *__pyx_n_s_float64; static PyObject *__pyx_n_s_frac; static PyObject *__pyx_n_s_i; static PyObject *__pyx_n_s_import; static PyObject *__pyx_n_s_j; static PyObject *__pyx_n_s_main; static PyObject *__pyx_kp_u_ndarray_is_not_C_contiguous; static PyObject *__pyx_kp_u_ndarray_is_not_Fortran_contiguou; static PyObject *__pyx_n_s_norm; static PyObject *__pyx_n_s_np; static PyObject *__pyx_n_s_numpy; static PyObject *__pyx_kp_u_numpy_core_multiarray_failed_to; static PyObject *__pyx_kp_u_numpy_core_umath_failed_to_impor; static PyObject *__pyx_n_s_nx; static PyObject *__pyx_n_s_ny; static PyObject *__pyx_kp_s_photutils_geometry_elliptical_ov; static PyObject *__pyx_n_s_photutils_geometry_elliptical_ov_2; static PyObject *__pyx_n_s_pxmax; static PyObject *__pyx_n_s_pxmin; static PyObject *__pyx_n_s_pymax; static PyObject *__pyx_n_s_pymin; static PyObject *__pyx_n_s_r; static PyObject *__pyx_n_s_range; static PyObject *__pyx_n_s_rx; static PyObject *__pyx_n_s_ry; static PyObject *__pyx_n_s_subpixels; static PyObject *__pyx_n_s_test; static PyObject *__pyx_n_s_theta; static PyObject *__pyx_kp_u_unknown_dtype_code_in_numpy_pxd; static PyObject *__pyx_n_s_use_exact; static PyObject *__pyx_n_s_x; static PyObject *__pyx_n_s_xmax; static PyObject *__pyx_n_s_xmin; static PyObject *__pyx_n_s_y; static PyObject *__pyx_n_s_ymax; static PyObject *__pyx_n_s_ymin; static PyObject *__pyx_n_s_zeros; static PyObject *__pyx_pf_9photutils_8geometry_18elliptical_overlap_elliptical_overlap_grid(CYTHON_UNUSED PyObject *__pyx_self, double __pyx_v_xmin, double __pyx_v_xmax, double __pyx_v_ymin, double __pyx_v_ymax, int __pyx_v_nx, int __pyx_v_ny, double __pyx_v_rx, double __pyx_v_ry, double __pyx_v_theta, int __pyx_v_use_exact, int __pyx_v_subpixels); /* proto */ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */ static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info); /* proto */ static PyObject *__pyx_tuple_; static PyObject *__pyx_tuple__2; static PyObject *__pyx_tuple__3; static 
PyObject *__pyx_tuple__4; static PyObject *__pyx_tuple__5; static PyObject *__pyx_tuple__6; static PyObject *__pyx_tuple__7; static PyObject *__pyx_tuple__8; static PyObject *__pyx_tuple__9; static PyObject *__pyx_tuple__10; static PyObject *__pyx_codeobj__11; /* "photutils/geometry/elliptical_overlap.pyx":40 * * * def elliptical_overlap_grid(double xmin, double xmax, double ymin, double ymax, # <<<<<<<<<<<<<< * int nx, int ny, double rx, double ry, double theta, * int use_exact, int subpixels): */ /* Python wrapper */ static PyObject *__pyx_pw_9photutils_8geometry_18elliptical_overlap_1elliptical_overlap_grid(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_9photutils_8geometry_18elliptical_overlap_elliptical_overlap_grid[] = "\n elliptical_overlap_grid(xmin, xmax, ymin, ymax, nx, ny, rx, ry,\n use_exact, subpixels)\n\n Area of overlap between an ellipse and a pixel grid. The ellipse is\n centered on the origin.\n\n Parameters\n ----------\n xmin, xmax, ymin, ymax : float\n Extent of the grid in the x and y direction.\n nx, ny : int\n Grid dimensions.\n rx : float\n The semimajor axis of the ellipse.\n ry : float\n The semiminor axis of the ellipse.\n theta : float\n The position angle of the semimajor axis in radians (counterclockwise).\n use_exact : 0 or 1\n If set to 1, calculates the exact overlap, while if set to 0, uses a\n subpixel sampling method with ``subpixel`` subpixels in each direction.\n subpixels : int\n If ``use_exact`` is 0, each pixel is resampled by this factor in each\n dimension. Thus, each pixel is divided into ``subpixels ** 2``\n subpixels.\n\n Returns\n -------\n frac : `~numpy.ndarray`\n 2-d array giving the fraction of the overlap.\n "; static PyMethodDef __pyx_mdef_9photutils_8geometry_18elliptical_overlap_1elliptical_overlap_grid = {"elliptical_overlap_grid", (PyCFunction)__pyx_pw_9photutils_8geometry_18elliptical_overlap_1elliptical_overlap_grid, METH_VARARGS|METH_KEYWORDS, __pyx_doc_9photutils_8geometry_18elliptical_overlap_elliptical_overlap_grid}; static PyObject *__pyx_pw_9photutils_8geometry_18elliptical_overlap_1elliptical_overlap_grid(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { double __pyx_v_xmin; double __pyx_v_xmax; double __pyx_v_ymin; double __pyx_v_ymax; int __pyx_v_nx; int __pyx_v_ny; double __pyx_v_rx; double __pyx_v_ry; double __pyx_v_theta; int __pyx_v_use_exact; int __pyx_v_subpixels; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("elliptical_overlap_grid (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_xmin,&__pyx_n_s_xmax,&__pyx_n_s_ymin,&__pyx_n_s_ymax,&__pyx_n_s_nx,&__pyx_n_s_ny,&__pyx_n_s_rx,&__pyx_n_s_ry,&__pyx_n_s_theta,&__pyx_n_s_use_exact,&__pyx_n_s_subpixels,0}; PyObject* values[11] = {0,0,0,0,0,0,0,0,0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 11: values[10] = PyTuple_GET_ITEM(__pyx_args, 10); CYTHON_FALLTHROUGH; case 10: values[9] = PyTuple_GET_ITEM(__pyx_args, 9); CYTHON_FALLTHROUGH; case 9: values[8] = PyTuple_GET_ITEM(__pyx_args, 8); CYTHON_FALLTHROUGH; case 8: values[7] = PyTuple_GET_ITEM(__pyx_args, 7); CYTHON_FALLTHROUGH; case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6); CYTHON_FALLTHROUGH; case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); CYTHON_FALLTHROUGH; case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); CYTHON_FALLTHROUGH; case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; 
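/*
 * The docstring above gives the full Python-level signature of
 * elliptical_overlap_grid. A short usage sketch (grid and ellipse values are
 * illustrative) showing a typical call against the compiled extension module
 * photutils.geometry.elliptical_overlap:
 *
 *     import numpy as np
 *     from photutils.geometry.elliptical_overlap import elliptical_overlap_grid
 *
 *     # 5 x 5 pixel grid spanning [-2.5, 2.5] in x and y; ellipse centred on the
 *     # origin with semimajor axis 2, semiminor axis 1, rotated by 30 degrees.
 *     frac = elliptical_overlap_grid(-2.5, 2.5, -2.5, 2.5,       # xmin, xmax, ymin, ymax
 *                                    5, 5,                       # nx, ny
 *                                    2.0, 1.0, np.radians(30.),  # rx, ry, theta
 *                                    1, 5)                       # use_exact, subpixels
 *
 *     print(frac.shape)  # (ny, nx); each entry is an overlap fraction in [0, 1]
 *     print(frac.sum())  # about pi * rx * ry here, since dx = dy = 1 and the
 *                        # ellipse lies entirely inside the grid
 */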
case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_xmin)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_xmax)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("elliptical_overlap_grid", 1, 11, 11, 1); __PYX_ERR(0, 40, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_ymin)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("elliptical_overlap_grid", 1, 11, 11, 2); __PYX_ERR(0, 40, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 3: if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_ymax)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("elliptical_overlap_grid", 1, 11, 11, 3); __PYX_ERR(0, 40, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 4: if (likely((values[4] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_nx)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("elliptical_overlap_grid", 1, 11, 11, 4); __PYX_ERR(0, 40, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 5: if (likely((values[5] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_ny)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("elliptical_overlap_grid", 1, 11, 11, 5); __PYX_ERR(0, 40, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 6: if (likely((values[6] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_rx)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("elliptical_overlap_grid", 1, 11, 11, 6); __PYX_ERR(0, 40, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 7: if (likely((values[7] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_ry)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("elliptical_overlap_grid", 1, 11, 11, 7); __PYX_ERR(0, 40, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 8: if (likely((values[8] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_theta)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("elliptical_overlap_grid", 1, 11, 11, 8); __PYX_ERR(0, 40, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 9: if (likely((values[9] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_use_exact)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("elliptical_overlap_grid", 1, 11, 11, 9); __PYX_ERR(0, 40, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 10: if (likely((values[10] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_subpixels)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("elliptical_overlap_grid", 1, 11, 11, 10); __PYX_ERR(0, 40, __pyx_L3_error) } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "elliptical_overlap_grid") < 0)) __PYX_ERR(0, 40, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 11) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[3] = PyTuple_GET_ITEM(__pyx_args, 3); values[4] = PyTuple_GET_ITEM(__pyx_args, 4); values[5] = PyTuple_GET_ITEM(__pyx_args, 5); values[6] = PyTuple_GET_ITEM(__pyx_args, 6); values[7] = PyTuple_GET_ITEM(__pyx_args, 7); values[8] = PyTuple_GET_ITEM(__pyx_args, 8); values[9] = PyTuple_GET_ITEM(__pyx_args, 9); values[10] = PyTuple_GET_ITEM(__pyx_args, 10); } __pyx_v_xmin = __pyx_PyFloat_AsDouble(values[0]); if 
(unlikely((__pyx_v_xmin == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 40, __pyx_L3_error) __pyx_v_xmax = __pyx_PyFloat_AsDouble(values[1]); if (unlikely((__pyx_v_xmax == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 40, __pyx_L3_error) __pyx_v_ymin = __pyx_PyFloat_AsDouble(values[2]); if (unlikely((__pyx_v_ymin == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 40, __pyx_L3_error) __pyx_v_ymax = __pyx_PyFloat_AsDouble(values[3]); if (unlikely((__pyx_v_ymax == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 40, __pyx_L3_error) __pyx_v_nx = __Pyx_PyInt_As_int(values[4]); if (unlikely((__pyx_v_nx == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 41, __pyx_L3_error) __pyx_v_ny = __Pyx_PyInt_As_int(values[5]); if (unlikely((__pyx_v_ny == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 41, __pyx_L3_error) __pyx_v_rx = __pyx_PyFloat_AsDouble(values[6]); if (unlikely((__pyx_v_rx == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 41, __pyx_L3_error) __pyx_v_ry = __pyx_PyFloat_AsDouble(values[7]); if (unlikely((__pyx_v_ry == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 41, __pyx_L3_error) __pyx_v_theta = __pyx_PyFloat_AsDouble(values[8]); if (unlikely((__pyx_v_theta == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 41, __pyx_L3_error) __pyx_v_use_exact = __Pyx_PyInt_As_int(values[9]); if (unlikely((__pyx_v_use_exact == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 42, __pyx_L3_error) __pyx_v_subpixels = __Pyx_PyInt_As_int(values[10]); if (unlikely((__pyx_v_subpixels == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 42, __pyx_L3_error) } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("elliptical_overlap_grid", 1, 11, 11, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 40, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("photutils.geometry.elliptical_overlap.elliptical_overlap_grid", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_9photutils_8geometry_18elliptical_overlap_elliptical_overlap_grid(__pyx_self, __pyx_v_xmin, __pyx_v_xmax, __pyx_v_ymin, __pyx_v_ymax, __pyx_v_nx, __pyx_v_ny, __pyx_v_rx, __pyx_v_ry, __pyx_v_theta, __pyx_v_use_exact, __pyx_v_subpixels); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_9photutils_8geometry_18elliptical_overlap_elliptical_overlap_grid(CYTHON_UNUSED PyObject *__pyx_self, double __pyx_v_xmin, double __pyx_v_xmax, double __pyx_v_ymin, double __pyx_v_ymax, int __pyx_v_nx, int __pyx_v_ny, double __pyx_v_rx, double __pyx_v_ry, double __pyx_v_theta, int __pyx_v_use_exact, int __pyx_v_subpixels) { unsigned int __pyx_v_i; unsigned int __pyx_v_j; double __pyx_v_dx; double __pyx_v_dy; double __pyx_v_bxmin; double __pyx_v_bxmax; double __pyx_v_bymin; double __pyx_v_bymax; double __pyx_v_pxmin; double __pyx_v_pxmax; double __pyx_v_pymin; double __pyx_v_pymax; double __pyx_v_norm; PyArrayObject *__pyx_v_frac = 0; double __pyx_v_r; __Pyx_LocalBuf_ND __pyx_pybuffernd_frac; __Pyx_Buffer __pyx_pybuffer_frac; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyArrayObject *__pyx_t_5 = NULL; double __pyx_t_6; double __pyx_t_7; double __pyx_t_8; int __pyx_t_9; unsigned int __pyx_t_10; int __pyx_t_11; int __pyx_t_12; int __pyx_t_13; unsigned int __pyx_t_14; size_t __pyx_t_15; size_t __pyx_t_16; int __pyx_t_17; size_t __pyx_t_18; size_t __pyx_t_19; 
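/*
 * The wrapper above unpacks the eleven arguments, positionally or by keyword,
 * coerces each one to its C type with __pyx_PyFloat_AsDouble or
 * __Pyx_PyInt_As_int, and raises through __Pyx_RaiseArgtupleInvalid when the
 * count or a keyword is wrong. A rough Python equivalent of that coercion step
 * (the helper name is illustrative):
 *
 *     def _coerce_args(xmin, xmax, ymin, ymax, nx, ny, rx, ry, theta,
 *                      use_exact, subpixels):
 *         # Geometry parameters become C doubles, grid sizes and flags C ints;
 *         # a value that cannot be converted raises TypeError or ValueError,
 *         # mirroring the generated error paths.
 *         xmin, xmax, ymin, ymax = map(float, (xmin, xmax, ymin, ymax))
 *         rx, ry, theta = map(float, (rx, ry, theta))
 *         nx, ny, use_exact, subpixels = map(int, (nx, ny, use_exact, subpixels))
 *         return (xmin, xmax, ymin, ymax, nx, ny, rx, ry, theta,
 *                 use_exact, subpixels)
 */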
__Pyx_RefNannySetupContext("elliptical_overlap_grid", 0); __pyx_pybuffer_frac.pybuffer.buf = NULL; __pyx_pybuffer_frac.refcount = 0; __pyx_pybuffernd_frac.data = NULL; __pyx_pybuffernd_frac.rcbuffer = &__pyx_pybuffer_frac; /* "photutils/geometry/elliptical_overlap.pyx":83 * * # Define output array * cdef np.ndarray[DTYPE_t, ndim=2] frac = np.zeros([ny, nx], dtype=DTYPE) # <<<<<<<<<<<<<< * * # Find the width of each element in x and y */ __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 83, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_zeros); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 83, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_ny); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 83, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_nx); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 83, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyList_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 83, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_1); PyList_SET_ITEM(__pyx_t_4, 0, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_3); PyList_SET_ITEM(__pyx_t_4, 1, __pyx_t_3); __pyx_t_1 = 0; __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 83, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 83, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s_DTYPE); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 83, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_dtype, __pyx_t_1) < 0) __PYX_ERR(0, 83, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_3, __pyx_t_4); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 83, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 83, __pyx_L1_error) __pyx_t_5 = ((PyArrayObject *)__pyx_t_1); { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_frac.rcbuffer->pybuffer, (PyObject*)__pyx_t_5, &__Pyx_TypeInfo_nn___pyx_t_9photutils_8geometry_18elliptical_overlap_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 2, 0, __pyx_stack) == -1)) { __pyx_v_frac = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_pybuffernd_frac.rcbuffer->pybuffer.buf = NULL; __PYX_ERR(0, 83, __pyx_L1_error) } else {__pyx_pybuffernd_frac.diminfo[0].strides = __pyx_pybuffernd_frac.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_frac.diminfo[0].shape = __pyx_pybuffernd_frac.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_frac.diminfo[1].strides = __pyx_pybuffernd_frac.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_frac.diminfo[1].shape = __pyx_pybuffernd_frac.rcbuffer->pybuffer.shape[1]; } } __pyx_t_5 = 0; __pyx_v_frac = ((PyArrayObject *)__pyx_t_1); __pyx_t_1 = 0; /* "photutils/geometry/elliptical_overlap.pyx":86 * * # Find the width of each element in x and y * dx = (xmax - xmin) / nx # <<<<<<<<<<<<<< * dy = (ymax - ymin) / ny * */ __pyx_t_6 = (__pyx_v_xmax - __pyx_v_xmin); if (unlikely(__pyx_v_nx == 0)) { 
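/*
 * The block above builds the output array exactly as in the .pyx source,
 * frac = np.zeros([ny, nx], dtype=DTYPE), and then acquires a writable 2-d
 * buffer on it so that frac[j, i] can be filled without going back through
 * Python. DTYPE is the module-level alias for numpy.float64 (see the DTYPE and
 * float64 string constants above). The plain-numpy equivalent of the allocation
 * and of the properties the buffer check enforces:
 *
 *     import numpy as np
 *
 *     DTYPE = np.float64                      # module-level dtype used by the .pyx source
 *     ny, nx = 4, 6                           # illustrative grid dimensions
 *     frac = np.zeros([ny, nx], dtype=DTYPE)  # indexed frac[j, i], i.e. (row, column)
 *
 *     assert frac.ndim == 2
 *     assert frac.flags['WRITEABLE']
 *     assert frac.dtype == np.float64
 */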
PyErr_SetString(PyExc_ZeroDivisionError, "float division"); __PYX_ERR(0, 86, __pyx_L1_error) } __pyx_v_dx = (__pyx_t_6 / ((double)__pyx_v_nx)); /* "photutils/geometry/elliptical_overlap.pyx":87 * # Find the width of each element in x and y * dx = (xmax - xmin) / nx * dy = (ymax - ymin) / ny # <<<<<<<<<<<<<< * * norm = 1. / (dx * dy) */ __pyx_t_6 = (__pyx_v_ymax - __pyx_v_ymin); if (unlikely(__pyx_v_ny == 0)) { PyErr_SetString(PyExc_ZeroDivisionError, "float division"); __PYX_ERR(0, 87, __pyx_L1_error) } __pyx_v_dy = (__pyx_t_6 / ((double)__pyx_v_ny)); /* "photutils/geometry/elliptical_overlap.pyx":89 * dy = (ymax - ymin) / ny * * norm = 1. / (dx * dy) # <<<<<<<<<<<<<< * * # For now we use a bounding circle and then use that to find a bounding box */ __pyx_t_6 = (__pyx_v_dx * __pyx_v_dy); if (unlikely(__pyx_t_6 == 0)) { PyErr_SetString(PyExc_ZeroDivisionError, "float division"); __PYX_ERR(0, 89, __pyx_L1_error) } __pyx_v_norm = (1. / __pyx_t_6); /* "photutils/geometry/elliptical_overlap.pyx":95 * * # Find bounding circle radius * r = max(rx, ry) # <<<<<<<<<<<<<< * * # Define bounding box */ __pyx_t_6 = __pyx_v_ry; __pyx_t_7 = __pyx_v_rx; if (((__pyx_t_6 > __pyx_t_7) != 0)) { __pyx_t_8 = __pyx_t_6; } else { __pyx_t_8 = __pyx_t_7; } __pyx_v_r = __pyx_t_8; /* "photutils/geometry/elliptical_overlap.pyx":98 * * # Define bounding box * bxmin = -r - 0.5 * dx # <<<<<<<<<<<<<< * bxmax = +r + 0.5 * dx * bymin = -r - 0.5 * dy */ __pyx_v_bxmin = ((-__pyx_v_r) - (0.5 * __pyx_v_dx)); /* "photutils/geometry/elliptical_overlap.pyx":99 * # Define bounding box * bxmin = -r - 0.5 * dx * bxmax = +r + 0.5 * dx # <<<<<<<<<<<<<< * bymin = -r - 0.5 * dy * bymax = +r + 0.5 * dy */ __pyx_v_bxmax = (__pyx_v_r + (0.5 * __pyx_v_dx)); /* "photutils/geometry/elliptical_overlap.pyx":100 * bxmin = -r - 0.5 * dx * bxmax = +r + 0.5 * dx * bymin = -r - 0.5 * dy # <<<<<<<<<<<<<< * bymax = +r + 0.5 * dy * */ __pyx_v_bymin = ((-__pyx_v_r) - (0.5 * __pyx_v_dy)); /* "photutils/geometry/elliptical_overlap.pyx":101 * bxmax = +r + 0.5 * dx * bymin = -r - 0.5 * dy * bymax = +r + 0.5 * dy # <<<<<<<<<<<<<< * * for i in range(nx): */ __pyx_v_bymax = (__pyx_v_r + (0.5 * __pyx_v_dy)); /* "photutils/geometry/elliptical_overlap.pyx":103 * bymax = +r + 0.5 * dy * * for i in range(nx): # <<<<<<<<<<<<<< * pxmin = xmin + i * dx # lower end of pixel * pxmax = pxmin + dx # upper end of pixel */ __pyx_t_9 = __pyx_v_nx; for (__pyx_t_10 = 0; __pyx_t_10 < __pyx_t_9; __pyx_t_10+=1) { __pyx_v_i = __pyx_t_10; /* "photutils/geometry/elliptical_overlap.pyx":104 * * for i in range(nx): * pxmin = xmin + i * dx # lower end of pixel # <<<<<<<<<<<<<< * pxmax = pxmin + dx # upper end of pixel * if pxmax > bxmin and pxmin < bxmax: */ __pyx_v_pxmin = (__pyx_v_xmin + (__pyx_v_i * __pyx_v_dx)); /* "photutils/geometry/elliptical_overlap.pyx":105 * for i in range(nx): * pxmin = xmin + i * dx # lower end of pixel * pxmax = pxmin + dx # upper end of pixel # <<<<<<<<<<<<<< * if pxmax > bxmin and pxmin < bxmax: * for j in range(ny): */ __pyx_v_pxmax = (__pyx_v_pxmin + __pyx_v_dx); /* "photutils/geometry/elliptical_overlap.pyx":106 * pxmin = xmin + i * dx # lower end of pixel * pxmax = pxmin + dx # upper end of pixel * if pxmax > bxmin and pxmin < bxmax: # <<<<<<<<<<<<<< * for j in range(ny): * pymin = ymin + j * dy */ __pyx_t_12 = ((__pyx_v_pxmax > __pyx_v_bxmin) != 0); if (__pyx_t_12) { } else { __pyx_t_11 = __pyx_t_12; goto __pyx_L6_bool_binop_done; } __pyx_t_12 = ((__pyx_v_pxmin < __pyx_v_bxmax) != 0); __pyx_t_11 = __pyx_t_12; __pyx_L6_bool_binop_done:; if (__pyx_t_11) 
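/*
 * The code generated for lines 86 to 118 of elliptical_overlap.pyx is the grid
 * traversal itself: compute the pixel size dx, dy and the normalisation
 * 1 / (dx * dy), take a bounding circle of radius max(rx, ry) padded by half a
 * pixel to get a bounding box, and evaluate the overlap only for pixels whose
 * x and y ranges intersect that box. A compact pure-Python restatement (slow,
 * for illustration; the two single-pixel helpers are the cdef functions defined
 * further down in this file, and Python sketches of both follow below):
 *
 *     import numpy as np
 *
 *     def elliptical_overlap_grid_py(xmin, xmax, ymin, ymax, nx, ny, rx, ry,
 *                                    theta, use_exact, subpixels):
 *         frac = np.zeros([ny, nx], dtype=np.float64)
 *         dx = (xmax - xmin) / nx        # pixel width
 *         dy = (ymax - ymin) / ny        # pixel height
 *         norm = 1. / (dx * dy)          # turns an overlap area into a pixel fraction
 *         r = max(rx, ry)                # radius of the ellipse's bounding circle
 *         bxmin, bxmax = -r - 0.5 * dx, r + 0.5 * dx   # bounding box, half-pixel padded
 *         bymin, bymax = -r - 0.5 * dy, r + 0.5 * dy
 *         for i in range(nx):
 *             pxmin = xmin + i * dx      # lower x edge of pixel column i
 *             pxmax = pxmin + dx
 *             if pxmax > bxmin and pxmin < bxmax:           # cull columns outside the box
 *                 for j in range(ny):
 *                     pymin = ymin + j * dy
 *                     pymax = pymin + dy
 *                     if pymax > bymin and pymin < bymax:   # cull rows outside the box
 *                         if use_exact:
 *                             frac[j, i] = elliptical_overlap_single_exact(
 *                                 pxmin, pymin, pxmax, pymax, rx, ry, theta) * norm
 *                         else:
 *                             frac[j, i] = elliptical_overlap_single_subpixel(
 *                                 pxmin, pymin, pxmax, pymax, rx, ry, theta,
 *                                 subpixels)
 *         return frac
 */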
{ /* "photutils/geometry/elliptical_overlap.pyx":107 * pxmax = pxmin + dx # upper end of pixel * if pxmax > bxmin and pxmin < bxmax: * for j in range(ny): # <<<<<<<<<<<<<< * pymin = ymin + j * dy * pymax = pymin + dy */ __pyx_t_13 = __pyx_v_ny; for (__pyx_t_14 = 0; __pyx_t_14 < __pyx_t_13; __pyx_t_14+=1) { __pyx_v_j = __pyx_t_14; /* "photutils/geometry/elliptical_overlap.pyx":108 * if pxmax > bxmin and pxmin < bxmax: * for j in range(ny): * pymin = ymin + j * dy # <<<<<<<<<<<<<< * pymax = pymin + dy * if pymax > bymin and pymin < bymax: */ __pyx_v_pymin = (__pyx_v_ymin + (__pyx_v_j * __pyx_v_dy)); /* "photutils/geometry/elliptical_overlap.pyx":109 * for j in range(ny): * pymin = ymin + j * dy * pymax = pymin + dy # <<<<<<<<<<<<<< * if pymax > bymin and pymin < bymax: * if use_exact: */ __pyx_v_pymax = (__pyx_v_pymin + __pyx_v_dy); /* "photutils/geometry/elliptical_overlap.pyx":110 * pymin = ymin + j * dy * pymax = pymin + dy * if pymax > bymin and pymin < bymax: # <<<<<<<<<<<<<< * if use_exact: * frac[j, i] = elliptical_overlap_single_exact( */ __pyx_t_12 = ((__pyx_v_pymax > __pyx_v_bymin) != 0); if (__pyx_t_12) { } else { __pyx_t_11 = __pyx_t_12; goto __pyx_L11_bool_binop_done; } __pyx_t_12 = ((__pyx_v_pymin < __pyx_v_bymax) != 0); __pyx_t_11 = __pyx_t_12; __pyx_L11_bool_binop_done:; if (__pyx_t_11) { /* "photutils/geometry/elliptical_overlap.pyx":111 * pymax = pymin + dy * if pymax > bymin and pymin < bymax: * if use_exact: # <<<<<<<<<<<<<< * frac[j, i] = elliptical_overlap_single_exact( * pxmin, pymin, pxmax, pymax, rx, ry, theta) * norm */ __pyx_t_11 = (__pyx_v_use_exact != 0); if (__pyx_t_11) { /* "photutils/geometry/elliptical_overlap.pyx":112 * if pymax > bymin and pymin < bymax: * if use_exact: * frac[j, i] = elliptical_overlap_single_exact( # <<<<<<<<<<<<<< * pxmin, pymin, pxmax, pymax, rx, ry, theta) * norm * else: */ __pyx_t_15 = __pyx_v_j; __pyx_t_16 = __pyx_v_i; __pyx_t_17 = -1; if (unlikely(__pyx_t_15 >= (size_t)__pyx_pybuffernd_frac.diminfo[0].shape)) __pyx_t_17 = 0; if (unlikely(__pyx_t_16 >= (size_t)__pyx_pybuffernd_frac.diminfo[1].shape)) __pyx_t_17 = 1; if (unlikely(__pyx_t_17 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_17); __PYX_ERR(0, 112, __pyx_L1_error) } *__Pyx_BufPtrStrided2d(__pyx_t_9photutils_8geometry_18elliptical_overlap_DTYPE_t *, __pyx_pybuffernd_frac.rcbuffer->pybuffer.buf, __pyx_t_15, __pyx_pybuffernd_frac.diminfo[0].strides, __pyx_t_16, __pyx_pybuffernd_frac.diminfo[1].strides) = (__pyx_f_9photutils_8geometry_18elliptical_overlap_elliptical_overlap_single_exact(__pyx_v_pxmin, __pyx_v_pymin, __pyx_v_pxmax, __pyx_v_pymax, __pyx_v_rx, __pyx_v_ry, __pyx_v_theta) * __pyx_v_norm); /* "photutils/geometry/elliptical_overlap.pyx":111 * pymax = pymin + dy * if pymax > bymin and pymin < bymax: * if use_exact: # <<<<<<<<<<<<<< * frac[j, i] = elliptical_overlap_single_exact( * pxmin, pymin, pxmax, pymax, rx, ry, theta) * norm */ goto __pyx_L13; } /* "photutils/geometry/elliptical_overlap.pyx":115 * pxmin, pymin, pxmax, pymax, rx, ry, theta) * norm * else: * frac[j, i] = elliptical_overlap_single_subpixel( # <<<<<<<<<<<<<< * pxmin, pymin, pxmax, pymax, rx, ry, theta, * subpixels) */ /*else*/ { /* "photutils/geometry/elliptical_overlap.pyx":117 * frac[j, i] = elliptical_overlap_single_subpixel( * pxmin, pymin, pxmax, pymax, rx, ry, theta, * subpixels) # <<<<<<<<<<<<<< * return frac * */ __pyx_t_18 = __pyx_v_j; __pyx_t_19 = __pyx_v_i; __pyx_t_17 = -1; if (unlikely(__pyx_t_18 >= (size_t)__pyx_pybuffernd_frac.diminfo[0].shape)) __pyx_t_17 = 0; if (unlikely(__pyx_t_19 >= 
(size_t)__pyx_pybuffernd_frac.diminfo[1].shape)) __pyx_t_17 = 1; if (unlikely(__pyx_t_17 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_17); __PYX_ERR(0, 115, __pyx_L1_error) } *__Pyx_BufPtrStrided2d(__pyx_t_9photutils_8geometry_18elliptical_overlap_DTYPE_t *, __pyx_pybuffernd_frac.rcbuffer->pybuffer.buf, __pyx_t_18, __pyx_pybuffernd_frac.diminfo[0].strides, __pyx_t_19, __pyx_pybuffernd_frac.diminfo[1].strides) = __pyx_f_9photutils_8geometry_18elliptical_overlap_elliptical_overlap_single_subpixel(__pyx_v_pxmin, __pyx_v_pymin, __pyx_v_pxmax, __pyx_v_pymax, __pyx_v_rx, __pyx_v_ry, __pyx_v_theta, __pyx_v_subpixels); } __pyx_L13:; /* "photutils/geometry/elliptical_overlap.pyx":110 * pymin = ymin + j * dy * pymax = pymin + dy * if pymax > bymin and pymin < bymax: # <<<<<<<<<<<<<< * if use_exact: * frac[j, i] = elliptical_overlap_single_exact( */ } } /* "photutils/geometry/elliptical_overlap.pyx":106 * pxmin = xmin + i * dx # lower end of pixel * pxmax = pxmin + dx # upper end of pixel * if pxmax > bxmin and pxmin < bxmax: # <<<<<<<<<<<<<< * for j in range(ny): * pymin = ymin + j * dy */ } } /* "photutils/geometry/elliptical_overlap.pyx":118 * pxmin, pymin, pxmax, pymax, rx, ry, theta, * subpixels) * return frac # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_frac)); __pyx_r = ((PyObject *)__pyx_v_frac); goto __pyx_L0; /* "photutils/geometry/elliptical_overlap.pyx":40 * * * def elliptical_overlap_grid(double xmin, double xmax, double ymin, double ymax, # <<<<<<<<<<<<<< * int nx, int ny, double rx, double ry, double theta, * int use_exact, int subpixels): */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); { PyObject *__pyx_type, *__pyx_value, *__pyx_tb; __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_frac.rcbuffer->pybuffer); __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);} __Pyx_AddTraceback("photutils.geometry.elliptical_overlap.elliptical_overlap_grid", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; goto __pyx_L2; __pyx_L0:; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_frac.rcbuffer->pybuffer); __pyx_L2:; __Pyx_XDECREF((PyObject *)__pyx_v_frac); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "photutils/geometry/elliptical_overlap.pyx":127 * * * cdef double elliptical_overlap_single_subpixel(double x0, double y0, # <<<<<<<<<<<<<< * double x1, double y1, * double rx, double ry, */ static double __pyx_f_9photutils_8geometry_18elliptical_overlap_elliptical_overlap_single_subpixel(double __pyx_v_x0, double __pyx_v_y0, double __pyx_v_x1, double __pyx_v_y1, double __pyx_v_rx, double __pyx_v_ry, double __pyx_v_theta, int __pyx_v_subpixels) { CYTHON_UNUSED unsigned int __pyx_v_i; CYTHON_UNUSED unsigned int __pyx_v_j; double __pyx_v_x; double __pyx_v_y; double __pyx_v_frac; double __pyx_v_inv_rx_sq; double __pyx_v_inv_ry_sq; double __pyx_v_cos_theta; double __pyx_v_sin_theta; double __pyx_v_dx; double __pyx_v_dy; double __pyx_v_x_tr; double __pyx_v_y_tr; double __pyx_r; __Pyx_RefNannyDeclarations double __pyx_t_1; int __pyx_t_2; unsigned int __pyx_t_3; int __pyx_t_4; unsigned int __pyx_t_5; int __pyx_t_6; __Pyx_RefNannySetupContext("elliptical_overlap_single_subpixel", 0); /* "photutils/geometry/elliptical_overlap.pyx":138 * cdef unsigned int i, j * cdef double x, y * cdef double frac = 0. # Accumulator. 
# <<<<<<<<<<<<<< * cdef double inv_rx_sq, inv_ry_sq * cdef double cos_theta = cos(theta) */ __pyx_v_frac = 0.; /* "photutils/geometry/elliptical_overlap.pyx":140 * cdef double frac = 0. # Accumulator. * cdef double inv_rx_sq, inv_ry_sq * cdef double cos_theta = cos(theta) # <<<<<<<<<<<<<< * cdef double sin_theta = sin(theta) * cdef double dx, dy */ __pyx_v_cos_theta = cos(__pyx_v_theta); /* "photutils/geometry/elliptical_overlap.pyx":141 * cdef double inv_rx_sq, inv_ry_sq * cdef double cos_theta = cos(theta) * cdef double sin_theta = sin(theta) # <<<<<<<<<<<<<< * cdef double dx, dy * cdef double x_tr, y_tr */ __pyx_v_sin_theta = sin(__pyx_v_theta); /* "photutils/geometry/elliptical_overlap.pyx":145 * cdef double x_tr, y_tr * * dx = (x1 - x0) / subpixels # <<<<<<<<<<<<<< * dy = (y1 - y0) / subpixels * */ __pyx_t_1 = (__pyx_v_x1 - __pyx_v_x0); if (unlikely(__pyx_v_subpixels == 0)) { PyErr_SetString(PyExc_ZeroDivisionError, "float division"); __PYX_ERR(0, 145, __pyx_L1_error) } __pyx_v_dx = (__pyx_t_1 / ((double)__pyx_v_subpixels)); /* "photutils/geometry/elliptical_overlap.pyx":146 * * dx = (x1 - x0) / subpixels * dy = (y1 - y0) / subpixels # <<<<<<<<<<<<<< * * inv_rx_sq = 1. / (rx * rx) */ __pyx_t_1 = (__pyx_v_y1 - __pyx_v_y0); if (unlikely(__pyx_v_subpixels == 0)) { PyErr_SetString(PyExc_ZeroDivisionError, "float division"); __PYX_ERR(0, 146, __pyx_L1_error) } __pyx_v_dy = (__pyx_t_1 / ((double)__pyx_v_subpixels)); /* "photutils/geometry/elliptical_overlap.pyx":148 * dy = (y1 - y0) / subpixels * * inv_rx_sq = 1. / (rx * rx) # <<<<<<<<<<<<<< * inv_ry_sq = 1. / (ry * ry) * */ __pyx_t_1 = (__pyx_v_rx * __pyx_v_rx); if (unlikely(__pyx_t_1 == 0)) { PyErr_SetString(PyExc_ZeroDivisionError, "float division"); __PYX_ERR(0, 148, __pyx_L1_error) } __pyx_v_inv_rx_sq = (1. / __pyx_t_1); /* "photutils/geometry/elliptical_overlap.pyx":149 * * inv_rx_sq = 1. / (rx * rx) * inv_ry_sq = 1. / (ry * ry) # <<<<<<<<<<<<<< * * x = x0 - 0.5 * dx */ __pyx_t_1 = (__pyx_v_ry * __pyx_v_ry); if (unlikely(__pyx_t_1 == 0)) { PyErr_SetString(PyExc_ZeroDivisionError, "float division"); __PYX_ERR(0, 149, __pyx_L1_error) } __pyx_v_inv_ry_sq = (1. / __pyx_t_1); /* "photutils/geometry/elliptical_overlap.pyx":151 * inv_ry_sq = 1. 
/ (ry * ry) * * x = x0 - 0.5 * dx # <<<<<<<<<<<<<< * for i in range(subpixels): * x += dx */ __pyx_v_x = (__pyx_v_x0 - (0.5 * __pyx_v_dx)); /* "photutils/geometry/elliptical_overlap.pyx":152 * * x = x0 - 0.5 * dx * for i in range(subpixels): # <<<<<<<<<<<<<< * x += dx * y = y0 - 0.5 * dy */ __pyx_t_2 = __pyx_v_subpixels; for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { __pyx_v_i = __pyx_t_3; /* "photutils/geometry/elliptical_overlap.pyx":153 * x = x0 - 0.5 * dx * for i in range(subpixels): * x += dx # <<<<<<<<<<<<<< * y = y0 - 0.5 * dy * for j in range(subpixels): */ __pyx_v_x = (__pyx_v_x + __pyx_v_dx); /* "photutils/geometry/elliptical_overlap.pyx":154 * for i in range(subpixels): * x += dx * y = y0 - 0.5 * dy # <<<<<<<<<<<<<< * for j in range(subpixels): * y += dy */ __pyx_v_y = (__pyx_v_y0 - (0.5 * __pyx_v_dy)); /* "photutils/geometry/elliptical_overlap.pyx":155 * x += dx * y = y0 - 0.5 * dy * for j in range(subpixels): # <<<<<<<<<<<<<< * y += dy * */ __pyx_t_4 = __pyx_v_subpixels; for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_4; __pyx_t_5+=1) { __pyx_v_j = __pyx_t_5; /* "photutils/geometry/elliptical_overlap.pyx":156 * y = y0 - 0.5 * dy * for j in range(subpixels): * y += dy # <<<<<<<<<<<<<< * * # Transform into frame of rotated ellipse */ __pyx_v_y = (__pyx_v_y + __pyx_v_dy); /* "photutils/geometry/elliptical_overlap.pyx":159 * * # Transform into frame of rotated ellipse * x_tr = y * sin_theta + x * cos_theta # <<<<<<<<<<<<<< * y_tr = y * cos_theta - x * sin_theta * */ __pyx_v_x_tr = ((__pyx_v_y * __pyx_v_sin_theta) + (__pyx_v_x * __pyx_v_cos_theta)); /* "photutils/geometry/elliptical_overlap.pyx":160 * # Transform into frame of rotated ellipse * x_tr = y * sin_theta + x * cos_theta * y_tr = y * cos_theta - x * sin_theta # <<<<<<<<<<<<<< * * if x_tr * x_tr * inv_rx_sq + y_tr * y_tr * inv_ry_sq < 1.: */ __pyx_v_y_tr = ((__pyx_v_y * __pyx_v_cos_theta) - (__pyx_v_x * __pyx_v_sin_theta)); /* "photutils/geometry/elliptical_overlap.pyx":162 * y_tr = y * cos_theta - x * sin_theta * * if x_tr * x_tr * inv_rx_sq + y_tr * y_tr * inv_ry_sq < 1.: # <<<<<<<<<<<<<< * frac += 1. * */ __pyx_t_6 = (((((__pyx_v_x_tr * __pyx_v_x_tr) * __pyx_v_inv_rx_sq) + ((__pyx_v_y_tr * __pyx_v_y_tr) * __pyx_v_inv_ry_sq)) < 1.) != 0); if (__pyx_t_6) { /* "photutils/geometry/elliptical_overlap.pyx":163 * * if x_tr * x_tr * inv_rx_sq + y_tr * y_tr * inv_ry_sq < 1.: * frac += 1. # <<<<<<<<<<<<<< * * return frac / (subpixels * subpixels) */ __pyx_v_frac = (__pyx_v_frac + 1.); /* "photutils/geometry/elliptical_overlap.pyx":162 * y_tr = y * cos_theta - x * sin_theta * * if x_tr * x_tr * inv_rx_sq + y_tr * y_tr * inv_ry_sq < 1.: # <<<<<<<<<<<<<< * frac += 1. * */ } } } /* "photutils/geometry/elliptical_overlap.pyx":165 * frac += 1. 
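/*
 * The subpixel routine samples the pixel on a subpixels x subpixels grid of
 * sub-pixel centres, rotates each sample point into the frame of the
 * origin-centred ellipse, and counts the samples that fall inside it; the
 * result is the sampled fraction of the pixel. Restated in Python, following
 * the .pyx source echoed in the comments above:
 *
 *     from math import cos, sin
 *
 *     def elliptical_overlap_single_subpixel(x0, y0, x1, y1, rx, ry, theta,
 *                                            subpixels):
 *         # Fraction of the rectangle [x0, x1] x [y0, y1] inside the rotated
 *         # ellipse, estimated from the centres of subpixels ** 2 sub-pixels.
 *         frac = 0.
 *         cos_theta, sin_theta = cos(theta), sin(theta)
 *         dx = (x1 - x0) / subpixels
 *         dy = (y1 - y0) / subpixels
 *         inv_rx_sq = 1. / (rx * rx)
 *         inv_ry_sq = 1. / (ry * ry)
 *         x = x0 - 0.5 * dx
 *         for _ in range(subpixels):
 *             x += dx                        # x is now a sub-pixel centre
 *             y = y0 - 0.5 * dy
 *             for _ in range(subpixels):
 *                 y += dy
 *                 # Rotate the sample into the frame of the unrotated ellipse
 *                 x_tr = y * sin_theta + x * cos_theta
 *                 y_tr = y * cos_theta - x * sin_theta
 *                 if x_tr * x_tr * inv_rx_sq + y_tr * y_tr * inv_ry_sq < 1.:
 *                     frac += 1.
 *         return frac / (subpixels * subpixels)
 */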
* * return frac / (subpixels * subpixels) # <<<<<<<<<<<<<< * * */ __pyx_t_2 = (__pyx_v_subpixels * __pyx_v_subpixels); if (unlikely(__pyx_t_2 == 0)) { PyErr_SetString(PyExc_ZeroDivisionError, "float division"); __PYX_ERR(0, 165, __pyx_L1_error) } __pyx_r = (__pyx_v_frac / ((double)__pyx_t_2)); goto __pyx_L0; /* "photutils/geometry/elliptical_overlap.pyx":127 * * * cdef double elliptical_overlap_single_subpixel(double x0, double y0, # <<<<<<<<<<<<<< * double x1, double y1, * double rx, double ry, */ /* function exit code */ __pyx_L1_error:; __Pyx_WriteUnraisable("photutils.geometry.elliptical_overlap.elliptical_overlap_single_subpixel", __pyx_clineno, __pyx_lineno, __pyx_filename, 1, 0); __pyx_r = 0; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "photutils/geometry/elliptical_overlap.pyx":168 * * * cdef double elliptical_overlap_single_exact(double xmin, double ymin, # <<<<<<<<<<<<<< * double xmax, double ymax, * double rx, double ry, */ static double __pyx_f_9photutils_8geometry_18elliptical_overlap_elliptical_overlap_single_exact(double __pyx_v_xmin, double __pyx_v_ymin, double __pyx_v_xmax, double __pyx_v_ymax, double __pyx_v_rx, double __pyx_v_ry, double __pyx_v_theta) { double __pyx_v_cos_m_theta; double __pyx_v_sin_m_theta; double __pyx_v_scale; double __pyx_v_x1; double __pyx_v_y1; double __pyx_v_x2; double __pyx_v_y2; double __pyx_v_x3; double __pyx_v_y3; double __pyx_v_x4; double __pyx_v_y4; double __pyx_r; __Pyx_RefNannyDeclarations double __pyx_t_1; double __pyx_t_2; double __pyx_t_3; __Pyx_RefNannySetupContext("elliptical_overlap_single_exact", 0); /* "photutils/geometry/elliptical_overlap.pyx":178 * """ * * cdef double cos_m_theta = cos(-theta) # <<<<<<<<<<<<<< * cdef double sin_m_theta = sin(-theta) * cdef double scale */ __pyx_v_cos_m_theta = cos((-__pyx_v_theta)); /* "photutils/geometry/elliptical_overlap.pyx":179 * * cdef double cos_m_theta = cos(-theta) * cdef double sin_m_theta = sin(-theta) # <<<<<<<<<<<<<< * cdef double scale * */ __pyx_v_sin_m_theta = sin((-__pyx_v_theta)); /* "photutils/geometry/elliptical_overlap.pyx":183 * * # Find scale by which the areas will be shrunk * scale = rx * ry # <<<<<<<<<<<<<< * * # Reproject rectangle to frame of reference in which ellipse is a */ __pyx_v_scale = (__pyx_v_rx * __pyx_v_ry); /* "photutils/geometry/elliptical_overlap.pyx":187 * # Reproject rectangle to frame of reference in which ellipse is a * # unit circle * x1, y1 = ((xmin * cos_m_theta - ymin * sin_m_theta) / rx, # <<<<<<<<<<<<<< * (xmin * sin_m_theta + ymin * cos_m_theta) / ry) * x2, y2 = ((xmax * cos_m_theta - ymin * sin_m_theta) / rx, */ __pyx_t_1 = ((__pyx_v_xmin * __pyx_v_cos_m_theta) - (__pyx_v_ymin * __pyx_v_sin_m_theta)); if (unlikely(__pyx_v_rx == 0)) { PyErr_SetString(PyExc_ZeroDivisionError, "float division"); __PYX_ERR(0, 187, __pyx_L1_error) } __pyx_t_2 = (__pyx_t_1 / __pyx_v_rx); /* "photutils/geometry/elliptical_overlap.pyx":188 * # unit circle * x1, y1 = ((xmin * cos_m_theta - ymin * sin_m_theta) / rx, * (xmin * sin_m_theta + ymin * cos_m_theta) / ry) # <<<<<<<<<<<<<< * x2, y2 = ((xmax * cos_m_theta - ymin * sin_m_theta) / rx, * (xmax * sin_m_theta + ymin * cos_m_theta) / ry) */ __pyx_t_1 = ((__pyx_v_xmin * __pyx_v_sin_m_theta) + (__pyx_v_ymin * __pyx_v_cos_m_theta)); if (unlikely(__pyx_v_ry == 0)) { PyErr_SetString(PyExc_ZeroDivisionError, "float division"); __PYX_ERR(0, 188, __pyx_L1_error) } __pyx_t_3 = (__pyx_t_1 / __pyx_v_ry); __pyx_v_x1 = __pyx_t_2; __pyx_v_y1 = __pyx_t_3; /* "photutils/geometry/elliptical_overlap.pyx":189 * x1, 
y1 = ((xmin * cos_m_theta - ymin * sin_m_theta) / rx, * (xmin * sin_m_theta + ymin * cos_m_theta) / ry) * x2, y2 = ((xmax * cos_m_theta - ymin * sin_m_theta) / rx, # <<<<<<<<<<<<<< * (xmax * sin_m_theta + ymin * cos_m_theta) / ry) * x3, y3 = ((xmax * cos_m_theta - ymax * sin_m_theta) / rx, */ __pyx_t_3 = ((__pyx_v_xmax * __pyx_v_cos_m_theta) - (__pyx_v_ymin * __pyx_v_sin_m_theta)); if (unlikely(__pyx_v_rx == 0)) { PyErr_SetString(PyExc_ZeroDivisionError, "float division"); __PYX_ERR(0, 189, __pyx_L1_error) } __pyx_t_2 = (__pyx_t_3 / __pyx_v_rx); /* "photutils/geometry/elliptical_overlap.pyx":190 * (xmin * sin_m_theta + ymin * cos_m_theta) / ry) * x2, y2 = ((xmax * cos_m_theta - ymin * sin_m_theta) / rx, * (xmax * sin_m_theta + ymin * cos_m_theta) / ry) # <<<<<<<<<<<<<< * x3, y3 = ((xmax * cos_m_theta - ymax * sin_m_theta) / rx, * (xmax * sin_m_theta + ymax * cos_m_theta) / ry) */ __pyx_t_3 = ((__pyx_v_xmax * __pyx_v_sin_m_theta) + (__pyx_v_ymin * __pyx_v_cos_m_theta)); if (unlikely(__pyx_v_ry == 0)) { PyErr_SetString(PyExc_ZeroDivisionError, "float division"); __PYX_ERR(0, 190, __pyx_L1_error) } __pyx_t_1 = (__pyx_t_3 / __pyx_v_ry); __pyx_v_x2 = __pyx_t_2; __pyx_v_y2 = __pyx_t_1; /* "photutils/geometry/elliptical_overlap.pyx":191 * x2, y2 = ((xmax * cos_m_theta - ymin * sin_m_theta) / rx, * (xmax * sin_m_theta + ymin * cos_m_theta) / ry) * x3, y3 = ((xmax * cos_m_theta - ymax * sin_m_theta) / rx, # <<<<<<<<<<<<<< * (xmax * sin_m_theta + ymax * cos_m_theta) / ry) * x4, y4 = ((xmin * cos_m_theta - ymax * sin_m_theta) / rx, */ __pyx_t_1 = ((__pyx_v_xmax * __pyx_v_cos_m_theta) - (__pyx_v_ymax * __pyx_v_sin_m_theta)); if (unlikely(__pyx_v_rx == 0)) { PyErr_SetString(PyExc_ZeroDivisionError, "float division"); __PYX_ERR(0, 191, __pyx_L1_error) } __pyx_t_2 = (__pyx_t_1 / __pyx_v_rx); /* "photutils/geometry/elliptical_overlap.pyx":192 * (xmax * sin_m_theta + ymin * cos_m_theta) / ry) * x3, y3 = ((xmax * cos_m_theta - ymax * sin_m_theta) / rx, * (xmax * sin_m_theta + ymax * cos_m_theta) / ry) # <<<<<<<<<<<<<< * x4, y4 = ((xmin * cos_m_theta - ymax * sin_m_theta) / rx, * (xmin * sin_m_theta + ymax * cos_m_theta) / ry) */ __pyx_t_1 = ((__pyx_v_xmax * __pyx_v_sin_m_theta) + (__pyx_v_ymax * __pyx_v_cos_m_theta)); if (unlikely(__pyx_v_ry == 0)) { PyErr_SetString(PyExc_ZeroDivisionError, "float division"); __PYX_ERR(0, 192, __pyx_L1_error) } __pyx_t_3 = (__pyx_t_1 / __pyx_v_ry); __pyx_v_x3 = __pyx_t_2; __pyx_v_y3 = __pyx_t_3; /* "photutils/geometry/elliptical_overlap.pyx":193 * x3, y3 = ((xmax * cos_m_theta - ymax * sin_m_theta) / rx, * (xmax * sin_m_theta + ymax * cos_m_theta) / ry) * x4, y4 = ((xmin * cos_m_theta - ymax * sin_m_theta) / rx, # <<<<<<<<<<<<<< * (xmin * sin_m_theta + ymax * cos_m_theta) / ry) * */ __pyx_t_3 = ((__pyx_v_xmin * __pyx_v_cos_m_theta) - (__pyx_v_ymax * __pyx_v_sin_m_theta)); if (unlikely(__pyx_v_rx == 0)) { PyErr_SetString(PyExc_ZeroDivisionError, "float division"); __PYX_ERR(0, 193, __pyx_L1_error) } __pyx_t_2 = (__pyx_t_3 / __pyx_v_rx); /* "photutils/geometry/elliptical_overlap.pyx":194 * (xmax * sin_m_theta + ymax * cos_m_theta) / ry) * x4, y4 = ((xmin * cos_m_theta - ymax * sin_m_theta) / rx, * (xmin * sin_m_theta + ymax * cos_m_theta) / ry) # <<<<<<<<<<<<<< * * # Divide resulting quadrilateral into two triangles and find */ __pyx_t_3 = ((__pyx_v_xmin * __pyx_v_sin_m_theta) + (__pyx_v_ymax * __pyx_v_cos_m_theta)); if (unlikely(__pyx_v_ry == 0)) { PyErr_SetString(PyExc_ZeroDivisionError, "float division"); __PYX_ERR(0, 194, __pyx_L1_error) } __pyx_t_1 = (__pyx_t_3 / 
__pyx_v_ry); __pyx_v_x4 = __pyx_t_2; __pyx_v_y4 = __pyx_t_1; /* "photutils/geometry/elliptical_overlap.pyx":199 * # intersection with unit circle * return (overlap_area_triangle_unit_circle(x1, y1, x2, y2, x3, y3) + * overlap_area_triangle_unit_circle(x1, y1, x4, y4, x3, y3)) * scale # <<<<<<<<<<<<<< */ __pyx_r = ((__pyx_f_9photutils_8geometry_4core_overlap_area_triangle_unit_circle(__pyx_v_x1, __pyx_v_y1, __pyx_v_x2, __pyx_v_y2, __pyx_v_x3, __pyx_v_y3) + __pyx_f_9photutils_8geometry_4core_overlap_area_triangle_unit_circle(__pyx_v_x1, __pyx_v_y1, __pyx_v_x4, __pyx_v_y4, __pyx_v_x3, __pyx_v_y3)) * __pyx_v_scale); goto __pyx_L0; /* "photutils/geometry/elliptical_overlap.pyx":168 * * * cdef double elliptical_overlap_single_exact(double xmin, double ymin, # <<<<<<<<<<<<<< * double xmax, double ymax, * double rx, double ry, */ /* function exit code */ __pyx_L1_error:; __Pyx_WriteUnraisable("photutils.geometry.elliptical_overlap.elliptical_overlap_single_exact", __pyx_clineno, __pyx_lineno, __pyx_filename, 1, 0); __pyx_r = 0; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":214 * # experimental exception made for __getbuffer__ and __releasebuffer__ * # -- the details of this may change. * def __getbuffer__(ndarray self, Py_buffer* info, int flags): # <<<<<<<<<<<<<< * # This implementation of getbuffer is geared towards Cython * # requirements, and does not yet fullfill the PEP. */ /* Python wrapper */ static CYTHON_UNUSED int __pyx_pw_5numpy_7ndarray_1__getbuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ static CYTHON_UNUSED int __pyx_pw_5numpy_7ndarray_1__getbuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0); __pyx_r = __pyx_pf_5numpy_7ndarray___getbuffer__(((PyArrayObject *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_v_copy_shape; int __pyx_v_i; int __pyx_v_ndim; int __pyx_v_endian_detector; int __pyx_v_little_endian; int __pyx_v_t; char *__pyx_v_f; PyArray_Descr *__pyx_v_descr = 0; int __pyx_v_offset; int __pyx_v_hasfields; int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; int __pyx_t_4; int __pyx_t_5; PyObject *__pyx_t_6 = NULL; char *__pyx_t_7; __Pyx_RefNannySetupContext("__getbuffer__", 0); if (__pyx_v_info != NULL) { __pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None); __Pyx_GIVEREF(__pyx_v_info->obj); } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":220 * # of flags * * if info == NULL: return # <<<<<<<<<<<<<< * * cdef int copy_shape, i, ndim */ __pyx_t_1 = ((__pyx_v_info == NULL) != 0); if (__pyx_t_1) { __pyx_r = 0; goto __pyx_L0; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":223 * * cdef int copy_shape, i, ndim * cdef int endian_detector = 1 # <<<<<<<<<<<<<< * cdef bint little_endian = ((&endian_detector)[0] != 0) * */ __pyx_v_endian_detector = 1; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":224 * cdef int copy_shape, i, ndim * cdef int endian_detector = 1 * cdef bint little_endian = 
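/*
 * The exact routine above rotates the four pixel corners by -theta and divides
 * the coordinates by (rx, ry), so the ellipse becomes the unit circle and the
 * pixel becomes a quadrilateral. That quadrilateral is split along the
 * (x1, y1)-(x3, y3) diagonal into two triangles, each triangle's overlap with
 * the unit circle is computed by overlap_area_triangle_unit_circle from
 * photutils.geometry.core, and the sum is multiplied back by scale = rx * ry,
 * the factor by which the reprojection shrank areas. In Python, with
 * overlap_area_triangle_unit_circle standing for that C helper:
 *
 *     from math import cos, sin
 *
 *     def elliptical_overlap_single_exact(xmin, ymin, xmax, ymax, rx, ry, theta):
 *         # Exact overlap area of the rectangle [xmin, xmax] x [ymin, ymax] with
 *         # an origin-centred ellipse of semi-axes rx, ry rotated by theta.
 *         cos_m_theta = cos(-theta)
 *         sin_m_theta = sin(-theta)
 *         scale = rx * ry                  # area shrink factor of the reprojection
 *         # Map each corner into the frame in which the ellipse is the unit circle
 *         x1, y1 = ((xmin * cos_m_theta - ymin * sin_m_theta) / rx,
 *                   (xmin * sin_m_theta + ymin * cos_m_theta) / ry)
 *         x2, y2 = ((xmax * cos_m_theta - ymin * sin_m_theta) / rx,
 *                   (xmax * sin_m_theta + ymin * cos_m_theta) / ry)
 *         x3, y3 = ((xmax * cos_m_theta - ymax * sin_m_theta) / rx,
 *                   (xmax * sin_m_theta + ymax * cos_m_theta) / ry)
 *         x4, y4 = ((xmin * cos_m_theta - ymax * sin_m_theta) / rx,
 *                   (xmin * sin_m_theta + ymax * cos_m_theta) / ry)
 *         # Two triangles sharing the (x1, y1)-(x3, y3) diagonal; undo the scaling
 *         return (overlap_area_triangle_unit_circle(x1, y1, x2, y2, x3, y3) +
 *                 overlap_area_triangle_unit_circle(x1, y1, x4, y4, x3, y3)) * scale
 */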
((&endian_detector)[0] != 0) # <<<<<<<<<<<<<< * * ndim = PyArray_NDIM(self) */ __pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":226 * cdef bint little_endian = ((&endian_detector)[0] != 0) * * ndim = PyArray_NDIM(self) # <<<<<<<<<<<<<< * * if sizeof(npy_intp) != sizeof(Py_ssize_t): */ __pyx_v_ndim = PyArray_NDIM(__pyx_v_self); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":228 * ndim = PyArray_NDIM(self) * * if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<< * copy_shape = 1 * else: */ __pyx_t_1 = (((sizeof(npy_intp)) != (sizeof(Py_ssize_t))) != 0); if (__pyx_t_1) { /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":229 * * if sizeof(npy_intp) != sizeof(Py_ssize_t): * copy_shape = 1 # <<<<<<<<<<<<<< * else: * copy_shape = 0 */ __pyx_v_copy_shape = 1; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":228 * ndim = PyArray_NDIM(self) * * if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<< * copy_shape = 1 * else: */ goto __pyx_L4; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":231 * copy_shape = 1 * else: * copy_shape = 0 # <<<<<<<<<<<<<< * * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) */ /*else*/ { __pyx_v_copy_shape = 0; } __pyx_L4:; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":233 * copy_shape = 0 * * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) # <<<<<<<<<<<<<< * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): * raise ValueError(u"ndarray is not C contiguous") */ __pyx_t_2 = (((__pyx_v_flags & PyBUF_C_CONTIGUOUS) == PyBUF_C_CONTIGUOUS) != 0); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L6_bool_binop_done; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":234 * * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): # <<<<<<<<<<<<<< * raise ValueError(u"ndarray is not C contiguous") * */ __pyx_t_2 = ((!(PyArray_CHKFLAGS(__pyx_v_self, NPY_C_CONTIGUOUS) != 0)) != 0); __pyx_t_1 = __pyx_t_2; __pyx_L6_bool_binop_done:; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":233 * copy_shape = 0 * * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) # <<<<<<<<<<<<<< * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): * raise ValueError(u"ndarray is not C contiguous") */ if (__pyx_t_1) { /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":235 * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): * raise ValueError(u"ndarray is not C contiguous") # <<<<<<<<<<<<<< * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple_, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 235, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 235, __pyx_L1_error) /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":233 * copy_shape = 0 * * if ((flags & 
pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) # <<<<<<<<<<<<<< * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): * raise ValueError(u"ndarray is not C contiguous") */ } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":237 * raise ValueError(u"ndarray is not C contiguous") * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) # <<<<<<<<<<<<<< * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): * raise ValueError(u"ndarray is not Fortran contiguous") */ __pyx_t_2 = (((__pyx_v_flags & PyBUF_F_CONTIGUOUS) == PyBUF_F_CONTIGUOUS) != 0); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L9_bool_binop_done; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":238 * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): # <<<<<<<<<<<<<< * raise ValueError(u"ndarray is not Fortran contiguous") * */ __pyx_t_2 = ((!(PyArray_CHKFLAGS(__pyx_v_self, NPY_F_CONTIGUOUS) != 0)) != 0); __pyx_t_1 = __pyx_t_2; __pyx_L9_bool_binop_done:; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":237 * raise ValueError(u"ndarray is not C contiguous") * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) # <<<<<<<<<<<<<< * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): * raise ValueError(u"ndarray is not Fortran contiguous") */ if (__pyx_t_1) { /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":239 * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): * raise ValueError(u"ndarray is not Fortran contiguous") # <<<<<<<<<<<<<< * * info.buf = PyArray_DATA(self) */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__2, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 239, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 239, __pyx_L1_error) /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":237 * raise ValueError(u"ndarray is not C contiguous") * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) # <<<<<<<<<<<<<< * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): * raise ValueError(u"ndarray is not Fortran contiguous") */ } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":241 * raise ValueError(u"ndarray is not Fortran contiguous") * * info.buf = PyArray_DATA(self) # <<<<<<<<<<<<<< * info.ndim = ndim * if copy_shape: */ __pyx_v_info->buf = PyArray_DATA(__pyx_v_self); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":242 * * info.buf = PyArray_DATA(self) * info.ndim = ndim # <<<<<<<<<<<<<< * if copy_shape: * # Allocate new buffer for strides and shape info. */ __pyx_v_info->ndim = __pyx_v_ndim; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":243 * info.buf = PyArray_DATA(self) * info.ndim = ndim * if copy_shape: # <<<<<<<<<<<<<< * # Allocate new buffer for strides and shape info. * # This is allocated as one block, strides first. */ __pyx_t_1 = (__pyx_v_copy_shape != 0); if (__pyx_t_1) { /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":246 * # Allocate new buffer for strides and shape info. 
* # This is allocated as one block, strides first. * info.strides = PyObject_Malloc(sizeof(Py_ssize_t) * 2 * ndim) # <<<<<<<<<<<<<< * info.shape = info.strides + ndim * for i in range(ndim): */ __pyx_v_info->strides = ((Py_ssize_t *)PyObject_Malloc((((sizeof(Py_ssize_t)) * 2) * ((size_t)__pyx_v_ndim)))); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":247 * # This is allocated as one block, strides first. * info.strides = PyObject_Malloc(sizeof(Py_ssize_t) * 2 * ndim) * info.shape = info.strides + ndim # <<<<<<<<<<<<<< * for i in range(ndim): * info.strides[i] = PyArray_STRIDES(self)[i] */ __pyx_v_info->shape = (__pyx_v_info->strides + __pyx_v_ndim); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":248 * info.strides = PyObject_Malloc(sizeof(Py_ssize_t) * 2 * ndim) * info.shape = info.strides + ndim * for i in range(ndim): # <<<<<<<<<<<<<< * info.strides[i] = PyArray_STRIDES(self)[i] * info.shape[i] = PyArray_DIMS(self)[i] */ __pyx_t_4 = __pyx_v_ndim; for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_4; __pyx_t_5+=1) { __pyx_v_i = __pyx_t_5; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":249 * info.shape = info.strides + ndim * for i in range(ndim): * info.strides[i] = PyArray_STRIDES(self)[i] # <<<<<<<<<<<<<< * info.shape[i] = PyArray_DIMS(self)[i] * else: */ (__pyx_v_info->strides[__pyx_v_i]) = (PyArray_STRIDES(__pyx_v_self)[__pyx_v_i]); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":250 * for i in range(ndim): * info.strides[i] = PyArray_STRIDES(self)[i] * info.shape[i] = PyArray_DIMS(self)[i] # <<<<<<<<<<<<<< * else: * info.strides = PyArray_STRIDES(self) */ (__pyx_v_info->shape[__pyx_v_i]) = (PyArray_DIMS(__pyx_v_self)[__pyx_v_i]); } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":243 * info.buf = PyArray_DATA(self) * info.ndim = ndim * if copy_shape: # <<<<<<<<<<<<<< * # Allocate new buffer for strides and shape info. * # This is allocated as one block, strides first. 
*/ goto __pyx_L11; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":252 * info.shape[i] = PyArray_DIMS(self)[i] * else: * info.strides = PyArray_STRIDES(self) # <<<<<<<<<<<<<< * info.shape = PyArray_DIMS(self) * info.suboffsets = NULL */ /*else*/ { __pyx_v_info->strides = ((Py_ssize_t *)PyArray_STRIDES(__pyx_v_self)); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":253 * else: * info.strides = PyArray_STRIDES(self) * info.shape = PyArray_DIMS(self) # <<<<<<<<<<<<<< * info.suboffsets = NULL * info.itemsize = PyArray_ITEMSIZE(self) */ __pyx_v_info->shape = ((Py_ssize_t *)PyArray_DIMS(__pyx_v_self)); } __pyx_L11:; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":254 * info.strides = PyArray_STRIDES(self) * info.shape = PyArray_DIMS(self) * info.suboffsets = NULL # <<<<<<<<<<<<<< * info.itemsize = PyArray_ITEMSIZE(self) * info.readonly = not PyArray_ISWRITEABLE(self) */ __pyx_v_info->suboffsets = NULL; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":255 * info.shape = PyArray_DIMS(self) * info.suboffsets = NULL * info.itemsize = PyArray_ITEMSIZE(self) # <<<<<<<<<<<<<< * info.readonly = not PyArray_ISWRITEABLE(self) * */ __pyx_v_info->itemsize = PyArray_ITEMSIZE(__pyx_v_self); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":256 * info.suboffsets = NULL * info.itemsize = PyArray_ITEMSIZE(self) * info.readonly = not PyArray_ISWRITEABLE(self) # <<<<<<<<<<<<<< * * cdef int t */ __pyx_v_info->readonly = (!(PyArray_ISWRITEABLE(__pyx_v_self) != 0)); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":259 * * cdef int t * cdef char* f = NULL # <<<<<<<<<<<<<< * cdef dtype descr = self.descr * cdef int offset */ __pyx_v_f = NULL; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":260 * cdef int t * cdef char* f = NULL * cdef dtype descr = self.descr # <<<<<<<<<<<<<< * cdef int offset * */ __pyx_t_3 = ((PyObject *)__pyx_v_self->descr); __Pyx_INCREF(__pyx_t_3); __pyx_v_descr = ((PyArray_Descr *)__pyx_t_3); __pyx_t_3 = 0; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":263 * cdef int offset * * cdef bint hasfields = PyDataType_HASFIELDS(descr) # <<<<<<<<<<<<<< * * if not hasfields and not copy_shape: */ __pyx_v_hasfields = PyDataType_HASFIELDS(__pyx_v_descr); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":265 * cdef bint hasfields = PyDataType_HASFIELDS(descr) * * if not hasfields and not copy_shape: # <<<<<<<<<<<<<< * # do not call releasebuffer * info.obj = None */ __pyx_t_2 = ((!(__pyx_v_hasfields != 0)) != 0); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L15_bool_binop_done; } __pyx_t_2 = ((!(__pyx_v_copy_shape != 0)) != 0); __pyx_t_1 = __pyx_t_2; __pyx_L15_bool_binop_done:; if (__pyx_t_1) { /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":267 * if not hasfields and not copy_shape: * # do not call releasebuffer * info.obj = None # <<<<<<<<<<<<<< * else: * # need to call releasebuffer */ __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = Py_None; /* 
"../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":265 * cdef bint hasfields = PyDataType_HASFIELDS(descr) * * if not hasfields and not copy_shape: # <<<<<<<<<<<<<< * # do not call releasebuffer * info.obj = None */ goto __pyx_L14; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":270 * else: * # need to call releasebuffer * info.obj = self # <<<<<<<<<<<<<< * * if not hasfields: */ /*else*/ { __Pyx_INCREF(((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = ((PyObject *)__pyx_v_self); } __pyx_L14:; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":272 * info.obj = self * * if not hasfields: # <<<<<<<<<<<<<< * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or */ __pyx_t_1 = ((!(__pyx_v_hasfields != 0)) != 0); if (__pyx_t_1) { /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":273 * * if not hasfields: * t = descr.type_num # <<<<<<<<<<<<<< * if ((descr.byteorder == c'>' and little_endian) or * (descr.byteorder == c'<' and not little_endian)): */ __pyx_t_4 = __pyx_v_descr->type_num; __pyx_v_t = __pyx_t_4; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":274 * if not hasfields: * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") */ __pyx_t_2 = ((__pyx_v_descr->byteorder == '>') != 0); if (!__pyx_t_2) { goto __pyx_L20_next_or; } else { } __pyx_t_2 = (__pyx_v_little_endian != 0); if (!__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L19_bool_binop_done; } __pyx_L20_next_or:; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":275 * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or * (descr.byteorder == c'<' and not little_endian)): # <<<<<<<<<<<<<< * raise ValueError(u"Non-native byte order not supported") * if t == NPY_BYTE: f = "b" */ __pyx_t_2 = ((__pyx_v_descr->byteorder == '<') != 0); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L19_bool_binop_done; } __pyx_t_2 = ((!(__pyx_v_little_endian != 0)) != 0); __pyx_t_1 = __pyx_t_2; __pyx_L19_bool_binop_done:; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":274 * if not hasfields: * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") */ if (__pyx_t_1) { /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":276 * if ((descr.byteorder == c'>' and little_endian) or * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__3, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 276, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 276, __pyx_L1_error) /* 
"../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":274 * if not hasfields: * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") */ } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":277 * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") * if t == NPY_BYTE: f = "b" # <<<<<<<<<<<<<< * elif t == NPY_UBYTE: f = "B" * elif t == NPY_SHORT: f = "h" */ switch (__pyx_v_t) { case NPY_BYTE: __pyx_v_f = ((char *)"b"); break; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":278 * raise ValueError(u"Non-native byte order not supported") * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" # <<<<<<<<<<<<<< * elif t == NPY_SHORT: f = "h" * elif t == NPY_USHORT: f = "H" */ case NPY_UBYTE: __pyx_v_f = ((char *)"B"); break; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":279 * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" * elif t == NPY_SHORT: f = "h" # <<<<<<<<<<<<<< * elif t == NPY_USHORT: f = "H" * elif t == NPY_INT: f = "i" */ case NPY_SHORT: __pyx_v_f = ((char *)"h"); break; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":280 * elif t == NPY_UBYTE: f = "B" * elif t == NPY_SHORT: f = "h" * elif t == NPY_USHORT: f = "H" # <<<<<<<<<<<<<< * elif t == NPY_INT: f = "i" * elif t == NPY_UINT: f = "I" */ case NPY_USHORT: __pyx_v_f = ((char *)"H"); break; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":281 * elif t == NPY_SHORT: f = "h" * elif t == NPY_USHORT: f = "H" * elif t == NPY_INT: f = "i" # <<<<<<<<<<<<<< * elif t == NPY_UINT: f = "I" * elif t == NPY_LONG: f = "l" */ case NPY_INT: __pyx_v_f = ((char *)"i"); break; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":282 * elif t == NPY_USHORT: f = "H" * elif t == NPY_INT: f = "i" * elif t == NPY_UINT: f = "I" # <<<<<<<<<<<<<< * elif t == NPY_LONG: f = "l" * elif t == NPY_ULONG: f = "L" */ case NPY_UINT: __pyx_v_f = ((char *)"I"); break; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":283 * elif t == NPY_INT: f = "i" * elif t == NPY_UINT: f = "I" * elif t == NPY_LONG: f = "l" # <<<<<<<<<<<<<< * elif t == NPY_ULONG: f = "L" * elif t == NPY_LONGLONG: f = "q" */ case NPY_LONG: __pyx_v_f = ((char *)"l"); break; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":284 * elif t == NPY_UINT: f = "I" * elif t == NPY_LONG: f = "l" * elif t == NPY_ULONG: f = "L" # <<<<<<<<<<<<<< * elif t == NPY_LONGLONG: f = "q" * elif t == NPY_ULONGLONG: f = "Q" */ case NPY_ULONG: __pyx_v_f = ((char *)"L"); break; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":285 * elif t == NPY_LONG: f = "l" * elif t == NPY_ULONG: f = "L" * elif t == NPY_LONGLONG: f = "q" # <<<<<<<<<<<<<< * elif t == NPY_ULONGLONG: f = "Q" * elif t == NPY_FLOAT: f = "f" */ case NPY_LONGLONG: __pyx_v_f = ((char *)"q"); break; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":286 * elif t == NPY_ULONG: f = "L" * elif t == NPY_LONGLONG: f = "q" * elif t == 
NPY_ULONGLONG: f = "Q" # <<<<<<<<<<<<<< * elif t == NPY_FLOAT: f = "f" * elif t == NPY_DOUBLE: f = "d" */ case NPY_ULONGLONG: __pyx_v_f = ((char *)"Q"); break; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":287 * elif t == NPY_LONGLONG: f = "q" * elif t == NPY_ULONGLONG: f = "Q" * elif t == NPY_FLOAT: f = "f" # <<<<<<<<<<<<<< * elif t == NPY_DOUBLE: f = "d" * elif t == NPY_LONGDOUBLE: f = "g" */ case NPY_FLOAT: __pyx_v_f = ((char *)"f"); break; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":288 * elif t == NPY_ULONGLONG: f = "Q" * elif t == NPY_FLOAT: f = "f" * elif t == NPY_DOUBLE: f = "d" # <<<<<<<<<<<<<< * elif t == NPY_LONGDOUBLE: f = "g" * elif t == NPY_CFLOAT: f = "Zf" */ case NPY_DOUBLE: __pyx_v_f = ((char *)"d"); break; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":289 * elif t == NPY_FLOAT: f = "f" * elif t == NPY_DOUBLE: f = "d" * elif t == NPY_LONGDOUBLE: f = "g" # <<<<<<<<<<<<<< * elif t == NPY_CFLOAT: f = "Zf" * elif t == NPY_CDOUBLE: f = "Zd" */ case NPY_LONGDOUBLE: __pyx_v_f = ((char *)"g"); break; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":290 * elif t == NPY_DOUBLE: f = "d" * elif t == NPY_LONGDOUBLE: f = "g" * elif t == NPY_CFLOAT: f = "Zf" # <<<<<<<<<<<<<< * elif t == NPY_CDOUBLE: f = "Zd" * elif t == NPY_CLONGDOUBLE: f = "Zg" */ case NPY_CFLOAT: __pyx_v_f = ((char *)"Zf"); break; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":291 * elif t == NPY_LONGDOUBLE: f = "g" * elif t == NPY_CFLOAT: f = "Zf" * elif t == NPY_CDOUBLE: f = "Zd" # <<<<<<<<<<<<<< * elif t == NPY_CLONGDOUBLE: f = "Zg" * elif t == NPY_OBJECT: f = "O" */ case NPY_CDOUBLE: __pyx_v_f = ((char *)"Zd"); break; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":292 * elif t == NPY_CFLOAT: f = "Zf" * elif t == NPY_CDOUBLE: f = "Zd" * elif t == NPY_CLONGDOUBLE: f = "Zg" # <<<<<<<<<<<<<< * elif t == NPY_OBJECT: f = "O" * else: */ case NPY_CLONGDOUBLE: __pyx_v_f = ((char *)"Zg"); break; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":293 * elif t == NPY_CDOUBLE: f = "Zd" * elif t == NPY_CLONGDOUBLE: f = "Zg" * elif t == NPY_OBJECT: f = "O" # <<<<<<<<<<<<<< * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) */ case NPY_OBJECT: __pyx_v_f = ((char *)"O"); break; default: /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":295 * elif t == NPY_OBJECT: f = "O" * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) # <<<<<<<<<<<<<< * info.format = f * return */ __pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_t); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 295, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_6 = PyUnicode_Format(__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, __pyx_t_3); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 295, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 295, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_3, NULL); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 295, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 
= 0; __Pyx_Raise(__pyx_t_6, 0, 0, 0); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __PYX_ERR(1, 295, __pyx_L1_error) break; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":296 * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) * info.format = f # <<<<<<<<<<<<<< * return * else: */ __pyx_v_info->format = __pyx_v_f; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":297 * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) * info.format = f * return # <<<<<<<<<<<<<< * else: * info.format = PyObject_Malloc(_buffer_format_string_len) */ __pyx_r = 0; goto __pyx_L0; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":272 * info.obj = self * * if not hasfields: # <<<<<<<<<<<<<< * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or */ } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":299 * return * else: * info.format = PyObject_Malloc(_buffer_format_string_len) # <<<<<<<<<<<<<< * info.format[0] = c'^' # Native data types, manual alignment * offset = 0 */ /*else*/ { __pyx_v_info->format = ((char *)PyObject_Malloc(0xFF)); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":300 * else: * info.format = PyObject_Malloc(_buffer_format_string_len) * info.format[0] = c'^' # Native data types, manual alignment # <<<<<<<<<<<<<< * offset = 0 * f = _util_dtypestring(descr, info.format + 1, */ (__pyx_v_info->format[0]) = '^'; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":301 * info.format = PyObject_Malloc(_buffer_format_string_len) * info.format[0] = c'^' # Native data types, manual alignment * offset = 0 # <<<<<<<<<<<<<< * f = _util_dtypestring(descr, info.format + 1, * info.format + _buffer_format_string_len, */ __pyx_v_offset = 0; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":302 * info.format[0] = c'^' # Native data types, manual alignment * offset = 0 * f = _util_dtypestring(descr, info.format + 1, # <<<<<<<<<<<<<< * info.format + _buffer_format_string_len, * &offset) */ __pyx_t_7 = __pyx_f_5numpy__util_dtypestring(__pyx_v_descr, (__pyx_v_info->format + 1), (__pyx_v_info->format + 0xFF), (&__pyx_v_offset)); if (unlikely(__pyx_t_7 == ((char *)NULL))) __PYX_ERR(1, 302, __pyx_L1_error) __pyx_v_f = __pyx_t_7; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":305 * info.format + _buffer_format_string_len, * &offset) * f[0] = c'\0' # Terminate format string # <<<<<<<<<<<<<< * * def __releasebuffer__(ndarray self, Py_buffer* info): */ (__pyx_v_f[0]) = '\x00'; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":214 * # experimental exception made for __getbuffer__ and __releasebuffer__ * # -- the details of this may change. * def __getbuffer__(ndarray self, Py_buffer* info, int flags): # <<<<<<<<<<<<<< * # This implementation of getbuffer is geared towards Cython * # requirements, and does not yet fullfill the PEP. 
*/ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_6); __Pyx_AddTraceback("numpy.ndarray.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; if (__pyx_v_info != NULL && __pyx_v_info->obj != NULL) { __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = NULL; } goto __pyx_L2; __pyx_L0:; if (__pyx_v_info != NULL && __pyx_v_info->obj == Py_None) { __Pyx_GOTREF(Py_None); __Pyx_DECREF(Py_None); __pyx_v_info->obj = NULL; } __pyx_L2:; __Pyx_XDECREF((PyObject *)__pyx_v_descr); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":307 * f[0] = c'\0' # Terminate format string * * def __releasebuffer__(ndarray self, Py_buffer* info): # <<<<<<<<<<<<<< * if PyArray_HASFIELDS(self): * PyObject_Free(info.format) */ /* Python wrapper */ static CYTHON_UNUSED void __pyx_pw_5numpy_7ndarray_3__releasebuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info); /*proto*/ static CYTHON_UNUSED void __pyx_pw_5numpy_7ndarray_3__releasebuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__releasebuffer__ (wrapper)", 0); __pyx_pf_5numpy_7ndarray_2__releasebuffer__(((PyArrayObject *)__pyx_v_self), ((Py_buffer *)__pyx_v_info)); /* function exit code */ __Pyx_RefNannyFinishContext(); } static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info) { __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("__releasebuffer__", 0); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":308 * * def __releasebuffer__(ndarray self, Py_buffer* info): * if PyArray_HASFIELDS(self): # <<<<<<<<<<<<<< * PyObject_Free(info.format) * if sizeof(npy_intp) != sizeof(Py_ssize_t): */ __pyx_t_1 = (PyArray_HASFIELDS(__pyx_v_self) != 0); if (__pyx_t_1) { /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":309 * def __releasebuffer__(ndarray self, Py_buffer* info): * if PyArray_HASFIELDS(self): * PyObject_Free(info.format) # <<<<<<<<<<<<<< * if sizeof(npy_intp) != sizeof(Py_ssize_t): * PyObject_Free(info.strides) */ PyObject_Free(__pyx_v_info->format); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":308 * * def __releasebuffer__(ndarray self, Py_buffer* info): * if PyArray_HASFIELDS(self): # <<<<<<<<<<<<<< * PyObject_Free(info.format) * if sizeof(npy_intp) != sizeof(Py_ssize_t): */ } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":310 * if PyArray_HASFIELDS(self): * PyObject_Free(info.format) * if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<< * PyObject_Free(info.strides) * # info.shape was stored after info.strides in the same block */ __pyx_t_1 = (((sizeof(npy_intp)) != (sizeof(Py_ssize_t))) != 0); if (__pyx_t_1) { /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":311 * PyObject_Free(info.format) * if sizeof(npy_intp) != sizeof(Py_ssize_t): * PyObject_Free(info.strides) # <<<<<<<<<<<<<< * # info.shape was stored after info.strides in the same block * */ PyObject_Free(__pyx_v_info->strides); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":310 * if PyArray_HASFIELDS(self): * PyObject_Free(info.format) * if 
sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<< * PyObject_Free(info.strides) * # info.shape was stored after info.strides in the same block */ } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":307 * f[0] = c'\0' # Terminate format string * * def __releasebuffer__(ndarray self, Py_buffer* info): # <<<<<<<<<<<<<< * if PyArray_HASFIELDS(self): * PyObject_Free(info.format) */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":788 * ctypedef npy_cdouble complex_t * * cdef inline object PyArray_MultiIterNew1(a): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(1, a) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew1(PyObject *__pyx_v_a) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("PyArray_MultiIterNew1", 0); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":789 * * cdef inline object PyArray_MultiIterNew1(a): * return PyArray_MultiIterNew(1, a) # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew2(a, b): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(1, ((void *)__pyx_v_a)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 789, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":788 * ctypedef npy_cdouble complex_t * * cdef inline object PyArray_MultiIterNew1(a): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(1, a) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew1", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":791 * return PyArray_MultiIterNew(1, a) * * cdef inline object PyArray_MultiIterNew2(a, b): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(2, a, b) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew2(PyObject *__pyx_v_a, PyObject *__pyx_v_b) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("PyArray_MultiIterNew2", 0); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":792 * * cdef inline object PyArray_MultiIterNew2(a, b): * return PyArray_MultiIterNew(2, a, b) # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew3(a, b, c): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(2, ((void *)__pyx_v_a), ((void *)__pyx_v_b)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 792, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":791 * return PyArray_MultiIterNew(1, a) * * cdef inline object PyArray_MultiIterNew2(a, b): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(2, a, b) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew2", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* 
"../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":794 * return PyArray_MultiIterNew(2, a, b) * * cdef inline object PyArray_MultiIterNew3(a, b, c): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(3, a, b, c) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew3(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("PyArray_MultiIterNew3", 0); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":795 * * cdef inline object PyArray_MultiIterNew3(a, b, c): * return PyArray_MultiIterNew(3, a, b, c) # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew4(a, b, c, d): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(3, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 795, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":794 * return PyArray_MultiIterNew(2, a, b) * * cdef inline object PyArray_MultiIterNew3(a, b, c): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(3, a, b, c) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew3", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":797 * return PyArray_MultiIterNew(3, a, b, c) * * cdef inline object PyArray_MultiIterNew4(a, b, c, d): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(4, a, b, c, d) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew4(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("PyArray_MultiIterNew4", 0); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":798 * * cdef inline object PyArray_MultiIterNew4(a, b, c, d): * return PyArray_MultiIterNew(4, a, b, c, d) # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(4, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 798, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":797 * return PyArray_MultiIterNew(3, a, b, c) * * cdef inline object PyArray_MultiIterNew4(a, b, c, d): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(4, a, b, c, d) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew4", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":800 * return PyArray_MultiIterNew(4, a, b, c, d) * * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(5, a, b, c, d, e) * */ static CYTHON_INLINE 
PyObject *__pyx_f_5numpy_PyArray_MultiIterNew5(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d, PyObject *__pyx_v_e) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("PyArray_MultiIterNew5", 0); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":801 * * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): * return PyArray_MultiIterNew(5, a, b, c, d, e) # <<<<<<<<<<<<<< * * cdef inline tuple PyDataType_SHAPE(dtype d): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(5, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d), ((void *)__pyx_v_e)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 801, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":800 * return PyArray_MultiIterNew(4, a, b, c, d) * * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(5, a, b, c, d, e) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew5", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":803 * return PyArray_MultiIterNew(5, a, b, c, d, e) * * cdef inline tuple PyDataType_SHAPE(dtype d): # <<<<<<<<<<<<<< * if PyDataType_HASSUBARRAY(d): * return d.subarray.shape */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyDataType_SHAPE(PyArray_Descr *__pyx_v_d) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("PyDataType_SHAPE", 0); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":804 * * cdef inline tuple PyDataType_SHAPE(dtype d): * if PyDataType_HASSUBARRAY(d): # <<<<<<<<<<<<<< * return d.subarray.shape * else: */ __pyx_t_1 = (PyDataType_HASSUBARRAY(__pyx_v_d) != 0); if (__pyx_t_1) { /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":805 * cdef inline tuple PyDataType_SHAPE(dtype d): * if PyDataType_HASSUBARRAY(d): * return d.subarray.shape # <<<<<<<<<<<<<< * else: * return () */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject*)__pyx_v_d->subarray->shape)); __pyx_r = ((PyObject*)__pyx_v_d->subarray->shape); goto __pyx_L0; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":804 * * cdef inline tuple PyDataType_SHAPE(dtype d): * if PyDataType_HASSUBARRAY(d): # <<<<<<<<<<<<<< * return d.subarray.shape * else: */ } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":807 * return d.subarray.shape * else: * return () # <<<<<<<<<<<<<< * * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: */ /*else*/ { __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_empty_tuple); __pyx_r = __pyx_empty_tuple; goto __pyx_L0; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":803 * return PyArray_MultiIterNew(5, a, b, c, d, e) * * cdef inline tuple PyDataType_SHAPE(dtype d): # <<<<<<<<<<<<<< * if PyDataType_HASSUBARRAY(d): * return d.subarray.shape */ /* function exit code */ __pyx_L0:; 
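/* Shared exit for PyDataType_SHAPE: __pyx_r now owns a reference to either
   d.subarray.shape or the shared empty tuple; hand that reference back to the
   caller and close the refnanny context before returning. */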
__Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":809 * return () * * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: # <<<<<<<<<<<<<< * # Recursive utility function used in __getbuffer__ to get format * # string. The new location in the format string is returned. */ static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx_v_descr, char *__pyx_v_f, char *__pyx_v_end, int *__pyx_v_offset) { PyArray_Descr *__pyx_v_child = 0; int __pyx_v_endian_detector; int __pyx_v_little_endian; PyObject *__pyx_v_fields = 0; PyObject *__pyx_v_childname = NULL; PyObject *__pyx_v_new_offset = NULL; PyObject *__pyx_v_t = NULL; char *__pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; Py_ssize_t __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; int __pyx_t_5; int __pyx_t_6; int __pyx_t_7; long __pyx_t_8; char *__pyx_t_9; __Pyx_RefNannySetupContext("_util_dtypestring", 0); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":814 * * cdef dtype child * cdef int endian_detector = 1 # <<<<<<<<<<<<<< * cdef bint little_endian = ((&endian_detector)[0] != 0) * cdef tuple fields */ __pyx_v_endian_detector = 1; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":815 * cdef dtype child * cdef int endian_detector = 1 * cdef bint little_endian = ((&endian_detector)[0] != 0) # <<<<<<<<<<<<<< * cdef tuple fields * */ __pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":818 * cdef tuple fields * * for childname in descr.names: # <<<<<<<<<<<<<< * fields = descr.fields[childname] * child, new_offset = fields */ if (unlikely(__pyx_v_descr->names == Py_None)) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable"); __PYX_ERR(1, 818, __pyx_L1_error) } __pyx_t_1 = __pyx_v_descr->names; __Pyx_INCREF(__pyx_t_1); __pyx_t_2 = 0; for (;;) { if (__pyx_t_2 >= PyTuple_GET_SIZE(__pyx_t_1)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_3 = PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_2); __Pyx_INCREF(__pyx_t_3); __pyx_t_2++; if (unlikely(0 < 0)) __PYX_ERR(1, 818, __pyx_L1_error) #else __pyx_t_3 = PySequence_ITEM(__pyx_t_1, __pyx_t_2); __pyx_t_2++; if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 818, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); #endif __Pyx_XDECREF_SET(__pyx_v_childname, __pyx_t_3); __pyx_t_3 = 0; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":819 * * for childname in descr.names: * fields = descr.fields[childname] # <<<<<<<<<<<<<< * child, new_offset = fields * */ if (unlikely(__pyx_v_descr->fields == Py_None)) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable"); __PYX_ERR(1, 819, __pyx_L1_error) } __pyx_t_3 = __Pyx_PyDict_GetItem(__pyx_v_descr->fields, __pyx_v_childname); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 819, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); if (!(likely(PyTuple_CheckExact(__pyx_t_3))||((__pyx_t_3) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_t_3)->tp_name), 0))) __PYX_ERR(1, 819, __pyx_L1_error) __Pyx_XDECREF_SET(__pyx_v_fields, ((PyObject*)__pyx_t_3)); __pyx_t_3 = 0; /* 
"../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":820 * for childname in descr.names: * fields = descr.fields[childname] * child, new_offset = fields # <<<<<<<<<<<<<< * * if (end - f) - (new_offset - offset[0]) < 15: */ if (likely(__pyx_v_fields != Py_None)) { PyObject* sequence = __pyx_v_fields; #if !CYTHON_COMPILING_IN_PYPY Py_ssize_t size = Py_SIZE(sequence); #else Py_ssize_t size = PySequence_Size(sequence); #endif if (unlikely(size != 2)) { if (size > 2) __Pyx_RaiseTooManyValuesError(2); else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); __PYX_ERR(1, 820, __pyx_L1_error) } #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_3 = PyTuple_GET_ITEM(sequence, 0); __pyx_t_4 = PyTuple_GET_ITEM(sequence, 1); __Pyx_INCREF(__pyx_t_3); __Pyx_INCREF(__pyx_t_4); #else __pyx_t_3 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 820, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 820, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); #endif } else { __Pyx_RaiseNoneNotIterableError(); __PYX_ERR(1, 820, __pyx_L1_error) } if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_dtype))))) __PYX_ERR(1, 820, __pyx_L1_error) __Pyx_XDECREF_SET(__pyx_v_child, ((PyArray_Descr *)__pyx_t_3)); __pyx_t_3 = 0; __Pyx_XDECREF_SET(__pyx_v_new_offset, __pyx_t_4); __pyx_t_4 = 0; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":822 * child, new_offset = fields * * if (end - f) - (new_offset - offset[0]) < 15: # <<<<<<<<<<<<<< * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") * */ __pyx_t_4 = __Pyx_PyInt_From_int((__pyx_v_offset[0])); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 822, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyNumber_Subtract(__pyx_v_new_offset, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 822, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_5 = __Pyx_PyInt_As_int(__pyx_t_3); if (unlikely((__pyx_t_5 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 822, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = ((((__pyx_v_end - __pyx_v_f) - ((int)__pyx_t_5)) < 15) != 0); if (__pyx_t_6) { /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":823 * * if (end - f) - (new_offset - offset[0]) < 15: * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") # <<<<<<<<<<<<<< * * if ((child.byteorder == c'>' and little_endian) or */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_RuntimeError, __pyx_tuple__4, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 823, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 823, __pyx_L1_error) /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":822 * child, new_offset = fields * * if (end - f) - (new_offset - offset[0]) < 15: # <<<<<<<<<<<<<< * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") * */ } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":825 * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") * * if ((child.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< * (child.byteorder == c'<' and not little_endian)): * raise 
ValueError(u"Non-native byte order not supported") */ __pyx_t_7 = ((__pyx_v_child->byteorder == '>') != 0); if (!__pyx_t_7) { goto __pyx_L8_next_or; } else { } __pyx_t_7 = (__pyx_v_little_endian != 0); if (!__pyx_t_7) { } else { __pyx_t_6 = __pyx_t_7; goto __pyx_L7_bool_binop_done; } __pyx_L8_next_or:; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":826 * * if ((child.byteorder == c'>' and little_endian) or * (child.byteorder == c'<' and not little_endian)): # <<<<<<<<<<<<<< * raise ValueError(u"Non-native byte order not supported") * # One could encode it in the format string and have Cython */ __pyx_t_7 = ((__pyx_v_child->byteorder == '<') != 0); if (__pyx_t_7) { } else { __pyx_t_6 = __pyx_t_7; goto __pyx_L7_bool_binop_done; } __pyx_t_7 = ((!(__pyx_v_little_endian != 0)) != 0); __pyx_t_6 = __pyx_t_7; __pyx_L7_bool_binop_done:; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":825 * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") * * if ((child.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< * (child.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") */ if (__pyx_t_6) { /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":827 * if ((child.byteorder == c'>' and little_endian) or * (child.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< * # One could encode it in the format string and have Cython * # complain instead, BUT: < and > in format strings also imply */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__5, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 827, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 827, __pyx_L1_error) /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":825 * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") * * if ((child.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< * (child.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") */ } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":837 * * # Output padding bytes * while offset[0] < new_offset: # <<<<<<<<<<<<<< * f[0] = 120 # "x"; pad byte * f += 1 */ while (1) { __pyx_t_3 = __Pyx_PyInt_From_int((__pyx_v_offset[0])); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 837, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_t_3, __pyx_v_new_offset, Py_LT); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 837, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 837, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (!__pyx_t_6) break; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":838 * # Output padding bytes * while offset[0] < new_offset: * f[0] = 120 # "x"; pad byte # <<<<<<<<<<<<<< * f += 1 * offset[0] += 1 */ (__pyx_v_f[0]) = 0x78; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":839 * while offset[0] < new_offset: * f[0] = 120 # "x"; pad byte * f += 1 # <<<<<<<<<<<<<< * offset[0] += 1 * 
*/ __pyx_v_f = (__pyx_v_f + 1); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":840 * f[0] = 120 # "x"; pad byte * f += 1 * offset[0] += 1 # <<<<<<<<<<<<<< * * offset[0] += child.itemsize */ __pyx_t_8 = 0; (__pyx_v_offset[__pyx_t_8]) = ((__pyx_v_offset[__pyx_t_8]) + 1); } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":842 * offset[0] += 1 * * offset[0] += child.itemsize # <<<<<<<<<<<<<< * * if not PyDataType_HASFIELDS(child): */ __pyx_t_8 = 0; (__pyx_v_offset[__pyx_t_8]) = ((__pyx_v_offset[__pyx_t_8]) + __pyx_v_child->elsize); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":844 * offset[0] += child.itemsize * * if not PyDataType_HASFIELDS(child): # <<<<<<<<<<<<<< * t = child.type_num * if end - f < 5: */ __pyx_t_6 = ((!(PyDataType_HASFIELDS(__pyx_v_child) != 0)) != 0); if (__pyx_t_6) { /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":845 * * if not PyDataType_HASFIELDS(child): * t = child.type_num # <<<<<<<<<<<<<< * if end - f < 5: * raise RuntimeError(u"Format string allocated too short.") */ __pyx_t_4 = __Pyx_PyInt_From_int(__pyx_v_child->type_num); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 845, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_XDECREF_SET(__pyx_v_t, __pyx_t_4); __pyx_t_4 = 0; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":846 * if not PyDataType_HASFIELDS(child): * t = child.type_num * if end - f < 5: # <<<<<<<<<<<<<< * raise RuntimeError(u"Format string allocated too short.") * */ __pyx_t_6 = (((__pyx_v_end - __pyx_v_f) < 5) != 0); if (__pyx_t_6) { /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":847 * t = child.type_num * if end - f < 5: * raise RuntimeError(u"Format string allocated too short.") # <<<<<<<<<<<<<< * * # Until ticket #99 is fixed, use integers to avoid warnings */ __pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin_RuntimeError, __pyx_tuple__6, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 847, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __PYX_ERR(1, 847, __pyx_L1_error) /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":846 * if not PyDataType_HASFIELDS(child): * t = child.type_num * if end - f < 5: # <<<<<<<<<<<<<< * raise RuntimeError(u"Format string allocated too short.") * */ } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":850 * * # Until ticket #99 is fixed, use integers to avoid warnings * if t == NPY_BYTE: f[0] = 98 #"b" # <<<<<<<<<<<<<< * elif t == NPY_UBYTE: f[0] = 66 #"B" * elif t == NPY_SHORT: f[0] = 104 #"h" */ __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_BYTE); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 850, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 850, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 850, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 98; goto __pyx_L15; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":851 * # Until ticket #99 is fixed, use integers to 
avoid warnings * if t == NPY_BYTE: f[0] = 98 #"b" * elif t == NPY_UBYTE: f[0] = 66 #"B" # <<<<<<<<<<<<<< * elif t == NPY_SHORT: f[0] = 104 #"h" * elif t == NPY_USHORT: f[0] = 72 #"H" */ __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_UBYTE); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 851, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 851, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 851, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 66; goto __pyx_L15; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":852 * if t == NPY_BYTE: f[0] = 98 #"b" * elif t == NPY_UBYTE: f[0] = 66 #"B" * elif t == NPY_SHORT: f[0] = 104 #"h" # <<<<<<<<<<<<<< * elif t == NPY_USHORT: f[0] = 72 #"H" * elif t == NPY_INT: f[0] = 105 #"i" */ __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_SHORT); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 852, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 852, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 852, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 0x68; goto __pyx_L15; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":853 * elif t == NPY_UBYTE: f[0] = 66 #"B" * elif t == NPY_SHORT: f[0] = 104 #"h" * elif t == NPY_USHORT: f[0] = 72 #"H" # <<<<<<<<<<<<<< * elif t == NPY_INT: f[0] = 105 #"i" * elif t == NPY_UINT: f[0] = 73 #"I" */ __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_USHORT); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 853, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 853, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 853, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 72; goto __pyx_L15; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":854 * elif t == NPY_SHORT: f[0] = 104 #"h" * elif t == NPY_USHORT: f[0] = 72 #"H" * elif t == NPY_INT: f[0] = 105 #"i" # <<<<<<<<<<<<<< * elif t == NPY_UINT: f[0] = 73 #"I" * elif t == NPY_LONG: f[0] = 108 #"l" */ __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_INT); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 854, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 854, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 854, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 0x69; goto __pyx_L15; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":855 * elif t == NPY_USHORT: f[0] = 72 #"H" * elif t == NPY_INT: f[0] = 105 #"i" * elif t == NPY_UINT: f[0] = 73 #"I" # <<<<<<<<<<<<<< * elif t == NPY_LONG: f[0] = 108 #"l" * elif t == 
NPY_ULONG: f[0] = 76 #"L" */ __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_UINT); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 855, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 855, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 855, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 73; goto __pyx_L15; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":856 * elif t == NPY_INT: f[0] = 105 #"i" * elif t == NPY_UINT: f[0] = 73 #"I" * elif t == NPY_LONG: f[0] = 108 #"l" # <<<<<<<<<<<<<< * elif t == NPY_ULONG: f[0] = 76 #"L" * elif t == NPY_LONGLONG: f[0] = 113 #"q" */ __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_LONG); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 856, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 856, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 856, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 0x6C; goto __pyx_L15; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":857 * elif t == NPY_UINT: f[0] = 73 #"I" * elif t == NPY_LONG: f[0] = 108 #"l" * elif t == NPY_ULONG: f[0] = 76 #"L" # <<<<<<<<<<<<<< * elif t == NPY_LONGLONG: f[0] = 113 #"q" * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" */ __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_ULONG); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 857, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 857, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 857, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 76; goto __pyx_L15; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":858 * elif t == NPY_LONG: f[0] = 108 #"l" * elif t == NPY_ULONG: f[0] = 76 #"L" * elif t == NPY_LONGLONG: f[0] = 113 #"q" # <<<<<<<<<<<<<< * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" * elif t == NPY_FLOAT: f[0] = 102 #"f" */ __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_LONGLONG); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 858, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 858, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 858, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 0x71; goto __pyx_L15; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":859 * elif t == NPY_ULONG: f[0] = 76 #"L" * elif t == NPY_LONGLONG: f[0] = 113 #"q" * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" # <<<<<<<<<<<<<< * elif t == NPY_FLOAT: f[0] = 102 #"f" * elif t == NPY_DOUBLE: f[0] = 100 #"d" */ __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_ULONGLONG); if (unlikely(!__pyx_t_3)) 
__PYX_ERR(1, 859, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 859, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 859, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 81; goto __pyx_L15; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":860 * elif t == NPY_LONGLONG: f[0] = 113 #"q" * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" * elif t == NPY_FLOAT: f[0] = 102 #"f" # <<<<<<<<<<<<<< * elif t == NPY_DOUBLE: f[0] = 100 #"d" * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" */ __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_FLOAT); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 860, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 860, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 860, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 0x66; goto __pyx_L15; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":861 * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" * elif t == NPY_FLOAT: f[0] = 102 #"f" * elif t == NPY_DOUBLE: f[0] = 100 #"d" # <<<<<<<<<<<<<< * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf */ __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_DOUBLE); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 861, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 861, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 861, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 0x64; goto __pyx_L15; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":862 * elif t == NPY_FLOAT: f[0] = 102 #"f" * elif t == NPY_DOUBLE: f[0] = 100 #"d" * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" # <<<<<<<<<<<<<< * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd */ __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_LONGDOUBLE); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 862, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 862, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 862, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 0x67; goto __pyx_L15; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":863 * elif t == NPY_DOUBLE: f[0] = 100 #"d" * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf # <<<<<<<<<<<<<< * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg */ __pyx_t_3 = 
__Pyx_PyInt_From_enum__NPY_TYPES(NPY_CFLOAT); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 863, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 863, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 863, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 90; (__pyx_v_f[1]) = 0x66; __pyx_v_f = (__pyx_v_f + 1); goto __pyx_L15; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":864 * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd # <<<<<<<<<<<<<< * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg * elif t == NPY_OBJECT: f[0] = 79 #"O" */ __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_CDOUBLE); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 864, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 864, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 864, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 90; (__pyx_v_f[1]) = 0x64; __pyx_v_f = (__pyx_v_f + 1); goto __pyx_L15; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":865 * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg # <<<<<<<<<<<<<< * elif t == NPY_OBJECT: f[0] = 79 #"O" * else: */ __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_CLONGDOUBLE); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 865, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 865, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 865, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 90; (__pyx_v_f[1]) = 0x67; __pyx_v_f = (__pyx_v_f + 1); goto __pyx_L15; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":866 * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg * elif t == NPY_OBJECT: f[0] = 79 #"O" # <<<<<<<<<<<<<< * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) */ __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_OBJECT); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 866, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 866, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 866, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 79; goto __pyx_L15; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":868 * elif t == NPY_OBJECT: f[0] = 79 #"O" * else: * 
raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) # <<<<<<<<<<<<<< * f += 1 * else: */ /*else*/ { __pyx_t_3 = PyUnicode_Format(__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, __pyx_v_t); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 868, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 868, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_4, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 868, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 868, __pyx_L1_error) } __pyx_L15:; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":869 * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) * f += 1 # <<<<<<<<<<<<<< * else: * # Cython ignores struct boundary information ("T{...}"), */ __pyx_v_f = (__pyx_v_f + 1); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":844 * offset[0] += child.itemsize * * if not PyDataType_HASFIELDS(child): # <<<<<<<<<<<<<< * t = child.type_num * if end - f < 5: */ goto __pyx_L13; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":873 * # Cython ignores struct boundary information ("T{...}"), * # so don't output it * f = _util_dtypestring(child, f, end, offset) # <<<<<<<<<<<<<< * return f * */ /*else*/ { __pyx_t_9 = __pyx_f_5numpy__util_dtypestring(__pyx_v_child, __pyx_v_f, __pyx_v_end, __pyx_v_offset); if (unlikely(__pyx_t_9 == ((char *)NULL))) __PYX_ERR(1, 873, __pyx_L1_error) __pyx_v_f = __pyx_t_9; } __pyx_L13:; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":818 * cdef tuple fields * * for childname in descr.names: # <<<<<<<<<<<<<< * fields = descr.fields[childname] * child, new_offset = fields */ } __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":874 * # so don't output it * f = _util_dtypestring(child, f, end, offset) * return f # <<<<<<<<<<<<<< * * */ __pyx_r = __pyx_v_f; goto __pyx_L0; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":809 * return () * * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: # <<<<<<<<<<<<<< * # Recursive utility function used in __getbuffer__ to get format * # string. The new location in the format string is returned. 
*/ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("numpy._util_dtypestring", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_child); __Pyx_XDECREF(__pyx_v_fields); __Pyx_XDECREF(__pyx_v_childname); __Pyx_XDECREF(__pyx_v_new_offset); __Pyx_XDECREF(__pyx_v_t); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":990 * * * cdef inline void set_array_base(ndarray arr, object base): # <<<<<<<<<<<<<< * cdef PyObject* baseptr * if base is None: */ static CYTHON_INLINE void __pyx_f_5numpy_set_array_base(PyArrayObject *__pyx_v_arr, PyObject *__pyx_v_base) { PyObject *__pyx_v_baseptr; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; __Pyx_RefNannySetupContext("set_array_base", 0); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":992 * cdef inline void set_array_base(ndarray arr, object base): * cdef PyObject* baseptr * if base is None: # <<<<<<<<<<<<<< * baseptr = NULL * else: */ __pyx_t_1 = (__pyx_v_base == Py_None); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":993 * cdef PyObject* baseptr * if base is None: * baseptr = NULL # <<<<<<<<<<<<<< * else: * Py_INCREF(base) # important to do this before decref below! */ __pyx_v_baseptr = NULL; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":992 * cdef inline void set_array_base(ndarray arr, object base): * cdef PyObject* baseptr * if base is None: # <<<<<<<<<<<<<< * baseptr = NULL * else: */ goto __pyx_L3; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":995 * baseptr = NULL * else: * Py_INCREF(base) # important to do this before decref below! # <<<<<<<<<<<<<< * baseptr = base * Py_XDECREF(arr.base) */ /*else*/ { Py_INCREF(__pyx_v_base); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":996 * else: * Py_INCREF(base) # important to do this before decref below! * baseptr = base # <<<<<<<<<<<<<< * Py_XDECREF(arr.base) * arr.base = baseptr */ __pyx_v_baseptr = ((PyObject *)__pyx_v_base); } __pyx_L3:; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":997 * Py_INCREF(base) # important to do this before decref below! 
* baseptr = base * Py_XDECREF(arr.base) # <<<<<<<<<<<<<< * arr.base = baseptr * */ Py_XDECREF(__pyx_v_arr->base); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":998 * baseptr = base * Py_XDECREF(arr.base) * arr.base = baseptr # <<<<<<<<<<<<<< * * cdef inline object get_array_base(ndarray arr): */ __pyx_v_arr->base = __pyx_v_baseptr; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":990 * * * cdef inline void set_array_base(ndarray arr, object base): # <<<<<<<<<<<<<< * cdef PyObject* baseptr * if base is None: */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1000 * arr.base = baseptr * * cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<< * if arr.base is NULL: * return None */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_get_array_base(PyArrayObject *__pyx_v_arr) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("get_array_base", 0); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1001 * * cdef inline object get_array_base(ndarray arr): * if arr.base is NULL: # <<<<<<<<<<<<<< * return None * else: */ __pyx_t_1 = ((__pyx_v_arr->base == NULL) != 0); if (__pyx_t_1) { /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1002 * cdef inline object get_array_base(ndarray arr): * if arr.base is NULL: * return None # <<<<<<<<<<<<<< * else: * return arr.base */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(Py_None); __pyx_r = Py_None; goto __pyx_L0; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1001 * * cdef inline object get_array_base(ndarray arr): * if arr.base is NULL: # <<<<<<<<<<<<<< * return None * else: */ } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1004 * return None * else: * return arr.base # <<<<<<<<<<<<<< * * */ /*else*/ { __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_arr->base)); __pyx_r = ((PyObject *)__pyx_v_arr->base); goto __pyx_L0; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1000 * arr.base = baseptr * * cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<< * if arr.base is NULL: * return None */ /* function exit code */ __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1009 * # Versions of the import_* functions which are more suitable for * # Cython code. * cdef inline int import_array() except -1: # <<<<<<<<<<<<<< * try: * _import_array() */ static CYTHON_INLINE int __pyx_f_5numpy_import_array(void) { int __pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; int __pyx_t_4; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; PyObject *__pyx_t_8 = NULL; __Pyx_RefNannySetupContext("import_array", 0); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1010 * # Cython code. 
* cdef inline int import_array() except -1: * try: # <<<<<<<<<<<<<< * _import_array() * except Exception: */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_1, &__pyx_t_2, &__pyx_t_3); __Pyx_XGOTREF(__pyx_t_1); __Pyx_XGOTREF(__pyx_t_2); __Pyx_XGOTREF(__pyx_t_3); /*try:*/ { /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1011 * cdef inline int import_array() except -1: * try: * _import_array() # <<<<<<<<<<<<<< * except Exception: * raise ImportError("numpy.core.multiarray failed to import") */ __pyx_t_4 = _import_array(); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 1011, __pyx_L3_error) /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1010 * # Cython code. * cdef inline int import_array() except -1: * try: # <<<<<<<<<<<<<< * _import_array() * except Exception: */ } __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; goto __pyx_L8_try_end; __pyx_L3_error:; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1012 * try: * _import_array() * except Exception: # <<<<<<<<<<<<<< * raise ImportError("numpy.core.multiarray failed to import") * */ __pyx_t_4 = __Pyx_PyErr_ExceptionMatches(((PyObject *)(&((PyTypeObject*)PyExc_Exception)[0]))); if (__pyx_t_4) { __Pyx_AddTraceback("numpy.import_array", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_5, &__pyx_t_6, &__pyx_t_7) < 0) __PYX_ERR(1, 1012, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GOTREF(__pyx_t_6); __Pyx_GOTREF(__pyx_t_7); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1013 * _import_array() * except Exception: * raise ImportError("numpy.core.multiarray failed to import") # <<<<<<<<<<<<<< * * cdef inline int import_umath() except -1: */ __pyx_t_8 = __Pyx_PyObject_Call(__pyx_builtin_ImportError, __pyx_tuple__7, NULL); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 1013, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_Raise(__pyx_t_8, 0, 0, 0); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __PYX_ERR(1, 1013, __pyx_L5_except_error) } goto __pyx_L5_except_error; __pyx_L5_except_error:; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1010 * # Cython code. * cdef inline int import_array() except -1: * try: # <<<<<<<<<<<<<< * _import_array() * except Exception: */ __Pyx_XGIVEREF(__pyx_t_1); __Pyx_XGIVEREF(__pyx_t_2); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_ExceptionReset(__pyx_t_1, __pyx_t_2, __pyx_t_3); goto __pyx_L1_error; __pyx_L8_try_end:; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1009 * # Versions of the import_* functions which are more suitable for * # Cython code. 
* cdef inline int import_array() except -1: # <<<<<<<<<<<<<< * try: * _import_array() */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_8); __Pyx_AddTraceback("numpy.import_array", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1015 * raise ImportError("numpy.core.multiarray failed to import") * * cdef inline int import_umath() except -1: # <<<<<<<<<<<<<< * try: * _import_umath() */ static CYTHON_INLINE int __pyx_f_5numpy_import_umath(void) { int __pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; int __pyx_t_4; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; PyObject *__pyx_t_8 = NULL; __Pyx_RefNannySetupContext("import_umath", 0); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1016 * * cdef inline int import_umath() except -1: * try: # <<<<<<<<<<<<<< * _import_umath() * except Exception: */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_1, &__pyx_t_2, &__pyx_t_3); __Pyx_XGOTREF(__pyx_t_1); __Pyx_XGOTREF(__pyx_t_2); __Pyx_XGOTREF(__pyx_t_3); /*try:*/ { /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1017 * cdef inline int import_umath() except -1: * try: * _import_umath() # <<<<<<<<<<<<<< * except Exception: * raise ImportError("numpy.core.umath failed to import") */ __pyx_t_4 = _import_umath(); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 1017, __pyx_L3_error) /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1016 * * cdef inline int import_umath() except -1: * try: # <<<<<<<<<<<<<< * _import_umath() * except Exception: */ } __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; goto __pyx_L8_try_end; __pyx_L3_error:; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1018 * try: * _import_umath() * except Exception: # <<<<<<<<<<<<<< * raise ImportError("numpy.core.umath failed to import") * */ __pyx_t_4 = __Pyx_PyErr_ExceptionMatches(((PyObject *)(&((PyTypeObject*)PyExc_Exception)[0]))); if (__pyx_t_4) { __Pyx_AddTraceback("numpy.import_umath", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_5, &__pyx_t_6, &__pyx_t_7) < 0) __PYX_ERR(1, 1018, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GOTREF(__pyx_t_6); __Pyx_GOTREF(__pyx_t_7); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1019 * _import_umath() * except Exception: * raise ImportError("numpy.core.umath failed to import") # <<<<<<<<<<<<<< * * cdef inline int import_ufunc() except -1: */ __pyx_t_8 = __Pyx_PyObject_Call(__pyx_builtin_ImportError, __pyx_tuple__8, NULL); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 1019, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_Raise(__pyx_t_8, 0, 0, 0); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __PYX_ERR(1, 1019, __pyx_L5_except_error) } goto __pyx_L5_except_error; __pyx_L5_except_error:; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1016 * * cdef inline int 
import_umath() except -1: * try: # <<<<<<<<<<<<<< * _import_umath() * except Exception: */ __Pyx_XGIVEREF(__pyx_t_1); __Pyx_XGIVEREF(__pyx_t_2); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_ExceptionReset(__pyx_t_1, __pyx_t_2, __pyx_t_3); goto __pyx_L1_error; __pyx_L8_try_end:; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1015 * raise ImportError("numpy.core.multiarray failed to import") * * cdef inline int import_umath() except -1: # <<<<<<<<<<<<<< * try: * _import_umath() */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_8); __Pyx_AddTraceback("numpy.import_umath", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1021 * raise ImportError("numpy.core.umath failed to import") * * cdef inline int import_ufunc() except -1: # <<<<<<<<<<<<<< * try: * _import_umath() */ static CYTHON_INLINE int __pyx_f_5numpy_import_ufunc(void) { int __pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; int __pyx_t_4; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; PyObject *__pyx_t_8 = NULL; __Pyx_RefNannySetupContext("import_ufunc", 0); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1022 * * cdef inline int import_ufunc() except -1: * try: # <<<<<<<<<<<<<< * _import_umath() * except Exception: */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_1, &__pyx_t_2, &__pyx_t_3); __Pyx_XGOTREF(__pyx_t_1); __Pyx_XGOTREF(__pyx_t_2); __Pyx_XGOTREF(__pyx_t_3); /*try:*/ { /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1023 * cdef inline int import_ufunc() except -1: * try: * _import_umath() # <<<<<<<<<<<<<< * except Exception: * raise ImportError("numpy.core.umath failed to import") */ __pyx_t_4 = _import_umath(); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 1023, __pyx_L3_error) /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1022 * * cdef inline int import_ufunc() except -1: * try: # <<<<<<<<<<<<<< * _import_umath() * except Exception: */ } __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; goto __pyx_L8_try_end; __pyx_L3_error:; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1024 * try: * _import_umath() * except Exception: # <<<<<<<<<<<<<< * raise ImportError("numpy.core.umath failed to import") */ __pyx_t_4 = __Pyx_PyErr_ExceptionMatches(((PyObject *)(&((PyTypeObject*)PyExc_Exception)[0]))); if (__pyx_t_4) { __Pyx_AddTraceback("numpy.import_ufunc", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_5, &__pyx_t_6, &__pyx_t_7) < 0) __PYX_ERR(1, 1024, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GOTREF(__pyx_t_6); __Pyx_GOTREF(__pyx_t_7); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1025 * _import_umath() * except Exception: * raise ImportError("numpy.core.umath failed to import") # <<<<<<<<<<<<<< */ __pyx_t_8 = __Pyx_PyObject_Call(__pyx_builtin_ImportError, __pyx_tuple__9, NULL); if 
(unlikely(!__pyx_t_8)) __PYX_ERR(1, 1025, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_Raise(__pyx_t_8, 0, 0, 0); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __PYX_ERR(1, 1025, __pyx_L5_except_error) } goto __pyx_L5_except_error; __pyx_L5_except_error:; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1022 * * cdef inline int import_ufunc() except -1: * try: # <<<<<<<<<<<<<< * _import_umath() * except Exception: */ __Pyx_XGIVEREF(__pyx_t_1); __Pyx_XGIVEREF(__pyx_t_2); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_ExceptionReset(__pyx_t_1, __pyx_t_2, __pyx_t_3); goto __pyx_L1_error; __pyx_L8_try_end:; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1021 * raise ImportError("numpy.core.umath failed to import") * * cdef inline int import_ufunc() except -1: # <<<<<<<<<<<<<< * try: * _import_umath() */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_8); __Pyx_AddTraceback("numpy.import_ufunc", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyMethodDef __pyx_methods[] = { {0, 0, 0, 0} }; #if PY_MAJOR_VERSION >= 3 #if CYTHON_PEP489_MULTI_PHASE_INIT static PyObject* __pyx_pymod_create(PyObject *spec, PyModuleDef *def); /*proto*/ static int __pyx_pymod_exec_elliptical_overlap(PyObject* module); /*proto*/ static PyModuleDef_Slot __pyx_moduledef_slots[] = { {Py_mod_create, (void*)__pyx_pymod_create}, {Py_mod_exec, (void*)__pyx_pymod_exec_elliptical_overlap}, {0, NULL} }; #endif static struct PyModuleDef __pyx_moduledef = { PyModuleDef_HEAD_INIT, "elliptical_overlap", __pyx_k_The_functions_defined_here_allo, /* m_doc */ #if CYTHON_PEP489_MULTI_PHASE_INIT 0, /* m_size */ #else -1, /* m_size */ #endif __pyx_methods /* m_methods */, #if CYTHON_PEP489_MULTI_PHASE_INIT __pyx_moduledef_slots, /* m_slots */ #else NULL, /* m_reload */ #endif NULL, /* m_traverse */ NULL, /* m_clear */ NULL /* m_free */ }; #endif static __Pyx_StringTabEntry __pyx_string_tab[] = { {&__pyx_n_s_DTYPE, __pyx_k_DTYPE, sizeof(__pyx_k_DTYPE), 0, 0, 1, 1}, {&__pyx_kp_u_Format_string_allocated_too_shor, __pyx_k_Format_string_allocated_too_shor, sizeof(__pyx_k_Format_string_allocated_too_shor), 0, 1, 0, 0}, {&__pyx_kp_u_Format_string_allocated_too_shor_2, __pyx_k_Format_string_allocated_too_shor_2, sizeof(__pyx_k_Format_string_allocated_too_shor_2), 0, 1, 0, 0}, {&__pyx_n_s_ImportError, __pyx_k_ImportError, sizeof(__pyx_k_ImportError), 0, 0, 1, 1}, {&__pyx_kp_u_Non_native_byte_order_not_suppor, __pyx_k_Non_native_byte_order_not_suppor, sizeof(__pyx_k_Non_native_byte_order_not_suppor), 0, 1, 0, 0}, {&__pyx_n_s_RuntimeError, __pyx_k_RuntimeError, sizeof(__pyx_k_RuntimeError), 0, 0, 1, 1}, {&__pyx_n_s_ValueError, __pyx_k_ValueError, sizeof(__pyx_k_ValueError), 0, 0, 1, 1}, {&__pyx_n_s_all, __pyx_k_all, sizeof(__pyx_k_all), 0, 0, 1, 1}, {&__pyx_n_s_bxmax, __pyx_k_bxmax, sizeof(__pyx_k_bxmax), 0, 0, 1, 1}, {&__pyx_n_s_bxmin, __pyx_k_bxmin, sizeof(__pyx_k_bxmin), 0, 0, 1, 1}, {&__pyx_n_s_bymax, __pyx_k_bymax, sizeof(__pyx_k_bymax), 0, 0, 1, 1}, {&__pyx_n_s_bymin, __pyx_k_bymin, sizeof(__pyx_k_bymin), 0, 0, 1, 1}, {&__pyx_n_s_cline_in_traceback, __pyx_k_cline_in_traceback, sizeof(__pyx_k_cline_in_traceback), 0, 0, 1, 1}, {&__pyx_n_s_dtype, __pyx_k_dtype, sizeof(__pyx_k_dtype), 0, 0, 1, 1}, {&__pyx_n_s_dx, __pyx_k_dx, sizeof(__pyx_k_dx), 0, 0, 1, 1}, 
{&__pyx_n_s_dy, __pyx_k_dy, sizeof(__pyx_k_dy), 0, 0, 1, 1}, {&__pyx_n_s_elliptical_overlap_grid, __pyx_k_elliptical_overlap_grid, sizeof(__pyx_k_elliptical_overlap_grid), 0, 0, 1, 1}, {&__pyx_n_u_elliptical_overlap_grid, __pyx_k_elliptical_overlap_grid, sizeof(__pyx_k_elliptical_overlap_grid), 0, 1, 0, 1}, {&__pyx_n_s_float64, __pyx_k_float64, sizeof(__pyx_k_float64), 0, 0, 1, 1}, {&__pyx_n_s_frac, __pyx_k_frac, sizeof(__pyx_k_frac), 0, 0, 1, 1}, {&__pyx_n_s_i, __pyx_k_i, sizeof(__pyx_k_i), 0, 0, 1, 1}, {&__pyx_n_s_import, __pyx_k_import, sizeof(__pyx_k_import), 0, 0, 1, 1}, {&__pyx_n_s_j, __pyx_k_j, sizeof(__pyx_k_j), 0, 0, 1, 1}, {&__pyx_n_s_main, __pyx_k_main, sizeof(__pyx_k_main), 0, 0, 1, 1}, {&__pyx_kp_u_ndarray_is_not_C_contiguous, __pyx_k_ndarray_is_not_C_contiguous, sizeof(__pyx_k_ndarray_is_not_C_contiguous), 0, 1, 0, 0}, {&__pyx_kp_u_ndarray_is_not_Fortran_contiguou, __pyx_k_ndarray_is_not_Fortran_contiguou, sizeof(__pyx_k_ndarray_is_not_Fortran_contiguou), 0, 1, 0, 0}, {&__pyx_n_s_norm, __pyx_k_norm, sizeof(__pyx_k_norm), 0, 0, 1, 1}, {&__pyx_n_s_np, __pyx_k_np, sizeof(__pyx_k_np), 0, 0, 1, 1}, {&__pyx_n_s_numpy, __pyx_k_numpy, sizeof(__pyx_k_numpy), 0, 0, 1, 1}, {&__pyx_kp_u_numpy_core_multiarray_failed_to, __pyx_k_numpy_core_multiarray_failed_to, sizeof(__pyx_k_numpy_core_multiarray_failed_to), 0, 1, 0, 0}, {&__pyx_kp_u_numpy_core_umath_failed_to_impor, __pyx_k_numpy_core_umath_failed_to_impor, sizeof(__pyx_k_numpy_core_umath_failed_to_impor), 0, 1, 0, 0}, {&__pyx_n_s_nx, __pyx_k_nx, sizeof(__pyx_k_nx), 0, 0, 1, 1}, {&__pyx_n_s_ny, __pyx_k_ny, sizeof(__pyx_k_ny), 0, 0, 1, 1}, {&__pyx_kp_s_photutils_geometry_elliptical_ov, __pyx_k_photutils_geometry_elliptical_ov, sizeof(__pyx_k_photutils_geometry_elliptical_ov), 0, 0, 1, 0}, {&__pyx_n_s_photutils_geometry_elliptical_ov_2, __pyx_k_photutils_geometry_elliptical_ov_2, sizeof(__pyx_k_photutils_geometry_elliptical_ov_2), 0, 0, 1, 1}, {&__pyx_n_s_pxmax, __pyx_k_pxmax, sizeof(__pyx_k_pxmax), 0, 0, 1, 1}, {&__pyx_n_s_pxmin, __pyx_k_pxmin, sizeof(__pyx_k_pxmin), 0, 0, 1, 1}, {&__pyx_n_s_pymax, __pyx_k_pymax, sizeof(__pyx_k_pymax), 0, 0, 1, 1}, {&__pyx_n_s_pymin, __pyx_k_pymin, sizeof(__pyx_k_pymin), 0, 0, 1, 1}, {&__pyx_n_s_r, __pyx_k_r, sizeof(__pyx_k_r), 0, 0, 1, 1}, {&__pyx_n_s_range, __pyx_k_range, sizeof(__pyx_k_range), 0, 0, 1, 1}, {&__pyx_n_s_rx, __pyx_k_rx, sizeof(__pyx_k_rx), 0, 0, 1, 1}, {&__pyx_n_s_ry, __pyx_k_ry, sizeof(__pyx_k_ry), 0, 0, 1, 1}, {&__pyx_n_s_subpixels, __pyx_k_subpixels, sizeof(__pyx_k_subpixels), 0, 0, 1, 1}, {&__pyx_n_s_test, __pyx_k_test, sizeof(__pyx_k_test), 0, 0, 1, 1}, {&__pyx_n_s_theta, __pyx_k_theta, sizeof(__pyx_k_theta), 0, 0, 1, 1}, {&__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, __pyx_k_unknown_dtype_code_in_numpy_pxd, sizeof(__pyx_k_unknown_dtype_code_in_numpy_pxd), 0, 1, 0, 0}, {&__pyx_n_s_use_exact, __pyx_k_use_exact, sizeof(__pyx_k_use_exact), 0, 0, 1, 1}, {&__pyx_n_s_x, __pyx_k_x, sizeof(__pyx_k_x), 0, 0, 1, 1}, {&__pyx_n_s_xmax, __pyx_k_xmax, sizeof(__pyx_k_xmax), 0, 0, 1, 1}, {&__pyx_n_s_xmin, __pyx_k_xmin, sizeof(__pyx_k_xmin), 0, 0, 1, 1}, {&__pyx_n_s_y, __pyx_k_y, sizeof(__pyx_k_y), 0, 0, 1, 1}, {&__pyx_n_s_ymax, __pyx_k_ymax, sizeof(__pyx_k_ymax), 0, 0, 1, 1}, {&__pyx_n_s_ymin, __pyx_k_ymin, sizeof(__pyx_k_ymin), 0, 0, 1, 1}, {&__pyx_n_s_zeros, __pyx_k_zeros, sizeof(__pyx_k_zeros), 0, 0, 1, 1}, {0, 0, 0, 0, 0, 0, 0} }; static int __Pyx_InitCachedBuiltins(void) { __pyx_builtin_range = __Pyx_GetBuiltinName(__pyx_n_s_range); if (!__pyx_builtin_range) __PYX_ERR(0, 103, __pyx_L1_error) 
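/* The lookups below cache the remaining builtins referenced by this extension:
   range (used by elliptical_overlap.pyx) and ValueError, RuntimeError and
   ImportError (used by the bundled numpy .pxd declarations), so each name is
   resolved once at module initialization rather than on every use. */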
__pyx_builtin_ValueError = __Pyx_GetBuiltinName(__pyx_n_s_ValueError); if (!__pyx_builtin_ValueError) __PYX_ERR(1, 235, __pyx_L1_error) __pyx_builtin_RuntimeError = __Pyx_GetBuiltinName(__pyx_n_s_RuntimeError); if (!__pyx_builtin_RuntimeError) __PYX_ERR(1, 823, __pyx_L1_error) __pyx_builtin_ImportError = __Pyx_GetBuiltinName(__pyx_n_s_ImportError); if (!__pyx_builtin_ImportError) __PYX_ERR(1, 1013, __pyx_L1_error) return 0; __pyx_L1_error:; return -1; } static int __Pyx_InitCachedConstants(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":235 * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): * raise ValueError(u"ndarray is not C contiguous") # <<<<<<<<<<<<<< * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) */ __pyx_tuple_ = PyTuple_Pack(1, __pyx_kp_u_ndarray_is_not_C_contiguous); if (unlikely(!__pyx_tuple_)) __PYX_ERR(1, 235, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple_); __Pyx_GIVEREF(__pyx_tuple_); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":239 * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): * raise ValueError(u"ndarray is not Fortran contiguous") # <<<<<<<<<<<<<< * * info.buf = PyArray_DATA(self) */ __pyx_tuple__2 = PyTuple_Pack(1, __pyx_kp_u_ndarray_is_not_Fortran_contiguou); if (unlikely(!__pyx_tuple__2)) __PYX_ERR(1, 239, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__2); __Pyx_GIVEREF(__pyx_tuple__2); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":276 * if ((descr.byteorder == c'>' and little_endian) or * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" */ __pyx_tuple__3 = PyTuple_Pack(1, __pyx_kp_u_Non_native_byte_order_not_suppor); if (unlikely(!__pyx_tuple__3)) __PYX_ERR(1, 276, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__3); __Pyx_GIVEREF(__pyx_tuple__3); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":823 * * if (end - f) - (new_offset - offset[0]) < 15: * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") # <<<<<<<<<<<<<< * * if ((child.byteorder == c'>' and little_endian) or */ __pyx_tuple__4 = PyTuple_Pack(1, __pyx_kp_u_Format_string_allocated_too_shor); if (unlikely(!__pyx_tuple__4)) __PYX_ERR(1, 823, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__4); __Pyx_GIVEREF(__pyx_tuple__4); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":827 * if ((child.byteorder == c'>' and little_endian) or * (child.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< * # One could encode it in the format string and have Cython * # complain instead, BUT: < and > in format strings also imply */ __pyx_tuple__5 = PyTuple_Pack(1, __pyx_kp_u_Non_native_byte_order_not_suppor); if (unlikely(!__pyx_tuple__5)) __PYX_ERR(1, 827, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__5); __Pyx_GIVEREF(__pyx_tuple__5); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":847 * t = child.type_num * if end - f < 5: * raise RuntimeError(u"Format string 
allocated too short.") # <<<<<<<<<<<<<< * * # Until ticket #99 is fixed, use integers to avoid warnings */ __pyx_tuple__6 = PyTuple_Pack(1, __pyx_kp_u_Format_string_allocated_too_shor_2); if (unlikely(!__pyx_tuple__6)) __PYX_ERR(1, 847, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__6); __Pyx_GIVEREF(__pyx_tuple__6); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1013 * _import_array() * except Exception: * raise ImportError("numpy.core.multiarray failed to import") # <<<<<<<<<<<<<< * * cdef inline int import_umath() except -1: */ __pyx_tuple__7 = PyTuple_Pack(1, __pyx_kp_u_numpy_core_multiarray_failed_to); if (unlikely(!__pyx_tuple__7)) __PYX_ERR(1, 1013, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__7); __Pyx_GIVEREF(__pyx_tuple__7); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1019 * _import_umath() * except Exception: * raise ImportError("numpy.core.umath failed to import") # <<<<<<<<<<<<<< * * cdef inline int import_ufunc() except -1: */ __pyx_tuple__8 = PyTuple_Pack(1, __pyx_kp_u_numpy_core_umath_failed_to_impor); if (unlikely(!__pyx_tuple__8)) __PYX_ERR(1, 1019, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__8); __Pyx_GIVEREF(__pyx_tuple__8); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1025 * _import_umath() * except Exception: * raise ImportError("numpy.core.umath failed to import") # <<<<<<<<<<<<<< */ __pyx_tuple__9 = PyTuple_Pack(1, __pyx_kp_u_numpy_core_umath_failed_to_impor); if (unlikely(!__pyx_tuple__9)) __PYX_ERR(1, 1025, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__9); __Pyx_GIVEREF(__pyx_tuple__9); /* "photutils/geometry/elliptical_overlap.pyx":40 * * * def elliptical_overlap_grid(double xmin, double xmax, double ymin, double ymax, # <<<<<<<<<<<<<< * int nx, int ny, double rx, double ry, double theta, * int use_exact, int subpixels): */ __pyx_tuple__10 = PyTuple_Pack(28, __pyx_n_s_xmin, __pyx_n_s_xmax, __pyx_n_s_ymin, __pyx_n_s_ymax, __pyx_n_s_nx, __pyx_n_s_ny, __pyx_n_s_rx, __pyx_n_s_ry, __pyx_n_s_theta, __pyx_n_s_use_exact, __pyx_n_s_subpixels, __pyx_n_s_i, __pyx_n_s_j, __pyx_n_s_x, __pyx_n_s_y, __pyx_n_s_dx, __pyx_n_s_dy, __pyx_n_s_bxmin, __pyx_n_s_bxmax, __pyx_n_s_bymin, __pyx_n_s_bymax, __pyx_n_s_pxmin, __pyx_n_s_pxmax, __pyx_n_s_pymin, __pyx_n_s_pymax, __pyx_n_s_norm, __pyx_n_s_frac, __pyx_n_s_r); if (unlikely(!__pyx_tuple__10)) __PYX_ERR(0, 40, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__10); __Pyx_GIVEREF(__pyx_tuple__10); __pyx_codeobj__11 = (PyObject*)__Pyx_PyCode_New(11, 0, 28, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__10, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_photutils_geometry_elliptical_ov, __pyx_n_s_elliptical_overlap_grid, 40, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__11)) __PYX_ERR(0, 40, __pyx_L1_error) __Pyx_RefNannyFinishContext(); return 0; __pyx_L1_error:; __Pyx_RefNannyFinishContext(); return -1; } static int __Pyx_InitGlobals(void) { if (__Pyx_InitStrings(__pyx_string_tab) < 0) __PYX_ERR(0, 1, __pyx_L1_error); return 0; __pyx_L1_error:; return -1; } #if PY_MAJOR_VERSION < 3 PyMODINIT_FUNC initelliptical_overlap(void); /*proto*/ PyMODINIT_FUNC initelliptical_overlap(void) #else PyMODINIT_FUNC PyInit_elliptical_overlap(void); /*proto*/ PyMODINIT_FUNC PyInit_elliptical_overlap(void) #if CYTHON_PEP489_MULTI_PHASE_INIT { return PyModuleDef_Init(&__pyx_moduledef); } static int __Pyx_copy_spec_to_module(PyObject *spec, PyObject *moddict, 
const char* from_name, const char* to_name) { PyObject *value = PyObject_GetAttrString(spec, from_name); int result = 0; if (likely(value)) { result = PyDict_SetItemString(moddict, to_name, value); Py_DECREF(value); } else if (PyErr_ExceptionMatches(PyExc_AttributeError)) { PyErr_Clear(); } else { result = -1; } return result; } static PyObject* __pyx_pymod_create(PyObject *spec, CYTHON_UNUSED PyModuleDef *def) { PyObject *module = NULL, *moddict, *modname; if (__pyx_m) return __Pyx_NewRef(__pyx_m); modname = PyObject_GetAttrString(spec, "name"); if (unlikely(!modname)) goto bad; module = PyModule_NewObject(modname); Py_DECREF(modname); if (unlikely(!module)) goto bad; moddict = PyModule_GetDict(module); if (unlikely(!moddict)) goto bad; if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "loader", "__loader__") < 0)) goto bad; if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "origin", "__file__") < 0)) goto bad; if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "parent", "__package__") < 0)) goto bad; if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "submodule_search_locations", "__path__") < 0)) goto bad; return module; bad: Py_XDECREF(module); return NULL; } static int __pyx_pymod_exec_elliptical_overlap(PyObject *__pyx_pyinit_module) #endif #endif { PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; __Pyx_RefNannyDeclarations #if CYTHON_PEP489_MULTI_PHASE_INIT if (__pyx_m && __pyx_m == __pyx_pyinit_module) return 0; #endif #if CYTHON_REFNANNY __Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny"); if (!__Pyx_RefNanny) { PyErr_Clear(); __Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny"); if (!__Pyx_RefNanny) Py_FatalError("failed to import 'refnanny' module"); } #endif __Pyx_RefNannySetupContext("PyMODINIT_FUNC PyInit_elliptical_overlap(void)", 0); if (__Pyx_check_binary_version() < 0) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_empty_unicode = PyUnicode_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_unicode)) __PYX_ERR(0, 1, __pyx_L1_error) #ifdef __Pyx_CyFunction_USED if (__pyx_CyFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_FusedFunction_USED if (__pyx_FusedFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_Coroutine_USED if (__pyx_Coroutine_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_Generator_USED if (__pyx_Generator_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_AsyncGen_USED if (__pyx_AsyncGen_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_StopAsyncIteration_USED if (__pyx_StopAsyncIteration_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif /*--- Library function declarations ---*/ /*--- Threads initialization code ---*/ #if defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS #ifdef WITH_THREAD /* Python build with threading support? 
*/ PyEval_InitThreads(); #endif #endif /*--- Module creation code ---*/ #if CYTHON_PEP489_MULTI_PHASE_INIT __pyx_m = __pyx_pyinit_module; Py_INCREF(__pyx_m); #else #if PY_MAJOR_VERSION < 3 __pyx_m = Py_InitModule4("elliptical_overlap", __pyx_methods, __pyx_k_The_functions_defined_here_allo, 0, PYTHON_API_VERSION); Py_XINCREF(__pyx_m); #else __pyx_m = PyModule_Create(&__pyx_moduledef); #endif if (unlikely(!__pyx_m)) __PYX_ERR(0, 1, __pyx_L1_error) #endif __pyx_d = PyModule_GetDict(__pyx_m); if (unlikely(!__pyx_d)) __PYX_ERR(0, 1, __pyx_L1_error) Py_INCREF(__pyx_d); __pyx_b = PyImport_AddModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_b)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_cython_runtime = PyImport_AddModule((char *) "cython_runtime"); if (unlikely(!__pyx_cython_runtime)) __PYX_ERR(0, 1, __pyx_L1_error) #if CYTHON_COMPILING_IN_PYPY Py_INCREF(__pyx_b); #endif if (PyObject_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) __PYX_ERR(0, 1, __pyx_L1_error); /*--- Initialize various global constants etc. ---*/ if (__Pyx_InitGlobals() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #if PY_MAJOR_VERSION < 3 && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT) if (__Pyx_init_sys_getdefaultencoding_params() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif if (__pyx_module_is_main_photutils__geometry__elliptical_overlap) { if (PyObject_SetAttrString(__pyx_m, "__name__", __pyx_n_s_main) < 0) __PYX_ERR(0, 1, __pyx_L1_error) } #if PY_MAJOR_VERSION >= 3 { PyObject *modules = PyImport_GetModuleDict(); if (unlikely(!modules)) __PYX_ERR(0, 1, __pyx_L1_error) if (!PyDict_GetItemString(modules, "photutils.geometry.elliptical_overlap")) { if (unlikely(PyDict_SetItemString(modules, "photutils.geometry.elliptical_overlap", __pyx_m) < 0)) __PYX_ERR(0, 1, __pyx_L1_error) } } #endif /*--- Builtin init code ---*/ if (__Pyx_InitCachedBuiltins() < 0) __PYX_ERR(0, 1, __pyx_L1_error) /*--- Constants init code ---*/ if (__Pyx_InitCachedConstants() < 0) __PYX_ERR(0, 1, __pyx_L1_error) /*--- Global init code ---*/ /*--- Variable export code ---*/ /*--- Function export code ---*/ /*--- Type init code ---*/ /*--- Type import code ---*/ __pyx_ptype_7cpython_4type_type = __Pyx_ImportType(__Pyx_BUILTIN_MODULE_NAME, "type", #if CYTHON_COMPILING_IN_PYPY sizeof(PyTypeObject), #else sizeof(PyHeapTypeObject), #endif 0); if (unlikely(!__pyx_ptype_7cpython_4type_type)) __PYX_ERR(2, 9, __pyx_L1_error) __pyx_ptype_7cpython_4bool_bool = __Pyx_ImportType(__Pyx_BUILTIN_MODULE_NAME, "bool", sizeof(PyBoolObject), 0); if (unlikely(!__pyx_ptype_7cpython_4bool_bool)) __PYX_ERR(3, 8, __pyx_L1_error) __pyx_ptype_7cpython_7complex_complex = __Pyx_ImportType(__Pyx_BUILTIN_MODULE_NAME, "complex", sizeof(PyComplexObject), 0); if (unlikely(!__pyx_ptype_7cpython_7complex_complex)) __PYX_ERR(4, 15, __pyx_L1_error) __pyx_ptype_5numpy_dtype = __Pyx_ImportType("numpy", "dtype", sizeof(PyArray_Descr), 0); if (unlikely(!__pyx_ptype_5numpy_dtype)) __PYX_ERR(1, 163, __pyx_L1_error) __pyx_ptype_5numpy_flatiter = __Pyx_ImportType("numpy", "flatiter", sizeof(PyArrayIterObject), 0); if (unlikely(!__pyx_ptype_5numpy_flatiter)) __PYX_ERR(1, 185, __pyx_L1_error) __pyx_ptype_5numpy_broadcast = __Pyx_ImportType("numpy", "broadcast", sizeof(PyArrayMultiIterObject), 0); if (unlikely(!__pyx_ptype_5numpy_broadcast)) __PYX_ERR(1, 189, __pyx_L1_error) __pyx_ptype_5numpy_ndarray = __Pyx_ImportType("numpy", "ndarray", sizeof(PyArrayObject), 0); if (unlikely(!__pyx_ptype_5numpy_ndarray)) __PYX_ERR(1, 198, __pyx_L1_error) __pyx_ptype_5numpy_ufunc = 
__Pyx_ImportType("numpy", "ufunc", sizeof(PyUFuncObject), 0); if (unlikely(!__pyx_ptype_5numpy_ufunc)) __PYX_ERR(1, 885, __pyx_L1_error) /*--- Variable import code ---*/ /*--- Function import code ---*/ __pyx_t_1 = __Pyx_ImportModule("photutils.geometry.core"); if (!__pyx_t_1) __PYX_ERR(0, 1, __pyx_L1_error) if (__Pyx_ImportFunction(__pyx_t_1, "distance", (void (**)(void))&__pyx_f_9photutils_8geometry_4core_distance, "double (double, double, double, double)") < 0) __PYX_ERR(0, 1, __pyx_L1_error) if (__Pyx_ImportFunction(__pyx_t_1, "area_triangle", (void (**)(void))&__pyx_f_9photutils_8geometry_4core_area_triangle, "double (double, double, double, double, double, double)") < 0) __PYX_ERR(0, 1, __pyx_L1_error) if (__Pyx_ImportFunction(__pyx_t_1, "overlap_area_triangle_unit_circle", (void (**)(void))&__pyx_f_9photutils_8geometry_4core_overlap_area_triangle_unit_circle, "double (double, double, double, double, double, double)") < 0) __PYX_ERR(0, 1, __pyx_L1_error) Py_DECREF(__pyx_t_1); __pyx_t_1 = 0; /*--- Execution code ---*/ #if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED) if (__Pyx_patch_abc() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif /* "photutils/geometry/elliptical_overlap.pyx":13 * unicode_literals) * * import numpy as np # <<<<<<<<<<<<<< * cimport numpy as np * */ __pyx_t_2 = __Pyx_Import(__pyx_n_s_numpy, 0, 0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_d, __pyx_n_s_np, __pyx_t_2) < 0) __PYX_ERR(0, 13, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "photutils/geometry/elliptical_overlap.pyx":17 * * * __all__ = ['elliptical_overlap_grid'] # <<<<<<<<<<<<<< * * */ __pyx_t_2 = PyList_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 17, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_INCREF(__pyx_n_u_elliptical_overlap_grid); __Pyx_GIVEREF(__pyx_n_u_elliptical_overlap_grid); PyList_SET_ITEM(__pyx_t_2, 0, __pyx_n_u_elliptical_overlap_grid); if (PyDict_SetItem(__pyx_d, __pyx_n_s_all, __pyx_t_2) < 0) __PYX_ERR(0, 17, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "photutils/geometry/elliptical_overlap.pyx":29 * from cpython cimport bool * * DTYPE = np.float64 # <<<<<<<<<<<<<< * ctypedef np.float64_t DTYPE_t * */ __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 29, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_float64); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 29, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; if (PyDict_SetItem(__pyx_d, __pyx_n_s_DTYPE, __pyx_t_3) < 0) __PYX_ERR(0, 29, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "photutils/geometry/elliptical_overlap.pyx":40 * * * def elliptical_overlap_grid(double xmin, double xmax, double ymin, double ymax, # <<<<<<<<<<<<<< * int nx, int ny, double rx, double ry, double theta, * int use_exact, int subpixels): */ __pyx_t_3 = PyCFunction_NewEx(&__pyx_mdef_9photutils_8geometry_18elliptical_overlap_1elliptical_overlap_grid, NULL, __pyx_n_s_photutils_geometry_elliptical_ov_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 40, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); if (PyDict_SetItem(__pyx_d, __pyx_n_s_elliptical_overlap_grid, __pyx_t_3) < 0) __PYX_ERR(0, 40, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "photutils/geometry/elliptical_overlap.pyx":1 * # Licensed under a 3-clause BSD style license - see LICENSE.rst # <<<<<<<<<<<<<< * """ * The functions defined here allow one to determine the 
exact area of */ __pyx_t_3 = __Pyx_PyDict_NewPresized(0); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); if (PyDict_SetItem(__pyx_d, __pyx_n_s_test, __pyx_t_3) < 0) __PYX_ERR(0, 1, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1021 * raise ImportError("numpy.core.umath failed to import") * * cdef inline int import_ufunc() except -1: # <<<<<<<<<<<<<< * try: * _import_umath() */ /*--- Wrapped vars code ---*/ goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); if (__pyx_m) { if (__pyx_d) { __Pyx_AddTraceback("init photutils.geometry.elliptical_overlap", 0, __pyx_lineno, __pyx_filename); } Py_DECREF(__pyx_m); __pyx_m = 0; } else if (!PyErr_Occurred()) { PyErr_SetString(PyExc_ImportError, "init photutils.geometry.elliptical_overlap"); } __pyx_L0:; __Pyx_RefNannyFinishContext(); #if CYTHON_PEP489_MULTI_PHASE_INIT return (__pyx_m != NULL) ? 0 : -1; #elif PY_MAJOR_VERSION >= 3 return __pyx_m; #else return; #endif } /* --- Runtime support code --- */ /* Refnanny */ #if CYTHON_REFNANNY static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) { PyObject *m = NULL, *p = NULL; void *r = NULL; m = PyImport_ImportModule((char *)modname); if (!m) goto end; p = PyObject_GetAttrString(m, (char *)"RefNannyAPI"); if (!p) goto end; r = PyLong_AsVoidPtr(p); end: Py_XDECREF(p); Py_XDECREF(m); return (__Pyx_RefNannyAPIStruct *)r; } #endif /* GetBuiltinName */ static PyObject *__Pyx_GetBuiltinName(PyObject *name) { PyObject* result = __Pyx_PyObject_GetAttrStr(__pyx_b, name); if (unlikely(!result)) { PyErr_Format(PyExc_NameError, #if PY_MAJOR_VERSION >= 3 "name '%U' is not defined", name); #else "name '%.200s' is not defined", PyString_AS_STRING(name)); #endif } return result; } /* RaiseArgTupleInvalid */ static void __Pyx_RaiseArgtupleInvalid( const char* func_name, int exact, Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found) { Py_ssize_t num_expected; const char *more_or_less; if (num_found < num_min) { num_expected = num_min; more_or_less = "at least"; } else { num_expected = num_max; more_or_less = "at most"; } if (exact) { more_or_less = "exactly"; } PyErr_Format(PyExc_TypeError, "%.200s() takes %.8s %" CYTHON_FORMAT_SSIZE_T "d positional argument%.1s (%" CYTHON_FORMAT_SSIZE_T "d given)", func_name, more_or_less, num_expected, (num_expected == 1) ? 
"" : "s", num_found); } /* RaiseDoubleKeywords */ static void __Pyx_RaiseDoubleKeywordsError( const char* func_name, PyObject* kw_name) { PyErr_Format(PyExc_TypeError, #if PY_MAJOR_VERSION >= 3 "%s() got multiple values for keyword argument '%U'", func_name, kw_name); #else "%s() got multiple values for keyword argument '%s'", func_name, PyString_AsString(kw_name)); #endif } /* ParseKeywords */ static int __Pyx_ParseOptionalKeywords( PyObject *kwds, PyObject **argnames[], PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args, const char* function_name) { PyObject *key = 0, *value = 0; Py_ssize_t pos = 0; PyObject*** name; PyObject*** first_kw_arg = argnames + num_pos_args; while (PyDict_Next(kwds, &pos, &key, &value)) { name = first_kw_arg; while (*name && (**name != key)) name++; if (*name) { values[name-argnames] = value; continue; } name = first_kw_arg; #if PY_MAJOR_VERSION < 3 if (likely(PyString_CheckExact(key)) || likely(PyString_Check(key))) { while (*name) { if ((CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**name) == PyString_GET_SIZE(key)) && _PyString_Eq(**name, key)) { values[name-argnames] = value; break; } name++; } if (*name) continue; else { PyObject*** argname = argnames; while (argname != first_kw_arg) { if ((**argname == key) || ( (CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**argname) == PyString_GET_SIZE(key)) && _PyString_Eq(**argname, key))) { goto arg_passed_twice; } argname++; } } } else #endif if (likely(PyUnicode_Check(key))) { while (*name) { int cmp = (**name == key) ? 0 : #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 (PyUnicode_GET_SIZE(**name) != PyUnicode_GET_SIZE(key)) ? 1 : #endif PyUnicode_Compare(**name, key); if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; if (cmp == 0) { values[name-argnames] = value; break; } name++; } if (*name) continue; else { PyObject*** argname = argnames; while (argname != first_kw_arg) { int cmp = (**argname == key) ? 0 : #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 (PyUnicode_GET_SIZE(**argname) != PyUnicode_GET_SIZE(key)) ? 
1 : #endif PyUnicode_Compare(**argname, key); if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; if (cmp == 0) goto arg_passed_twice; argname++; } } } else goto invalid_keyword_type; if (kwds2) { if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad; } else { goto invalid_keyword; } } return 0; arg_passed_twice: __Pyx_RaiseDoubleKeywordsError(function_name, key); goto bad; invalid_keyword_type: PyErr_Format(PyExc_TypeError, "%.200s() keywords must be strings", function_name); goto bad; invalid_keyword: PyErr_Format(PyExc_TypeError, #if PY_MAJOR_VERSION < 3 "%.200s() got an unexpected keyword argument '%.200s'", function_name, PyString_AsString(key)); #else "%s() got an unexpected keyword argument '%U'", function_name, key); #endif bad: return -1; } /* GetModuleGlobalName */ static CYTHON_INLINE PyObject *__Pyx_GetModuleGlobalName(PyObject *name) { PyObject *result; #if !CYTHON_AVOID_BORROWED_REFS result = PyDict_GetItem(__pyx_d, name); if (likely(result)) { Py_INCREF(result); } else { #else result = PyObject_GetItem(__pyx_d, name); if (!result) { PyErr_Clear(); #endif result = __Pyx_GetBuiltinName(name); } return result; } /* PyObjectCall */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw) { PyObject *result; ternaryfunc call = func->ob_type->tp_call; if (unlikely(!call)) return PyObject_Call(func, arg, kw); if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) return NULL; result = (*call)(func, arg, kw); Py_LeaveRecursiveCall(); if (unlikely(!result) && unlikely(!PyErr_Occurred())) { PyErr_SetString( PyExc_SystemError, "NULL result without error in PyObject_Call"); } return result; } #endif /* ExtTypeTest */ static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type) { if (unlikely(!type)) { PyErr_SetString(PyExc_SystemError, "Missing type object"); return 0; } if (likely(__Pyx_TypeCheck(obj, type))) return 1; PyErr_Format(PyExc_TypeError, "Cannot convert %.200s to %.200s", Py_TYPE(obj)->tp_name, type->tp_name); return 0; } /* IsLittleEndian */ static CYTHON_INLINE int __Pyx_Is_Little_Endian(void) { union { uint32_t u32; uint8_t u8[4]; } S; S.u32 = 0x01020304; return S.u8[0] == 4; } /* BufferFormatCheck */ static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx, __Pyx_BufFmt_StackElem* stack, __Pyx_TypeInfo* type) { stack[0].field = &ctx->root; stack[0].parent_offset = 0; ctx->root.type = type; ctx->root.name = "buffer dtype"; ctx->root.offset = 0; ctx->head = stack; ctx->head->field = &ctx->root; ctx->fmt_offset = 0; ctx->head->parent_offset = 0; ctx->new_packmode = '@'; ctx->enc_packmode = '@'; ctx->new_count = 1; ctx->enc_count = 0; ctx->enc_type = 0; ctx->is_complex = 0; ctx->is_valid_array = 0; ctx->struct_alignment = 0; while (type->typegroup == 'S') { ++ctx->head; ctx->head->field = type->fields; ctx->head->parent_offset = 0; type = type->fields->type; } } static int __Pyx_BufFmt_ParseNumber(const char** ts) { int count; const char* t = *ts; if (*t < '0' || *t > '9') { return -1; } else { count = *t++ - '0'; while (*t >= '0' && *t < '9') { count *= 10; count += *t++ - '0'; } } *ts = t; return count; } static int __Pyx_BufFmt_ExpectNumber(const char **ts) { int number = __Pyx_BufFmt_ParseNumber(ts); if (number == -1) PyErr_Format(PyExc_ValueError,\ "Does not understand character buffer dtype format string ('%c')", **ts); return number; } static void __Pyx_BufFmt_RaiseUnexpectedChar(char ch) { PyErr_Format(PyExc_ValueError, "Unexpected format string character: 
'%c'", ch); } static const char* __Pyx_BufFmt_DescribeTypeChar(char ch, int is_complex) { switch (ch) { case 'c': return "'char'"; case 'b': return "'signed char'"; case 'B': return "'unsigned char'"; case 'h': return "'short'"; case 'H': return "'unsigned short'"; case 'i': return "'int'"; case 'I': return "'unsigned int'"; case 'l': return "'long'"; case 'L': return "'unsigned long'"; case 'q': return "'long long'"; case 'Q': return "'unsigned long long'"; case 'f': return (is_complex ? "'complex float'" : "'float'"); case 'd': return (is_complex ? "'complex double'" : "'double'"); case 'g': return (is_complex ? "'complex long double'" : "'long double'"); case 'T': return "a struct"; case 'O': return "Python object"; case 'P': return "a pointer"; case 's': case 'p': return "a string"; case 0: return "end"; default: return "unparseable format string"; } } static size_t __Pyx_BufFmt_TypeCharToStandardSize(char ch, int is_complex) { switch (ch) { case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return 2; case 'i': case 'I': case 'l': case 'L': return 4; case 'q': case 'Q': return 8; case 'f': return (is_complex ? 8 : 4); case 'd': return (is_complex ? 16 : 8); case 'g': { PyErr_SetString(PyExc_ValueError, "Python does not define a standard format string size for long double ('g')."); return 0; } case 'O': case 'P': return sizeof(void*); default: __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } static size_t __Pyx_BufFmt_TypeCharToNativeSize(char ch, int is_complex) { switch (ch) { case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return sizeof(short); case 'i': case 'I': return sizeof(int); case 'l': case 'L': return sizeof(long); #ifdef HAVE_LONG_LONG case 'q': case 'Q': return sizeof(PY_LONG_LONG); #endif case 'f': return sizeof(float) * (is_complex ? 2 : 1); case 'd': return sizeof(double) * (is_complex ? 2 : 1); case 'g': return sizeof(long double) * (is_complex ? 2 : 1); case 'O': case 'P': return sizeof(void*); default: { __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } } typedef struct { char c; short x; } __Pyx_st_short; typedef struct { char c; int x; } __Pyx_st_int; typedef struct { char c; long x; } __Pyx_st_long; typedef struct { char c; float x; } __Pyx_st_float; typedef struct { char c; double x; } __Pyx_st_double; typedef struct { char c; long double x; } __Pyx_st_longdouble; typedef struct { char c; void *x; } __Pyx_st_void_p; #ifdef HAVE_LONG_LONG typedef struct { char c; PY_LONG_LONG x; } __Pyx_st_longlong; #endif static size_t __Pyx_BufFmt_TypeCharToAlignment(char ch, CYTHON_UNUSED int is_complex) { switch (ch) { case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return sizeof(__Pyx_st_short) - sizeof(short); case 'i': case 'I': return sizeof(__Pyx_st_int) - sizeof(int); case 'l': case 'L': return sizeof(__Pyx_st_long) - sizeof(long); #ifdef HAVE_LONG_LONG case 'q': case 'Q': return sizeof(__Pyx_st_longlong) - sizeof(PY_LONG_LONG); #endif case 'f': return sizeof(__Pyx_st_float) - sizeof(float); case 'd': return sizeof(__Pyx_st_double) - sizeof(double); case 'g': return sizeof(__Pyx_st_longdouble) - sizeof(long double); case 'P': case 'O': return sizeof(__Pyx_st_void_p) - sizeof(void*); default: __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } /* These are for computing the padding at the end of the struct to align on the first member of the struct. This will probably be the same as above, but we don't have any guarantees.
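For example (an illustrative note, assuming a common 64-bit ABI): sizeof(__Pyx_pad_double) - sizeof(double) typically evaluates to 8, i.e. the alignment of double, because the compiler pads struct { double x; char c; } out to a multiple of that alignment. The exact amount is implementation-defined, which is why it is measured with sizeof() here rather than hard-coded.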
*/ typedef struct { short x; char c; } __Pyx_pad_short; typedef struct { int x; char c; } __Pyx_pad_int; typedef struct { long x; char c; } __Pyx_pad_long; typedef struct { float x; char c; } __Pyx_pad_float; typedef struct { double x; char c; } __Pyx_pad_double; typedef struct { long double x; char c; } __Pyx_pad_longdouble; typedef struct { void *x; char c; } __Pyx_pad_void_p; #ifdef HAVE_LONG_LONG typedef struct { PY_LONG_LONG x; char c; } __Pyx_pad_longlong; #endif static size_t __Pyx_BufFmt_TypeCharToPadding(char ch, CYTHON_UNUSED int is_complex) { switch (ch) { case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return sizeof(__Pyx_pad_short) - sizeof(short); case 'i': case 'I': return sizeof(__Pyx_pad_int) - sizeof(int); case 'l': case 'L': return sizeof(__Pyx_pad_long) - sizeof(long); #ifdef HAVE_LONG_LONG case 'q': case 'Q': return sizeof(__Pyx_pad_longlong) - sizeof(PY_LONG_LONG); #endif case 'f': return sizeof(__Pyx_pad_float) - sizeof(float); case 'd': return sizeof(__Pyx_pad_double) - sizeof(double); case 'g': return sizeof(__Pyx_pad_longdouble) - sizeof(long double); case 'P': case 'O': return sizeof(__Pyx_pad_void_p) - sizeof(void*); default: __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } static char __Pyx_BufFmt_TypeCharToGroup(char ch, int is_complex) { switch (ch) { case 'c': return 'H'; case 'b': case 'h': case 'i': case 'l': case 'q': case 's': case 'p': return 'I'; case 'B': case 'H': case 'I': case 'L': case 'Q': return 'U'; case 'f': case 'd': case 'g': return (is_complex ? 'C' : 'R'); case 'O': return 'O'; case 'P': return 'P'; default: { __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } } static void __Pyx_BufFmt_RaiseExpected(__Pyx_BufFmt_Context* ctx) { if (ctx->head == NULL || ctx->head->field == &ctx->root) { const char* expected; const char* quote; if (ctx->head == NULL) { expected = "end"; quote = ""; } else { expected = ctx->head->field->type->name; quote = "'"; } PyErr_Format(PyExc_ValueError, "Buffer dtype mismatch, expected %s%s%s but got %s", quote, expected, quote, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex)); } else { __Pyx_StructField* field = ctx->head->field; __Pyx_StructField* parent = (ctx->head - 1)->field; PyErr_Format(PyExc_ValueError, "Buffer dtype mismatch, expected '%s' but got %s in '%s.%s'", field->type->name, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex), parent->type->name, field->name); } } static int __Pyx_BufFmt_ProcessTypeChunk(__Pyx_BufFmt_Context* ctx) { char group; size_t size, offset, arraysize = 1; if (ctx->enc_type == 0) return 0; if (ctx->head->field->type->arraysize[0]) { int i, ndim = 0; if (ctx->enc_type == 's' || ctx->enc_type == 'p') { ctx->is_valid_array = ctx->head->field->type->ndim == 1; ndim = 1; if (ctx->enc_count != ctx->head->field->type->arraysize[0]) { PyErr_Format(PyExc_ValueError, "Expected a dimension of size %zu, got %zu", ctx->head->field->type->arraysize[0], ctx->enc_count); return -1; } } if (!ctx->is_valid_array) { PyErr_Format(PyExc_ValueError, "Expected %d dimensions, got %d", ctx->head->field->type->ndim, ndim); return -1; } for (i = 0; i < ctx->head->field->type->ndim; i++) { arraysize *= ctx->head->field->type->arraysize[i]; } ctx->is_valid_array = 0; ctx->enc_count = 1; } group = __Pyx_BufFmt_TypeCharToGroup(ctx->enc_type, ctx->is_complex); do { __Pyx_StructField* field = ctx->head->field; __Pyx_TypeInfo* type = field->type; if (ctx->enc_packmode == '@' || ctx->enc_packmode == '^') { size = 
__Pyx_BufFmt_TypeCharToNativeSize(ctx->enc_type, ctx->is_complex); } else { size = __Pyx_BufFmt_TypeCharToStandardSize(ctx->enc_type, ctx->is_complex); } if (ctx->enc_packmode == '@') { size_t align_at = __Pyx_BufFmt_TypeCharToAlignment(ctx->enc_type, ctx->is_complex); size_t align_mod_offset; if (align_at == 0) return -1; align_mod_offset = ctx->fmt_offset % align_at; if (align_mod_offset > 0) ctx->fmt_offset += align_at - align_mod_offset; if (ctx->struct_alignment == 0) ctx->struct_alignment = __Pyx_BufFmt_TypeCharToPadding(ctx->enc_type, ctx->is_complex); } if (type->size != size || type->typegroup != group) { if (type->typegroup == 'C' && type->fields != NULL) { size_t parent_offset = ctx->head->parent_offset + field->offset; ++ctx->head; ctx->head->field = type->fields; ctx->head->parent_offset = parent_offset; continue; } if ((type->typegroup == 'H' || group == 'H') && type->size == size) { } else { __Pyx_BufFmt_RaiseExpected(ctx); return -1; } } offset = ctx->head->parent_offset + field->offset; if (ctx->fmt_offset != offset) { PyErr_Format(PyExc_ValueError, "Buffer dtype mismatch; next field is at offset %" CYTHON_FORMAT_SSIZE_T "d but %" CYTHON_FORMAT_SSIZE_T "d expected", (Py_ssize_t)ctx->fmt_offset, (Py_ssize_t)offset); return -1; } ctx->fmt_offset += size; if (arraysize) ctx->fmt_offset += (arraysize - 1) * size; --ctx->enc_count; while (1) { if (field == &ctx->root) { ctx->head = NULL; if (ctx->enc_count != 0) { __Pyx_BufFmt_RaiseExpected(ctx); return -1; } break; } ctx->head->field = ++field; if (field->type == NULL) { --ctx->head; field = ctx->head->field; continue; } else if (field->type->typegroup == 'S') { size_t parent_offset = ctx->head->parent_offset + field->offset; if (field->type->fields->type == NULL) continue; field = field->type->fields; ++ctx->head; ctx->head->field = field; ctx->head->parent_offset = parent_offset; break; } else { break; } } } while (ctx->enc_count); ctx->enc_type = 0; ctx->is_complex = 0; return 0; } static PyObject * __pyx_buffmt_parse_array(__Pyx_BufFmt_Context* ctx, const char** tsp) { const char *ts = *tsp; int i = 0, number; int ndim = ctx->head->field->type->ndim; ; ++ts; if (ctx->new_count != 1) { PyErr_SetString(PyExc_ValueError, "Cannot handle repeated arrays in format string"); return NULL; } if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; while (*ts && *ts != ')') { switch (*ts) { case ' ': case '\f': case '\r': case '\n': case '\t': case '\v': continue; default: break; } number = __Pyx_BufFmt_ExpectNumber(&ts); if (number == -1) return NULL; if (i < ndim && (size_t) number != ctx->head->field->type->arraysize[i]) return PyErr_Format(PyExc_ValueError, "Expected a dimension of size %zu, got %d", ctx->head->field->type->arraysize[i], number); if (*ts != ',' && *ts != ')') return PyErr_Format(PyExc_ValueError, "Expected a comma in format string, got '%c'", *ts); if (*ts == ',') ts++; i++; } if (i != ndim) return PyErr_Format(PyExc_ValueError, "Expected %d dimension(s), got %d", ctx->head->field->type->ndim, i); if (!*ts) { PyErr_SetString(PyExc_ValueError, "Unexpected end of format string, expected ')'"); return NULL; } ctx->is_valid_array = 1; ctx->new_count = 1; *tsp = ++ts; return Py_None; } static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts) { int got_Z = 0; while (1) { switch(*ts) { case 0: if (ctx->enc_type != 0 && ctx->head == NULL) { __Pyx_BufFmt_RaiseExpected(ctx); return NULL; } if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; if (ctx->head != NULL) { 
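/* the format string ended while fields of the expected buffer dtype were still unmatched */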
__Pyx_BufFmt_RaiseExpected(ctx); return NULL; } return ts; case ' ': case '\r': case '\n': ++ts; break; case '<': if (!__Pyx_Is_Little_Endian()) { PyErr_SetString(PyExc_ValueError, "Little-endian buffer not supported on big-endian compiler"); return NULL; } ctx->new_packmode = '='; ++ts; break; case '>': case '!': if (__Pyx_Is_Little_Endian()) { PyErr_SetString(PyExc_ValueError, "Big-endian buffer not supported on little-endian compiler"); return NULL; } ctx->new_packmode = '='; ++ts; break; case '=': case '@': case '^': ctx->new_packmode = *ts++; break; case 'T': { const char* ts_after_sub; size_t i, struct_count = ctx->new_count; size_t struct_alignment = ctx->struct_alignment; ctx->new_count = 1; ++ts; if (*ts != '{') { PyErr_SetString(PyExc_ValueError, "Buffer acquisition: Expected '{' after 'T'"); return NULL; } if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->enc_type = 0; ctx->enc_count = 0; ctx->struct_alignment = 0; ++ts; ts_after_sub = ts; for (i = 0; i != struct_count; ++i) { ts_after_sub = __Pyx_BufFmt_CheckString(ctx, ts); if (!ts_after_sub) return NULL; } ts = ts_after_sub; if (struct_alignment) ctx->struct_alignment = struct_alignment; } break; case '}': { size_t alignment = ctx->struct_alignment; ++ts; if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->enc_type = 0; if (alignment && ctx->fmt_offset % alignment) { ctx->fmt_offset += alignment - (ctx->fmt_offset % alignment); } } return ts; case 'x': if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->fmt_offset += ctx->new_count; ctx->new_count = 1; ctx->enc_count = 0; ctx->enc_type = 0; ctx->enc_packmode = ctx->new_packmode; ++ts; break; case 'Z': got_Z = 1; ++ts; if (*ts != 'f' && *ts != 'd' && *ts != 'g') { __Pyx_BufFmt_RaiseUnexpectedChar('Z'); return NULL; } case 'c': case 'b': case 'B': case 'h': case 'H': case 'i': case 'I': case 'l': case 'L': case 'q': case 'Q': case 'f': case 'd': case 'g': case 'O': case 'p': if (ctx->enc_type == *ts && got_Z == ctx->is_complex && ctx->enc_packmode == ctx->new_packmode) { ctx->enc_count += ctx->new_count; ctx->new_count = 1; got_Z = 0; ++ts; break; } case 's': if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->enc_count = ctx->new_count; ctx->enc_packmode = ctx->new_packmode; ctx->enc_type = *ts; ctx->is_complex = got_Z; ++ts; ctx->new_count = 1; got_Z = 0; break; case ':': ++ts; while(*ts != ':') ++ts; ++ts; break; case '(': if (!__pyx_buffmt_parse_array(ctx, &ts)) return NULL; break; default: { int number = __Pyx_BufFmt_ExpectNumber(&ts); if (number == -1) return NULL; ctx->new_count = (size_t)number; } } } } /* BufferGetAndValidate */ static CYTHON_INLINE void __Pyx_SafeReleaseBuffer(Py_buffer* info) { if (unlikely(info->buf == NULL)) return; if (info->suboffsets == __Pyx_minusones) info->suboffsets = NULL; __Pyx_ReleaseBuffer(info); } static void __Pyx_ZeroBuffer(Py_buffer* buf) { buf->buf = NULL; buf->obj = NULL; buf->strides = __Pyx_zeros; buf->shape = __Pyx_zeros; buf->suboffsets = __Pyx_minusones; } static int __Pyx__GetBufferAndValidate( Py_buffer* buf, PyObject* obj, __Pyx_TypeInfo* dtype, int flags, int nd, int cast, __Pyx_BufFmt_StackElem* stack) { buf->buf = NULL; if (unlikely(__Pyx_GetBuffer(obj, buf, flags) == -1)) { __Pyx_ZeroBuffer(buf); return -1; } if (unlikely(buf->ndim != nd)) { PyErr_Format(PyExc_ValueError, "Buffer has wrong number of dimensions (expected %d, got %d)", nd, buf->ndim); goto fail; } if (!cast) { __Pyx_BufFmt_Context ctx; __Pyx_BufFmt_Init(&ctx, stack, dtype); if 
(!__Pyx_BufFmt_CheckString(&ctx, buf->format)) goto fail; } if (unlikely((unsigned)buf->itemsize != dtype->size)) { PyErr_Format(PyExc_ValueError, "Item size of buffer (%" CYTHON_FORMAT_SSIZE_T "d byte%s) does not match size of '%s' (%" CYTHON_FORMAT_SSIZE_T "d byte%s)", buf->itemsize, (buf->itemsize > 1) ? "s" : "", dtype->name, (Py_ssize_t)dtype->size, (dtype->size > 1) ? "s" : ""); goto fail; } if (buf->suboffsets == NULL) buf->suboffsets = __Pyx_minusones; return 0; fail:; __Pyx_SafeReleaseBuffer(buf); return -1; } /* BufferIndexError */ static void __Pyx_RaiseBufferIndexError(int axis) { PyErr_Format(PyExc_IndexError, "Out of bounds on buffer access (axis %d)", axis); } /* PyErrFetchRestore */ #if CYTHON_FAST_THREAD_STATE static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) { PyObject *tmp_type, *tmp_value, *tmp_tb; tmp_type = tstate->curexc_type; tmp_value = tstate->curexc_value; tmp_tb = tstate->curexc_traceback; tstate->curexc_type = type; tstate->curexc_value = value; tstate->curexc_traceback = tb; Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); } static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { *type = tstate->curexc_type; *value = tstate->curexc_value; *tb = tstate->curexc_traceback; tstate->curexc_type = 0; tstate->curexc_value = 0; tstate->curexc_traceback = 0; } #endif /* WriteUnraisableException */ static void __Pyx_WriteUnraisable(const char *name, CYTHON_UNUSED int clineno, CYTHON_UNUSED int lineno, CYTHON_UNUSED const char *filename, int full_traceback, CYTHON_UNUSED int nogil) { PyObject *old_exc, *old_val, *old_tb; PyObject *ctx; __Pyx_PyThreadState_declare #ifdef WITH_THREAD PyGILState_STATE state; if (nogil) state = PyGILState_Ensure(); #ifdef _MSC_VER else state = (PyGILState_STATE)-1; #endif #endif __Pyx_PyThreadState_assign __Pyx_ErrFetch(&old_exc, &old_val, &old_tb); if (full_traceback) { Py_XINCREF(old_exc); Py_XINCREF(old_val); Py_XINCREF(old_tb); __Pyx_ErrRestore(old_exc, old_val, old_tb); PyErr_PrintEx(1); } #if PY_MAJOR_VERSION < 3 ctx = PyString_FromString(name); #else ctx = PyUnicode_FromString(name); #endif __Pyx_ErrRestore(old_exc, old_val, old_tb); if (!ctx) { PyErr_WriteUnraisable(Py_None); } else { PyErr_WriteUnraisable(ctx); Py_DECREF(ctx); } #ifdef WITH_THREAD if (nogil) PyGILState_Release(state); #endif } /* RaiseException */ #if PY_MAJOR_VERSION < 3 static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, CYTHON_UNUSED PyObject *cause) { __Pyx_PyThreadState_declare Py_XINCREF(type); if (!value || value == Py_None) value = NULL; else Py_INCREF(value); if (!tb || tb == Py_None) tb = NULL; else { Py_INCREF(tb); if (!PyTraceBack_Check(tb)) { PyErr_SetString(PyExc_TypeError, "raise: arg 3 must be a traceback or None"); goto raise_error; } } if (PyType_Check(type)) { #if CYTHON_COMPILING_IN_PYPY if (!value) { Py_INCREF(Py_None); value = Py_None; } #endif PyErr_NormalizeException(&type, &value, &tb); } else { if (value) { PyErr_SetString(PyExc_TypeError, "instance exception may not have a separate value"); goto raise_error; } value = type; type = (PyObject*) Py_TYPE(type); Py_INCREF(type); if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) { PyErr_SetString(PyExc_TypeError, "raise: exception class must be a subclass of BaseException"); goto raise_error; } } __Pyx_PyThreadState_assign __Pyx_ErrRestore(type, value, tb); return; raise_error: 
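/* error path: release the references taken on type/value/tb above before returning */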
Py_XDECREF(value); Py_XDECREF(type); Py_XDECREF(tb); return; } #else static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) { PyObject* owned_instance = NULL; if (tb == Py_None) { tb = 0; } else if (tb && !PyTraceBack_Check(tb)) { PyErr_SetString(PyExc_TypeError, "raise: arg 3 must be a traceback or None"); goto bad; } if (value == Py_None) value = 0; if (PyExceptionInstance_Check(type)) { if (value) { PyErr_SetString(PyExc_TypeError, "instance exception may not have a separate value"); goto bad; } value = type; type = (PyObject*) Py_TYPE(value); } else if (PyExceptionClass_Check(type)) { PyObject *instance_class = NULL; if (value && PyExceptionInstance_Check(value)) { instance_class = (PyObject*) Py_TYPE(value); if (instance_class != type) { int is_subclass = PyObject_IsSubclass(instance_class, type); if (!is_subclass) { instance_class = NULL; } else if (unlikely(is_subclass == -1)) { goto bad; } else { type = instance_class; } } } if (!instance_class) { PyObject *args; if (!value) args = PyTuple_New(0); else if (PyTuple_Check(value)) { Py_INCREF(value); args = value; } else args = PyTuple_Pack(1, value); if (!args) goto bad; owned_instance = PyObject_Call(type, args, NULL); Py_DECREF(args); if (!owned_instance) goto bad; value = owned_instance; if (!PyExceptionInstance_Check(value)) { PyErr_Format(PyExc_TypeError, "calling %R should have returned an instance of " "BaseException, not %R", type, Py_TYPE(value)); goto bad; } } } else { PyErr_SetString(PyExc_TypeError, "raise: exception class must be a subclass of BaseException"); goto bad; } if (cause) { PyObject *fixed_cause; if (cause == Py_None) { fixed_cause = NULL; } else if (PyExceptionClass_Check(cause)) { fixed_cause = PyObject_CallObject(cause, NULL); if (fixed_cause == NULL) goto bad; } else if (PyExceptionInstance_Check(cause)) { fixed_cause = cause; Py_INCREF(fixed_cause); } else { PyErr_SetString(PyExc_TypeError, "exception causes must derive from " "BaseException"); goto bad; } PyException_SetCause(value, fixed_cause); } PyErr_SetObject(type, value); if (tb) { #if CYTHON_COMPILING_IN_PYPY PyObject *tmp_type, *tmp_value, *tmp_tb; PyErr_Fetch(&tmp_type, &tmp_value, &tmp_tb); Py_INCREF(tb); PyErr_Restore(tmp_type, tmp_value, tb); Py_XDECREF(tmp_tb); #else PyThreadState *tstate = __Pyx_PyThreadState_Current; PyObject* tmp_tb = tstate->curexc_traceback; if (tb != tmp_tb) { Py_INCREF(tb); tstate->curexc_traceback = tb; Py_XDECREF(tmp_tb); } #endif } bad: Py_XDECREF(owned_instance); return; } #endif /* RaiseTooManyValuesToUnpack */ static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected) { PyErr_Format(PyExc_ValueError, "too many values to unpack (expected %" CYTHON_FORMAT_SSIZE_T "d)", expected); } /* RaiseNeedMoreValuesToUnpack */ static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index) { PyErr_Format(PyExc_ValueError, "need more than %" CYTHON_FORMAT_SSIZE_T "d value%.1s to unpack", index, (index == 1) ? 
"" : "s"); } /* RaiseNoneIterError */ static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable"); } /* SaveResetException */ #if CYTHON_FAST_THREAD_STATE static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { *type = tstate->exc_type; *value = tstate->exc_value; *tb = tstate->exc_traceback; Py_XINCREF(*type); Py_XINCREF(*value); Py_XINCREF(*tb); } static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) { PyObject *tmp_type, *tmp_value, *tmp_tb; tmp_type = tstate->exc_type; tmp_value = tstate->exc_value; tmp_tb = tstate->exc_traceback; tstate->exc_type = type; tstate->exc_value = value; tstate->exc_traceback = tb; Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); } #endif /* PyErrExceptionMatches */ #if CYTHON_FAST_THREAD_STATE static int __Pyx_PyErr_ExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) { Py_ssize_t i, n; n = PyTuple_GET_SIZE(tuple); #if PY_MAJOR_VERSION >= 3 for (i=0; icurexc_type; if (exc_type == err) return 1; if (unlikely(!exc_type)) return 0; if (unlikely(PyTuple_Check(err))) return __Pyx_PyErr_ExceptionMatchesTuple(exc_type, err); return __Pyx_PyErr_GivenExceptionMatches(exc_type, err); } #endif /* GetException */ #if CYTHON_FAST_THREAD_STATE static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { #else static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb) { #endif PyObject *local_type, *local_value, *local_tb; #if CYTHON_FAST_THREAD_STATE PyObject *tmp_type, *tmp_value, *tmp_tb; local_type = tstate->curexc_type; local_value = tstate->curexc_value; local_tb = tstate->curexc_traceback; tstate->curexc_type = 0; tstate->curexc_value = 0; tstate->curexc_traceback = 0; #else PyErr_Fetch(&local_type, &local_value, &local_tb); #endif PyErr_NormalizeException(&local_type, &local_value, &local_tb); #if CYTHON_FAST_THREAD_STATE if (unlikely(tstate->curexc_type)) #else if (unlikely(PyErr_Occurred())) #endif goto bad; #if PY_MAJOR_VERSION >= 3 if (local_tb) { if (unlikely(PyException_SetTraceback(local_value, local_tb) < 0)) goto bad; } #endif Py_XINCREF(local_tb); Py_XINCREF(local_type); Py_XINCREF(local_value); *type = local_type; *value = local_value; *tb = local_tb; #if CYTHON_FAST_THREAD_STATE tmp_type = tstate->exc_type; tmp_value = tstate->exc_value; tmp_tb = tstate->exc_traceback; tstate->exc_type = local_type; tstate->exc_value = local_value; tstate->exc_traceback = local_tb; Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); #else PyErr_SetExcInfo(local_type, local_value, local_tb); #endif return 0; bad: *type = 0; *value = 0; *tb = 0; Py_XDECREF(local_type); Py_XDECREF(local_value); Py_XDECREF(local_tb); return -1; } /* Import */ static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level) { PyObject *empty_list = 0; PyObject *module = 0; PyObject *global_dict = 0; PyObject *empty_dict = 0; PyObject *list; #if PY_MAJOR_VERSION < 3 PyObject *py_import; py_import = __Pyx_PyObject_GetAttrStr(__pyx_b, __pyx_n_s_import); if (!py_import) goto bad; #endif if (from_list) list = from_list; else { empty_list = PyList_New(0); if (!empty_list) goto bad; list = empty_list; } global_dict = PyModule_GetDict(__pyx_m); if (!global_dict) goto bad; empty_dict = PyDict_New(); if (!empty_dict) goto bad; { #if PY_MAJOR_VERSION >= 3 if (level == -1) { if 
(strchr(__Pyx_MODULE_NAME, '.')) { module = PyImport_ImportModuleLevelObject( name, global_dict, empty_dict, list, 1); if (!module) { if (!PyErr_ExceptionMatches(PyExc_ImportError)) goto bad; PyErr_Clear(); } } level = 0; } #endif if (!module) { #if PY_MAJOR_VERSION < 3 PyObject *py_level = PyInt_FromLong(level); if (!py_level) goto bad; module = PyObject_CallFunctionObjArgs(py_import, name, global_dict, empty_dict, list, py_level, NULL); Py_DECREF(py_level); #else module = PyImport_ImportModuleLevelObject( name, global_dict, empty_dict, list, level); #endif } } bad: #if PY_MAJOR_VERSION < 3 Py_XDECREF(py_import); #endif Py_XDECREF(empty_list); Py_XDECREF(empty_dict); return module; } /* CLineInTraceback */ #ifndef CYTHON_CLINE_IN_TRACEBACK static int __Pyx_CLineForTraceback(CYTHON_UNUSED PyThreadState *tstate, int c_line) { PyObject *use_cline; PyObject *ptype, *pvalue, *ptraceback; #if CYTHON_COMPILING_IN_CPYTHON PyObject **cython_runtime_dict; #endif __Pyx_ErrFetchInState(tstate, &ptype, &pvalue, &ptraceback); #if CYTHON_COMPILING_IN_CPYTHON cython_runtime_dict = _PyObject_GetDictPtr(__pyx_cython_runtime); if (likely(cython_runtime_dict)) { use_cline = PyDict_GetItem(*cython_runtime_dict, __pyx_n_s_cline_in_traceback); } else #endif { PyObject *use_cline_obj = __Pyx_PyObject_GetAttrStr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback); if (use_cline_obj) { use_cline = PyObject_Not(use_cline_obj) ? Py_False : Py_True; Py_DECREF(use_cline_obj); } else { PyErr_Clear(); use_cline = NULL; } } if (!use_cline) { c_line = 0; PyObject_SetAttr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback, Py_False); } else if (PyObject_Not(use_cline) != 0) { c_line = 0; } __Pyx_ErrRestoreInState(tstate, ptype, pvalue, ptraceback); return c_line; } #endif /* CodeObjectCache */ static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) { int start = 0, mid = 0, end = count - 1; if (end >= 0 && code_line > entries[end].code_line) { return count; } while (start < end) { mid = start + (end - start) / 2; if (code_line < entries[mid].code_line) { end = mid; } else if (code_line > entries[mid].code_line) { start = mid + 1; } else { return mid; } } if (code_line <= entries[mid].code_line) { return mid; } else { return mid + 1; } } static PyCodeObject *__pyx_find_code_object(int code_line) { PyCodeObject* code_object; int pos; if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) { return NULL; } pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); if (unlikely(pos >= __pyx_code_cache.count) || unlikely(__pyx_code_cache.entries[pos].code_line != code_line)) { return NULL; } code_object = __pyx_code_cache.entries[pos].code_object; Py_INCREF(code_object); return code_object; } static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) { int pos, i; __Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries; if (unlikely(!code_line)) { return; } if (unlikely(!entries)) { entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry)); if (likely(entries)) { __pyx_code_cache.entries = entries; __pyx_code_cache.max_count = 64; __pyx_code_cache.count = 1; entries[0].code_line = code_line; entries[0].code_object = code_object; Py_INCREF(code_object); } return; } pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) { PyCodeObject* tmp = 
entries[pos].code_object; entries[pos].code_object = code_object; Py_DECREF(tmp); return; } if (__pyx_code_cache.count == __pyx_code_cache.max_count) { int new_max = __pyx_code_cache.max_count + 64; entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc( __pyx_code_cache.entries, (size_t)new_max*sizeof(__Pyx_CodeObjectCacheEntry)); if (unlikely(!entries)) { return; } __pyx_code_cache.entries = entries; __pyx_code_cache.max_count = new_max; } for (i=__pyx_code_cache.count; i>pos; i--) { entries[i] = entries[i-1]; } entries[pos].code_line = code_line; entries[pos].code_object = code_object; __pyx_code_cache.count++; Py_INCREF(code_object); } /* AddTraceback */ #include "compile.h" #include "frameobject.h" #include "traceback.h" static PyCodeObject* __Pyx_CreateCodeObjectForTraceback( const char *funcname, int c_line, int py_line, const char *filename) { PyCodeObject *py_code = 0; PyObject *py_srcfile = 0; PyObject *py_funcname = 0; #if PY_MAJOR_VERSION < 3 py_srcfile = PyString_FromString(filename); #else py_srcfile = PyUnicode_FromString(filename); #endif if (!py_srcfile) goto bad; if (c_line) { #if PY_MAJOR_VERSION < 3 py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); #else py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); #endif } else { #if PY_MAJOR_VERSION < 3 py_funcname = PyString_FromString(funcname); #else py_funcname = PyUnicode_FromString(funcname); #endif } if (!py_funcname) goto bad; py_code = __Pyx_PyCode_New( 0, 0, 0, 0, 0, __pyx_empty_bytes, /*PyObject *code,*/ __pyx_empty_tuple, /*PyObject *consts,*/ __pyx_empty_tuple, /*PyObject *names,*/ __pyx_empty_tuple, /*PyObject *varnames,*/ __pyx_empty_tuple, /*PyObject *freevars,*/ __pyx_empty_tuple, /*PyObject *cellvars,*/ py_srcfile, /*PyObject *filename,*/ py_funcname, /*PyObject *name,*/ py_line, __pyx_empty_bytes /*PyObject *lnotab*/ ); Py_DECREF(py_srcfile); Py_DECREF(py_funcname); return py_code; bad: Py_XDECREF(py_srcfile); Py_XDECREF(py_funcname); return NULL; } static void __Pyx_AddTraceback(const char *funcname, int c_line, int py_line, const char *filename) { PyCodeObject *py_code = 0; PyFrameObject *py_frame = 0; PyThreadState *tstate = __Pyx_PyThreadState_Current; if (c_line) { c_line = __Pyx_CLineForTraceback(tstate, c_line); } py_code = __pyx_find_code_object(c_line ? -c_line : py_line); if (!py_code) { py_code = __Pyx_CreateCodeObjectForTraceback( funcname, c_line, py_line, filename); if (!py_code) goto bad; __pyx_insert_code_object(c_line ? 
-c_line : py_line, py_code); } py_frame = PyFrame_New( tstate, /*PyThreadState *tstate,*/ py_code, /*PyCodeObject *code,*/ __pyx_d, /*PyObject *globals,*/ 0 /*PyObject *locals*/ ); if (!py_frame) goto bad; __Pyx_PyFrame_SetLineNumber(py_frame, py_line); PyTraceBack_Here(py_frame); bad: Py_XDECREF(py_code); Py_XDECREF(py_frame); } #if PY_MAJOR_VERSION < 3 static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags) { if (PyObject_CheckBuffer(obj)) return PyObject_GetBuffer(obj, view, flags); if (__Pyx_TypeCheck(obj, __pyx_ptype_5numpy_ndarray)) return __pyx_pw_5numpy_7ndarray_1__getbuffer__(obj, view, flags); PyErr_Format(PyExc_TypeError, "'%.200s' does not have the buffer interface", Py_TYPE(obj)->tp_name); return -1; } static void __Pyx_ReleaseBuffer(Py_buffer *view) { PyObject *obj = view->obj; if (!obj) return; if (PyObject_CheckBuffer(obj)) { PyBuffer_Release(view); return; } if ((0)) {} else if (__Pyx_TypeCheck(obj, __pyx_ptype_5numpy_ndarray)) __pyx_pw_5numpy_7ndarray_3__releasebuffer__(obj, view); view->obj = NULL; Py_DECREF(obj); } #endif /* CIntFromPyVerify */ #define __PYX_VERIFY_RETURN_INT(target_type, func_type, func_value)\ __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 0) #define __PYX_VERIFY_RETURN_INT_EXC(target_type, func_type, func_value)\ __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 1) #define __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, exc)\ {\ func_type value = func_value;\ if (sizeof(target_type) < sizeof(func_type)) {\ if (unlikely(value != (func_type) (target_type) value)) {\ func_type zero = 0;\ if (exc && unlikely(value == (func_type)-1 && PyErr_Occurred()))\ return (target_type) -1;\ if (is_unsigned && unlikely(value < zero))\ goto raise_neg_overflow;\ else\ goto raise_overflow;\ }\ }\ return (target_type) value;\ } /* CIntToPy */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value) { const int neg_one = (int) -1, const_zero = (int) 0; const int is_unsigned = neg_one > const_zero; if (is_unsigned) { if (sizeof(int) < sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(int) <= sizeof(unsigned long)) { return PyLong_FromUnsignedLong((unsigned long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) { return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); #endif } } else { if (sizeof(int) <= sizeof(long)) { return PyInt_FromLong((long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) { return PyLong_FromLongLong((PY_LONG_LONG) value); #endif } } { int one = 1; int little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&value; return _PyLong_FromByteArray(bytes, sizeof(int), little, !is_unsigned); } } /* Declarations */ #if CYTHON_CCOMPLEX #ifdef __cplusplus static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { return ::std::complex< float >(x, y); } #else static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { return x + y*(__pyx_t_float_complex)_Complex_I; } #endif #else static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { __pyx_t_float_complex z; z.real = x; z.imag = y; return z; } #endif /* Arithmetic */ #if CYTHON_CCOMPLEX #else static CYTHON_INLINE int __Pyx_c_eq_float(__pyx_t_float_complex a, __pyx_t_float_complex b) { return (a.real == b.real) && (a.imag == b.imag); } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sum_float(__pyx_t_float_complex a, 
__pyx_t_float_complex b) { __pyx_t_float_complex z; z.real = a.real + b.real; z.imag = a.imag + b.imag; return z; } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_diff_float(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; z.real = a.real - b.real; z.imag = a.imag - b.imag; return z; } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prod_float(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; z.real = a.real * b.real - a.imag * b.imag; z.imag = a.real * b.imag + a.imag * b.real; return z; } #if 1 static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quot_float(__pyx_t_float_complex a, __pyx_t_float_complex b) { if (b.imag == 0) { return __pyx_t_float_complex_from_parts(a.real / b.real, a.imag / b.real); } else if (fabsf(b.real) >= fabsf(b.imag)) { if (b.real == 0 && b.imag == 0) { return __pyx_t_float_complex_from_parts(a.real / b.real, a.imag / b.imag); } else { float r = b.imag / b.real; float s = 1.0 / (b.real + b.imag * r); return __pyx_t_float_complex_from_parts( (a.real + a.imag * r) * s, (a.imag - a.real * r) * s); } } else { float r = b.real / b.imag; float s = 1.0 / (b.imag + b.real * r); return __pyx_t_float_complex_from_parts( (a.real * r + a.imag) * s, (a.imag * r - a.real) * s); } } #else static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quot_float(__pyx_t_float_complex a, __pyx_t_float_complex b) { if (b.imag == 0) { return __pyx_t_float_complex_from_parts(a.real / b.real, a.imag / b.real); } else { float denom = b.real * b.real + b.imag * b.imag; return __pyx_t_float_complex_from_parts( (a.real * b.real + a.imag * b.imag) / denom, (a.imag * b.real - a.real * b.imag) / denom); } } #endif static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_neg_float(__pyx_t_float_complex a) { __pyx_t_float_complex z; z.real = -a.real; z.imag = -a.imag; return z; } static CYTHON_INLINE int __Pyx_c_is_zero_float(__pyx_t_float_complex a) { return (a.real == 0) && (a.imag == 0); } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conj_float(__pyx_t_float_complex a) { __pyx_t_float_complex z; z.real = a.real; z.imag = -a.imag; return z; } #if 1 static CYTHON_INLINE float __Pyx_c_abs_float(__pyx_t_float_complex z) { #if !defined(HAVE_HYPOT) || defined(_MSC_VER) return sqrtf(z.real*z.real + z.imag*z.imag); #else return hypotf(z.real, z.imag); #endif } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_pow_float(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; float r, lnr, theta, z_r, z_theta; if (b.imag == 0 && b.real == (int)b.real) { if (b.real < 0) { float denom = a.real * a.real + a.imag * a.imag; a.real = a.real / denom; a.imag = -a.imag / denom; b.real = -b.real; } switch ((int)b.real) { case 0: z.real = 1; z.imag = 0; return z; case 1: return a; case 2: z = __Pyx_c_prod_float(a, a); return __Pyx_c_prod_float(a, a); case 3: z = __Pyx_c_prod_float(a, a); return __Pyx_c_prod_float(z, a); case 4: z = __Pyx_c_prod_float(a, a); return __Pyx_c_prod_float(z, z); } } if (a.imag == 0) { if (a.real == 0) { return a; } else if (b.imag == 0) { z.real = powf(a.real, b.real); z.imag = 0; return z; } else if (a.real > 0) { r = a.real; theta = 0; } else { r = -a.real; theta = atan2f(0, -1); } } else { r = __Pyx_c_abs_float(a); theta = atan2f(a.imag, a.real); } lnr = logf(r); z_r = expf(lnr * b.real - theta * b.imag); z_theta = theta * b.real + lnr * b.imag; z.real = z_r * cosf(z_theta); z.imag = z_r * sinf(z_theta); return z; } #endif #endif /* Declarations */ #if CYTHON_CCOMPLEX #ifdef __cplusplus static CYTHON_INLINE 
__pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { return ::std::complex< double >(x, y); } #else static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { return x + y*(__pyx_t_double_complex)_Complex_I; } #endif #else static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { __pyx_t_double_complex z; z.real = x; z.imag = y; return z; } #endif /* Arithmetic */ #if CYTHON_CCOMPLEX #else static CYTHON_INLINE int __Pyx_c_eq_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { return (a.real == b.real) && (a.imag == b.imag); } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; z.real = a.real + b.real; z.imag = a.imag + b.imag; return z; } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; z.real = a.real - b.real; z.imag = a.imag - b.imag; return z; } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; z.real = a.real * b.real - a.imag * b.imag; z.imag = a.real * b.imag + a.imag * b.real; return z; } #if 1 static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { if (b.imag == 0) { return __pyx_t_double_complex_from_parts(a.real / b.real, a.imag / b.real); } else if (fabs(b.real) >= fabs(b.imag)) { if (b.real == 0 && b.imag == 0) { return __pyx_t_double_complex_from_parts(a.real / b.real, a.imag / b.imag); } else { double r = b.imag / b.real; double s = 1.0 / (b.real + b.imag * r); return __pyx_t_double_complex_from_parts( (a.real + a.imag * r) * s, (a.imag - a.real * r) * s); } } else { double r = b.real / b.imag; double s = 1.0 / (b.imag + b.real * r); return __pyx_t_double_complex_from_parts( (a.real * r + a.imag) * s, (a.imag * r - a.real) * s); } } #else static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { if (b.imag == 0) { return __pyx_t_double_complex_from_parts(a.real / b.real, a.imag / b.real); } else { double denom = b.real * b.real + b.imag * b.imag; return __pyx_t_double_complex_from_parts( (a.real * b.real + a.imag * b.imag) / denom, (a.imag * b.real - a.real * b.imag) / denom); } } #endif static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg_double(__pyx_t_double_complex a) { __pyx_t_double_complex z; z.real = -a.real; z.imag = -a.imag; return z; } static CYTHON_INLINE int __Pyx_c_is_zero_double(__pyx_t_double_complex a) { return (a.real == 0) && (a.imag == 0); } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj_double(__pyx_t_double_complex a) { __pyx_t_double_complex z; z.real = a.real; z.imag = -a.imag; return z; } #if 1 static CYTHON_INLINE double __Pyx_c_abs_double(__pyx_t_double_complex z) { #if !defined(HAVE_HYPOT) || defined(_MSC_VER) return sqrt(z.real*z.real + z.imag*z.imag); #else return hypot(z.real, z.imag); #endif } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; double r, lnr, theta, z_r, z_theta; if (b.imag == 0 && b.real == (int)b.real) { if (b.real < 0) { double denom = a.real * a.real + a.imag * a.imag; a.real = a.real / denom; a.imag = -a.imag / denom; b.real = -b.real; } switch ((int)b.real) { case 0: z.real = 1; z.imag = 0; return z; case 1: 
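/* small integer exponents are expanded into repeated multiplications; other cases fall through to the polar log/exp computation further down */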
return a; case 2: z = __Pyx_c_prod_double(a, a); return __Pyx_c_prod_double(a, a); case 3: z = __Pyx_c_prod_double(a, a); return __Pyx_c_prod_double(z, a); case 4: z = __Pyx_c_prod_double(a, a); return __Pyx_c_prod_double(z, z); } } if (a.imag == 0) { if (a.real == 0) { return a; } else if (b.imag == 0) { z.real = pow(a.real, b.real); z.imag = 0; return z; } else if (a.real > 0) { r = a.real; theta = 0; } else { r = -a.real; theta = atan2(0, -1); } } else { r = __Pyx_c_abs_double(a); theta = atan2(a.imag, a.real); } lnr = log(r); z_r = exp(lnr * b.real - theta * b.imag); z_theta = theta * b.real + lnr * b.imag; z.real = z_r * cos(z_theta); z.imag = z_r * sin(z_theta); return z; } #endif #endif /* CIntToPy */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_enum__NPY_TYPES(enum NPY_TYPES value) { const enum NPY_TYPES neg_one = (enum NPY_TYPES) -1, const_zero = (enum NPY_TYPES) 0; const int is_unsigned = neg_one > const_zero; if (is_unsigned) { if (sizeof(enum NPY_TYPES) < sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(enum NPY_TYPES) <= sizeof(unsigned long)) { return PyLong_FromUnsignedLong((unsigned long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(enum NPY_TYPES) <= sizeof(unsigned PY_LONG_LONG)) { return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); #endif } } else { if (sizeof(enum NPY_TYPES) <= sizeof(long)) { return PyInt_FromLong((long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(enum NPY_TYPES) <= sizeof(PY_LONG_LONG)) { return PyLong_FromLongLong((PY_LONG_LONG) value); #endif } } { int one = 1; int little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&value; return _PyLong_FromByteArray(bytes, sizeof(enum NPY_TYPES), little, !is_unsigned); } } /* CIntFromPy */ static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *x) { const int neg_one = (int) -1, const_zero = (int) 0; const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(int) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(int, long, PyInt_AS_LONG(x)) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { goto raise_neg_overflow; } return (int) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (int) 0; case 1: __PYX_VERIFY_RETURN_INT(int, digit, digits[0]) case 2: if (8 * sizeof(int) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) >= 2 * PyLong_SHIFT) { return (int) (((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); } } break; case 3: if (8 * sizeof(int) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) >= 3 * PyLong_SHIFT) { return (int) (((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); } } break; case 4: if (8 * sizeof(int) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } 
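/* fast path: the value is assembled directly from the PyLong's base-2^PyLong_SHIFT digits when it is known to fit in the target type; otherwise conversion falls back to the generic PyLong_As* calls below */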
else if (8 * sizeof(int) >= 4 * PyLong_SHIFT) { return (int) (((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); } } break; } #endif #if CYTHON_COMPILING_IN_CPYTHON if (unlikely(Py_SIZE(x) < 0)) { goto raise_neg_overflow; } #else { int result = PyObject_RichCompareBool(x, Py_False, Py_LT); if (unlikely(result < 0)) return (int) -1; if (unlikely(result == 1)) goto raise_neg_overflow; } #endif if (sizeof(int) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT_EXC(int, unsigned long, PyLong_AsUnsignedLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(int, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) #endif } } else { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (int) 0; case -1: __PYX_VERIFY_RETURN_INT(int, sdigit, (sdigit) (-(sdigit)digits[0])) case 1: __PYX_VERIFY_RETURN_INT(int, digit, +digits[0]) case -2: if (8 * sizeof(int) - 1 > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { return (int) (((int)-1)*(((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case 2: if (8 * sizeof(int) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { return (int) ((((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case -3: if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { return (int) (((int)-1)*(((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case 3: if (8 * sizeof(int) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { return (int) ((((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case -4: if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) { return (int) (((int)-1)*(((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case 4: if (8 * sizeof(int) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * 
sizeof(int) - 1 > 4 * PyLong_SHIFT) { return (int) ((((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; } #endif if (sizeof(int) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT_EXC(int, long, PyLong_AsLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(int, PY_LONG_LONG, PyLong_AsLongLong(x)) #endif } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else int val; PyObject *v = __Pyx_PyNumber_IntOrLong(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (int) -1; } } else { int val; PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); if (!tmp) return (int) -1; val = __Pyx_PyInt_As_int(tmp); Py_DECREF(tmp); return val; } raise_overflow: PyErr_SetString(PyExc_OverflowError, "value too large to convert to int"); return (int) -1; raise_neg_overflow: PyErr_SetString(PyExc_OverflowError, "can't convert negative value to int"); return (int) -1; } /* CIntFromPy */ static CYTHON_INLINE unsigned int __Pyx_PyInt_As_unsigned_int(PyObject *x) { const unsigned int neg_one = (unsigned int) -1, const_zero = (unsigned int) 0; const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(unsigned int) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(unsigned int, long, PyInt_AS_LONG(x)) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { goto raise_neg_overflow; } return (unsigned int) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (unsigned int) 0; case 1: __PYX_VERIFY_RETURN_INT(unsigned int, digit, digits[0]) case 2: if (8 * sizeof(unsigned int) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(unsigned int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(unsigned int) >= 2 * PyLong_SHIFT) { return (unsigned int) (((((unsigned int)digits[1]) << PyLong_SHIFT) | (unsigned int)digits[0])); } } break; case 3: if (8 * sizeof(unsigned int) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(unsigned int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(unsigned int) >= 3 * PyLong_SHIFT) { return (unsigned int) (((((((unsigned int)digits[2]) << PyLong_SHIFT) | (unsigned int)digits[1]) << PyLong_SHIFT) | (unsigned int)digits[0])); } } break; case 4: if (8 * sizeof(unsigned int) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(unsigned int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(unsigned int) >= 4 * 
PyLong_SHIFT) { return (unsigned int) (((((((((unsigned int)digits[3]) << PyLong_SHIFT) | (unsigned int)digits[2]) << PyLong_SHIFT) | (unsigned int)digits[1]) << PyLong_SHIFT) | (unsigned int)digits[0])); } } break; } #endif #if CYTHON_COMPILING_IN_CPYTHON if (unlikely(Py_SIZE(x) < 0)) { goto raise_neg_overflow; } #else { int result = PyObject_RichCompareBool(x, Py_False, Py_LT); if (unlikely(result < 0)) return (unsigned int) -1; if (unlikely(result == 1)) goto raise_neg_overflow; } #endif if (sizeof(unsigned int) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT_EXC(unsigned int, unsigned long, PyLong_AsUnsignedLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(unsigned int) <= sizeof(unsigned PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(unsigned int, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) #endif } } else { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (unsigned int) 0; case -1: __PYX_VERIFY_RETURN_INT(unsigned int, sdigit, (sdigit) (-(sdigit)digits[0])) case 1: __PYX_VERIFY_RETURN_INT(unsigned int, digit, +digits[0]) case -2: if (8 * sizeof(unsigned int) - 1 > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(unsigned int, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(unsigned int) - 1 > 2 * PyLong_SHIFT) { return (unsigned int) (((unsigned int)-1)*(((((unsigned int)digits[1]) << PyLong_SHIFT) | (unsigned int)digits[0]))); } } break; case 2: if (8 * sizeof(unsigned int) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(unsigned int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(unsigned int) - 1 > 2 * PyLong_SHIFT) { return (unsigned int) ((((((unsigned int)digits[1]) << PyLong_SHIFT) | (unsigned int)digits[0]))); } } break; case -3: if (8 * sizeof(unsigned int) - 1 > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(unsigned int, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(unsigned int) - 1 > 3 * PyLong_SHIFT) { return (unsigned int) (((unsigned int)-1)*(((((((unsigned int)digits[2]) << PyLong_SHIFT) | (unsigned int)digits[1]) << PyLong_SHIFT) | (unsigned int)digits[0]))); } } break; case 3: if (8 * sizeof(unsigned int) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(unsigned int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(unsigned int) - 1 > 3 * PyLong_SHIFT) { return (unsigned int) ((((((((unsigned int)digits[2]) << PyLong_SHIFT) | (unsigned int)digits[1]) << PyLong_SHIFT) | (unsigned int)digits[0]))); } } break; case -4: if (8 * sizeof(unsigned int) - 1 > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(unsigned int, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(unsigned int) - 1 > 4 * PyLong_SHIFT) { return (unsigned int) (((unsigned int)-1)*(((((((((unsigned int)digits[3]) << PyLong_SHIFT) | (unsigned int)digits[2]) << PyLong_SHIFT) | 
(unsigned int)digits[1]) << PyLong_SHIFT) | (unsigned int)digits[0]))); } } break; case 4: if (8 * sizeof(unsigned int) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(unsigned int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(unsigned int) - 1 > 4 * PyLong_SHIFT) { return (unsigned int) ((((((((((unsigned int)digits[3]) << PyLong_SHIFT) | (unsigned int)digits[2]) << PyLong_SHIFT) | (unsigned int)digits[1]) << PyLong_SHIFT) | (unsigned int)digits[0]))); } } break; } #endif if (sizeof(unsigned int) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT_EXC(unsigned int, long, PyLong_AsLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(unsigned int) <= sizeof(PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(unsigned int, PY_LONG_LONG, PyLong_AsLongLong(x)) #endif } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else unsigned int val; PyObject *v = __Pyx_PyNumber_IntOrLong(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (unsigned int) -1; } } else { unsigned int val; PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); if (!tmp) return (unsigned int) -1; val = __Pyx_PyInt_As_unsigned_int(tmp); Py_DECREF(tmp); return val; } raise_overflow: PyErr_SetString(PyExc_OverflowError, "value too large to convert to unsigned int"); return (unsigned int) -1; raise_neg_overflow: PyErr_SetString(PyExc_OverflowError, "can't convert negative value to unsigned int"); return (unsigned int) -1; } /* CIntToPy */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value) { const long neg_one = (long) -1, const_zero = (long) 0; const int is_unsigned = neg_one > const_zero; if (is_unsigned) { if (sizeof(long) < sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(long) <= sizeof(unsigned long)) { return PyLong_FromUnsignedLong((unsigned long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) { return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); #endif } } else { if (sizeof(long) <= sizeof(long)) { return PyInt_FromLong((long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) { return PyLong_FromLongLong((PY_LONG_LONG) value); #endif } } { int one = 1; int little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&value; return _PyLong_FromByteArray(bytes, sizeof(long), little, !is_unsigned); } } /* CIntFromPy */ static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *x) { const long neg_one = (long) -1, const_zero = (long) 0; const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(long) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(long, long, PyInt_AS_LONG(x)) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { goto raise_neg_overflow; } return (long) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if 
CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (long) 0; case 1: __PYX_VERIFY_RETURN_INT(long, digit, digits[0]) case 2: if (8 * sizeof(long) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) >= 2 * PyLong_SHIFT) { return (long) (((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); } } break; case 3: if (8 * sizeof(long) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) >= 3 * PyLong_SHIFT) { return (long) (((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); } } break; case 4: if (8 * sizeof(long) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) >= 4 * PyLong_SHIFT) { return (long) (((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); } } break; } #endif #if CYTHON_COMPILING_IN_CPYTHON if (unlikely(Py_SIZE(x) < 0)) { goto raise_neg_overflow; } #else { int result = PyObject_RichCompareBool(x, Py_False, Py_LT); if (unlikely(result < 0)) return (long) -1; if (unlikely(result == 1)) goto raise_neg_overflow; } #endif if (sizeof(long) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT_EXC(long, unsigned long, PyLong_AsUnsignedLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(long, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) #endif } } else { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (long) 0; case -1: __PYX_VERIFY_RETURN_INT(long, sdigit, (sdigit) (-(sdigit)digits[0])) case 1: __PYX_VERIFY_RETURN_INT(long, digit, +digits[0]) case -2: if (8 * sizeof(long) - 1 > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { return (long) (((long)-1)*(((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case 2: if (8 * sizeof(long) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { return (long) ((((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case -3: if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { return (long) (((long)-1)*(((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | 
(long)digits[0]))); } } break; case 3: if (8 * sizeof(long) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { return (long) ((((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case -4: if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { return (long) (((long)-1)*(((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case 4: if (8 * sizeof(long) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { return (long) ((((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; } #endif if (sizeof(long) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT_EXC(long, long, PyLong_AsLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(long, PY_LONG_LONG, PyLong_AsLongLong(x)) #endif } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else long val; PyObject *v = __Pyx_PyNumber_IntOrLong(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (long) -1; } } else { long val; PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); if (!tmp) return (long) -1; val = __Pyx_PyInt_As_long(tmp); Py_DECREF(tmp); return val; } raise_overflow: PyErr_SetString(PyExc_OverflowError, "value too large to convert to long"); return (long) -1; raise_neg_overflow: PyErr_SetString(PyExc_OverflowError, "can't convert negative value to long"); return (long) -1; } /* FastTypeChecks */ #if CYTHON_COMPILING_IN_CPYTHON static int __Pyx_InBases(PyTypeObject *a, PyTypeObject *b) { while (a) { a = a->tp_base; if (a == b) return 1; } return b == &PyBaseObject_Type; } static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b) { PyObject *mro; if (a == b) return 1; mro = a->tp_mro; if (likely(mro)) { Py_ssize_t i, n; n = PyTuple_GET_SIZE(mro); for (i = 0; i < n; i++) { if (PyTuple_GET_ITEM(mro, i) == (PyObject *)b) return 1; } return 0; } return __Pyx_InBases(a, b); } #if PY_MAJOR_VERSION == 2 static int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject* exc_type2) { PyObject *exception, *value, *tb; 
int res; __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ErrFetch(&exception, &value, &tb); res = exc_type1 ? PyObject_IsSubclass(err, exc_type1) : 0; if (unlikely(res == -1)) { PyErr_WriteUnraisable(err); res = 0; } if (!res) { res = PyObject_IsSubclass(err, exc_type2); if (unlikely(res == -1)) { PyErr_WriteUnraisable(err); res = 0; } } __Pyx_ErrRestore(exception, value, tb); return res; } #else static CYTHON_INLINE int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject *exc_type2) { int res = exc_type1 ? __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type1) : 0; if (!res) { res = __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type2); } return res; } #endif static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject* exc_type) { if (likely(err == exc_type)) return 1; if (likely(PyExceptionClass_Check(err))) { return __Pyx_inner_PyErr_GivenExceptionMatches2(err, NULL, exc_type); } return PyErr_GivenExceptionMatches(err, exc_type); } static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *exc_type1, PyObject *exc_type2) { if (likely(err == exc_type1 || err == exc_type2)) return 1; if (likely(PyExceptionClass_Check(err))) { return __Pyx_inner_PyErr_GivenExceptionMatches2(err, exc_type1, exc_type2); } return (PyErr_GivenExceptionMatches(err, exc_type1) || PyErr_GivenExceptionMatches(err, exc_type2)); } #endif /* CheckBinaryVersion */ static int __Pyx_check_binary_version(void) { char ctversion[4], rtversion[4]; PyOS_snprintf(ctversion, 4, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION); PyOS_snprintf(rtversion, 4, "%s", Py_GetVersion()); if (ctversion[0] != rtversion[0] || ctversion[2] != rtversion[2]) { char message[200]; PyOS_snprintf(message, sizeof(message), "compiletime version %s of module '%.100s' " "does not match runtime version %s", ctversion, __Pyx_MODULE_NAME, rtversion); return PyErr_WarnEx(NULL, message, 1); } return 0; } /* ModuleImport */ #ifndef __PYX_HAVE_RT_ImportModule #define __PYX_HAVE_RT_ImportModule static PyObject *__Pyx_ImportModule(const char *name) { PyObject *py_name = 0; PyObject *py_module = 0; py_name = __Pyx_PyIdentifier_FromString(name); if (!py_name) goto bad; py_module = PyImport_Import(py_name); Py_DECREF(py_name); return py_module; bad: Py_XDECREF(py_name); return 0; } #endif /* TypeImport */ #ifndef __PYX_HAVE_RT_ImportType #define __PYX_HAVE_RT_ImportType static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class_name, size_t size, int strict) { PyObject *py_module = 0; PyObject *result = 0; PyObject *py_name = 0; char warning[200]; Py_ssize_t basicsize; #ifdef Py_LIMITED_API PyObject *py_basicsize; #endif py_module = __Pyx_ImportModule(module_name); if (!py_module) goto bad; py_name = __Pyx_PyIdentifier_FromString(class_name); if (!py_name) goto bad; result = PyObject_GetAttr(py_module, py_name); Py_DECREF(py_name); py_name = 0; Py_DECREF(py_module); py_module = 0; if (!result) goto bad; if (!PyType_Check(result)) { PyErr_Format(PyExc_TypeError, "%.200s.%.200s is not a type object", module_name, class_name); goto bad; } #ifndef Py_LIMITED_API basicsize = ((PyTypeObject *)result)->tp_basicsize; #else py_basicsize = PyObject_GetAttrString(result, "__basicsize__"); if (!py_basicsize) goto bad; basicsize = PyLong_AsSsize_t(py_basicsize); Py_DECREF(py_basicsize); py_basicsize = 0; if (basicsize == (Py_ssize_t)-1 && PyErr_Occurred()) goto bad; #endif if (!strict && (size_t)basicsize > size) { PyOS_snprintf(warning, 
sizeof(warning), "%s.%s size changed, may indicate binary incompatibility. Expected %zd, got %zd", module_name, class_name, basicsize, size); if (PyErr_WarnEx(NULL, warning, 0) < 0) goto bad; } else if ((size_t)basicsize != size) { PyErr_Format(PyExc_ValueError, "%.200s.%.200s has the wrong size, try recompiling. Expected %zd, got %zd", module_name, class_name, basicsize, size); goto bad; } return (PyTypeObject *)result; bad: Py_XDECREF(py_module); Py_XDECREF(result); return NULL; } #endif /* FunctionImport */ #ifndef __PYX_HAVE_RT_ImportFunction #define __PYX_HAVE_RT_ImportFunction static int __Pyx_ImportFunction(PyObject *module, const char *funcname, void (**f)(void), const char *sig) { PyObject *d = 0; PyObject *cobj = 0; union { void (*fp)(void); void *p; } tmp; d = PyObject_GetAttrString(module, (char *)"__pyx_capi__"); if (!d) goto bad; cobj = PyDict_GetItemString(d, funcname); if (!cobj) { PyErr_Format(PyExc_ImportError, "%.200s does not export expected C function %.200s", PyModule_GetName(module), funcname); goto bad; } #if PY_VERSION_HEX >= 0x02070000 if (!PyCapsule_IsValid(cobj, sig)) { PyErr_Format(PyExc_TypeError, "C function %.200s.%.200s has wrong signature (expected %.500s, got %.500s)", PyModule_GetName(module), funcname, sig, PyCapsule_GetName(cobj)); goto bad; } tmp.p = PyCapsule_GetPointer(cobj, sig); #else {const char *desc, *s1, *s2; desc = (const char *)PyCObject_GetDesc(cobj); if (!desc) goto bad; s1 = desc; s2 = sig; while (*s1 != '\0' && *s1 == *s2) { s1++; s2++; } if (*s1 != *s2) { PyErr_Format(PyExc_TypeError, "C function %.200s.%.200s has wrong signature (expected %.500s, got %.500s)", PyModule_GetName(module), funcname, sig, desc); goto bad; } tmp.p = PyCObject_AsVoidPtr(cobj);} #endif *f = tmp.fp; if (!(*f)) goto bad; Py_DECREF(d); return 0; bad: Py_XDECREF(d); return -1; } #endif /* InitStrings */ static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) { while (t->p) { #if PY_MAJOR_VERSION < 3 if (t->is_unicode) { *t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL); } else if (t->intern) { *t->p = PyString_InternFromString(t->s); } else { *t->p = PyString_FromStringAndSize(t->s, t->n - 1); } #else if (t->is_unicode | t->is_str) { if (t->intern) { *t->p = PyUnicode_InternFromString(t->s); } else if (t->encoding) { *t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL); } else { *t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1); } } else { *t->p = PyBytes_FromStringAndSize(t->s, t->n - 1); } #endif if (!*t->p) return -1; if (PyObject_Hash(*t->p) == -1) PyErr_Clear(); ++t; } return 0; } static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char* c_str) { return __Pyx_PyUnicode_FromStringAndSize(c_str, (Py_ssize_t)strlen(c_str)); } static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject* o) { Py_ssize_t ignore; return __Pyx_PyObject_AsStringAndSize(o, &ignore); } #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT #if !CYTHON_PEP393_ENABLED static const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) { char* defenc_c; PyObject* defenc = _PyUnicode_AsDefaultEncodedString(o, NULL); if (!defenc) return NULL; defenc_c = PyBytes_AS_STRING(defenc); #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII { char* end = defenc_c + PyBytes_GET_SIZE(defenc); char* c; for (c = defenc_c; c < end; c++) { if ((unsigned char) (*c) >= 128) { PyUnicode_AsASCIIString(o); return NULL; } } } #endif *length = PyBytes_GET_SIZE(defenc); return defenc_c; } #else static CYTHON_INLINE const char* 
__Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) { if (unlikely(__Pyx_PyUnicode_READY(o) == -1)) return NULL; #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII if (likely(PyUnicode_IS_ASCII(o))) { *length = PyUnicode_GET_LENGTH(o); return PyUnicode_AsUTF8(o); } else { PyUnicode_AsASCIIString(o); return NULL; } #else return PyUnicode_AsUTF8AndSize(o, length); #endif } #endif #endif static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject* o, Py_ssize_t *length) { #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT if ( #if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII __Pyx_sys_getdefaultencoding_not_ascii && #endif PyUnicode_Check(o)) { return __Pyx_PyUnicode_AsStringAndSize(o, length); } else #endif #if (!CYTHON_COMPILING_IN_PYPY) || (defined(PyByteArray_AS_STRING) && defined(PyByteArray_GET_SIZE)) if (PyByteArray_Check(o)) { *length = PyByteArray_GET_SIZE(o); return PyByteArray_AS_STRING(o); } else #endif { char* result; int r = PyBytes_AsStringAndSize(o, &result, length); if (unlikely(r < 0)) { return NULL; } else { return result; } } } static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) { int is_true = x == Py_True; if (is_true | (x == Py_False) | (x == Py_None)) return is_true; else return PyObject_IsTrue(x); } static PyObject* __Pyx_PyNumber_IntOrLongWrongResultType(PyObject* result, const char* type_name) { #if PY_MAJOR_VERSION >= 3 if (PyLong_Check(result)) { if (PyErr_WarnFormat(PyExc_DeprecationWarning, 1, "__int__ returned non-int (type %.200s). " "The ability to return an instance of a strict subclass of int " "is deprecated, and may be removed in a future version of Python.", Py_TYPE(result)->tp_name)) { Py_DECREF(result); return NULL; } return result; } #endif PyErr_Format(PyExc_TypeError, "__%.4s__ returned non-%.4s (type %.200s)", type_name, type_name, Py_TYPE(result)->tp_name); Py_DECREF(result); return NULL; } static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x) { #if CYTHON_USE_TYPE_SLOTS PyNumberMethods *m; #endif const char *name = NULL; PyObject *res = NULL; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x) || PyLong_Check(x))) #else if (likely(PyLong_Check(x))) #endif return __Pyx_NewRef(x); #if CYTHON_USE_TYPE_SLOTS m = Py_TYPE(x)->tp_as_number; #if PY_MAJOR_VERSION < 3 if (m && m->nb_int) { name = "int"; res = m->nb_int(x); } else if (m && m->nb_long) { name = "long"; res = m->nb_long(x); } #else if (likely(m && m->nb_int)) { name = "int"; res = m->nb_int(x); } #endif #else if (!PyBytes_CheckExact(x) && !PyUnicode_CheckExact(x)) { res = PyNumber_Int(x); } #endif if (likely(res)) { #if PY_MAJOR_VERSION < 3 if (unlikely(!PyInt_Check(res) && !PyLong_Check(res))) { #else if (unlikely(!PyLong_CheckExact(res))) { #endif return __Pyx_PyNumber_IntOrLongWrongResultType(res, name); } } else if (!PyErr_Occurred()) { PyErr_SetString(PyExc_TypeError, "an integer is required"); } return res; } static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) { Py_ssize_t ival; PyObject *x; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_CheckExact(b))) { if (sizeof(Py_ssize_t) >= sizeof(long)) return PyInt_AS_LONG(b); else return PyInt_AsSsize_t(x); } #endif if (likely(PyLong_CheckExact(b))) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)b)->ob_digit; const Py_ssize_t size = Py_SIZE(b); if (likely(__Pyx_sst_abs(size) <= 1)) { ival = likely(size) ? 
digits[0] : 0; if (size == -1) ival = -ival; return ival; } else { switch (size) { case 2: if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) { return (Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case -2: if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) { return -(Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case 3: if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) { return (Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case -3: if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) { return -(Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case 4: if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) { return (Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case -4: if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) { return -(Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; } } #endif return PyLong_AsSsize_t(b); } x = PyNumber_Index(b); if (!x) return -1; ival = PyInt_AsSsize_t(x); Py_DECREF(x); return ival; } static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) { return PyInt_FromSize_t(ival); } #endif /* Py_PYTHON_H */ photutils-0.4/photutils/geometry/elliptical_overlap.pyx0000644000214200020070000001537213055576313026115 0ustar lbradleySTSCI\science00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst """ The functions defined here allow one to determine the exact area of overlap of an ellipse and a triangle (written by Thomas Robitaille). The approach is to divide the rectangle into two triangles, and reproject these so that the ellipse is a unit circle, then compute the intersection of a triangle with a unit circle. """ from __future__ import (absolute_import, division, print_function, unicode_literals) import numpy as np cimport numpy as np __all__ = ['elliptical_overlap_grid'] cdef extern from "math.h": double asin(double x) double sin(double x) double cos(double x) double sqrt(double x) from cpython cimport bool DTYPE = np.float64 ctypedef np.float64_t DTYPE_t cimport cython # NOTE: Here we need to make sure we use cimport to import the C functions from # core (since these were defined with cdef). This also requires the core.pxd # file to exist with the function signatures. from .core cimport distance, area_triangle, overlap_area_triangle_unit_circle def elliptical_overlap_grid(double xmin, double xmax, double ymin, double ymax, int nx, int ny, double rx, double ry, double theta, int use_exact, int subpixels): """ elliptical_overlap_grid(xmin, xmax, ymin, ymax, nx, ny, rx, ry, use_exact, subpixels) Area of overlap between an ellipse and a pixel grid. The ellipse is centered on the origin. Parameters ---------- xmin, xmax, ymin, ymax : float Extent of the grid in the x and y direction. nx, ny : int Grid dimensions. rx : float The semimajor axis of the ellipse. ry : float The semiminor axis of the ellipse. theta : float The position angle of the semimajor axis in radians (counterclockwise). use_exact : 0 or 1 If set to 1, calculates the exact overlap, while if set to 0, uses a subpixel sampling method with ``subpixel`` subpixels in each direction. 
subpixels : int If ``use_exact`` is 0, each pixel is resampled by this factor in each dimension. Thus, each pixel is divided into ``subpixels ** 2`` subpixels. Returns ------- frac : `~numpy.ndarray` 2-d array giving the fraction of the overlap. """ cdef unsigned int i, j cdef double x, y, dx, dy cdef double bxmin, bxmax, bymin, bymax cdef double pxmin, pxmax, pymin, pymax cdef double norm # Define output array cdef np.ndarray[DTYPE_t, ndim=2] frac = np.zeros([ny, nx], dtype=DTYPE) # Find the width of each element in x and y dx = (xmax - xmin) / nx dy = (ymax - ymin) / ny norm = 1. / (dx * dy) # For now we use a bounding circle and then use that to find a bounding box # but of course this is inefficient and could be done better. # Find bounding circle radius r = max(rx, ry) # Define bounding box bxmin = -r - 0.5 * dx bxmax = +r + 0.5 * dx bymin = -r - 0.5 * dy bymax = +r + 0.5 * dy for i in range(nx): pxmin = xmin + i * dx # lower end of pixel pxmax = pxmin + dx # upper end of pixel if pxmax > bxmin and pxmin < bxmax: for j in range(ny): pymin = ymin + j * dy pymax = pymin + dy if pymax > bymin and pymin < bymax: if use_exact: frac[j, i] = elliptical_overlap_single_exact( pxmin, pymin, pxmax, pymax, rx, ry, theta) * norm else: frac[j, i] = elliptical_overlap_single_subpixel( pxmin, pymin, pxmax, pymax, rx, ry, theta, subpixels) return frac # NOTE: The following two functions use cdef because they are not # intended to be called from the Python code. Using def makes them # callable from outside, but also slower. In any case, these aren't useful # to call from outside because they only operate on a single pixel. cdef double elliptical_overlap_single_subpixel(double x0, double y0, double x1, double y1, double rx, double ry, double theta, int subpixels): """ Return the fraction of overlap between a ellipse and a single pixel with given extent, using a sub-pixel sampling method. """ cdef unsigned int i, j cdef double x, y cdef double frac = 0. # Accumulator. cdef double inv_rx_sq, inv_ry_sq cdef double cos_theta = cos(theta) cdef double sin_theta = sin(theta) cdef double dx, dy cdef double x_tr, y_tr dx = (x1 - x0) / subpixels dy = (y1 - y0) / subpixels inv_rx_sq = 1. / (rx * rx) inv_ry_sq = 1. / (ry * ry) x = x0 - 0.5 * dx for i in range(subpixels): x += dx y = y0 - 0.5 * dy for j in range(subpixels): y += dy # Transform into frame of rotated ellipse x_tr = y * sin_theta + x * cos_theta y_tr = y * cos_theta - x * sin_theta if x_tr * x_tr * inv_rx_sq + y_tr * y_tr * inv_ry_sq < 1.: frac += 1. return frac / (subpixels * subpixels) cdef double elliptical_overlap_single_exact(double xmin, double ymin, double xmax, double ymax, double rx, double ry, double theta): """ Given a rectangle defined by (xmin, ymin, xmax, ymax) and an ellipse with major and minor axes rx and ry respectively, position angle theta, and centered at the origin, find the area of overlap. 
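    In outline (this is only a restatement of the steps implemented below,
    with x', y' used here purely as notation for the transformed
    coordinates): each corner (x, y) of the rectangle is mapped into the
    frame in which the ellipse becomes the unit circle by rotating through
    -theta and rescaling the axes::

        x' = (x * cos(-theta) - y * sin(-theta)) / rx
        y' = (x * sin(-theta) + y * cos(-theta)) / ry

    This linear map shrinks all areas by a factor of rx * ry, so the
    overlap found in the unit-circle frame (by splitting the mapped
    quadrilateral into two triangles and intersecting each with the unit
    circle) is multiplied back by rx * ry to recover the overlap area in
    the original frame.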
""" cdef double cos_m_theta = cos(-theta) cdef double sin_m_theta = sin(-theta) cdef double scale # Find scale by which the areas will be shrunk scale = rx * ry # Reproject rectangle to frame of reference in which ellipse is a # unit circle x1, y1 = ((xmin * cos_m_theta - ymin * sin_m_theta) / rx, (xmin * sin_m_theta + ymin * cos_m_theta) / ry) x2, y2 = ((xmax * cos_m_theta - ymin * sin_m_theta) / rx, (xmax * sin_m_theta + ymin * cos_m_theta) / ry) x3, y3 = ((xmax * cos_m_theta - ymax * sin_m_theta) / rx, (xmax * sin_m_theta + ymax * cos_m_theta) / ry) x4, y4 = ((xmin * cos_m_theta - ymax * sin_m_theta) / rx, (xmin * sin_m_theta + ymax * cos_m_theta) / ry) # Divide resulting quadrilateral into two triangles and find # intersection with unit circle return (overlap_area_triangle_unit_circle(x1, y1, x2, y2, x3, y3) + overlap_area_triangle_unit_circle(x1, y1, x4, y4, x3, y3)) * scale photutils-0.4/photutils/geometry/rectangular_overlap.c0000644000214200020070000123125113175654701025702 0ustar lbradleySTSCI\science00000000000000/* Generated by Cython 0.27.2 */ #define PY_SSIZE_T_CLEAN #include "Python.h" #ifndef Py_PYTHON_H #error Python headers needed to compile C extensions, please install development version of Python. #elif PY_VERSION_HEX < 0x02060000 || (0x03000000 <= PY_VERSION_HEX && PY_VERSION_HEX < 0x03030000) #error Cython requires Python 2.6+ or Python 3.3+. #else #define CYTHON_ABI "0_27_2" #define CYTHON_FUTURE_DIVISION 1 #include #ifndef offsetof #define offsetof(type, member) ( (size_t) & ((type*)0) -> member ) #endif #if !defined(WIN32) && !defined(MS_WINDOWS) #ifndef __stdcall #define __stdcall #endif #ifndef __cdecl #define __cdecl #endif #ifndef __fastcall #define __fastcall #endif #endif #ifndef DL_IMPORT #define DL_IMPORT(t) t #endif #ifndef DL_EXPORT #define DL_EXPORT(t) t #endif #define __PYX_COMMA , #ifndef HAVE_LONG_LONG #if PY_VERSION_HEX >= 0x02070000 #define HAVE_LONG_LONG #endif #endif #ifndef PY_LONG_LONG #define PY_LONG_LONG LONG_LONG #endif #ifndef Py_HUGE_VAL #define Py_HUGE_VAL HUGE_VAL #endif #ifdef PYPY_VERSION #define CYTHON_COMPILING_IN_PYPY 1 #define CYTHON_COMPILING_IN_PYSTON 0 #define CYTHON_COMPILING_IN_CPYTHON 0 #undef CYTHON_USE_TYPE_SLOTS #define CYTHON_USE_TYPE_SLOTS 0 #undef CYTHON_USE_PYTYPE_LOOKUP #define CYTHON_USE_PYTYPE_LOOKUP 0 #if PY_VERSION_HEX < 0x03050000 #undef CYTHON_USE_ASYNC_SLOTS #define CYTHON_USE_ASYNC_SLOTS 0 #elif !defined(CYTHON_USE_ASYNC_SLOTS) #define CYTHON_USE_ASYNC_SLOTS 1 #endif #undef CYTHON_USE_PYLIST_INTERNALS #define CYTHON_USE_PYLIST_INTERNALS 0 #undef CYTHON_USE_UNICODE_INTERNALS #define CYTHON_USE_UNICODE_INTERNALS 0 #undef CYTHON_USE_UNICODE_WRITER #define CYTHON_USE_UNICODE_WRITER 0 #undef CYTHON_USE_PYLONG_INTERNALS #define CYTHON_USE_PYLONG_INTERNALS 0 #undef CYTHON_AVOID_BORROWED_REFS #define CYTHON_AVOID_BORROWED_REFS 1 #undef CYTHON_ASSUME_SAFE_MACROS #define CYTHON_ASSUME_SAFE_MACROS 0 #undef CYTHON_UNPACK_METHODS #define CYTHON_UNPACK_METHODS 0 #undef CYTHON_FAST_THREAD_STATE #define CYTHON_FAST_THREAD_STATE 0 #undef CYTHON_FAST_PYCALL #define CYTHON_FAST_PYCALL 0 #undef CYTHON_PEP489_MULTI_PHASE_INIT #define CYTHON_PEP489_MULTI_PHASE_INIT 0 #undef CYTHON_USE_TP_FINALIZE #define CYTHON_USE_TP_FINALIZE 0 #elif defined(PYSTON_VERSION) #define CYTHON_COMPILING_IN_PYPY 0 #define CYTHON_COMPILING_IN_PYSTON 1 #define CYTHON_COMPILING_IN_CPYTHON 0 #ifndef CYTHON_USE_TYPE_SLOTS #define CYTHON_USE_TYPE_SLOTS 1 #endif #undef CYTHON_USE_PYTYPE_LOOKUP #define CYTHON_USE_PYTYPE_LOOKUP 0 #undef CYTHON_USE_ASYNC_SLOTS 
#define CYTHON_USE_ASYNC_SLOTS 0 #undef CYTHON_USE_PYLIST_INTERNALS #define CYTHON_USE_PYLIST_INTERNALS 0 #ifndef CYTHON_USE_UNICODE_INTERNALS #define CYTHON_USE_UNICODE_INTERNALS 1 #endif #undef CYTHON_USE_UNICODE_WRITER #define CYTHON_USE_UNICODE_WRITER 0 #undef CYTHON_USE_PYLONG_INTERNALS #define CYTHON_USE_PYLONG_INTERNALS 0 #ifndef CYTHON_AVOID_BORROWED_REFS #define CYTHON_AVOID_BORROWED_REFS 0 #endif #ifndef CYTHON_ASSUME_SAFE_MACROS #define CYTHON_ASSUME_SAFE_MACROS 1 #endif #ifndef CYTHON_UNPACK_METHODS #define CYTHON_UNPACK_METHODS 1 #endif #undef CYTHON_FAST_THREAD_STATE #define CYTHON_FAST_THREAD_STATE 0 #undef CYTHON_FAST_PYCALL #define CYTHON_FAST_PYCALL 0 #undef CYTHON_PEP489_MULTI_PHASE_INIT #define CYTHON_PEP489_MULTI_PHASE_INIT 0 #undef CYTHON_USE_TP_FINALIZE #define CYTHON_USE_TP_FINALIZE 0 #else #define CYTHON_COMPILING_IN_PYPY 0 #define CYTHON_COMPILING_IN_PYSTON 0 #define CYTHON_COMPILING_IN_CPYTHON 1 #ifndef CYTHON_USE_TYPE_SLOTS #define CYTHON_USE_TYPE_SLOTS 1 #endif #if PY_VERSION_HEX < 0x02070000 #undef CYTHON_USE_PYTYPE_LOOKUP #define CYTHON_USE_PYTYPE_LOOKUP 0 #elif !defined(CYTHON_USE_PYTYPE_LOOKUP) #define CYTHON_USE_PYTYPE_LOOKUP 1 #endif #if PY_MAJOR_VERSION < 3 #undef CYTHON_USE_ASYNC_SLOTS #define CYTHON_USE_ASYNC_SLOTS 0 #elif !defined(CYTHON_USE_ASYNC_SLOTS) #define CYTHON_USE_ASYNC_SLOTS 1 #endif #if PY_VERSION_HEX < 0x02070000 #undef CYTHON_USE_PYLONG_INTERNALS #define CYTHON_USE_PYLONG_INTERNALS 0 #elif !defined(CYTHON_USE_PYLONG_INTERNALS) #define CYTHON_USE_PYLONG_INTERNALS 1 #endif #ifndef CYTHON_USE_PYLIST_INTERNALS #define CYTHON_USE_PYLIST_INTERNALS 1 #endif #ifndef CYTHON_USE_UNICODE_INTERNALS #define CYTHON_USE_UNICODE_INTERNALS 1 #endif #if PY_VERSION_HEX < 0x030300F0 #undef CYTHON_USE_UNICODE_WRITER #define CYTHON_USE_UNICODE_WRITER 0 #elif !defined(CYTHON_USE_UNICODE_WRITER) #define CYTHON_USE_UNICODE_WRITER 1 #endif #ifndef CYTHON_AVOID_BORROWED_REFS #define CYTHON_AVOID_BORROWED_REFS 0 #endif #ifndef CYTHON_ASSUME_SAFE_MACROS #define CYTHON_ASSUME_SAFE_MACROS 1 #endif #ifndef CYTHON_UNPACK_METHODS #define CYTHON_UNPACK_METHODS 1 #endif #ifndef CYTHON_FAST_THREAD_STATE #define CYTHON_FAST_THREAD_STATE 1 #endif #ifndef CYTHON_FAST_PYCALL #define CYTHON_FAST_PYCALL 1 #endif #ifndef CYTHON_PEP489_MULTI_PHASE_INIT #define CYTHON_PEP489_MULTI_PHASE_INIT (0 && PY_VERSION_HEX >= 0x03050000) #endif #ifndef CYTHON_USE_TP_FINALIZE #define CYTHON_USE_TP_FINALIZE (PY_VERSION_HEX >= 0x030400a1) #endif #endif #if !defined(CYTHON_FAST_PYCCALL) #define CYTHON_FAST_PYCCALL (CYTHON_FAST_PYCALL && PY_VERSION_HEX >= 0x030600B1) #endif #if CYTHON_USE_PYLONG_INTERNALS #include "longintrepr.h" #undef SHIFT #undef BASE #undef MASK #endif #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX < 0x02070600 && !defined(Py_OptimizeFlag) #define Py_OptimizeFlag 0 #endif #define __PYX_BUILD_PY_SSIZE_T "n" #define CYTHON_FORMAT_SSIZE_T "z" #if PY_MAJOR_VERSION < 3 #define __Pyx_BUILTIN_MODULE_NAME "__builtin__" #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ PyCode_New(a+k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) #define __Pyx_DefaultClassType PyClass_Type #else #define __Pyx_BUILTIN_MODULE_NAME "builtins" #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) #define __Pyx_DefaultClassType PyType_Type #endif #ifndef Py_TPFLAGS_CHECKTYPES #define Py_TPFLAGS_CHECKTYPES 0 #endif #ifndef Py_TPFLAGS_HAVE_INDEX #define 
Py_TPFLAGS_HAVE_INDEX 0 #endif #ifndef Py_TPFLAGS_HAVE_NEWBUFFER #define Py_TPFLAGS_HAVE_NEWBUFFER 0 #endif #ifndef Py_TPFLAGS_HAVE_FINALIZE #define Py_TPFLAGS_HAVE_FINALIZE 0 #endif #if PY_VERSION_HEX < 0x030700A0 || !defined(METH_FASTCALL) #ifndef METH_FASTCALL #define METH_FASTCALL 0x80 #endif typedef PyObject *(*__Pyx_PyCFunctionFast) (PyObject *self, PyObject **args, Py_ssize_t nargs); typedef PyObject *(*__Pyx_PyCFunctionFastWithKeywords) (PyObject *self, PyObject **args, Py_ssize_t nargs, PyObject *kwnames); #else #define __Pyx_PyCFunctionFast _PyCFunctionFast #define __Pyx_PyCFunctionFastWithKeywords _PyCFunctionFastWithKeywords #endif #if CYTHON_FAST_PYCCALL #define __Pyx_PyFastCFunction_Check(func)\ ((PyCFunction_Check(func) && (METH_FASTCALL == (PyCFunction_GET_FLAGS(func) & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS))))) #else #define __Pyx_PyFastCFunction_Check(func) 0 #endif #if !CYTHON_FAST_THREAD_STATE || PY_VERSION_HEX < 0x02070000 #define __Pyx_PyThreadState_Current PyThreadState_GET() #elif PY_VERSION_HEX >= 0x03060000 #define __Pyx_PyThreadState_Current _PyThreadState_UncheckedGet() #elif PY_VERSION_HEX >= 0x03000000 #define __Pyx_PyThreadState_Current PyThreadState_GET() #else #define __Pyx_PyThreadState_Current _PyThreadState_Current #endif #if CYTHON_COMPILING_IN_CPYTHON || defined(_PyDict_NewPresized) #define __Pyx_PyDict_NewPresized(n) ((n <= 8) ? PyDict_New() : _PyDict_NewPresized(n)) #else #define __Pyx_PyDict_NewPresized(n) PyDict_New() #endif #if PY_MAJOR_VERSION >= 3 || CYTHON_FUTURE_DIVISION #define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y) #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y) #else #define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y) #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y) #endif #if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND) #define CYTHON_PEP393_ENABLED 1 #define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ?\ 0 : _PyUnicode_Ready((PyObject *)(op))) #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_LENGTH(u) #define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i) #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) PyUnicode_MAX_CHAR_VALUE(u) #define __Pyx_PyUnicode_KIND(u) PyUnicode_KIND(u) #define __Pyx_PyUnicode_DATA(u) PyUnicode_DATA(u) #define __Pyx_PyUnicode_READ(k, d, i) PyUnicode_READ(k, d, i) #define __Pyx_PyUnicode_WRITE(k, d, i, ch) PyUnicode_WRITE(k, d, i, ch) #define __Pyx_PyUnicode_IS_TRUE(u) (0 != (likely(PyUnicode_IS_READY(u)) ? PyUnicode_GET_LENGTH(u) : PyUnicode_GET_SIZE(u))) #else #define CYTHON_PEP393_ENABLED 0 #define PyUnicode_1BYTE_KIND 1 #define PyUnicode_2BYTE_KIND 2 #define PyUnicode_4BYTE_KIND 4 #define __Pyx_PyUnicode_READY(op) (0) #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_SIZE(u) #define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i])) #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) ((sizeof(Py_UNICODE) == 2) ? 
65535 : 1114111) #define __Pyx_PyUnicode_KIND(u) (sizeof(Py_UNICODE)) #define __Pyx_PyUnicode_DATA(u) ((void*)PyUnicode_AS_UNICODE(u)) #define __Pyx_PyUnicode_READ(k, d, i) ((void)(k), (Py_UCS4)(((Py_UNICODE*)d)[i])) #define __Pyx_PyUnicode_WRITE(k, d, i, ch) (((void)(k)), ((Py_UNICODE*)d)[i] = ch) #define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_SIZE(u)) #endif #if CYTHON_COMPILING_IN_PYPY #define __Pyx_PyUnicode_Concat(a, b) PyNumber_Add(a, b) #define __Pyx_PyUnicode_ConcatSafe(a, b) PyNumber_Add(a, b) #else #define __Pyx_PyUnicode_Concat(a, b) PyUnicode_Concat(a, b) #define __Pyx_PyUnicode_ConcatSafe(a, b) ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ?\ PyNumber_Add(a, b) : __Pyx_PyUnicode_Concat(a, b)) #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyUnicode_Contains) #define PyUnicode_Contains(u, s) PySequence_Contains(u, s) #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyByteArray_Check) #define PyByteArray_Check(obj) PyObject_TypeCheck(obj, &PyByteArray_Type) #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Format) #define PyObject_Format(obj, fmt) PyObject_CallMethod(obj, "__format__", "O", fmt) #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Malloc) #define PyObject_Malloc(s) PyMem_Malloc(s) #define PyObject_Free(p) PyMem_Free(p) #define PyObject_Realloc(p) PyMem_Realloc(p) #endif #if CYTHON_COMPILING_IN_PYSTON #define __Pyx_PyCode_HasFreeVars(co) PyCode_HasFreeVars(co) #define __Pyx_PyFrame_SetLineNumber(frame, lineno) PyFrame_SetLineNumber(frame, lineno) #else #define __Pyx_PyCode_HasFreeVars(co) (PyCode_GetNumFree(co) > 0) #define __Pyx_PyFrame_SetLineNumber(frame, lineno) (frame)->f_lineno = (lineno) #endif #define __Pyx_PyString_FormatSafe(a, b) ((unlikely((a) == Py_None)) ? PyNumber_Remainder(a, b) : __Pyx_PyString_Format(a, b)) #define __Pyx_PyUnicode_FormatSafe(a, b) ((unlikely((a) == Py_None)) ? 
PyNumber_Remainder(a, b) : PyUnicode_Format(a, b)) #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyString_Format(a, b) PyUnicode_Format(a, b) #else #define __Pyx_PyString_Format(a, b) PyString_Format(a, b) #endif #if PY_MAJOR_VERSION < 3 && !defined(PyObject_ASCII) #define PyObject_ASCII(o) PyObject_Repr(o) #endif #if PY_MAJOR_VERSION >= 3 #define PyBaseString_Type PyUnicode_Type #define PyStringObject PyUnicodeObject #define PyString_Type PyUnicode_Type #define PyString_Check PyUnicode_Check #define PyString_CheckExact PyUnicode_CheckExact #endif #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyBaseString_Check(obj) PyUnicode_Check(obj) #define __Pyx_PyBaseString_CheckExact(obj) PyUnicode_CheckExact(obj) #else #define __Pyx_PyBaseString_Check(obj) (PyString_Check(obj) || PyUnicode_Check(obj)) #define __Pyx_PyBaseString_CheckExact(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj)) #endif #ifndef PySet_CheckExact #define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type) #endif #define __Pyx_PyException_Check(obj) __Pyx_TypeCheck(obj, PyExc_Exception) #if PY_MAJOR_VERSION >= 3 #define PyIntObject PyLongObject #define PyInt_Type PyLong_Type #define PyInt_Check(op) PyLong_Check(op) #define PyInt_CheckExact(op) PyLong_CheckExact(op) #define PyInt_FromString PyLong_FromString #define PyInt_FromUnicode PyLong_FromUnicode #define PyInt_FromLong PyLong_FromLong #define PyInt_FromSize_t PyLong_FromSize_t #define PyInt_FromSsize_t PyLong_FromSsize_t #define PyInt_AsLong PyLong_AsLong #define PyInt_AS_LONG PyLong_AS_LONG #define PyInt_AsSsize_t PyLong_AsSsize_t #define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask #define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask #define PyNumber_Int PyNumber_Long #endif #if PY_MAJOR_VERSION >= 3 #define PyBoolObject PyLongObject #endif #if PY_MAJOR_VERSION >= 3 && CYTHON_COMPILING_IN_PYPY #ifndef PyUnicode_InternFromString #define PyUnicode_InternFromString(s) PyUnicode_FromString(s) #endif #endif #if PY_VERSION_HEX < 0x030200A4 typedef long Py_hash_t; #define __Pyx_PyInt_FromHash_t PyInt_FromLong #define __Pyx_PyInt_AsHash_t PyInt_AsLong #else #define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t #define __Pyx_PyInt_AsHash_t PyInt_AsSsize_t #endif #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyMethod_New(func, self, klass) ((self) ? 
PyMethod_New(func, self) : PyInstanceMethod_New(func)) #else #define __Pyx_PyMethod_New(func, self, klass) PyMethod_New(func, self, klass) #endif #ifndef __has_attribute #define __has_attribute(x) 0 #endif #ifndef __has_cpp_attribute #define __has_cpp_attribute(x) 0 #endif #if CYTHON_USE_ASYNC_SLOTS #if PY_VERSION_HEX >= 0x030500B1 #define __Pyx_PyAsyncMethodsStruct PyAsyncMethods #define __Pyx_PyType_AsAsync(obj) (Py_TYPE(obj)->tp_as_async) #else #define __Pyx_PyType_AsAsync(obj) ((__Pyx_PyAsyncMethodsStruct*) (Py_TYPE(obj)->tp_reserved)) #endif #else #define __Pyx_PyType_AsAsync(obj) NULL #endif #ifndef __Pyx_PyAsyncMethodsStruct typedef struct { unaryfunc am_await; unaryfunc am_aiter; unaryfunc am_anext; } __Pyx_PyAsyncMethodsStruct; #endif #ifndef CYTHON_RESTRICT #if defined(__GNUC__) #define CYTHON_RESTRICT __restrict__ #elif defined(_MSC_VER) && _MSC_VER >= 1400 #define CYTHON_RESTRICT __restrict #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define CYTHON_RESTRICT restrict #else #define CYTHON_RESTRICT #endif #endif #ifndef CYTHON_UNUSED # if defined(__GNUC__) # if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) # define CYTHON_UNUSED __attribute__ ((__unused__)) # else # define CYTHON_UNUSED # endif # elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER)) # define CYTHON_UNUSED __attribute__ ((__unused__)) # else # define CYTHON_UNUSED # endif #endif #ifndef CYTHON_MAYBE_UNUSED_VAR # if defined(__cplusplus) template void CYTHON_MAYBE_UNUSED_VAR( const T& ) { } # else # define CYTHON_MAYBE_UNUSED_VAR(x) (void)(x) # endif #endif #ifndef CYTHON_NCP_UNUSED # if CYTHON_COMPILING_IN_CPYTHON # define CYTHON_NCP_UNUSED # else # define CYTHON_NCP_UNUSED CYTHON_UNUSED # endif #endif #define __Pyx_void_to_None(void_result) ((void)(void_result), Py_INCREF(Py_None), Py_None) #ifdef _MSC_VER #ifndef _MSC_STDINT_H_ #if _MSC_VER < 1300 typedef unsigned char uint8_t; typedef unsigned int uint32_t; #else typedef unsigned __int8 uint8_t; typedef unsigned __int32 uint32_t; #endif #endif #else #include #endif #ifndef CYTHON_FALLTHROUGH #if defined(__cplusplus) && __cplusplus >= 201103L #if __has_cpp_attribute(fallthrough) #define CYTHON_FALLTHROUGH [[fallthrough]] #elif __has_cpp_attribute(clang::fallthrough) #define CYTHON_FALLTHROUGH [[clang::fallthrough]] #elif __has_cpp_attribute(gnu::fallthrough) #define CYTHON_FALLTHROUGH [[gnu::fallthrough]] #endif #endif #ifndef CYTHON_FALLTHROUGH #if __has_attribute(fallthrough) #define CYTHON_FALLTHROUGH __attribute__((fallthrough)) #else #define CYTHON_FALLTHROUGH #endif #endif #if defined(__clang__ ) && defined(__apple_build_version__) #if __apple_build_version__ < 7000000 #undef CYTHON_FALLTHROUGH #define CYTHON_FALLTHROUGH #endif #endif #endif #ifndef CYTHON_INLINE #if defined(__clang__) #define CYTHON_INLINE __inline__ __attribute__ ((__unused__)) #elif defined(__GNUC__) #define CYTHON_INLINE __inline__ #elif defined(_MSC_VER) #define CYTHON_INLINE __inline #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define CYTHON_INLINE inline #else #define CYTHON_INLINE #endif #endif #if defined(WIN32) || defined(MS_WINDOWS) #define _USE_MATH_DEFINES #endif #include #ifdef NAN #define __PYX_NAN() ((float) NAN) #else static CYTHON_INLINE float __PYX_NAN() { float value; memset(&value, 0xFF, sizeof(value)); return value; } #endif #if defined(__CYGWIN__) && defined(_LDBL_EQ_DBL) #define __Pyx_truncl trunc #else #define __Pyx_truncl truncl #endif #define __PYX_ERR(f_index, lineno, 
Ln_error) \ { \ __pyx_filename = __pyx_f[f_index]; __pyx_lineno = lineno; __pyx_clineno = __LINE__; goto Ln_error; \ } #ifndef __PYX_EXTERN_C #ifdef __cplusplus #define __PYX_EXTERN_C extern "C" #else #define __PYX_EXTERN_C extern #endif #endif #define __PYX_HAVE__photutils__geometry__rectangular_overlap #define __PYX_HAVE_API__photutils__geometry__rectangular_overlap #include #include #include "numpy/arrayobject.h" #include "numpy/ufuncobject.h" #include "math.h" #include "pythread.h" #ifdef _OPENMP #include #endif /* _OPENMP */ #if defined(PYREX_WITHOUT_ASSERTIONS) && !defined(CYTHON_WITHOUT_ASSERTIONS) #define CYTHON_WITHOUT_ASSERTIONS #endif typedef struct {PyObject **p; const char *s; const Py_ssize_t n; const char* encoding; const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry; #define __PYX_DEFAULT_STRING_ENCODING_IS_ASCII 0 #define __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT 0 #define __PYX_DEFAULT_STRING_ENCODING "" #define __Pyx_PyObject_FromString __Pyx_PyBytes_FromString #define __Pyx_PyObject_FromStringAndSize __Pyx_PyBytes_FromStringAndSize #define __Pyx_uchar_cast(c) ((unsigned char)c) #define __Pyx_long_cast(x) ((long)x) #define __Pyx_fits_Py_ssize_t(v, type, is_signed) (\ (sizeof(type) < sizeof(Py_ssize_t)) ||\ (sizeof(type) > sizeof(Py_ssize_t) &&\ likely(v < (type)PY_SSIZE_T_MAX ||\ v == (type)PY_SSIZE_T_MAX) &&\ (!is_signed || likely(v > (type)PY_SSIZE_T_MIN ||\ v == (type)PY_SSIZE_T_MIN))) ||\ (sizeof(type) == sizeof(Py_ssize_t) &&\ (is_signed || likely(v < (type)PY_SSIZE_T_MAX ||\ v == (type)PY_SSIZE_T_MAX))) ) #if defined (__cplusplus) && __cplusplus >= 201103L #include #define __Pyx_sst_abs(value) std::abs(value) #elif SIZEOF_INT >= SIZEOF_SIZE_T #define __Pyx_sst_abs(value) abs(value) #elif SIZEOF_LONG >= SIZEOF_SIZE_T #define __Pyx_sst_abs(value) labs(value) #elif defined (_MSC_VER) #define __Pyx_sst_abs(value) ((Py_ssize_t)_abs64(value)) #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define __Pyx_sst_abs(value) llabs(value) #elif defined (__GNUC__) #define __Pyx_sst_abs(value) __builtin_llabs(value) #else #define __Pyx_sst_abs(value) ((value<0) ? 
-value : value) #endif static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject*); static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject*, Py_ssize_t* length); #define __Pyx_PyByteArray_FromString(s) PyByteArray_FromStringAndSize((const char*)s, strlen((const char*)s)) #define __Pyx_PyByteArray_FromStringAndSize(s, l) PyByteArray_FromStringAndSize((const char*)s, l) #define __Pyx_PyBytes_FromString PyBytes_FromString #define __Pyx_PyBytes_FromStringAndSize PyBytes_FromStringAndSize static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char*); #if PY_MAJOR_VERSION < 3 #define __Pyx_PyStr_FromString __Pyx_PyBytes_FromString #define __Pyx_PyStr_FromStringAndSize __Pyx_PyBytes_FromStringAndSize #else #define __Pyx_PyStr_FromString __Pyx_PyUnicode_FromString #define __Pyx_PyStr_FromStringAndSize __Pyx_PyUnicode_FromStringAndSize #endif #define __Pyx_PyBytes_AsWritableString(s) ((char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsWritableSString(s) ((signed char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsWritableUString(s) ((unsigned char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsString(s) ((const char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsSString(s) ((const signed char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsUString(s) ((const unsigned char*) PyBytes_AS_STRING(s)) #define __Pyx_PyObject_AsWritableString(s) ((char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_AsWritableSString(s) ((signed char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_AsWritableUString(s) ((unsigned char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_AsSString(s) ((const signed char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_AsUString(s) ((const unsigned char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_FromCString(s) __Pyx_PyObject_FromString((const char*)s) #define __Pyx_PyBytes_FromCString(s) __Pyx_PyBytes_FromString((const char*)s) #define __Pyx_PyByteArray_FromCString(s) __Pyx_PyByteArray_FromString((const char*)s) #define __Pyx_PyStr_FromCString(s) __Pyx_PyStr_FromString((const char*)s) #define __Pyx_PyUnicode_FromCString(s) __Pyx_PyUnicode_FromString((const char*)s) static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const Py_UNICODE *u) { const Py_UNICODE *u_end = u; while (*u_end++) ; return (size_t)(u_end - u - 1); } #define __Pyx_PyUnicode_FromUnicode(u) PyUnicode_FromUnicode(u, __Pyx_Py_UNICODE_strlen(u)) #define __Pyx_PyUnicode_FromUnicodeAndLength PyUnicode_FromUnicode #define __Pyx_PyUnicode_AsUnicode PyUnicode_AsUnicode #define __Pyx_NewRef(obj) (Py_INCREF(obj), obj) #define __Pyx_Owned_Py_None(b) __Pyx_NewRef(Py_None) #define __Pyx_PyBool_FromLong(b) ((b) ? __Pyx_NewRef(Py_True) : __Pyx_NewRef(Py_False)) static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*); static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x); #define __Pyx_PySequence_Tuple(obj)\ (likely(PyTuple_CheckExact(obj)) ? __Pyx_NewRef(obj) : PySequence_Tuple(obj)) static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*); static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t); #if CYTHON_ASSUME_SAFE_MACROS #define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x)) #else #define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x) #endif #define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x)) #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyNumber_Int(x) (PyLong_CheckExact(x) ? 
__Pyx_NewRef(x) : PyNumber_Long(x)) #else #define __Pyx_PyNumber_Int(x) (PyInt_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Int(x)) #endif #define __Pyx_PyNumber_Float(x) (PyFloat_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Float(x)) #if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII static int __Pyx_sys_getdefaultencoding_not_ascii; static int __Pyx_init_sys_getdefaultencoding_params(void) { PyObject* sys; PyObject* default_encoding = NULL; PyObject* ascii_chars_u = NULL; PyObject* ascii_chars_b = NULL; const char* default_encoding_c; sys = PyImport_ImportModule("sys"); if (!sys) goto bad; default_encoding = PyObject_CallMethod(sys, (char*) "getdefaultencoding", NULL); Py_DECREF(sys); if (!default_encoding) goto bad; default_encoding_c = PyBytes_AsString(default_encoding); if (!default_encoding_c) goto bad; if (strcmp(default_encoding_c, "ascii") == 0) { __Pyx_sys_getdefaultencoding_not_ascii = 0; } else { char ascii_chars[128]; int c; for (c = 0; c < 128; c++) { ascii_chars[c] = c; } __Pyx_sys_getdefaultencoding_not_ascii = 1; ascii_chars_u = PyUnicode_DecodeASCII(ascii_chars, 128, NULL); if (!ascii_chars_u) goto bad; ascii_chars_b = PyUnicode_AsEncodedString(ascii_chars_u, default_encoding_c, NULL); if (!ascii_chars_b || !PyBytes_Check(ascii_chars_b) || memcmp(ascii_chars, PyBytes_AS_STRING(ascii_chars_b), 128) != 0) { PyErr_Format( PyExc_ValueError, "This module compiled with c_string_encoding=ascii, but default encoding '%.200s' is not a superset of ascii.", default_encoding_c); goto bad; } Py_DECREF(ascii_chars_u); Py_DECREF(ascii_chars_b); } Py_DECREF(default_encoding); return 0; bad: Py_XDECREF(default_encoding); Py_XDECREF(ascii_chars_u); Py_XDECREF(ascii_chars_b); return -1; } #endif #if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT && PY_MAJOR_VERSION >= 3 #define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_DecodeUTF8(c_str, size, NULL) #else #define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_Decode(c_str, size, __PYX_DEFAULT_STRING_ENCODING, NULL) #if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT static char* __PYX_DEFAULT_STRING_ENCODING; static int __Pyx_init_sys_getdefaultencoding_params(void) { PyObject* sys; PyObject* default_encoding = NULL; char* default_encoding_c; sys = PyImport_ImportModule("sys"); if (!sys) goto bad; default_encoding = PyObject_CallMethod(sys, (char*) (const char*) "getdefaultencoding", NULL); Py_DECREF(sys); if (!default_encoding) goto bad; default_encoding_c = PyBytes_AsString(default_encoding); if (!default_encoding_c) goto bad; __PYX_DEFAULT_STRING_ENCODING = (char*) malloc(strlen(default_encoding_c)); if (!__PYX_DEFAULT_STRING_ENCODING) goto bad; strcpy(__PYX_DEFAULT_STRING_ENCODING, default_encoding_c); Py_DECREF(default_encoding); return 0; bad: Py_XDECREF(default_encoding); return -1; } #endif #endif /* Test for GCC > 2.95 */ #if defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))) #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #else /* !__GNUC__ or GCC < 2.95 */ #define likely(x) (x) #define unlikely(x) (x) #endif /* __GNUC__ */ static CYTHON_INLINE void __Pyx_pretend_to_initialize(void* ptr) { (void)ptr; } static PyObject *__pyx_m = NULL; static PyObject *__pyx_d; static PyObject *__pyx_b; static PyObject *__pyx_cython_runtime; static PyObject *__pyx_empty_tuple; static PyObject *__pyx_empty_bytes; static PyObject *__pyx_empty_unicode; static int __pyx_lineno; static int __pyx_clineno = 0; static const char * __pyx_cfilenm= __FILE__; static 
const char *__pyx_filename; /* Header.proto */ #if !defined(CYTHON_CCOMPLEX) #if defined(__cplusplus) #define CYTHON_CCOMPLEX 1 #elif defined(_Complex_I) #define CYTHON_CCOMPLEX 1 #else #define CYTHON_CCOMPLEX 0 #endif #endif #if CYTHON_CCOMPLEX #ifdef __cplusplus #include #else #include #endif #endif #if CYTHON_CCOMPLEX && !defined(__cplusplus) && defined(__sun__) && defined(__GNUC__) #undef _Complex_I #define _Complex_I 1.0fj #endif static const char *__pyx_f[] = { "photutils/geometry/rectangular_overlap.pyx", "__init__.pxd", "type.pxd", "bool.pxd", "complex.pxd", }; /* BufferFormatStructs.proto */ #define IS_UNSIGNED(type) (((type) -1) > 0) struct __Pyx_StructField_; #define __PYX_BUF_FLAGS_PACKED_STRUCT (1 << 0) typedef struct { const char* name; struct __Pyx_StructField_* fields; size_t size; size_t arraysize[8]; int ndim; char typegroup; char is_unsigned; int flags; } __Pyx_TypeInfo; typedef struct __Pyx_StructField_ { __Pyx_TypeInfo* type; const char* name; size_t offset; } __Pyx_StructField; typedef struct { __Pyx_StructField* field; size_t parent_offset; } __Pyx_BufFmt_StackElem; typedef struct { __Pyx_StructField root; __Pyx_BufFmt_StackElem* head; size_t fmt_offset; size_t new_count, enc_count; size_t struct_alignment; int is_complex; char enc_type; char new_packmode; char enc_packmode; char is_valid_array; } __Pyx_BufFmt_Context; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":743 * # in Cython to enable them only on the right systems. * * ctypedef npy_int8 int8_t # <<<<<<<<<<<<<< * ctypedef npy_int16 int16_t * ctypedef npy_int32 int32_t */ typedef npy_int8 __pyx_t_5numpy_int8_t; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":744 * * ctypedef npy_int8 int8_t * ctypedef npy_int16 int16_t # <<<<<<<<<<<<<< * ctypedef npy_int32 int32_t * ctypedef npy_int64 int64_t */ typedef npy_int16 __pyx_t_5numpy_int16_t; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":745 * ctypedef npy_int8 int8_t * ctypedef npy_int16 int16_t * ctypedef npy_int32 int32_t # <<<<<<<<<<<<<< * ctypedef npy_int64 int64_t * #ctypedef npy_int96 int96_t */ typedef npy_int32 __pyx_t_5numpy_int32_t; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":746 * ctypedef npy_int16 int16_t * ctypedef npy_int32 int32_t * ctypedef npy_int64 int64_t # <<<<<<<<<<<<<< * #ctypedef npy_int96 int96_t * #ctypedef npy_int128 int128_t */ typedef npy_int64 __pyx_t_5numpy_int64_t; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":750 * #ctypedef npy_int128 int128_t * * ctypedef npy_uint8 uint8_t # <<<<<<<<<<<<<< * ctypedef npy_uint16 uint16_t * ctypedef npy_uint32 uint32_t */ typedef npy_uint8 __pyx_t_5numpy_uint8_t; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":751 * * ctypedef npy_uint8 uint8_t * ctypedef npy_uint16 uint16_t # <<<<<<<<<<<<<< * ctypedef npy_uint32 uint32_t * ctypedef npy_uint64 uint64_t */ typedef npy_uint16 __pyx_t_5numpy_uint16_t; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":752 * ctypedef npy_uint8 uint8_t * ctypedef npy_uint16 uint16_t * ctypedef npy_uint32 uint32_t # <<<<<<<<<<<<<< * ctypedef npy_uint64 uint64_t * #ctypedef npy_uint96 uint96_t */ typedef npy_uint32 __pyx_t_5numpy_uint32_t; /* 
"../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":753 * ctypedef npy_uint16 uint16_t * ctypedef npy_uint32 uint32_t * ctypedef npy_uint64 uint64_t # <<<<<<<<<<<<<< * #ctypedef npy_uint96 uint96_t * #ctypedef npy_uint128 uint128_t */ typedef npy_uint64 __pyx_t_5numpy_uint64_t; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":757 * #ctypedef npy_uint128 uint128_t * * ctypedef npy_float32 float32_t # <<<<<<<<<<<<<< * ctypedef npy_float64 float64_t * #ctypedef npy_float80 float80_t */ typedef npy_float32 __pyx_t_5numpy_float32_t; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":758 * * ctypedef npy_float32 float32_t * ctypedef npy_float64 float64_t # <<<<<<<<<<<<<< * #ctypedef npy_float80 float80_t * #ctypedef npy_float128 float128_t */ typedef npy_float64 __pyx_t_5numpy_float64_t; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":767 * # The int types are mapped a bit surprising -- * # numpy.int corresponds to 'l' and numpy.long to 'q' * ctypedef npy_long int_t # <<<<<<<<<<<<<< * ctypedef npy_longlong long_t * ctypedef npy_longlong longlong_t */ typedef npy_long __pyx_t_5numpy_int_t; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":768 * # numpy.int corresponds to 'l' and numpy.long to 'q' * ctypedef npy_long int_t * ctypedef npy_longlong long_t # <<<<<<<<<<<<<< * ctypedef npy_longlong longlong_t * */ typedef npy_longlong __pyx_t_5numpy_long_t; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":769 * ctypedef npy_long int_t * ctypedef npy_longlong long_t * ctypedef npy_longlong longlong_t # <<<<<<<<<<<<<< * * ctypedef npy_ulong uint_t */ typedef npy_longlong __pyx_t_5numpy_longlong_t; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":771 * ctypedef npy_longlong longlong_t * * ctypedef npy_ulong uint_t # <<<<<<<<<<<<<< * ctypedef npy_ulonglong ulong_t * ctypedef npy_ulonglong ulonglong_t */ typedef npy_ulong __pyx_t_5numpy_uint_t; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":772 * * ctypedef npy_ulong uint_t * ctypedef npy_ulonglong ulong_t # <<<<<<<<<<<<<< * ctypedef npy_ulonglong ulonglong_t * */ typedef npy_ulonglong __pyx_t_5numpy_ulong_t; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":773 * ctypedef npy_ulong uint_t * ctypedef npy_ulonglong ulong_t * ctypedef npy_ulonglong ulonglong_t # <<<<<<<<<<<<<< * * ctypedef npy_intp intp_t */ typedef npy_ulonglong __pyx_t_5numpy_ulonglong_t; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":775 * ctypedef npy_ulonglong ulonglong_t * * ctypedef npy_intp intp_t # <<<<<<<<<<<<<< * ctypedef npy_uintp uintp_t * */ typedef npy_intp __pyx_t_5numpy_intp_t; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":776 * * ctypedef npy_intp intp_t * ctypedef npy_uintp uintp_t # <<<<<<<<<<<<<< * * ctypedef npy_double float_t */ typedef npy_uintp __pyx_t_5numpy_uintp_t; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":778 * ctypedef npy_uintp uintp_t * * ctypedef npy_double float_t # <<<<<<<<<<<<<< * ctypedef npy_double double_t * ctypedef npy_longdouble longdouble_t */ typedef 
npy_double __pyx_t_5numpy_float_t; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":779 * * ctypedef npy_double float_t * ctypedef npy_double double_t # <<<<<<<<<<<<<< * ctypedef npy_longdouble longdouble_t * */ typedef npy_double __pyx_t_5numpy_double_t; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":780 * ctypedef npy_double float_t * ctypedef npy_double double_t * ctypedef npy_longdouble longdouble_t # <<<<<<<<<<<<<< * * ctypedef npy_cfloat cfloat_t */ typedef npy_longdouble __pyx_t_5numpy_longdouble_t; /* "photutils/geometry/rectangular_overlap.pyx":23 * * DTYPE = np.float64 * ctypedef np.float64_t DTYPE_t # <<<<<<<<<<<<<< * * cimport cython */ typedef __pyx_t_5numpy_float64_t __pyx_t_9photutils_8geometry_19rectangular_overlap_DTYPE_t; /* Declarations.proto */ #if CYTHON_CCOMPLEX #ifdef __cplusplus typedef ::std::complex< float > __pyx_t_float_complex; #else typedef float _Complex __pyx_t_float_complex; #endif #else typedef struct { float real, imag; } __pyx_t_float_complex; #endif static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float, float); /* Declarations.proto */ #if CYTHON_CCOMPLEX #ifdef __cplusplus typedef ::std::complex< double > __pyx_t_double_complex; #else typedef double _Complex __pyx_t_double_complex; #endif #else typedef struct { double real, imag; } __pyx_t_double_complex; #endif static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double, double); /*--- Type declarations ---*/ /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":782 * ctypedef npy_longdouble longdouble_t * * ctypedef npy_cfloat cfloat_t # <<<<<<<<<<<<<< * ctypedef npy_cdouble cdouble_t * ctypedef npy_clongdouble clongdouble_t */ typedef npy_cfloat __pyx_t_5numpy_cfloat_t; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":783 * * ctypedef npy_cfloat cfloat_t * ctypedef npy_cdouble cdouble_t # <<<<<<<<<<<<<< * ctypedef npy_clongdouble clongdouble_t * */ typedef npy_cdouble __pyx_t_5numpy_cdouble_t; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":784 * ctypedef npy_cfloat cfloat_t * ctypedef npy_cdouble cdouble_t * ctypedef npy_clongdouble clongdouble_t # <<<<<<<<<<<<<< * * ctypedef npy_cdouble complex_t */ typedef npy_clongdouble __pyx_t_5numpy_clongdouble_t; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":786 * ctypedef npy_clongdouble clongdouble_t * * ctypedef npy_cdouble complex_t # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew1(a): */ typedef npy_cdouble __pyx_t_5numpy_complex_t; /* --- Runtime support code (head) --- */ /* Refnanny.proto */ #ifndef CYTHON_REFNANNY #define CYTHON_REFNANNY 0 #endif #if CYTHON_REFNANNY typedef struct { void (*INCREF)(void*, PyObject*, int); void (*DECREF)(void*, PyObject*, int); void (*GOTREF)(void*, PyObject*, int); void (*GIVEREF)(void*, PyObject*, int); void* (*SetupContext)(const char*, int, const char*); void (*FinishContext)(void**); } __Pyx_RefNannyAPIStruct; static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL; static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname); #define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL; #ifdef WITH_THREAD #define __Pyx_RefNannySetupContext(name, acquire_gil)\ if (acquire_gil) {\ PyGILState_STATE __pyx_gilstate_save = 
PyGILState_Ensure();\ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\ PyGILState_Release(__pyx_gilstate_save);\ } else {\ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\ } #else #define __Pyx_RefNannySetupContext(name, acquire_gil)\ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__) #endif #define __Pyx_RefNannyFinishContext()\ __Pyx_RefNanny->FinishContext(&__pyx_refnanny) #define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0) #define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0) #define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0) #define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0) #else #define __Pyx_RefNannyDeclarations #define __Pyx_RefNannySetupContext(name, acquire_gil) #define __Pyx_RefNannyFinishContext() #define __Pyx_INCREF(r) Py_INCREF(r) #define __Pyx_DECREF(r) Py_DECREF(r) #define __Pyx_GOTREF(r) #define __Pyx_GIVEREF(r) #define __Pyx_XINCREF(r) Py_XINCREF(r) #define __Pyx_XDECREF(r) Py_XDECREF(r) #define __Pyx_XGOTREF(r) #define __Pyx_XGIVEREF(r) #endif #define __Pyx_XDECREF_SET(r, v) do {\ PyObject *tmp = (PyObject *) r;\ r = v; __Pyx_XDECREF(tmp);\ } while (0) #define __Pyx_DECREF_SET(r, v) do {\ PyObject *tmp = (PyObject *) r;\ r = v; __Pyx_DECREF(tmp);\ } while (0) #define __Pyx_CLEAR(r) do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0) #define __Pyx_XCLEAR(r) do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0) /* PyObjectGetAttrStr.proto */ #if CYTHON_USE_TYPE_SLOTS static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name) { PyTypeObject* tp = Py_TYPE(obj); if (likely(tp->tp_getattro)) return tp->tp_getattro(obj, attr_name); #if PY_MAJOR_VERSION < 3 if (likely(tp->tp_getattr)) return tp->tp_getattr(obj, PyString_AS_STRING(attr_name)); #endif return PyObject_GetAttr(obj, attr_name); } #else #define __Pyx_PyObject_GetAttrStr(o,n) PyObject_GetAttr(o,n) #endif /* GetBuiltinName.proto */ static PyObject *__Pyx_GetBuiltinName(PyObject *name); /* RaiseArgTupleInvalid.proto */ static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact, Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found); /* RaiseDoubleKeywords.proto */ static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name); /* ParseKeywords.proto */ static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[],\ PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args,\ const char* function_name); /* GetModuleGlobalName.proto */ static CYTHON_INLINE PyObject *__Pyx_GetModuleGlobalName(PyObject *name); /* PyObjectCall.proto */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw); #else #define __Pyx_PyObject_Call(func, arg, kw) PyObject_Call(func, arg, kw) #endif /* ExtTypeTest.proto */ static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type); /* IsLittleEndian.proto */ static CYTHON_INLINE int __Pyx_Is_Little_Endian(void); /* 
BufferFormatCheck.proto */ static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts); static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx, __Pyx_BufFmt_StackElem* stack, __Pyx_TypeInfo* type); /* BufferGetAndValidate.proto */ #define __Pyx_GetBufferAndValidate(buf, obj, dtype, flags, nd, cast, stack)\ ((obj == Py_None || obj == NULL) ?\ (__Pyx_ZeroBuffer(buf), 0) :\ __Pyx__GetBufferAndValidate(buf, obj, dtype, flags, nd, cast, stack)) static int __Pyx__GetBufferAndValidate(Py_buffer* buf, PyObject* obj, __Pyx_TypeInfo* dtype, int flags, int nd, int cast, __Pyx_BufFmt_StackElem* stack); static void __Pyx_ZeroBuffer(Py_buffer* buf); static CYTHON_INLINE void __Pyx_SafeReleaseBuffer(Py_buffer* info); static Py_ssize_t __Pyx_minusones[] = { -1, -1, -1, -1, -1, -1, -1, -1 }; static Py_ssize_t __Pyx_zeros[] = { 0, 0, 0, 0, 0, 0, 0, 0 }; /* PyThreadStateGet.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_PyThreadState_declare PyThreadState *__pyx_tstate; #define __Pyx_PyThreadState_assign __pyx_tstate = __Pyx_PyThreadState_Current; #define __Pyx_PyErr_Occurred() __pyx_tstate->curexc_type #else #define __Pyx_PyThreadState_declare #define __Pyx_PyThreadState_assign #define __Pyx_PyErr_Occurred() PyErr_Occurred() #endif /* PyErrFetchRestore.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_PyErr_Clear() __Pyx_ErrRestore(NULL, NULL, NULL) #define __Pyx_ErrRestoreWithState(type, value, tb) __Pyx_ErrRestoreInState(PyThreadState_GET(), type, value, tb) #define __Pyx_ErrFetchWithState(type, value, tb) __Pyx_ErrFetchInState(PyThreadState_GET(), type, value, tb) #define __Pyx_ErrRestore(type, value, tb) __Pyx_ErrRestoreInState(__pyx_tstate, type, value, tb) #define __Pyx_ErrFetch(type, value, tb) __Pyx_ErrFetchInState(__pyx_tstate, type, value, tb) static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb); static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); #if CYTHON_COMPILING_IN_CPYTHON #define __Pyx_PyErr_SetNone(exc) (Py_INCREF(exc), __Pyx_ErrRestore((exc), NULL, NULL)) #else #define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc) #endif #else #define __Pyx_PyErr_Clear() PyErr_Clear() #define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc) #define __Pyx_ErrRestoreWithState(type, value, tb) PyErr_Restore(type, value, tb) #define __Pyx_ErrFetchWithState(type, value, tb) PyErr_Fetch(type, value, tb) #define __Pyx_ErrRestoreInState(tstate, type, value, tb) PyErr_Restore(type, value, tb) #define __Pyx_ErrFetchInState(tstate, type, value, tb) PyErr_Fetch(type, value, tb) #define __Pyx_ErrRestore(type, value, tb) PyErr_Restore(type, value, tb) #define __Pyx_ErrFetch(type, value, tb) PyErr_Fetch(type, value, tb) #endif /* RaiseException.proto */ static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause); /* BufferIndexError.proto */ static void __Pyx_RaiseBufferIndexError(int axis); #define __Pyx_BufPtrStrided2d(type, buf, i0, s0, i1, s1) (type)((char*)buf + i0 * s0 + i1 * s1) /* WriteUnraisableException.proto */ static void __Pyx_WriteUnraisable(const char *name, int clineno, int lineno, const char *filename, int full_traceback, int nogil); /* DictGetItem.proto */ #if PY_MAJOR_VERSION >= 3 && !CYTHON_COMPILING_IN_PYPY static PyObject *__Pyx_PyDict_GetItem(PyObject *d, PyObject* key) { PyObject *value; value = PyDict_GetItemWithError(d, key); if (unlikely(!value)) { if (!PyErr_Occurred()) { PyObject* args = 
PyTuple_Pack(1, key); if (likely(args)) PyErr_SetObject(PyExc_KeyError, args); Py_XDECREF(args); } return NULL; } Py_INCREF(value); return value; } #else #define __Pyx_PyDict_GetItem(d, key) PyObject_GetItem(d, key) #endif /* RaiseTooManyValuesToUnpack.proto */ static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected); /* RaiseNeedMoreValuesToUnpack.proto */ static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index); /* RaiseNoneIterError.proto */ static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void); /* SaveResetException.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_ExceptionSave(type, value, tb) __Pyx__ExceptionSave(__pyx_tstate, type, value, tb) static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); #define __Pyx_ExceptionReset(type, value, tb) __Pyx__ExceptionReset(__pyx_tstate, type, value, tb) static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb); #else #define __Pyx_ExceptionSave(type, value, tb) PyErr_GetExcInfo(type, value, tb) #define __Pyx_ExceptionReset(type, value, tb) PyErr_SetExcInfo(type, value, tb) #endif /* PyErrExceptionMatches.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_PyErr_ExceptionMatches(err) __Pyx_PyErr_ExceptionMatchesInState(__pyx_tstate, err) static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err); #else #define __Pyx_PyErr_ExceptionMatches(err) PyErr_ExceptionMatches(err) #endif /* GetException.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_GetException(type, value, tb) __Pyx__GetException(__pyx_tstate, type, value, tb) static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); #else static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb); #endif /* Import.proto */ static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level); /* CLineInTraceback.proto */ #ifdef CYTHON_CLINE_IN_TRACEBACK #define __Pyx_CLineForTraceback(tstate, c_line) (((CYTHON_CLINE_IN_TRACEBACK)) ? 
c_line : 0) #else static int __Pyx_CLineForTraceback(PyThreadState *tstate, int c_line); #endif /* CodeObjectCache.proto */ typedef struct { PyCodeObject* code_object; int code_line; } __Pyx_CodeObjectCacheEntry; struct __Pyx_CodeObjectCache { int count; int max_count; __Pyx_CodeObjectCacheEntry* entries; }; static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL}; static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line); static PyCodeObject *__pyx_find_code_object(int code_line); static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object); /* AddTraceback.proto */ static void __Pyx_AddTraceback(const char *funcname, int c_line, int py_line, const char *filename); /* BufferStructDeclare.proto */ typedef struct { Py_ssize_t shape, strides, suboffsets; } __Pyx_Buf_DimInfo; typedef struct { size_t refcount; Py_buffer pybuffer; } __Pyx_Buffer; typedef struct { __Pyx_Buffer *rcbuffer; char *data; __Pyx_Buf_DimInfo diminfo[8]; } __Pyx_LocalBuf_ND; #if PY_MAJOR_VERSION < 3 static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags); static void __Pyx_ReleaseBuffer(Py_buffer *view); #else #define __Pyx_GetBuffer PyObject_GetBuffer #define __Pyx_ReleaseBuffer PyBuffer_Release #endif /* CIntToPy.proto */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value); /* RealImag.proto */ #if CYTHON_CCOMPLEX #ifdef __cplusplus #define __Pyx_CREAL(z) ((z).real()) #define __Pyx_CIMAG(z) ((z).imag()) #else #define __Pyx_CREAL(z) (__real__(z)) #define __Pyx_CIMAG(z) (__imag__(z)) #endif #else #define __Pyx_CREAL(z) ((z).real) #define __Pyx_CIMAG(z) ((z).imag) #endif #if defined(__cplusplus) && CYTHON_CCOMPLEX\ && (defined(_WIN32) || defined(__clang__) || (defined(__GNUC__) && (__GNUC__ >= 5 || __GNUC__ == 4 && __GNUC_MINOR__ >= 4 )) || __cplusplus >= 201103) #define __Pyx_SET_CREAL(z,x) ((z).real(x)) #define __Pyx_SET_CIMAG(z,y) ((z).imag(y)) #else #define __Pyx_SET_CREAL(z,x) __Pyx_CREAL(z) = (x) #define __Pyx_SET_CIMAG(z,y) __Pyx_CIMAG(z) = (y) #endif /* Arithmetic.proto */ #if CYTHON_CCOMPLEX #define __Pyx_c_eq_float(a, b) ((a)==(b)) #define __Pyx_c_sum_float(a, b) ((a)+(b)) #define __Pyx_c_diff_float(a, b) ((a)-(b)) #define __Pyx_c_prod_float(a, b) ((a)*(b)) #define __Pyx_c_quot_float(a, b) ((a)/(b)) #define __Pyx_c_neg_float(a) (-(a)) #ifdef __cplusplus #define __Pyx_c_is_zero_float(z) ((z)==(float)0) #define __Pyx_c_conj_float(z) (::std::conj(z)) #if 1 #define __Pyx_c_abs_float(z) (::std::abs(z)) #define __Pyx_c_pow_float(a, b) (::std::pow(a, b)) #endif #else #define __Pyx_c_is_zero_float(z) ((z)==0) #define __Pyx_c_conj_float(z) (conjf(z)) #if 1 #define __Pyx_c_abs_float(z) (cabsf(z)) #define __Pyx_c_pow_float(a, b) (cpowf(a, b)) #endif #endif #else static CYTHON_INLINE int __Pyx_c_eq_float(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sum_float(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_diff_float(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prod_float(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quot_float(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_neg_float(__pyx_t_float_complex); static CYTHON_INLINE int __Pyx_c_is_zero_float(__pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conj_float(__pyx_t_float_complex); #if 1 static 
CYTHON_INLINE float __Pyx_c_abs_float(__pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_pow_float(__pyx_t_float_complex, __pyx_t_float_complex); #endif #endif /* Arithmetic.proto */ #if CYTHON_CCOMPLEX #define __Pyx_c_eq_double(a, b) ((a)==(b)) #define __Pyx_c_sum_double(a, b) ((a)+(b)) #define __Pyx_c_diff_double(a, b) ((a)-(b)) #define __Pyx_c_prod_double(a, b) ((a)*(b)) #define __Pyx_c_quot_double(a, b) ((a)/(b)) #define __Pyx_c_neg_double(a) (-(a)) #ifdef __cplusplus #define __Pyx_c_is_zero_double(z) ((z)==(double)0) #define __Pyx_c_conj_double(z) (::std::conj(z)) #if 1 #define __Pyx_c_abs_double(z) (::std::abs(z)) #define __Pyx_c_pow_double(a, b) (::std::pow(a, b)) #endif #else #define __Pyx_c_is_zero_double(z) ((z)==0) #define __Pyx_c_conj_double(z) (conj(z)) #if 1 #define __Pyx_c_abs_double(z) (cabs(z)) #define __Pyx_c_pow_double(a, b) (cpow(a, b)) #endif #endif #else static CYTHON_INLINE int __Pyx_c_eq_double(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum_double(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff_double(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod_double(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot_double(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg_double(__pyx_t_double_complex); static CYTHON_INLINE int __Pyx_c_is_zero_double(__pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj_double(__pyx_t_double_complex); #if 1 static CYTHON_INLINE double __Pyx_c_abs_double(__pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow_double(__pyx_t_double_complex, __pyx_t_double_complex); #endif #endif /* CIntToPy.proto */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_enum__NPY_TYPES(enum NPY_TYPES value); /* CIntFromPy.proto */ static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *); /* CIntFromPy.proto */ static CYTHON_INLINE unsigned int __Pyx_PyInt_As_unsigned_int(PyObject *); /* CIntToPy.proto */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value); /* CIntFromPy.proto */ static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *); /* FastTypeChecks.proto */ #if CYTHON_COMPILING_IN_CPYTHON #define __Pyx_TypeCheck(obj, type) __Pyx_IsSubtype(Py_TYPE(obj), (PyTypeObject *)type) static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b); static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject *type); static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *type1, PyObject *type2); #else #define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type) #define __Pyx_PyErr_GivenExceptionMatches(err, type) PyErr_GivenExceptionMatches(err, type) #define __Pyx_PyErr_GivenExceptionMatches2(err, type1, type2) (PyErr_GivenExceptionMatches(err, type1) || PyErr_GivenExceptionMatches(err, type2)) #endif /* CheckBinaryVersion.proto */ static int __Pyx_check_binary_version(void); /* PyIdentifierFromString.proto */ #if !defined(__Pyx_PyIdentifier_FromString) #if PY_MAJOR_VERSION < 3 #define __Pyx_PyIdentifier_FromString(s) PyString_FromString(s) #else #define __Pyx_PyIdentifier_FromString(s) PyUnicode_FromString(s) #endif #endif /* ModuleImport.proto */ static PyObject *__Pyx_ImportModule(const char *name); /* TypeImport.proto */ 
static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class_name, size_t size, int strict); /* InitStrings.proto */ static int __Pyx_InitStrings(__Pyx_StringTabEntry *t); /* Module declarations from 'cpython.buffer' */ /* Module declarations from 'libc.string' */ /* Module declarations from 'libc.stdio' */ /* Module declarations from '__builtin__' */ /* Module declarations from 'cpython.type' */ static PyTypeObject *__pyx_ptype_7cpython_4type_type = 0; /* Module declarations from 'cpython.version' */ /* Module declarations from 'cpython.exc' */ /* Module declarations from 'cpython.module' */ /* Module declarations from 'cpython.mem' */ /* Module declarations from 'cpython.tuple' */ /* Module declarations from 'cpython.list' */ /* Module declarations from 'cpython.sequence' */ /* Module declarations from 'cpython.mapping' */ /* Module declarations from 'cpython.iterator' */ /* Module declarations from 'cpython.number' */ /* Module declarations from 'cpython.int' */ /* Module declarations from '__builtin__' */ /* Module declarations from 'cpython.bool' */ static PyTypeObject *__pyx_ptype_7cpython_4bool_bool = 0; /* Module declarations from 'cpython.long' */ /* Module declarations from 'cpython.float' */ /* Module declarations from '__builtin__' */ /* Module declarations from 'cpython.complex' */ static PyTypeObject *__pyx_ptype_7cpython_7complex_complex = 0; /* Module declarations from 'cpython.string' */ /* Module declarations from 'cpython.unicode' */ /* Module declarations from 'cpython.dict' */ /* Module declarations from 'cpython.instance' */ /* Module declarations from 'cpython.function' */ /* Module declarations from 'cpython.method' */ /* Module declarations from 'cpython.weakref' */ /* Module declarations from 'cpython.getargs' */ /* Module declarations from 'cpython.pythread' */ /* Module declarations from 'cpython.pystate' */ /* Module declarations from 'cpython.cobject' */ /* Module declarations from 'cpython.oldbuffer' */ /* Module declarations from 'cpython.set' */ /* Module declarations from 'cpython.bytes' */ /* Module declarations from 'cpython.pycapsule' */ /* Module declarations from 'cpython' */ /* Module declarations from 'cpython.object' */ /* Module declarations from 'cpython.ref' */ /* Module declarations from 'numpy' */ /* Module declarations from 'numpy' */ static PyTypeObject *__pyx_ptype_5numpy_dtype = 0; static PyTypeObject *__pyx_ptype_5numpy_flatiter = 0; static PyTypeObject *__pyx_ptype_5numpy_broadcast = 0; static PyTypeObject *__pyx_ptype_5numpy_ndarray = 0; static PyTypeObject *__pyx_ptype_5numpy_ufunc = 0; static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *, char *, char *, int *); /*proto*/ /* Module declarations from 'cython' */ /* Module declarations from 'photutils.geometry.rectangular_overlap' */ static double __pyx_f_9photutils_8geometry_19rectangular_overlap_rectangular_overlap_single_subpixel(double, double, double, double, double, double, double, int); /*proto*/ static __Pyx_TypeInfo __Pyx_TypeInfo_nn___pyx_t_9photutils_8geometry_19rectangular_overlap_DTYPE_t = { "DTYPE_t", NULL, sizeof(__pyx_t_9photutils_8geometry_19rectangular_overlap_DTYPE_t), { 0 }, 0, 'R', 0, 0 }; #define __Pyx_MODULE_NAME "photutils.geometry.rectangular_overlap" int __pyx_module_is_main_photutils__geometry__rectangular_overlap = 0; /* Implementation of 'photutils.geometry.rectangular_overlap' */ static PyObject *__pyx_builtin_NotImplementedError; static PyObject *__pyx_builtin_range; static PyObject *__pyx_builtin_ValueError; 
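/* These __pyx_builtin_* slots cache references to the builtins this module uses
 * (NotImplementedError, range, ValueError, RuntimeError, ImportError); they are
 * resolved once at module initialization via __Pyx_GetBuiltinName. */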
static PyObject *__pyx_builtin_RuntimeError; static PyObject *__pyx_builtin_ImportError; static const char __pyx_k_i[] = "i"; static const char __pyx_k_j[] = "j"; static const char __pyx_k_x[] = "x"; static const char __pyx_k_y[] = "y"; static const char __pyx_k_dx[] = "dx"; static const char __pyx_k_dy[] = "dy"; static const char __pyx_k_np[] = "np"; static const char __pyx_k_nx[] = "nx"; static const char __pyx_k_ny[] = "ny"; static const char __pyx_k_all[] = "__all__"; static const char __pyx_k_frac[] = "frac"; static const char __pyx_k_main[] = "__main__"; static const char __pyx_k_test[] = "__test__"; static const char __pyx_k_xmax[] = "xmax"; static const char __pyx_k_xmin[] = "xmin"; static const char __pyx_k_ymax[] = "ymax"; static const char __pyx_k_ymin[] = "ymin"; static const char __pyx_k_DTYPE[] = "DTYPE"; static const char __pyx_k_dtype[] = "dtype"; static const char __pyx_k_numpy[] = "numpy"; static const char __pyx_k_pxmax[] = "pxmax"; static const char __pyx_k_pxmin[] = "pxmin"; static const char __pyx_k_pymax[] = "pymax"; static const char __pyx_k_pymin[] = "pymin"; static const char __pyx_k_range[] = "range"; static const char __pyx_k_theta[] = "theta"; static const char __pyx_k_width[] = "width"; static const char __pyx_k_zeros[] = "zeros"; static const char __pyx_k_height[] = "height"; static const char __pyx_k_import[] = "__import__"; static const char __pyx_k_float64[] = "float64"; static const char __pyx_k_subpixels[] = "subpixels"; static const char __pyx_k_use_exact[] = "use_exact"; static const char __pyx_k_ValueError[] = "ValueError"; static const char __pyx_k_ImportError[] = "ImportError"; static const char __pyx_k_RuntimeError[] = "RuntimeError"; static const char __pyx_k_cline_in_traceback[] = "cline_in_traceback"; static const char __pyx_k_NotImplementedError[] = "NotImplementedError"; static const char __pyx_k_rectangular_overlap_grid[] = "rectangular_overlap_grid"; static const char __pyx_k_ndarray_is_not_C_contiguous[] = "ndarray is not C contiguous"; static const char __pyx_k_numpy_core_multiarray_failed_to[] = "numpy.core.multiarray failed to import"; static const char __pyx_k_unknown_dtype_code_in_numpy_pxd[] = "unknown dtype code in numpy.pxd (%d)"; static const char __pyx_k_Exact_mode_has_not_been_implemen[] = "Exact mode has not been implemented for rectangular apertures"; static const char __pyx_k_Format_string_allocated_too_shor[] = "Format string allocated too short, see comment in numpy.pxd"; static const char __pyx_k_Non_native_byte_order_not_suppor[] = "Non-native byte order not supported"; static const char __pyx_k_ndarray_is_not_Fortran_contiguou[] = "ndarray is not Fortran contiguous"; static const char __pyx_k_numpy_core_umath_failed_to_impor[] = "numpy.core.umath failed to import"; static const char __pyx_k_photutils_geometry_rectangular_o[] = "photutils/geometry/rectangular_overlap.pyx"; static const char __pyx_k_Format_string_allocated_too_shor_2[] = "Format string allocated too short."; static const char __pyx_k_photutils_geometry_rectangular_o_2[] = "photutils.geometry.rectangular_overlap"; static PyObject *__pyx_n_s_DTYPE; static PyObject *__pyx_kp_u_Exact_mode_has_not_been_implemen; static PyObject *__pyx_kp_u_Format_string_allocated_too_shor; static PyObject *__pyx_kp_u_Format_string_allocated_too_shor_2; static PyObject *__pyx_n_s_ImportError; static PyObject *__pyx_kp_u_Non_native_byte_order_not_suppor; static PyObject *__pyx_n_s_NotImplementedError; static PyObject *__pyx_n_s_RuntimeError; static PyObject *__pyx_n_s_ValueError; 
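/* The __pyx_k_* byte literals above are interned into the __pyx_n_s_*, __pyx_n_u_* and
 * __pyx_kp_* Python string objects declared here through the module's string table
 * (see __Pyx_InitStrings above); they serve as attribute names, keyword names and
 * message constants in the code below. */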
static PyObject *__pyx_n_s_all; static PyObject *__pyx_n_s_cline_in_traceback; static PyObject *__pyx_n_s_dtype; static PyObject *__pyx_n_s_dx; static PyObject *__pyx_n_s_dy; static PyObject *__pyx_n_s_float64; static PyObject *__pyx_n_s_frac; static PyObject *__pyx_n_s_height; static PyObject *__pyx_n_s_i; static PyObject *__pyx_n_s_import; static PyObject *__pyx_n_s_j; static PyObject *__pyx_n_s_main; static PyObject *__pyx_kp_u_ndarray_is_not_C_contiguous; static PyObject *__pyx_kp_u_ndarray_is_not_Fortran_contiguou; static PyObject *__pyx_n_s_np; static PyObject *__pyx_n_s_numpy; static PyObject *__pyx_kp_u_numpy_core_multiarray_failed_to; static PyObject *__pyx_kp_u_numpy_core_umath_failed_to_impor; static PyObject *__pyx_n_s_nx; static PyObject *__pyx_n_s_ny; static PyObject *__pyx_kp_s_photutils_geometry_rectangular_o; static PyObject *__pyx_n_s_photutils_geometry_rectangular_o_2; static PyObject *__pyx_n_s_pxmax; static PyObject *__pyx_n_s_pxmin; static PyObject *__pyx_n_s_pymax; static PyObject *__pyx_n_s_pymin; static PyObject *__pyx_n_s_range; static PyObject *__pyx_n_s_rectangular_overlap_grid; static PyObject *__pyx_n_u_rectangular_overlap_grid; static PyObject *__pyx_n_s_subpixels; static PyObject *__pyx_n_s_test; static PyObject *__pyx_n_s_theta; static PyObject *__pyx_kp_u_unknown_dtype_code_in_numpy_pxd; static PyObject *__pyx_n_s_use_exact; static PyObject *__pyx_n_s_width; static PyObject *__pyx_n_s_x; static PyObject *__pyx_n_s_xmax; static PyObject *__pyx_n_s_xmin; static PyObject *__pyx_n_s_y; static PyObject *__pyx_n_s_ymax; static PyObject *__pyx_n_s_ymin; static PyObject *__pyx_n_s_zeros; static PyObject *__pyx_pf_9photutils_8geometry_19rectangular_overlap_rectangular_overlap_grid(CYTHON_UNUSED PyObject *__pyx_self, double __pyx_v_xmin, double __pyx_v_xmax, double __pyx_v_ymin, double __pyx_v_ymax, int __pyx_v_nx, int __pyx_v_ny, double __pyx_v_width, double __pyx_v_height, double __pyx_v_theta, int __pyx_v_use_exact, int __pyx_v_subpixels); /* proto */ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */ static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info); /* proto */ static PyObject *__pyx_tuple_; static PyObject *__pyx_tuple__2; static PyObject *__pyx_tuple__3; static PyObject *__pyx_tuple__4; static PyObject *__pyx_tuple__5; static PyObject *__pyx_tuple__6; static PyObject *__pyx_tuple__7; static PyObject *__pyx_tuple__8; static PyObject *__pyx_tuple__9; static PyObject *__pyx_tuple__10; static PyObject *__pyx_tuple__11; static PyObject *__pyx_codeobj__12; /* "photutils/geometry/rectangular_overlap.pyx":28 * * * def rectangular_overlap_grid(double xmin, double xmax, double ymin, # <<<<<<<<<<<<<< * double ymax, int nx, int ny, double width, * double height, double theta, int use_exact, */ /* Python wrapper */ static PyObject *__pyx_pw_9photutils_8geometry_19rectangular_overlap_1rectangular_overlap_grid(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_9photutils_8geometry_19rectangular_overlap_rectangular_overlap_grid[] = "\n rectangular_overlap_grid(xmin, xmax, ymin, ymax, nx, ny, width, height,\n use_exact, subpixels)\n\n Area of overlap between a rectangle and a pixel grid. 
The rectangle is\n centered on the origin.\n\n Parameters\n ----------\n xmin, xmax, ymin, ymax : float\n Extent of the grid in the x and y direction.\n nx, ny : int\n Grid dimensions.\n width : float\n The width of the rectangle\n height : float\n The height of the rectangle\n theta : float\n The position angle of the rectangle in radians (counterclockwise).\n use_exact : 0 or 1\n If set to 1, calculates the exact overlap, while if set to 0, uses a\n subpixel sampling method with ``subpixel`` subpixels in each direction.\n subpixels : int\n If ``use_exact`` is 0, each pixel is resampled by this factor in each\n dimension. Thus, each pixel is divided into ``subpixels ** 2``\n subpixels.\n\n Returns\n -------\n frac : `~numpy.ndarray`\n 2-d array giving the fraction of the overlap.\n "; static PyMethodDef __pyx_mdef_9photutils_8geometry_19rectangular_overlap_1rectangular_overlap_grid = {"rectangular_overlap_grid", (PyCFunction)__pyx_pw_9photutils_8geometry_19rectangular_overlap_1rectangular_overlap_grid, METH_VARARGS|METH_KEYWORDS, __pyx_doc_9photutils_8geometry_19rectangular_overlap_rectangular_overlap_grid}; static PyObject *__pyx_pw_9photutils_8geometry_19rectangular_overlap_1rectangular_overlap_grid(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { double __pyx_v_xmin; double __pyx_v_xmax; double __pyx_v_ymin; double __pyx_v_ymax; int __pyx_v_nx; int __pyx_v_ny; double __pyx_v_width; double __pyx_v_height; double __pyx_v_theta; int __pyx_v_use_exact; int __pyx_v_subpixels; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("rectangular_overlap_grid (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_xmin,&__pyx_n_s_xmax,&__pyx_n_s_ymin,&__pyx_n_s_ymax,&__pyx_n_s_nx,&__pyx_n_s_ny,&__pyx_n_s_width,&__pyx_n_s_height,&__pyx_n_s_theta,&__pyx_n_s_use_exact,&__pyx_n_s_subpixels,0}; PyObject* values[11] = {0,0,0,0,0,0,0,0,0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 11: values[10] = PyTuple_GET_ITEM(__pyx_args, 10); CYTHON_FALLTHROUGH; case 10: values[9] = PyTuple_GET_ITEM(__pyx_args, 9); CYTHON_FALLTHROUGH; case 9: values[8] = PyTuple_GET_ITEM(__pyx_args, 8); CYTHON_FALLTHROUGH; case 8: values[7] = PyTuple_GET_ITEM(__pyx_args, 7); CYTHON_FALLTHROUGH; case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6); CYTHON_FALLTHROUGH; case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); CYTHON_FALLTHROUGH; case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); CYTHON_FALLTHROUGH; case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_xmin)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_xmax)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("rectangular_overlap_grid", 1, 11, 11, 1); __PYX_ERR(0, 28, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_ymin)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("rectangular_overlap_grid", 1, 11, 11, 2); __PYX_ERR(0, 28, __pyx_L3_error) } 
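/* Illustrative Python-level call of the wrapper parsed below (argument values are
 * arbitrary examples, not taken from the photutils sources); all eleven arguments are
 * required, and use_exact must be 0 because exact mode raises NotImplementedError for
 * rectangular apertures:
 *
 *     from photutils.geometry.rectangular_overlap import rectangular_overlap_grid
 *     frac = rectangular_overlap_grid(-1.5, 1.5, -1.5, 1.5, 30, 30, 1.0, 0.5, 0.3, 0, 5)
 */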
CYTHON_FALLTHROUGH; case 3: if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_ymax)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("rectangular_overlap_grid", 1, 11, 11, 3); __PYX_ERR(0, 28, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 4: if (likely((values[4] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_nx)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("rectangular_overlap_grid", 1, 11, 11, 4); __PYX_ERR(0, 28, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 5: if (likely((values[5] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_ny)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("rectangular_overlap_grid", 1, 11, 11, 5); __PYX_ERR(0, 28, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 6: if (likely((values[6] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_width)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("rectangular_overlap_grid", 1, 11, 11, 6); __PYX_ERR(0, 28, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 7: if (likely((values[7] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_height)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("rectangular_overlap_grid", 1, 11, 11, 7); __PYX_ERR(0, 28, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 8: if (likely((values[8] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_theta)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("rectangular_overlap_grid", 1, 11, 11, 8); __PYX_ERR(0, 28, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 9: if (likely((values[9] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_use_exact)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("rectangular_overlap_grid", 1, 11, 11, 9); __PYX_ERR(0, 28, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 10: if (likely((values[10] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_subpixels)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("rectangular_overlap_grid", 1, 11, 11, 10); __PYX_ERR(0, 28, __pyx_L3_error) } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "rectangular_overlap_grid") < 0)) __PYX_ERR(0, 28, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 11) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[3] = PyTuple_GET_ITEM(__pyx_args, 3); values[4] = PyTuple_GET_ITEM(__pyx_args, 4); values[5] = PyTuple_GET_ITEM(__pyx_args, 5); values[6] = PyTuple_GET_ITEM(__pyx_args, 6); values[7] = PyTuple_GET_ITEM(__pyx_args, 7); values[8] = PyTuple_GET_ITEM(__pyx_args, 8); values[9] = PyTuple_GET_ITEM(__pyx_args, 9); values[10] = PyTuple_GET_ITEM(__pyx_args, 10); } __pyx_v_xmin = __pyx_PyFloat_AsDouble(values[0]); if (unlikely((__pyx_v_xmin == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 28, __pyx_L3_error) __pyx_v_xmax = __pyx_PyFloat_AsDouble(values[1]); if (unlikely((__pyx_v_xmax == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 28, __pyx_L3_error) __pyx_v_ymin = __pyx_PyFloat_AsDouble(values[2]); if (unlikely((__pyx_v_ymin == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 28, __pyx_L3_error) __pyx_v_ymax = __pyx_PyFloat_AsDouble(values[3]); if (unlikely((__pyx_v_ymax == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 29, __pyx_L3_error) __pyx_v_nx = __Pyx_PyInt_As_int(values[4]); if (unlikely((__pyx_v_nx == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 29, __pyx_L3_error) __pyx_v_ny = __Pyx_PyInt_As_int(values[5]); if (unlikely((__pyx_v_ny == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 29, __pyx_L3_error) __pyx_v_width = __pyx_PyFloat_AsDouble(values[6]); if (unlikely((__pyx_v_width == (double)-1) 
&& PyErr_Occurred())) __PYX_ERR(0, 29, __pyx_L3_error) __pyx_v_height = __pyx_PyFloat_AsDouble(values[7]); if (unlikely((__pyx_v_height == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 30, __pyx_L3_error) __pyx_v_theta = __pyx_PyFloat_AsDouble(values[8]); if (unlikely((__pyx_v_theta == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 30, __pyx_L3_error) __pyx_v_use_exact = __Pyx_PyInt_As_int(values[9]); if (unlikely((__pyx_v_use_exact == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 30, __pyx_L3_error) __pyx_v_subpixels = __Pyx_PyInt_As_int(values[10]); if (unlikely((__pyx_v_subpixels == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 31, __pyx_L3_error) } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("rectangular_overlap_grid", 1, 11, 11, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 28, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("photutils.geometry.rectangular_overlap.rectangular_overlap_grid", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_9photutils_8geometry_19rectangular_overlap_rectangular_overlap_grid(__pyx_self, __pyx_v_xmin, __pyx_v_xmax, __pyx_v_ymin, __pyx_v_ymax, __pyx_v_nx, __pyx_v_ny, __pyx_v_width, __pyx_v_height, __pyx_v_theta, __pyx_v_use_exact, __pyx_v_subpixels); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_9photutils_8geometry_19rectangular_overlap_rectangular_overlap_grid(CYTHON_UNUSED PyObject *__pyx_self, double __pyx_v_xmin, double __pyx_v_xmax, double __pyx_v_ymin, double __pyx_v_ymax, int __pyx_v_nx, int __pyx_v_ny, double __pyx_v_width, double __pyx_v_height, double __pyx_v_theta, int __pyx_v_use_exact, int __pyx_v_subpixels) { unsigned int __pyx_v_i; unsigned int __pyx_v_j; double __pyx_v_dx; double __pyx_v_dy; double __pyx_v_pxmin; double __pyx_v_pxmax; double __pyx_v_pymin; double __pyx_v_pymax; PyArrayObject *__pyx_v_frac = 0; __Pyx_LocalBuf_ND __pyx_pybuffernd_frac; __Pyx_Buffer __pyx_pybuffer_frac; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyArrayObject *__pyx_t_5 = NULL; int __pyx_t_6; double __pyx_t_7; int __pyx_t_8; unsigned int __pyx_t_9; int __pyx_t_10; unsigned int __pyx_t_11; size_t __pyx_t_12; size_t __pyx_t_13; int __pyx_t_14; __Pyx_RefNannySetupContext("rectangular_overlap_grid", 0); __pyx_pybuffer_frac.pybuffer.buf = NULL; __pyx_pybuffer_frac.refcount = 0; __pyx_pybuffernd_frac.data = NULL; __pyx_pybuffernd_frac.rcbuffer = &__pyx_pybuffer_frac; /* "photutils/geometry/rectangular_overlap.pyx":70 * * # Define output array * cdef np.ndarray[DTYPE_t, ndim=2] frac = np.zeros([ny, nx], dtype=DTYPE) # <<<<<<<<<<<<<< * * if use_exact == 1: */ __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 70, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_zeros); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 70, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_ny); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 70, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_nx); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 70, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyList_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 70, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); 
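/* C-API expansion of the Cython line quoted above: np.zeros is fetched from the module
 * globals, ny and nx are boxed, the list [ny, nx] is filled below and wrapped in a
 * one-element positional-argument tuple, the keyword dict {'dtype': DTYPE} is built,
 * np.zeros is called, and the result is type-checked as a numpy ndarray whose buffer is
 * then acquired for the typed 2-d view 'frac'. */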
__Pyx_GIVEREF(__pyx_t_1); PyList_SET_ITEM(__pyx_t_4, 0, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_3); PyList_SET_ITEM(__pyx_t_4, 1, __pyx_t_3); __pyx_t_1 = 0; __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 70, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 70, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s_DTYPE); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 70, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_dtype, __pyx_t_1) < 0) __PYX_ERR(0, 70, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_3, __pyx_t_4); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 70, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 70, __pyx_L1_error) __pyx_t_5 = ((PyArrayObject *)__pyx_t_1); { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_frac.rcbuffer->pybuffer, (PyObject*)__pyx_t_5, &__Pyx_TypeInfo_nn___pyx_t_9photutils_8geometry_19rectangular_overlap_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 2, 0, __pyx_stack) == -1)) { __pyx_v_frac = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_pybuffernd_frac.rcbuffer->pybuffer.buf = NULL; __PYX_ERR(0, 70, __pyx_L1_error) } else {__pyx_pybuffernd_frac.diminfo[0].strides = __pyx_pybuffernd_frac.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_frac.diminfo[0].shape = __pyx_pybuffernd_frac.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_frac.diminfo[1].strides = __pyx_pybuffernd_frac.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_frac.diminfo[1].shape = __pyx_pybuffernd_frac.rcbuffer->pybuffer.shape[1]; } } __pyx_t_5 = 0; __pyx_v_frac = ((PyArrayObject *)__pyx_t_1); __pyx_t_1 = 0; /* "photutils/geometry/rectangular_overlap.pyx":72 * cdef np.ndarray[DTYPE_t, ndim=2] frac = np.zeros([ny, nx], dtype=DTYPE) * * if use_exact == 1: # <<<<<<<<<<<<<< * raise NotImplementedError("Exact mode has not been implemented for " * "rectangular apertures") */ __pyx_t_6 = ((__pyx_v_use_exact == 1) != 0); if (__pyx_t_6) { /* "photutils/geometry/rectangular_overlap.pyx":73 * * if use_exact == 1: * raise NotImplementedError("Exact mode has not been implemented for " # <<<<<<<<<<<<<< * "rectangular apertures") * */ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_NotImplementedError, __pyx_tuple_, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 73, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(0, 73, __pyx_L1_error) /* "photutils/geometry/rectangular_overlap.pyx":72 * cdef np.ndarray[DTYPE_t, ndim=2] frac = np.zeros([ny, nx], dtype=DTYPE) * * if use_exact == 1: # <<<<<<<<<<<<<< * raise NotImplementedError("Exact mode has not been implemented for " * "rectangular apertures") */ } /* "photutils/geometry/rectangular_overlap.pyx":77 * * # Find the width of each element in x and y * dx = (xmax - xmin) / nx # <<<<<<<<<<<<<< * dy = (ymax - ymin) / ny * */ __pyx_t_7 = (__pyx_v_xmax - __pyx_v_xmin); if (unlikely(__pyx_v_nx == 0)) { PyErr_SetString(PyExc_ZeroDivisionError, "float division"); __PYX_ERR(0, 77, __pyx_L1_error) } __pyx_v_dx = 
(__pyx_t_7 / ((double)__pyx_v_nx)); /* "photutils/geometry/rectangular_overlap.pyx":78 * # Find the width of each element in x and y * dx = (xmax - xmin) / nx * dy = (ymax - ymin) / ny # <<<<<<<<<<<<<< * * # TODO: can implement a bounding box here for efficiency (as for the */ __pyx_t_7 = (__pyx_v_ymax - __pyx_v_ymin); if (unlikely(__pyx_v_ny == 0)) { PyErr_SetString(PyExc_ZeroDivisionError, "float division"); __PYX_ERR(0, 78, __pyx_L1_error) } __pyx_v_dy = (__pyx_t_7 / ((double)__pyx_v_ny)); /* "photutils/geometry/rectangular_overlap.pyx":83 * # circular and elliptical aperture photometry) * * for i in range(nx): # <<<<<<<<<<<<<< * pxmin = xmin + i * dx # lower end of pixel * pxmax = pxmin + dx # upper end of pixel */ __pyx_t_8 = __pyx_v_nx; for (__pyx_t_9 = 0; __pyx_t_9 < __pyx_t_8; __pyx_t_9+=1) { __pyx_v_i = __pyx_t_9; /* "photutils/geometry/rectangular_overlap.pyx":84 * * for i in range(nx): * pxmin = xmin + i * dx # lower end of pixel # <<<<<<<<<<<<<< * pxmax = pxmin + dx # upper end of pixel * for j in range(ny): */ __pyx_v_pxmin = (__pyx_v_xmin + (__pyx_v_i * __pyx_v_dx)); /* "photutils/geometry/rectangular_overlap.pyx":85 * for i in range(nx): * pxmin = xmin + i * dx # lower end of pixel * pxmax = pxmin + dx # upper end of pixel # <<<<<<<<<<<<<< * for j in range(ny): * pymin = ymin + j * dy */ __pyx_v_pxmax = (__pyx_v_pxmin + __pyx_v_dx); /* "photutils/geometry/rectangular_overlap.pyx":86 * pxmin = xmin + i * dx # lower end of pixel * pxmax = pxmin + dx # upper end of pixel * for j in range(ny): # <<<<<<<<<<<<<< * pymin = ymin + j * dy * pymax = pymin + dy */ __pyx_t_10 = __pyx_v_ny; for (__pyx_t_11 = 0; __pyx_t_11 < __pyx_t_10; __pyx_t_11+=1) { __pyx_v_j = __pyx_t_11; /* "photutils/geometry/rectangular_overlap.pyx":87 * pxmax = pxmin + dx # upper end of pixel * for j in range(ny): * pymin = ymin + j * dy # <<<<<<<<<<<<<< * pymax = pymin + dy * frac[j, i] = rectangular_overlap_single_subpixel( */ __pyx_v_pymin = (__pyx_v_ymin + (__pyx_v_j * __pyx_v_dy)); /* "photutils/geometry/rectangular_overlap.pyx":88 * for j in range(ny): * pymin = ymin + j * dy * pymax = pymin + dy # <<<<<<<<<<<<<< * frac[j, i] = rectangular_overlap_single_subpixel( * pxmin, pymin, pxmax, pymax, width, height, theta, */ __pyx_v_pymax = (__pyx_v_pymin + __pyx_v_dy); /* "photutils/geometry/rectangular_overlap.pyx":89 * pymin = ymin + j * dy * pymax = pymin + dy * frac[j, i] = rectangular_overlap_single_subpixel( # <<<<<<<<<<<<<< * pxmin, pymin, pxmax, pymax, width, height, theta, * subpixels) */ __pyx_t_12 = __pyx_v_j; __pyx_t_13 = __pyx_v_i; __pyx_t_14 = -1; if (unlikely(__pyx_t_12 >= (size_t)__pyx_pybuffernd_frac.diminfo[0].shape)) __pyx_t_14 = 0; if (unlikely(__pyx_t_13 >= (size_t)__pyx_pybuffernd_frac.diminfo[1].shape)) __pyx_t_14 = 1; if (unlikely(__pyx_t_14 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_14); __PYX_ERR(0, 89, __pyx_L1_error) } *__Pyx_BufPtrStrided2d(__pyx_t_9photutils_8geometry_19rectangular_overlap_DTYPE_t *, __pyx_pybuffernd_frac.rcbuffer->pybuffer.buf, __pyx_t_12, __pyx_pybuffernd_frac.diminfo[0].strides, __pyx_t_13, __pyx_pybuffernd_frac.diminfo[1].strides) = __pyx_f_9photutils_8geometry_19rectangular_overlap_rectangular_overlap_single_subpixel(__pyx_v_pxmin, __pyx_v_pymin, __pyx_v_pxmax, __pyx_v_pymax, __pyx_v_width, __pyx_v_height, __pyx_v_theta, __pyx_v_subpixels); } } /* "photutils/geometry/rectangular_overlap.pyx":93 * subpixels) * * return frac # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_frac)); __pyx_r = ((PyObject *)__pyx_v_frac); goto 
__pyx_L0; /* "photutils/geometry/rectangular_overlap.pyx":28 * * * def rectangular_overlap_grid(double xmin, double xmax, double ymin, # <<<<<<<<<<<<<< * double ymax, int nx, int ny, double width, * double height, double theta, int use_exact, */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); { PyObject *__pyx_type, *__pyx_value, *__pyx_tb; __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_frac.rcbuffer->pybuffer); __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);} __Pyx_AddTraceback("photutils.geometry.rectangular_overlap.rectangular_overlap_grid", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; goto __pyx_L2; __pyx_L0:; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_frac.rcbuffer->pybuffer); __pyx_L2:; __Pyx_XDECREF((PyObject *)__pyx_v_frac); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "photutils/geometry/rectangular_overlap.pyx":96 * * * cdef double rectangular_overlap_single_subpixel(double x0, double y0, # <<<<<<<<<<<<<< * double x1, double y1, * double width, double height, */ static double __pyx_f_9photutils_8geometry_19rectangular_overlap_rectangular_overlap_single_subpixel(double __pyx_v_x0, double __pyx_v_y0, double __pyx_v_x1, double __pyx_v_y1, double __pyx_v_width, double __pyx_v_height, double __pyx_v_theta, int __pyx_v_subpixels) { CYTHON_UNUSED unsigned int __pyx_v_i; CYTHON_UNUSED unsigned int __pyx_v_j; double __pyx_v_x; double __pyx_v_y; double __pyx_v_frac; double __pyx_v_cos_theta; double __pyx_v_sin_theta; double __pyx_v_half_width; double __pyx_v_half_height; double __pyx_v_dx; double __pyx_v_dy; double __pyx_v_x_tr; double __pyx_v_y_tr; double __pyx_r; __Pyx_RefNannyDeclarations double __pyx_t_1; int __pyx_t_2; unsigned int __pyx_t_3; int __pyx_t_4; unsigned int __pyx_t_5; int __pyx_t_6; int __pyx_t_7; __Pyx_RefNannySetupContext("rectangular_overlap_single_subpixel", 0); /* "photutils/geometry/rectangular_overlap.pyx":107 * cdef unsigned int i, j * cdef double x, y * cdef double frac = 0. # Accumulator. # <<<<<<<<<<<<<< * cdef double cos_theta = cos(theta) * cdef double sin_theta = sin(theta) */ __pyx_v_frac = 0.; /* "photutils/geometry/rectangular_overlap.pyx":108 * cdef double x, y * cdef double frac = 0. # Accumulator. * cdef double cos_theta = cos(theta) # <<<<<<<<<<<<<< * cdef double sin_theta = sin(theta) * cdef double half_width, half_height */ __pyx_v_cos_theta = cos(__pyx_v_theta); /* "photutils/geometry/rectangular_overlap.pyx":109 * cdef double frac = 0. # Accumulator. * cdef double cos_theta = cos(theta) * cdef double sin_theta = sin(theta) # <<<<<<<<<<<<<< * cdef double half_width, half_height * */ __pyx_v_sin_theta = sin(__pyx_v_theta); /* "photutils/geometry/rectangular_overlap.pyx":112 * cdef double half_width, half_height * * half_width = width / 2. # <<<<<<<<<<<<<< * half_height = height / 2. * */ __pyx_v_half_width = (__pyx_v_width / 2.); /* "photutils/geometry/rectangular_overlap.pyx":113 * * half_width = width / 2. * half_height = height / 2. # <<<<<<<<<<<<<< * * dx = (x1 - x0) / subpixels */ __pyx_v_half_height = (__pyx_v_height / 2.); /* "photutils/geometry/rectangular_overlap.pyx":115 * half_height = height / 2. 
* * dx = (x1 - x0) / subpixels # <<<<<<<<<<<<<< * dy = (y1 - y0) / subpixels * */ __pyx_t_1 = (__pyx_v_x1 - __pyx_v_x0); if (unlikely(__pyx_v_subpixels == 0)) { PyErr_SetString(PyExc_ZeroDivisionError, "float division"); __PYX_ERR(0, 115, __pyx_L1_error) } __pyx_v_dx = (__pyx_t_1 / ((double)__pyx_v_subpixels)); /* "photutils/geometry/rectangular_overlap.pyx":116 * * dx = (x1 - x0) / subpixels * dy = (y1 - y0) / subpixels # <<<<<<<<<<<<<< * * x = x0 - 0.5 * dx */ __pyx_t_1 = (__pyx_v_y1 - __pyx_v_y0); if (unlikely(__pyx_v_subpixels == 0)) { PyErr_SetString(PyExc_ZeroDivisionError, "float division"); __PYX_ERR(0, 116, __pyx_L1_error) } __pyx_v_dy = (__pyx_t_1 / ((double)__pyx_v_subpixels)); /* "photutils/geometry/rectangular_overlap.pyx":118 * dy = (y1 - y0) / subpixels * * x = x0 - 0.5 * dx # <<<<<<<<<<<<<< * for i in range(subpixels): * x += dx */ __pyx_v_x = (__pyx_v_x0 - (0.5 * __pyx_v_dx)); /* "photutils/geometry/rectangular_overlap.pyx":119 * * x = x0 - 0.5 * dx * for i in range(subpixels): # <<<<<<<<<<<<<< * x += dx * y = y0 - 0.5 * dy */ __pyx_t_2 = __pyx_v_subpixels; for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { __pyx_v_i = __pyx_t_3; /* "photutils/geometry/rectangular_overlap.pyx":120 * x = x0 - 0.5 * dx * for i in range(subpixels): * x += dx # <<<<<<<<<<<<<< * y = y0 - 0.5 * dy * for j in range(subpixels): */ __pyx_v_x = (__pyx_v_x + __pyx_v_dx); /* "photutils/geometry/rectangular_overlap.pyx":121 * for i in range(subpixels): * x += dx * y = y0 - 0.5 * dy # <<<<<<<<<<<<<< * for j in range(subpixels): * y += dy */ __pyx_v_y = (__pyx_v_y0 - (0.5 * __pyx_v_dy)); /* "photutils/geometry/rectangular_overlap.pyx":122 * x += dx * y = y0 - 0.5 * dy * for j in range(subpixels): # <<<<<<<<<<<<<< * y += dy * */ __pyx_t_4 = __pyx_v_subpixels; for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_4; __pyx_t_5+=1) { __pyx_v_j = __pyx_t_5; /* "photutils/geometry/rectangular_overlap.pyx":123 * y = y0 - 0.5 * dy * for j in range(subpixels): * y += dy # <<<<<<<<<<<<<< * * # Transform into frame of rotated rectangle */ __pyx_v_y = (__pyx_v_y + __pyx_v_dy); /* "photutils/geometry/rectangular_overlap.pyx":126 * * # Transform into frame of rotated rectangle * x_tr = y * sin_theta + x * cos_theta # <<<<<<<<<<<<<< * y_tr = y * cos_theta - x * sin_theta * */ __pyx_v_x_tr = ((__pyx_v_y * __pyx_v_sin_theta) + (__pyx_v_x * __pyx_v_cos_theta)); /* "photutils/geometry/rectangular_overlap.pyx":127 * # Transform into frame of rotated rectangle * x_tr = y * sin_theta + x * cos_theta * y_tr = y * cos_theta - x * sin_theta # <<<<<<<<<<<<<< * * if fabs(x_tr) < half_width and fabs(y_tr) < half_height: */ __pyx_v_y_tr = ((__pyx_v_y * __pyx_v_cos_theta) - (__pyx_v_x * __pyx_v_sin_theta)); /* "photutils/geometry/rectangular_overlap.pyx":129 * y_tr = y * cos_theta - x * sin_theta * * if fabs(x_tr) < half_width and fabs(y_tr) < half_height: # <<<<<<<<<<<<<< * frac += 1. * */ __pyx_t_7 = ((fabs(__pyx_v_x_tr) < __pyx_v_half_width) != 0); if (__pyx_t_7) { } else { __pyx_t_6 = __pyx_t_7; goto __pyx_L8_bool_binop_done; } __pyx_t_7 = ((fabs(__pyx_v_y_tr) < __pyx_v_half_height) != 0); __pyx_t_6 = __pyx_t_7; __pyx_L8_bool_binop_done:; if (__pyx_t_6) { /* "photutils/geometry/rectangular_overlap.pyx":130 * * if fabs(x_tr) < half_width and fabs(y_tr) < half_height: * frac += 1. 
# <<<<<<<<<<<<<< * * return frac / (subpixels * subpixels) */ __pyx_v_frac = (__pyx_v_frac + 1.); /* "photutils/geometry/rectangular_overlap.pyx":129 * y_tr = y * cos_theta - x * sin_theta * * if fabs(x_tr) < half_width and fabs(y_tr) < half_height: # <<<<<<<<<<<<<< * frac += 1. * */ } } } /* "photutils/geometry/rectangular_overlap.pyx":132 * frac += 1. * * return frac / (subpixels * subpixels) # <<<<<<<<<<<<<< */ __pyx_t_2 = (__pyx_v_subpixels * __pyx_v_subpixels); if (unlikely(__pyx_t_2 == 0)) { PyErr_SetString(PyExc_ZeroDivisionError, "float division"); __PYX_ERR(0, 132, __pyx_L1_error) } __pyx_r = (__pyx_v_frac / ((double)__pyx_t_2)); goto __pyx_L0; /* "photutils/geometry/rectangular_overlap.pyx":96 * * * cdef double rectangular_overlap_single_subpixel(double x0, double y0, # <<<<<<<<<<<<<< * double x1, double y1, * double width, double height, */ /* function exit code */ __pyx_L1_error:; __Pyx_WriteUnraisable("photutils.geometry.rectangular_overlap.rectangular_overlap_single_subpixel", __pyx_clineno, __pyx_lineno, __pyx_filename, 1, 0); __pyx_r = 0; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":214 * # experimental exception made for __getbuffer__ and __releasebuffer__ * # -- the details of this may change. * def __getbuffer__(ndarray self, Py_buffer* info, int flags): # <<<<<<<<<<<<<< * # This implementation of getbuffer is geared towards Cython * # requirements, and does not yet fullfill the PEP. */ /* Python wrapper */ static CYTHON_UNUSED int __pyx_pw_5numpy_7ndarray_1__getbuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ static CYTHON_UNUSED int __pyx_pw_5numpy_7ndarray_1__getbuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0); __pyx_r = __pyx_pf_5numpy_7ndarray___getbuffer__(((PyArrayObject *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_v_copy_shape; int __pyx_v_i; int __pyx_v_ndim; int __pyx_v_endian_detector; int __pyx_v_little_endian; int __pyx_v_t; char *__pyx_v_f; PyArray_Descr *__pyx_v_descr = 0; int __pyx_v_offset; int __pyx_v_hasfields; int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; int __pyx_t_4; int __pyx_t_5; PyObject *__pyx_t_6 = NULL; char *__pyx_t_7; __Pyx_RefNannySetupContext("__getbuffer__", 0); if (__pyx_v_info != NULL) { __pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None); __Pyx_GIVEREF(__pyx_v_info->obj); } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":220 * # of flags * * if info == NULL: return # <<<<<<<<<<<<<< * * cdef int copy_shape, i, ndim */ __pyx_t_1 = ((__pyx_v_info == NULL) != 0); if (__pyx_t_1) { __pyx_r = 0; goto __pyx_L0; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":223 * * cdef int copy_shape, i, ndim * cdef int endian_detector = 1 # <<<<<<<<<<<<<< * cdef bint little_endian = ((&endian_detector)[0] != 0) * */ __pyx_v_endian_detector = 1; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":224 * cdef int copy_shape, i, ndim * cdef int 
endian_detector = 1 * cdef bint little_endian = ((&endian_detector)[0] != 0) # <<<<<<<<<<<<<< * * ndim = PyArray_NDIM(self) */ __pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":226 * cdef bint little_endian = ((&endian_detector)[0] != 0) * * ndim = PyArray_NDIM(self) # <<<<<<<<<<<<<< * * if sizeof(npy_intp) != sizeof(Py_ssize_t): */ __pyx_v_ndim = PyArray_NDIM(__pyx_v_self); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":228 * ndim = PyArray_NDIM(self) * * if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<< * copy_shape = 1 * else: */ __pyx_t_1 = (((sizeof(npy_intp)) != (sizeof(Py_ssize_t))) != 0); if (__pyx_t_1) { /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":229 * * if sizeof(npy_intp) != sizeof(Py_ssize_t): * copy_shape = 1 # <<<<<<<<<<<<<< * else: * copy_shape = 0 */ __pyx_v_copy_shape = 1; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":228 * ndim = PyArray_NDIM(self) * * if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<< * copy_shape = 1 * else: */ goto __pyx_L4; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":231 * copy_shape = 1 * else: * copy_shape = 0 # <<<<<<<<<<<<<< * * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) */ /*else*/ { __pyx_v_copy_shape = 0; } __pyx_L4:; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":233 * copy_shape = 0 * * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) # <<<<<<<<<<<<<< * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): * raise ValueError(u"ndarray is not C contiguous") */ __pyx_t_2 = (((__pyx_v_flags & PyBUF_C_CONTIGUOUS) == PyBUF_C_CONTIGUOUS) != 0); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L6_bool_binop_done; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":234 * * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): # <<<<<<<<<<<<<< * raise ValueError(u"ndarray is not C contiguous") * */ __pyx_t_2 = ((!(PyArray_CHKFLAGS(__pyx_v_self, NPY_C_CONTIGUOUS) != 0)) != 0); __pyx_t_1 = __pyx_t_2; __pyx_L6_bool_binop_done:; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":233 * copy_shape = 0 * * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) # <<<<<<<<<<<<<< * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): * raise ValueError(u"ndarray is not C contiguous") */ if (__pyx_t_1) { /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":235 * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): * raise ValueError(u"ndarray is not C contiguous") # <<<<<<<<<<<<<< * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__2, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 235, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 235, __pyx_L1_error) /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":233 * 
copy_shape = 0 * * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) # <<<<<<<<<<<<<< * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): * raise ValueError(u"ndarray is not C contiguous") */ } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":237 * raise ValueError(u"ndarray is not C contiguous") * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) # <<<<<<<<<<<<<< * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): * raise ValueError(u"ndarray is not Fortran contiguous") */ __pyx_t_2 = (((__pyx_v_flags & PyBUF_F_CONTIGUOUS) == PyBUF_F_CONTIGUOUS) != 0); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L9_bool_binop_done; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":238 * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): # <<<<<<<<<<<<<< * raise ValueError(u"ndarray is not Fortran contiguous") * */ __pyx_t_2 = ((!(PyArray_CHKFLAGS(__pyx_v_self, NPY_F_CONTIGUOUS) != 0)) != 0); __pyx_t_1 = __pyx_t_2; __pyx_L9_bool_binop_done:; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":237 * raise ValueError(u"ndarray is not C contiguous") * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) # <<<<<<<<<<<<<< * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): * raise ValueError(u"ndarray is not Fortran contiguous") */ if (__pyx_t_1) { /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":239 * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): * raise ValueError(u"ndarray is not Fortran contiguous") # <<<<<<<<<<<<<< * * info.buf = PyArray_DATA(self) */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__3, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 239, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 239, __pyx_L1_error) /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":237 * raise ValueError(u"ndarray is not C contiguous") * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) # <<<<<<<<<<<<<< * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): * raise ValueError(u"ndarray is not Fortran contiguous") */ } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":241 * raise ValueError(u"ndarray is not Fortran contiguous") * * info.buf = PyArray_DATA(self) # <<<<<<<<<<<<<< * info.ndim = ndim * if copy_shape: */ __pyx_v_info->buf = PyArray_DATA(__pyx_v_self); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":242 * * info.buf = PyArray_DATA(self) * info.ndim = ndim # <<<<<<<<<<<<<< * if copy_shape: * # Allocate new buffer for strides and shape info. */ __pyx_v_info->ndim = __pyx_v_ndim; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":243 * info.buf = PyArray_DATA(self) * info.ndim = ndim * if copy_shape: # <<<<<<<<<<<<<< * # Allocate new buffer for strides and shape info. * # This is allocated as one block, strides first. 
*/ __pyx_t_1 = (__pyx_v_copy_shape != 0); if (__pyx_t_1) { /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":246 * # Allocate new buffer for strides and shape info. * # This is allocated as one block, strides first. * info.strides = PyObject_Malloc(sizeof(Py_ssize_t) * 2 * ndim) # <<<<<<<<<<<<<< * info.shape = info.strides + ndim * for i in range(ndim): */ __pyx_v_info->strides = ((Py_ssize_t *)PyObject_Malloc((((sizeof(Py_ssize_t)) * 2) * ((size_t)__pyx_v_ndim)))); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":247 * # This is allocated as one block, strides first. * info.strides = PyObject_Malloc(sizeof(Py_ssize_t) * 2 * ndim) * info.shape = info.strides + ndim # <<<<<<<<<<<<<< * for i in range(ndim): * info.strides[i] = PyArray_STRIDES(self)[i] */ __pyx_v_info->shape = (__pyx_v_info->strides + __pyx_v_ndim); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":248 * info.strides = PyObject_Malloc(sizeof(Py_ssize_t) * 2 * ndim) * info.shape = info.strides + ndim * for i in range(ndim): # <<<<<<<<<<<<<< * info.strides[i] = PyArray_STRIDES(self)[i] * info.shape[i] = PyArray_DIMS(self)[i] */ __pyx_t_4 = __pyx_v_ndim; for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_4; __pyx_t_5+=1) { __pyx_v_i = __pyx_t_5; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":249 * info.shape = info.strides + ndim * for i in range(ndim): * info.strides[i] = PyArray_STRIDES(self)[i] # <<<<<<<<<<<<<< * info.shape[i] = PyArray_DIMS(self)[i] * else: */ (__pyx_v_info->strides[__pyx_v_i]) = (PyArray_STRIDES(__pyx_v_self)[__pyx_v_i]); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":250 * for i in range(ndim): * info.strides[i] = PyArray_STRIDES(self)[i] * info.shape[i] = PyArray_DIMS(self)[i] # <<<<<<<<<<<<<< * else: * info.strides = PyArray_STRIDES(self) */ (__pyx_v_info->shape[__pyx_v_i]) = (PyArray_DIMS(__pyx_v_self)[__pyx_v_i]); } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":243 * info.buf = PyArray_DATA(self) * info.ndim = ndim * if copy_shape: # <<<<<<<<<<<<<< * # Allocate new buffer for strides and shape info. * # This is allocated as one block, strides first. 
*/ goto __pyx_L11; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":252 * info.shape[i] = PyArray_DIMS(self)[i] * else: * info.strides = PyArray_STRIDES(self) # <<<<<<<<<<<<<< * info.shape = PyArray_DIMS(self) * info.suboffsets = NULL */ /*else*/ { __pyx_v_info->strides = ((Py_ssize_t *)PyArray_STRIDES(__pyx_v_self)); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":253 * else: * info.strides = PyArray_STRIDES(self) * info.shape = PyArray_DIMS(self) # <<<<<<<<<<<<<< * info.suboffsets = NULL * info.itemsize = PyArray_ITEMSIZE(self) */ __pyx_v_info->shape = ((Py_ssize_t *)PyArray_DIMS(__pyx_v_self)); } __pyx_L11:; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":254 * info.strides = PyArray_STRIDES(self) * info.shape = PyArray_DIMS(self) * info.suboffsets = NULL # <<<<<<<<<<<<<< * info.itemsize = PyArray_ITEMSIZE(self) * info.readonly = not PyArray_ISWRITEABLE(self) */ __pyx_v_info->suboffsets = NULL; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":255 * info.shape = PyArray_DIMS(self) * info.suboffsets = NULL * info.itemsize = PyArray_ITEMSIZE(self) # <<<<<<<<<<<<<< * info.readonly = not PyArray_ISWRITEABLE(self) * */ __pyx_v_info->itemsize = PyArray_ITEMSIZE(__pyx_v_self); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":256 * info.suboffsets = NULL * info.itemsize = PyArray_ITEMSIZE(self) * info.readonly = not PyArray_ISWRITEABLE(self) # <<<<<<<<<<<<<< * * cdef int t */ __pyx_v_info->readonly = (!(PyArray_ISWRITEABLE(__pyx_v_self) != 0)); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":259 * * cdef int t * cdef char* f = NULL # <<<<<<<<<<<<<< * cdef dtype descr = self.descr * cdef int offset */ __pyx_v_f = NULL; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":260 * cdef int t * cdef char* f = NULL * cdef dtype descr = self.descr # <<<<<<<<<<<<<< * cdef int offset * */ __pyx_t_3 = ((PyObject *)__pyx_v_self->descr); __Pyx_INCREF(__pyx_t_3); __pyx_v_descr = ((PyArray_Descr *)__pyx_t_3); __pyx_t_3 = 0; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":263 * cdef int offset * * cdef bint hasfields = PyDataType_HASFIELDS(descr) # <<<<<<<<<<<<<< * * if not hasfields and not copy_shape: */ __pyx_v_hasfields = PyDataType_HASFIELDS(__pyx_v_descr); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":265 * cdef bint hasfields = PyDataType_HASFIELDS(descr) * * if not hasfields and not copy_shape: # <<<<<<<<<<<<<< * # do not call releasebuffer * info.obj = None */ __pyx_t_2 = ((!(__pyx_v_hasfields != 0)) != 0); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L15_bool_binop_done; } __pyx_t_2 = ((!(__pyx_v_copy_shape != 0)) != 0); __pyx_t_1 = __pyx_t_2; __pyx_L15_bool_binop_done:; if (__pyx_t_1) { /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":267 * if not hasfields and not copy_shape: * # do not call releasebuffer * info.obj = None # <<<<<<<<<<<<<< * else: * # need to call releasebuffer */ __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = Py_None; /* 
"../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":265 * cdef bint hasfields = PyDataType_HASFIELDS(descr) * * if not hasfields and not copy_shape: # <<<<<<<<<<<<<< * # do not call releasebuffer * info.obj = None */ goto __pyx_L14; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":270 * else: * # need to call releasebuffer * info.obj = self # <<<<<<<<<<<<<< * * if not hasfields: */ /*else*/ { __Pyx_INCREF(((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = ((PyObject *)__pyx_v_self); } __pyx_L14:; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":272 * info.obj = self * * if not hasfields: # <<<<<<<<<<<<<< * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or */ __pyx_t_1 = ((!(__pyx_v_hasfields != 0)) != 0); if (__pyx_t_1) { /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":273 * * if not hasfields: * t = descr.type_num # <<<<<<<<<<<<<< * if ((descr.byteorder == c'>' and little_endian) or * (descr.byteorder == c'<' and not little_endian)): */ __pyx_t_4 = __pyx_v_descr->type_num; __pyx_v_t = __pyx_t_4; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":274 * if not hasfields: * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") */ __pyx_t_2 = ((__pyx_v_descr->byteorder == '>') != 0); if (!__pyx_t_2) { goto __pyx_L20_next_or; } else { } __pyx_t_2 = (__pyx_v_little_endian != 0); if (!__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L19_bool_binop_done; } __pyx_L20_next_or:; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":275 * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or * (descr.byteorder == c'<' and not little_endian)): # <<<<<<<<<<<<<< * raise ValueError(u"Non-native byte order not supported") * if t == NPY_BYTE: f = "b" */ __pyx_t_2 = ((__pyx_v_descr->byteorder == '<') != 0); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L19_bool_binop_done; } __pyx_t_2 = ((!(__pyx_v_little_endian != 0)) != 0); __pyx_t_1 = __pyx_t_2; __pyx_L19_bool_binop_done:; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":274 * if not hasfields: * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") */ if (__pyx_t_1) { /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":276 * if ((descr.byteorder == c'>' and little_endian) or * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__4, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 276, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 276, __pyx_L1_error) /* 
"../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":274 * if not hasfields: * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") */ } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":277 * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") * if t == NPY_BYTE: f = "b" # <<<<<<<<<<<<<< * elif t == NPY_UBYTE: f = "B" * elif t == NPY_SHORT: f = "h" */ switch (__pyx_v_t) { case NPY_BYTE: __pyx_v_f = ((char *)"b"); break; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":278 * raise ValueError(u"Non-native byte order not supported") * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" # <<<<<<<<<<<<<< * elif t == NPY_SHORT: f = "h" * elif t == NPY_USHORT: f = "H" */ case NPY_UBYTE: __pyx_v_f = ((char *)"B"); break; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":279 * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" * elif t == NPY_SHORT: f = "h" # <<<<<<<<<<<<<< * elif t == NPY_USHORT: f = "H" * elif t == NPY_INT: f = "i" */ case NPY_SHORT: __pyx_v_f = ((char *)"h"); break; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":280 * elif t == NPY_UBYTE: f = "B" * elif t == NPY_SHORT: f = "h" * elif t == NPY_USHORT: f = "H" # <<<<<<<<<<<<<< * elif t == NPY_INT: f = "i" * elif t == NPY_UINT: f = "I" */ case NPY_USHORT: __pyx_v_f = ((char *)"H"); break; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":281 * elif t == NPY_SHORT: f = "h" * elif t == NPY_USHORT: f = "H" * elif t == NPY_INT: f = "i" # <<<<<<<<<<<<<< * elif t == NPY_UINT: f = "I" * elif t == NPY_LONG: f = "l" */ case NPY_INT: __pyx_v_f = ((char *)"i"); break; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":282 * elif t == NPY_USHORT: f = "H" * elif t == NPY_INT: f = "i" * elif t == NPY_UINT: f = "I" # <<<<<<<<<<<<<< * elif t == NPY_LONG: f = "l" * elif t == NPY_ULONG: f = "L" */ case NPY_UINT: __pyx_v_f = ((char *)"I"); break; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":283 * elif t == NPY_INT: f = "i" * elif t == NPY_UINT: f = "I" * elif t == NPY_LONG: f = "l" # <<<<<<<<<<<<<< * elif t == NPY_ULONG: f = "L" * elif t == NPY_LONGLONG: f = "q" */ case NPY_LONG: __pyx_v_f = ((char *)"l"); break; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":284 * elif t == NPY_UINT: f = "I" * elif t == NPY_LONG: f = "l" * elif t == NPY_ULONG: f = "L" # <<<<<<<<<<<<<< * elif t == NPY_LONGLONG: f = "q" * elif t == NPY_ULONGLONG: f = "Q" */ case NPY_ULONG: __pyx_v_f = ((char *)"L"); break; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":285 * elif t == NPY_LONG: f = "l" * elif t == NPY_ULONG: f = "L" * elif t == NPY_LONGLONG: f = "q" # <<<<<<<<<<<<<< * elif t == NPY_ULONGLONG: f = "Q" * elif t == NPY_FLOAT: f = "f" */ case NPY_LONGLONG: __pyx_v_f = ((char *)"q"); break; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":286 * elif t == NPY_ULONG: f = "L" * elif t == NPY_LONGLONG: f = "q" * elif t == 
NPY_ULONGLONG: f = "Q" # <<<<<<<<<<<<<< * elif t == NPY_FLOAT: f = "f" * elif t == NPY_DOUBLE: f = "d" */ case NPY_ULONGLONG: __pyx_v_f = ((char *)"Q"); break; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":287 * elif t == NPY_LONGLONG: f = "q" * elif t == NPY_ULONGLONG: f = "Q" * elif t == NPY_FLOAT: f = "f" # <<<<<<<<<<<<<< * elif t == NPY_DOUBLE: f = "d" * elif t == NPY_LONGDOUBLE: f = "g" */ case NPY_FLOAT: __pyx_v_f = ((char *)"f"); break; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":288 * elif t == NPY_ULONGLONG: f = "Q" * elif t == NPY_FLOAT: f = "f" * elif t == NPY_DOUBLE: f = "d" # <<<<<<<<<<<<<< * elif t == NPY_LONGDOUBLE: f = "g" * elif t == NPY_CFLOAT: f = "Zf" */ case NPY_DOUBLE: __pyx_v_f = ((char *)"d"); break; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":289 * elif t == NPY_FLOAT: f = "f" * elif t == NPY_DOUBLE: f = "d" * elif t == NPY_LONGDOUBLE: f = "g" # <<<<<<<<<<<<<< * elif t == NPY_CFLOAT: f = "Zf" * elif t == NPY_CDOUBLE: f = "Zd" */ case NPY_LONGDOUBLE: __pyx_v_f = ((char *)"g"); break; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":290 * elif t == NPY_DOUBLE: f = "d" * elif t == NPY_LONGDOUBLE: f = "g" * elif t == NPY_CFLOAT: f = "Zf" # <<<<<<<<<<<<<< * elif t == NPY_CDOUBLE: f = "Zd" * elif t == NPY_CLONGDOUBLE: f = "Zg" */ case NPY_CFLOAT: __pyx_v_f = ((char *)"Zf"); break; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":291 * elif t == NPY_LONGDOUBLE: f = "g" * elif t == NPY_CFLOAT: f = "Zf" * elif t == NPY_CDOUBLE: f = "Zd" # <<<<<<<<<<<<<< * elif t == NPY_CLONGDOUBLE: f = "Zg" * elif t == NPY_OBJECT: f = "O" */ case NPY_CDOUBLE: __pyx_v_f = ((char *)"Zd"); break; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":292 * elif t == NPY_CFLOAT: f = "Zf" * elif t == NPY_CDOUBLE: f = "Zd" * elif t == NPY_CLONGDOUBLE: f = "Zg" # <<<<<<<<<<<<<< * elif t == NPY_OBJECT: f = "O" * else: */ case NPY_CLONGDOUBLE: __pyx_v_f = ((char *)"Zg"); break; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":293 * elif t == NPY_CDOUBLE: f = "Zd" * elif t == NPY_CLONGDOUBLE: f = "Zg" * elif t == NPY_OBJECT: f = "O" # <<<<<<<<<<<<<< * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) */ case NPY_OBJECT: __pyx_v_f = ((char *)"O"); break; default: /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":295 * elif t == NPY_OBJECT: f = "O" * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) # <<<<<<<<<<<<<< * info.format = f * return */ __pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_t); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 295, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_6 = PyUnicode_Format(__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, __pyx_t_3); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 295, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 295, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_3, NULL); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 295, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 
= 0; __Pyx_Raise(__pyx_t_6, 0, 0, 0); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __PYX_ERR(1, 295, __pyx_L1_error) break; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":296 * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) * info.format = f # <<<<<<<<<<<<<< * return * else: */ __pyx_v_info->format = __pyx_v_f; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":297 * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) * info.format = f * return # <<<<<<<<<<<<<< * else: * info.format = PyObject_Malloc(_buffer_format_string_len) */ __pyx_r = 0; goto __pyx_L0; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":272 * info.obj = self * * if not hasfields: # <<<<<<<<<<<<<< * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or */ } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":299 * return * else: * info.format = PyObject_Malloc(_buffer_format_string_len) # <<<<<<<<<<<<<< * info.format[0] = c'^' # Native data types, manual alignment * offset = 0 */ /*else*/ { __pyx_v_info->format = ((char *)PyObject_Malloc(0xFF)); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":300 * else: * info.format = PyObject_Malloc(_buffer_format_string_len) * info.format[0] = c'^' # Native data types, manual alignment # <<<<<<<<<<<<<< * offset = 0 * f = _util_dtypestring(descr, info.format + 1, */ (__pyx_v_info->format[0]) = '^'; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":301 * info.format = PyObject_Malloc(_buffer_format_string_len) * info.format[0] = c'^' # Native data types, manual alignment * offset = 0 # <<<<<<<<<<<<<< * f = _util_dtypestring(descr, info.format + 1, * info.format + _buffer_format_string_len, */ __pyx_v_offset = 0; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":302 * info.format[0] = c'^' # Native data types, manual alignment * offset = 0 * f = _util_dtypestring(descr, info.format + 1, # <<<<<<<<<<<<<< * info.format + _buffer_format_string_len, * &offset) */ __pyx_t_7 = __pyx_f_5numpy__util_dtypestring(__pyx_v_descr, (__pyx_v_info->format + 1), (__pyx_v_info->format + 0xFF), (&__pyx_v_offset)); if (unlikely(__pyx_t_7 == ((char *)NULL))) __PYX_ERR(1, 302, __pyx_L1_error) __pyx_v_f = __pyx_t_7; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":305 * info.format + _buffer_format_string_len, * &offset) * f[0] = c'\0' # Terminate format string # <<<<<<<<<<<<<< * * def __releasebuffer__(ndarray self, Py_buffer* info): */ (__pyx_v_f[0]) = '\x00'; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":214 * # experimental exception made for __getbuffer__ and __releasebuffer__ * # -- the details of this may change. * def __getbuffer__(ndarray self, Py_buffer* info, int flags): # <<<<<<<<<<<<<< * # This implementation of getbuffer is geared towards Cython * # requirements, and does not yet fullfill the PEP. 
*/ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_6); __Pyx_AddTraceback("numpy.ndarray.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; if (__pyx_v_info != NULL && __pyx_v_info->obj != NULL) { __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = NULL; } goto __pyx_L2; __pyx_L0:; if (__pyx_v_info != NULL && __pyx_v_info->obj == Py_None) { __Pyx_GOTREF(Py_None); __Pyx_DECREF(Py_None); __pyx_v_info->obj = NULL; } __pyx_L2:; __Pyx_XDECREF((PyObject *)__pyx_v_descr); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":307 * f[0] = c'\0' # Terminate format string * * def __releasebuffer__(ndarray self, Py_buffer* info): # <<<<<<<<<<<<<< * if PyArray_HASFIELDS(self): * PyObject_Free(info.format) */ /* Python wrapper */ static CYTHON_UNUSED void __pyx_pw_5numpy_7ndarray_3__releasebuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info); /*proto*/ static CYTHON_UNUSED void __pyx_pw_5numpy_7ndarray_3__releasebuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__releasebuffer__ (wrapper)", 0); __pyx_pf_5numpy_7ndarray_2__releasebuffer__(((PyArrayObject *)__pyx_v_self), ((Py_buffer *)__pyx_v_info)); /* function exit code */ __Pyx_RefNannyFinishContext(); } static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info) { __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("__releasebuffer__", 0); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":308 * * def __releasebuffer__(ndarray self, Py_buffer* info): * if PyArray_HASFIELDS(self): # <<<<<<<<<<<<<< * PyObject_Free(info.format) * if sizeof(npy_intp) != sizeof(Py_ssize_t): */ __pyx_t_1 = (PyArray_HASFIELDS(__pyx_v_self) != 0); if (__pyx_t_1) { /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":309 * def __releasebuffer__(ndarray self, Py_buffer* info): * if PyArray_HASFIELDS(self): * PyObject_Free(info.format) # <<<<<<<<<<<<<< * if sizeof(npy_intp) != sizeof(Py_ssize_t): * PyObject_Free(info.strides) */ PyObject_Free(__pyx_v_info->format); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":308 * * def __releasebuffer__(ndarray self, Py_buffer* info): * if PyArray_HASFIELDS(self): # <<<<<<<<<<<<<< * PyObject_Free(info.format) * if sizeof(npy_intp) != sizeof(Py_ssize_t): */ } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":310 * if PyArray_HASFIELDS(self): * PyObject_Free(info.format) * if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<< * PyObject_Free(info.strides) * # info.shape was stored after info.strides in the same block */ __pyx_t_1 = (((sizeof(npy_intp)) != (sizeof(Py_ssize_t))) != 0); if (__pyx_t_1) { /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":311 * PyObject_Free(info.format) * if sizeof(npy_intp) != sizeof(Py_ssize_t): * PyObject_Free(info.strides) # <<<<<<<<<<<<<< * # info.shape was stored after info.strides in the same block * */ PyObject_Free(__pyx_v_info->strides); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":310 * if PyArray_HASFIELDS(self): * PyObject_Free(info.format) * if 
sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<< * PyObject_Free(info.strides) * # info.shape was stored after info.strides in the same block */ } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":307 * f[0] = c'\0' # Terminate format string * * def __releasebuffer__(ndarray self, Py_buffer* info): # <<<<<<<<<<<<<< * if PyArray_HASFIELDS(self): * PyObject_Free(info.format) */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":788 * ctypedef npy_cdouble complex_t * * cdef inline object PyArray_MultiIterNew1(a): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(1, a) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew1(PyObject *__pyx_v_a) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("PyArray_MultiIterNew1", 0); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":789 * * cdef inline object PyArray_MultiIterNew1(a): * return PyArray_MultiIterNew(1, a) # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew2(a, b): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(1, ((void *)__pyx_v_a)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 789, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":788 * ctypedef npy_cdouble complex_t * * cdef inline object PyArray_MultiIterNew1(a): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(1, a) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew1", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":791 * return PyArray_MultiIterNew(1, a) * * cdef inline object PyArray_MultiIterNew2(a, b): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(2, a, b) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew2(PyObject *__pyx_v_a, PyObject *__pyx_v_b) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("PyArray_MultiIterNew2", 0); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":792 * * cdef inline object PyArray_MultiIterNew2(a, b): * return PyArray_MultiIterNew(2, a, b) # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew3(a, b, c): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(2, ((void *)__pyx_v_a), ((void *)__pyx_v_b)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 792, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":791 * return PyArray_MultiIterNew(1, a) * * cdef inline object PyArray_MultiIterNew2(a, b): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(2, a, b) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew2", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* 
"../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":794 * return PyArray_MultiIterNew(2, a, b) * * cdef inline object PyArray_MultiIterNew3(a, b, c): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(3, a, b, c) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew3(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("PyArray_MultiIterNew3", 0); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":795 * * cdef inline object PyArray_MultiIterNew3(a, b, c): * return PyArray_MultiIterNew(3, a, b, c) # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew4(a, b, c, d): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(3, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 795, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":794 * return PyArray_MultiIterNew(2, a, b) * * cdef inline object PyArray_MultiIterNew3(a, b, c): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(3, a, b, c) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew3", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":797 * return PyArray_MultiIterNew(3, a, b, c) * * cdef inline object PyArray_MultiIterNew4(a, b, c, d): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(4, a, b, c, d) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew4(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("PyArray_MultiIterNew4", 0); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":798 * * cdef inline object PyArray_MultiIterNew4(a, b, c, d): * return PyArray_MultiIterNew(4, a, b, c, d) # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(4, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 798, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":797 * return PyArray_MultiIterNew(3, a, b, c) * * cdef inline object PyArray_MultiIterNew4(a, b, c, d): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(4, a, b, c, d) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew4", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":800 * return PyArray_MultiIterNew(4, a, b, c, d) * * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(5, a, b, c, d, e) * */ static CYTHON_INLINE 
PyObject *__pyx_f_5numpy_PyArray_MultiIterNew5(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d, PyObject *__pyx_v_e) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("PyArray_MultiIterNew5", 0); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":801 * * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): * return PyArray_MultiIterNew(5, a, b, c, d, e) # <<<<<<<<<<<<<< * * cdef inline tuple PyDataType_SHAPE(dtype d): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(5, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d), ((void *)__pyx_v_e)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 801, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":800 * return PyArray_MultiIterNew(4, a, b, c, d) * * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(5, a, b, c, d, e) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew5", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":803 * return PyArray_MultiIterNew(5, a, b, c, d, e) * * cdef inline tuple PyDataType_SHAPE(dtype d): # <<<<<<<<<<<<<< * if PyDataType_HASSUBARRAY(d): * return d.subarray.shape */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyDataType_SHAPE(PyArray_Descr *__pyx_v_d) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("PyDataType_SHAPE", 0); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":804 * * cdef inline tuple PyDataType_SHAPE(dtype d): * if PyDataType_HASSUBARRAY(d): # <<<<<<<<<<<<<< * return d.subarray.shape * else: */ __pyx_t_1 = (PyDataType_HASSUBARRAY(__pyx_v_d) != 0); if (__pyx_t_1) { /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":805 * cdef inline tuple PyDataType_SHAPE(dtype d): * if PyDataType_HASSUBARRAY(d): * return d.subarray.shape # <<<<<<<<<<<<<< * else: * return () */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject*)__pyx_v_d->subarray->shape)); __pyx_r = ((PyObject*)__pyx_v_d->subarray->shape); goto __pyx_L0; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":804 * * cdef inline tuple PyDataType_SHAPE(dtype d): * if PyDataType_HASSUBARRAY(d): # <<<<<<<<<<<<<< * return d.subarray.shape * else: */ } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":807 * return d.subarray.shape * else: * return () # <<<<<<<<<<<<<< * * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: */ /*else*/ { __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_empty_tuple); __pyx_r = __pyx_empty_tuple; goto __pyx_L0; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":803 * return PyArray_MultiIterNew(5, a, b, c, d, e) * * cdef inline tuple PyDataType_SHAPE(dtype d): # <<<<<<<<<<<<<< * if PyDataType_HASSUBARRAY(d): * return d.subarray.shape */ /* function exit code */ __pyx_L0:; 
__Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":809 * return () * * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: # <<<<<<<<<<<<<< * # Recursive utility function used in __getbuffer__ to get format * # string. The new location in the format string is returned. */ static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx_v_descr, char *__pyx_v_f, char *__pyx_v_end, int *__pyx_v_offset) { PyArray_Descr *__pyx_v_child = 0; int __pyx_v_endian_detector; int __pyx_v_little_endian; PyObject *__pyx_v_fields = 0; PyObject *__pyx_v_childname = NULL; PyObject *__pyx_v_new_offset = NULL; PyObject *__pyx_v_t = NULL; char *__pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; Py_ssize_t __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; int __pyx_t_5; int __pyx_t_6; int __pyx_t_7; long __pyx_t_8; char *__pyx_t_9; __Pyx_RefNannySetupContext("_util_dtypestring", 0); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":814 * * cdef dtype child * cdef int endian_detector = 1 # <<<<<<<<<<<<<< * cdef bint little_endian = ((&endian_detector)[0] != 0) * cdef tuple fields */ __pyx_v_endian_detector = 1; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":815 * cdef dtype child * cdef int endian_detector = 1 * cdef bint little_endian = ((&endian_detector)[0] != 0) # <<<<<<<<<<<<<< * cdef tuple fields * */ __pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":818 * cdef tuple fields * * for childname in descr.names: # <<<<<<<<<<<<<< * fields = descr.fields[childname] * child, new_offset = fields */ if (unlikely(__pyx_v_descr->names == Py_None)) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable"); __PYX_ERR(1, 818, __pyx_L1_error) } __pyx_t_1 = __pyx_v_descr->names; __Pyx_INCREF(__pyx_t_1); __pyx_t_2 = 0; for (;;) { if (__pyx_t_2 >= PyTuple_GET_SIZE(__pyx_t_1)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_3 = PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_2); __Pyx_INCREF(__pyx_t_3); __pyx_t_2++; if (unlikely(0 < 0)) __PYX_ERR(1, 818, __pyx_L1_error) #else __pyx_t_3 = PySequence_ITEM(__pyx_t_1, __pyx_t_2); __pyx_t_2++; if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 818, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); #endif __Pyx_XDECREF_SET(__pyx_v_childname, __pyx_t_3); __pyx_t_3 = 0; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":819 * * for childname in descr.names: * fields = descr.fields[childname] # <<<<<<<<<<<<<< * child, new_offset = fields * */ if (unlikely(__pyx_v_descr->fields == Py_None)) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable"); __PYX_ERR(1, 819, __pyx_L1_error) } __pyx_t_3 = __Pyx_PyDict_GetItem(__pyx_v_descr->fields, __pyx_v_childname); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 819, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); if (!(likely(PyTuple_CheckExact(__pyx_t_3))||((__pyx_t_3) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_t_3)->tp_name), 0))) __PYX_ERR(1, 819, __pyx_L1_error) __Pyx_XDECREF_SET(__pyx_v_fields, ((PyObject*)__pyx_t_3)); __pyx_t_3 = 0; /* 
"../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":820 * for childname in descr.names: * fields = descr.fields[childname] * child, new_offset = fields # <<<<<<<<<<<<<< * * if (end - f) - (new_offset - offset[0]) < 15: */ if (likely(__pyx_v_fields != Py_None)) { PyObject* sequence = __pyx_v_fields; #if !CYTHON_COMPILING_IN_PYPY Py_ssize_t size = Py_SIZE(sequence); #else Py_ssize_t size = PySequence_Size(sequence); #endif if (unlikely(size != 2)) { if (size > 2) __Pyx_RaiseTooManyValuesError(2); else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); __PYX_ERR(1, 820, __pyx_L1_error) } #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_3 = PyTuple_GET_ITEM(sequence, 0); __pyx_t_4 = PyTuple_GET_ITEM(sequence, 1); __Pyx_INCREF(__pyx_t_3); __Pyx_INCREF(__pyx_t_4); #else __pyx_t_3 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 820, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 820, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); #endif } else { __Pyx_RaiseNoneNotIterableError(); __PYX_ERR(1, 820, __pyx_L1_error) } if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_dtype))))) __PYX_ERR(1, 820, __pyx_L1_error) __Pyx_XDECREF_SET(__pyx_v_child, ((PyArray_Descr *)__pyx_t_3)); __pyx_t_3 = 0; __Pyx_XDECREF_SET(__pyx_v_new_offset, __pyx_t_4); __pyx_t_4 = 0; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":822 * child, new_offset = fields * * if (end - f) - (new_offset - offset[0]) < 15: # <<<<<<<<<<<<<< * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") * */ __pyx_t_4 = __Pyx_PyInt_From_int((__pyx_v_offset[0])); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 822, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyNumber_Subtract(__pyx_v_new_offset, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 822, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_5 = __Pyx_PyInt_As_int(__pyx_t_3); if (unlikely((__pyx_t_5 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 822, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = ((((__pyx_v_end - __pyx_v_f) - ((int)__pyx_t_5)) < 15) != 0); if (__pyx_t_6) { /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":823 * * if (end - f) - (new_offset - offset[0]) < 15: * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") # <<<<<<<<<<<<<< * * if ((child.byteorder == c'>' and little_endian) or */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_RuntimeError, __pyx_tuple__5, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 823, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 823, __pyx_L1_error) /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":822 * child, new_offset = fields * * if (end - f) - (new_offset - offset[0]) < 15: # <<<<<<<<<<<<<< * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") * */ } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":825 * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") * * if ((child.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< * (child.byteorder == c'<' and not little_endian)): * raise 
ValueError(u"Non-native byte order not supported") */ __pyx_t_7 = ((__pyx_v_child->byteorder == '>') != 0); if (!__pyx_t_7) { goto __pyx_L8_next_or; } else { } __pyx_t_7 = (__pyx_v_little_endian != 0); if (!__pyx_t_7) { } else { __pyx_t_6 = __pyx_t_7; goto __pyx_L7_bool_binop_done; } __pyx_L8_next_or:; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":826 * * if ((child.byteorder == c'>' and little_endian) or * (child.byteorder == c'<' and not little_endian)): # <<<<<<<<<<<<<< * raise ValueError(u"Non-native byte order not supported") * # One could encode it in the format string and have Cython */ __pyx_t_7 = ((__pyx_v_child->byteorder == '<') != 0); if (__pyx_t_7) { } else { __pyx_t_6 = __pyx_t_7; goto __pyx_L7_bool_binop_done; } __pyx_t_7 = ((!(__pyx_v_little_endian != 0)) != 0); __pyx_t_6 = __pyx_t_7; __pyx_L7_bool_binop_done:; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":825 * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") * * if ((child.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< * (child.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") */ if (__pyx_t_6) { /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":827 * if ((child.byteorder == c'>' and little_endian) or * (child.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< * # One could encode it in the format string and have Cython * # complain instead, BUT: < and > in format strings also imply */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__6, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 827, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 827, __pyx_L1_error) /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":825 * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") * * if ((child.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< * (child.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") */ } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":837 * * # Output padding bytes * while offset[0] < new_offset: # <<<<<<<<<<<<<< * f[0] = 120 # "x"; pad byte * f += 1 */ while (1) { __pyx_t_3 = __Pyx_PyInt_From_int((__pyx_v_offset[0])); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 837, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_t_3, __pyx_v_new_offset, Py_LT); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 837, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 837, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (!__pyx_t_6) break; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":838 * # Output padding bytes * while offset[0] < new_offset: * f[0] = 120 # "x"; pad byte # <<<<<<<<<<<<<< * f += 1 * offset[0] += 1 */ (__pyx_v_f[0]) = 0x78; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":839 * while offset[0] < new_offset: * f[0] = 120 # "x"; pad byte * f += 1 # <<<<<<<<<<<<<< * offset[0] += 1 * 
*/ __pyx_v_f = (__pyx_v_f + 1); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":840 * f[0] = 120 # "x"; pad byte * f += 1 * offset[0] += 1 # <<<<<<<<<<<<<< * * offset[0] += child.itemsize */ __pyx_t_8 = 0; (__pyx_v_offset[__pyx_t_8]) = ((__pyx_v_offset[__pyx_t_8]) + 1); } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":842 * offset[0] += 1 * * offset[0] += child.itemsize # <<<<<<<<<<<<<< * * if not PyDataType_HASFIELDS(child): */ __pyx_t_8 = 0; (__pyx_v_offset[__pyx_t_8]) = ((__pyx_v_offset[__pyx_t_8]) + __pyx_v_child->elsize); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":844 * offset[0] += child.itemsize * * if not PyDataType_HASFIELDS(child): # <<<<<<<<<<<<<< * t = child.type_num * if end - f < 5: */ __pyx_t_6 = ((!(PyDataType_HASFIELDS(__pyx_v_child) != 0)) != 0); if (__pyx_t_6) { /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":845 * * if not PyDataType_HASFIELDS(child): * t = child.type_num # <<<<<<<<<<<<<< * if end - f < 5: * raise RuntimeError(u"Format string allocated too short.") */ __pyx_t_4 = __Pyx_PyInt_From_int(__pyx_v_child->type_num); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 845, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_XDECREF_SET(__pyx_v_t, __pyx_t_4); __pyx_t_4 = 0; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":846 * if not PyDataType_HASFIELDS(child): * t = child.type_num * if end - f < 5: # <<<<<<<<<<<<<< * raise RuntimeError(u"Format string allocated too short.") * */ __pyx_t_6 = (((__pyx_v_end - __pyx_v_f) < 5) != 0); if (__pyx_t_6) { /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":847 * t = child.type_num * if end - f < 5: * raise RuntimeError(u"Format string allocated too short.") # <<<<<<<<<<<<<< * * # Until ticket #99 is fixed, use integers to avoid warnings */ __pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin_RuntimeError, __pyx_tuple__7, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 847, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __PYX_ERR(1, 847, __pyx_L1_error) /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":846 * if not PyDataType_HASFIELDS(child): * t = child.type_num * if end - f < 5: # <<<<<<<<<<<<<< * raise RuntimeError(u"Format string allocated too short.") * */ } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":850 * * # Until ticket #99 is fixed, use integers to avoid warnings * if t == NPY_BYTE: f[0] = 98 #"b" # <<<<<<<<<<<<<< * elif t == NPY_UBYTE: f[0] = 66 #"B" * elif t == NPY_SHORT: f[0] = 104 #"h" */ __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_BYTE); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 850, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 850, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 850, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 98; goto __pyx_L15; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":851 * # Until ticket #99 is fixed, use integers to 
avoid warnings * if t == NPY_BYTE: f[0] = 98 #"b" * elif t == NPY_UBYTE: f[0] = 66 #"B" # <<<<<<<<<<<<<< * elif t == NPY_SHORT: f[0] = 104 #"h" * elif t == NPY_USHORT: f[0] = 72 #"H" */ __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_UBYTE); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 851, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 851, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 851, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 66; goto __pyx_L15; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":852 * if t == NPY_BYTE: f[0] = 98 #"b" * elif t == NPY_UBYTE: f[0] = 66 #"B" * elif t == NPY_SHORT: f[0] = 104 #"h" # <<<<<<<<<<<<<< * elif t == NPY_USHORT: f[0] = 72 #"H" * elif t == NPY_INT: f[0] = 105 #"i" */ __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_SHORT); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 852, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 852, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 852, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 0x68; goto __pyx_L15; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":853 * elif t == NPY_UBYTE: f[0] = 66 #"B" * elif t == NPY_SHORT: f[0] = 104 #"h" * elif t == NPY_USHORT: f[0] = 72 #"H" # <<<<<<<<<<<<<< * elif t == NPY_INT: f[0] = 105 #"i" * elif t == NPY_UINT: f[0] = 73 #"I" */ __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_USHORT); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 853, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 853, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 853, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 72; goto __pyx_L15; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":854 * elif t == NPY_SHORT: f[0] = 104 #"h" * elif t == NPY_USHORT: f[0] = 72 #"H" * elif t == NPY_INT: f[0] = 105 #"i" # <<<<<<<<<<<<<< * elif t == NPY_UINT: f[0] = 73 #"I" * elif t == NPY_LONG: f[0] = 108 #"l" */ __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_INT); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 854, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 854, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 854, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 0x69; goto __pyx_L15; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":855 * elif t == NPY_USHORT: f[0] = 72 #"H" * elif t == NPY_INT: f[0] = 105 #"i" * elif t == NPY_UINT: f[0] = 73 #"I" # <<<<<<<<<<<<<< * elif t == NPY_LONG: f[0] = 108 #"l" * elif t == 
NPY_ULONG: f[0] = 76 #"L" */ __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_UINT); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 855, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 855, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 855, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 73; goto __pyx_L15; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":856 * elif t == NPY_INT: f[0] = 105 #"i" * elif t == NPY_UINT: f[0] = 73 #"I" * elif t == NPY_LONG: f[0] = 108 #"l" # <<<<<<<<<<<<<< * elif t == NPY_ULONG: f[0] = 76 #"L" * elif t == NPY_LONGLONG: f[0] = 113 #"q" */ __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_LONG); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 856, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 856, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 856, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 0x6C; goto __pyx_L15; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":857 * elif t == NPY_UINT: f[0] = 73 #"I" * elif t == NPY_LONG: f[0] = 108 #"l" * elif t == NPY_ULONG: f[0] = 76 #"L" # <<<<<<<<<<<<<< * elif t == NPY_LONGLONG: f[0] = 113 #"q" * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" */ __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_ULONG); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 857, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 857, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 857, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 76; goto __pyx_L15; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":858 * elif t == NPY_LONG: f[0] = 108 #"l" * elif t == NPY_ULONG: f[0] = 76 #"L" * elif t == NPY_LONGLONG: f[0] = 113 #"q" # <<<<<<<<<<<<<< * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" * elif t == NPY_FLOAT: f[0] = 102 #"f" */ __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_LONGLONG); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 858, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 858, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 858, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 0x71; goto __pyx_L15; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":859 * elif t == NPY_ULONG: f[0] = 76 #"L" * elif t == NPY_LONGLONG: f[0] = 113 #"q" * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" # <<<<<<<<<<<<<< * elif t == NPY_FLOAT: f[0] = 102 #"f" * elif t == NPY_DOUBLE: f[0] = 100 #"d" */ __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_ULONGLONG); if (unlikely(!__pyx_t_3)) 
__PYX_ERR(1, 859, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 859, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 859, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 81; goto __pyx_L15; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":860 * elif t == NPY_LONGLONG: f[0] = 113 #"q" * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" * elif t == NPY_FLOAT: f[0] = 102 #"f" # <<<<<<<<<<<<<< * elif t == NPY_DOUBLE: f[0] = 100 #"d" * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" */ __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_FLOAT); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 860, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 860, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 860, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 0x66; goto __pyx_L15; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":861 * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" * elif t == NPY_FLOAT: f[0] = 102 #"f" * elif t == NPY_DOUBLE: f[0] = 100 #"d" # <<<<<<<<<<<<<< * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf */ __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_DOUBLE); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 861, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 861, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 861, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 0x64; goto __pyx_L15; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":862 * elif t == NPY_FLOAT: f[0] = 102 #"f" * elif t == NPY_DOUBLE: f[0] = 100 #"d" * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" # <<<<<<<<<<<<<< * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd */ __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_LONGDOUBLE); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 862, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 862, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 862, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 0x67; goto __pyx_L15; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":863 * elif t == NPY_DOUBLE: f[0] = 100 #"d" * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf # <<<<<<<<<<<<<< * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg */ __pyx_t_3 = 
__Pyx_PyInt_From_enum__NPY_TYPES(NPY_CFLOAT); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 863, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 863, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 863, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 90; (__pyx_v_f[1]) = 0x66; __pyx_v_f = (__pyx_v_f + 1); goto __pyx_L15; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":864 * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd # <<<<<<<<<<<<<< * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg * elif t == NPY_OBJECT: f[0] = 79 #"O" */ __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_CDOUBLE); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 864, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 864, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 864, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 90; (__pyx_v_f[1]) = 0x64; __pyx_v_f = (__pyx_v_f + 1); goto __pyx_L15; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":865 * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg # <<<<<<<<<<<<<< * elif t == NPY_OBJECT: f[0] = 79 #"O" * else: */ __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_CLONGDOUBLE); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 865, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 865, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 865, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 90; (__pyx_v_f[1]) = 0x67; __pyx_v_f = (__pyx_v_f + 1); goto __pyx_L15; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":866 * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg * elif t == NPY_OBJECT: f[0] = 79 #"O" # <<<<<<<<<<<<<< * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) */ __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_OBJECT); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 866, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 866, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 866, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 79; goto __pyx_L15; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":868 * elif t == NPY_OBJECT: f[0] = 79 #"O" * else: * 
raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) # <<<<<<<<<<<<<< * f += 1 * else: */ /*else*/ { __pyx_t_3 = PyUnicode_Format(__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, __pyx_v_t); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 868, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 868, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_4, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 868, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 868, __pyx_L1_error) } __pyx_L15:; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":869 * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) * f += 1 # <<<<<<<<<<<<<< * else: * # Cython ignores struct boundary information ("T{...}"), */ __pyx_v_f = (__pyx_v_f + 1); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":844 * offset[0] += child.itemsize * * if not PyDataType_HASFIELDS(child): # <<<<<<<<<<<<<< * t = child.type_num * if end - f < 5: */ goto __pyx_L13; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":873 * # Cython ignores struct boundary information ("T{...}"), * # so don't output it * f = _util_dtypestring(child, f, end, offset) # <<<<<<<<<<<<<< * return f * */ /*else*/ { __pyx_t_9 = __pyx_f_5numpy__util_dtypestring(__pyx_v_child, __pyx_v_f, __pyx_v_end, __pyx_v_offset); if (unlikely(__pyx_t_9 == ((char *)NULL))) __PYX_ERR(1, 873, __pyx_L1_error) __pyx_v_f = __pyx_t_9; } __pyx_L13:; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":818 * cdef tuple fields * * for childname in descr.names: # <<<<<<<<<<<<<< * fields = descr.fields[childname] * child, new_offset = fields */ } __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":874 * # so don't output it * f = _util_dtypestring(child, f, end, offset) * return f # <<<<<<<<<<<<<< * * */ __pyx_r = __pyx_v_f; goto __pyx_L0; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":809 * return () * * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: # <<<<<<<<<<<<<< * # Recursive utility function used in __getbuffer__ to get format * # string. The new location in the format string is returned. 
*/ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("numpy._util_dtypestring", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_child); __Pyx_XDECREF(__pyx_v_fields); __Pyx_XDECREF(__pyx_v_childname); __Pyx_XDECREF(__pyx_v_new_offset); __Pyx_XDECREF(__pyx_v_t); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":990 * * * cdef inline void set_array_base(ndarray arr, object base): # <<<<<<<<<<<<<< * cdef PyObject* baseptr * if base is None: */ static CYTHON_INLINE void __pyx_f_5numpy_set_array_base(PyArrayObject *__pyx_v_arr, PyObject *__pyx_v_base) { PyObject *__pyx_v_baseptr; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; __Pyx_RefNannySetupContext("set_array_base", 0); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":992 * cdef inline void set_array_base(ndarray arr, object base): * cdef PyObject* baseptr * if base is None: # <<<<<<<<<<<<<< * baseptr = NULL * else: */ __pyx_t_1 = (__pyx_v_base == Py_None); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":993 * cdef PyObject* baseptr * if base is None: * baseptr = NULL # <<<<<<<<<<<<<< * else: * Py_INCREF(base) # important to do this before decref below! */ __pyx_v_baseptr = NULL; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":992 * cdef inline void set_array_base(ndarray arr, object base): * cdef PyObject* baseptr * if base is None: # <<<<<<<<<<<<<< * baseptr = NULL * else: */ goto __pyx_L3; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":995 * baseptr = NULL * else: * Py_INCREF(base) # important to do this before decref below! # <<<<<<<<<<<<<< * baseptr = base * Py_XDECREF(arr.base) */ /*else*/ { Py_INCREF(__pyx_v_base); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":996 * else: * Py_INCREF(base) # important to do this before decref below! * baseptr = base # <<<<<<<<<<<<<< * Py_XDECREF(arr.base) * arr.base = baseptr */ __pyx_v_baseptr = ((PyObject *)__pyx_v_base); } __pyx_L3:; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":997 * Py_INCREF(base) # important to do this before decref below! 
* baseptr = base * Py_XDECREF(arr.base) # <<<<<<<<<<<<<< * arr.base = baseptr * */ Py_XDECREF(__pyx_v_arr->base); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":998 * baseptr = base * Py_XDECREF(arr.base) * arr.base = baseptr # <<<<<<<<<<<<<< * * cdef inline object get_array_base(ndarray arr): */ __pyx_v_arr->base = __pyx_v_baseptr; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":990 * * * cdef inline void set_array_base(ndarray arr, object base): # <<<<<<<<<<<<<< * cdef PyObject* baseptr * if base is None: */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1000 * arr.base = baseptr * * cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<< * if arr.base is NULL: * return None */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_get_array_base(PyArrayObject *__pyx_v_arr) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("get_array_base", 0); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1001 * * cdef inline object get_array_base(ndarray arr): * if arr.base is NULL: # <<<<<<<<<<<<<< * return None * else: */ __pyx_t_1 = ((__pyx_v_arr->base == NULL) != 0); if (__pyx_t_1) { /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1002 * cdef inline object get_array_base(ndarray arr): * if arr.base is NULL: * return None # <<<<<<<<<<<<<< * else: * return arr.base */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(Py_None); __pyx_r = Py_None; goto __pyx_L0; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1001 * * cdef inline object get_array_base(ndarray arr): * if arr.base is NULL: # <<<<<<<<<<<<<< * return None * else: */ } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1004 * return None * else: * return arr.base # <<<<<<<<<<<<<< * * */ /*else*/ { __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_arr->base)); __pyx_r = ((PyObject *)__pyx_v_arr->base); goto __pyx_L0; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1000 * arr.base = baseptr * * cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<< * if arr.base is NULL: * return None */ /* function exit code */ __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1009 * # Versions of the import_* functions which are more suitable for * # Cython code. * cdef inline int import_array() except -1: # <<<<<<<<<<<<<< * try: * _import_array() */ static CYTHON_INLINE int __pyx_f_5numpy_import_array(void) { int __pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; int __pyx_t_4; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; PyObject *__pyx_t_8 = NULL; __Pyx_RefNannySetupContext("import_array", 0); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1010 * # Cython code. 
* cdef inline int import_array() except -1: * try: # <<<<<<<<<<<<<< * _import_array() * except Exception: */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_1, &__pyx_t_2, &__pyx_t_3); __Pyx_XGOTREF(__pyx_t_1); __Pyx_XGOTREF(__pyx_t_2); __Pyx_XGOTREF(__pyx_t_3); /*try:*/ { /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1011 * cdef inline int import_array() except -1: * try: * _import_array() # <<<<<<<<<<<<<< * except Exception: * raise ImportError("numpy.core.multiarray failed to import") */ __pyx_t_4 = _import_array(); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 1011, __pyx_L3_error) /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1010 * # Cython code. * cdef inline int import_array() except -1: * try: # <<<<<<<<<<<<<< * _import_array() * except Exception: */ } __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; goto __pyx_L8_try_end; __pyx_L3_error:; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1012 * try: * _import_array() * except Exception: # <<<<<<<<<<<<<< * raise ImportError("numpy.core.multiarray failed to import") * */ __pyx_t_4 = __Pyx_PyErr_ExceptionMatches(((PyObject *)(&((PyTypeObject*)PyExc_Exception)[0]))); if (__pyx_t_4) { __Pyx_AddTraceback("numpy.import_array", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_5, &__pyx_t_6, &__pyx_t_7) < 0) __PYX_ERR(1, 1012, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GOTREF(__pyx_t_6); __Pyx_GOTREF(__pyx_t_7); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1013 * _import_array() * except Exception: * raise ImportError("numpy.core.multiarray failed to import") # <<<<<<<<<<<<<< * * cdef inline int import_umath() except -1: */ __pyx_t_8 = __Pyx_PyObject_Call(__pyx_builtin_ImportError, __pyx_tuple__8, NULL); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 1013, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_Raise(__pyx_t_8, 0, 0, 0); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __PYX_ERR(1, 1013, __pyx_L5_except_error) } goto __pyx_L5_except_error; __pyx_L5_except_error:; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1010 * # Cython code. * cdef inline int import_array() except -1: * try: # <<<<<<<<<<<<<< * _import_array() * except Exception: */ __Pyx_XGIVEREF(__pyx_t_1); __Pyx_XGIVEREF(__pyx_t_2); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_ExceptionReset(__pyx_t_1, __pyx_t_2, __pyx_t_3); goto __pyx_L1_error; __pyx_L8_try_end:; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1009 * # Versions of the import_* functions which are more suitable for * # Cython code. 
* cdef inline int import_array() except -1: # <<<<<<<<<<<<<< * try: * _import_array() */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_8); __Pyx_AddTraceback("numpy.import_array", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1015 * raise ImportError("numpy.core.multiarray failed to import") * * cdef inline int import_umath() except -1: # <<<<<<<<<<<<<< * try: * _import_umath() */ static CYTHON_INLINE int __pyx_f_5numpy_import_umath(void) { int __pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; int __pyx_t_4; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; PyObject *__pyx_t_8 = NULL; __Pyx_RefNannySetupContext("import_umath", 0); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1016 * * cdef inline int import_umath() except -1: * try: # <<<<<<<<<<<<<< * _import_umath() * except Exception: */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_1, &__pyx_t_2, &__pyx_t_3); __Pyx_XGOTREF(__pyx_t_1); __Pyx_XGOTREF(__pyx_t_2); __Pyx_XGOTREF(__pyx_t_3); /*try:*/ { /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1017 * cdef inline int import_umath() except -1: * try: * _import_umath() # <<<<<<<<<<<<<< * except Exception: * raise ImportError("numpy.core.umath failed to import") */ __pyx_t_4 = _import_umath(); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 1017, __pyx_L3_error) /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1016 * * cdef inline int import_umath() except -1: * try: # <<<<<<<<<<<<<< * _import_umath() * except Exception: */ } __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; goto __pyx_L8_try_end; __pyx_L3_error:; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1018 * try: * _import_umath() * except Exception: # <<<<<<<<<<<<<< * raise ImportError("numpy.core.umath failed to import") * */ __pyx_t_4 = __Pyx_PyErr_ExceptionMatches(((PyObject *)(&((PyTypeObject*)PyExc_Exception)[0]))); if (__pyx_t_4) { __Pyx_AddTraceback("numpy.import_umath", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_5, &__pyx_t_6, &__pyx_t_7) < 0) __PYX_ERR(1, 1018, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GOTREF(__pyx_t_6); __Pyx_GOTREF(__pyx_t_7); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1019 * _import_umath() * except Exception: * raise ImportError("numpy.core.umath failed to import") # <<<<<<<<<<<<<< * * cdef inline int import_ufunc() except -1: */ __pyx_t_8 = __Pyx_PyObject_Call(__pyx_builtin_ImportError, __pyx_tuple__9, NULL); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 1019, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_Raise(__pyx_t_8, 0, 0, 0); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __PYX_ERR(1, 1019, __pyx_L5_except_error) } goto __pyx_L5_except_error; __pyx_L5_except_error:; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1016 * * cdef inline int 
import_umath() except -1: * try: # <<<<<<<<<<<<<< * _import_umath() * except Exception: */ __Pyx_XGIVEREF(__pyx_t_1); __Pyx_XGIVEREF(__pyx_t_2); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_ExceptionReset(__pyx_t_1, __pyx_t_2, __pyx_t_3); goto __pyx_L1_error; __pyx_L8_try_end:; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1015 * raise ImportError("numpy.core.multiarray failed to import") * * cdef inline int import_umath() except -1: # <<<<<<<<<<<<<< * try: * _import_umath() */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_8); __Pyx_AddTraceback("numpy.import_umath", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1021 * raise ImportError("numpy.core.umath failed to import") * * cdef inline int import_ufunc() except -1: # <<<<<<<<<<<<<< * try: * _import_umath() */ static CYTHON_INLINE int __pyx_f_5numpy_import_ufunc(void) { int __pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; int __pyx_t_4; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; PyObject *__pyx_t_8 = NULL; __Pyx_RefNannySetupContext("import_ufunc", 0); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1022 * * cdef inline int import_ufunc() except -1: * try: # <<<<<<<<<<<<<< * _import_umath() * except Exception: */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_1, &__pyx_t_2, &__pyx_t_3); __Pyx_XGOTREF(__pyx_t_1); __Pyx_XGOTREF(__pyx_t_2); __Pyx_XGOTREF(__pyx_t_3); /*try:*/ { /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1023 * cdef inline int import_ufunc() except -1: * try: * _import_umath() # <<<<<<<<<<<<<< * except Exception: * raise ImportError("numpy.core.umath failed to import") */ __pyx_t_4 = _import_umath(); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 1023, __pyx_L3_error) /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1022 * * cdef inline int import_ufunc() except -1: * try: # <<<<<<<<<<<<<< * _import_umath() * except Exception: */ } __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; goto __pyx_L8_try_end; __pyx_L3_error:; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1024 * try: * _import_umath() * except Exception: # <<<<<<<<<<<<<< * raise ImportError("numpy.core.umath failed to import") */ __pyx_t_4 = __Pyx_PyErr_ExceptionMatches(((PyObject *)(&((PyTypeObject*)PyExc_Exception)[0]))); if (__pyx_t_4) { __Pyx_AddTraceback("numpy.import_ufunc", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_5, &__pyx_t_6, &__pyx_t_7) < 0) __PYX_ERR(1, 1024, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GOTREF(__pyx_t_6); __Pyx_GOTREF(__pyx_t_7); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1025 * _import_umath() * except Exception: * raise ImportError("numpy.core.umath failed to import") # <<<<<<<<<<<<<< */ __pyx_t_8 = __Pyx_PyObject_Call(__pyx_builtin_ImportError, __pyx_tuple__10, NULL); if 
(unlikely(!__pyx_t_8)) __PYX_ERR(1, 1025, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_Raise(__pyx_t_8, 0, 0, 0); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __PYX_ERR(1, 1025, __pyx_L5_except_error) } goto __pyx_L5_except_error; __pyx_L5_except_error:; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1022 * * cdef inline int import_ufunc() except -1: * try: # <<<<<<<<<<<<<< * _import_umath() * except Exception: */ __Pyx_XGIVEREF(__pyx_t_1); __Pyx_XGIVEREF(__pyx_t_2); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_ExceptionReset(__pyx_t_1, __pyx_t_2, __pyx_t_3); goto __pyx_L1_error; __pyx_L8_try_end:; } /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1021 * raise ImportError("numpy.core.umath failed to import") * * cdef inline int import_ufunc() except -1: # <<<<<<<<<<<<<< * try: * _import_umath() */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_8); __Pyx_AddTraceback("numpy.import_ufunc", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyMethodDef __pyx_methods[] = { {0, 0, 0, 0} }; #if PY_MAJOR_VERSION >= 3 #if CYTHON_PEP489_MULTI_PHASE_INIT static PyObject* __pyx_pymod_create(PyObject *spec, PyModuleDef *def); /*proto*/ static int __pyx_pymod_exec_rectangular_overlap(PyObject* module); /*proto*/ static PyModuleDef_Slot __pyx_moduledef_slots[] = { {Py_mod_create, (void*)__pyx_pymod_create}, {Py_mod_exec, (void*)__pyx_pymod_exec_rectangular_overlap}, {0, NULL} }; #endif static struct PyModuleDef __pyx_moduledef = { PyModuleDef_HEAD_INIT, "rectangular_overlap", 0, /* m_doc */ #if CYTHON_PEP489_MULTI_PHASE_INIT 0, /* m_size */ #else -1, /* m_size */ #endif __pyx_methods /* m_methods */, #if CYTHON_PEP489_MULTI_PHASE_INIT __pyx_moduledef_slots, /* m_slots */ #else NULL, /* m_reload */ #endif NULL, /* m_traverse */ NULL, /* m_clear */ NULL /* m_free */ }; #endif static __Pyx_StringTabEntry __pyx_string_tab[] = { {&__pyx_n_s_DTYPE, __pyx_k_DTYPE, sizeof(__pyx_k_DTYPE), 0, 0, 1, 1}, {&__pyx_kp_u_Exact_mode_has_not_been_implemen, __pyx_k_Exact_mode_has_not_been_implemen, sizeof(__pyx_k_Exact_mode_has_not_been_implemen), 0, 1, 0, 0}, {&__pyx_kp_u_Format_string_allocated_too_shor, __pyx_k_Format_string_allocated_too_shor, sizeof(__pyx_k_Format_string_allocated_too_shor), 0, 1, 0, 0}, {&__pyx_kp_u_Format_string_allocated_too_shor_2, __pyx_k_Format_string_allocated_too_shor_2, sizeof(__pyx_k_Format_string_allocated_too_shor_2), 0, 1, 0, 0}, {&__pyx_n_s_ImportError, __pyx_k_ImportError, sizeof(__pyx_k_ImportError), 0, 0, 1, 1}, {&__pyx_kp_u_Non_native_byte_order_not_suppor, __pyx_k_Non_native_byte_order_not_suppor, sizeof(__pyx_k_Non_native_byte_order_not_suppor), 0, 1, 0, 0}, {&__pyx_n_s_NotImplementedError, __pyx_k_NotImplementedError, sizeof(__pyx_k_NotImplementedError), 0, 0, 1, 1}, {&__pyx_n_s_RuntimeError, __pyx_k_RuntimeError, sizeof(__pyx_k_RuntimeError), 0, 0, 1, 1}, {&__pyx_n_s_ValueError, __pyx_k_ValueError, sizeof(__pyx_k_ValueError), 0, 0, 1, 1}, {&__pyx_n_s_all, __pyx_k_all, sizeof(__pyx_k_all), 0, 0, 1, 1}, {&__pyx_n_s_cline_in_traceback, __pyx_k_cline_in_traceback, sizeof(__pyx_k_cline_in_traceback), 0, 0, 1, 1}, {&__pyx_n_s_dtype, __pyx_k_dtype, sizeof(__pyx_k_dtype), 0, 0, 1, 1}, {&__pyx_n_s_dx, __pyx_k_dx, sizeof(__pyx_k_dx), 0, 0, 1, 1}, {&__pyx_n_s_dy, __pyx_k_dy, sizeof(__pyx_k_dy), 0, 0, 
1, 1}, {&__pyx_n_s_float64, __pyx_k_float64, sizeof(__pyx_k_float64), 0, 0, 1, 1}, {&__pyx_n_s_frac, __pyx_k_frac, sizeof(__pyx_k_frac), 0, 0, 1, 1}, {&__pyx_n_s_height, __pyx_k_height, sizeof(__pyx_k_height), 0, 0, 1, 1}, {&__pyx_n_s_i, __pyx_k_i, sizeof(__pyx_k_i), 0, 0, 1, 1}, {&__pyx_n_s_import, __pyx_k_import, sizeof(__pyx_k_import), 0, 0, 1, 1}, {&__pyx_n_s_j, __pyx_k_j, sizeof(__pyx_k_j), 0, 0, 1, 1}, {&__pyx_n_s_main, __pyx_k_main, sizeof(__pyx_k_main), 0, 0, 1, 1}, {&__pyx_kp_u_ndarray_is_not_C_contiguous, __pyx_k_ndarray_is_not_C_contiguous, sizeof(__pyx_k_ndarray_is_not_C_contiguous), 0, 1, 0, 0}, {&__pyx_kp_u_ndarray_is_not_Fortran_contiguou, __pyx_k_ndarray_is_not_Fortran_contiguou, sizeof(__pyx_k_ndarray_is_not_Fortran_contiguou), 0, 1, 0, 0}, {&__pyx_n_s_np, __pyx_k_np, sizeof(__pyx_k_np), 0, 0, 1, 1}, {&__pyx_n_s_numpy, __pyx_k_numpy, sizeof(__pyx_k_numpy), 0, 0, 1, 1}, {&__pyx_kp_u_numpy_core_multiarray_failed_to, __pyx_k_numpy_core_multiarray_failed_to, sizeof(__pyx_k_numpy_core_multiarray_failed_to), 0, 1, 0, 0}, {&__pyx_kp_u_numpy_core_umath_failed_to_impor, __pyx_k_numpy_core_umath_failed_to_impor, sizeof(__pyx_k_numpy_core_umath_failed_to_impor), 0, 1, 0, 0}, {&__pyx_n_s_nx, __pyx_k_nx, sizeof(__pyx_k_nx), 0, 0, 1, 1}, {&__pyx_n_s_ny, __pyx_k_ny, sizeof(__pyx_k_ny), 0, 0, 1, 1}, {&__pyx_kp_s_photutils_geometry_rectangular_o, __pyx_k_photutils_geometry_rectangular_o, sizeof(__pyx_k_photutils_geometry_rectangular_o), 0, 0, 1, 0}, {&__pyx_n_s_photutils_geometry_rectangular_o_2, __pyx_k_photutils_geometry_rectangular_o_2, sizeof(__pyx_k_photutils_geometry_rectangular_o_2), 0, 0, 1, 1}, {&__pyx_n_s_pxmax, __pyx_k_pxmax, sizeof(__pyx_k_pxmax), 0, 0, 1, 1}, {&__pyx_n_s_pxmin, __pyx_k_pxmin, sizeof(__pyx_k_pxmin), 0, 0, 1, 1}, {&__pyx_n_s_pymax, __pyx_k_pymax, sizeof(__pyx_k_pymax), 0, 0, 1, 1}, {&__pyx_n_s_pymin, __pyx_k_pymin, sizeof(__pyx_k_pymin), 0, 0, 1, 1}, {&__pyx_n_s_range, __pyx_k_range, sizeof(__pyx_k_range), 0, 0, 1, 1}, {&__pyx_n_s_rectangular_overlap_grid, __pyx_k_rectangular_overlap_grid, sizeof(__pyx_k_rectangular_overlap_grid), 0, 0, 1, 1}, {&__pyx_n_u_rectangular_overlap_grid, __pyx_k_rectangular_overlap_grid, sizeof(__pyx_k_rectangular_overlap_grid), 0, 1, 0, 1}, {&__pyx_n_s_subpixels, __pyx_k_subpixels, sizeof(__pyx_k_subpixels), 0, 0, 1, 1}, {&__pyx_n_s_test, __pyx_k_test, sizeof(__pyx_k_test), 0, 0, 1, 1}, {&__pyx_n_s_theta, __pyx_k_theta, sizeof(__pyx_k_theta), 0, 0, 1, 1}, {&__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, __pyx_k_unknown_dtype_code_in_numpy_pxd, sizeof(__pyx_k_unknown_dtype_code_in_numpy_pxd), 0, 1, 0, 0}, {&__pyx_n_s_use_exact, __pyx_k_use_exact, sizeof(__pyx_k_use_exact), 0, 0, 1, 1}, {&__pyx_n_s_width, __pyx_k_width, sizeof(__pyx_k_width), 0, 0, 1, 1}, {&__pyx_n_s_x, __pyx_k_x, sizeof(__pyx_k_x), 0, 0, 1, 1}, {&__pyx_n_s_xmax, __pyx_k_xmax, sizeof(__pyx_k_xmax), 0, 0, 1, 1}, {&__pyx_n_s_xmin, __pyx_k_xmin, sizeof(__pyx_k_xmin), 0, 0, 1, 1}, {&__pyx_n_s_y, __pyx_k_y, sizeof(__pyx_k_y), 0, 0, 1, 1}, {&__pyx_n_s_ymax, __pyx_k_ymax, sizeof(__pyx_k_ymax), 0, 0, 1, 1}, {&__pyx_n_s_ymin, __pyx_k_ymin, sizeof(__pyx_k_ymin), 0, 0, 1, 1}, {&__pyx_n_s_zeros, __pyx_k_zeros, sizeof(__pyx_k_zeros), 0, 0, 1, 1}, {0, 0, 0, 0, 0, 0, 0} }; static int __Pyx_InitCachedBuiltins(void) { __pyx_builtin_NotImplementedError = __Pyx_GetBuiltinName(__pyx_n_s_NotImplementedError); if (!__pyx_builtin_NotImplementedError) __PYX_ERR(0, 73, __pyx_L1_error) __pyx_builtin_range = __Pyx_GetBuiltinName(__pyx_n_s_range); if (!__pyx_builtin_range) __PYX_ERR(0, 83, 
__pyx_L1_error) __pyx_builtin_ValueError = __Pyx_GetBuiltinName(__pyx_n_s_ValueError); if (!__pyx_builtin_ValueError) __PYX_ERR(1, 235, __pyx_L1_error) __pyx_builtin_RuntimeError = __Pyx_GetBuiltinName(__pyx_n_s_RuntimeError); if (!__pyx_builtin_RuntimeError) __PYX_ERR(1, 823, __pyx_L1_error) __pyx_builtin_ImportError = __Pyx_GetBuiltinName(__pyx_n_s_ImportError); if (!__pyx_builtin_ImportError) __PYX_ERR(1, 1013, __pyx_L1_error) return 0; __pyx_L1_error:; return -1; } static int __Pyx_InitCachedConstants(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0); /* "photutils/geometry/rectangular_overlap.pyx":73 * * if use_exact == 1: * raise NotImplementedError("Exact mode has not been implemented for " # <<<<<<<<<<<<<< * "rectangular apertures") * */ __pyx_tuple_ = PyTuple_Pack(1, __pyx_kp_u_Exact_mode_has_not_been_implemen); if (unlikely(!__pyx_tuple_)) __PYX_ERR(0, 73, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple_); __Pyx_GIVEREF(__pyx_tuple_); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":235 * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): * raise ValueError(u"ndarray is not C contiguous") # <<<<<<<<<<<<<< * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) */ __pyx_tuple__2 = PyTuple_Pack(1, __pyx_kp_u_ndarray_is_not_C_contiguous); if (unlikely(!__pyx_tuple__2)) __PYX_ERR(1, 235, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__2); __Pyx_GIVEREF(__pyx_tuple__2); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":239 * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): * raise ValueError(u"ndarray is not Fortran contiguous") # <<<<<<<<<<<<<< * * info.buf = PyArray_DATA(self) */ __pyx_tuple__3 = PyTuple_Pack(1, __pyx_kp_u_ndarray_is_not_Fortran_contiguou); if (unlikely(!__pyx_tuple__3)) __PYX_ERR(1, 239, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__3); __Pyx_GIVEREF(__pyx_tuple__3); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":276 * if ((descr.byteorder == c'>' and little_endian) or * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" */ __pyx_tuple__4 = PyTuple_Pack(1, __pyx_kp_u_Non_native_byte_order_not_suppor); if (unlikely(!__pyx_tuple__4)) __PYX_ERR(1, 276, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__4); __Pyx_GIVEREF(__pyx_tuple__4); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":823 * * if (end - f) - (new_offset - offset[0]) < 15: * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") # <<<<<<<<<<<<<< * * if ((child.byteorder == c'>' and little_endian) or */ __pyx_tuple__5 = PyTuple_Pack(1, __pyx_kp_u_Format_string_allocated_too_shor); if (unlikely(!__pyx_tuple__5)) __PYX_ERR(1, 823, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__5); __Pyx_GIVEREF(__pyx_tuple__5); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":827 * if ((child.byteorder == c'>' and little_endian) or * (child.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< * # One could encode it in the format string and have Cython * # complain instead, BUT: < and > 
in format strings also imply */ __pyx_tuple__6 = PyTuple_Pack(1, __pyx_kp_u_Non_native_byte_order_not_suppor); if (unlikely(!__pyx_tuple__6)) __PYX_ERR(1, 827, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__6); __Pyx_GIVEREF(__pyx_tuple__6); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":847 * t = child.type_num * if end - f < 5: * raise RuntimeError(u"Format string allocated too short.") # <<<<<<<<<<<<<< * * # Until ticket #99 is fixed, use integers to avoid warnings */ __pyx_tuple__7 = PyTuple_Pack(1, __pyx_kp_u_Format_string_allocated_too_shor_2); if (unlikely(!__pyx_tuple__7)) __PYX_ERR(1, 847, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__7); __Pyx_GIVEREF(__pyx_tuple__7); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1013 * _import_array() * except Exception: * raise ImportError("numpy.core.multiarray failed to import") # <<<<<<<<<<<<<< * * cdef inline int import_umath() except -1: */ __pyx_tuple__8 = PyTuple_Pack(1, __pyx_kp_u_numpy_core_multiarray_failed_to); if (unlikely(!__pyx_tuple__8)) __PYX_ERR(1, 1013, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__8); __Pyx_GIVEREF(__pyx_tuple__8); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1019 * _import_umath() * except Exception: * raise ImportError("numpy.core.umath failed to import") # <<<<<<<<<<<<<< * * cdef inline int import_ufunc() except -1: */ __pyx_tuple__9 = PyTuple_Pack(1, __pyx_kp_u_numpy_core_umath_failed_to_impor); if (unlikely(!__pyx_tuple__9)) __PYX_ERR(1, 1019, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__9); __Pyx_GIVEREF(__pyx_tuple__9); /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1025 * _import_umath() * except Exception: * raise ImportError("numpy.core.umath failed to import") # <<<<<<<<<<<<<< */ __pyx_tuple__10 = PyTuple_Pack(1, __pyx_kp_u_numpy_core_umath_failed_to_impor); if (unlikely(!__pyx_tuple__10)) __PYX_ERR(1, 1025, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__10); __Pyx_GIVEREF(__pyx_tuple__10); /* "photutils/geometry/rectangular_overlap.pyx":28 * * * def rectangular_overlap_grid(double xmin, double xmax, double ymin, # <<<<<<<<<<<<<< * double ymax, int nx, int ny, double width, * double height, double theta, int use_exact, */ __pyx_tuple__11 = PyTuple_Pack(22, __pyx_n_s_xmin, __pyx_n_s_xmax, __pyx_n_s_ymin, __pyx_n_s_ymax, __pyx_n_s_nx, __pyx_n_s_ny, __pyx_n_s_width, __pyx_n_s_height, __pyx_n_s_theta, __pyx_n_s_use_exact, __pyx_n_s_subpixels, __pyx_n_s_i, __pyx_n_s_j, __pyx_n_s_x, __pyx_n_s_y, __pyx_n_s_dx, __pyx_n_s_dy, __pyx_n_s_pxmin, __pyx_n_s_pxmax, __pyx_n_s_pymin, __pyx_n_s_pymax, __pyx_n_s_frac); if (unlikely(!__pyx_tuple__11)) __PYX_ERR(0, 28, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__11); __Pyx_GIVEREF(__pyx_tuple__11); __pyx_codeobj__12 = (PyObject*)__Pyx_PyCode_New(11, 0, 22, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__11, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_photutils_geometry_rectangular_o, __pyx_n_s_rectangular_overlap_grid, 28, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__12)) __PYX_ERR(0, 28, __pyx_L1_error) __Pyx_RefNannyFinishContext(); return 0; __pyx_L1_error:; __Pyx_RefNannyFinishContext(); return -1; } static int __Pyx_InitGlobals(void) { if (__Pyx_InitStrings(__pyx_string_tab) < 0) __PYX_ERR(0, 1, __pyx_L1_error); return 0; __pyx_L1_error:; return -1; } #if PY_MAJOR_VERSION < 3 PyMODINIT_FUNC 
initrectangular_overlap(void); /*proto*/ PyMODINIT_FUNC initrectangular_overlap(void) #else PyMODINIT_FUNC PyInit_rectangular_overlap(void); /*proto*/ PyMODINIT_FUNC PyInit_rectangular_overlap(void) #if CYTHON_PEP489_MULTI_PHASE_INIT { return PyModuleDef_Init(&__pyx_moduledef); } static int __Pyx_copy_spec_to_module(PyObject *spec, PyObject *moddict, const char* from_name, const char* to_name) { PyObject *value = PyObject_GetAttrString(spec, from_name); int result = 0; if (likely(value)) { result = PyDict_SetItemString(moddict, to_name, value); Py_DECREF(value); } else if (PyErr_ExceptionMatches(PyExc_AttributeError)) { PyErr_Clear(); } else { result = -1; } return result; } static PyObject* __pyx_pymod_create(PyObject *spec, CYTHON_UNUSED PyModuleDef *def) { PyObject *module = NULL, *moddict, *modname; if (__pyx_m) return __Pyx_NewRef(__pyx_m); modname = PyObject_GetAttrString(spec, "name"); if (unlikely(!modname)) goto bad; module = PyModule_NewObject(modname); Py_DECREF(modname); if (unlikely(!module)) goto bad; moddict = PyModule_GetDict(module); if (unlikely(!moddict)) goto bad; if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "loader", "__loader__") < 0)) goto bad; if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "origin", "__file__") < 0)) goto bad; if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "parent", "__package__") < 0)) goto bad; if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "submodule_search_locations", "__path__") < 0)) goto bad; return module; bad: Py_XDECREF(module); return NULL; } static int __pyx_pymod_exec_rectangular_overlap(PyObject *__pyx_pyinit_module) #endif #endif { PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; __Pyx_RefNannyDeclarations #if CYTHON_PEP489_MULTI_PHASE_INIT if (__pyx_m && __pyx_m == __pyx_pyinit_module) return 0; #endif #if CYTHON_REFNANNY __Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny"); if (!__Pyx_RefNanny) { PyErr_Clear(); __Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny"); if (!__Pyx_RefNanny) Py_FatalError("failed to import 'refnanny' module"); } #endif __Pyx_RefNannySetupContext("PyMODINIT_FUNC PyInit_rectangular_overlap(void)", 0); if (__Pyx_check_binary_version() < 0) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_empty_unicode = PyUnicode_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_unicode)) __PYX_ERR(0, 1, __pyx_L1_error) #ifdef __Pyx_CyFunction_USED if (__pyx_CyFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_FusedFunction_USED if (__pyx_FusedFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_Coroutine_USED if (__pyx_Coroutine_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_Generator_USED if (__pyx_Generator_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_AsyncGen_USED if (__pyx_AsyncGen_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_StopAsyncIteration_USED if (__pyx_StopAsyncIteration_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif /*--- Library function declarations ---*/ /*--- Threads initialization code ---*/ #if defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS #ifdef WITH_THREAD /* Python build with threading support? 
*/ PyEval_InitThreads(); #endif #endif /*--- Module creation code ---*/ #if CYTHON_PEP489_MULTI_PHASE_INIT __pyx_m = __pyx_pyinit_module; Py_INCREF(__pyx_m); #else #if PY_MAJOR_VERSION < 3 __pyx_m = Py_InitModule4("rectangular_overlap", __pyx_methods, 0, 0, PYTHON_API_VERSION); Py_XINCREF(__pyx_m); #else __pyx_m = PyModule_Create(&__pyx_moduledef); #endif if (unlikely(!__pyx_m)) __PYX_ERR(0, 1, __pyx_L1_error) #endif __pyx_d = PyModule_GetDict(__pyx_m); if (unlikely(!__pyx_d)) __PYX_ERR(0, 1, __pyx_L1_error) Py_INCREF(__pyx_d); __pyx_b = PyImport_AddModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_b)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_cython_runtime = PyImport_AddModule((char *) "cython_runtime"); if (unlikely(!__pyx_cython_runtime)) __PYX_ERR(0, 1, __pyx_L1_error) #if CYTHON_COMPILING_IN_PYPY Py_INCREF(__pyx_b); #endif if (PyObject_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) __PYX_ERR(0, 1, __pyx_L1_error); /*--- Initialize various global constants etc. ---*/ if (__Pyx_InitGlobals() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #if PY_MAJOR_VERSION < 3 && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT) if (__Pyx_init_sys_getdefaultencoding_params() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif if (__pyx_module_is_main_photutils__geometry__rectangular_overlap) { if (PyObject_SetAttrString(__pyx_m, "__name__", __pyx_n_s_main) < 0) __PYX_ERR(0, 1, __pyx_L1_error) } #if PY_MAJOR_VERSION >= 3 { PyObject *modules = PyImport_GetModuleDict(); if (unlikely(!modules)) __PYX_ERR(0, 1, __pyx_L1_error) if (!PyDict_GetItemString(modules, "photutils.geometry.rectangular_overlap")) { if (unlikely(PyDict_SetItemString(modules, "photutils.geometry.rectangular_overlap", __pyx_m) < 0)) __PYX_ERR(0, 1, __pyx_L1_error) } } #endif /*--- Builtin init code ---*/ if (__Pyx_InitCachedBuiltins() < 0) __PYX_ERR(0, 1, __pyx_L1_error) /*--- Constants init code ---*/ if (__Pyx_InitCachedConstants() < 0) __PYX_ERR(0, 1, __pyx_L1_error) /*--- Global init code ---*/ /*--- Variable export code ---*/ /*--- Function export code ---*/ /*--- Type init code ---*/ /*--- Type import code ---*/ __pyx_ptype_7cpython_4type_type = __Pyx_ImportType(__Pyx_BUILTIN_MODULE_NAME, "type", #if CYTHON_COMPILING_IN_PYPY sizeof(PyTypeObject), #else sizeof(PyHeapTypeObject), #endif 0); if (unlikely(!__pyx_ptype_7cpython_4type_type)) __PYX_ERR(2, 9, __pyx_L1_error) __pyx_ptype_7cpython_4bool_bool = __Pyx_ImportType(__Pyx_BUILTIN_MODULE_NAME, "bool", sizeof(PyBoolObject), 0); if (unlikely(!__pyx_ptype_7cpython_4bool_bool)) __PYX_ERR(3, 8, __pyx_L1_error) __pyx_ptype_7cpython_7complex_complex = __Pyx_ImportType(__Pyx_BUILTIN_MODULE_NAME, "complex", sizeof(PyComplexObject), 0); if (unlikely(!__pyx_ptype_7cpython_7complex_complex)) __PYX_ERR(4, 15, __pyx_L1_error) __pyx_ptype_5numpy_dtype = __Pyx_ImportType("numpy", "dtype", sizeof(PyArray_Descr), 0); if (unlikely(!__pyx_ptype_5numpy_dtype)) __PYX_ERR(1, 163, __pyx_L1_error) __pyx_ptype_5numpy_flatiter = __Pyx_ImportType("numpy", "flatiter", sizeof(PyArrayIterObject), 0); if (unlikely(!__pyx_ptype_5numpy_flatiter)) __PYX_ERR(1, 185, __pyx_L1_error) __pyx_ptype_5numpy_broadcast = __Pyx_ImportType("numpy", "broadcast", sizeof(PyArrayMultiIterObject), 0); if (unlikely(!__pyx_ptype_5numpy_broadcast)) __PYX_ERR(1, 189, __pyx_L1_error) __pyx_ptype_5numpy_ndarray = __Pyx_ImportType("numpy", "ndarray", sizeof(PyArrayObject), 0); if (unlikely(!__pyx_ptype_5numpy_ndarray)) __PYX_ERR(1, 198, __pyx_L1_error) __pyx_ptype_5numpy_ufunc = __Pyx_ImportType("numpy", "ufunc", 
sizeof(PyUFuncObject), 0); if (unlikely(!__pyx_ptype_5numpy_ufunc)) __PYX_ERR(1, 885, __pyx_L1_error) /*--- Variable import code ---*/ /*--- Function import code ---*/ /*--- Execution code ---*/ #if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED) if (__Pyx_patch_abc() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif /* "photutils/geometry/rectangular_overlap.pyx":5 * unicode_literals) * * import numpy as np # <<<<<<<<<<<<<< * cimport numpy as np * */ __pyx_t_1 = __Pyx_Import(__pyx_n_s_numpy, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 5, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem(__pyx_d, __pyx_n_s_np, __pyx_t_1) < 0) __PYX_ERR(0, 5, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "photutils/geometry/rectangular_overlap.pyx":9 * * * __all__ = ['rectangular_overlap_grid'] # <<<<<<<<<<<<<< * * */ __pyx_t_1 = PyList_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 9, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(__pyx_n_u_rectangular_overlap_grid); __Pyx_GIVEREF(__pyx_n_u_rectangular_overlap_grid); PyList_SET_ITEM(__pyx_t_1, 0, __pyx_n_u_rectangular_overlap_grid); if (PyDict_SetItem(__pyx_d, __pyx_n_s_all, __pyx_t_1) < 0) __PYX_ERR(0, 9, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "photutils/geometry/rectangular_overlap.pyx":22 * from cpython cimport bool * * DTYPE = np.float64 # <<<<<<<<<<<<<< * ctypedef np.float64_t DTYPE_t * */ __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 22, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_float64); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 22, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; if (PyDict_SetItem(__pyx_d, __pyx_n_s_DTYPE, __pyx_t_2) < 0) __PYX_ERR(0, 22, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "photutils/geometry/rectangular_overlap.pyx":28 * * * def rectangular_overlap_grid(double xmin, double xmax, double ymin, # <<<<<<<<<<<<<< * double ymax, int nx, int ny, double width, * double height, double theta, int use_exact, */ __pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_9photutils_8geometry_19rectangular_overlap_1rectangular_overlap_grid, NULL, __pyx_n_s_photutils_geometry_rectangular_o_2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 28, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_d, __pyx_n_s_rectangular_overlap_grid, __pyx_t_2) < 0) __PYX_ERR(0, 28, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "photutils/geometry/rectangular_overlap.pyx":1 * # Licensed under a 3-clause BSD style license - see LICENSE.rst # <<<<<<<<<<<<<< * from __future__ import (absolute_import, division, print_function, * unicode_literals) */ __pyx_t_2 = __Pyx_PyDict_NewPresized(0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_d, __pyx_n_s_test, __pyx_t_2) < 0) __PYX_ERR(0, 1, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "../../../../../../../usr/local/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1021 * raise ImportError("numpy.core.umath failed to import") * * cdef inline int import_ufunc() except -1: # <<<<<<<<<<<<<< * try: * _import_umath() */ /*--- Wrapped vars code ---*/ goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); if (__pyx_m) { if (__pyx_d) { __Pyx_AddTraceback("init photutils.geometry.rectangular_overlap", 0, __pyx_lineno, __pyx_filename); } Py_DECREF(__pyx_m); __pyx_m = 0; } else if (!PyErr_Occurred()) 
{ PyErr_SetString(PyExc_ImportError, "init photutils.geometry.rectangular_overlap"); } __pyx_L0:; __Pyx_RefNannyFinishContext(); #if CYTHON_PEP489_MULTI_PHASE_INIT return (__pyx_m != NULL) ? 0 : -1; #elif PY_MAJOR_VERSION >= 3 return __pyx_m; #else return; #endif } /* --- Runtime support code --- */ /* Refnanny */ #if CYTHON_REFNANNY static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) { PyObject *m = NULL, *p = NULL; void *r = NULL; m = PyImport_ImportModule((char *)modname); if (!m) goto end; p = PyObject_GetAttrString(m, (char *)"RefNannyAPI"); if (!p) goto end; r = PyLong_AsVoidPtr(p); end: Py_XDECREF(p); Py_XDECREF(m); return (__Pyx_RefNannyAPIStruct *)r; } #endif /* GetBuiltinName */ static PyObject *__Pyx_GetBuiltinName(PyObject *name) { PyObject* result = __Pyx_PyObject_GetAttrStr(__pyx_b, name); if (unlikely(!result)) { PyErr_Format(PyExc_NameError, #if PY_MAJOR_VERSION >= 3 "name '%U' is not defined", name); #else "name '%.200s' is not defined", PyString_AS_STRING(name)); #endif } return result; } /* RaiseArgTupleInvalid */ static void __Pyx_RaiseArgtupleInvalid( const char* func_name, int exact, Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found) { Py_ssize_t num_expected; const char *more_or_less; if (num_found < num_min) { num_expected = num_min; more_or_less = "at least"; } else { num_expected = num_max; more_or_less = "at most"; } if (exact) { more_or_less = "exactly"; } PyErr_Format(PyExc_TypeError, "%.200s() takes %.8s %" CYTHON_FORMAT_SSIZE_T "d positional argument%.1s (%" CYTHON_FORMAT_SSIZE_T "d given)", func_name, more_or_less, num_expected, (num_expected == 1) ? "" : "s", num_found); } /* RaiseDoubleKeywords */ static void __Pyx_RaiseDoubleKeywordsError( const char* func_name, PyObject* kw_name) { PyErr_Format(PyExc_TypeError, #if PY_MAJOR_VERSION >= 3 "%s() got multiple values for keyword argument '%U'", func_name, kw_name); #else "%s() got multiple values for keyword argument '%s'", func_name, PyString_AsString(kw_name)); #endif } /* ParseKeywords */ static int __Pyx_ParseOptionalKeywords( PyObject *kwds, PyObject **argnames[], PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args, const char* function_name) { PyObject *key = 0, *value = 0; Py_ssize_t pos = 0; PyObject*** name; PyObject*** first_kw_arg = argnames + num_pos_args; while (PyDict_Next(kwds, &pos, &key, &value)) { name = first_kw_arg; while (*name && (**name != key)) name++; if (*name) { values[name-argnames] = value; continue; } name = first_kw_arg; #if PY_MAJOR_VERSION < 3 if (likely(PyString_CheckExact(key)) || likely(PyString_Check(key))) { while (*name) { if ((CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**name) == PyString_GET_SIZE(key)) && _PyString_Eq(**name, key)) { values[name-argnames] = value; break; } name++; } if (*name) continue; else { PyObject*** argname = argnames; while (argname != first_kw_arg) { if ((**argname == key) || ( (CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**argname) == PyString_GET_SIZE(key)) && _PyString_Eq(**argname, key))) { goto arg_passed_twice; } argname++; } } } else #endif if (likely(PyUnicode_Check(key))) { while (*name) { int cmp = (**name == key) ? 0 : #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 (PyUnicode_GET_SIZE(**name) != PyUnicode_GET_SIZE(key)) ? 
1 : #endif PyUnicode_Compare(**name, key); if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; if (cmp == 0) { values[name-argnames] = value; break; } name++; } if (*name) continue; else { PyObject*** argname = argnames; while (argname != first_kw_arg) { int cmp = (**argname == key) ? 0 : #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 (PyUnicode_GET_SIZE(**argname) != PyUnicode_GET_SIZE(key)) ? 1 : #endif PyUnicode_Compare(**argname, key); if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; if (cmp == 0) goto arg_passed_twice; argname++; } } } else goto invalid_keyword_type; if (kwds2) { if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad; } else { goto invalid_keyword; } } return 0; arg_passed_twice: __Pyx_RaiseDoubleKeywordsError(function_name, key); goto bad; invalid_keyword_type: PyErr_Format(PyExc_TypeError, "%.200s() keywords must be strings", function_name); goto bad; invalid_keyword: PyErr_Format(PyExc_TypeError, #if PY_MAJOR_VERSION < 3 "%.200s() got an unexpected keyword argument '%.200s'", function_name, PyString_AsString(key)); #else "%s() got an unexpected keyword argument '%U'", function_name, key); #endif bad: return -1; } /* GetModuleGlobalName */ static CYTHON_INLINE PyObject *__Pyx_GetModuleGlobalName(PyObject *name) { PyObject *result; #if !CYTHON_AVOID_BORROWED_REFS result = PyDict_GetItem(__pyx_d, name); if (likely(result)) { Py_INCREF(result); } else { #else result = PyObject_GetItem(__pyx_d, name); if (!result) { PyErr_Clear(); #endif result = __Pyx_GetBuiltinName(name); } return result; } /* PyObjectCall */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw) { PyObject *result; ternaryfunc call = func->ob_type->tp_call; if (unlikely(!call)) return PyObject_Call(func, arg, kw); if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) return NULL; result = (*call)(func, arg, kw); Py_LeaveRecursiveCall(); if (unlikely(!result) && unlikely(!PyErr_Occurred())) { PyErr_SetString( PyExc_SystemError, "NULL result without error in PyObject_Call"); } return result; } #endif /* ExtTypeTest */ static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type) { if (unlikely(!type)) { PyErr_SetString(PyExc_SystemError, "Missing type object"); return 0; } if (likely(__Pyx_TypeCheck(obj, type))) return 1; PyErr_Format(PyExc_TypeError, "Cannot convert %.200s to %.200s", Py_TYPE(obj)->tp_name, type->tp_name); return 0; } /* IsLittleEndian */ static CYTHON_INLINE int __Pyx_Is_Little_Endian(void) { union { uint32_t u32; uint8_t u8[4]; } S; S.u32 = 0x01020304; return S.u8[0] == 4; } /* BufferFormatCheck */ static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx, __Pyx_BufFmt_StackElem* stack, __Pyx_TypeInfo* type) { stack[0].field = &ctx->root; stack[0].parent_offset = 0; ctx->root.type = type; ctx->root.name = "buffer dtype"; ctx->root.offset = 0; ctx->head = stack; ctx->head->field = &ctx->root; ctx->fmt_offset = 0; ctx->head->parent_offset = 0; ctx->new_packmode = '@'; ctx->enc_packmode = '@'; ctx->new_count = 1; ctx->enc_count = 0; ctx->enc_type = 0; ctx->is_complex = 0; ctx->is_valid_array = 0; ctx->struct_alignment = 0; while (type->typegroup == 'S') { ++ctx->head; ctx->head->field = type->fields; ctx->head->parent_offset = 0; type = type->fields->type; } } static int __Pyx_BufFmt_ParseNumber(const char** ts) { int count; const char* t = *ts; if (*t < '0' || *t > '9') { return -1; } else { count = *t++ - '0'; while (*t >= '0' && *t < '9') { count *= 10; count 
+= *t++ - '0'; } } *ts = t; return count; } static int __Pyx_BufFmt_ExpectNumber(const char **ts) { int number = __Pyx_BufFmt_ParseNumber(ts); if (number == -1) PyErr_Format(PyExc_ValueError,\ "Does not understand character buffer dtype format string ('%c')", **ts); return number; } static void __Pyx_BufFmt_RaiseUnexpectedChar(char ch) { PyErr_Format(PyExc_ValueError, "Unexpected format string character: '%c'", ch); } static const char* __Pyx_BufFmt_DescribeTypeChar(char ch, int is_complex) { switch (ch) { case 'c': return "'char'"; case 'b': return "'signed char'"; case 'B': return "'unsigned char'"; case 'h': return "'short'"; case 'H': return "'unsigned short'"; case 'i': return "'int'"; case 'I': return "'unsigned int'"; case 'l': return "'long'"; case 'L': return "'unsigned long'"; case 'q': return "'long long'"; case 'Q': return "'unsigned long long'"; case 'f': return (is_complex ? "'complex float'" : "'float'"); case 'd': return (is_complex ? "'complex double'" : "'double'"); case 'g': return (is_complex ? "'complex long double'" : "'long double'"); case 'T': return "a struct"; case 'O': return "Python object"; case 'P': return "a pointer"; case 's': case 'p': return "a string"; case 0: return "end"; default: return "unparseable format string"; } } static size_t __Pyx_BufFmt_TypeCharToStandardSize(char ch, int is_complex) { switch (ch) { case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return 2; case 'i': case 'I': case 'l': case 'L': return 4; case 'q': case 'Q': return 8; case 'f': return (is_complex ? 8 : 4); case 'd': return (is_complex ? 16 : 8); case 'g': { PyErr_SetString(PyExc_ValueError, "Python does not define a standard format string size for long double ('g').."); return 0; } case 'O': case 'P': return sizeof(void*); default: __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } static size_t __Pyx_BufFmt_TypeCharToNativeSize(char ch, int is_complex) { switch (ch) { case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return sizeof(short); case 'i': case 'I': return sizeof(int); case 'l': case 'L': return sizeof(long); #ifdef HAVE_LONG_LONG case 'q': case 'Q': return sizeof(PY_LONG_LONG); #endif case 'f': return sizeof(float) * (is_complex ? 2 : 1); case 'd': return sizeof(double) * (is_complex ? 2 : 1); case 'g': return sizeof(long double) * (is_complex ? 
2 : 1); case 'O': case 'P': return sizeof(void*); default: { __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } } typedef struct { char c; short x; } __Pyx_st_short; typedef struct { char c; int x; } __Pyx_st_int; typedef struct { char c; long x; } __Pyx_st_long; typedef struct { char c; float x; } __Pyx_st_float; typedef struct { char c; double x; } __Pyx_st_double; typedef struct { char c; long double x; } __Pyx_st_longdouble; typedef struct { char c; void *x; } __Pyx_st_void_p; #ifdef HAVE_LONG_LONG typedef struct { char c; PY_LONG_LONG x; } __Pyx_st_longlong; #endif static size_t __Pyx_BufFmt_TypeCharToAlignment(char ch, CYTHON_UNUSED int is_complex) { switch (ch) { case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return sizeof(__Pyx_st_short) - sizeof(short); case 'i': case 'I': return sizeof(__Pyx_st_int) - sizeof(int); case 'l': case 'L': return sizeof(__Pyx_st_long) - sizeof(long); #ifdef HAVE_LONG_LONG case 'q': case 'Q': return sizeof(__Pyx_st_longlong) - sizeof(PY_LONG_LONG); #endif case 'f': return sizeof(__Pyx_st_float) - sizeof(float); case 'd': return sizeof(__Pyx_st_double) - sizeof(double); case 'g': return sizeof(__Pyx_st_longdouble) - sizeof(long double); case 'P': case 'O': return sizeof(__Pyx_st_void_p) - sizeof(void*); default: __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } /* These are for computing the padding at the end of the struct to align on the first member of the struct. This will probably the same as above, but we don't have any guarantees. */ typedef struct { short x; char c; } __Pyx_pad_short; typedef struct { int x; char c; } __Pyx_pad_int; typedef struct { long x; char c; } __Pyx_pad_long; typedef struct { float x; char c; } __Pyx_pad_float; typedef struct { double x; char c; } __Pyx_pad_double; typedef struct { long double x; char c; } __Pyx_pad_longdouble; typedef struct { void *x; char c; } __Pyx_pad_void_p; #ifdef HAVE_LONG_LONG typedef struct { PY_LONG_LONG x; char c; } __Pyx_pad_longlong; #endif static size_t __Pyx_BufFmt_TypeCharToPadding(char ch, CYTHON_UNUSED int is_complex) { switch (ch) { case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return sizeof(__Pyx_pad_short) - sizeof(short); case 'i': case 'I': return sizeof(__Pyx_pad_int) - sizeof(int); case 'l': case 'L': return sizeof(__Pyx_pad_long) - sizeof(long); #ifdef HAVE_LONG_LONG case 'q': case 'Q': return sizeof(__Pyx_pad_longlong) - sizeof(PY_LONG_LONG); #endif case 'f': return sizeof(__Pyx_pad_float) - sizeof(float); case 'd': return sizeof(__Pyx_pad_double) - sizeof(double); case 'g': return sizeof(__Pyx_pad_longdouble) - sizeof(long double); case 'P': case 'O': return sizeof(__Pyx_pad_void_p) - sizeof(void*); default: __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } static char __Pyx_BufFmt_TypeCharToGroup(char ch, int is_complex) { switch (ch) { case 'c': return 'H'; case 'b': case 'h': case 'i': case 'l': case 'q': case 's': case 'p': return 'I'; case 'B': case 'H': case 'I': case 'L': case 'Q': return 'U'; case 'f': case 'd': case 'g': return (is_complex ? 
'C' : 'R'); case 'O': return 'O'; case 'P': return 'P'; default: { __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } } static void __Pyx_BufFmt_RaiseExpected(__Pyx_BufFmt_Context* ctx) { if (ctx->head == NULL || ctx->head->field == &ctx->root) { const char* expected; const char* quote; if (ctx->head == NULL) { expected = "end"; quote = ""; } else { expected = ctx->head->field->type->name; quote = "'"; } PyErr_Format(PyExc_ValueError, "Buffer dtype mismatch, expected %s%s%s but got %s", quote, expected, quote, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex)); } else { __Pyx_StructField* field = ctx->head->field; __Pyx_StructField* parent = (ctx->head - 1)->field; PyErr_Format(PyExc_ValueError, "Buffer dtype mismatch, expected '%s' but got %s in '%s.%s'", field->type->name, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex), parent->type->name, field->name); } } static int __Pyx_BufFmt_ProcessTypeChunk(__Pyx_BufFmt_Context* ctx) { char group; size_t size, offset, arraysize = 1; if (ctx->enc_type == 0) return 0; if (ctx->head->field->type->arraysize[0]) { int i, ndim = 0; if (ctx->enc_type == 's' || ctx->enc_type == 'p') { ctx->is_valid_array = ctx->head->field->type->ndim == 1; ndim = 1; if (ctx->enc_count != ctx->head->field->type->arraysize[0]) { PyErr_Format(PyExc_ValueError, "Expected a dimension of size %zu, got %zu", ctx->head->field->type->arraysize[0], ctx->enc_count); return -1; } } if (!ctx->is_valid_array) { PyErr_Format(PyExc_ValueError, "Expected %d dimensions, got %d", ctx->head->field->type->ndim, ndim); return -1; } for (i = 0; i < ctx->head->field->type->ndim; i++) { arraysize *= ctx->head->field->type->arraysize[i]; } ctx->is_valid_array = 0; ctx->enc_count = 1; } group = __Pyx_BufFmt_TypeCharToGroup(ctx->enc_type, ctx->is_complex); do { __Pyx_StructField* field = ctx->head->field; __Pyx_TypeInfo* type = field->type; if (ctx->enc_packmode == '@' || ctx->enc_packmode == '^') { size = __Pyx_BufFmt_TypeCharToNativeSize(ctx->enc_type, ctx->is_complex); } else { size = __Pyx_BufFmt_TypeCharToStandardSize(ctx->enc_type, ctx->is_complex); } if (ctx->enc_packmode == '@') { size_t align_at = __Pyx_BufFmt_TypeCharToAlignment(ctx->enc_type, ctx->is_complex); size_t align_mod_offset; if (align_at == 0) return -1; align_mod_offset = ctx->fmt_offset % align_at; if (align_mod_offset > 0) ctx->fmt_offset += align_at - align_mod_offset; if (ctx->struct_alignment == 0) ctx->struct_alignment = __Pyx_BufFmt_TypeCharToPadding(ctx->enc_type, ctx->is_complex); } if (type->size != size || type->typegroup != group) { if (type->typegroup == 'C' && type->fields != NULL) { size_t parent_offset = ctx->head->parent_offset + field->offset; ++ctx->head; ctx->head->field = type->fields; ctx->head->parent_offset = parent_offset; continue; } if ((type->typegroup == 'H' || group == 'H') && type->size == size) { } else { __Pyx_BufFmt_RaiseExpected(ctx); return -1; } } offset = ctx->head->parent_offset + field->offset; if (ctx->fmt_offset != offset) { PyErr_Format(PyExc_ValueError, "Buffer dtype mismatch; next field is at offset %" CYTHON_FORMAT_SSIZE_T "d but %" CYTHON_FORMAT_SSIZE_T "d expected", (Py_ssize_t)ctx->fmt_offset, (Py_ssize_t)offset); return -1; } ctx->fmt_offset += size; if (arraysize) ctx->fmt_offset += (arraysize - 1) * size; --ctx->enc_count; while (1) { if (field == &ctx->root) { ctx->head = NULL; if (ctx->enc_count != 0) { __Pyx_BufFmt_RaiseExpected(ctx); return -1; } break; } ctx->head->field = ++field; if (field->type == NULL) { --ctx->head; field = 
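/* end-of-fields sentinel reached for this nested struct: pop back to the enclosing struct and continue with its next field */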
ctx->head->field; continue; } else if (field->type->typegroup == 'S') { size_t parent_offset = ctx->head->parent_offset + field->offset; if (field->type->fields->type == NULL) continue; field = field->type->fields; ++ctx->head; ctx->head->field = field; ctx->head->parent_offset = parent_offset; break; } else { break; } } } while (ctx->enc_count); ctx->enc_type = 0; ctx->is_complex = 0; return 0; } static PyObject * __pyx_buffmt_parse_array(__Pyx_BufFmt_Context* ctx, const char** tsp) { const char *ts = *tsp; int i = 0, number; int ndim = ctx->head->field->type->ndim; ; ++ts; if (ctx->new_count != 1) { PyErr_SetString(PyExc_ValueError, "Cannot handle repeated arrays in format string"); return NULL; } if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; while (*ts && *ts != ')') { switch (*ts) { case ' ': case '\f': case '\r': case '\n': case '\t': case '\v': continue; default: break; } number = __Pyx_BufFmt_ExpectNumber(&ts); if (number == -1) return NULL; if (i < ndim && (size_t) number != ctx->head->field->type->arraysize[i]) return PyErr_Format(PyExc_ValueError, "Expected a dimension of size %zu, got %d", ctx->head->field->type->arraysize[i], number); if (*ts != ',' && *ts != ')') return PyErr_Format(PyExc_ValueError, "Expected a comma in format string, got '%c'", *ts); if (*ts == ',') ts++; i++; } if (i != ndim) return PyErr_Format(PyExc_ValueError, "Expected %d dimension(s), got %d", ctx->head->field->type->ndim, i); if (!*ts) { PyErr_SetString(PyExc_ValueError, "Unexpected end of format string, expected ')'"); return NULL; } ctx->is_valid_array = 1; ctx->new_count = 1; *tsp = ++ts; return Py_None; } static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts) { int got_Z = 0; while (1) { switch(*ts) { case 0: if (ctx->enc_type != 0 && ctx->head == NULL) { __Pyx_BufFmt_RaiseExpected(ctx); return NULL; } if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; if (ctx->head != NULL) { __Pyx_BufFmt_RaiseExpected(ctx); return NULL; } return ts; case ' ': case '\r': case '\n': ++ts; break; case '<': if (!__Pyx_Is_Little_Endian()) { PyErr_SetString(PyExc_ValueError, "Little-endian buffer not supported on big-endian compiler"); return NULL; } ctx->new_packmode = '='; ++ts; break; case '>': case '!': if (__Pyx_Is_Little_Endian()) { PyErr_SetString(PyExc_ValueError, "Big-endian buffer not supported on little-endian compiler"); return NULL; } ctx->new_packmode = '='; ++ts; break; case '=': case '@': case '^': ctx->new_packmode = *ts++; break; case 'T': { const char* ts_after_sub; size_t i, struct_count = ctx->new_count; size_t struct_alignment = ctx->struct_alignment; ctx->new_count = 1; ++ts; if (*ts != '{') { PyErr_SetString(PyExc_ValueError, "Buffer acquisition: Expected '{' after 'T'"); return NULL; } if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->enc_type = 0; ctx->enc_count = 0; ctx->struct_alignment = 0; ++ts; ts_after_sub = ts; for (i = 0; i != struct_count; ++i) { ts_after_sub = __Pyx_BufFmt_CheckString(ctx, ts); if (!ts_after_sub) return NULL; } ts = ts_after_sub; if (struct_alignment) ctx->struct_alignment = struct_alignment; } break; case '}': { size_t alignment = ctx->struct_alignment; ++ts; if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->enc_type = 0; if (alignment && ctx->fmt_offset % alignment) { ctx->fmt_offset += alignment - (ctx->fmt_offset % alignment); } } return ts; case 'x': if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->fmt_offset += ctx->new_count; ctx->new_count = 1; ctx->enc_count = 0; 
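/* pad bytes ('x'): the remaining statements clear the pending type and pack mode so the next format character starts a fresh chunk */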
ctx->enc_type = 0; ctx->enc_packmode = ctx->new_packmode; ++ts; break; case 'Z': got_Z = 1; ++ts; if (*ts != 'f' && *ts != 'd' && *ts != 'g') { __Pyx_BufFmt_RaiseUnexpectedChar('Z'); return NULL; } case 'c': case 'b': case 'B': case 'h': case 'H': case 'i': case 'I': case 'l': case 'L': case 'q': case 'Q': case 'f': case 'd': case 'g': case 'O': case 'p': if (ctx->enc_type == *ts && got_Z == ctx->is_complex && ctx->enc_packmode == ctx->new_packmode) { ctx->enc_count += ctx->new_count; ctx->new_count = 1; got_Z = 0; ++ts; break; } case 's': if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->enc_count = ctx->new_count; ctx->enc_packmode = ctx->new_packmode; ctx->enc_type = *ts; ctx->is_complex = got_Z; ++ts; ctx->new_count = 1; got_Z = 0; break; case ':': ++ts; while(*ts != ':') ++ts; ++ts; break; case '(': if (!__pyx_buffmt_parse_array(ctx, &ts)) return NULL; break; default: { int number = __Pyx_BufFmt_ExpectNumber(&ts); if (number == -1) return NULL; ctx->new_count = (size_t)number; } } } } /* BufferGetAndValidate */ static CYTHON_INLINE void __Pyx_SafeReleaseBuffer(Py_buffer* info) { if (unlikely(info->buf == NULL)) return; if (info->suboffsets == __Pyx_minusones) info->suboffsets = NULL; __Pyx_ReleaseBuffer(info); } static void __Pyx_ZeroBuffer(Py_buffer* buf) { buf->buf = NULL; buf->obj = NULL; buf->strides = __Pyx_zeros; buf->shape = __Pyx_zeros; buf->suboffsets = __Pyx_minusones; } static int __Pyx__GetBufferAndValidate( Py_buffer* buf, PyObject* obj, __Pyx_TypeInfo* dtype, int flags, int nd, int cast, __Pyx_BufFmt_StackElem* stack) { buf->buf = NULL; if (unlikely(__Pyx_GetBuffer(obj, buf, flags) == -1)) { __Pyx_ZeroBuffer(buf); return -1; } if (unlikely(buf->ndim != nd)) { PyErr_Format(PyExc_ValueError, "Buffer has wrong number of dimensions (expected %d, got %d)", nd, buf->ndim); goto fail; } if (!cast) { __Pyx_BufFmt_Context ctx; __Pyx_BufFmt_Init(&ctx, stack, dtype); if (!__Pyx_BufFmt_CheckString(&ctx, buf->format)) goto fail; } if (unlikely((unsigned)buf->itemsize != dtype->size)) { PyErr_Format(PyExc_ValueError, "Item size of buffer (%" CYTHON_FORMAT_SSIZE_T "d byte%s) does not match size of '%s' (%" CYTHON_FORMAT_SSIZE_T "d byte%s)", buf->itemsize, (buf->itemsize > 1) ? "s" : "", dtype->name, (Py_ssize_t)dtype->size, (dtype->size > 1) ? 
"s" : ""); goto fail; } if (buf->suboffsets == NULL) buf->suboffsets = __Pyx_minusones; return 0; fail:; __Pyx_SafeReleaseBuffer(buf); return -1; } /* PyErrFetchRestore */ #if CYTHON_FAST_THREAD_STATE static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) { PyObject *tmp_type, *tmp_value, *tmp_tb; tmp_type = tstate->curexc_type; tmp_value = tstate->curexc_value; tmp_tb = tstate->curexc_traceback; tstate->curexc_type = type; tstate->curexc_value = value; tstate->curexc_traceback = tb; Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); } static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { *type = tstate->curexc_type; *value = tstate->curexc_value; *tb = tstate->curexc_traceback; tstate->curexc_type = 0; tstate->curexc_value = 0; tstate->curexc_traceback = 0; } #endif /* RaiseException */ #if PY_MAJOR_VERSION < 3 static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, CYTHON_UNUSED PyObject *cause) { __Pyx_PyThreadState_declare Py_XINCREF(type); if (!value || value == Py_None) value = NULL; else Py_INCREF(value); if (!tb || tb == Py_None) tb = NULL; else { Py_INCREF(tb); if (!PyTraceBack_Check(tb)) { PyErr_SetString(PyExc_TypeError, "raise: arg 3 must be a traceback or None"); goto raise_error; } } if (PyType_Check(type)) { #if CYTHON_COMPILING_IN_PYPY if (!value) { Py_INCREF(Py_None); value = Py_None; } #endif PyErr_NormalizeException(&type, &value, &tb); } else { if (value) { PyErr_SetString(PyExc_TypeError, "instance exception may not have a separate value"); goto raise_error; } value = type; type = (PyObject*) Py_TYPE(type); Py_INCREF(type); if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) { PyErr_SetString(PyExc_TypeError, "raise: exception class must be a subclass of BaseException"); goto raise_error; } } __Pyx_PyThreadState_assign __Pyx_ErrRestore(type, value, tb); return; raise_error: Py_XDECREF(value); Py_XDECREF(type); Py_XDECREF(tb); return; } #else static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) { PyObject* owned_instance = NULL; if (tb == Py_None) { tb = 0; } else if (tb && !PyTraceBack_Check(tb)) { PyErr_SetString(PyExc_TypeError, "raise: arg 3 must be a traceback or None"); goto bad; } if (value == Py_None) value = 0; if (PyExceptionInstance_Check(type)) { if (value) { PyErr_SetString(PyExc_TypeError, "instance exception may not have a separate value"); goto bad; } value = type; type = (PyObject*) Py_TYPE(value); } else if (PyExceptionClass_Check(type)) { PyObject *instance_class = NULL; if (value && PyExceptionInstance_Check(value)) { instance_class = (PyObject*) Py_TYPE(value); if (instance_class != type) { int is_subclass = PyObject_IsSubclass(instance_class, type); if (!is_subclass) { instance_class = NULL; } else if (unlikely(is_subclass == -1)) { goto bad; } else { type = instance_class; } } } if (!instance_class) { PyObject *args; if (!value) args = PyTuple_New(0); else if (PyTuple_Check(value)) { Py_INCREF(value); args = value; } else args = PyTuple_Pack(1, value); if (!args) goto bad; owned_instance = PyObject_Call(type, args, NULL); Py_DECREF(args); if (!owned_instance) goto bad; value = owned_instance; if (!PyExceptionInstance_Check(value)) { PyErr_Format(PyExc_TypeError, "calling %R should have returned an instance of " "BaseException, not %R", type, Py_TYPE(value)); goto bad; } } } else { PyErr_SetString(PyExc_TypeError, "raise: 
exception class must be a subclass of BaseException"); goto bad; } if (cause) { PyObject *fixed_cause; if (cause == Py_None) { fixed_cause = NULL; } else if (PyExceptionClass_Check(cause)) { fixed_cause = PyObject_CallObject(cause, NULL); if (fixed_cause == NULL) goto bad; } else if (PyExceptionInstance_Check(cause)) { fixed_cause = cause; Py_INCREF(fixed_cause); } else { PyErr_SetString(PyExc_TypeError, "exception causes must derive from " "BaseException"); goto bad; } PyException_SetCause(value, fixed_cause); } PyErr_SetObject(type, value); if (tb) { #if CYTHON_COMPILING_IN_PYPY PyObject *tmp_type, *tmp_value, *tmp_tb; PyErr_Fetch(&tmp_type, &tmp_value, &tmp_tb); Py_INCREF(tb); PyErr_Restore(tmp_type, tmp_value, tb); Py_XDECREF(tmp_tb); #else PyThreadState *tstate = __Pyx_PyThreadState_Current; PyObject* tmp_tb = tstate->curexc_traceback; if (tb != tmp_tb) { Py_INCREF(tb); tstate->curexc_traceback = tb; Py_XDECREF(tmp_tb); } #endif } bad: Py_XDECREF(owned_instance); return; } #endif /* BufferIndexError */ static void __Pyx_RaiseBufferIndexError(int axis) { PyErr_Format(PyExc_IndexError, "Out of bounds on buffer access (axis %d)", axis); } /* WriteUnraisableException */ static void __Pyx_WriteUnraisable(const char *name, CYTHON_UNUSED int clineno, CYTHON_UNUSED int lineno, CYTHON_UNUSED const char *filename, int full_traceback, CYTHON_UNUSED int nogil) { PyObject *old_exc, *old_val, *old_tb; PyObject *ctx; __Pyx_PyThreadState_declare #ifdef WITH_THREAD PyGILState_STATE state; if (nogil) state = PyGILState_Ensure(); #ifdef _MSC_VER else state = (PyGILState_STATE)-1; #endif #endif __Pyx_PyThreadState_assign __Pyx_ErrFetch(&old_exc, &old_val, &old_tb); if (full_traceback) { Py_XINCREF(old_exc); Py_XINCREF(old_val); Py_XINCREF(old_tb); __Pyx_ErrRestore(old_exc, old_val, old_tb); PyErr_PrintEx(1); } #if PY_MAJOR_VERSION < 3 ctx = PyString_FromString(name); #else ctx = PyUnicode_FromString(name); #endif __Pyx_ErrRestore(old_exc, old_val, old_tb); if (!ctx) { PyErr_WriteUnraisable(Py_None); } else { PyErr_WriteUnraisable(ctx); Py_DECREF(ctx); } #ifdef WITH_THREAD if (nogil) PyGILState_Release(state); #endif } /* RaiseTooManyValuesToUnpack */ static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected) { PyErr_Format(PyExc_ValueError, "too many values to unpack (expected %" CYTHON_FORMAT_SSIZE_T "d)", expected); } /* RaiseNeedMoreValuesToUnpack */ static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index) { PyErr_Format(PyExc_ValueError, "need more than %" CYTHON_FORMAT_SSIZE_T "d value%.1s to unpack", index, (index == 1) ? 
"" : "s"); } /* RaiseNoneIterError */ static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable"); } /* SaveResetException */ #if CYTHON_FAST_THREAD_STATE static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { *type = tstate->exc_type; *value = tstate->exc_value; *tb = tstate->exc_traceback; Py_XINCREF(*type); Py_XINCREF(*value); Py_XINCREF(*tb); } static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) { PyObject *tmp_type, *tmp_value, *tmp_tb; tmp_type = tstate->exc_type; tmp_value = tstate->exc_value; tmp_tb = tstate->exc_traceback; tstate->exc_type = type; tstate->exc_value = value; tstate->exc_traceback = tb; Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); } #endif /* PyErrExceptionMatches */ #if CYTHON_FAST_THREAD_STATE static int __Pyx_PyErr_ExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) { Py_ssize_t i, n; n = PyTuple_GET_SIZE(tuple); #if PY_MAJOR_VERSION >= 3 for (i=0; icurexc_type; if (exc_type == err) return 1; if (unlikely(!exc_type)) return 0; if (unlikely(PyTuple_Check(err))) return __Pyx_PyErr_ExceptionMatchesTuple(exc_type, err); return __Pyx_PyErr_GivenExceptionMatches(exc_type, err); } #endif /* GetException */ #if CYTHON_FAST_THREAD_STATE static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { #else static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb) { #endif PyObject *local_type, *local_value, *local_tb; #if CYTHON_FAST_THREAD_STATE PyObject *tmp_type, *tmp_value, *tmp_tb; local_type = tstate->curexc_type; local_value = tstate->curexc_value; local_tb = tstate->curexc_traceback; tstate->curexc_type = 0; tstate->curexc_value = 0; tstate->curexc_traceback = 0; #else PyErr_Fetch(&local_type, &local_value, &local_tb); #endif PyErr_NormalizeException(&local_type, &local_value, &local_tb); #if CYTHON_FAST_THREAD_STATE if (unlikely(tstate->curexc_type)) #else if (unlikely(PyErr_Occurred())) #endif goto bad; #if PY_MAJOR_VERSION >= 3 if (local_tb) { if (unlikely(PyException_SetTraceback(local_value, local_tb) < 0)) goto bad; } #endif Py_XINCREF(local_tb); Py_XINCREF(local_type); Py_XINCREF(local_value); *type = local_type; *value = local_value; *tb = local_tb; #if CYTHON_FAST_THREAD_STATE tmp_type = tstate->exc_type; tmp_value = tstate->exc_value; tmp_tb = tstate->exc_traceback; tstate->exc_type = local_type; tstate->exc_value = local_value; tstate->exc_traceback = local_tb; Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); #else PyErr_SetExcInfo(local_type, local_value, local_tb); #endif return 0; bad: *type = 0; *value = 0; *tb = 0; Py_XDECREF(local_type); Py_XDECREF(local_value); Py_XDECREF(local_tb); return -1; } /* Import */ static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level) { PyObject *empty_list = 0; PyObject *module = 0; PyObject *global_dict = 0; PyObject *empty_dict = 0; PyObject *list; #if PY_MAJOR_VERSION < 3 PyObject *py_import; py_import = __Pyx_PyObject_GetAttrStr(__pyx_b, __pyx_n_s_import); if (!py_import) goto bad; #endif if (from_list) list = from_list; else { empty_list = PyList_New(0); if (!empty_list) goto bad; list = empty_list; } global_dict = PyModule_GetDict(__pyx_m); if (!global_dict) goto bad; empty_dict = PyDict_New(); if (!empty_dict) goto bad; { #if PY_MAJOR_VERSION >= 3 if (level == -1) { if 
(strchr(__Pyx_MODULE_NAME, '.')) { module = PyImport_ImportModuleLevelObject( name, global_dict, empty_dict, list, 1); if (!module) { if (!PyErr_ExceptionMatches(PyExc_ImportError)) goto bad; PyErr_Clear(); } } level = 0; } #endif if (!module) { #if PY_MAJOR_VERSION < 3 PyObject *py_level = PyInt_FromLong(level); if (!py_level) goto bad; module = PyObject_CallFunctionObjArgs(py_import, name, global_dict, empty_dict, list, py_level, NULL); Py_DECREF(py_level); #else module = PyImport_ImportModuleLevelObject( name, global_dict, empty_dict, list, level); #endif } } bad: #if PY_MAJOR_VERSION < 3 Py_XDECREF(py_import); #endif Py_XDECREF(empty_list); Py_XDECREF(empty_dict); return module; } /* CLineInTraceback */ #ifndef CYTHON_CLINE_IN_TRACEBACK static int __Pyx_CLineForTraceback(CYTHON_UNUSED PyThreadState *tstate, int c_line) { PyObject *use_cline; PyObject *ptype, *pvalue, *ptraceback; #if CYTHON_COMPILING_IN_CPYTHON PyObject **cython_runtime_dict; #endif __Pyx_ErrFetchInState(tstate, &ptype, &pvalue, &ptraceback); #if CYTHON_COMPILING_IN_CPYTHON cython_runtime_dict = _PyObject_GetDictPtr(__pyx_cython_runtime); if (likely(cython_runtime_dict)) { use_cline = PyDict_GetItem(*cython_runtime_dict, __pyx_n_s_cline_in_traceback); } else #endif { PyObject *use_cline_obj = __Pyx_PyObject_GetAttrStr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback); if (use_cline_obj) { use_cline = PyObject_Not(use_cline_obj) ? Py_False : Py_True; Py_DECREF(use_cline_obj); } else { PyErr_Clear(); use_cline = NULL; } } if (!use_cline) { c_line = 0; PyObject_SetAttr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback, Py_False); } else if (PyObject_Not(use_cline) != 0) { c_line = 0; } __Pyx_ErrRestoreInState(tstate, ptype, pvalue, ptraceback); return c_line; } #endif /* CodeObjectCache */ static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) { int start = 0, mid = 0, end = count - 1; if (end >= 0 && code_line > entries[end].code_line) { return count; } while (start < end) { mid = start + (end - start) / 2; if (code_line < entries[mid].code_line) { end = mid; } else if (code_line > entries[mid].code_line) { start = mid + 1; } else { return mid; } } if (code_line <= entries[mid].code_line) { return mid; } else { return mid + 1; } } static PyCodeObject *__pyx_find_code_object(int code_line) { PyCodeObject* code_object; int pos; if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) { return NULL; } pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); if (unlikely(pos >= __pyx_code_cache.count) || unlikely(__pyx_code_cache.entries[pos].code_line != code_line)) { return NULL; } code_object = __pyx_code_cache.entries[pos].code_object; Py_INCREF(code_object); return code_object; } static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) { int pos, i; __Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries; if (unlikely(!code_line)) { return; } if (unlikely(!entries)) { entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry)); if (likely(entries)) { __pyx_code_cache.entries = entries; __pyx_code_cache.max_count = 64; __pyx_code_cache.count = 1; entries[0].code_line = code_line; entries[0].code_object = code_object; Py_INCREF(code_object); } return; } pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) { PyCodeObject* tmp = 
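/* this source line already has a cached code object: swap in the new one and drop the old reference */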
entries[pos].code_object; entries[pos].code_object = code_object; Py_DECREF(tmp); return; } if (__pyx_code_cache.count == __pyx_code_cache.max_count) { int new_max = __pyx_code_cache.max_count + 64; entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc( __pyx_code_cache.entries, (size_t)new_max*sizeof(__Pyx_CodeObjectCacheEntry)); if (unlikely(!entries)) { return; } __pyx_code_cache.entries = entries; __pyx_code_cache.max_count = new_max; } for (i=__pyx_code_cache.count; i>pos; i--) { entries[i] = entries[i-1]; } entries[pos].code_line = code_line; entries[pos].code_object = code_object; __pyx_code_cache.count++; Py_INCREF(code_object); } /* AddTraceback */ #include "compile.h" #include "frameobject.h" #include "traceback.h" static PyCodeObject* __Pyx_CreateCodeObjectForTraceback( const char *funcname, int c_line, int py_line, const char *filename) { PyCodeObject *py_code = 0; PyObject *py_srcfile = 0; PyObject *py_funcname = 0; #if PY_MAJOR_VERSION < 3 py_srcfile = PyString_FromString(filename); #else py_srcfile = PyUnicode_FromString(filename); #endif if (!py_srcfile) goto bad; if (c_line) { #if PY_MAJOR_VERSION < 3 py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); #else py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); #endif } else { #if PY_MAJOR_VERSION < 3 py_funcname = PyString_FromString(funcname); #else py_funcname = PyUnicode_FromString(funcname); #endif } if (!py_funcname) goto bad; py_code = __Pyx_PyCode_New( 0, 0, 0, 0, 0, __pyx_empty_bytes, /*PyObject *code,*/ __pyx_empty_tuple, /*PyObject *consts,*/ __pyx_empty_tuple, /*PyObject *names,*/ __pyx_empty_tuple, /*PyObject *varnames,*/ __pyx_empty_tuple, /*PyObject *freevars,*/ __pyx_empty_tuple, /*PyObject *cellvars,*/ py_srcfile, /*PyObject *filename,*/ py_funcname, /*PyObject *name,*/ py_line, __pyx_empty_bytes /*PyObject *lnotab*/ ); Py_DECREF(py_srcfile); Py_DECREF(py_funcname); return py_code; bad: Py_XDECREF(py_srcfile); Py_XDECREF(py_funcname); return NULL; } static void __Pyx_AddTraceback(const char *funcname, int c_line, int py_line, const char *filename) { PyCodeObject *py_code = 0; PyFrameObject *py_frame = 0; PyThreadState *tstate = __Pyx_PyThreadState_Current; if (c_line) { c_line = __Pyx_CLineForTraceback(tstate, c_line); } py_code = __pyx_find_code_object(c_line ? -c_line : py_line); if (!py_code) { py_code = __Pyx_CreateCodeObjectForTraceback( funcname, c_line, py_line, filename); if (!py_code) goto bad; __pyx_insert_code_object(c_line ? 
-c_line : py_line, py_code); } py_frame = PyFrame_New( tstate, /*PyThreadState *tstate,*/ py_code, /*PyCodeObject *code,*/ __pyx_d, /*PyObject *globals,*/ 0 /*PyObject *locals*/ ); if (!py_frame) goto bad; __Pyx_PyFrame_SetLineNumber(py_frame, py_line); PyTraceBack_Here(py_frame); bad: Py_XDECREF(py_code); Py_XDECREF(py_frame); } #if PY_MAJOR_VERSION < 3 static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags) { if (PyObject_CheckBuffer(obj)) return PyObject_GetBuffer(obj, view, flags); if (__Pyx_TypeCheck(obj, __pyx_ptype_5numpy_ndarray)) return __pyx_pw_5numpy_7ndarray_1__getbuffer__(obj, view, flags); PyErr_Format(PyExc_TypeError, "'%.200s' does not have the buffer interface", Py_TYPE(obj)->tp_name); return -1; } static void __Pyx_ReleaseBuffer(Py_buffer *view) { PyObject *obj = view->obj; if (!obj) return; if (PyObject_CheckBuffer(obj)) { PyBuffer_Release(view); return; } if ((0)) {} else if (__Pyx_TypeCheck(obj, __pyx_ptype_5numpy_ndarray)) __pyx_pw_5numpy_7ndarray_3__releasebuffer__(obj, view); view->obj = NULL; Py_DECREF(obj); } #endif /* CIntFromPyVerify */ #define __PYX_VERIFY_RETURN_INT(target_type, func_type, func_value)\ __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 0) #define __PYX_VERIFY_RETURN_INT_EXC(target_type, func_type, func_value)\ __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 1) #define __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, exc)\ {\ func_type value = func_value;\ if (sizeof(target_type) < sizeof(func_type)) {\ if (unlikely(value != (func_type) (target_type) value)) {\ func_type zero = 0;\ if (exc && unlikely(value == (func_type)-1 && PyErr_Occurred()))\ return (target_type) -1;\ if (is_unsigned && unlikely(value < zero))\ goto raise_neg_overflow;\ else\ goto raise_overflow;\ }\ }\ return (target_type) value;\ } /* CIntToPy */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value) { const int neg_one = (int) -1, const_zero = (int) 0; const int is_unsigned = neg_one > const_zero; if (is_unsigned) { if (sizeof(int) < sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(int) <= sizeof(unsigned long)) { return PyLong_FromUnsignedLong((unsigned long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) { return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); #endif } } else { if (sizeof(int) <= sizeof(long)) { return PyInt_FromLong((long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) { return PyLong_FromLongLong((PY_LONG_LONG) value); #endif } } { int one = 1; int little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&value; return _PyLong_FromByteArray(bytes, sizeof(int), little, !is_unsigned); } } /* Declarations */ #if CYTHON_CCOMPLEX #ifdef __cplusplus static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { return ::std::complex< float >(x, y); } #else static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { return x + y*(__pyx_t_float_complex)_Complex_I; } #endif #else static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { __pyx_t_float_complex z; z.real = x; z.imag = y; return z; } #endif /* Arithmetic */ #if CYTHON_CCOMPLEX #else static CYTHON_INLINE int __Pyx_c_eq_float(__pyx_t_float_complex a, __pyx_t_float_complex b) { return (a.real == b.real) && (a.imag == b.imag); } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sum_float(__pyx_t_float_complex a, 
__pyx_t_float_complex b) { __pyx_t_float_complex z; z.real = a.real + b.real; z.imag = a.imag + b.imag; return z; } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_diff_float(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; z.real = a.real - b.real; z.imag = a.imag - b.imag; return z; } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prod_float(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; z.real = a.real * b.real - a.imag * b.imag; z.imag = a.real * b.imag + a.imag * b.real; return z; } #if 1 static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quot_float(__pyx_t_float_complex a, __pyx_t_float_complex b) { if (b.imag == 0) { return __pyx_t_float_complex_from_parts(a.real / b.real, a.imag / b.real); } else if (fabsf(b.real) >= fabsf(b.imag)) { if (b.real == 0 && b.imag == 0) { return __pyx_t_float_complex_from_parts(a.real / b.real, a.imag / b.imag); } else { float r = b.imag / b.real; float s = 1.0 / (b.real + b.imag * r); return __pyx_t_float_complex_from_parts( (a.real + a.imag * r) * s, (a.imag - a.real * r) * s); } } else { float r = b.real / b.imag; float s = 1.0 / (b.imag + b.real * r); return __pyx_t_float_complex_from_parts( (a.real * r + a.imag) * s, (a.imag * r - a.real) * s); } } #else static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quot_float(__pyx_t_float_complex a, __pyx_t_float_complex b) { if (b.imag == 0) { return __pyx_t_float_complex_from_parts(a.real / b.real, a.imag / b.real); } else { float denom = b.real * b.real + b.imag * b.imag; return __pyx_t_float_complex_from_parts( (a.real * b.real + a.imag * b.imag) / denom, (a.imag * b.real - a.real * b.imag) / denom); } } #endif static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_neg_float(__pyx_t_float_complex a) { __pyx_t_float_complex z; z.real = -a.real; z.imag = -a.imag; return z; } static CYTHON_INLINE int __Pyx_c_is_zero_float(__pyx_t_float_complex a) { return (a.real == 0) && (a.imag == 0); } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conj_float(__pyx_t_float_complex a) { __pyx_t_float_complex z; z.real = a.real; z.imag = -a.imag; return z; } #if 1 static CYTHON_INLINE float __Pyx_c_abs_float(__pyx_t_float_complex z) { #if !defined(HAVE_HYPOT) || defined(_MSC_VER) return sqrtf(z.real*z.real + z.imag*z.imag); #else return hypotf(z.real, z.imag); #endif } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_pow_float(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; float r, lnr, theta, z_r, z_theta; if (b.imag == 0 && b.real == (int)b.real) { if (b.real < 0) { float denom = a.real * a.real + a.imag * a.imag; a.real = a.real / denom; a.imag = -a.imag / denom; b.real = -b.real; } switch ((int)b.real) { case 0: z.real = 1; z.imag = 0; return z; case 1: return a; case 2: z = __Pyx_c_prod_float(a, a); return __Pyx_c_prod_float(a, a); case 3: z = __Pyx_c_prod_float(a, a); return __Pyx_c_prod_float(z, a); case 4: z = __Pyx_c_prod_float(a, a); return __Pyx_c_prod_float(z, z); } } if (a.imag == 0) { if (a.real == 0) { return a; } else if (b.imag == 0) { z.real = powf(a.real, b.real); z.imag = 0; return z; } else if (a.real > 0) { r = a.real; theta = 0; } else { r = -a.real; theta = atan2f(0, -1); } } else { r = __Pyx_c_abs_float(a); theta = atan2f(a.imag, a.real); } lnr = logf(r); z_r = expf(lnr * b.real - theta * b.imag); z_theta = theta * b.real + lnr * b.imag; z.real = z_r * cosf(z_theta); z.imag = z_r * sinf(z_theta); return z; } #endif #endif /* Declarations */ #if CYTHON_CCOMPLEX #ifdef __cplusplus static CYTHON_INLINE 
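/* double-precision complex constructor and arithmetic helpers, mirroring the float versions above */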
__pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { return ::std::complex< double >(x, y); } #else static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { return x + y*(__pyx_t_double_complex)_Complex_I; } #endif #else static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { __pyx_t_double_complex z; z.real = x; z.imag = y; return z; } #endif /* Arithmetic */ #if CYTHON_CCOMPLEX #else static CYTHON_INLINE int __Pyx_c_eq_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { return (a.real == b.real) && (a.imag == b.imag); } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; z.real = a.real + b.real; z.imag = a.imag + b.imag; return z; } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; z.real = a.real - b.real; z.imag = a.imag - b.imag; return z; } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; z.real = a.real * b.real - a.imag * b.imag; z.imag = a.real * b.imag + a.imag * b.real; return z; } #if 1 static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { if (b.imag == 0) { return __pyx_t_double_complex_from_parts(a.real / b.real, a.imag / b.real); } else if (fabs(b.real) >= fabs(b.imag)) { if (b.real == 0 && b.imag == 0) { return __pyx_t_double_complex_from_parts(a.real / b.real, a.imag / b.imag); } else { double r = b.imag / b.real; double s = 1.0 / (b.real + b.imag * r); return __pyx_t_double_complex_from_parts( (a.real + a.imag * r) * s, (a.imag - a.real * r) * s); } } else { double r = b.real / b.imag; double s = 1.0 / (b.imag + b.real * r); return __pyx_t_double_complex_from_parts( (a.real * r + a.imag) * s, (a.imag * r - a.real) * s); } } #else static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { if (b.imag == 0) { return __pyx_t_double_complex_from_parts(a.real / b.real, a.imag / b.real); } else { double denom = b.real * b.real + b.imag * b.imag; return __pyx_t_double_complex_from_parts( (a.real * b.real + a.imag * b.imag) / denom, (a.imag * b.real - a.real * b.imag) / denom); } } #endif static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg_double(__pyx_t_double_complex a) { __pyx_t_double_complex z; z.real = -a.real; z.imag = -a.imag; return z; } static CYTHON_INLINE int __Pyx_c_is_zero_double(__pyx_t_double_complex a) { return (a.real == 0) && (a.imag == 0); } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj_double(__pyx_t_double_complex a) { __pyx_t_double_complex z; z.real = a.real; z.imag = -a.imag; return z; } #if 1 static CYTHON_INLINE double __Pyx_c_abs_double(__pyx_t_double_complex z) { #if !defined(HAVE_HYPOT) || defined(_MSC_VER) return sqrt(z.real*z.real + z.imag*z.imag); #else return hypot(z.real, z.imag); #endif } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; double r, lnr, theta, z_r, z_theta; if (b.imag == 0 && b.real == (int)b.real) { if (b.real < 0) { double denom = a.real * a.real + a.imag * a.imag; a.real = a.real / denom; a.imag = -a.imag / denom; b.real = -b.real; } switch ((int)b.real) { case 0: z.real = 1; z.imag = 0; return z; case 1: 
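/* integer exponents 0-4 are expanded into direct complex multiplications instead of the polar log/exp path below */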
return a; case 2: z = __Pyx_c_prod_double(a, a); return __Pyx_c_prod_double(a, a); case 3: z = __Pyx_c_prod_double(a, a); return __Pyx_c_prod_double(z, a); case 4: z = __Pyx_c_prod_double(a, a); return __Pyx_c_prod_double(z, z); } } if (a.imag == 0) { if (a.real == 0) { return a; } else if (b.imag == 0) { z.real = pow(a.real, b.real); z.imag = 0; return z; } else if (a.real > 0) { r = a.real; theta = 0; } else { r = -a.real; theta = atan2(0, -1); } } else { r = __Pyx_c_abs_double(a); theta = atan2(a.imag, a.real); } lnr = log(r); z_r = exp(lnr * b.real - theta * b.imag); z_theta = theta * b.real + lnr * b.imag; z.real = z_r * cos(z_theta); z.imag = z_r * sin(z_theta); return z; } #endif #endif /* CIntToPy */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_enum__NPY_TYPES(enum NPY_TYPES value) { const enum NPY_TYPES neg_one = (enum NPY_TYPES) -1, const_zero = (enum NPY_TYPES) 0; const int is_unsigned = neg_one > const_zero; if (is_unsigned) { if (sizeof(enum NPY_TYPES) < sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(enum NPY_TYPES) <= sizeof(unsigned long)) { return PyLong_FromUnsignedLong((unsigned long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(enum NPY_TYPES) <= sizeof(unsigned PY_LONG_LONG)) { return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); #endif } } else { if (sizeof(enum NPY_TYPES) <= sizeof(long)) { return PyInt_FromLong((long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(enum NPY_TYPES) <= sizeof(PY_LONG_LONG)) { return PyLong_FromLongLong((PY_LONG_LONG) value); #endif } } { int one = 1; int little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&value; return _PyLong_FromByteArray(bytes, sizeof(enum NPY_TYPES), little, !is_unsigned); } } /* CIntFromPy */ static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *x) { const int neg_one = (int) -1, const_zero = (int) 0; const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(int) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(int, long, PyInt_AS_LONG(x)) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { goto raise_neg_overflow; } return (int) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (int) 0; case 1: __PYX_VERIFY_RETURN_INT(int, digit, digits[0]) case 2: if (8 * sizeof(int) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) >= 2 * PyLong_SHIFT) { return (int) (((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); } } break; case 3: if (8 * sizeof(int) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) >= 3 * PyLong_SHIFT) { return (int) (((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); } } break; case 4: if (8 * sizeof(int) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } 
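/* unsigned long cannot hold four PyLong digits here; fall back to combining them directly in an int when int is wide enough */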
else if (8 * sizeof(int) >= 4 * PyLong_SHIFT) { return (int) (((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); } } break; } #endif #if CYTHON_COMPILING_IN_CPYTHON if (unlikely(Py_SIZE(x) < 0)) { goto raise_neg_overflow; } #else { int result = PyObject_RichCompareBool(x, Py_False, Py_LT); if (unlikely(result < 0)) return (int) -1; if (unlikely(result == 1)) goto raise_neg_overflow; } #endif if (sizeof(int) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT_EXC(int, unsigned long, PyLong_AsUnsignedLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(int, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) #endif } } else { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (int) 0; case -1: __PYX_VERIFY_RETURN_INT(int, sdigit, (sdigit) (-(sdigit)digits[0])) case 1: __PYX_VERIFY_RETURN_INT(int, digit, +digits[0]) case -2: if (8 * sizeof(int) - 1 > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { return (int) (((int)-1)*(((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case 2: if (8 * sizeof(int) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { return (int) ((((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case -3: if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { return (int) (((int)-1)*(((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case 3: if (8 * sizeof(int) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { return (int) ((((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case -4: if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) { return (int) (((int)-1)*(((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case 4: if (8 * sizeof(int) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * 
sizeof(int) - 1 > 4 * PyLong_SHIFT) { return (int) ((((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; } #endif if (sizeof(int) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT_EXC(int, long, PyLong_AsLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(int, PY_LONG_LONG, PyLong_AsLongLong(x)) #endif } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else int val; PyObject *v = __Pyx_PyNumber_IntOrLong(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (int) -1; } } else { int val; PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); if (!tmp) return (int) -1; val = __Pyx_PyInt_As_int(tmp); Py_DECREF(tmp); return val; } raise_overflow: PyErr_SetString(PyExc_OverflowError, "value too large to convert to int"); return (int) -1; raise_neg_overflow: PyErr_SetString(PyExc_OverflowError, "can't convert negative value to int"); return (int) -1; } /* CIntFromPy */ static CYTHON_INLINE unsigned int __Pyx_PyInt_As_unsigned_int(PyObject *x) { const unsigned int neg_one = (unsigned int) -1, const_zero = (unsigned int) 0; const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(unsigned int) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(unsigned int, long, PyInt_AS_LONG(x)) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { goto raise_neg_overflow; } return (unsigned int) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (unsigned int) 0; case 1: __PYX_VERIFY_RETURN_INT(unsigned int, digit, digits[0]) case 2: if (8 * sizeof(unsigned int) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(unsigned int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(unsigned int) >= 2 * PyLong_SHIFT) { return (unsigned int) (((((unsigned int)digits[1]) << PyLong_SHIFT) | (unsigned int)digits[0])); } } break; case 3: if (8 * sizeof(unsigned int) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(unsigned int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(unsigned int) >= 3 * PyLong_SHIFT) { return (unsigned int) (((((((unsigned int)digits[2]) << PyLong_SHIFT) | (unsigned int)digits[1]) << PyLong_SHIFT) | (unsigned int)digits[0])); } } break; case 4: if (8 * sizeof(unsigned int) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(unsigned int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(unsigned int) >= 4 * 
PyLong_SHIFT) { return (unsigned int) (((((((((unsigned int)digits[3]) << PyLong_SHIFT) | (unsigned int)digits[2]) << PyLong_SHIFT) | (unsigned int)digits[1]) << PyLong_SHIFT) | (unsigned int)digits[0])); } } break; } #endif #if CYTHON_COMPILING_IN_CPYTHON if (unlikely(Py_SIZE(x) < 0)) { goto raise_neg_overflow; } #else { int result = PyObject_RichCompareBool(x, Py_False, Py_LT); if (unlikely(result < 0)) return (unsigned int) -1; if (unlikely(result == 1)) goto raise_neg_overflow; } #endif if (sizeof(unsigned int) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT_EXC(unsigned int, unsigned long, PyLong_AsUnsignedLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(unsigned int) <= sizeof(unsigned PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(unsigned int, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) #endif } } else { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (unsigned int) 0; case -1: __PYX_VERIFY_RETURN_INT(unsigned int, sdigit, (sdigit) (-(sdigit)digits[0])) case 1: __PYX_VERIFY_RETURN_INT(unsigned int, digit, +digits[0]) case -2: if (8 * sizeof(unsigned int) - 1 > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(unsigned int, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(unsigned int) - 1 > 2 * PyLong_SHIFT) { return (unsigned int) (((unsigned int)-1)*(((((unsigned int)digits[1]) << PyLong_SHIFT) | (unsigned int)digits[0]))); } } break; case 2: if (8 * sizeof(unsigned int) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(unsigned int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(unsigned int) - 1 > 2 * PyLong_SHIFT) { return (unsigned int) ((((((unsigned int)digits[1]) << PyLong_SHIFT) | (unsigned int)digits[0]))); } } break; case -3: if (8 * sizeof(unsigned int) - 1 > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(unsigned int, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(unsigned int) - 1 > 3 * PyLong_SHIFT) { return (unsigned int) (((unsigned int)-1)*(((((((unsigned int)digits[2]) << PyLong_SHIFT) | (unsigned int)digits[1]) << PyLong_SHIFT) | (unsigned int)digits[0]))); } } break; case 3: if (8 * sizeof(unsigned int) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(unsigned int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(unsigned int) - 1 > 3 * PyLong_SHIFT) { return (unsigned int) ((((((((unsigned int)digits[2]) << PyLong_SHIFT) | (unsigned int)digits[1]) << PyLong_SHIFT) | (unsigned int)digits[0]))); } } break; case -4: if (8 * sizeof(unsigned int) - 1 > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(unsigned int, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(unsigned int) - 1 > 4 * PyLong_SHIFT) { return (unsigned int) (((unsigned int)-1)*(((((((((unsigned int)digits[3]) << PyLong_SHIFT) | (unsigned int)digits[2]) << PyLong_SHIFT) | 
(unsigned int)digits[1]) << PyLong_SHIFT) | (unsigned int)digits[0]))); } } break; case 4: if (8 * sizeof(unsigned int) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(unsigned int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(unsigned int) - 1 > 4 * PyLong_SHIFT) { return (unsigned int) ((((((((((unsigned int)digits[3]) << PyLong_SHIFT) | (unsigned int)digits[2]) << PyLong_SHIFT) | (unsigned int)digits[1]) << PyLong_SHIFT) | (unsigned int)digits[0]))); } } break; } #endif if (sizeof(unsigned int) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT_EXC(unsigned int, long, PyLong_AsLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(unsigned int) <= sizeof(PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(unsigned int, PY_LONG_LONG, PyLong_AsLongLong(x)) #endif } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else unsigned int val; PyObject *v = __Pyx_PyNumber_IntOrLong(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (unsigned int) -1; } } else { unsigned int val; PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); if (!tmp) return (unsigned int) -1; val = __Pyx_PyInt_As_unsigned_int(tmp); Py_DECREF(tmp); return val; } raise_overflow: PyErr_SetString(PyExc_OverflowError, "value too large to convert to unsigned int"); return (unsigned int) -1; raise_neg_overflow: PyErr_SetString(PyExc_OverflowError, "can't convert negative value to unsigned int"); return (unsigned int) -1; } /* CIntToPy */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value) { const long neg_one = (long) -1, const_zero = (long) 0; const int is_unsigned = neg_one > const_zero; if (is_unsigned) { if (sizeof(long) < sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(long) <= sizeof(unsigned long)) { return PyLong_FromUnsignedLong((unsigned long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) { return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); #endif } } else { if (sizeof(long) <= sizeof(long)) { return PyInt_FromLong((long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) { return PyLong_FromLongLong((PY_LONG_LONG) value); #endif } } { int one = 1; int little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&value; return _PyLong_FromByteArray(bytes, sizeof(long), little, !is_unsigned); } } /* CIntFromPy */ static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *x) { const long neg_one = (long) -1, const_zero = (long) 0; const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(long) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(long, long, PyInt_AS_LONG(x)) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { goto raise_neg_overflow; } return (long) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if 
CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (long) 0; case 1: __PYX_VERIFY_RETURN_INT(long, digit, digits[0]) case 2: if (8 * sizeof(long) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) >= 2 * PyLong_SHIFT) { return (long) (((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); } } break; case 3: if (8 * sizeof(long) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) >= 3 * PyLong_SHIFT) { return (long) (((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); } } break; case 4: if (8 * sizeof(long) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) >= 4 * PyLong_SHIFT) { return (long) (((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); } } break; } #endif #if CYTHON_COMPILING_IN_CPYTHON if (unlikely(Py_SIZE(x) < 0)) { goto raise_neg_overflow; } #else { int result = PyObject_RichCompareBool(x, Py_False, Py_LT); if (unlikely(result < 0)) return (long) -1; if (unlikely(result == 1)) goto raise_neg_overflow; } #endif if (sizeof(long) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT_EXC(long, unsigned long, PyLong_AsUnsignedLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(long, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) #endif } } else { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (long) 0; case -1: __PYX_VERIFY_RETURN_INT(long, sdigit, (sdigit) (-(sdigit)digits[0])) case 1: __PYX_VERIFY_RETURN_INT(long, digit, +digits[0]) case -2: if (8 * sizeof(long) - 1 > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { return (long) (((long)-1)*(((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case 2: if (8 * sizeof(long) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { return (long) ((((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case -3: if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { return (long) (((long)-1)*(((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | 
(long)digits[0]))); } } break; case 3: if (8 * sizeof(long) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { return (long) ((((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case -4: if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { return (long) (((long)-1)*(((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case 4: if (8 * sizeof(long) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { return (long) ((((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; } #endif if (sizeof(long) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT_EXC(long, long, PyLong_AsLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(long, PY_LONG_LONG, PyLong_AsLongLong(x)) #endif } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else long val; PyObject *v = __Pyx_PyNumber_IntOrLong(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (long) -1; } } else { long val; PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); if (!tmp) return (long) -1; val = __Pyx_PyInt_As_long(tmp); Py_DECREF(tmp); return val; } raise_overflow: PyErr_SetString(PyExc_OverflowError, "value too large to convert to long"); return (long) -1; raise_neg_overflow: PyErr_SetString(PyExc_OverflowError, "can't convert negative value to long"); return (long) -1; } /* FastTypeChecks */ #if CYTHON_COMPILING_IN_CPYTHON static int __Pyx_InBases(PyTypeObject *a, PyTypeObject *b) { while (a) { a = a->tp_base; if (a == b) return 1; } return b == &PyBaseObject_Type; } static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b) { PyObject *mro; if (a == b) return 1; mro = a->tp_mro; if (likely(mro)) { Py_ssize_t i, n; n = PyTuple_GET_SIZE(mro); for (i = 0; i < n; i++) { if (PyTuple_GET_ITEM(mro, i) == (PyObject *)b) return 1; } return 0; } return __Pyx_InBases(a, b); } #if PY_MAJOR_VERSION == 2 static int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject* exc_type2) { PyObject *exception, *value, *tb; 
int res; __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ErrFetch(&exception, &value, &tb); res = exc_type1 ? PyObject_IsSubclass(err, exc_type1) : 0; if (unlikely(res == -1)) { PyErr_WriteUnraisable(err); res = 0; } if (!res) { res = PyObject_IsSubclass(err, exc_type2); if (unlikely(res == -1)) { PyErr_WriteUnraisable(err); res = 0; } } __Pyx_ErrRestore(exception, value, tb); return res; } #else static CYTHON_INLINE int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject *exc_type2) { int res = exc_type1 ? __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type1) : 0; if (!res) { res = __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type2); } return res; } #endif static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject* exc_type) { if (likely(err == exc_type)) return 1; if (likely(PyExceptionClass_Check(err))) { return __Pyx_inner_PyErr_GivenExceptionMatches2(err, NULL, exc_type); } return PyErr_GivenExceptionMatches(err, exc_type); } static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *exc_type1, PyObject *exc_type2) { if (likely(err == exc_type1 || err == exc_type2)) return 1; if (likely(PyExceptionClass_Check(err))) { return __Pyx_inner_PyErr_GivenExceptionMatches2(err, exc_type1, exc_type2); } return (PyErr_GivenExceptionMatches(err, exc_type1) || PyErr_GivenExceptionMatches(err, exc_type2)); } #endif /* CheckBinaryVersion */ static int __Pyx_check_binary_version(void) { char ctversion[4], rtversion[4]; PyOS_snprintf(ctversion, 4, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION); PyOS_snprintf(rtversion, 4, "%s", Py_GetVersion()); if (ctversion[0] != rtversion[0] || ctversion[2] != rtversion[2]) { char message[200]; PyOS_snprintf(message, sizeof(message), "compiletime version %s of module '%.100s' " "does not match runtime version %s", ctversion, __Pyx_MODULE_NAME, rtversion); return PyErr_WarnEx(NULL, message, 1); } return 0; } /* ModuleImport */ #ifndef __PYX_HAVE_RT_ImportModule #define __PYX_HAVE_RT_ImportModule static PyObject *__Pyx_ImportModule(const char *name) { PyObject *py_name = 0; PyObject *py_module = 0; py_name = __Pyx_PyIdentifier_FromString(name); if (!py_name) goto bad; py_module = PyImport_Import(py_name); Py_DECREF(py_name); return py_module; bad: Py_XDECREF(py_name); return 0; } #endif /* TypeImport */ #ifndef __PYX_HAVE_RT_ImportType #define __PYX_HAVE_RT_ImportType static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class_name, size_t size, int strict) { PyObject *py_module = 0; PyObject *result = 0; PyObject *py_name = 0; char warning[200]; Py_ssize_t basicsize; #ifdef Py_LIMITED_API PyObject *py_basicsize; #endif py_module = __Pyx_ImportModule(module_name); if (!py_module) goto bad; py_name = __Pyx_PyIdentifier_FromString(class_name); if (!py_name) goto bad; result = PyObject_GetAttr(py_module, py_name); Py_DECREF(py_name); py_name = 0; Py_DECREF(py_module); py_module = 0; if (!result) goto bad; if (!PyType_Check(result)) { PyErr_Format(PyExc_TypeError, "%.200s.%.200s is not a type object", module_name, class_name); goto bad; } #ifndef Py_LIMITED_API basicsize = ((PyTypeObject *)result)->tp_basicsize; #else py_basicsize = PyObject_GetAttrString(result, "__basicsize__"); if (!py_basicsize) goto bad; basicsize = PyLong_AsSsize_t(py_basicsize); Py_DECREF(py_basicsize); py_basicsize = 0; if (basicsize == (Py_ssize_t)-1 && PyErr_Occurred()) goto bad; #endif if (!strict && (size_t)basicsize > size) { PyOS_snprintf(warning, 
sizeof(warning), "%s.%s size changed, may indicate binary incompatibility. Expected %zd, got %zd", module_name, class_name, basicsize, size); if (PyErr_WarnEx(NULL, warning, 0) < 0) goto bad; } else if ((size_t)basicsize != size) { PyErr_Format(PyExc_ValueError, "%.200s.%.200s has the wrong size, try recompiling. Expected %zd, got %zd", module_name, class_name, basicsize, size); goto bad; } return (PyTypeObject *)result; bad: Py_XDECREF(py_module); Py_XDECREF(result); return NULL; } #endif /* InitStrings */ static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) { while (t->p) { #if PY_MAJOR_VERSION < 3 if (t->is_unicode) { *t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL); } else if (t->intern) { *t->p = PyString_InternFromString(t->s); } else { *t->p = PyString_FromStringAndSize(t->s, t->n - 1); } #else if (t->is_unicode | t->is_str) { if (t->intern) { *t->p = PyUnicode_InternFromString(t->s); } else if (t->encoding) { *t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL); } else { *t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1); } } else { *t->p = PyBytes_FromStringAndSize(t->s, t->n - 1); } #endif if (!*t->p) return -1; if (PyObject_Hash(*t->p) == -1) PyErr_Clear(); ++t; } return 0; } static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char* c_str) { return __Pyx_PyUnicode_FromStringAndSize(c_str, (Py_ssize_t)strlen(c_str)); } static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject* o) { Py_ssize_t ignore; return __Pyx_PyObject_AsStringAndSize(o, &ignore); } #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT #if !CYTHON_PEP393_ENABLED static const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) { char* defenc_c; PyObject* defenc = _PyUnicode_AsDefaultEncodedString(o, NULL); if (!defenc) return NULL; defenc_c = PyBytes_AS_STRING(defenc); #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII { char* end = defenc_c + PyBytes_GET_SIZE(defenc); char* c; for (c = defenc_c; c < end; c++) { if ((unsigned char) (*c) >= 128) { PyUnicode_AsASCIIString(o); return NULL; } } } #endif *length = PyBytes_GET_SIZE(defenc); return defenc_c; } #else static CYTHON_INLINE const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) { if (unlikely(__Pyx_PyUnicode_READY(o) == -1)) return NULL; #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII if (likely(PyUnicode_IS_ASCII(o))) { *length = PyUnicode_GET_LENGTH(o); return PyUnicode_AsUTF8(o); } else { PyUnicode_AsASCIIString(o); return NULL; } #else return PyUnicode_AsUTF8AndSize(o, length); #endif } #endif #endif static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject* o, Py_ssize_t *length) { #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT if ( #if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII __Pyx_sys_getdefaultencoding_not_ascii && #endif PyUnicode_Check(o)) { return __Pyx_PyUnicode_AsStringAndSize(o, length); } else #endif #if (!CYTHON_COMPILING_IN_PYPY) || (defined(PyByteArray_AS_STRING) && defined(PyByteArray_GET_SIZE)) if (PyByteArray_Check(o)) { *length = PyByteArray_GET_SIZE(o); return PyByteArray_AS_STRING(o); } else #endif { char* result; int r = PyBytes_AsStringAndSize(o, &result, length); if (unlikely(r < 0)) { return NULL; } else { return result; } } } static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) { int is_true = x == Py_True; if (is_true | (x == Py_False) | (x == Py_None)) return is_true; else return PyObject_IsTrue(x); } static PyObject* 
__Pyx_PyNumber_IntOrLongWrongResultType(PyObject* result, const char* type_name) { #if PY_MAJOR_VERSION >= 3 if (PyLong_Check(result)) { if (PyErr_WarnFormat(PyExc_DeprecationWarning, 1, "__int__ returned non-int (type %.200s). " "The ability to return an instance of a strict subclass of int " "is deprecated, and may be removed in a future version of Python.", Py_TYPE(result)->tp_name)) { Py_DECREF(result); return NULL; } return result; } #endif PyErr_Format(PyExc_TypeError, "__%.4s__ returned non-%.4s (type %.200s)", type_name, type_name, Py_TYPE(result)->tp_name); Py_DECREF(result); return NULL; } static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x) { #if CYTHON_USE_TYPE_SLOTS PyNumberMethods *m; #endif const char *name = NULL; PyObject *res = NULL; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x) || PyLong_Check(x))) #else if (likely(PyLong_Check(x))) #endif return __Pyx_NewRef(x); #if CYTHON_USE_TYPE_SLOTS m = Py_TYPE(x)->tp_as_number; #if PY_MAJOR_VERSION < 3 if (m && m->nb_int) { name = "int"; res = m->nb_int(x); } else if (m && m->nb_long) { name = "long"; res = m->nb_long(x); } #else if (likely(m && m->nb_int)) { name = "int"; res = m->nb_int(x); } #endif #else if (!PyBytes_CheckExact(x) && !PyUnicode_CheckExact(x)) { res = PyNumber_Int(x); } #endif if (likely(res)) { #if PY_MAJOR_VERSION < 3 if (unlikely(!PyInt_Check(res) && !PyLong_Check(res))) { #else if (unlikely(!PyLong_CheckExact(res))) { #endif return __Pyx_PyNumber_IntOrLongWrongResultType(res, name); } } else if (!PyErr_Occurred()) { PyErr_SetString(PyExc_TypeError, "an integer is required"); } return res; } static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) { Py_ssize_t ival; PyObject *x; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_CheckExact(b))) { if (sizeof(Py_ssize_t) >= sizeof(long)) return PyInt_AS_LONG(b); else return PyInt_AsSsize_t(x); } #endif if (likely(PyLong_CheckExact(b))) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)b)->ob_digit; const Py_ssize_t size = Py_SIZE(b); if (likely(__Pyx_sst_abs(size) <= 1)) { ival = likely(size) ? 
digits[0] : 0; if (size == -1) ival = -ival; return ival; } else { switch (size) { case 2: if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) { return (Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case -2: if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) { return -(Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case 3: if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) { return (Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case -3: if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) { return -(Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case 4: if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) { return (Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case -4: if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) { return -(Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; } } #endif return PyLong_AsSsize_t(b); } x = PyNumber_Index(b); if (!x) return -1; ival = PyInt_AsSsize_t(x); Py_DECREF(x); return ival; } static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) { return PyInt_FromSize_t(ival); } #endif /* Py_PYTHON_H */ photutils-0.4/photutils/geometry/rectangular_overlap.pyx0000644000214200020070000000772713055576313026307 0ustar lbradleySTSCI\science00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst from __future__ import (absolute_import, division, print_function, unicode_literals) import numpy as np cimport numpy as np __all__ = ['rectangular_overlap_grid'] cdef extern from "math.h": double asin(double x) double sin(double x) double cos(double x) double sqrt(double x) double fabs(double x) from cpython cimport bool DTYPE = np.float64 ctypedef np.float64_t DTYPE_t cimport cython def rectangular_overlap_grid(double xmin, double xmax, double ymin, double ymax, int nx, int ny, double width, double height, double theta, int use_exact, int subpixels): """ rectangular_overlap_grid(xmin, xmax, ymin, ymax, nx, ny, width, height, use_exact, subpixels) Area of overlap between a rectangle and a pixel grid. The rectangle is centered on the origin. Parameters ---------- xmin, xmax, ymin, ymax : float Extent of the grid in the x and y direction. nx, ny : int Grid dimensions. width : float The width of the rectangle height : float The height of the rectangle theta : float The position angle of the rectangle in radians (counterclockwise). use_exact : 0 or 1 If set to 1, calculates the exact overlap, while if set to 0, uses a subpixel sampling method with ``subpixel`` subpixels in each direction. subpixels : int If ``use_exact`` is 0, each pixel is resampled by this factor in each dimension. Thus, each pixel is divided into ``subpixels ** 2`` subpixels. Returns ------- frac : `~numpy.ndarray` 2-d array giving the fraction of the overlap. 
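Examples
--------
A minimal illustrative call (an assumed usage sketch; the numerical values
are arbitrary). Exact mode is not implemented for rectangular apertures, so
``use_exact`` is set to 0 and 5 subpixels per dimension are used:

>>> from photutils.geometry import rectangular_overlap_grid
>>> frac = rectangular_overlap_grid(-1., 1., -1., 1., 10, 10,
...                                 0.5, 0.3, 0.2, 0, 5)
>>> frac.shape
(10, 10)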
""" cdef unsigned int i, j cdef double x, y, dx, dy cdef double pxmin, pxmax, pymin, pymax # Define output array cdef np.ndarray[DTYPE_t, ndim=2] frac = np.zeros([ny, nx], dtype=DTYPE) if use_exact == 1: raise NotImplementedError("Exact mode has not been implemented for " "rectangular apertures") # Find the width of each element in x and y dx = (xmax - xmin) / nx dy = (ymax - ymin) / ny # TODO: can implement a bounding box here for efficiency (as for the # circular and elliptical aperture photometry) for i in range(nx): pxmin = xmin + i * dx # lower end of pixel pxmax = pxmin + dx # upper end of pixel for j in range(ny): pymin = ymin + j * dy pymax = pymin + dy frac[j, i] = rectangular_overlap_single_subpixel( pxmin, pymin, pxmax, pymax, width, height, theta, subpixels) return frac cdef double rectangular_overlap_single_subpixel(double x0, double y0, double x1, double y1, double width, double height, double theta, int subpixels): """ Return the fraction of overlap between a rectangle and a single pixel with given extent, using a sub-pixel sampling method. """ cdef unsigned int i, j cdef double x, y cdef double frac = 0. # Accumulator. cdef double cos_theta = cos(theta) cdef double sin_theta = sin(theta) cdef double half_width, half_height half_width = width / 2. half_height = height / 2. dx = (x1 - x0) / subpixels dy = (y1 - y0) / subpixels x = x0 - 0.5 * dx for i in range(subpixels): x += dx y = y0 - 0.5 * dy for j in range(subpixels): y += dy # Transform into frame of rotated rectangle x_tr = y * sin_theta + x * cos_theta y_tr = y * cos_theta - x * sin_theta if fabs(x_tr) < half_width and fabs(y_tr) < half_height: frac += 1. return frac / (subpixels * subpixels) photutils-0.4/photutils/geometry/tests/0000755000214200020070000000000013175654702022635 5ustar lbradleySTSCI\science00000000000000photutils-0.4/photutils/geometry/tests/__init__.py0000644000214200020070000000017013055576313024742 0ustar lbradleySTSCI\science00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst """ This package contains affiliated package tests. """ photutils-0.4/photutils/geometry/tests/test_circular_overlap_grid.py0000644000214200020070000000203213175634532030603 0ustar lbradleySTSCI\science00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst from __future__ import (absolute_import, division, print_function, unicode_literals) import itertools from numpy.testing import assert_allclose import pytest from .. import circular_overlap_grid grid_sizes = [50, 500, 1000] circ_sizes = [0.2, 0.4, 0.8] use_exact = [0, 1] subsamples = [1, 5, 10] arg_list = ['grid_size', 'circ_size', 'use_exact', 'subsample'] @pytest.mark.parametrize(('grid_size', 'circ_size', 'use_exact', 'subsample'), list(itertools.product(grid_sizes, circ_sizes, use_exact, subsamples))) def test_circular_overlap_grid(grid_size, circ_size, use_exact, subsample): """ Test normalization of the overlap grid to make sure that a fully enclosed pixel has a value of 1.0. """ g = circular_overlap_grid(-1.0, 1.0, -1.0, 1.0, grid_size, grid_size, circ_size, use_exact, subsample) assert_allclose(g.max(), 1.0) photutils-0.4/photutils/geometry/tests/test_elliptical_overlap_grid.py0000644000214200020070000000250713175634532031130 0ustar lbradleySTSCI\science00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst from __future__ import (absolute_import, division, print_function, unicode_literals) import itertools from numpy.testing import assert_allclose import pytest from .. 
import elliptical_overlap_grid grid_sizes = [50, 500, 1000] maj_sizes = [0.2, 0.4, 0.8] min_sizes = [0.2, 0.4, 0.8] angles = [0.0, 0.5, 1.0] use_exact = [0, 1] subsamples = [1, 5, 10] arg_list = ['grid_size', 'maj_size', 'min_size', 'angle', 'use_exact', 'subsample'] @pytest.mark.parametrize(('grid_size', 'maj_size', 'min_size', 'angle', 'use_exact', 'subsample'), list(itertools.product(grid_sizes, maj_sizes, min_sizes, angles, use_exact, subsamples))) def test_elliptical_overlap_grid(grid_size, maj_size, min_size, angle, use_exact, subsample): """ Test normalization of the overlap grid to make sure that a fully enclosed pixel has a value of 1.0. """ g = elliptical_overlap_grid(-1.0, 1.0, -1.0, 1.0, grid_size, grid_size, maj_size, min_size, angle, use_exact, subsample) assert_allclose(g.max(), 1.0) photutils-0.4/photutils/geometry/tests/test_rectangular_overlap_grid.py0000644000214200020070000000204713175634532031314 0ustar lbradleySTSCI\science00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst from __future__ import (absolute_import, division, print_function, unicode_literals) import itertools from numpy.testing import assert_allclose import pytest from .. import rectangular_overlap_grid grid_sizes = [50, 500, 1000] rect_sizes = [0.2, 0.4, 0.8] angles = [0.0, 0.5, 1.0] subsamples = [1, 5, 10] arg_list = ['grid_size', 'rect_size', 'angle', 'subsample'] @pytest.mark.parametrize(('grid_size', 'rect_size', 'angle', 'subsample'), list(itertools.product(grid_sizes, rect_sizes, angles, subsamples))) def test_rectangular_overlap_grid(grid_size, rect_size, angle, subsample): """ Test normalization of the overlap grid to make sure that a fully enclosed pixel has a value of 1.0. """ g = rectangular_overlap_grid(-1.0, 1.0, -1.0, 1.0, grid_size, grid_size, rect_size, rect_size, angle, 0, subsample) assert_allclose(g.max(), 1.0) photutils-0.4/photutils/isophote/0000755000214200020070000000000013175654702021472 5ustar lbradleySTSCI\science00000000000000photutils-0.4/photutils/isophote/__init__.py0000644000214200020070000000067313175634532023610 0ustar lbradleySTSCI\science00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst """ This subpackage contains tools for fitting elliptical isophotes to galaxy images. """ from .ellipse import * # noqa from .fitter import * # noqa from .geometry import * # noqa from .harmonics import * # noqa from .integrator import * # noqa from .isophote import * # noqa from .model import * # noqa from .sample import * # noqa photutils-0.4/photutils/isophote/ellipse.py0000644000214200020070000007611513175634532023512 0ustar lbradleySTSCI\science00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst from __future__ import (absolute_import, division, print_function, unicode_literals) import warnings import numpy as np from astropy.utils.exceptions import AstropyUserWarning from .fitter import (EllipseFitter, CentralEllipseFitter, DEFAULT_CONVERGENCE, DEFAULT_MINIT, DEFAULT_MAXIT, DEFAULT_FFLAG, DEFAULT_MAXGERR) from .geometry import EllipseGeometry from .integrator import BILINEAR from .isophote import Isophote, IsophoteList from .sample import EllipseSample, CentralEllipseSample __all__ = ['Ellipse'] class Ellipse(object): """ Class to fit elliptical isophotes to a galaxy image. The isophotes in the image are measured using an iterative method described by `Jedrzejewski (1987; MNRAS 226, 747) `_. See the **Notes** section below for details about the algorithm. 
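A typical fitting session, shown here only as a schematic sketch (``data``
is assumed to be a 2D `~numpy.ndarray` containing the galaxy image; it is
not created by this class)::

    from photutils.isophote import Ellipse

    ellipse = Ellipse(data)        # optionally pass an EllipseGeometry
    isolist = ellipse.fit_image()  # fit isophotes over a range of sma values

``fit_image`` returns a `~photutils.isophote.IsophoteList` containing one
fitted isophote per semimajor axis length, sorted by increasing semimajor
axis.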
Parameters ---------- image : 2D `~numpy.ndarray` The image array. geometry : `~photutils.isophote.EllipseGeometry` instance or `None`, optional The optional geometry that describes the first ellipse to be fitted. If `None`, a default `~photutils.isophote.EllipseGeometry` instance is created centered on the image frame with ellipticity of 0.2 and a position angle of 90 degrees. threshold : float, optional The threshold for the object centerer algorithm. By lowering this value the object centerer becomes less strict, in the sense that it will accept lower signal-to-noise data. If set to a very large value, the centerer is effectively shut off. In this case, either the geometry information supplied by the ``geometry`` parameter is used as is, or the fit algorithm will terminate prematurely. Note that once the object centerer runs successfully, the (x, y) coordinates in the ``geometry`` attribute (an `~photutils.isophote.EllipseGeometry` instance) are modified in place. The default is 0.1 Notes ----- The image is measured using an iterative method described by `Jedrzejewski (1987; MNRAS 226, 747) `_. Each isophote is fitted at a pre-defined, fixed semimajor axis length. The algorithm starts from a first-guess elliptical isophote defined by approximate values for the (x, y) center coordinates, ellipticity, and position angle. Using these values, the image is sampled along an elliptical path, producing a 1-dimensional function that describes the dependence of intensity (pixel value) with angle (E). The function is stored as a set of 1D numpy arrays. The harmonic content of this function is analyzed by least-squares fitting to the function: .. math:: y = y0 + (A1 * \\sin(E)) + (B1 * \\cos(E)) + (A2 * \\sin(2 * E)) + (B2 * \\cos(2 * E)) Each one of the harmonic amplitudes (A1, B1, A2, and B2) is related to a specific ellipse geometric parameter in the sense that it conveys information regarding how much the parameter's current value deviates from the "true" one. To compute this deviation, the image's local radial gradient has to be taken into account too. The algorithm picks up the largest amplitude among the four, estimates the local gradient, and computes the corresponding increment in the associated ellipse parameter. That parameter is updated, and the image is resampled. This process is repeated until any one of the following criteria are met: 1. the largest harmonic amplitude is less than a given fraction of the rms residual of the intensity data around the harmonic fit. 2. a user-specified maximum number of iterations is reached. 3. more than a given fraction of the elliptical sample points have no valid data in then, either because they lie outside the image boundaries or because they were flagged out from the fit by sigma-clipping. In any case, a minimum number of iterations is always performed. If iterations stop because of reasons 2 or 3 above, then those ellipse parameters that generated the lowest absolute values for harmonic amplitudes will be used. At this point, the image data sample coming from the best fit ellipse is fitted by the following function: .. math:: y = y0 + (An * sin(n * E)) + (Bn * cos(n * E)) with :math:`n = 3` and :math:`n = 4`. The corresponding amplitudes (A3, B3, A4, and B4), divided by the semimajor axis length and local intensity gradient, measure the isophote's deviations from perfect ellipticity (these amplitudes, divided by semimajor axis and gradient, are the actual quantities stored in the output `~photutils.isophote.Isophote` instance). 
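Both harmonic fits above (the first/second-order fit that drives the
corrections and the third/fourth-order fit that measures the deviations
from ellipticity) amount to ordinary least squares on sine/cosine basis
functions. As a schematic sketch only (not the actual internal
implementation; ``angles`` and ``intensity`` are assumed to hold the
sampled eccentric angles and pixel values along the current ellipse)::

    import numpy as np

    # design matrix for y0 + A1*sin(E) + B1*cos(E) + A2*sin(2E) + B2*cos(2E);
    # the third/fourth-order fit uses sin(3E), cos(3E), sin(4E), cos(4E)
    # basis functions instead
    design = np.column_stack([np.ones_like(angles),
                              np.sin(angles), np.cos(angles),
                              np.sin(2. * angles), np.cos(2. * angles)])
    y0, a1, b1, a2, b2 = np.linalg.lstsq(design, intensity)[0]

The largest of the four first/second-order amplitudes (in absolute value)
is the one used to correct the corresponding ellipse parameter at each
iteration, as described above.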
The algorithm then measures the integrated intensity and the number of non-flagged pixels inside the elliptical isophote, and also inside the corresponding circle with same center and radius equal to the semimajor axis length. These parameters, their errors, other associated parameters, and auxiliary information, are stored in the `~photutils.isophote.Isophote` instance. Errors in intensity and local gradient are obtained directly from the rms scatter of intensity data along the fitted ellipse. Ellipse geometry errors are obtained from the errors in the coefficients of the first and second simultaneous harmonic fit. Third and fourth harmonic amplitude errors are obtained in the same way, but only after the first and second harmonics are subtracted from the raw data. For more details, see the error analysis in `Busko (1996; ASPC 101, 139) `_. After fitting the ellipse that corresponds to a given value of the semimajor axis (by the process described above), the axis length is incremented/decremented following a pre-defined rule. At each step, the starting, first-guess, ellipse parameters are taken from the previously fitted ellipse that has the closest semimajor axis length to the current one. On low surface brightness regions (those having large radii), the small values of the image radial gradient can induce large corrections and meaningless values for the ellipse parameters. The algorithm has the ability to stop increasing semimajor axis based on several criteria, including signal-to-noise ratio. See the `~photutils.isophote.Isophote` documentation for the meaning of the stop code reported after each fit. The fit algorithm provides a k-sigma clipping algorithm for cleaning deviant sample points at each isophote, thus improving convergence stability against any non-elliptical structure such as stars, spiral arms, HII regions, defects, etc. The fit algorithm has no way of finding where, in the input image frame, the galaxy to be measured is located. The center (x, y) coordinates need to be close to the actual center for the fit to work. An "object centerer" function helps to verify that the selected position can be used as starting point. This function scans a 10x10 window centered either on the (x, y) coordinates in the `~photutils.isophote.EllipseGeometry` instance passed to the constructor of the `~photutils.isophote.Ellipse` class, or, if any one of them, or both, are set to `None`, on the input image frame center. In case a successful acquisition takes place, the `~photutils.isophote.EllipseGeometry` instance is modified in place to reflect the solution of the object centerer algorithm. In some cases the object centerer algorithm may fail, even though there is enough signal-to-noise to start a fit (e.g. in objects with very high ellipticity). In those cases the sensitivity of the algorithm can be decreased by decreasing the value of the object centerer threshold parameter. The centerer works by looking to where a quantity akin to a signal-to-noise ratio is maximized within the 10x10 window. The centerer can thus be shut off entirely by setting the threshold to a large value >> 1 (meaning, no location inside the search window will achieve that signal-to-noise ratio). A note of caution: the ellipse fitting algorithm was designed explicitly with an elliptical galaxy brightness distribution in mind. In particular, a well defined negative radial intensity gradient across the region being fitted is paramount for the achievement of stable solutions. 
Use of the algorithm in other types of images (e.g., planetary nebulae) may lead to inability to converge to any acceptable solution. """ def __init__(self, image, geometry=None, threshold=0.1): self.image = image if geometry is not None: self._geometry = geometry else: _x0 = image.shape[0] / 2 _y0 = image.shape[1] / 2 self._geometry = EllipseGeometry(_x0, _y0, 10., eps=0.2, pa=np.pi/2) def set_threshold(self, threshold): """ Modify the threshold value used by the centerer. Parameters ---------- threshold : float The new threshold value to use. """ self._geometry.centerer_threshold = threshold def fit_image(self, sma0=None, minsma=0., maxsma=None, step=0.1, conver=DEFAULT_CONVERGENCE, minit=DEFAULT_MINIT, maxit=DEFAULT_MAXIT, fflag=DEFAULT_FFLAG, maxgerr=DEFAULT_MAXGERR, sclip=3., nclip=0, integrmode=BILINEAR, linear=False, maxrit=None): # This parameter list is quite large and should in principle be # simplified by re-distributing these controls to somewhere else. # We keep this design though because it better mimics the flat # architecture used in the original STSDAS task `ellipse`. """ Fit multiple isophotes to the image array. This method loops over each value of the semimajor axis (sma) length (constructed from the input parameters), fitting a single isophote at each sma. The entire set of isophotes is returned in an `~photutils.isophote.IsophoteList` instance. Parameters ---------- sma0 : float, optional The starting value for the semimajor axis length (pixels). This value must not be the minimum or maximum semimajor axis length, but something in between. The algorithm can't start from the very center of the galaxy image because the modelling of elliptical isophotes on that region is poor and it will diverge very easily if not tied to other previously fit isophotes. It can't start from the maximum value either because the maximum is not known beforehand, depending on signal-to-noise. The ``sma0`` value should be selected such that the corresponding isophote has a good signal-to-noise ratio and a clearly defined geometry. If set to `None` (the default), one of two actions will be taken: if a `~photutils.isophote.EllipseGeometry` instance was input to the `~photutils.isophote.Ellipse` constructor, its ``sma`` value will be used. Otherwise, a default value of 10. will be used. minsma : float, optional The minimum value for the semimajor axis length (pixels). The default is 0. maxsma : float or `None`, optional The maximum value for the semimajor axis length (pixels). When set to `None` (default), the algorithm will increase the semimajor axis until one of several conditions will cause it to stop and revert to fit ellipses with sma < ``sma0``. step : float, optional The step value used to grow/shrink the semimajor axis length (pixels if ``linear=True``, or a relative value if ``linear=False``). See the ``linear`` parameter. The default is 0.1. conver : float, optional The main convergence criterion. Iterations stop when the largest harmonic amplitude becomes smaller (in absolute value) than ``conver`` times the harmonic fit rms. The default is 0.05. minit : int, optional The minimum number of iterations to perform. A minimum of 10 (the default) iterations guarantees that, on average, 2 iterations will be available for fitting each independent parameter (the four harmonic amplitudes and the intensity level). 
For the first isophote, the minimum number of iterations is 2 * ``minit`` to ensure that, even departing from not-so-good initial values, the algorithm has a better chance to converge to a sensible solution. maxit : int, optional The maximum number of iterations to perform. The default is 50. fflag : float, optional The acceptable fraction of flagged data points in the sample. If the actual fraction of valid data points is smaller than this, the iterations will stop and the current `~photutils.isophote.Isophote` will be returned. Flagged data points are points that either lie outside the image frame, are masked, or were rejected by sigma-clipping. The default is 0.7. maxgerr : float, optional The maximum acceptable relative error in the local radial intensity gradient. This is the main control for preventing ellipses to grow to regions of too low signal-to-noise ratio. It specifies the maximum acceptable relative error in the local radial intensity gradient. `Busko (1996; ASPC 101, 139) `_ showed that the fitting precision relates to that relative error. The usual behavior of the gradient relative error is to increase with semimajor axis, being larger in outer, fainter regions of a galaxy image. In the current implementation, the ``maxgerr`` criterion is triggered only when two consecutive isophotes exceed the value specified by the parameter. This prevents premature stopping caused by contamination such as stars and HII regions. A number of actions may happen when the gradient error exceeds ``maxgerr`` (or becomes non-significant and is set to `None`). If the maximum semimajor axis specified by ``maxsma`` is set to `None`, semimajor axis growth is stopped and the algorithm proceeds inwards to the galaxy center. If ``maxsma`` is set to some finite value, and this value is larger than the current semimajor axis length, the algorithm enters non-iterative mode and proceeds outwards until reaching ``maxsma``. The default is 0.5. sclip : float, optional The sigma-clip sigma value. The default is 3.0. nclip : int, optional The number of sigma-clip interations. The default is 0, which means sigma-clipping is skipped. integrmode : {'bilinear', 'nearest_neighbor', 'mean', 'median'}, optional The area integration mode. The default is 'bilinear'. linear : bool, optional The semimajor axis growing/shrinking mode. If `False` (default), the geometric growing mode is chosen, thus the semimajor axis length is increased by a factor of (1. + ``step``), and the process is repeated until either the semimajor axis value reaches the value of parameter ``maxsma``, or the last fitted ellipse has more than a given fraction of its sampled points flagged out (see ``fflag``). The process then resumes from the first fitted ellipse (at ``sma0``) inwards, in steps of (1./(1. + ``step``)), until the semimajor axis length reaches the value ``minsma``. In case of linear growing, the increment or decrement value is given directly by ``step`` in pixels. If ``maxsma`` is set to `None`, the semimajor axis will grow until a low signal-to-noise criterion is met. See ``maxgerr``. maxrit : float or `None`, optional The maximum value of semimajor axis to perform an actual fit. Whenever the current semimajor axis length is larger than ``maxrit``, the isophotes will be extracted using the current geometry, without being fitted. This non-iterative mode may be useful for sampling regions of very low surface brightness, where the algorithm may become unstable and unable to recover reliable geometry information. 
Non-iterative mode can also be entered automatically whenever the ellipticity exceeds 1.0 or the ellipse center crosses the image boundaries. If `None` (default), then no maximum value is used. Returns ------- result : `~photutils.isophote.IsophoteList` instance A list-like object of `~photutils.isophote.Isophote` instances, sorted by increasing semimajor axis length. """ # multiple fitted isophotes will be stored here isophote_list = [] # get starting sma from appropriate source: keyword parameter, # internal EllipseGeometry instance, or fixed default value. if not sma0: if self._geometry: sma = self._geometry.sma else: sma = 10. else: sma = sma0 # first, go from initial sma outwards until # hitting one of several stopping criteria. noiter = False first_isophote = True while True: # first isophote runs longer minit_a = 2 * minit if first_isophote else minit first_isophote = False isophote = self.fit_isophote(sma, step, conver, minit_a, maxit, fflag, maxgerr, sclip, nclip, integrmode, linear, maxrit, noniterate=noiter, isophote_list=isophote_list) # check for failed fit. if (isophote.stop_code < 0 or isophote.stop_code == 1): # in case the fit failed right at the outset, return an # empty list. This is the usual case when the user # provides initial guesses that are too way off to enable # the fitting algorithm to find any meaningful solution. if len(isophote_list) == 1: warnings.warn('No meaningful fit was possible.', AstropyUserWarning) return IsophoteList([]) self._fix_last_isophote(isophote_list, -1) # get last isophote from the actual list, since the last # `isophote` instance in this context may no longer be OK. isophote = isophote_list[-1] # if two consecutive isophotes failed to fit, # shut off iterative mode. Or, bail out and # change to go inwards. if len(isophote_list) > 2: if ((isophote.stop_code == 5 and isophote_list[-2].stop_code == 5) or isophote.stop_code == 1): if maxsma and maxsma > isophote.sma: # if a maximum sma value was provided by # user, and the current sma is smaller than # maxsma, keep growing sma in non-iterative # mode until reaching it. noiter = True else: # if no maximum sma, stop growing and change # to go inwards. break # reset variable from the actual list, since the last # `isophote` instance may no longer be OK. isophote = isophote_list[-1] # update sma. If exceeded user-defined # maximum, bail out from this loop. sma = isophote.sample.geometry.update_sma(step) if maxsma and sma >= maxsma: break # reset sma so as to go inwards. first_isophote = isophote_list[0] sma, step = first_isophote.sample.geometry.reset_sma(step) # now, go from initial sma inwards towards center. while True: isophote = self.fit_isophote(sma, step, conver, minit, maxit, fflag, maxgerr, sclip, nclip, integrmode, linear, maxrit, going_inwards=True, isophote_list=isophote_list) # if abnormal condition, fix isophote but keep going. if isophote.stop_code < 0: self._fix_last_isophote(isophote_list, 0) # reset variable from the actual list, since the last # `isophote` instance may no longer be OK. 
isophote = isophote_list[-1] # figure out next sma; if exceeded user-defined # minimum, or too small, bail out from this loop sma = isophote.sample.geometry.update_sma(step) if sma <= max(minsma, 0.5): break # if user asked for minsma=0, extract special isophote there if minsma == 0.0: isophote = self.fit_isophote(0.0, isophote_list=isophote_list) # sort list of isophotes according to sma isophote_list.sort() return IsophoteList(isophote_list) def fit_isophote(self, sma, step=0.1, conver=DEFAULT_CONVERGENCE, minit=DEFAULT_MINIT, maxit=DEFAULT_MAXIT, fflag=DEFAULT_FFLAG, maxgerr=DEFAULT_MAXGERR, sclip=3., nclip=0, integrmode=BILINEAR, linear=False, maxrit=None, noniterate=False, going_inwards=False, isophote_list=None): """ Fit a single isophote with a given semimajor axis length. The ``step`` and ``linear`` parameters are not used to actually grow or shrink the current fitting semimajor axis length. They are necessary so the sampling algorithm can know where to start the gradient computation and also how to compute the elliptical sector areas (when area integration mode is selected). Parameters ---------- sma : float The semimajor axis length (pixels). step : float, optional The step value used to grow/shrink the semimajor axis length (pixels if ``linear=True``, or a relative value if ``linear=False``). See the ``linear`` parameter. The default is 0.1. conver : float, optional The main convergence criterion. Iterations stop when the largest harmonic amplitude becomes smaller (in absolute value) than ``conver`` times the harmonic fit rms. The default is 0.05. minit : int, optional The minimum number of iterations to perform. A minimum of 10 (the default) iterations guarantees that, on average, 2 iterations will be available for fitting each independent parameter (the four harmonic amplitudes and the intensity level). For the first isophote, the minimum number of iterations is 2 * ``minit`` to ensure that, even departing from not-so-good initial values, the algorithm has a better chance to converge to a sensible solution. maxit : int, optional The maximum number of iterations to perform. The default is 50. fflag : float, optional The acceptable fraction of flagged data points in the sample. If the actual fraction of valid data points is smaller than this, the iterations will stop and the current `~photutils.isophote.Isophote` will be returned. Flagged data points are points that either lie outside the image frame, are masked, or were rejected by sigma-clipping. The default is 0.7. maxgerr : float, optional The maximum acceptable relative error in the local radial intensity gradient. When fitting a single isophote by itself this paramter doesn't have any effect on the outcome. sclip : float, optional The sigma-clip sigma value. The default is 3.0. nclip : int, optional The number of sigma-clip interations. The default is 0, which means sigma-clipping is skipped. integrmode : {'bilinear', 'nearest_neighbor', 'mean', 'median'}, optional The area integration mode. The default is 'bilinear'. linear : bool, optional The semimajor axis growing/shrinking mode. When fitting just one isophote, this parameter is used only by the code that define the details of how elliptical arc segments ("sectors") are extracted from the image when using area extraction modes (see the ``integrmode`` parameter). maxrit : float or `None`, optional The maximum value of semimajor axis to perform an actual fit. 
Whenever the current semimajor axis length is larger than ``maxrit``, the isophotes will be extracted using the current geometry, without being fitted. This non-iterative mode may be useful for sampling regions of very low surface brightness, where the algorithm may become unstable and unable to recover reliable geometry information. Non-iterative mode can also be entered automatically whenever the ellipticity exceeds 1.0 or the ellipse center crosses the image boundaries. If `None` (default), then no maximum value is used. noniterate : bool, optional Whether the fitting algorithm should be bypassed and an isophote should be extracted with the geometry taken directly from the most recent `~photutils.isophote.Isophote` instance stored in the ``isophote_list`` parameter. This parameter is mainly used when running the method in a loop over different values of semimajor axis length, and we want to change from iterative to non-iterative mode somewhere along the sequence of isophotes. When set to `True`, this parameter overrides the behavior associated with parameter ``maxrit``. The default is `False`. going_inwards : bool, optional Parameter to define the sense of SMA growth. When fitting just one isophote, this parameter is used only by the code that defines the details of how elliptical arc segments ("sectors") are extracted from the image, when using area extraction modes (see the ``integrmode`` parameter). The default is `False`. isophote_list : list or `None`, optional If not `None` (the default), the fitted `~photutils.isophote.Isophote` instance is appended to this list. It must be created and managed by the caller. Returns ------- result : `~photutils.isophote.Isophote` instance The fitted isophote. The fitted isophote is also appended to the input list input to the ``isophote_list`` parameter. """ geometry = self._geometry # if available, geometry from last fitted isophote will be # used as initial guess for next isophote. 
if isophote_list is not None and len(isophote_list) > 0: geometry = isophote_list[-1].sample.geometry # do the fit if noniterate or (maxrit and sma > maxrit): isophote = self._non_iterative(sma, step, linear, geometry, sclip, nclip, integrmode) else: isophote = self._iterative(sma, step, linear, geometry, sclip, nclip, integrmode, conver, minit, maxit, fflag, maxgerr, going_inwards) # store result in list if isophote_list is not None and isophote.valid: isophote_list.append(isophote) return isophote def _iterative(self, sma, step, linear, geometry, sclip, nclip, integrmode, conver, minit, maxit, fflag, maxgerr, going_inwards=False): if sma > 0.: # iterative fitter sample = EllipseSample(self.image, sma, astep=step, sclip=sclip, nclip=nclip, linear_growth=linear, geometry=geometry, integrmode=integrmode) fitter = EllipseFitter(sample) else: # sma == 0 requires special handling sample = CentralEllipseSample(self.image, 0.0, geometry=geometry) fitter = CentralEllipseFitter(sample) isophote = fitter.fit(conver, minit, maxit, fflag, maxgerr, going_inwards) return isophote def _non_iterative(self, sma, step, linear, geometry, sclip, nclip, integrmode): sample = EllipseSample(self.image, sma, astep=step, sclip=sclip, nclip=nclip, linear_growth=linear, geometry=geometry, integrmode=integrmode) sample.update() # build isophote without iterating with an EllipseFitter isophote = Isophote(sample, 0, True, stop_code=4) return isophote def _fix_last_isophote(self, isophote_list, index): if len(isophote_list) > 0: isophote = isophote_list.pop() # check if isophote is bad; if so, fix its geometry # to be like the geometry of the index-th isophote # in list. isophote.fix_geometry(isophote_list[index]) # force new extraction of raw data, since # geometry changed. isophote.sample.values = None isophote.sample.update() # we take the opportunity to change an eventual # negative stop code to its' positive equivalent. code = (5 if isophote.stop_code < 0 else isophote.stop_code) # build new instance so it can have its attributes # populated from the updated sample attributes. new_isophote = Isophote(isophote.sample, isophote.niter, isophote.valid, code) # add new isophote to list isophote_list.append(new_isophote) photutils-0.4/photutils/isophote/fitter.py0000644000214200020070000003666113175634532023354 0ustar lbradleySTSCI\science00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst from __future__ import (absolute_import, division, print_function, unicode_literals) from astropy import log import math import numpy as np from .harmonics import (fit_first_and_second_harmonics, first_and_second_harmonic_function) from .isophote import Isophote, CentralPixel from .sample import EllipseSample __all__ = ['EllipseFitter'] __doctest_skip__ = ['EllipseFitter.fit'] PI2 = np.pi / 2 MAX_EPS = 0.95 MIN_EPS = 0.05 DEFAULT_CONVERGENCE = 0.05 DEFAULT_MINIT = 10 DEFAULT_MAXIT = 50 DEFAULT_FFLAG = 0.7 DEFAULT_MAXGERR = 0.5 class EllipseFitter(object): """ Class to fit ellipses. Parameters ---------- sample : `~photutils.isophote.EllipseSample` instance The sample data to be fitted. """ def __init__(self, sample): self._sample = sample def fit(self, conver=DEFAULT_CONVERGENCE, minit=DEFAULT_MINIT, maxit=DEFAULT_MAXIT, fflag=DEFAULT_FFLAG, maxgerr=DEFAULT_MAXGERR, going_inwards=False): """ Fit an elliptical isophote. Parameters ---------- conver : float, optional The main convergence criterion. 
Iterations stop when the largest harmonic amplitude becomes smaller (in absolute value) than ``conver`` times the harmonic fit rms. The default is 0.05. minit : int, optional The minimum number of iterations to perform. A minimum of 10 (the default) iterations guarantees that, on average, 2 iterations will be available for fitting each independent parameter (the four harmonic amplitudes and the intensity level). For the first isophote, the minimum number of iterations is 2 * ``minit`` to ensure that, even departing from not-so-good initial values, the algorithm has a better chance to converge to a sensible solution. maxit : int, optional The maximum number of iterations to perform. The default is 50. fflag : float, optional The acceptable fraction of flagged data points in the sample. If the actual fraction of valid data points is smaller than this, the iterations will stop and the current `~photutils.isophote.Isophote` will be returned. Flagged data points are points that either lie outside the image frame, are masked, or were rejected by sigma-clipping. The default is 0.7. maxgerr : float, optional The maximum acceptable relative error in the local radial intensity gradient. This is the main control for preventing ellipses to grow to regions of too low signal-to-noise ratio. It specifies the maximum acceptable relative error in the local radial intensity gradient. `Busko (1996; ASPC 101, 139) `_ showed that the fitting precision relates to that relative error. The usual behavior of the gradient relative error is to increase with semimajor axis, being larger in outer, fainter regions of a galaxy image. In the current implementation, the ``maxgerr`` criterion is triggered only when two consecutive isophotes exceed the value specified by the parameter. This prevents premature stopping caused by contamination such as stars and HII regions. A number of actions may happen when the gradient error exceeds ``maxgerr`` (or becomes non-significant and is set to `None`). If the maximum semimajor axis specified by ``maxsma`` is set to `None`, semimajor axis growth is stopped and the algorithm proceeds inwards to the galaxy center. If ``maxsma`` is set to some finite value, and this value is larger than the current semimajor axis length, the algorithm enters non-iterative mode and proceeds outwards until reaching ``maxsma``. The default is 0.5. going_inwards : bool, optional Parameter to define the sense of SMA growth. When fitting just one isophote, this parameter is used only by the code that defines the details of how elliptical arc segments ("sectors") are extracted from the image, when using area extraction modes (see the ``integrmode`` parameter in the `~photutils.isophote.EllipseSample` class). The default is `False`. Returns ------- result : `~photutils.isophote.Isophote` instance The fitted isophote, which also contains fit status information. Examples -------- >>> from photutils.isophote import EllipseSample, EllipseFitter >>> sample = EllipseSample(data, sma=10.) >>> fitter = EllipseFitter(sample) >>> isophote = fitter.fit() """ sample = self._sample # this flag signals that limiting gradient error (`maxgerr`) # wasn't exceeded yet. lexceed = False # here we keep track of the sample that caused the minimum harmonic # amplitude(in absolute value). This will eventually be used to # build the resulting Isophote in cases where iterations run to # the maximum allowed (maxit), or the maximum number of flagged # data points (fflag) is reached. 
minimum_amplitude_value = np.Inf minimum_amplitude_sample = None for iter in range(maxit): # Force the sample to compute its gradient and associated values. sample.update() # The extract() method returns sampled values as a 2-d numpy array # with the following structure: # values[0] = 1-d array with angles # values[1] = 1-d array with radii # values[2] = 1-d array with intensity values = sample.extract() # Fit harmonic coefficients. Failure in fitting is # a fatal error; terminate immediately with sample # marked as invalid. try: coeffs = fit_first_and_second_harmonics(values[0], values[2]) except Exception as e: log.info(e) return Isophote(sample, iter+1, False, 3) coeffs = coeffs[0] # largest harmonic in absolute value drives the correction. largest_harmonic_index = np.argmax(np.abs(coeffs[1:])) largest_harmonic = coeffs[1:][largest_harmonic_index] # see if the amplitude decreased; if yes, keep the # corresponding sample for eventual later use. if abs(largest_harmonic) < minimum_amplitude_value: minimum_amplitude_value = abs(largest_harmonic) minimum_amplitude_sample = sample # check if converged model = first_and_second_harmonic_function(values[0], coeffs) residual = values[2] - model if ((conver * sample.sector_area * np.std(residual)) > np.abs(largest_harmonic)): # Got a valid solution. But before returning, ensure # that a minimum of iterations has run. if iter >= minit-1: sample.update() return Isophote(sample, iter+1, True, 0) # it may not have converged yet, but the sample contains too # many invalid data points: return. if sample.actual_points < (sample.total_points * fflag): # when too many data points were flagged, return the # best fit sample instead of the current one. minimum_amplitude_sample.update() return Isophote(minimum_amplitude_sample, iter+1, True, 1) # pick appropriate corrector code. corrector = _correctors[largest_harmonic_index] # generate *NEW* EllipseSample instance with corrected # parameter. Note that this instance is still devoid of other # information besides its geometry. It needs to be explicitly # updated for computations to proceed. We have to build a new # EllipseSample instance every time because of the lazy # extraction process used by EllipseSample code. To minimize # the number of calls to the area integrators, we pay a # (hopefully smaller) price here, by having multiple calls to # the EllipseSample constructor. sample = corrector.correct(sample, largest_harmonic) sample.update() # see if any abnormal (or unusual) conditions warrant # the change to non-iterative mode, or go-inwards mode. proceed, lexceed = self._check_conditions( sample, maxgerr, going_inwards, lexceed) if not proceed: sample.update() return Isophote(sample, iter+1, True, -1) # Got to the maximum number of iterations. Return with # code 2, and handle it as a valid isophote. Use the # best fit sample instead of the current one. minimum_amplitude_sample.update() return Isophote(minimum_amplitude_sample, maxit, True, 2) def _check_conditions(self, sample, maxgerr, going_inwards, lexceed): proceed = True # If center wandered more than allowed, put it back # in place and signal the end of iterative mode. # if wander: # if abs(dx) > WANDER(al)) or abs(dy) > WANDER(al): # sample.geometry.x0 -= dx # sample.geometry.y0 -= dy # STOP(al) = ST_NONITERATE # proceed = False # check if an acceptable gradient value could be computed. 
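        # The check below implements the two-strike ``maxgerr`` policy
        # described in the ``fit()`` docstring: when fitting outwards, a
        # relative gradient error above ``maxgerr`` (or a non-negative
        # gradient, i.e. intensity not decreasing outwards) is tolerated
        # once by raising the ``lexceed`` flag; a second occurrence stops
        # the iterations. If no gradient error could be computed at all,
        # the iterations stop immediately.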
if sample.gradient_error: if (not going_inwards and (sample.gradient_relative_error > maxgerr or sample.gradient >= 0.)): if lexceed: proceed = False else: lexceed = True else: proceed = False # check if ellipse geometry diverged. if abs(sample.geometry.eps > MAX_EPS): proceed = False if (sample.geometry.x0 < 1. or sample.geometry.x0 > sample.image.shape[0] or sample.geometry.y0 < 1. or sample.geometry.y0 > sample.image.shape[1]): proceed = False # See if eps == 0 (round isophote) was crossed. # If so, fix it but still proceed if sample.geometry.eps < 0.: sample.geometry.eps = min(-sample.geometry.eps, MAX_EPS) if sample.geometry.pa < PI2: sample.geometry.pa += PI2 else: sample.geometry.pa -= PI2 # If ellipse is an exact circle, computations will diverge. # Make it slightly flat, but still proceed if sample.geometry.eps == 0.0: sample.geometry.eps = MIN_EPS return proceed, lexceed class _ParameterCorrector(object): def correct(self, sample, harmonic): raise NotImplementedError class _PositionCorrector(_ParameterCorrector): def finalize_correction(self, dx, dy, sample): new_x0 = sample.geometry.x0 + dx new_y0 = sample.geometry.y0 + dy return EllipseSample(sample.image, sample.geometry.sma, x0=new_x0, y0=new_y0, astep=sample.geometry.astep, sclip=sample.sclip, nclip=sample.nclip, eps=sample.geometry.eps, position_angle=sample.geometry.pa, linear_growth=sample.geometry.linear_growth, integrmode=sample.integrmode) class _PositionCorrector_0(_PositionCorrector): def correct(self, sample, harmonic): aux = -harmonic * (1. - sample.geometry.eps) / sample.gradient dx = -aux * math.sin(sample.geometry.pa) dy = aux * math.cos(sample.geometry.pa) return self.finalize_correction(dx, dy, sample) class _PositionCorrector_1(_PositionCorrector): def correct(self, sample, harmonic): aux = -harmonic / sample.gradient dx = aux * math.cos(sample.geometry.pa) dy = aux * math.sin(sample.geometry.pa) return self.finalize_correction(dx, dy, sample) class _AngleCorrector(_ParameterCorrector): def correct(self, sample, harmonic): eps = sample.geometry.eps sma = sample.geometry.sma gradient = sample.gradient correction = (harmonic * 2. * (1. - eps) / sma / gradient / ((1. - eps)**2 - 1.)) # '% np.pi' to make angle lie between 0 and np.pi radians new_pa = (sample.geometry.pa + correction) % np.pi return EllipseSample(sample.image, sample.geometry.sma, x0=sample.geometry.x0, y0=sample.geometry.y0, astep=sample.geometry.astep, sclip=sample.sclip, nclip=sample.nclip, eps=sample.geometry.eps, position_angle=new_pa, linear_growth=sample.geometry.linear_growth, integrmode=sample.integrmode) class _EllipticityCorrector(_ParameterCorrector): def correct(self, sample, harmonic): eps = sample.geometry.eps sma = sample.geometry.sma gradient = sample.gradient correction = harmonic * 2. * (1. - eps) / sma / gradient new_eps = min((sample.geometry.eps - correction), MAX_EPS) return EllipseSample(sample.image, sample.geometry.sma, x0=sample.geometry.x0, y0=sample.geometry.y0, astep=sample.geometry.astep, sclip=sample.sclip, nclip=sample.nclip, eps=new_eps, position_angle=sample.geometry.pa, linear_growth=sample.geometry.linear_growth, integrmode=sample.integrmode) # instances of corrector code live here: _correctors = [_PositionCorrector_0(), _PositionCorrector_1(), _AngleCorrector(), _EllipticityCorrector()] class CentralEllipseFitter(EllipseFitter): """ A special Fitter class to handle the case of the central pixel in the galaxy image. 
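    An illustrative usage sketch (the names ``data`` and ``geometry`` are
    assumptions here: a 2D image array and an
    `~photutils.isophote.EllipseGeometry` instance centered on the galaxy),
    mirroring how `~photutils.isophote.Ellipse` handles the ``sma = 0``
    case::

        from photutils.isophote.fitter import CentralEllipseFitter
        from photutils.isophote.sample import CentralEllipseSample

        sample = CentralEllipseSample(data, 0., geometry=geometry)
        fitter = CentralEllipseFitter(sample)
        center = fitter.fit()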
""" def fit(self, conver=DEFAULT_CONVERGENCE, minit=DEFAULT_MINIT, maxit=DEFAULT_MAXIT, fflag=DEFAULT_FFLAG, maxgerr=DEFAULT_MAXGERR, going_inwards=False): """ Perform just a simple 1-pixel extraction at the current (x0, y0) position using bilinear interpolation. The input parameters are ignored, but included simple to match the calling signature of the parent class. Returns ------- result : `~photutils.isophote.CentralEllipsePixel` instance The central pixel value. For convenience, the `~photutils.isophote.CentralEllipsePixel` class inherits from the `~photutils.isophote.Isophote` class, although it's not really a true isophote but just a single intensity value at the central position. Thus, most of its attributes are hardcoded to `None` or other default value when appropriate. """ self._sample.update() return CentralPixel(self._sample) photutils-0.4/photutils/isophote/geometry.py0000644000214200020070000004327113175634532023705 0ustar lbradleySTSCI\science00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst from __future__ import (absolute_import, division, print_function, unicode_literals) import math import numpy as np from astropy import log __all__ = ['EllipseGeometry'] IN_MASK = [ [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], ] OUT_MASK = [ [1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1], [1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1], [1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1], [1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1], [0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0], [0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0], [0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0], [0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0], [0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0], [0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0], [0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0], [0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0], [1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1], [1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1], [1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1], [1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1], ] def _area(sma, eps, phi, r): """ Compute elliptical sector area. """ aux = r * math.cos(phi) / sma signal = aux / abs(aux) if abs(aux) >= 1.: aux = signal return abs(sma**2 * (1.-eps) / 2. * math.acos(aux)) class EllipseGeometry(object): """ Container class to store parameters for the geometry of an ellipse. Parameters that describe the relationship of a given ellipse with other associated ellipses are also encapsulated in this container. These associated ellipses may include, e.g., the two (inner and outer) bounding ellipses that are used to build sectors along the elliptical path. 
These sectors are used as areas for integrating pixel values, when the area integration mode (mean or median) is used. This class also keeps track of where in the ellipse we are when performing an 'extract' operation. This is mostly relevant when using an area integration mode (as opposed to a pixel integration mode) Parameters ---------- x0, y0 : float The center pixel coordinate of the ellipse. sma : float The semimajor axis of the ellipse in pixels. eps : ellipticity The ellipticity of the ellipse. pa : float The position angle (in radians) of the semimajor axis in relation to the postive x axis of the image array (rotating towards the positive y axis). Position angles are defined in the range :math:`0 < PA <= \\pi`. Avoid using as starting position angle of 0., since the fit algorithm may not work properly. When the ellipses are such that position angles are near either extreme of the range, noise can make the solution jump back and forth between successive isophotes, by amounts close to 180 degrees. astep : float, optional The step value for growing/shrinking the semimajor axis. It can be expressed either in pixels (when ``linear_growth=True``) or as a relative value (when ``linear_growth=False``). The default is 0.1. linear_growth : bool, optional The semimajor axis growing/shrinking mode. The default is `False`. """ def __init__(self, x0, y0, sma, eps, pa, astep=0.1, linear_growth=False): self.x0 = x0 self.y0 = y0 self.sma = sma self.eps = eps self.pa = pa self.astep = astep self.linear_growth = linear_growth # limits for sector angular width self._phi_min = 0.05 self._phi_max = 0.2 # variables used in the calculation of the sector angular width sma1, sma2 = self.bounding_ellipses() inner_sma = min((sma2 - sma1), 3.) self._area_factor = (sma2 - sma1) * inner_sma # sma can eventually be zero! if self.sma > 0.: self.sector_angular_width = max(min((inner_sma / self.sma), self._phi_max), self._phi_min) self.initial_polar_angle = self.sector_angular_width / 2. self.initial_polar_radius = self.radius(self.initial_polar_angle) def find_center(self, image, threshold=0.1, verbose=True): """ Find the center of a galaxy. If the algorithm is successful the (x, y) coordinates in this `~photutils.isophote.EllipseGeometry` (i.e. the ``x0`` and ``y0`` attributes) instance will be modified. The isophote fit algorithm requires an initial guess for the galaxy center (x, y) coordinates and these coordinates must be close to the actual galaxy center for the isophote fit to work. This method provides can provide an initial guess for the galaxy center coordinates. See the **Notes** section below for more details. Parameters ---------- image : 2D `~numpy.ndarray` The image array. Masked arrays are not recognized here. This assumes that centering should always be done on valid pixels. threshold : float, optional The centerer threshold. To turn off the centerer, set this to a large value (i.e. >> 1). The default is 0.1. verbose : bool, optional Whether to print object centering information. The default is `True`. Notes ----- The centerer function scans a 10x10 window centered on the (x, y) coordinates in the `~photutils.isophote.EllipseGeometry` instance passed to the constructor of the `~photutils.isophote.Ellipse` class. If any of the `~photutils.isophote.EllipseGeometry` (x, y) coordinates are `None`, the center of the input image frame is used. 
If the center acquisition is successful, the `~photutils.isophote.EllipseGeometry` instance is modified in place to reflect the solution of the object centerer algorithm. In some cases the object centerer algorithm may fail even though there is enough signal-to-noise to start a fit (e.g. objects with very high ellipticity). In those cases the sensitivity of the algorithm can be decreased by decreasing the value of the object centerer threshold parameter. The centerer works by looking where a quantity akin to a signal-to-noise ratio is maximized within the 10x10 window. The centerer can thus be shut off entirely by setting the threshold to a large value (i.e. >> 1; meaning no location inside the search window will achieve that signal-to-noise ratio). """ self._centerer_mask_half_size = len(IN_MASK) / 2 self.centerer_threshold = threshold # number of pixels in each mask sz = len(IN_MASK) self._centerer_ones_in = np.ma.masked_array(np.ones(shape=(sz, sz)), mask=IN_MASK) self._centerer_ones_out = np.ma.masked_array(np.ones(shape=(sz, sz)), mask=OUT_MASK) self._centerer_in_mask_npix = np.sum(self._centerer_ones_in) self._centerer_out_mask_npix = np.sum(self._centerer_ones_out) # Check if center coordinates point to somewhere inside the frame. # If not, set then to frame center. shape = image.shape _x0 = self.x0 _y0 = self.y0 if (_x0 is None or _x0 < 0 or _x0 >= shape[0] or _y0 is None or _y0 < 0 or _y0 >= shape[1]): _x0 = shape[0] / 2 _y0 = shape[1] / 2 max_fom = 0. max_i = 0 max_j = 0 # scan all positions inside window window_half_size = 5 for i in range(int(_x0 - window_half_size), int(_x0 + window_half_size) + 1): for j in range(int(_y0 - window_half_size), int(_y0 + window_half_size) + 1): # ensure that it stays inside image frame i1 = int(max(0, i - self._centerer_mask_half_size)) j1 = int(max(0, j - self._centerer_mask_half_size)) i2 = int(min(shape[0] - 1, i + self._centerer_mask_half_size)) j2 = int(min(shape[1] - 1, j + self._centerer_mask_half_size)) window = image[j1:j2, i1:i2] # averages in inner and outer regions. inner = np.ma.masked_array(window, mask=IN_MASK) outer = np.ma.masked_array(window, mask=OUT_MASK) inner_avg = np.sum(inner) / self._centerer_in_mask_npix outer_avg = np.sum(outer) / self._centerer_out_mask_npix # standard deviation and figure of merit inner_std = np.std(inner) outer_std = np.std(outer) stddev = np.sqrt(inner_std**2 + outer_std**2) fom = (inner_avg - outer_avg) / stddev if fom > max_fom: max_fom = fom max_i = i max_j = j # figure of merit > threshold: update geometry with new coordinates. if max_fom > threshold: self.x0 = float(max_i) self.y0 = float(max_j) if verbose: log.info("Found center at x0 = {0:5.1f}, y0 = {1:5.1f}" .format(self.x0, self.y0)) else: if verbose: log.info('Result is below the threshold -- keeping the ' 'original coordinates.') def radius(self, angle): """ Calculate the polar radius for a given polar angle. Parameters ---------- angle : float The polar angle (radians). Returns ------- radius : float The polar radius (pixels). """ return (self.sma * (1. - self.eps) / np.sqrt(((1. - self.eps) * np.cos(angle))**2 + (np.sin(angle))**2)) def initialize_sector_geometry(self, phi): """ Initialize geometry attributes associated with an elliptical sector at the given polar angle ``phi``. This function computes: * the four vertices that define the elliptical sector on the pixel array. 
* the sector area (saved in the ``sector_area`` attribute) * the sector angular width (saved in ``sector_angular_width`` attribute) Parameters ---------- phi : float The polar angle (radians) where the sector is located. Returns ------- x, y : 1D `~numpy.ndarray` The x and y coordinates of each vertex as 1D arrays. """ # These polar radii bound the region between the inner # and outer ellipses that define the sector. sma1, sma2 = self.bounding_ellipses() eps_ = 1. - self.eps # polar vector at one side of the elliptical sector self._phi1 = phi - self.sector_angular_width / 2. r1 = (sma1 * eps_ / math.sqrt((eps_ * math.cos(self._phi1))**2 + (math.sin(self._phi1))**2)) r2 = (sma2 * eps_ / math.sqrt((eps_ * math.cos(self._phi1))**2 + (math.sin(self._phi1))**2)) # polar vector at the other side of the elliptical sector self._phi2 = phi + self.sector_angular_width / 2. r3 = (sma2 * eps_ / math.sqrt((eps_ * math.cos(self._phi2))**2 + (math.sin(self._phi2))**2)) r4 = (sma1 * eps_ / math.sqrt((eps_ * math.cos(self._phi2))**2 + (math.sin(self._phi2))**2)) # sector area sa1 = _area(sma1, self.eps, self._phi1, r1) sa2 = _area(sma2, self.eps, self._phi1, r2) sa3 = _area(sma2, self.eps, self._phi2, r3) sa4 = _area(sma1, self.eps, self._phi2, r4) self.sector_area = abs((sa3 - sa2) - (sa4 - sa1)) # angular width of sector. It is calculated such that the sectors # come out with roughly constant area along the ellipse. self.sector_angular_width = max(min((self._area_factor / (r3 - r4) / r4), self._phi_max), self._phi_min) # compute the 4 vertices that define the elliptical sector. vertex_x = np.zeros(shape=4, dtype=float) vertex_y = np.zeros(shape=4, dtype=float) # vertices are labelled in counterclockwise sequence vertex_x[0:2] = np.array([r1, r2]) * math.cos(self._phi1 + self.pa) vertex_x[2:4] = np.array([r4, r3]) * math.cos(self._phi2 + self.pa) vertex_y[0:2] = np.array([r1, r2]) * math.sin(self._phi1 + self.pa) vertex_y[2:4] = np.array([r4, r3]) * math.sin(self._phi2 + self.pa) vertex_x += self.x0 vertex_y += self.y0 return vertex_x, vertex_y def bounding_ellipses(self): """ Compute the semimajor axis of the two ellipses that bound the annulus where integrations take place. Returns ------- sma1, sma2 : float The smaller and larger values of semimajor axis length that define the annulus bounding ellipses. """ if (self.linear_growth): a1 = self.sma - self.astep / 2. a2 = self.sma + self.astep / 2. else: a1 = self.sma * (1. - self.astep / 2.) a2 = self.sma * (1. + self.astep / 2.) return a1, a2 def polar_angle_sector_limits(self): """ Return the two polar angles that bound the sector. The two bounding polar angles become available only after calling the :meth:`~photutils.isophote.EllipseGeometry.initialize_sector_geometry` method. Returns ------- phi1, phi2 : float The smaller and larger values of polar angle that bound the current sector. """ return self._phi1, self._phi2 def to_polar(self, x, y): """ Return the radius and polar angle in the ellipse coordinate system given (x, y) pixel image coordinates. This function takes care of the different definitions for position angle (PA) and polar angle (phi): .. math:: -\\pi < PA < \\pi 0 < phi < 2 \\pi Note that radius can be anything. The solution is not tied to the semimajor axis length, but to the center position and tilt angle. Parameters ---------- x, y : float The (x, y) image coordinates. Returns ------- radius, angle : float The ellipse radius and polar angle. 
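        Example (an illustrative sketch; the geometry parameters below are
        arbitrary)::

            from photutils.isophote import EllipseGeometry

            geometry = EllipseGeometry(x0=50., y0=50., sma=20., eps=0.3,
                                       pa=0.5)
            radius, angle = geometry.to_polar(60., 55.)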
""" x1 = np.atleast_2d(x) - self.x0 y1 = np.atleast_2d(y) - self.y0 radius = x1**2 + y1**2 angle = np.ones(radius.shape) imask = (radius > 0.0) radius[imask] = np.sqrt(radius[imask]) angle[imask] = np.arcsin(np.abs(y1[imask]) / radius[imask]) radius[~imask] = 0. angle[~imask] = 1. idx = (x1 >= 0.) & (y1 < 0) angle[idx] = 2*np.pi - angle[idx] idx = (x1 < 0.) & (y1 >= 0.) angle[idx] = np.pi - angle[idx] idx = (x1 < 0.) & (y1 < 0.) angle[idx] = np.pi + angle[idx] pa1 = self.pa if self.pa < 0.: pa1 = self.pa + 2*np.pi angle = angle - pa1 angle[angle < 0] += 2*np.pi return radius, angle def update_sma(self, step): """ Calculate an updated value for the semimajor axis, given the current value and the step value. The step value must be managed by the caller to support both modes: grow outwards and shrink inwards. Parameters ---------- step : float The step value. Returns ------- sma : float The new semimajor axis length. """ if self.linear_growth: sma = self.sma + step else: sma = self.sma * (1. + step) return sma def reset_sma(self, step): """ Change the direction of semimajor axis growth, from outwards to inwards. Parameters ---------- step : float The current step value. Returns ------- sma, new_step : float The new semimajor axis length and the new step value to initiate the shrinking of the semimajor axis length. This is the step value that should be used when calling the :meth:`~photutils.isophote.EllipseGeometry.update_sma` method. """ if self.linear_growth: sma = self.sma - step step = -step else: aux = 1. / (1. + step) sma = self.sma * aux step = aux - 1. return sma, step photutils-0.4/photutils/isophote/harmonics.py0000644000214200020070000001026413175634532024031 0ustar lbradleySTSCI\science00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst from __future__ import (absolute_import, division, print_function, unicode_literals) import numpy as np __all__ = ['first_and_second_harmonic_function', 'fit_first_and_second_harmonics', 'fit_upper_harmonic'] def _least_squares_fit(optimize_func, parameters): # call the least squares fitting # function and handle the result. from scipy.optimize import leastsq solution = leastsq(optimize_func, parameters, full_output=True) if solution[4] > 4: raise RuntimeError("Error in least squares fit: " + solution[3]) # return coefficients and covariance matrix return (solution[0], solution[1]) def first_and_second_harmonic_function(phi, c): """ Compute the harmonic function value used to calculate the corrections for ellipse fitting. This function includes simultaneously both the first and second order harmonics: .. math:: f(phi) = c[0] + c[1]*\\sin(phi) + c[2]*\\cos(phi) + c[3]*\\sin(2*phi) + c[4]*\\cos(2*phi) Parameters ---------- phi : float or `~numpy.ndarray` The angle(s) along the elliptical path, going towards the positive y axis, starting coincident with the position angle. That is, the angles are defined from the semimajor axis that lies in the positive x quadrant. c : `~numpy.ndarray` of shape (5,) Array containing the five harmonic coefficients. Returns ------- result : float or `~numpy.ndarray` The function value(s) at the given input angle(s). """ return (c[0] + c[1]*np.sin(phi) + c[2]*np.cos(phi) + c[3]*np.sin(2*phi) + c[4]*np.cos(2*phi)) def fit_first_and_second_harmonics(phi, intensities): """ Fit the first and second harmonic function values to a set of (angle, intensity) pairs. This function is used to compute corrections for ellipse fitting: .. 
math:: f(phi) = y0 + a1*\\sin(phi) + b1*\\cos(phi) + a2*\\sin(2*phi) + b2*\\cos(2*phi) Parameters ---------- phi : float or `~numpy.ndarray` The angle(s) along the elliptical path, going towards the positive y axis, starting coincident with the position angle. That is, the angles are defined from the semimajor axis that lies in the positive x quadrant. intensities : `~numpy.ndarray` The intensities measured along the elliptical path, at the angles defined by the ``phi`` parameter. Returns ------- y0, a1, b1, a2, b2 : float The fitted harmonic coefficent values. """ a1 = b1 = a2 = b2 = 1. def optimize_func(x): return first_and_second_harmonic_function( phi, np.array([x[0], x[1], x[2], x[3], x[4]])) - intensities return _least_squares_fit(optimize_func, [np.mean(intensities), a1, b1, a2, b2]) def fit_upper_harmonic(phi, intensities, order): """ Fit upper harmonic function to a set of (angle, intensity) pairs. With ``order`` set to 3 or 4, the resulting amplitudes, divided by the semimajor axis length and local gradient, measure the deviations from perfect ellipticity. The harmonic function that is fit is: .. math:: y(phi, order) = y0 + An*\\sin(order*phi) + Bn*\\cos(order*phi) Parameters ---------- phi : float or `~numpy.ndarray` The angle(s) along the elliptical path, going towards the positive y axis, starting coincident with the position angle. That is, the angles are defined from the semimajor axis that lies in the positive x quadrant. intensities : `~numpy.ndarray` The intensities measured along the elliptical path, at the angles defined by the ``phi`` parameter. order : int The order of the harmonic to be fitted. Returns ------- y0, An, Bn : float The fitted harmonic values. """ an = bn = 1. def optimize_func(x): return (x[0] + x[1]*np.sin(order*phi) + x[2]*np.cos(order*phi) - intensities) return _least_squares_fit(optimize_func, [np.mean(intensities), an, bn]) photutils-0.4/photutils/isophote/integrator.py0000644000214200020070000002672113175634532024231 0ustar lbradleySTSCI\science00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst from __future__ import (absolute_import, division, print_function, unicode_literals) import math import numpy.ma as ma __all__ = ['integrators', 'NEAREST_NEIGHBOR', 'BILINEAR', 'MEAN', 'MEDIAN'] # integration modes NEAREST_NEIGHBOR = 'nearest_neighbor' BILINEAR = 'bilinear' MEAN = 'mean' MEDIAN = 'median' class _Integrator(object): """ Base class that supports different kinds of pixel integration methods. Parameters ---------- image : 2D `~numpy.ndarray` The image array. geometry : `~photutils.isophote.EllipseGeometry` instance object that encapsulates geometry information about current ellipse angles : list output list; contains the angle values along the elliptical path radii : list output list; contains the radius values along the elliptical path intensities : list output list; contains the extracted intensity values along the elliptical path """ def __init__(self, image, geometry, angles, radii, intensities): self._image = image self._geometry = geometry self._angles = angles self._radii = radii self._intensities = intensities # for bounds checking self._i_range = range(0, self._image.shape[0] - 1) self._j_range = range(0, self._image.shape[1] - 1) def integrate(self, radius, phi): """ The three input lists (angles, radii, intensities) are appended with one sample point taken from the image by a chosen integration method. Sub classes should implement the actual integration method. 
Parameters ---------- radius : float length of radius vector in pixels phi : float polar angle of radius vector """ raise NotImplementedError def _reset(self): """ Starts the results lists anew. This method is for internal use and shouldn't be used by external callers. """ self._angles = [] self._radii = [] self._intensities = [] def _store_results(self, phi, radius, sample): self._angles.append(phi) self._radii.append(radius) self._intensities.append(sample) def get_polar_angle_step(self): """ Returns the polar angle step used to walk over the elliptical path. The polar angle step is defined by the actual integrator subclass. Returns ------- float the polar angle step """ raise NotImplementedError def get_sector_area(self): """ Returns the area of elliptical sectors where the integration takes place. This area is defined and managed by the actual integrator subclass. Depending on the integrator, the area may be a fixed constant, or may change along the elliptical path, so it's up to the caller to use this information in a correct way. Returns ------- float the sector area """ raise NotImplementedError def is_area(self): """ Returns the type of the integrator. An area integrator gets it's value from operating over a (generally variable) number of pixels that define a finite area that lays around the elliptical path, at a certain point on the image defined by a polar angle and radius values. A pixel integrator, by contrast, integrates over a fixed and normally small area related to a single pixel on the image. An example is the bilinear integrator, which integrates over a small, fixed, 5-pixel area. This method checks if the integrator is of the first type or not. Returns ------- boolean True if this is an area integrator, False otherwise """ raise NotImplementedError class _NearestNeighborIntegrator(_Integrator): def integrate(self, radius, phi): self._r = radius # Get image coordinates of (radius, phi) pixel i = int(radius * math.cos(phi + self._geometry.pa) + self._geometry.x0) j = int(radius * math.sin(phi + self._geometry.pa) + self._geometry.y0) # ignore data point if outside image boundaries if (i in self._i_range) and (j in self._j_range): sample = self._image[j][i] if sample is not ma.masked: self._store_results(phi, radius, sample) def get_polar_angle_step(self): return 1. / self._r def get_sector_area(self): return 1. def is_area(self): return False class _BiLinearIntegrator(_Integrator): def integrate(self, radius, phi): self._r = radius # Get image coordinates of (radius, phi) pixel x_ = radius * math.cos(phi + self._geometry.pa) + self._geometry.x0 y_ = radius * math.sin(phi + self._geometry.pa) + self._geometry.y0 i = int(x_) j = int(y_) fx = x_ - i fy = y_ - j # ignore data point if outside image boundaries if (i in self._i_range) and (j in self._j_range): # in the future, will need to handle masked pixels here qx = 1. - fx qy = 1. - fy if (self._image[j][i] is not ma.masked and self._image[j+1][i] is not ma.masked and self._image[j][i+1] is not ma.masked and self._image[j+1][i+1] is not ma.masked): sample = (self._image[j][i] * qx * qy + self._image[j + 1][i] * qx * fy + self._image[j][i + 1] * fx * qy + self._image[j + 1][i + 1] * fy * fx) self._store_results(phi, radius, sample) def get_polar_angle_step(self): return 1. / self._r def get_sector_area(self): return 2. 
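    # The sample computed in integrate() above is a standard bilinear
    # interpolation of the four pixels surrounding the (x_, y_) position
    # on the elliptical path:
    #
    #   sample = I[j, i]         * (1 - fx) * (1 - fy)
    #          + I[j + 1, i]     * (1 - fx) * fy
    #          + I[j, i + 1]     * fx * (1 - fy)
    #          + I[j + 1, i + 1] * fx * fy
    #
    # where fx and fy are the fractional pixel offsets of the sample point;
    # the sample is skipped if any of the four pixels is masked.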
def is_area(self): return False class _AreaIntegrator(_Integrator): def __init__(self, image, geometry, angles, radii, intensities): super(_AreaIntegrator, self).__init__(image, geometry, angles, radii, intensities) # build auxiliary bilinear integrator to be used when # sector areas contain a too small number of valid pixels. self._bilinear_integrator = integrators[BILINEAR](image, geometry, angles, radii, intensities) def integrate(self, radius, phi): self._phi = phi # Get image coordinates of the four vertices of the elliptical sector. vertex_x, vertex_y = self._geometry.initialize_sector_geometry(phi) self._sector_area = self._geometry.sector_area # step in polar angle to be used by caller next time # when updating the current polar angle `phi` to point # to the next sector. self._phistep = self._geometry.sector_angular_width # define rectangular image area that encompasses the elliptical # sector. We have to account for rounding of pixel indices. i1 = int(min(vertex_x)) - 1 j1 = int(min(vertex_y)) - 1 i2 = int(max(vertex_x)) + 1 j2 = int(max(vertex_y)) + 1 # polar angle limits for this sector phi1, phi2 = self._geometry.polar_angle_sector_limits() # ignore data point if the elliptical sector lies # partially, ou totally, outside image boundaries if (i1 in self._i_range) and (j1 in self._j_range) and \ (i2 in self._i_range) and (j2 in self._j_range): # Scan rectangular image area, compute sample value. npix = 0 accumulator = self.initialize_accumulator() for j in range(j1, j2): for i in range(i1, i2): # Check if polar coordinates of each pixel # put it inside elliptical sector. rp, phip = self._geometry.to_polar(i, j) # check if inside angular limits if phip < phi2 and phip >= phi1: # check if radius is inside bounding ellipses sma1, sma2 = self._geometry.bounding_ellipses() aux = ((1. - self._geometry.eps) / math.sqrt(((1. - self._geometry.eps) * math.cos(phip))**2 + (math.sin(phip))**2)) r1 = sma1 * aux r2 = sma2 * aux if rp < r2 and rp >= r1: # update accumulator with pixel value pix_value = self._image[j][i] if pix_value is not ma.masked: accumulator, npix = self.accumulate( pix_value, accumulator) # If 6 or less pixels were sampled, get the bilinear # interpolated value instead. if npix in range(0, 7): # must reset integrator to remove older samples. self._bilinear_integrator._reset() self._bilinear_integrator.integrate(radius, phi) # because it was reset, current value is the only one stored # internally in the bilinear integrator instance. Move it # from the internal integrator to this instance. if len(self._bilinear_integrator._intensities) > 0: sample_value = self._bilinear_integrator._intensities[0] self._store_results(phi, radius, sample_value) elif npix > 6: sample_value = self.compute_sample_value(accumulator) self._store_results(phi, radius, sample_value) def get_polar_angle_step(self): phi1, phi2 = self._geometry.polar_angle_sector_limits() phistep = self._geometry.sector_angular_width / 2. + phi2 - self._phi return phistep def get_sector_area(self): return self._sector_area def is_area(self): return True def initialize_accumulator(self): raise NotImplementedError def accumulate(self, pixel_value, accumulator): raise NotImplementedError def compute_sample_value(self, accumulator): raise NotImplementedError class _MeanIntegrator(_AreaIntegrator): def initialize_accumulator(self): accumulator = 0. 
self._npix = 0 return accumulator def accumulate(self, pixel_value, accumulator): accumulator += pixel_value self._npix += 1 return accumulator, self._npix def compute_sample_value(self, accumulator): return accumulator / self._npix class _MedianIntegrator(_AreaIntegrator): def initialize_accumulator(self): accumulator = [] self._npix = 0 return accumulator def accumulate(self, pixel_value, accumulator): accumulator.append(pixel_value) self._npix += 1 return accumulator, self._npix def compute_sample_value(self, accumulator): accumulator.sort() return accumulator[int(self._npix/2)] # Specific integrator subclasses can be instantiated from here. integrators = { NEAREST_NEIGHBOR: _NearestNeighborIntegrator, BILINEAR: _BiLinearIntegrator, MEAN: _MeanIntegrator, MEDIAN: _MedianIntegrator } photutils-0.4/photutils/isophote/isophote.py0000644000214200020070000006045013175634532023702 0ustar lbradleySTSCI\science00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst from __future__ import (absolute_import, division, print_function, unicode_literals) from collections import OrderedDict import numpy as np from astropy.table import QTable import astropy.units as u from .harmonics import (fit_first_and_second_harmonics, first_and_second_harmonic_function, fit_upper_harmonic) __all__ = ['Isophote', 'IsophoteList'] class Isophote(object): """ Container class to store the results of single isophote fit. The extracted data sample at the given isophote (sampled intensities along the elliptical path on the image) is also kept as an attribute of this class. The container concept helps in segregating information directly related to the sample, from information that more closely relates to the fitting process, such as status codes, errors for isophote parameters, and the like. Parameters ---------- sample : `~photutils.isophote.EllipseSample` instance The sample information. niter : int The number of iterations used to fit the isophote. valid : bool The status of the fitting operation. stop_code : int The fitting stop code: * 0: Normal. * 1: Fewer than the pre-specified fraction of the extracted data points are valid. * 2: Exceeded maximum number of iterations. * 3: Singular matrix in harmonic fit, results may not be valid. This also signals an insufficient number of data points to fit. * 4: Small or wrong gradient, or ellipse diverged. Subsequent ellipses at larger or smaller semimajor axis may have the same constant geometric parameters. It's also used when the user turns off the fitting algorithm via the ``maxrit`` fitting parameter (see the `~photutils.isophote.Ellipse` class). * 5: Ellipse diverged; not even the minimum number of iterations could be executed. Subsequent ellipses at larger or smaller semimajor axis may have the same constant geometric parameters. * -1: Internal use. Attributes ---------- rms : float The root-mean-square of intensity values along the elliptical path. int_err : float The error of the mean (rms / sqrt(# data points)). ellip_err : float The ellipticity error. pa_err : float The position angle error (radians). x0_err : float The error associated with the center x coordinate. y0_err : float The error associated with the center y coordinate. pix_stddev : float The estimate of pixel standard deviation (rms * sqrt(average sector integration area)). grad : float The local radial intensity gradient. grad_error : float The measurement error of the local radial intensity gradient. grad_r_error : float The relative error of local radial intensity gradient. 
tflux_e : float The sum of all pixels inside the ellipse. npix_e : int The total number of valid pixels inside the ellipse. tflux_c : float The sum of all pixels inside a circle with the same ``sma`` as the ellipse. npix_c : int The total number of valid pixels inside a circle with the same ``sma`` as the ellipse. sarea : float The average sector area on the isophote (pixel). ndata : int The number of extracted data points. nflag : int The number of discarded data points. Data points can be discarded either because they are physically outside the image frame boundaries, because they were rejected by sigma-clipping, or they are masked. a3, b3, a4, b4 : float The higher order harmonics that measure the deviations from a perfect ellipse. These values are actually the raw harmonic amplitudes divided by the local radial gradient and the semimajor axis length, so they can directly be compared with each other. a3_err, b3_err, a4_err, b4_err : float The errors associated with the ``a3``, ``b3``, ``a4``, and ``b4`` attributes. """ def __init__(self, sample, niter, valid, stop_code): self.sample = sample self.niter = niter self.valid = valid self.stop_code = stop_code self.intens = sample.mean self.rms = np.std(sample.values[2]) self.int_err = self.rms / np.sqrt(sample.actual_points) self.pix_stddev = self.rms * np.sqrt(sample.sector_area) self.grad = sample.gradient self.grad_error = sample.gradient_error self.grad_r_error = sample.gradient_relative_error self.sarea = sample.sector_area self.ndata = sample.actual_points self.nflag = sample.total_points - sample.actual_points # flux contained inside ellipse and circle (self.tflux_e, self.tflux_c, self.npix_e, self.npix_c) = self._compute_fluxes() self._compute_errors() # deviations from a perfect ellipse (self.a3, self.b3, self.a3_err, self.b3_err) = self._compute_deviations(sample, 3) (self.a4, self.b4, self.a4_err, self.b4_err) = self._compute_deviations(sample, 4) # This method is useful for sorting lists of instances. Note # that __lt__ is the python3 way of supporting sorting. This might # not work under python2. def __lt__(self, other): if hasattr(other, 'sma'): return self.sma < other.sma def __str__(self): return str(self.to_table()) @property def sma(self): """The semimajor axis length (pixels).""" return self.sample.geometry.sma @property def eps(self): """The ellipticity of the ellipse.""" return self.sample.geometry.eps @property def pa(self): """The position angle (radians) of the ellipse.""" return self.sample.geometry.pa @property def x0(self): """The center x coordinate (pixel).""" return self.sample.geometry.x0 @property def y0(self): """The center y coordinate (pixel).""" return self.sample.geometry.y0 def _compute_fluxes(self): """ Compute integrated flux inside ellipse, as well as inside a circle defined with the same semimajor axis. Pixels in a square section enclosing circle are scanned; the distance of each pixel to the isophote center is compared both with the semimajor axis length and with the length of the ellipse radius vector, and integrals are updated if the pixel distance is smaller. """ # Compute limits of square array that encloses circle. 
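        # The square region extends roughly one pixel beyond ``sma`` on
        # each side of (x0, y0) and is clipped to the image boundaries.
        # Inside it, each pixel's polar radius about the center is compared
        # with ``sma`` (circle totals: tflux_c, npix_c) and with the
        # ellipse radius at the same polar angle (ellipse totals: tflux_e,
        # npix_e).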
sma = self.sample.geometry.sma x0 = self.sample.geometry.x0 y0 = self.sample.geometry.y0 xsize = self.sample.image.shape[1] ysize = self.sample.image.shape[0] imin = max(0, int(x0 - sma - 0.5) - 1) jmin = max(0, int(y0 - sma - 0.5) - 1) imax = min(xsize, int(x0 + sma + 0.5) + 1) jmax = min(ysize, int(y0 + sma + 0.5) + 1) # Integrate if (jmax-jmin > 1) and (imax-imin) > 1: y, x = np.mgrid[jmin:jmax, imin:imax] radius, angle = self.sample.geometry.to_polar(x, y) radius_e = self.sample.geometry.radius(angle) midx = (radius <= sma) values = self.sample.image[y[midx], x[midx]] tflux_c = np.ma.sum(values) npix_c = np.ma.count(values) midx2 = (radius <= radius_e) values = self.sample.image[y[midx2], x[midx2]] tflux_e = np.ma.sum(values) npix_e = np.ma.count(values) else: tflux_e = 0. tflux_c = 0. npix_e = 0 npix_c = 0 return tflux_e, tflux_c, npix_e, npix_c def _compute_deviations(self, sample, n): """ Compute deviations from a perfect ellipse, based on the amplitudes and errors for harmonic "n". Note that we first subtract the first and second harmonics from the raw data. """ try: coeffs = fit_first_and_second_harmonics(self.sample.values[0], self.sample.values[2]) coeffs = coeffs[0] model = first_and_second_harmonic_function(self.sample.values[0], coeffs) residual = self.sample.values[2] - model c = fit_upper_harmonic(residual, sample.values[2], n) covariance = c[1] ce = np.diagonal(covariance) c = c[0] a = c[1] / self.sma / sample.gradient b = c[2] / self.sma / sample.gradient # this comes from the old code. Likely it was based on # empirical experience with the STSDAS task, so we leave # it here without too much thought. gre = self.grad_r_error if self.grad_r_error is not None else 0.64 a_err = abs(a) * np.sqrt((ce[1] / c[1])**2 + gre**2) b_err = abs(b) * np.sqrt((ce[2] / c[2])**2 + gre**2) except Exception as e: # we want to catch everything a = b = a_err = b_err = None return a, b, a_err, b_err def _compute_errors(self): """ Compute parameter errors based on the diagonal of the covariance matrix of the four harmonic coefficients for harmonics n=1 and n=2. """ try: coeffs = fit_first_and_second_harmonics(self.sample.values[0], self.sample.values[2]) covariance = coeffs[1] coeffs = coeffs[0] model = first_and_second_harmonic_function(self.sample.values[0], coeffs) residual_rms = np.std(self.sample.values[2] - model) errors = np.diagonal(covariance) * residual_rms eps = self.sample.geometry.eps pa = self.sample.geometry.pa # parameter errors result from direct projection of # coefficient errors. These showed to be the error estimators # that best convey the errors measured in Monte Carlo # experiments (see Busko 1996; ASPC 101, 139). ea = abs(errors[2] / self.grad) eb = abs(errors[1] * (1. - eps) / self.grad) self.x0_err = np.sqrt((ea * np.cos(pa))**2 + (eb * np.sin(pa))**2) self.y0_err = np.sqrt((ea * np.sin(pa))**2 + (eb * np.cos(pa))**2) self.ellip_err = (abs(2. * errors[4] * (1. - eps) / self.sma / self.grad)) if (abs(eps) > np.finfo(float).resolution): self.pa_err = (abs(2. * errors[3] * (1. - eps) / self.sma / self.grad / (1. - (1. - eps)**2))) else: self.pa_err = 0. except Exception as e: # we want to catch everything self.x0_err = self.y0_err = self.pa_err = self.ellip_err = 0. def fix_geometry(self, isophote): """ Fix the geometry of a problematic isophote to be identical to the input isophote. This method should be called when the fitting goes berserk and delivers an isophote with bad geometry, such as ellipticity > 1 or another meaningless situation. 
This is not a problem in itself when fitting any given isophote, but will create an error when the affected isophote is used as starting guess for the next fit. Parameters ---------- isophote : `~photutils.isophote.Isophote` instance The isophote from which to take the geometry information. """ self.sample.geometry.eps = isophote.sample.geometry.eps self.sample.geometry.pa = isophote.sample.geometry.pa self.sample.geometry.x0 = isophote.sample.geometry.x0 self.sample.geometry.y0 = isophote.sample.geometry.y0 def sampled_coordinates(self): """ Return the (x, y) coordinates where the image was sampled in order to get the intensities associated with this isophote. Returns ------- x, y : 1D `~numpy.ndarray` The x and y coordinates as 1D arrays. """ return self.sample.coordinates() def to_table(self): """ Return the main isophote parameters as an astropy `~astropy.table.QTable`. Returns ------- result : `~astropy.table.QTable` An astropy `~astropy.table.QTable` containing the main isophote paramters. """ return _isophote_list_to_table([self]) class CentralPixel(Isophote): """ Specialized Isophote class for the galaxy central pixel. This class holds only a single intensity value at the central position. Thus, most of its attributes are hardcoded to `None` or a default value when appropriate. Parameters ---------- sample : `~photutils.utils.EllipseSample` instance The sample information. """ def __init__(self, sample): self.sample = sample self.niter = 0 self.valid = True self.stop_code = 0 self.intens = sample.mean # some values are set to zero to ease certain tasks # such as model building and plotting magnitude errors self.rms = None self.int_err = 0.0 self.pix_stddev = None self.grad = 0.0 self.grad_error = None self.grad_r_error = None self.sarea = None self.ndata = sample.actual_points self.nflag = sample.total_points - sample.actual_points self.tflux_e = self.tflux_c = self.npix_e = self.npix_c = None self.a3 = self.b3 = 0.0 self.a4 = self.b4 = 0.0 self.a3_err = self.b3_err = 0.0 self.a4_err = self.b4_err = 0.0 self.ellip_err = 0. self.pa_err = 0. self.x0_err = 0. self.y0_err = 0. @property def eps(self): return 0. @property def pa(self): return 0. @property def x0(self): return self.sample.geometry.x0 class IsophoteList(Isophote, list): """ Container class that provides the same attributes as the `~photutils.isophote.Isophote` class, but for a list of isophotes. The attributes of this class are arrays representing the values of the attributes for the entire list of `~photutils.isophote.Isophote` instances. See the `~photutils.isophote.Isophote` class for a description of the attributes. The class extends the `list` functionality, thus provides basic list behavior such as slicing, appending, and support for '+' and '+=' operators. Parameters ---------- iso_list : list of `~photutils.isophote.Isophote` A list of `~photutils.isophote.Isophote` instances. 
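    An illustrative sketch of typical usage (``iso_list`` is assumed to be
    a list of fitted `~photutils.isophote.Isophote` instances)::

        from photutils.isophote import IsophoteList

        isolist = IsophoteList(iso_list)
        isolist.sort()              # order by semimajor axis length
        smas = isolist.sma          # array of semimajor axis lengths
        intensities = isolist.intens
        table = isolist.to_table()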
""" def __init__(self, iso_list): self._list = iso_list def __len__(self): return len(self._list) def __delitem__(self, index): self._list.__delitem__(index) def __setitem__(self, index, value): self._list.__setitem__(index, value) def __getitem__(self, index): if isinstance(index, slice): return IsophoteList(self._list[index]) return self._list.__getitem__(index) # need to override this method for py2.7 in derived list classes # even though it has been deprecated since py2.0 def __getslice__(self, i, j): return self.__getitem__(slice(i, j)) def __iter__(self): return self._list.__iter__() def sort(self): self._list.sort() def insert(self, index, value): self._list.insert(index, value) def append(self, value): self.insert(len(self) + 1, value) def extend(self, value): self._list.extend(value._list) def __iadd__(self, value): self.extend(value) return self def __add__(self, value): temp = self._list[:] # shallow copy temp.extend(value._list) return IsophoteList(temp) def get_closest(self, sma): """ Return the `~photutils.isophote.Isophote` instance that has the closest semimajor axis length to the input semimajor axis. Parameters ---------- sma : float The semimajor axis length. Returns ------- isophote : `~photutils.isophote.Isophote` instance The isophote with the closest semimajor axis value. """ index = (np.abs(self.sma - sma)).argmin() return self._list[index] def _collect_as_array(self, attr_name): return np.array(self._collect_as_list(attr_name), dtype=np.float64) def _collect_as_list(self, attr_name): return [getattr(iso, attr_name) for iso in self._list] @property def sample(self): """ The isophote `~photutils.isophote.EllipseSample` information. """ return self._collect_as_list('sample') @property def sma(self): """The semimajor axis length (pixels).""" return self._collect_as_array('sma') @property def intens(self): """The mean intensity value along the elliptical path.""" return self._collect_as_array('intens') @property def int_err(self): """The error of the mean intensity (rms / sqrt(# data points)).""" return self._collect_as_array('int_err') @property def eps(self): """The ellipticity of the ellipse.""" return self._collect_as_array('eps') @property def ellip_err(self): """The ellipticity error.""" return self._collect_as_array('ellip_err') @property def pa(self): """The position angle (radians) of the ellipse.""" return self._collect_as_array('pa') @property def pa_err(self): """The position angle error (radians).""" return self._collect_as_array('pa_err') @property def x0(self): """The center x coordinate (pixel).""" return self._collect_as_array('x0') @property def x0_err(self): """The error associated with the center x coordinate.""" return self._collect_as_array('x0_err') @property def y0(self): """The center y coordinate (pixel).""" return self._collect_as_array('y0') @property def y0_err(self): """The error associated with the center y coordinate.""" return self._collect_as_array('y0_err') @property def rms(self): """ The root-mean-square of intensity values along the elliptical path. """ return self._collect_as_array('rms') @property def pix_stddev(self): """ The estimate of pixel standard deviation (rms * sqrt(average sector integration area)). """ return self._collect_as_array('pix_stddev') @property def grad(self): """The local radial intensity gradient.""" return self._collect_as_array('grad') @property def grad_error(self): """ The measurement error of the local radial intensity gradient. 
""" return self._collect_as_array('grad_error') @property def grad_r_error(self): """ The relative error of local radial intensity gradient. """ return self._collect_as_array('grad_r_error') @property def sarea(self): """The average sector area on the isophote (pixel).""" return self._collect_as_array('sarea') @property def ndata(self): """The number of extracted data points.""" return self._collect_as_array('ndata') @property def nflag(self): """ The number of discarded data points. Data points can be discarded either because they are physically outside the image frame boundaries, because they were rejected by sigma-clipping, or they are masked. """ return self._collect_as_array('nflag') @property def niter(self): """The number of iterations used to fit the isophote.""" return self._collect_as_array('niter') @property def valid(self): """The status of the fitting operation.""" return self._collect_as_array('valid') @property def stop_code(self): """The fitting stop code.""" return self._collect_as_array('stop_code') @property def tflux_e(self): """The sum of all pixels inside the ellipse.""" return self._collect_as_array('tflux_e') @property def tflux_c(self): """ The sum of all pixels inside a circle with the same ``sma`` as the ellipse. """ return self._collect_as_array('tflux_c') @property def npix_e(self): """The total number of valid pixels inside the ellipse.""" return self._collect_as_array('npix_e') @property def npix_c(self): """ The total number of valid pixels inside a circle with the same ``sma`` as the ellipse. """ return self._collect_as_array('npix_c') @property def a3(self): """ A third-order harmonic coefficent. See the :func:`~photutils.isophote.fit_upper_harmonic` function for details. """ return self._collect_as_array('a3') @property def b3(self): """ A third-order harmonic coefficent. See the :func:`~photutils.isophote.fit_upper_harmonic` function for details. """ return self._collect_as_array('b3') @property def a4(self): """ A fourth-order harmonic coefficent. See the :func:`~photutils.isophote.fit_upper_harmonic` function for details. """ return self._collect_as_array('a4') @property def b4(self): """ A fourth-order harmonic coefficent. See the :func:`~photutils.isophote.fit_upper_harmonic` function for details. """ return self._collect_as_array('b4') @property def a3_err(self): """ The error associated with `~photutils.isophote.IsophoteList.a3`. """ return self._collect_as_array('a3_err') @property def b3_err(self): """ The error associated with `~photutils.isophote.IsophoteList.b3`. """ return self._collect_as_array('b3_err') @property def a4_err(self): """ The error associated with `~photutils.isophote.IsophoteList.a4`. """ return self._collect_as_array('a4_err') @property def b4_err(self): """ The error associated with `~photutils.isophote.IsophoteList.b3`. """ return self._collect_as_array('b4_err') def to_table(self): """ Convert an `~photutils.isophote.IsophoteList` instance to a `~astropy.table.QTable` with the main isophote parameters. Returns ------- result : `~astropy.table.QTable` An astropy QTable with the main isophote parameters. """ return _isophote_list_to_table(self) def _isophote_list_to_table(isophote_list): """ Convert an `~photutils.isophote.IsophoteList` instance to a `~astropy.table.QTable`. Parameters ---------- isophote_list : list of `~photutils.isophote.Isophote` or a `~photutils.isophote.IsophoteList` instance A list of isophotes. Returns ------- result : `~astropy.table.QTable` An astropy QTable with the main isophote parameters. 
""" properties = OrderedDict() properties['sma'] = 'sma' properties['intens'] = 'intens' properties['int_err'] = 'intens_err' properties['eps'] = 'ellipticity' properties['ellip_err'] = 'ellipticity_err' properties['pa'] = 'pa' properties['pa_err'] = 'pa_err' properties['grad_r_error'] = 'grad_rerr' properties['ndata'] = 'ndata' properties['nflag'] = 'flag' properties['niter'] = 'niter' properties['stop_code'] = 'stop_code' isotable = QTable() for k, v in properties.items(): isotable[v] = np.array([getattr(iso, k) for iso in isophote_list]) if k in ('pa', 'pa_err'): isotable[v] = isotable[v] * 180. / np.pi * u.deg return isotable photutils-0.4/photutils/isophote/model.py0000644000214200020070000001400113175634532023137 0ustar lbradleySTSCI\science00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst from __future__ import (absolute_import, division, print_function, unicode_literals) import numpy as np from .geometry import EllipseGeometry __all__ = ['build_ellipse_model'] def build_ellipse_model(shape, isolist, fill=0., high_harmonics=False): """ Build an elliptical model galaxy image from a list of isophotes. For each ellipse in the input isophote list the algorithm fills the output image array with the corresponding isophotal intensity. Pixels in the output array are in general only partially covered by the isophote "pixel". The algorithm takes care of this partial pixel coverage by keeping track of how much intensity was added to each pixel by storing the partial area information in an auxiliary array. The information in this array is then used to normalize the pixel intensities. Parameters ---------- shape : 2-tuple The (ny, nx) shape of the array used to generate the input ``isolist``. isolist : `~photutils.isophote.IsophoteList` instance The isophote list created by the `~photutils.isophote.Ellipse` class. fill : float, optional The constant value to fill empty pixels. If an output pixel has no contribution from any isophote, it will be assigned this value. The default is 0. high_harmonics : bool, optional Whether to add the higher-order harmonics (i.e. ``a3``, ``b3``, ``a4``, and ``b4``; see `~photutils.isophote.Isophote` for details) to the result. Returns ------- result : 2D `~numpy.ndarray` The image with the model galaxy. """ from scipy.interpolate import LSQUnivariateSpline # the target grid is spaced in 0.1 pixel intervals so as # to ensure no gaps will result on the output array. finely_spaced_sma = np.arange(isolist[0].sma, isolist[-1].sma, 0.1) # interpolate ellipse parameters # End points must be discarded, but how many? 
    # This seems to work so far
    nodes = isolist.sma[2:-2]

    intens_array = LSQUnivariateSpline(
        isolist.sma, isolist.intens, nodes)(finely_spaced_sma)
    eps_array = LSQUnivariateSpline(
        isolist.sma, isolist.eps, nodes)(finely_spaced_sma)
    pa_array = LSQUnivariateSpline(
        isolist.sma, isolist.pa, nodes)(finely_spaced_sma)
    x0_array = LSQUnivariateSpline(
        isolist.sma, isolist.x0, nodes)(finely_spaced_sma)
    y0_array = LSQUnivariateSpline(
        isolist.sma, isolist.y0, nodes)(finely_spaced_sma)
    grad_array = LSQUnivariateSpline(
        isolist.sma, isolist.grad, nodes)(finely_spaced_sma)
    a3_array = LSQUnivariateSpline(
        isolist.sma, isolist.a3, nodes)(finely_spaced_sma)
    b3_array = LSQUnivariateSpline(
        isolist.sma, isolist.b3, nodes)(finely_spaced_sma)
    a4_array = LSQUnivariateSpline(
        isolist.sma, isolist.a4, nodes)(finely_spaced_sma)
    b4_array = LSQUnivariateSpline(
        isolist.sma, isolist.b4, nodes)(finely_spaced_sma)

    # Return deviations from ellipticity to their original amplitude meaning
    a3_array = -a3_array * grad_array * finely_spaced_sma
    b3_array = -b3_array * grad_array * finely_spaced_sma
    a4_array = -a4_array * grad_array * finely_spaced_sma
    b4_array = -b4_array * grad_array * finely_spaced_sma

    # correct deviations caused by fluctuations in spline solution
    eps_array[np.where(eps_array < 0.)] = 0.

    result = np.zeros(shape=shape)
    weight = np.zeros(shape=shape)

    eps_array[np.where(eps_array < 0.)] = 0.05

    # for each interpolated isophote, generate intensity values on the
    # output image array
    # for index in range(len(finely_spaced_sma)):
    for index in range(1, len(finely_spaced_sma)):
        sma0 = finely_spaced_sma[index]
        eps = eps_array[index]
        pa = pa_array[index]
        x0 = x0_array[index]
        y0 = y0_array[index]
        geometry = EllipseGeometry(x0, y0, sma0, eps, pa)
        intens = intens_array[index]

        # scan angles. Need to go a bit beyond full circle to ensure
        # full coverage.
        r = sma0
        phi = 0.
        while (phi <= 2*np.pi + geometry._phi_min):
            # we might want to add the third and fourth harmonics
            # to the basic isophotal intensity.
            harm = 0.
            if high_harmonics:
                harm = (a3_array[index] * np.sin(3.*phi) +
                        b3_array[index] * np.cos(3.*phi) +
                        a4_array[index] * np.sin(4.*phi) +
                        b4_array[index] * np.cos(4.*phi) / 4.)

            # get image coordinates of (r, phi) pixel
            x = r * np.cos(phi + pa) + x0
            y = r * np.sin(phi + pa) + y0
            i = int(x)
            j = int(y)

            # if outside image boundaries, ignore.
            if (i > 0 and i < shape[0] - 1 and j > 0 and j < shape[1] - 1):
                # get fractional deviations relative to target array
                fx = x - float(i)
                fy = y - float(j)

                # add up the isophote contribution to the overlapping pixels
                result[j, i] += (intens + harm) * (1. - fy) * (1. - fx)
                result[j, i + 1] += (intens + harm) * (1. - fy) * fx
                result[j + 1, i] += (intens + harm) * fy * (1. - fx)
                result[j + 1, i + 1] += (intens + harm) * fy * fx

                # add up the fractional area contribution to the
                # overlapping pixels
                weight[j, i] += (1. - fy) * (1. - fx)
                weight[j, i + 1] += (1. - fy) * fx
                weight[j + 1, i] += fy * (1. - fx)
                weight[j + 1, i + 1] += fy * fx

            # step towards next pixel on ellipse
            phi = max((phi + 0.75 / r), geometry._phi_min)
            r = geometry.radius(phi)

    # zero weight values must be set to 1.
    weight[np.where(weight <= 0.)] = 1.
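
    # At this point each pixel of ``result`` holds the sum of
    # (intensity * fractional area) over all isophote points that touched
    # it, and ``weight`` holds the sum of those fractional areas.  As an
    # illustrative example (made-up numbers), a pixel reached by two points
    # with fractional areas 0.6 and 0.4 and intensities 10. and 12. ends
    # up, after the normalization below, with
    # (0.6*10. + 0.4*12.) / (0.6 + 0.4) = 10.8, i.e. an area-weighted mean
    # intensity.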
    # normalize
    result /= weight

    # fill value
    result[np.where(result == 0.)] = fill

    return result
photutils-0.4/photutils/isophote/sample.py0000644000214200020070000003646513175634532023332 0ustar lbradleySTSCI\science00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import (absolute_import, division, print_function,
                        unicode_literals)

import copy

import numpy as np

from .geometry import EllipseGeometry
from .integrator import integrators


__all__ = ['EllipseSample']


class EllipseSample(object):
    """
    Class to sample image data along an elliptical path.

    The image intensities along the elliptical path can be extracted
    using a selection of integration algorithms.

    The ``geometry`` attribute describes the geometry of the elliptical
    path.

    Parameters
    ----------
    image : 2D `~numpy.ndarray`
        The input image.
    sma : float
        The semimajor axis length in pixels.
    x0, y0 : float, optional
        The (x, y) coordinate of the ellipse center.
    astep : float, optional
        The step value for growing/shrinking the semimajor axis.  It can
        be expressed either in pixels (when ``linear_growth=True``) or as
        a relative value (when ``linear_growth=False``).  The default is
        0.1.
    eps : float, optional
        The ellipticity of the ellipse.  The default is 0.2.
    position_angle : float, optional
        The position angle of the ellipse in relation to the positive x
        axis of the image array (rotating towards the positive y axis).
        The default is 0.
    sclip : float, optional
        The sigma-clip sigma value.  The default is 3.0.
    nclip : int, optional
        The number of sigma-clip iterations.  Set to zero to skip
        sigma-clipping.  The default is 0.
    linear_growth : bool, optional
        The semimajor axis growing/shrinking mode.  The default is
        `False`.
    integrmode : {'bilinear', 'nearest_neighbor', 'mean', 'median'}, optional
        The area integration mode.  The default is 'bilinear'.
    geometry : `~photutils.isophote.EllipseGeometry` instance or `None`
        The geometry that describes the ellipse.  This can be used in
        lieu of the explicit specification of parameters ``sma``, ``x0``,
        ``y0``, ``eps``, etc.  In any case, the
        `~photutils.isophote.EllipseGeometry` instance becomes an
        attribute of the `~photutils.isophote.EllipseSample` object.  The
        default is `None`.

    Attributes
    ----------
    values : 2D `~numpy.ndarray`
        The sampled values as a 2D array, where the rows contain the
        angles, radii, and extracted intensity values, respectively.
    mean : float
        The mean intensity along the elliptical path.
    geometry : `~photutils.isophote.EllipseGeometry` instance
        The geometry of the elliptical path.
    gradient : float
        The local radial intensity gradient.
    gradient_error : float
        The error associated with the local radial intensity gradient.
    gradient_relative_error : float
        The relative error associated with the local radial intensity
        gradient.
    sector_area : float
        The average area of the sectors along the elliptical path from
        which the sample values were integrated.
    total_points : int
        The total number of sample values that would cover the entire
        elliptical path.
    actual_points : int
        The actual number of sample values that were taken from the
        image.  It can be smaller than ``total_points`` when the ellipse
        encompasses regions outside the image, or when sigma-clipping
        removed some of the points.
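
    Examples
    --------
    A minimal usage sketch (``data`` is a placeholder for a 2D image
    array that has already been loaded; the numbers are arbitrary)::

        from photutils.isophote import EllipseSample

        sample = EllipseSample(data, sma=20., eps=0.3, position_angle=0.5)
        angles, radii, intensities = sample.extract()
        sample.update()
        print(sample.mean, sample.gradient)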
""" def __init__(self, image, sma, x0=None, y0=None, astep=0.1, eps=0.2, position_angle=0., sclip=3., nclip=0, linear_growth=False, integrmode='bilinear', geometry=None): self.image = image self.integrmode = integrmode if geometry: # when the geometry is inherited from somewhere else, # its sma attribute must be replaced by the value # explicitly passed to the constructor. self.geometry = copy.deepcopy(geometry) self.geometry.sma = sma else: # if no center was specified, assume it's roughly # coincident with the image center _x0 = x0 _y0 = y0 if not _x0 or not _y0: _x0 = image.shape[0] / 2 _y0 = image.shape[1] / 2 self.geometry = EllipseGeometry(_x0, _y0, sma, eps, position_angle, astep, linear_growth) # sigma-clip parameters self.sclip = sclip self.nclip = nclip # extracted values associated with this sample. self.values = None self.mean = None self.gradient = None self.gradient_error = None self.gradient_relative_error = None self.sector_area = None # total_points reports the total number of pairs angle-radius that # were attempted. actual_points reports the actual number of sampled # pairs angle-radius that resulted in valid values. self.total_points = 0 self.actual_points = 0 def extract(self): """ Extract sample data by scanning an elliptical path over the image array. Returns ------- result : 2D `~numpy.ndarray` The rows of the array contain the angles, radii, and extracted intensity values, respectively. """ # the sample values themselves are kept cached to prevent # multiple calls to the integrator code. if self.values is not None: return self.values else: s = self._extract() self.values = s return s def _extract(self, phi_min=0.05): # Here the actual sampling takes place. This is called only once # during the life of an EllipseSample instance, because it's an # expensive calculation. This method should not be called from # external code. # If one wants to force it to re-run, then do: # # sample.values = None # # before calling sample.extract() # individual extracted sample points will be stored in here angles = [] radii = [] intensities = [] sector_areas = [] # reset counters self.total_points = 0 self.actual_points = 0 # build integrator integrator = integrators[self.integrmode](self.image, self.geometry, angles, radii, intensities) # initialize walk along elliptical path radius = self.geometry.initial_polar_radius phi = self.geometry.initial_polar_angle # In case of an area integrator, ask the integrator to deliver a # hint of how much area the sectors will have. In case of too # small areas, tests showed that the area integrators (mean, # median) won't perform properly. In that case, we override the # caller's selection and use the bilinear integrator regardless. if integrator.is_area(): integrator.integrate(radius, phi) area = integrator.get_sector_area() # this integration that just took place messes up with the # storage arrays and the constructors. We have to build a new # integrator instance from scratch, even if it is the same # kind as originally selected by the caller. angles = [] radii = [] intensities = [] if area < 1.0: integrator = integrators['bilinear']( self.image, self.geometry, angles, radii, intensities) else: integrator = integrators[self.integrmode](self.image, self.geometry, angles, radii, intensities) # walk along elliptical path, integrating at specified # places defined by polar vector. Need to go a bit beyond # full circle to ensure full coverage. while (phi <= np.pi*2. 
                                  + phi_min):

            # do the integration at phi-radius position, and append
            # results to the angles, radii, and intensities lists.
            integrator.integrate(radius, phi)

            # store sector area locally
            sector_areas.append(integrator.get_sector_area())

            # update total number of points
            self.total_points += 1

            # update angle and radius to be used to define
            # next polar vector along the elliptical path
            phistep_ = integrator.get_polar_angle_step()
            phi += min(phistep_, 0.5)
            radius = self.geometry.radius(phi)

        # average sector area is calculated after the integrator had
        # the opportunity to step over the entire elliptical path.
        self.sector_area = np.mean(np.array(sector_areas))

        # apply sigma-clipping.
        angles, radii, intensities = self._sigma_clip(angles, radii,
                                                      intensities)

        # actual number of sampled points, after sigma-clip removed outliers.
        self.actual_points = len(angles)

        # pack results in 2-d array
        result = np.array([np.array(angles), np.array(radii),
                           np.array(intensities)])

        return result

    def _sigma_clip(self, angles, radii, intensities):
        if self.nclip > 0:
            for iter in range(self.nclip):
                # do not use list.copy()! must be python2-compliant.
                angles, radii, intensities = self._iter_sigma_clip(
                    angles[:], radii[:], intensities[:])

        return np.array(angles), np.array(radii), np.array(intensities)

    def _iter_sigma_clip(self, angles, radii, intensities):
        # Can't use scipy or astropy tools because they use masked arrays.
        # Also, they operate on a single array, and we need to operate on
        # three arrays simultaneously. We need something that physically
        # removes the clipped points from the arrays, since that is what
        # the remainder of the `ellipse` code expects.
        r_angles = []
        r_radii = []
        r_intensities = []

        values = np.array(intensities)
        mean = np.mean(values)
        sig = np.std(values)
        lower = mean - self.sclip * sig
        upper = mean + self.sclip * sig

        count = 0
        for k in range(len(intensities)):
            if intensities[k] >= lower and intensities[k] < upper:
                r_angles.append(angles[k])
                r_radii.append(radii[k])
                r_intensities.append(intensities[k])
                count += 1

        return r_angles, r_radii, r_intensities

    def update(self):
        """
        Update this `~photutils.isophote.EllipseSample` instance.

        This method calls the
        :meth:`~photutils.isophote.EllipseSample.extract` method to get
        the values that match the current ``geometry`` attribute, and
        then computes the mean intensity, local gradient, and other
        associated quantities.
        """

        step = self.geometry.astep

        # Update the mean value first, using extraction from main sample.
        s = self.extract()
        self.mean = np.mean(s[2])

        # Get sample with same geometry but at a different distance from
        # center. Estimate gradient from there.
        gradient, gradient_error = self._get_gradient(step)

        # Check for meaningful gradient. If no meaningful gradient, try
        # another sample, this time using larger radius. Meaningful
        # gradient means something shallower, but still close to within
        # a factor 3 from previous gradient estimate. If no previous
        # estimate is available, guess it.
        previous_gradient = self.gradient
        if not previous_gradient:
            previous_gradient = -0.05  # good enough, based on usage

        if gradient >= (previous_gradient / 3.):  # gradient is negative!
            gradient, gradient_error = self._get_gradient(2 * step)

        # If still no meaningful gradient can be measured, try with
        # previous one, slightly shallower. A factor 0.8 is not too far
        # from what is expected from geometrical sampling steps of 10-20%
        # and a deVaucouleurs law or an exponential disk (at least at its
        # inner parts, r <~ 5 req). Gradient error is meaningless in this
        # case.
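
        # As an illustration (made-up numbers): if previous_gradient is
        # -0.06, the threshold previous_gradient / 3. is -0.02, so the
        # re-measured gradient is accepted only if it is steeper (more
        # negative) than -0.02; otherwise the fallback below adopts
        # -0.06 * 0.8 = -0.048 and discards the error estimate.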
if gradient >= (previous_gradient / 3.): gradient = previous_gradient * 0.8 gradient_error = None self.gradient = gradient self.gradient_error = gradient_error if gradient_error: self.gradient_relative_error = gradient_error / np.abs(gradient) else: self.gradient_relative_error = None def _get_gradient(self, step): gradient_sma = (1. + step) * self.geometry.sma gradient_sample = EllipseSample( self.image, gradient_sma, x0=self.geometry.x0, y0=self.geometry.y0, astep=self.geometry.astep, sclip=self.sclip, nclip=self.nclip, eps=self.geometry.eps, position_angle=self.geometry.pa, linear_growth=self.geometry.linear_growth, integrmode=self.integrmode) sg = gradient_sample.extract() mean_g = np.mean(sg[2]) gradient = (mean_g - self.mean) / self.geometry.sma / step s = self.extract() sigma = np.std(s[2]) sigma_g = np.std(sg[2]) gradient_error = (np.sqrt(sigma**2 / len(s[2]) + sigma_g**2 / len(sg[2])) / self.geometry.sma / step) return gradient, gradient_error def coordinates(self): """ Return the (x, y) coordinates associated with each sampled point. Returns ------- x, y : 1D `~numpy.ndarray` The x and y coordinate arrays. """ angles = self.values[0] radii = self.values[1] x = np.zeros(len(angles)) y = np.zeros(len(angles)) for i in range(len(x)): x[i] = (radii[i] * np.cos(angles[i] + self.geometry.pa) + self.geometry.x0) y[i] = (radii[i] * np.sin(angles[i] + self.geometry.pa) + self.geometry.y0) return x, y class CentralEllipseSample(EllipseSample): """ An `~photutils.isophote.EllipseSample` subclass designed to handle the special case of the central pixel in the galaxy image. """ def update(self): """ Update this `~photutils.isophote.EllipseSample` instance with the intensity integrated at the (x0, y0) center position using bilinear integration. The local gradient is set to `None`. """ s = self.extract() self.mean = s[2][0] self.gradient = None self.gradient_error = None self.gradient_relative_error = None def _extract(self): angles = [] radii = [] intensities = [] integrator = integrators['bilinear'](self.image, self.geometry, angles, radii, intensities) integrator.integrate(0.0, 0.0) self.total_points = 1 self.actual_points = 1 return np.array([np.array(angles), np.array(radii), np.array(intensities)]) photutils-0.4/photutils/isophote/setup_package.py0000644000214200020070000000021613175634532024655 0ustar lbradleySTSCI\science00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst def get_package_data(): return {'photutils.isophote.tests': ['data/*']} photutils-0.4/photutils/isophote/tests/0000755000214200020070000000000013175654702022634 5ustar lbradleySTSCI\science00000000000000photutils-0.4/photutils/isophote/tests/__init__.py0000644000214200020070000000017013175634532024742 0ustar lbradleySTSCI\science00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst """ This package contains affiliated package tests. 
""" photutils-0.4/photutils/isophote/tests/data/0000755000214200020070000000000013175654702023545 5ustar lbradleySTSCI\science00000000000000photutils-0.4/photutils/isophote/tests/data/M51_table.fits0000644000214200020070000006250013175634532026147 0ustar lbradleySTSCI\science00000000000000SIMPLE = T / file does conform to FITS standard BITPIX = 16 / number of bits per data pixel NAXIS = 0 / number of data axes EXTEND = T / FITS dataset may contain extensions COMMENT FITS (Flexible Image Transport System) format is defined in 'AstronomyCOMMENT and Astrophysics', volume 376, page 359; bibcode: 2001A&A...376..359H ORIGIN = 'STScI-STSDAS/TABLES' / Tables version 2002-02-22 FILENAME= 'M51_table.fits' / name of file NEXTEND = 1 / number of extensions in file END XTENSION= 'BINTABLE' / binary table extension BITPIX = 8 / 8-bit bytes NAXIS = 2 / 2-dimensional binary table NAXIS1 = 160 / width of table in bytes NAXIS2 = 52 PCOUNT = 0 / size of special data area GCOUNT = 1 / one data group (required keyword) TFIELDS = 40 TTYPE1 = 'SMA ' / label for field 1 TFORM1 = '1E ' / data format of field: 4-byte REAL TUNIT1 = 'pixel ' / physical unit of field TTYPE2 = 'INTENS ' / label for field 2 TFORM2 = '1E ' / data format of field: 4-byte REAL TTYPE3 = 'INT_ERR ' / label for field 3 TFORM3 = '1E ' / data format of field: 4-byte REAL TTYPE4 = 'PIX_VAR ' / label for field 4 TFORM4 = '1E ' / data format of field: 4-byte REAL TTYPE5 = 'RMS ' / label for field 5 TFORM5 = '1E ' / data format of field: 4-byte REAL TTYPE6 = 'ELLIP ' / label for field 6 TFORM6 = '1E ' / data format of field: 4-byte REAL TTYPE7 = 'ELLIP_ERR' / label for field 7 TFORM7 = '1E ' / data format of field: 4-byte REAL TTYPE8 = 'PA ' / label for field 8 TFORM8 = '1E ' / data format of field: 4-byte REAL TUNIT8 = 'degrees ' / physical unit of field TTYPE9 = 'PA_ERR ' / label for field 9 TFORM9 = '1E ' / data format of field: 4-byte REAL TUNIT9 = 'degrees ' / physical unit of field TTYPE10 = 'X0 ' / label for field 10 TFORM10 = '1E ' / data format of field: 4-byte REAL TUNIT10 = 'pixel ' / physical unit of field TTYPE11 = 'X0_ERR ' / label for field 11 TFORM11 = '1E ' / data format of field: 4-byte REAL TUNIT11 = 'pixel ' / physical unit of field TTYPE12 = 'Y0 ' / label for field 12 TFORM12 = '1E ' / data format of field: 4-byte REAL TUNIT12 = 'pixel ' / physical unit of field TTYPE13 = 'Y0_ERR ' / label for field 13 TFORM13 = '1E ' / data format of field: 4-byte REAL TUNIT13 = 'pixel ' / physical unit of field TTYPE14 = 'GRAD ' / label for field 14 TFORM14 = '1E ' / data format of field: 4-byte REAL TTYPE15 = 'GRAD_ERR' / label for field 15 TFORM15 = '1E ' / data format of field: 4-byte REAL TTYPE16 = 'GRAD_R_ERR' / label for field 16 TFORM16 = '1E ' / data format of field: 4-byte REAL TTYPE17 = 'RSMA ' / label for field 17 TFORM17 = '1E ' / data format of field: 4-byte REAL TUNIT17 = 'pixel**1/4' / physical unit of field TTYPE18 = 'MAG ' / label for field 18 TFORM18 = '1E ' / data format of field: 4-byte REAL TTYPE19 = 'MAG_LERR' / label for field 19 TFORM19 = '1E ' / data format of field: 4-byte REAL TTYPE20 = 'MAG_UERR' / label for field 20 TFORM20 = '1E ' / data format of field: 4-byte REAL TTYPE21 = 'TFLUX_E ' / label for field 21 TFORM21 = '1E ' / data format of field: 4-byte REAL TTYPE22 = 'TFLUX_C ' / label for field 22 TFORM22 = '1E ' / data format of field: 4-byte REAL TTYPE23 = 'TMAG_E ' / label for field 23 TFORM23 = '1E ' / data format of field: 4-byte REAL TTYPE24 = 'TMAG_C ' / label for field 24 TFORM24 = '1E ' / data format 
of field: 4-byte REAL TTYPE25 = 'NPIX_E ' / label for field 25 TFORM25 = '1J ' / data format of field: 4-byte INTEGER TTYPE26 = 'NPIX_C ' / label for field 26 TFORM26 = '1J ' / data format of field: 4-byte INTEGER TTYPE27 = 'A3 ' / label for field 27 TFORM27 = '1E ' / data format of field: 4-byte REAL TTYPE28 = 'A3_ERR ' / label for field 28 TFORM28 = '1E ' / data format of field: 4-byte REAL TTYPE29 = 'B3 ' / label for field 29 TFORM29 = '1E ' / data format of field: 4-byte REAL TTYPE30 = 'B3_ERR ' / label for field 30 TFORM30 = '1E ' / data format of field: 4-byte REAL TTYPE31 = 'A4 ' / label for field 31 TFORM31 = '1E ' / data format of field: 4-byte REAL TTYPE32 = 'A4_ERR ' / label for field 32 TFORM32 = '1E ' / data format of field: 4-byte REAL TTYPE33 = 'B4 ' / label for field 33 TFORM33 = '1E ' / data format of field: 4-byte REAL TTYPE34 = 'B4_ERR ' / label for field 34 TFORM34 = '1E ' / data format of field: 4-byte REAL TTYPE35 = 'NDATA ' / label for field 35 TFORM35 = '1J ' / data format of field: 4-byte INTEGER TTYPE36 = 'NFLAG ' / label for field 36 TFORM36 = '1J ' / data format of field: 4-byte INTEGER TTYPE37 = 'NITER ' / label for field 37 TFORM37 = '1J ' / data format of field: 4-byte INTEGER TTYPE38 = 'STOP ' / label for field 38 TFORM38 = '1J ' / data format of field: 4-byte INTEGER TTYPE39 = 'A_BIG ' / label for field 39 TFORM39 = '1E ' / data format of field: 4-byte REAL TTYPE40 = 'SAREA ' / label for field 40 TFORM40 = '1E ' / data format of field: 4-byte REAL TUNIT40 = 'pixel ' / physical unit of field TDISP1 = 'F7.2 ' / display format TDISP2 = 'G10.3 ' / display format TDISP3 = 'G10.3 ' / display format TDISP4 = 'G9.3 ' / display format TDISP5 = 'G9.3 ' / display format TDISP6 = 'F6.4 ' / display format TDISP7 = 'F6.4 ' / display format TDISP8 = 'F6.2 ' / display format TDISP9 = 'F6.2 ' / display format TDISP10 = 'F7.2 ' / display format TDISP11 = 'F6.2 ' / display format TDISP12 = 'F7.2 ' / display format TDISP13 = 'F6.2 ' / display format TDISP14 = 'G8.3 ' / display format TDISP15 = 'G6.3 ' / display format TDISP16 = 'G6.3 ' / display format TDISP17 = 'F7.5 ' / display format TDISP18 = 'G7.3 ' / display format TDISP19 = 'G7.3 ' / display format TDISP20 = 'G7.3 ' / display format TDISP21 = 'G12.5 ' / display format TDISP22 = 'G12.5 ' / display format TDISP23 = 'G7.3 ' / display format TDISP24 = 'G7.3 ' / display format TDISP25 = 'I6 ' / display format TNULL25 = -2147483647 / undefined value for column TDISP26 = 'I6 ' / display format TNULL26 = -2147483647 / undefined value for column TDISP27 = 'G9.3 ' / display format TDISP28 = 'G7.3 ' / display format TDISP29 = 'G9.3 ' / display format TDISP30 = 'G7.3 ' / display format TDISP31 = 'G9.3 ' / display format TDISP32 = 'G7.3 ' / display format TDISP33 = 'G9.3 ' / display format TDISP34 = 'G7.3 ' / display format TDISP35 = 'I5 ' / display format TNULL35 = -2147483647 / undefined value for column TDISP36 = 'I5 ' / display format TNULL36 = -2147483647 / undefined value for column TDISP37 = 'I3 ' / display format TNULL37 = -2147483647 / undefined value for column TDISP38 = 'I2 ' / display format TNULL38 = -2147483647 / undefined value for column TDISP39 = 'G9.3 ' / display format TDISP40 = 'F5.1 ' / display format IMAGE = 'dev$pix ' END Eñ°ÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿC€ñ)ÿÿÿÿC~§ÿÿÿÿÄ϶ÿÿÿÿÿÿÿÿÁ‰Bÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿ€€ÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿ€€ÿÿÿÿÿÿÿÿ?__EÖ¥nA·÷ŒBêƒGB¥ÓZ=J‹=³ ´BSäB“°ñC€ñ)<ÁQ&C~§<Á[ÄÏ‚ÇD ÑŒ>Æe§?Y~`Áy•;n¡h;mÑ¡Eñ°Eñ°Á‰BÁ‰B=¨i=8 –¼ÔÛº=›‰=H?ç<Ìh¬½‡À„<õ¸! 
@)kM@?µµEÔ A»oBø<\B¯‡z=#¡b=­ kB@^-BÅLC€ð=<Íð¤C}À<ͽ)Ä͆YDÆ7>ÀÉ?^¼üÁDE;¬¾;~ºjEñ°Eñ°Á‰BÁ‰B=—”™=.v;¼Î'9=¡&;úsx<‰Ñ½ª;F= H @}“Ä@?!azEÑ‘ÀAäRžC‡BÍέ=J[=¹kÆB[w¹B_ |C€íT<ôš~C~U<ó ’ÄÊLTD#¼6>Ï3>?dùÁ";—¾;—Eñ°Eñ°Á‰BÁ‰B=´í%=G+Û¼Ïok=d=†›==Ô ½f*<<ã×€ A:ª@?1„ÓEÎ} AâãÞCYBÌ„=eàa=¨iB=¥B2©5C€í<ôÄC}<ô;uÄÈ‚DŽ¢>¾!?iœÁÍE;™ {;˜]LEñ°Eñ°Á‰BÁ‰B=••]=+¶Ž¼Éyù=MŒ:Ö<‡†ñ½©Â—= ©ü @£2@?CEOEËSÌAñxiCècBÙ¨=„Á•=£Ì)BHÉBC€êu=ÕËC}=ÿˆÄŽD>¹¾Ë?o>6Áˆ©;¥m;¤©úEñ°Eñ°Á‰BÁ‰B= pD=(&C¼Ï(ä<õ×<ÈÊ7<—Û½2<ôÔ6 @¹*þ@?VÌ>EÇçB\DC# ÈBçg¯=•Û*=–¤SB86éA÷PíC€é=­¶C}=vÄÍm,DÈZ>¦·ã?uÁ=*;²ì ;²Eñ°Eñ°Á‰BÁ‰B=‚l¤=±7¼ÎYc<ñÏ«¼ …‰µ?zë’ÁÜÜ;­¤;¬.¢Eñ°Eñ°Á‰BÁ‰B<ÇM;<ìˆ÷¼»ßÄ<ã&½aM<|“ô½“ã<¶ ? 2@ëì@?óãE¿uAëÖCQBÔ”d=©HÆ=<þ¡AáZuAŠðC€ç<ȺC~<ÎìÄõÓ^CÁä’>Iêô?€|CÁua;«éƒ;«˜Eñ°GÈÁ‰BÁ4¹E¼Ag<¡Ôž½çÞ<¥Œ…½Cr-Bz?ƒ•kÁï±;•zŒ;”ÔPGÈGÈÁ4¹EÁ4¹E¼F”³%E²6oA¯Œ4BßǾBž<=¾&M<Õ<–Aç)÷A _/C€é<‰ƒ‰C}´<Ži´Å¹%Cs")=ê{&?†Á²Á>Y;‰,Œ;ˆž@GÈGÈÁ4¹EÁ4¹E¼I‘Ÿ£yÖ@?¬÷E©û9A™ò­BÄ>ôBŠÄN=·'‘<©ÃóAð@åûBC€êÐÙ"@?ÑJEE—žA£m¶BÐTÔB“P=¼]<‘_Aó*@À‰C€í#GRÿGRÿÁ=LòÁ=Lò ¼\=;îs“<(Á;äN‰<o~;Ô »º¿ø;ÉÇë ? 2è@?æ8E‹ƒA”¼|B½š/B†®=®u\}A >LB[lB'«=Âc°<dA®s @+J[C€îÇ<(*Ct›<,]Ä¥þøB–Ìæ=h¶?Ÿ7[Á <ü;R$£;QŽG°‡G½.ÁF>GÁGrºRv|;-WGå(H¦ÁJƬÁMQñ%¼ Û;_Ã<Í"8;sYÀ;‰ú ;QC@:$;Q ‹ ?ׯ@@`PæE ~f@ï „BA¦ØBîµ=ʶw<2XAñî@UuÉC€ðÉ<¢a?Ch<¨c¥Ä ÜAóøG=`žœ?¯#KÁ2;l×|;l÷«@@v¿dE¾ @èOyBDóeB Cå=Ч݄@@‡¶DíJ@Ù¢¾B@]WB·=ÚøE<[4øB«6@q$›C€é×<òQgCjå<ùT6æF·AÁÜj=•<?·¯§ÁÅ;’•;~¶‡H9@H*”@ÁP)pÁQ°d19¼¸c¹;Ÿú»aõC~Z=?û5Ã`î;A°¢=ÈiK?À¦ÓÁ…¡;ŸMt;ž’àHEH^@ÁT<+ÁVEÊIY¼æ_<ë¼F»¡;îL¼ÆÚ;½’¼7Ý;­dÕ =éý@@´¡²D½*µ@þ¤ÙBƒ=B9š!=¿‹€<¦€BðK@ÒÓ)C€Ø’=v—pCZ=yæÃ<ØjA§‡7=ã/?ÅLÀþe;»Ñ;º˜6Hd €HiÔ@ÁV»­ÁW*þ]a½<€¼e±6<Kr¼þêî;þzç9þ`;â-)" >¸èŸ@@ƱÞD¯•=A=(Bì¯BH¶,=¤S<³íOB ÕÈA×5C€Ý±=‘ÃXC¥>=”’Ã#%ƒA›C=ó ­?ÊÀûÎH;ÎÃÈ;Í“@Hz·H…l`ÁXaÁYvdmy½ <4¸*»1µ´<%^¼úŸ<gGÙ@@ÚuD£ÙAµIB™œwBY=1=–88<Øb{Aõ³×A*2jC€æ(=¼€ŽC¼=ÂeùÃUIAíz> Yä?ÎíSÀù=÷;èß;æ‰çHŽ” H”º ÁZ™Á[YZ‡‘¼ê‰ƒ<\Cä<3”ïedÂÉIkA‚¯S>&5?ÓêµÀ÷í;þ‡5;ü·aHŸß@H¨ÀÁ\š¦Á]z]£±¼©<†Q<¦iT<†ãø;{€<}¼<Êô<„oã- >Tè¡@A;=DÛÆA|B©[ BoG={Ð÷=-ÝAVÕJA¢Y·CÈ>4ŽaCø¸>=aý† Au<ÿ>jh?ÙåÀôáÍ<]µ<hVH· HÀÅ@Á^õ†Á_ÚÖËݼ‰îq<µÛw9mY}<´(Q="Éè<»š,¼2Î<®mM2 >çV@At]D‹ØßA6ïBÀÿýBˆx¹=ƒz=KÎÁÙBA¶¯ñCl÷>kNßC‚1«>rÞÇÂ`@ A]Ų>}+Ø?ÞB ÀóæE<æÂ<²sHʺ€HÖš Á`ºÔÁa·ýï¼Ýx<ÒÌ5=A ©<᪇½|Ïo<äGûÀ(M<À9À7 >xóÙ@A Dˆ0TA"BºRšBƒÀ=ÏêÌ=÷4Á¾’úA3=ÃC£œ>Eð•C‚\ÿ>OÛzÂn±€A9ÀÜ>G8·?ãž©Àòú< e5< Q&HáYÀHô Áb‘KÁcóÁ=¼©_e<—ë‹={—<¹%î½(xÞ<›½¯»Ãžµ<ŒsK<>%Ì>@A0D‚¨@ûüâB².«B{üâ>r˜<é/Á‰\@ÑW?CÄß>$|‘C‚kw>4OЮAïn> |¯?éÈÀñ‰¥<†'<‚áHùw@I ðÁdUiÁeöÕGy½;G P¥<Ï6fÁ­[8@¡ÒÕCÐ/>$&C‚jH>3‚.ÂkxADý> ]ÿ?îºÉÀï—á<®ä<³æI 80Ik Áf>Áh`yѽ/žL<\Õg={C P¥=$ h ëA CÐ/>“ZõC‚jH>˜5¾”A©G>u«U?ô|}ÀíRz< Ž<ÿIlàI0épÁh|ÅÁjg™Ó)<\’¨<¯êl=°½U<ò\æ½ARþ<‘ŽK¾Øæ= !M2ACÂÎ@AjAŠDcßûAcäB˲B‹k>râÍ=&ÃÛÂ"R)@³ÌÂC«{>®™qC‚?m>´SÁë«B@ÿ„ð>ŠÈ ?úaºÀìÇÐ<K]<ë I)`IJ*àÁiŸ Ál¹ ¡<+\;<ËÊ=¨ã =T$½G6<Ý»‘¾ "Ù=$²ÕP2AÜ@A€×?D_îAÔ/BØÓØB™Qù>“òf=†Â"R)@‹MÆCÏÕ>·19C‚ K>¾œ1Áã0°@ôçG>‰úÏ@5®Àì,Ž<&ä¶<%VÈI8ZÀIeG0ÁkÜÁnè®I%½Ìt<úÖ}=<.1<à}R¼Ø}™<ª´O¾B)=" ‹T2@ßy‚@A¹’D]¶ÆA³>Bß_þBó0>· <šÔ&¨Ê?ïåÇCö8>W@7C‚D¥>e"†Â9ŽG@Ö²¦>O@M"ÀëÔ<)¬<(äICÁIƒ«ÐÁl)°ÁqP²}Õ½a9 ‘d@A›åºD<û'@ó(ðBájOBŸd†>6L <”7{Â[·@KáÎCzã>M—C‚?m>BâEÂLyÈ@µié=ã u@w«ÀæG<<3¸T<1ìúI„\(I–8ÁqgçÁsšÇ×±½!ð·<+©à=É<*ŸÁ¼b¹°<ìð¼ªð<zÔn @O @A«|³D*ñ @÷ÛBí¤uB¨ ß>Z³¤<¼;•ÂY7Ê@\þCB>“‚C‚%>Šû»Âd¯@­ž…>Ï@ µÀÀâÊéô<{X2¼|É Óè<š·ÂKà@YºC€ÀŸ>‹ñýCŽH>„î×Â1rØ@×ó*>Å·@ ÙÀáõó<üô<|\2I›Ç˜I¼Á Áti>a^ÊC;ø>X¡ŒÂ?@ŽûÄ=ì7@nrÀÙ¿ü<]–‚Ÿ>š)ZC‚eV>¡•ÙÁê¿@Œ1ñ>àú@ê ÀÚ^?hûê@{ ÀÌÒ<ˆ½v<†«ŠIúv`Ju<Á||=Á}Ü K ¼v;M<ûB=H7<¨mļKÄ.^w³@";ÀÈ…Ö<®´J<«V\J 7¤JKÈÁ~ºÁ~´[ ™¡=`vÚ<ÏvÞ=²øÈ<ñ.¼„=<·ä½w< ÆÐ2A J @BæbCÍÑ@¦‡ÓBÞ¾ŽB =ŠY=2Ú”ÂsèíA˜}ºCV¤?^ÀœCðß?Wþ¿Àö{?a=é¼n@ßãÀÄ7¬<¤¸ï<¡º~Jé J›PÁ¹ÏÁ€!‡¼£-ä<ºÎê=Œ×ì<Å´¢½ŠÜ<Àáк‡û<·:Èå2Bã@B'C‚0@¥n–BèB¤"k=ŠY= 5QÂsèíAo.VC€?@÷C„<›?:PBÁ 3o@‰>ovÉ@"´£ÀÁ?{<²_}<®Þ*J'<øJ,u(Á€ÁOÁ¥ûa½u7}<µ¢–½“ó5<ÆE½?ý×<™½þ=ðQX=¸Xü2B%@B7̇CwÝu@z.ëB² #B|Z>-åX<ɽfB¡r8@éFC|Éa?*;+C‚û?\fÁ¶P?‰7Ô>^õ@&¡ À¿‰ˆ-åX<·o±Øg@ƒã#C|Éa?*tóC‚û?äzÀ‡wž?nÎ> 
2@*¥¨À¸òh<1’©JM¹ÜÁÝkÁ‚ßû]<ô¦ÿ`HµÏuC‚’¥>¯%0À•¯>Ü%u=¼AT@.ÃÀ¶§™<„Z;ÿ3ÅJIž¬J`‹Á‚a ÁƒP}%ù;d]#;óöb<¤4 ;ûb[¼‰`ä;둦=! %<S3 ?poñ@Bt¢ìC?Ù?fò¥AÆöVAŒ°C~'f?[ÌC‚’¥?XÓÊ¿±‹ÿÿÿÿÿÿÿÿ@2ùçÀ®Ôø;Ò;ÐÑJr¶Jtm¼Áƒý}Á„ +-=-ñ;ßO'Â>“û3>)ÕbÂ> Ažý6C€ÿ=]UžC€~|=WŸcÄ¢QD Xd?‰©Ò?Y~`Á‡;'; á¢F’*F’*ÁÅ+ÁÅ+¼ÖÙ=ä%.»Ž½=ÔnF½•w·=§ÔD¾ ½Œ>­Aò =Fâ:@?µµF%ªA±[HBâBŸÝ÷>“û3>)ÂÂ=ûpAžëC€€=sZ¦C€~S=mËÄ ˆžD?‰ŽI?^¼üÁz;,a×;+ÿ/F’*F’*ÁÅ+ÁÅ+¼×P÷=ä§»ŽQD=ÔSÀ½•=³=§uð¾ ¬¶>­ . =´dö@?!azF£dAÖ§¸CÐüBÁ|Ï>“û3>)¾¶Â>ÀAžçþC€€=…ÖÝC€~(=‚a¾Ä²D'V]?‰’/?dùÁj©;QvA;PÒçF’*F’*ÁÅ+ÁÅ+¼× «=䈻)ü=ÔO'½•ˆ|=§Ã; ¨ƒ>­ _ =Ó9'@?1„ÓFµBØ!C%…Bê>“û3>)À,Â=þ­ S =@?CEOF GB'CHT¾C §Ê>“û3>)ÊjÂ>ûAžòòC€þ=¡üÝC€}È=ÎQÄ6Á@;šì;šB£F’*F’*ÁÅ+ÁÅ+¼ÕÕd=ä» =Ô_•½•}È=§Ç¤¾ ³œ>­$Ž =Þm.@?VÌ>F `B>&Crd‰C+eÅ>“û3>)ÆÖÂ=ùvAžïšC€€=²*NC€}Œ=­”?ÄO6YD^½‰?‰—’?uÁ#‹;¼Ã†;»½¥F’*F’*ÁÅ+ÁÅ+¼×ƒ=ä!»­¹ =ü™š@?lGF HÎBfÞC’¤ÏCOb¹>“û3>)ÅÐÂ=ýpAžî¥C€€=Ãû#C€}N=¾í³ÄcîXDuO?‰—.?zë’ÁØ;æM;äÓÂF’*F’*ÁÅ+ÁÅ+¼Öö=ä·»Ž@=ÔXÞ½•Z =§i¾ ¯ú>­Ð >&•·@?óãF ÷B‹6îC±vçCzøÿ>“û3>)ȆÂ>Ažñ.C€ý=ךC€} =ÒdÄzÀ˜D†Çi?‰™}?€|CÁÖƒ< É< ¯ôF’*G5ö´ÁÅ+Á:ºŸ¼Õ=ä »Œ“=Ô\ν•¼M=¨Q¾ ¯ë>­® >‰“è@?Žò­FX÷B¨È÷C×(×C˜$ >“K±=ë7éÂ=eA] C€õ=£ðC€|¼=ŸûÚÄÇÛ„DŽZO?6Wk?ƒ•kÁ¢<,ßf<+6-F’*G5ö´ÁÅ+Á:ºŸ¼—D7=ší©»\Ñî=’½Í½F¿=Y¾^dD>7¿ ?Ô=ç@?>%F´B¦Í‘CÔ¢C–Z¬>wdª=…"Â7'úA¾C€€Ô=Bn§C€~µ=A&«Å(èÉD`}">ªI?†Á²Á¶<0uÕ<.¹1G5ö´G5ö´Á:ºŸÁ:ºŸ¼,Ž="š‹»´ƒò=à‹¼·rãSC=(ÉÓÂ;Ôl@Ó'OC€=VC€Z=’uÅH¤ïDç>J½÷?ŠÁ,Z<ûÿ<Ê‹G5ö´G5ö´Á:ºŸÁ:ºŸ»‡ ï<Ģ˻‹Zì<½ -¼€2y<+µ ½…*j<ƒP+ >LDù@?¾C„Eëü;BNxdCƒ™xC:>Gêv<í»âÂ:cö@œ-;C€:<ËýC€R<Èö]ÅMU CãB·> «u?U}Á;ô";òYÓGrâ‡G’®Á?¾áÁC‘ »!ºí<ˆ§­»ñÚ<ƒ\q¼Rè< Ɔ½-xÂ<#~ç ={Ò@?ÑJEEÜ6DB*ÀNCYªyCé½>C%6<·ÙîÂ9ï@vÓ~C€€è<¬ ªC€(<ªv*ÅHÅC¥Ëâ=ÓgÛ?½öÁëm;Ø*Â;Öá3Grâ‡G’®Á?¾áÁC‘ »o£a<ý“<„WÂ8òÈ@6‚ÒC€&<‡A]C€S<†C:Å=šùCPªÃ=ŒÞG?”;xÁ‚Ì;­Ì;¬¿‰G’®G’®ÁC‘ÁC‘ »€æ€<¡gºËj<Ò¼ ‘B;°u¼¬{®;­ðB =Âý@?ý=µE¹@A’ÍB»"·B„S*>5KÇ<·BÂ7Δ?Ö¿^C€<<'ÐC€Ñ<&ªÅ2ÈC\Ë=O|¡?—Î…ÁæÚ;\Ìh;\^G’®G’®ÁC‘ÁC‘ »¾Í±;˜b:;—5T»½Yµ;‡>U»²1; =L\@@ HWE¦:ÙAªVBØÈâB™J9>*üû5z;×ZÜÂënú@@(ˆ-EŒãÕA-2ýBm)¨B'³ >?<‚ÏÂ1Ü?®©KC€€.<@¤pC€€üI`@@9b˜EËè@ô9¤B2Aû½Õ>D›ä;ŸäüÂ6Xî?WQ·C€é<ÂC€€f<6xÄ®¢AAþe<ºv>?¦üÆÁ¼;äb;˜dH ª3H?QÁN5ÁNåv9ζ¾;=rUº³a;5º¨ù¡;øÿ»ÍÁ&;-7 > m³@@KìtEiÑ@™xAÔf4A–0^>AN‚;Y?Â3î?Õ C€ý;Á¸3C€ù;Áù²Ä‰¹A´#<¨'×?«›Áëz:¨w:¨0÷H ª3H9´ÁN5ÁSI%¸£nž:ùë7¸¨çs:ùŽf:½Í:ç1‹;M":çô =¸ó´@@`PæESÌÇ@¡:äAþíUA´B»>AÌr;‹›ýÂ8¶Å?90 C€€š< 8HC€ù< bkÄWÿ§A’ž<­/R4-@@v¿dEABû@'šõAŠø’ADˆÞ>FŸ…;BÂ4¼Z>¿µ:C€»; ¹ C€ý; ’YÄ=ÑAÔ F÷D:Ö¬WÂ18>ŠjsC€€;€ ïC€€;€ZÄ&‘-@Ù<&Õq?·¯§Á â¯:<ª‡:>G²b; 9Â3î¦>°XjC€ü;¶ ÁC€€ ;¶rÄ…ô@£5 <"Šï?¼‘Á Î:gÚg:gÃNHtDKH…”ÁWí4ÁYjŽ;E9ï»:£ôx¹À¨o:¥0Ø7Y¿:”§;'±:•Ì´2?Q÷ @@¤5çE–—?Šæ A:@@ºÿ|>I­:”{‹Â35ô>=E*C€ò;W2C€>;WnÃä ï@HI:ŒXàÂ2ë£>4bÊC€€T;_²ÖC€€H;`2ö×@ \Ë;Ć?ÅLÁn'9ýX 9ýEH“¸5H e˜Á[;Á\©9Sa9µxm:)2¹kK:'c[¹5:Í:²Ã:Œö =<‘@@ƱÞDæ7?%‰M@­>@tÔß>Jdù:R’*Â49j>ˆC€€¢;8­†C€º;8ž¶Ã›V‘?¡x9;… i?ÊÁ›ã9Ǫ]9È hH¡çH´^Á\ºÁ^³ ayº2y¾9÷ýz8:âº9ö´¶¸3Ã9溂+¿9äç # <$YH@@ÚuDÍýÚ>É0@^ø@ ]>I¸": àùÂ3¾Ù=¶” C€€+; ~aC€½; ‰üÃ}æ–?<>§;=Í?ÎíSÁ­v9‡Ê9‡­BH³ÌHÆgãÁ^žÁ`Zúw‘ºi›9¦©M¹=)ö9£ÄÆ·œ¯¿9¡N†9ör¹9žŒ“' ;ÒÃ}@@ðk´D¸{g>_$á?ÿ¤Å?´Äq>Jë¤9©)#Â4’°=TOC€å:²ü¤C€ù:²Ö:ÃRn±?ïÃ;Ê?ÓêµÀý†9(.è9'üƒHÂÖùHÛ´=Á` QÁb q‹±9ª¾9Kà 7ƒš–9L{À¸ÞÚI9@|,¹Ê€ƒ9A$ * ‰¤„@%?ébl>Kee9æ/sÂ3“¢=óC€€;¨ÃC€×;¾Ã./>ËÖ;y?ÙåÀù™,9h7Û9hÿHÝÎÞHõzÀÁbJÔÁd Ƴݹ¤˜9‰Fž7à"9Š<ú9ŒŠÔ9ðç¹ÿ–Y9‚ç. ;~Ç@At]D’ÀŠ>9î?깚?¥ùÄ>K9¯UÎÂ3à’=\ºùC€é:à¿sC€µ:àÉ–Ã °Ø>„Na:ï X?ÞB Àõ’î9/÷$90óHðËKI(äÁc¸Áex%Ó¹o{9P 39=A9Oä½µ,N]9Iȵ9®¹v9I¹ 3 ;˜K@A D‚¬X>°Ÿ? 
s?‰7C>KF¶9—$Â4ó==¡ÀC€ð:Ô× C€€:ÔÕÒÂè€>:•Ñ:ÍÇ+?ãž©ÀñŠÉ94e9ðŸI¥IFáÁeECÁg÷ý=8q*98зãÈô98þú67Q947š9”°Z94w·8:ˆQq@A0Dhh~=Ñêã?’®?Nœ>K°:9tÀøÂ4C*=iàC€€:¾^C€€:¾L9½¨û=ö$¾:¦Æ?éÈÀíw8ú›ž8ûdYI¬YIÍåÁfÊÁh‡e/y86ì9q÷¸çœ‘9“ù¸ƒ»u9Õj8šmR9ß> ;:̪@AA™šDNXy=¦´ ?sz?+Õs>KÅû9OýÂ42=ÌïC€€ :±¶ÎC€ÿ:±°qÂ™ÈÆ=© :Œ´+?îºÉÀéU8àmÍ8àÌI÷¯I/9ÁhS¨Áj8°mѸC9ZV·“Ø8ÿö²¸ŸªÀ8ø19{̈8÷}D :öLÀ@ATõÃD7z=kŽI?3>ýJ|>KÖ9.&@Â4)s<ØžžC€€5:¢é>C€ë:¢ßÂyBb=P¿Û:Ve?ô|}Àå-Õ8²“¯8²ì“I,) I=PÁiî”Ák™h½)¹‘­8ÒÀÒ·ÞÀà8Ôk®¸"û8Ê]9{`Ì8˯„J :oÇ©@AjAŠD"b¬= x>áä÷>Ÿ»A>Lõ8×E’Â47<†ÌxC€÷:^E_C€ü:^C ÂIÓ<íÅV:_f?úaºÀá18p¤‚8r ’IÔË>7›ì>LMÍ8¬:Â4ƒ§›>lS€>LD¼8ºÙfÂ4á1%¡=ú†>L¥8~û Â4ÿ<.C€Ù:/0 C€ü:/.•ÁÏÄt<ó¸9¦Gß@w«ÀÔƒÅ7ëï[7êì·IqÁtI…j¨ÁoÔXÁq‹Cµ±8—mx8€·„g8Q¶¸†Z82¸¸¬8uqm :R³@A«|³CÉ`E<%×o> “L=ã>L‰8Q‹ºÂ4:<èC€ü:oÃC€€:lBÁ§?#jD<úa¬@ µÀÀÐg-7æbù7ãÚãI„5{Iœ^ÁqbÓÁrñ’©5XO8è·Z9ÿ8ü¶©{u7ûNf¸±³7üvhx 9×lm@A¼¢ÅC³2x< ˜B>ÊH=ËY™>L…s8?SÂ4Š;»»ÄC€õ9ùèÒC€ô9ùèCÁ„Û,>Ô;C<ÌyZ@ ÙÀÌYQ7Ý 7ÙêJIIœµ%ÁrÂ~ÁtVÖ{Õ5àÁ7º]¶A*#7º©·R67ºHU6£M7ºnð„ :bX@AÏÙCŸâK:žu¤<¨§€LŸþ8òÂ3þÂ;³‹C€Ü:íC€ß:ž+ÁKe>”r”<º×@nrÀÈc6¥6†lI›ßI©*tÁt'Áu« -¶¢ p7²V¸7wé‘7²®ã6¾¥7®Db¸`67®ñú‘ 9–Å@Aä?ÕCä:„$Å<“GVL©7ÛiäÂ4±;ˆbeC€€9ÜaëC€ô9Üa]Á#Êæ>Jó§<žš@ê Àćs6uÝ6…œøI¨I¸[ÎÁuŒrÁw)‚ý ·Qâ/7‡”ü¶#ðœ7ˆ=6lm7ƒ’ƒ¸Ké‡7„êŸ 9Q:¸@AûC€‡;ÇW=:Òö=Å>L«7´ÖßÂ3ý³;`ðæC€ó9ÇÚ/C€ò9ÇÚßÁ Ú> Di<‰@{ ÀÀÍY70P˜7)±ÀI¶XÆIÇãdÁvø¶Áx‘- ¯ µÔš7`(k¶ͧ7a"ï6ˆ¯ê7[›]¸dO7\Üf¯ 9T¾@B BCgô¡;#Å=GÂñ= @±>Lª7‹ò^Â4;-?C€€9©í–C€û9©ì‰ÀÆcñ=¬­<^@";À½;~7F7@žÝIÅëÝIÙ"ôÁxe3ÁzB »¡·ªÄæ7*å-·3y7, ë¶™0ð7*n7¬7+±üÀ 7ù¤Û@BæbCRi°>L°Ã7_ï‡Â3þË; m½C€€9•ÎÝC€€ 9•Ï!ÀšYŒ=€ËpL¼7;ųÂ3ý—:é•5C€ü9Š!C€ý9Š!”Àpã°=ï°<(çö@"´£À¶©Í6œïø6¡|IéÎJqÁ{L¼­7(yÂ3ý]:¶ÖûC€þ9n«C€ÿ9n›À7ö<ÒØ7<´3@&¡ À³³6ïüÌ6ïEˆIý™âJ .Á|³ŸÁ~n$«Õ6ŒªÍ6¶P:¶J¨6·B=6žbm6³” ·m°6´  8\A@BJ-ÈC#o:€+Ù<¾>c<†…Ê>L¾è6ÞÕŸÂ4»:Š ñC€ö9F_C€ó9F^ÎÀBD<¬<Cµ@*¥¨À°ú6ÛE´6Ù<J aŠJz˜Á~8IÁ€f ]5Ÿ—ˆ6Š|`5­‹36‹»µêË6‰*û·~Ñ6‰Áý 7ÿ¡Ö@B^eCÊ$:~ç¤<ÆX£<Œ@>L¿Ý6¶ÑãÂ3ÿ:cQOC€€92øïC€ù92ù ¿Ù&¿>LÂã6˜ñÂ4°:>¹C€ü9$êC€û9$é忤×<[¢;ÈGM@2ùçÀ¬Eå3“nˆ³“nˆJ&.J9€GÁ€³2Á§Ä$Ë-ñ¶¿-ƒ6=šÐ´V¡ö6=ÝæµŠ Ë6=Sj¶÷Ù6=­KV 7¾f:@B†ŒÏC~>LÃá6pŸÂ3ÿÒ:¡›C€þ9]¬C€€9]¶¿w’”;¤®z;ªI@7J¾ÀªK¾2áϲáÏJ6Š„JM8{ÁƒÿÁ‚ˆg,_7©6²çs6ö%¶|àj6?ùµT¿g6ã¤4ܰ6={x 7pé@@B”JBÿâh:„n<í8Ù<§½È>LÄð6RÂ3ÿò:¾MC€€9òñC€€9òó¿;é;YêJ;•Ì@;¶9À¨¥7»}7 JI¬}Jc†ÜÁ‚a£ÁƒmÖ5·C)3“-(6Ð^4§+Û6CÀ4di66É4a3°6IF 7à–d@B¢Î8Bõ ô9Õ}LÆY6ôôÂ4e9Æ GC€ÿ8äqC€€8äP¿ |;˸;ƒR@@<ýÀ§•6<·6a³*J_ÏçJ~µÁƒI<Á„cA Qi³ ^5Æ/EµŠ¬5Æ{4†e$5ÂÆ¶Á¿â5Ã=éÇ 7@N°@B³ Bì˜>LÇ76 Î Â3ÿÒ9«–+C€ÿ8Ùj¼C€ü8Ùj˾уÃ:·õ‹;`Æ@@Dß²À¥Ä€3Ôxw³ÔxwJy‰-JŽetÁ„;'Á…aN¿biµµ¦5«°µÈµ—5¬µ®>5«Á5¯§b5«­jô 7cR@BÄþ¦BäÍÚ>LÈ5ÜzhÂ4"9‰BýC€€8¿QÊC€ÿ8¿QÁ¾š…œ:hÙ;@áå@IŸÀ¤¬º4yLÍ´yLÍJ‹ÏÃJ ¬_Á…8IÁ†m‰_Uw=²†ÿã5‰ecµm¥5‰Ë#´ z5ˆº6/6Y5‰:& 6׆º@Bر·BÞÙ>LÈ¿5½,UÂ49kŠäC€€8´’C€ÿ8´’ ¾c=h:†k;%Ø@N{¦À£Â4HR´HRJc¡J¶ !Á†?Á‡ƒ'sG)5Y…5kiíµ’‘L5l-´†iå5jÜN5»)ã5k¡‡] 6 l¶@Bî]BÚ¿>LÉË5š[Â49@ £C€ÿ8¢C€€8¢¾&Ř9½åë;¿ñ@SvJÀ¢ÿi³¯u3¯uJ²)&JÏdEÁ‡SCÁˆ¥‹k®M4ž‰»5@.²µ@(Ä5@Ⳙ5@‚4Âh35@á‘™ 6H”Ë@C™BÖ">LÊc5„B¢Â49$ªC€þ8˜ÀõC€ý8˜Àð½ó‡Ý9u¾[;)À@XªÀ¢_d4*F´*FJÊáwJíÅzÁˆt-Á‰Õ ¨¹Òý2û)û5$£OµŠHõ5% 1Œ[¯5$/ƒµŸ`!5$¼Ü 6Ø@C5ÂBÓt>LÊ£5^f7Â3ÿÿ9 hC€ÿ8AÐC€þ8Aн¯øy9Få:݇Þ@]È…À¡Ý4ÐÓ´ÐÓJèQKö€Á‰¡tÁ‹ÉÌ1ÿ5³…ǧ5 –µ_Z5 ½4„³»5 uµ¨ò5 †c% 68½‰@C¡‰BЊÆ9ÄgÒ<3<6–ß>LË5BPçÂ3ÿÿ8ñ¸àC€€8‡ºQC€ÿ8‡ºQ½}Tæ8ž~®: *@c!À¡sþ6Þ$6…L]K·=K¨NÁŠÚoÁŒV¼÷4Ñ´sÓø4ñÚ©µ9Ø]4òÄ´¤4ñ@µqå.4ò@ u 5Ç8à@C.~}BΔz9FIû<¾Ú;Ác*>LË5#³äÂ3ÿÿ8Ë¿C€þ8{ÍC€ý8{ν5ƒ8•G’:Ó@h›¸À¡ê6¶ç5úÂöK±ºK8´ˆÁŒ|Á¨À*Óuµ³%-4Ëøµ`:4Ì¡ã2š.á4˶05~94ÌsÆÎ 5·@í@C?ñŠBÍ m9ñÇ<;ðr<ä£>LË~5ÞyÂ3ÿÿ8²þC€ý8s4=C€þ8s4>½§8dB‰:ã @n7¢À Ý96256)“K3öKWø¶ÁnåÁ}i³Ä4V™4³\ˆ´ ðÝ4´³¶Ox4²âKµMˆð4³š&/ 4Ǭe@CS#KBËÔ®>LË”4ÿöÇÂ3ÿÿ8Ÿ6}C€þ8mù?C€ÿ8mù?¼µÂ[@sö-À ¨À³„33„3KROLK}¬XÁŽÉkÁjCµ¹#4Bje4Ÿ~,´š_ 4 ©³NO¤4Ÿuü2%„E4 þš 5.†„@Ch@lBÊäõ>LËÆ4ãŸÂ3ÿÿ8C˜C€ÿ8h=ŸC€þ8h=Ÿ¼~Ór7¸@i:¹º@yØ-À Ì4û´ûKvÊÙK•”hÁ-Á‘Ù—–)´”'£4y®´4úa³‰É4hú´c‚›4÷o 3·òJ@CzBÊ,:{<ÝR<œHT>LÌ4ÏœoÂ3ÿÿ8ãC€ÿ8i†“C€þ8i†”¼1bN7¥ð­:ï{ø@Þ~À `6¼+œ6®Ç¡K‘dmK°óPÁ‘™ÝÁ“N§€Ñ! 
´ˆùP4O|´“ÉÚ4Ó'³€…u4:Ž´èÒ4ÊË‘ 4yNÖ@CŒƒ#BÉžm>LÌ4¼ÎÂ3ÿõ8pmC€þ8kÛ»C€ÿ8kÛ¿»õŒH7€N;ħ@ƒÀ G´²ÖZ’2ÖZ’K«éLÌ5¡cÂ3ÿõ9c C€þ8ðÉFC€ÿ8ðÉQ»¨=@†-ÌÀ 5³† 3† KÁºKÓ›(Á”ßÁ”Ü[rŸÑc²¡«s4óˆ ´»×Ú4´úÀ´}v]4΢aµ›?4ÉÔî,‘5eß\@photutils-0.4/photutils/isophote/tests/data/synth_lowsnr_table.fits0000644000214200020070000007020013175634532030352 0ustar lbradleySTSCI\science00000000000000SIMPLE = T / file does conform to FITS standard BITPIX = 16 / number of bits per data pixel NAXIS = 0 / number of data axes EXTEND = T / FITS dataset may contain extensions COMMENT FITS (Flexible Image Transport System) format is defined in 'AstronomyCOMMENT and Astrophysics', volume 376, page 359; bibcode: 2001A&A...376..359H ORIGIN = 'STScI-STSDAS/TABLES' / Tables version 2002-02-22 FILENAME= 'synth_lowsnr_table.fits' / name of file NEXTEND = 1 / number of extensions in file END XTENSION= 'BINTABLE' / binary table extension BITPIX = 8 / 8-bit bytes NAXIS = 2 / 2-dimensional binary table NAXIS1 = 160 / width of table in bytes NAXIS2 = 55 PCOUNT = 0 / size of special data area GCOUNT = 1 / one data group (required keyword) TFIELDS = 40 TTYPE1 = 'SMA ' / label for field 1 TFORM1 = '1E ' / data format of field: 4-byte REAL TUNIT1 = 'pixel ' / physical unit of field TTYPE2 = 'INTENS ' / label for field 2 TFORM2 = '1E ' / data format of field: 4-byte REAL TTYPE3 = 'INT_ERR ' / label for field 3 TFORM3 = '1E ' / data format of field: 4-byte REAL TTYPE4 = 'PIX_VAR ' / label for field 4 TFORM4 = '1E ' / data format of field: 4-byte REAL TTYPE5 = 'RMS ' / label for field 5 TFORM5 = '1E ' / data format of field: 4-byte REAL TTYPE6 = 'ELLIP ' / label for field 6 TFORM6 = '1E ' / data format of field: 4-byte REAL TTYPE7 = 'ELLIP_ERR' / label for field 7 TFORM7 = '1E ' / data format of field: 4-byte REAL TTYPE8 = 'PA ' / label for field 8 TFORM8 = '1E ' / data format of field: 4-byte REAL TUNIT8 = 'degrees ' / physical unit of field TTYPE9 = 'PA_ERR ' / label for field 9 TFORM9 = '1E ' / data format of field: 4-byte REAL TUNIT9 = 'degrees ' / physical unit of field TTYPE10 = 'X0 ' / label for field 10 TFORM10 = '1E ' / data format of field: 4-byte REAL TUNIT10 = 'pixel ' / physical unit of field TTYPE11 = 'X0_ERR ' / label for field 11 TFORM11 = '1E ' / data format of field: 4-byte REAL TUNIT11 = 'pixel ' / physical unit of field TTYPE12 = 'Y0 ' / label for field 12 TFORM12 = '1E ' / data format of field: 4-byte REAL TUNIT12 = 'pixel ' / physical unit of field TTYPE13 = 'Y0_ERR ' / label for field 13 TFORM13 = '1E ' / data format of field: 4-byte REAL TUNIT13 = 'pixel ' / physical unit of field TTYPE14 = 'GRAD ' / label for field 14 TFORM14 = '1E ' / data format of field: 4-byte REAL TTYPE15 = 'GRAD_ERR' / label for field 15 TFORM15 = '1E ' / data format of field: 4-byte REAL TTYPE16 = 'GRAD_R_ERR' / label for field 16 TFORM16 = '1E ' / data format of field: 4-byte REAL TTYPE17 = 'RSMA ' / label for field 17 TFORM17 = '1E ' / data format of field: 4-byte REAL TUNIT17 = 'pixel**1/4' / physical unit of field TTYPE18 = 'MAG ' / label for field 18 TFORM18 = '1E ' / data format of field: 4-byte REAL TTYPE19 = 'MAG_LERR' / label for field 19 TFORM19 = '1E ' / data format of field: 4-byte REAL TTYPE20 = 'MAG_UERR' / label for field 20 TFORM20 = '1E ' / data format of field: 4-byte REAL TTYPE21 = 'TFLUX_E ' / label for field 21 TFORM21 = '1E ' / data format of field: 4-byte REAL TTYPE22 = 'TFLUX_C ' / label for field 22 TFORM22 = '1E ' / data format of field: 4-byte REAL TTYPE23 = 'TMAG_E ' / label for field 23 TFORM23 = '1E ' / data format of field: 4-byte REAL 
TTYPE24 = 'TMAG_C ' / label for field 24 TFORM24 = '1E ' / data format of field: 4-byte REAL TTYPE25 = 'NPIX_E ' / label for field 25 TFORM25 = '1J ' / data format of field: 4-byte INTEGER TTYPE26 = 'NPIX_C ' / label for field 26 TFORM26 = '1J ' / data format of field: 4-byte INTEGER TTYPE27 = 'A3 ' / label for field 27 TFORM27 = '1E ' / data format of field: 4-byte REAL TTYPE28 = 'A3_ERR ' / label for field 28 TFORM28 = '1E ' / data format of field: 4-byte REAL TTYPE29 = 'B3 ' / label for field 29 TFORM29 = '1E ' / data format of field: 4-byte REAL TTYPE30 = 'B3_ERR ' / label for field 30 TFORM30 = '1E ' / data format of field: 4-byte REAL TTYPE31 = 'A4 ' / label for field 31 TFORM31 = '1E ' / data format of field: 4-byte REAL TTYPE32 = 'A4_ERR ' / label for field 32 TFORM32 = '1E ' / data format of field: 4-byte REAL TTYPE33 = 'B4 ' / label for field 33 TFORM33 = '1E ' / data format of field: 4-byte REAL TTYPE34 = 'B4_ERR ' / label for field 34 TFORM34 = '1E ' / data format of field: 4-byte REAL TTYPE35 = 'NDATA ' / label for field 35 TFORM35 = '1J ' / data format of field: 4-byte INTEGER TTYPE36 = 'NFLAG ' / label for field 36 TFORM36 = '1J ' / data format of field: 4-byte INTEGER TTYPE37 = 'NITER ' / label for field 37 TFORM37 = '1J ' / data format of field: 4-byte INTEGER TTYPE38 = 'STOP ' / label for field 38 TFORM38 = '1J ' / data format of field: 4-byte INTEGER TTYPE39 = 'A_BIG ' / label for field 39 TFORM39 = '1E ' / data format of field: 4-byte REAL TTYPE40 = 'SAREA ' / label for field 40 TFORM40 = '1E ' / data format of field: 4-byte REAL TUNIT40 = 'pixel ' / physical unit of field TDISP1 = 'F7.2 ' / display format TDISP2 = 'G10.3 ' / display format TDISP3 = 'G10.3 ' / display format TDISP4 = 'G9.3 ' / display format TDISP5 = 'G9.3 ' / display format TDISP6 = 'F6.4 ' / display format TDISP7 = 'F6.4 ' / display format TDISP8 = 'F6.2 ' / display format TDISP9 = 'F6.2 ' / display format TDISP10 = 'F7.2 ' / display format TDISP11 = 'F6.2 ' / display format TDISP12 = 'F7.2 ' / display format TDISP13 = 'F6.2 ' / display format TDISP14 = 'G8.3 ' / display format TDISP15 = 'G6.3 ' / display format TDISP16 = 'G6.3 ' / display format TDISP17 = 'F7.5 ' / display format TDISP18 = 'G7.3 ' / display format TDISP19 = 'G7.3 ' / display format TDISP20 = 'G7.3 ' / display format TDISP21 = 'G12.5 ' / display format TDISP22 = 'G12.5 ' / display format TDISP23 = 'G7.3 ' / display format TDISP24 = 'G7.3 ' / display format TDISP25 = 'I6 ' / display format TNULL25 = -2147483647 / undefined value for column TDISP26 = 'I6 ' / display format TNULL26 = -2147483647 / undefined value for column TDISP27 = 'G9.3 ' / display format TDISP28 = 'G7.3 ' / display format TDISP29 = 'G9.3 ' / display format TDISP30 = 'G7.3 ' / display format TDISP31 = 'G9.3 ' / display format TDISP32 = 'G7.3 ' / display format TDISP33 = 'G9.3 ' / display format TDISP34 = 'G7.3 ' / display format TDISP35 = 'I5 ' / display format TNULL35 = -2147483647 / undefined value for column TDISP36 = 'I5 ' / display format TNULL36 = -2147483647 / undefined value for column TDISP37 = 'I3 ' / display format TNULL37 = -2147483647 / undefined value for column TDISP38 = 'I2 ' / display format TNULL38 = -2147483647 / undefined value for column TDISP39 = 'G9.3 ' / display format TDISP40 = 'F5.1 ' / display format IMAGE = 'synth_lowsnr.fits' END FGŸÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿC€}àÿÿÿÿC€}rÿÿÿÿÃX‘ðÿÿÿÿÿÿÿÿÁ¼Cÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿ€€ÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿ€€ÿÿÿÿÿÿÿÿ?__F„MA’§àBºó™—>2EžÂH¦ A¡ÔŽC€}à=nMC€}r=adÃñwlD Ε?“)[?Y~`Á…ø;/; 
ØQFGŸFGŸÁ¼CÁ¼C;†mh=èdž=c&>¾)>¤b¾£ +>»Êƒ >_¨@?µµFÌA³"ABäZB¡x>˜ð7>1G@ÂF A¡~HC€~=¡|C€}G=vÝ€Äÿ¨DŽ)?‘˜:?^¼üÁy‹;.,C;-¹kFGŸFGŸÁ¼CÁ¼C»žAÒ=ç`j=jÕ>d½î¡> l¿¾¤ $>» >òw@?!azFŸ¾AÙ`0C ŒæBÃð™>˜ x>0ëxÂEñëA¡òC€~!=Ž—C€}E=‡R©ÄwD)rç?÷å?dùÁj7;Tî;S€RFGŸFGŸÁ¼CÁ¼C»Ò‚‹=çC‡=LK9=ú2Y½î!>Î ¾£íU>¹þ =“Hi@?1„ÓF„B²VC'á„Bíkb>—†Ä>0?uÂE]UA¡ÉÓC€~3=›^C€}0=”IGÄ%¥ÝD:¥?9~?iœÁWƒ;Æ;€£ëFGŸFGŸÁ¼CÁ¼C¼ \—=æåå=4P=ó]¬½çù>¾Ü¾£Î¦>¸çn >ƒò@?CEOF KëB×:CKÁüC>–Ù>0¾ÂDù1A¢*rC€~M=ª[C€}2=¢ÍoÄ7]/DM÷¦?ÇT?o>6Á@¡;Œð;œà„FGŸFGŸÁ¼CÁ¼C¼"¶P=æîµ=ù(=í8ҽ䫽>‡*¾£ÐK>¸W¶ >¥éó@?VÌ>F jáBAs~CvšPC._ß>–Ù>/UÆÂD…9A¡‹ÿC€~m=º‘¦C€}=²‚ëÄJˆ’Db¢7?;.?uÁ$à;¿÷Ã;¾õ'FGŸFGŸÁ¼CÁ¼C¼@ž=æÏo=ج=è½ßó=þâA¾£°¿>·' ?Ÿu@?lGF VYBjöùC•ÂàCSË[>–8>/^ÂD?A¢HÌC€~„=ÌÒ`C€}=ÄEUÄ`[Dz8 ?Žð?zë’Áˆ;ëj;é˜ÜFGŸFGŸÁ¼CÁ¼C¼S¸E=æñ.<ßû =äàç½ÛÄ=ùÖí¾£Åú>·9û ?7/@?óãF ŒBŽ[LCµxEC€Q‹>–8>/rÂD$A¢’C€~ž=à÷ÍC€|þ=ׇÄw8DD‰ÏÈ?Ž´Å?€|CÁØù<æØ<¾eFGŸG6 Á¼CÁ:¼x¼`Tõ=æïy<Ǹ˜=âᱽܼ~=úz“¾£Œ>¶­- >aMY@?Žò­FtmB¬ÒÿCÜNúC›È>•‚Ê=÷ÒiÂD‹AfšC€~³=®ð~C€|í=§¤¶ÄÁ/D“Æ?C 8?ƒ•kÁ¥‘<0â 0á¡ ?€Î@?>%F1‡B²–šCã¨C ú1>y¹þ=ŒûCÂ;óAd¬C€€®=OZ´C€=KädÅ*<DqÀ >µÅæ?†Á²Áy<<¾r<:ÄQG6 G6 Á:¼xÁ:¼x¼k07=- )»j—=%4.¼ÿW&SòJ=3'-Â? @ßz C€€þ= ì(C€û= D<ÅIHD*Â>XVw?ŠÁ-¶<£¨<BîG6 G6 Á:¼xÁ:¼x»þ _<Ðßjº?`Z<È_ ¼™à<6i齌ÿ%<=? =ö´R@?¾C„EëóBXH÷C‰ÚîCBôé>Hø\<ø«²Â;ª @¢˜äC€=<ÔÀ\C€€ <Ò$-ÅM]ÐCî)Ý>q?U}Ág;ÿ²ë;ýà«GrÙ¢G’"Á?¾>ÁC »Æóœ<Ž¶Ò¸¶Gj<‰̼Y[[<³ ½7*6<*ÈŒ >Z@ÿ@?ÑJEEÜ0îB.[C^BÆC)‚>Eÿ<»˜RÂ9d(@y©C€€â<¯·vC€á<®<"ÅHt©Cª7½=Ùb%?½öÁë;ÜÉÇ;Ûe«GrÙ¢G’"Á?¾>ÁC »íæ?€“<†ÄÂ6ã€@7ͧC€<‰àC€€ <‰EúÅ=—C\?ž=•"ô?”;xÁƒ;°è ;°­G’"G’"ÁCÁC ¼óí<E¹€ò<=£»ø¼];±¬%¼¬·û;¯ññ =”¯@?ý=µE¹×A©„B×’B˜nr>7&<<-Ï×Â49?÷RC€,¹ö<§œÂ/ýv@<9ÜC€€<šUC€n<›-LÅ¥C¨=€½?›w¤Á ÿ;ª#;©TjG’"GºëËÁCÁG<´ ¼yUÖ;çyºò2–;âÇ/»$éÓ;•˜<…Ëe;—­œ @èh¨@@5úE˜—A:Y»Bv„ÀB.P >4-<2Â5Þ‰?¼†¿C€~Õ<0—(C€€º<0:’ÄÑäžB¦åT=KŽÁ?Ÿ7[ÁŒ ;)ñ;)7GâëÕHgpÁJ›ÁM ºÅrX;“%ºFà'; c» ¤½; ª„ìø›@@(ˆ-EŒ©‹ACgðB…É B=3x>=ÂV<R€Â-‚Õ?ÈéC€À<[îC€€û<][ŠÄ¬yKByñ=8Ü?£7Á";AP@;@ÏYGâëÕHgpÁJ›ÁM »aûÎ;¥-:Á¢;£Ú®»-q¥;¢½Éº—=;¡©N ?P1m@@9b˜EÊìA±XBa€aBt!>D—;ËéÂ4?ˆ´ÔC€€{<(iC€b<(4Ä®3ÿBô&<àÅá?¦üÆÁ¼k;%×È;%oJH ‘‡H»ÁNÁNáºÊ• ;oàÄ:¹ß[;eEª»iûH;D˻ݲý;:ÿ™ =ðÏÄ@@KìtEiöy@¬™4BrçA·|>B,>;„ÅÂ3‘ã?-.C€€ˆ;ëùŸC€;ì9ĉ3–B kz=£?«›Áî@:Í%l:ÌôØH ‘‡H9gÁNÁS¡%:=ÅK;9·;}ª;2ï9át—;á};q;¼~ >[Ìñ@@`PæET;^A•ŸBf0rB"Ä´>D½ß;ü_¬Â:¤«?¥xnC€äît;>fºH,ÊîH9gÁQé»ÁS¡!%;VîB;ÌÝ;›;‹Ô›9"y;‹2<;ZY¾;‰6 >t2@@v¿dEA†-@ºžûB¼ìAÚÕ!>FÄü;¢.zÂ7Ÿ¥?SC€€w<1^ÒC€‚‚<0‰±Ä?Ò AªQ<ãM»?³\pÁ ¢[;!,;æ.HEÑVHP=^ÁTC:ÁU')-;²ž;&*;w\-;#[ë;»Ör; ëYºÐÀÌ; LH ={iÄ@@‡¶E.¹ò@ª­íBÐ AÑ é>D"è;˜×ëÂ3( ?G@lC€€²<6-íC€‚‰<6cÄ(Ý…A…Ãb<ÊÉ?·¯§Á Ûð;Ýâ;¥HEÑVHnéÁTC:ÁW{Í)9;Í€z;m&;%yj;];ú¨:×{Á9ººÁ:Õ  <1”ü@@•HFE¼„@‘ÁBC A¼uô>C%X;•øfÂ6ì?Ht±C€€(JHc;©îŽÂ5ö?XTC€}ßO’i;–@Â4å?;ü}C€{<PZ¢;PpýÂ5¥?QEC€|´<8uC€~V<7ªÜÓz@õúÀ<ÈÄ%?ÊÁ©ª:Èi…:ÈÐHŸÇH´B×Á\…”Á^°u_yºÞ?:î˜;|f;:ìŸ[ºû:âλ-G:à!# <§*`@@ÚuDÍ-µ@WV´AíºF’;œÜÂ3ÈÔ?K¦C€€É<–µC€‚)<–À;Ãw@@Õåc<Ýw'?ÎíSÁ›ß;û;ºkH³e`HÆpÁ^›Á`[±w‘»=&ý;&Û²<!6;$Ú»{ú`;ý]»w‰;¥ ' =µ@@ðk´D¶‹n@6n\AÓy{A•ˆû>:‡a;“Ÿ£Â4/ì?J=:C€-<›B–C€ƒŠ<›9lÃKpŸ@È•j<ügá?ÓêµÀý(; ’; „HŃvHÛ®ÓÁ`FîÁb ±9 F;Ø<$ä;åÊ»˜; j³;ið1; !Þ+ =Ä0@A;=D¤ #@j‚[BkAÈöÚ>>Ú!;ʰËÂ9}V?‡Ž¥C€}<ë  C€~ˆ<éõiÃ+ÆZ@Ðw˜=W|?ÙåÀùqo;FöÙ;FbòHݼ¬Hõ-ÁbIgÁd8³Ý;‹,;n^;Ðê;lx¿:z6»;lå®:ë·K;km„/ >’<´@At]D‰ånA<&B¸ÄÅB‚¦²=ÚŸ;Ò};Âeù'?êBC€€= C€€<ùL&à È'AOâ0=Áô?ÞB ÀóiQ<î8<įHû´ÊIQÙÁd},Áe}|ç»Æ ò;b®ã;Ò";_»/dË;^&:öêÓ;] 62B~O@A Dtzv@øß½B¨÷jBnôe=ÚŸ;éãfÂeù'@,ÀC€€=‡¥C€€=¥HÂݺàA%åˆ=¿‰ƒ?ãž©Àï9Z< /< ê7I ËIk×Áf7ãÁgZ=»*°y;{BÉ;§ˆr;~'í;Æ‹¢;qD;Õ‘ë;s²;2B{—@A0De†@No‰B¬TAË/=>>¶E<B[Â.‘Ú?°ÃC€‰Û=KØÄC€vÙ=M\¥Â«ˆæ@“ÐÁ=\™á?éÈÀí;z)û;yN©I_IôÁfö»Áh‹5y;ž Õ;œ&<;œÔżû;‡€Ã<~4;‰´ > ?*$ú@AA™šDRÙm@Ih·B·¦AÎT>Z ;óÈÂ7¼x?ˆC€‚h=RbC€pÎ=Q& ‰@aô =4(Ü?îºÉÀê$;…;„€&I{I/NÃÁh)cÁj?eÑ»ÕM!;”K©;Àp¢;”œü8³';’•Ò;¡‰Ó;“3FC >’ÓÐ@ATõÃD8á’@7{jB ‚~AÅKì>O9;òQ]Â5]Ú?”àGC€=c«@C€zý=c2ˆÂ‡H@L>=A?ô|}Àå„&;Š/é;‰¨.I+ĸI=ÕwÁiäcÁk¡¹);ž×¿;”(º{';”Ñ’O9<+ÏzÂAÔF?Ô¯rC€À=³¾æC€‰=°*âÂFïQ@=Ô=tGò?úaºÀá û;µ™~;´«¡II?;<YÂ3±2?´Ÿ~C€‰=¡ØXC€‚ 
=¡êøÂÍ”@åx=i”ä@5®Àܱ;”çø;”GSINxGI`x]Ám³ÁnŠn•%;çU8;®Ú»@ëZ;®`×¼:¯_;©X7»šŒc;© iZ >OÓœ@A¹’D?ò@87ÆB!0ÍAãõ>>UPÌXG<8ÉÂCš ?Ú—'C€Ž²>õtC€y,=û·ÁÐá¤?éW_=Žý@w«ÀÔÓ;Ä.Š;Ã8IqCNI…«»ÁoËEÁq“º­±<4÷;ãÜ<&ã˜;ä÷V<`3;â(»a;âžl ?2þ@A«|³CÅ—‚@Û§BM·AÎç>.ò~<†.àÂCš @@= C€u%>HxTC€yµ>D ‹Á~ ø@,·x>. l@ µÀÀϾq;Ól;Ò/‡I…ÕÿIæöÁq™8Árú…­©<¶Þ<(q;WôE<_ˆ;˜Xw< K¼?4M<U¸z ?ÜfE@A¼¢ÅC±“@ 2ñBl’AÉk>2Ó)e±C€oO>)8ºÁŠ ã@á=òýM@ ÙÀËï;;Û*®;ÙÖPI‘,IœÿäÁsÍÁt_¥Õ<ÕÆ;ú‹:;ø:ª:c$;ö.þ¼|Ê;ù~™† ? ¹ø@AÏÙC¢ •@ 7,BfAÍ|>Zf<]*ÇÂ%pÝ@¥ÆC€_à>IlC€z>N2ÁZ’·?øZ—>p«@nrÀÈÚ;ì%Æ;ê™ I™ø6I©ŒÕÁtrÁuµ y-;ú´,< %=<‘þ< yš»»”p< ZÕ;”s< È2@Œz@Aä?ÕC¹n@AWB%ÓAêƒ~>Zf‚}C€z>…6Á0š*?Ê;ì>“ê@ê ÀįQ<ê<¼áI§5YI¸†8ÁuwGÁw-€ß ¼ 9¬<$¸»ïtÔ<$!g¼#PN<#ö;‚<"Êcž2AG@AûCà[?÷ÚúBAËÀu>ZçC€k·>ƒ»ÃC€Íx>†ÛÝÁØ?¹Õ_>¦í@{ ÀÁ*1<H<¸IµIÈ-Áv×ñÁx•j ¼Pâs<ïî:e¥ <”Z»¼¹A<(<_n<JÔ­2@Rp@B BCin“@ ‡8B+©“AòÄn>Zç<¿kHÂ%pÝ@a·C€Vº>èGC€µ¡>íèÑÀ¼Í?¨S>cˆ@";À½sì<(}<&‹ÕIĈ£IÙw„ÁxEéÁz ƒ¡:qÊB`)Á©%C€i<>½7¸À»ìÇ?Ý>Bîû@ßãÀ¹×/<@–<>IØ Iëç}Áyì-Á{qíU¼¶³L<;¨u69Ì=DÂTÆõ@ÐåC€Õ1?_NqC€„¼?Uó½À*t3?jâÆ>°b_@"´£À¶±u<"ßÊ@ŒS=CY'ÂD~A±C€ßZ?žiûChÄ?šÐ¿å*H?Eè…>Ý?@&¡ À²¼<0°Ë<.òºIÿ­/J ?2Á|×ÞÁ~sÙ Õ=!ûÈ= Ú3<Õ\(<ÿü¼˜G<öM3»«Ð¹<ïaÇ? kw@BJ-ÈC6r?¹²B þ¹AÅû›>r¸=×Â.b@Ôf’C€¬å?aâ¡C€R1?cG“À #ä>Ór>Cì¬@*¥¨À°(<"ÖE!€Ú<ÑèÂ'@Ú@¡œC€ÿù?F"C€Ÿ[?I ¸À ‘?â>}uò@.ÃÀ®ZŸ<*z<(Ý3Jh¼J(«kÁ€1Á€ÔBí%ù»·LìG­Ö=R1ÝÂ'@ÚAò0CfÖ?ß»”C¬-?ãÔ0¿’•‘>²J>›|&@2ùçÀ¬9’G­Ö=EÓÂRæ@û”dC€Ý?îB0C~µô?äU”¿v³>ŽÐ>”šÄ@7J¾Àªè<7’<5) J7`¢JM‘ÁŽ)Á‚Œ+,µ7©=b‚ß= Tö¼9›7<õ‹n=ab=q =±¦=HVy2?ß±@B”JBþñ,? ²B AÎW>(‘=<9‚ÂWTA ËSC~?ô29C€~?êHA¿`W9>kÿÿ>†¦ÿ@;¶9À¨nÔ<0º<.VåJNämJcõ$Á‚šqÁƒr 8-C)<Ò<æÀñ¼rŸ<âçµ=–0¾=üµ½7@,<ò ȧ ?^Ư@B¢Î8Bô’ò?ž}PBh8AÓK&>IÀT=Ø€ÕÂWTAˆoVC©¼@ž½RC‚Fí@–áȾ§×u>\¨­?(Gš@@<ýÀ¦ýŒ<5<36ÖJ`dËJ~8ÁƒOÁ„fíAOQi=»Ôj=¶0J¼2wÿ=‡IU>C¢{>«ê½z4 =œv@Ç?®[T@photutils-0.4/photutils/isophote/tests/data/synth_table.fits0000644000214200020070000007020013175634532026746 0ustar lbradleySTSCI\science00000000000000SIMPLE = T / file does conform to FITS standard BITPIX = 16 / number of bits per data pixel NAXIS = 0 / number of data axes EXTEND = T / FITS dataset may contain extensions COMMENT FITS (Flexible Image Transport System) format is defined in 'AstronomyCOMMENT and Astrophysics', volume 376, page 359; bibcode: 2001A&A...376..359H ORIGIN = 'STScI-STSDAS/TABLES' / Tables version 2002-02-22 FILENAME= 'synth_table.fits' / name of file NEXTEND = 1 / number of extensions in file END XTENSION= 'BINTABLE' / binary table extension BITPIX = 8 / 8-bit bytes NAXIS = 2 / 2-dimensional binary table NAXIS1 = 160 / width of table in bytes NAXIS2 = 69 PCOUNT = 0 / size of special data area GCOUNT = 1 / one data group (required keyword) TFIELDS = 40 TTYPE1 = 'SMA ' / label for field 1 TFORM1 = '1E ' / data format of field: 4-byte REAL TUNIT1 = 'pixel ' / physical unit of field TTYPE2 = 'INTENS ' / label for field 2 TFORM2 = '1E ' / data format of field: 4-byte REAL TTYPE3 = 'INT_ERR ' / label for field 3 TFORM3 = '1E ' / data format of field: 4-byte REAL TTYPE4 = 'PIX_VAR ' / label for field 4 TFORM4 = '1E ' / data format of field: 4-byte REAL TTYPE5 = 'RMS ' / label for field 5 TFORM5 = '1E ' / data format of field: 4-byte REAL TTYPE6 = 'ELLIP ' / label for field 6 TFORM6 = '1E ' / data format of field: 4-byte REAL TTYPE7 = 'ELLIP_ERR' / label for field 7 TFORM7 = '1E ' / data format of field: 4-byte REAL TTYPE8 = 'PA ' / label for field 8 TFORM8 = '1E ' / data format of field: 4-byte REAL TUNIT8 = 'degrees ' / physical unit of field TTYPE9 = 'PA_ERR ' / label for field 9 TFORM9 = '1E ' / data format of field: 4-byte REAL TUNIT9 = 'degrees ' / physical unit of field TTYPE10 = 'X0 ' / label for field 10 TFORM10 = '1E ' / data format of field: 4-byte REAL TUNIT10 = 'pixel ' / physical unit of 
field TTYPE11 = 'X0_ERR ' / label for field 11 TFORM11 = '1E ' / data format of field: 4-byte REAL TUNIT11 = 'pixel ' / physical unit of field TTYPE12 = 'Y0 ' / label for field 12 TFORM12 = '1E ' / data format of field: 4-byte REAL TUNIT12 = 'pixel ' / physical unit of field TTYPE13 = 'Y0_ERR ' / label for field 13 TFORM13 = '1E ' / data format of field: 4-byte REAL TUNIT13 = 'pixel ' / physical unit of field TTYPE14 = 'GRAD ' / label for field 14 TFORM14 = '1E ' / data format of field: 4-byte REAL TTYPE15 = 'GRAD_ERR' / label for field 15 TFORM15 = '1E ' / data format of field: 4-byte REAL TTYPE16 = 'GRAD_R_ERR' / label for field 16 TFORM16 = '1E ' / data format of field: 4-byte REAL TTYPE17 = 'RSMA ' / label for field 17 TFORM17 = '1E ' / data format of field: 4-byte REAL TUNIT17 = 'pixel**1/4' / physical unit of field TTYPE18 = 'MAG ' / label for field 18 TFORM18 = '1E ' / data format of field: 4-byte REAL TTYPE19 = 'MAG_LERR' / label for field 19 TFORM19 = '1E ' / data format of field: 4-byte REAL TTYPE20 = 'MAG_UERR' / label for field 20 TFORM20 = '1E ' / data format of field: 4-byte REAL TTYPE21 = 'TFLUX_E ' / label for field 21 TFORM21 = '1E ' / data format of field: 4-byte REAL TTYPE22 = 'TFLUX_C ' / label for field 22 TFORM22 = '1E ' / data format of field: 4-byte REAL TTYPE23 = 'TMAG_E ' / label for field 23 TFORM23 = '1E ' / data format of field: 4-byte REAL TTYPE24 = 'TMAG_C ' / label for field 24 TFORM24 = '1E ' / data format of field: 4-byte REAL TTYPE25 = 'NPIX_E ' / label for field 25 TFORM25 = '1J ' / data format of field: 4-byte INTEGER TTYPE26 = 'NPIX_C ' / label for field 26 TFORM26 = '1J ' / data format of field: 4-byte INTEGER TTYPE27 = 'A3 ' / label for field 27 TFORM27 = '1E ' / data format of field: 4-byte REAL TTYPE28 = 'A3_ERR ' / label for field 28 TFORM28 = '1E ' / data format of field: 4-byte REAL TTYPE29 = 'B3 ' / label for field 29 TFORM29 = '1E ' / data format of field: 4-byte REAL TTYPE30 = 'B3_ERR ' / label for field 30 TFORM30 = '1E ' / data format of field: 4-byte REAL TTYPE31 = 'A4 ' / label for field 31 TFORM31 = '1E ' / data format of field: 4-byte REAL TTYPE32 = 'A4_ERR ' / label for field 32 TFORM32 = '1E ' / data format of field: 4-byte REAL TTYPE33 = 'B4 ' / label for field 33 TFORM33 = '1E ' / data format of field: 4-byte REAL TTYPE34 = 'B4_ERR ' / label for field 34 TFORM34 = '1E ' / data format of field: 4-byte REAL TTYPE35 = 'NDATA ' / label for field 35 TFORM35 = '1J ' / data format of field: 4-byte INTEGER TTYPE36 = 'NFLAG ' / label for field 36 TFORM36 = '1J ' / data format of field: 4-byte INTEGER TTYPE37 = 'NITER ' / label for field 37 TFORM37 = '1J ' / data format of field: 4-byte INTEGER TTYPE38 = 'STOP ' / label for field 38 TFORM38 = '1J ' / data format of field: 4-byte INTEGER TTYPE39 = 'A_BIG ' / label for field 39 TFORM39 = '1E ' / data format of field: 4-byte REAL TTYPE40 = 'SAREA ' / label for field 40 TFORM40 = '1E ' / data format of field: 4-byte REAL TUNIT40 = 'pixel ' / physical unit of field TDISP1 = 'F7.2 ' / display format TDISP2 = 'G10.3 ' / display format TDISP3 = 'G10.3 ' / display format TDISP4 = 'G9.3 ' / display format TDISP5 = 'G9.3 ' / display format TDISP6 = 'F6.4 ' / display format TDISP7 = 'F6.4 ' / display format TDISP8 = 'F6.2 ' / display format TDISP9 = 'F6.2 ' / display format TDISP10 = 'F7.2 ' / display format TDISP11 = 'F6.2 ' / display format TDISP12 = 'F7.2 ' / display format TDISP13 = 'F6.2 ' / display format TDISP14 = 'G8.3 ' / display format TDISP15 = 'G6.3 ' / display format TDISP16 = 'G6.3 ' / 
display format TDISP17 = 'F7.5 ' / display format TDISP18 = 'G7.3 ' / display format TDISP19 = 'G7.3 ' / display format TDISP20 = 'G7.3 ' / display format TDISP21 = 'G12.5 ' / display format TDISP22 = 'G12.5 ' / display format TDISP23 = 'G7.3 ' / display format TDISP24 = 'G7.3 ' / display format TDISP25 = 'I6 ' / display format TNULL25 = -2147483647 / undefined value for column TDISP26 = 'I6 ' / display format TNULL26 = -2147483647 / undefined value for column TDISP27 = 'G9.3 ' / display format TDISP28 = 'G7.3 ' / display format TDISP29 = 'G9.3 ' / display format TDISP30 = 'G7.3 ' / display format TDISP31 = 'G9.3 ' / display format TDISP32 = 'G7.3 ' / display format TDISP33 = 'G9.3 ' / display format TDISP34 = 'G7.3 ' / display format TDISP35 = 'I5 ' / display format TNULL35 = -2147483647 / undefined value for column TDISP36 = 'I5 ' / display format TNULL36 = -2147483647 / undefined value for column TDISP37 = 'I3 ' / display format TNULL37 = -2147483647 / undefined value for column TDISP38 = 'I2 ' / display format TNULL38 = -2147483647 / undefined value for column TDISP39 = 'G9.3 ' / display format TDISP40 = 'F5.1 ' / display format IMAGE = 'synth.fits' END F’*ÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿC€€ÿÿÿÿC€~zÿÿÿÿÃvôÿÿÿÿÿÿÿÿÁÅ+ÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿ€€ÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿ€€ÿÿÿÿÿÿÿÿ?__F‘^A’˜ÃBºßùB„#ø>“ü7>)Ä›Â=ýœAžì›C€€=]?€C€~z=W‹(ĨPD R$?‰0?Y~`Á‡Œ;‹; ÉF’*F’*ÁÅ+ÁÅ+¼× h=䨻‹z«=ÔV½•O.=§—Ô¾ °>­#^ =s z@?µµF%²A±q™Bâ2BŸò>“ü7>)À Â=þ AžèUC€€=sYC€~T=mÄ ŠÍD$P?‰•å?^¼üÁz€;,w®;,üF’*F’*ÁÅ+ÁÅ+¼× a=ä+»Ë =ÔSP½•Uª=§•«¾ «5>­ =Šƒf@?!azF£gAÖ´qCÙBÁˆG>“ü7>)ÊBÂ>¥AžñæC€€=…àC€~*=‚jÄ­D'Zù?‰š[?dùÁj©;Q|;PäcF’*F’*ÁÅ+ÁÅ+¼ÖSû=ä5»ŽQ¾=Ô`,½•­ÿ=§÷¾ ²!>­" =ÊŽ@?1„ÓF¿BÙC%†&Bê>“ü7>)¿Â>üAžçkC€€=“9ùC€}ú=kÒÄ+D\D87?‰G?iœÁWk;~‘s;}­PF’*F’*ÁÅ+ÁÅ+¼Öžj=䑻޲:=ÔQ뽕›ï=§ÖÀ¾ ¨Z>­ Þ =ï+¿@?CEOF G B)#CHWcC ©©>“ü7>)ÉåÂ>ïAžñC€þ=¡ýµC€}É=ÍÄ6Á@;šî2;šDoF’*F’*ÁÅ+ÁÅ+¼ÕuS=ä »_=Ô`T½•Àã=¨ æ¾ ±/>­"4 > U@?VÌ>F `B>?CrY\C+]Þ>“ü7>)·œÂ=ùÞAžàpC€€=²ÁC€}‹=­„àÄO@ÇD^·z?‰Œç?uÁ#‹;¼³š;»»¬F’*F’*ÁÅ+ÁÅ+¼×^Î=äÖ»Œë¨=ÔF(½•)=§^ü¾ £ž>­© >6™‘@?lGF HÐBfbC’¢—CO_•>“ü7>)úÂ=ÿEAžëÈC€€=ÃùC€}N=¾ë5ÄcïDuQ?‰–6?zë’ÁØ;æG‰;äÑÆF’*F’*ÁÅ+ÁÅ+¼× )=äË»•|=ÔV`½•l =§­Ð¾ ­Í>­< >;t@?óãF öýB‹6ˆC±vdCzøF>“ü7>)Ç{Â>ÔAžïLC€þ=טÏC€} =ÒÐÄz¿ûD†Æá?‰™H?€|CÁÖƒ< Ë < ¬ôF’*G5ö´ÁÅ+Á:ºŸ¼Õø=äó»´=Ô\T½•¢½=§ê»¾ ¯ô>­u >jöq@?Žò­FXãB¨ÈªC×(tC˜#Ã>“IÕ=ë7ÁÂ=dÇA]3C€ö=£ïÄC€|¼=ŸûLÄÇÝzDŽY’?6T¯?ƒ•kÁ¢<,á-<+3”F’*G5ö´ÁÅ+Á:ºŸ¼—rb=ší×»]¢k=’¼Î½Eú)=N$¾^c >4€ ?Öý@?>%F±B¦Î{CÔ£2C–[>wcA=… Â7'eAþC€€Ô=BoC€~¶=A'KÅ(éÞD`|r>ª­?†Á²Áµ<0t}<.¼¬G5ö´G5ö´Á:ºŸÁ:ºŸ¼,‹Ô="šÀ»´µI=àß¼·o5SD©=(Î*Â;Ô @Ó+IC€€ÿ=ZC€Z=–ÅH£}Dèß>JÁÆ?ŠÁ,]<þ~<ÌÖG5ö´G5ö´Á:ºŸÁ:ºŸ»‡ T<ĨW»‹N<½|¼€:6<+¸<½….Œ<ƒTÏ > Ê1@?¾C„Eëü;BNxdCƒ™xC:>Géù<íº¿Â:d@œ,ÒC€:<ËôC€R<ÈõRÅMV$CãB> ªM?U}Á;ô";òYÓGrâ‡G’®Á?¾áÁC‘ »!º<ˆ¦ñ»ñ<ƒ[¼¼Rç\< ŵ½-wÓ<#} ={Ò@?ÑJEEÜ6MB*Á,CY«”Cê…>C$ô<·Ù~Â9ï+@vÓ3C€€è<¬ >C€(<ªu¹ÅHŲC¥Ëí=ÓgB?½öÁën;Ø-®;ÖßyGrâ‡G’®Á?¾áÁC‘ »o‘™<ýi<„^Â8òÇ@6‚ÿC€&<‡A`C€S<†C=Å=šùCPªÃ=ŒÞG?”;xÁ‚Ì;­Ì;¬¿‰G’®G’®ÁC‘ÁC‘ »€æ€<¡gºËj<Ò¼ ‘B;°u¼¬{®;­ðB =Âý@?ý=µE¹@A’ÍB»"·B„S*>5K<·IÂ7Î’?Ö¿•C€<<'ÓC€Ñ<&ªÅ2ÈC\Ë=O|¡?—Î…ÁæÚ;\Ìh;\^G’®G’®ÁC‘ÁC‘ »¾Í±;˜b:;—5T»½Yµ;‡>U»²1; =L\@@ HWE¦:àAªBØÈB™Iþ>*ý;áËG’®G»+¿ÁC‘ÁGB¥ ¼W<;Ûã3ºêÀe;Ù_qºmåj;†ð<‡¡^;ˆD @u`˜@@5úE˜úŸAþôBK·«B Ì>5 ;×[ñÂçXº@@(ˆ-EŒãÕA-5ÿBm-ÇB'µô>?ñ<Â1Ûê?®§˜C€€.<@¢wC€€üC®#@@9b˜EËì@ôR1B2úAû×#>Dœ;Ÿê(Â6X§?WXŽC€é<’ C€€f<:ÈÄ®¢·Aþm¼<º|?¦üÆÁ¼;êÒ;¯çH ª3H?QÁN5ÁNåv9ο™;=xº³;5 º©:K;ÿ»ÍÃ;3 >ÝH@@KìtEiÐà@}AÔ;¬A–K>AM|;Y øÂ3 Ÿ?ÌâC€ý;Á¬?C€ù;Áí‹Ä‰ A´8<¨K?«›Áëv:¨[ :¨ ½H ª3H9´ÁN5ÁSI%¸£âš:ùÛu¸¨¢:ù~Í:½Ñ::ç˜;T§:æì& =¿Ùç@@`PæEȘ@¡'mAþÎŽA´,ø>A˱;‹¡(Â8¶ˆ?97rC€€š< =GC€ù< gmÄWÿA’ <­M?¯#KÁ 3¨:Ó—):ÓcH,ëÃH9´ÁQíÁSI!%ºy]Í;#EÕ¹ƒ‰;!C`ºK(t;àÐ;Žãf;, >Lìß@@v¿dEAC@'¢jAŠþÁAD‘>F ;M Â4¼K>¿Ã)C€º; Å±C€ý; 
žiÄ=ÐøA»JF÷o:Ö­àÂ18>ŠkZC€€;€!ßC€€;€žLÄ&¼@Ùf²<'Í?·¯§Á â®:<°v:<ËHEãŽHn$ÁTDÔÁW|J)9:P(:}}¸ 6ù:{µ<:cÒ(:j*óº½F´:h–H =U@@•HFE`@ qAld_A''‰>G²Æ; ;ŽÂ3îl>°[@C€ü;¶ýC€€;¶¾Ä…ô@¢ßÝ<"6?¼‘Á Ð:gÅì:gf™HtDKH…”ÁWí4ÁYjŽ;E9ìŽ:£÷E¹À·Á:¥3Ÿ7"B°:”ªö;'‹:•І2?S€@@¤5çE–œ?Š™Aðé@º—Ä>I­7:”v™Â36 >=>ÀC€ò;W*ýC€>;WfÚÃä-@HIp:ŒTFÂ2ë¥>4\ôC€€S;_«zC€€I;_þÒö×Ü@ ¾;Å Ý?ÅLÁn'9þ]ª9þmêH“¸5H e˜Á[;Á\©9Sa9´ô:)f¹×*:'^ž¹Fß:3:²ÄŸ:‡u =8ä‡@@ƱÞDæ6ú?$}ñ@¬¡@sIq>JdÂ:R”fÂ49¡>ƒ C€€¢;8¯wC€º;8 šÃ›Vk? º;„q5?ÊÁ›ã9Æßu9ÆWêH¡çH´^Á\ºÁ^³ ayº2Ë;9÷ø˜8=>¥9ö¯è¸4ç`9澺‚# 9ää3# <=º@@ÚuDÍýÕ>ÈsÛ@]K+@zm>I¸: ÜfÂ3¾É=¶Ž?C€€+; yðC€½; …ŽÃ}æP?;R0;<Þá?ÎíSÁ­u9†þ‚9‡uÌH³ÌHÆgãÁ^žÁ`Zúw‘ºLô9¦¦’¹=à9£Â·š‹9¡Mc9öN9ž‹u' ;Ú71@@ðk´D¸{i>^£_?ÿf?´[‡>Jë©9©&JÂ4’µ=TK‚C€å:²ù¢C€ù:²Ó7ÃRnÛ?ÖÌ;¼Ü?ÓêµÀý†9'ž?9'ÌNHÂÖùHÛ´=Á` QÁb q‹±9‘à9KÝ`7M°9Lxý¸Þ¨¥9@x;¹Ê9A * ‰€A@$ÛŸ?é$ï>KeW9æ26Â3“¦=‘²C€€;ª[C€×;ÀÃ.B>Ì; W?ÙåÀù™,9hÞ9gàHÝÎÞHõzÀÁbJÔÁd Ƴݹ¤5B9‰G©7mZ9Š> 9Œwÿ9òa¹ÿ˜d9‚èŸ. ;r{ö@At]D’ÀŠ>;|+?ì°1?§]&>K 9¯PMÂ3à‘=\´C€é:à¸gC€µ:à‰à °Ï>ƒ»4:î~?ÞB Àõ’î91'91žØHðËKI(äÁc¸Áex%Ó¹gÿ9PÜ9='9OÞhµp¼9IÁÖ9®¾9I²/3 ;*I±@A D‚¬X>d–?Á¨Ý?ˆð#>KF·9—ÿÂ4ó==¡C€ð:ÔÖÖC€€:ÔÕžÂè€>:Z:Í…E?ãž©ÀñŠÉ9ÎH9ЉI¥IFáÁeECÁg÷ý=8q-98Ïô·ã¹Q98þÏ67£G947b9”±94w8:‰JØ@A0Dhh„=Ù&?—!?Uº–>K°M9tÄ/Â4C&=kßC€€:¾`œC€€:¾N¾Â½© =ûCã:©“š?éÈÀíw9°ó9ú°I¬YIÍåÁfÊÁh‡e/y86-Î9s§¸çÍ'9•¥¸„©9ÕÛ8š¿Š9L> ;;k@AA™šDNXy=¤ÂÍ?p-¤?)Ôê>KÅú9PˆÂ44=ÐdC€€ :±»C€ÿ:±µ#™ÈÓ=§õ„:‹ÌH?îºÉÀéU8ݼ8ÞwI÷¯I/9ÁhS¨Áj8°mѸ™@9]í·“§:8ÿýݸŸ‰Á8ø#¿9{ß›8÷ƒŽD :ðß^@ATõÃD7{=l·E?3ü_>þ‰Ô>KÖ|9.#WÂ4)r<Ø›C€€5:¢æ…C€ë:¢ÜUÂyBz=Ph}:V 7?ô|}Àå-Õ8³V8´I,) I=PÁiî”Ák™h½)¹i8Ò½a·Þf8Ôh7¸"9-8Éþ9{Tñ8ˬŸJ :psÔ@AjAŠD"b¬=”/>×ng>˜U9>Lô8×FcÂ47<†ÌýC€÷:^F8C€ü:^CåÂIÓ<éÂ:°?úaºÀá18e„)8fë„I²Ã²>|Ϥ>LMX8¬(ÇÂ4ˆ”é´>R˜;>LD¢8ºÜÞÂ4ßYëã>þ>L¤8~óXÂ4<(êC€Ù:/*ðC€ü:/(ãÁÏÄt<uE9¶ì@w«ÀÔƒÅ8ñÑ8pIqÁtI…j¨ÁoÔXÁq‹Cµ±8—KÓ8m·:58M ¸V:8/o¸¸”£8rÂm :LI}@A«|³CÉ`F;é§þ=â<–=Ÿù6>L‰8Q”QÂ49<ípC€ü:vBC€€:rÁÁ§ ?#i°<ú`¿@ µÀÀÐg-7¢7 êI„5{Iœ^ÁqbÓÁrñ’©4謰8î·Y˜8‘¶©£¿7ûYG¸±·Ì7üMx 9Úc@A¼¢ÅC³2w;æI?=éÛN=¥\”>L…m8@Â4†;»¼ÅC€õ9ùê"C€ô9ùé”Á„Û%>Ô:™<ÌxÁ@ ÙÀÌYP7°µè7´ |IIœµ%ÁrÂ~ÁtVÖ{Õ5Ç´7º‘=¶Cc7º©í·RãF7ºHÄ6£“ž7ºo[„ : -@AÏÙCŸâJ;ì*4=û[â=±¼Ó>L 8÷Â3þÀ;³‘kC€Ü:¢’C€ß:¢ÏÁKes>”tF<ºÙ2@nrÀÈc7Ë&O7Ï8I›ßI©*tÁt'Áu« -¶ l¯7²\~7ya7²´»6¾„Ì7®K©¸_éÒ7®ù'‘ 9“ÝÙ@Aä?ÕCä>L©~7ÛqžÂ4´;ˆg5C€€9Üi¯C€ô9ÜiÁ#Êæ>JóÉ<žš6@ê Àćs´,âu4,âuI¨I¸[ÎÁuŒrÁw)‚ý ·QZA7‡š¶$l'7ˆB>6i³¨7ƒ–¦¸Lî7„†&Ÿ 9M–@AûC€‡;f¤¹=†×=>²>L«7´ÛwÂ3ý³;`ö˜C€ó9ÇßCC€ò9ÇßòÁ Ú> E<‰Ò@{ ÀÀÍY7|U7ub I¶XÆIÇãdÁvø¶Áx‘- ¯ µÏBd7`.L¶ˆˆ7a(Ö6‰Ê7[¡ý¸Xé7\㯠9HÓÖ@B BCgô¡;LË´=zÒ=1[¹>Lª7‹ó¯Â4;-žÞC€€9©ï1C€û9©î$ÀÆcñ=¬è<^@";À½;~7xY'7rò¦IÅëÝIÙ"ôÁxe3ÁzB »¡·ªÄ¨7*è·2•7,ζ™!Ô7*qO7ÀT7+µ5À 8…±@BæbCRi°>L°Ç7_îÂ3þË; lÈC€€9•Í×C€€ 9•ÎÀšYŒ=€ËoL¼7;ÆÂÂ3ý—:é–ŠC€ü9Š!ÞC€ý9Š"\Àpã°=ïò<(è<@"´£À¶©Í6¼É[6ÁfËIéÎJqÁ{L¼­7)¬Â3ý]:¶Ø{C€þ9n C€ÿ9n À7ö<Ò׿<³ß@&¡ À³³6Ê&©6ÉoƒIý™âJ .Á|³ŸÁ~n$«Õ6Œ¤S6¶Q¿¶J¡m6·CÅ6žP6³•O·m»6´¡J 8Zé@BJ-ÈC#o:Lã~<˜¯L¾è6ÞÃEÂ4¸:Š•‡C€ö9FN®C€ó9FNxÀBD<¬9<Cg@*¥¨À°ú6®ðu6¬æþJ aŠJz˜Á~8IÁ€f ]5Ÿì‡6Špõ5­SÊ6ŠöEµçó*6‰ ·ir6‰· 8kT@B^eCÊ$:{ŸÚ<ÃË.<Šrq>L¿Ý6¶Ñ†Â3ÿ:cPÚC€€92ø“C€ù92ø±¿Ù&¿>LÂã6˜òüÂ4°:>»zC€ü9$ìC€û9$ëó¿¤×<\è;ÈII@2ùçÀ¬Eå3“nˆ³“nˆJ&.J9€GÁ€³2Á§Ä$Ë-ñ¶¿R6=m´[Ç56=à„µŠÂ³6=Uö^6=¯§V 7»š@B†ŒÏC~>LÃá6pEæÂ3ÿÑ:»UC€þ9v%C€€9v/¿w’”;¤®ß;ªIø@7J¾ÀªK¾2áϲáÏJ6Š„JM8{ÁƒÿÁ‚ˆg,_7©6²ã6 ¶}Ñ6YµW¼6ül4Ý¢;6VQx 7r×@B”JBÿâh:„oÍ<íä*<¨6ë>LÄð6RÂ3ÿò:»ÏC€€9ðTC€€9ðW¿;é;Yì4;•@;¶9À¨¥7»}7 JI¬}Jc†ÜÁ‚a£ÁƒmÖ5·C)3•dX6Íß4§)C6A?4d¦P6Ɖ4YY˜6FÌ 7Þ}@B¢Î8Bõ ô9Ï‘LÆY6õAÂ4e9Æ ¦C€ÿ8äÞC€€8伿 |;Ä‚;‚ýÀ@@<ýÀ§•6z³46])©J_ÏçJ~µÁƒI<Á„cA Qi³w15Æ/¿µ‰Ä…5Æö4„65ÂÇ ¶Á´D5Ã>ÝÇ 7@y4@B³ Bì˜>LÇ76 ÏÔÂ3ÿÑ9«˜gC€ÿ8ÙmC€ü8ÙmŸ¾ÑƒÃ:·øí;`Êb@Dß²À¥Ä€3Ôxw³ÔxwJy‰-JŽetÁ„;'Á…aN¿biµRr5«ƒŠµÉj 5¬òµ®éè5« 85­Ôë5«¯ßô 7eM¿@BÄþ¦BäÍÚ>LÈ5Üw+Â4"9‰@ûC€€8¿NûC€ÿ8¿Nñ¾š…œ:hÃe;@Ïç@IŸÀ¤¬º4yLÍ´yLÍJ‹ÏÃJ ¬_Á…8IÁ†m‰_Uw=².Ž5‰bëµæ5‰È«´þ'5ˆ‡h60Ô÷5ˆúè& 6Ø| @Bر·BÞÙ>LÈ¿5½9ßÂ49k›¿C€€8´žûC€ÿ8´žø¾c=h:XL;$Ýã@N{¦À£Â4HR´HRJc¡J¶ !Á†?Á‡ƒ'sG)5Zl5kz>µ’ïº5l(‰´…ƒ5jîÆ5¹žÀ5k´] 
6¦$À@Bî]BÚ¿>LÉÊ5šQ6Â49?þbC€ÿ8¡ù¿C€€8¡ù½¾&Ř9½ý¸;Ò5@SvJÀ¢ÿi³¯u3¯uJ²)&JÏdEÁ‡SCÁˆ¥‹k®M4¯d;5@!ݵ=âÛ5@Õ ³ùëè5@ ¦4Æ€œ5@Ó¨™ 6N.@C™BÖ":.ºú<Ðì <“»W>LÊa5„Â49$_ØC€þ8˜|C€ý8˜|½ó‡9zä;Þy@XªÀ¢_d6éÅQ6ÙïmJÊáwJíÅzÁˆt-Á‰Õ ¨¹Òý3Ah5$YÒµ‰dÚ5$ÖR09 5#ç_µž?^5$s§Ü 60Ø@C5ÂBÓt>LÊ£5^.­Â49 EüC€þ8‹C€ÿ8‰½¯øy9"³:ÝS6@]È…À¡Ý4ÐÓ´ÐÓJèQKö€Á‰¡tÁ‹ÉÌ1ÿ5³¼uU5 tdµK5 åé4ŒQ5 å{µ¨©ð5 dI% 6>ÙP@C¡‰BЊÆ9Ñ}O<‰¶ LË5B™æÂ48ò¯C€€8‡íNC€ÿ8‡íM½}Tæ8Ÿ¼þ:¡k¹@c!À¡sþ6‡îI6KK·=K¨NÁŠÚoÁŒV¼÷4Ñ´q}°4ò>4µ0BÜ4ó'ù´â™4ñ©¤µmð4òªžu 5Íš[@C.~}BΔz9qçÒ<&Ó;ëìý>LË5#ûÂ3ÿû8̲C€ÿ8| 7C€þ8| :½5ƒ8–n:Ô@±@h›¸À¡ê6*ŸÂ6JNK±ºK8´ˆÁŒ|Á¨À*Óuµ1‰nÊ4ÌNݵx 4Ìù2”ô^4Ìí5à¦4ÌÔ×Î 5«©§@C?ñŠBÍ m9-CI;ú—@;±1É>LË}5ÖÂ48²ózC€ý8s%ñC€þ8s%ñ½§8^#:Ý`!@n7¢À Ý95÷5åœuK3öKWø¶ÁnåÁ}i³Ä43rÑ4³Où´5z4³ön³—ˆ24²ã}µ@ï4³›L/ 4ÕP~@CS#KBËÔ®>LË’5ÈWÂ48 5tC€ÿ8ovUC€€8ovT¼µÂ[@sö-À ¨À³„33„3KROLK}¬XÁŽÉkÁjCµ¹#4c¾ó4 u‚´´ÊÐ4¡ é³Zå”4 lø³ª4¡ÿš 5eˆ@Ch@lBÊäõ>LËÆ4ä*¹Â3ÿý8ñ×C€ÿ8i\C€þ8i\¼~Ór7¯ÿ×:°Ïl@yØ-À Ì4û´ûKvÊÙK•”hÁ-Á‘Ù—–)´“E94Žà´ÁÓM4Ž¡/´³,4Ž ­´h0A4Ž›Ä 4&ll@CzBÊ,:]à<ãë< Ý×>LÌ4ОoÂ3ÿý8ºNC€ÿ8j¨ÉC€þ8j¨Ê¼1bN7ªÎµ:ö‚=@Þ~À `6Á«l6´GnK‘dmK°óPÁ‘™ÝÁ“N§€Ñ! ´š14ëj´›n04‚o±²dú~4×X´ 14‚hG‘ 4i(ÿ@CŒƒ#BÉžm>LÌ4¼ÊâÂ3ÿú8qkC€ÿ8lÙ@C€þ8lÙB»õŒH7vý;ÀD@ƒÀ G´²ÖZ’2ÖZ’K«éLÌ5üÂ3ÿú9ÄVC€ÿ8ñk„C€þ8ñk‹»¨=@†-ÌÀ 5³† 3† KÁºKÓ›(Á”ßÁ”Ü[rŸÑc³Ì#4ôµ´á:ª4µgO´lc&4Ï,³µ˜c4Ê\,‘5„›@photutils-0.4/photutils/isophote/tests/data/synth_table_mean.fits0000644000214200020070000023540013175634532027753 0ustar lbradleySTSCI\science00000000000000SIMPLE = T / file does conform to FITS standard BITPIX = 16 / number of bits per data pixel NAXIS = 0 / number of data axes EXTEND = T / There may be standard extensions COMMENT FITS (Flexible Image Transport System) format is defined in 'AstronomyCOMMENT and Astrophysics', volume 376, page 359; bibcode: 2001A&A...376..359H ORIGIN = 'STScI-STSDAS/TABLES' / Tables version 2002-02-22 FILENAME= 'synth_table.fits' / name of file NEXTEND = 3 / number of extensions in file END XTENSION= 'BINTABLE' / binary table extension BITPIX = 8 / 8-bit bytes NAXIS = 2 / 2-dimensional binary table NAXIS1 = 160 / width of table in bytes NAXIS2 = 69 PCOUNT = 0 / size of special data area GCOUNT = 1 / one data group (required keyword) TFIELDS = 40 TTYPE1 = 'SMA ' / label for field 1 TFORM1 = '1E ' / data format of field: 4-byte REAL TUNIT1 = 'pixel ' / physical unit of field TTYPE2 = 'INTENS ' / label for field 2 TFORM2 = '1E ' / data format of field: 4-byte REAL TTYPE3 = 'INT_ERR ' / label for field 3 TFORM3 = '1E ' / data format of field: 4-byte REAL TTYPE4 = 'PIX_VAR ' / label for field 4 TFORM4 = '1E ' / data format of field: 4-byte REAL TTYPE5 = 'RMS ' / label for field 5 TFORM5 = '1E ' / data format of field: 4-byte REAL TTYPE6 = 'ELLIP ' / label for field 6 TFORM6 = '1E ' / data format of field: 4-byte REAL TTYPE7 = 'ELLIP_ERR' / label for field 7 TFORM7 = '1E ' / data format of field: 4-byte REAL TTYPE8 = 'PA ' / label for field 8 TFORM8 = '1E ' / data format of field: 4-byte REAL TUNIT8 = 'degrees ' / physical unit of field TTYPE9 = 'PA_ERR ' / label for field 9 TFORM9 = '1E ' / data format of field: 4-byte REAL TUNIT9 = 'degrees ' / physical unit of field TTYPE10 = 'X0 ' / label for field 10 TFORM10 = '1E ' / data format of field: 4-byte REAL TUNIT10 = 'pixel ' / physical unit of field TTYPE11 = 'X0_ERR ' / label for field 11 TFORM11 = '1E ' / data format of field: 4-byte REAL TUNIT11 = 'pixel ' / physical unit of field TTYPE12 = 'Y0 ' / label for field 12 TFORM12 = '1E ' / data format of field: 4-byte REAL TUNIT12 = 'pixel ' / physical unit of field TTYPE13 = 'Y0_ERR ' / label for field 13 TFORM13 = '1E ' / data format of field: 4-byte REAL TUNIT13 = 'pixel ' / physical 
unit of field TTYPE14 = 'GRAD ' / label for field 14 TFORM14 = '1E ' / data format of field: 4-byte REAL TTYPE15 = 'GRAD_ERR' / label for field 15 TFORM15 = '1E ' / data format of field: 4-byte REAL TTYPE16 = 'GRAD_R_ERR' / label for field 16 TFORM16 = '1E ' / data format of field: 4-byte REAL TTYPE17 = 'RSMA ' / label for field 17 TFORM17 = '1E ' / data format of field: 4-byte REAL TUNIT17 = 'pixel**1/4' / physical unit of field TTYPE18 = 'MAG ' / label for field 18 TFORM18 = '1E ' / data format of field: 4-byte REAL TTYPE19 = 'MAG_LERR' / label for field 19 TFORM19 = '1E ' / data format of field: 4-byte REAL TTYPE20 = 'MAG_UERR' / label for field 20 TFORM20 = '1E ' / data format of field: 4-byte REAL TTYPE21 = 'TFLUX_E ' / label for field 21 TFORM21 = '1E ' / data format of field: 4-byte REAL TTYPE22 = 'TFLUX_C ' / label for field 22 TFORM22 = '1E ' / data format of field: 4-byte REAL TTYPE23 = 'TMAG_E ' / label for field 23 TFORM23 = '1E ' / data format of field: 4-byte REAL TTYPE24 = 'TMAG_C ' / label for field 24 TFORM24 = '1E ' / data format of field: 4-byte REAL TTYPE25 = 'NPIX_E ' / label for field 25 TFORM25 = '1J ' / data format of field: 4-byte INTEGER TTYPE26 = 'NPIX_C ' / label for field 26 TFORM26 = '1J ' / data format of field: 4-byte INTEGER TTYPE27 = 'A3 ' / label for field 27 TFORM27 = '1E ' / data format of field: 4-byte REAL TTYPE28 = 'A3_ERR ' / label for field 28 TFORM28 = '1E ' / data format of field: 4-byte REAL TTYPE29 = 'B3 ' / label for field 29 TFORM29 = '1E ' / data format of field: 4-byte REAL TTYPE30 = 'B3_ERR ' / label for field 30 TFORM30 = '1E ' / data format of field: 4-byte REAL TTYPE31 = 'A4 ' / label for field 31 TFORM31 = '1E ' / data format of field: 4-byte REAL TTYPE32 = 'A4_ERR ' / label for field 32 TFORM32 = '1E ' / data format of field: 4-byte REAL TTYPE33 = 'B4 ' / label for field 33 TFORM33 = '1E ' / data format of field: 4-byte REAL TTYPE34 = 'B4_ERR ' / label for field 34 TFORM34 = '1E ' / data format of field: 4-byte REAL TTYPE35 = 'NDATA ' / label for field 35 TFORM35 = '1J ' / data format of field: 4-byte INTEGER TTYPE36 = 'NFLAG ' / label for field 36 TFORM36 = '1J ' / data format of field: 4-byte INTEGER TTYPE37 = 'NITER ' / label for field 37 TFORM37 = '1J ' / data format of field: 4-byte INTEGER TTYPE38 = 'STOP ' / label for field 38 TFORM38 = '1J ' / data format of field: 4-byte INTEGER TTYPE39 = 'A_BIG ' / label for field 39 TFORM39 = '1E ' / data format of field: 4-byte REAL TTYPE40 = 'SAREA ' / label for field 40 TFORM40 = '1E ' / data format of field: 4-byte REAL TUNIT40 = 'pixel ' / physical unit of field TDISP1 = 'F7.2 ' / display format TDISP2 = 'G10.3 ' / display format TDISP3 = 'G10.3 ' / display format TDISP4 = 'G9.3 ' / display format TDISP5 = 'G9.3 ' / display format TDISP6 = 'F6.4 ' / display format TDISP7 = 'F6.4 ' / display format TDISP8 = 'F6.2 ' / display format TDISP9 = 'F6.2 ' / display format TDISP10 = 'F7.2 ' / display format TDISP11 = 'F6.2 ' / display format TDISP12 = 'F7.2 ' / display format TDISP13 = 'F6.2 ' / display format TDISP14 = 'G8.3 ' / display format TDISP15 = 'G6.3 ' / display format TDISP16 = 'G6.3 ' / display format TDISP17 = 'F7.5 ' / display format TDISP18 = 'G7.3 ' / display format TDISP19 = 'G7.3 ' / display format TDISP20 = 'G7.3 ' / display format TDISP21 = 'G12.5 ' / display format TDISP22 = 'G12.5 ' / display format TDISP23 = 'G7.3 ' / display format TDISP24 = 'G7.3 ' / display format TDISP25 = 'I6 ' / display format TNULL25 = -2147483647 / undefined value for column TDISP26 = 'I6 ' / 
display format TNULL26 = -2147483647 / undefined value for column TDISP27 = 'G9.3 ' / display format TDISP28 = 'G7.3 ' / display format TDISP29 = 'G9.3 ' / display format TDISP30 = 'G7.3 ' / display format TDISP31 = 'G9.3 ' / display format TDISP32 = 'G7.3 ' / display format TDISP33 = 'G9.3 ' / display format TDISP34 = 'G7.3 ' / display format TDISP35 = 'I5 ' / display format TNULL35 = -2147483647 / undefined value for column TDISP36 = 'I5 ' / display format TNULL36 = -2147483647 / undefined value for column TDISP37 = 'I3 ' / display format TNULL37 = -2147483647 / undefined value for column TDISP38 = 'I2 ' / display format TNULL38 = -2147483647 / undefined value for column TDISP39 = 'G9.3 ' / display format TDISP40 = 'F5.1 ' / display format IMAGE = 'synth.fits' END F’*ÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿC€€ÿÿÿÿC€~zÿÿÿÿÃvôÿÿÿÿÿÿÿÿÁÅ+ÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿ€€ÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿ€€ÿÿÿÿÿÿÿÿ?__F‘^A’˜ÃBºßùB„#ø>“ü7>)Ä›Â=ýœAžì›C€€=]?€C€~z=W‹(ĨPD R$?‰0?Y~`Á‡Œ;‹; ÉF’*F’*ÁÅ+ÁÅ+¼× h=䨻‹z«=ÔV½•O.=§—Ô¾ °>­#^ =s z@?µµF%²A±q™Bâ2BŸò>“ü7>)À Â=þ AžèUC€€=sYC€~T=mÄ ŠÍD$P?‰•å?^¼üÁz€;,w®;,üF’*F’*ÁÅ+ÁÅ+¼× a=ä+»Ë =ÔSP½•Uª=§•«¾ «5>­ =Šƒf@?!azF£gAÖ´qCÙBÁˆG>“ü7>)ÊBÂ>¥AžñæC€€=…àC€~*=‚jÄ­D'Zù?‰š[?dùÁj©;Q|;PäcF’*F’*ÁÅ+ÁÅ+¼ÖSû=ä5»ŽQ¾=Ô`,½•­ÿ=§÷¾ ²!>­" =ÊŽ@?1„ÓF¿BÙC%†&Bê>“ü7>)¿Â>üAžçkC€€=“9ùC€}ú=kÒÄ+D\D87?‰G?iœÁWk;~‘s;}­PF’*F’*ÁÅ+ÁÅ+¼Öžj=䑻޲:=ÔQ뽕›ï=§ÖÀ¾ ¨Z>­ Þ =ï+¿@?CEOF G B)#CHWcC ©©>“ü7>)ÉåÂ>ïAžñC€þ=¡ýµC€}É=ÍÄ6Á@;šî2;šDoF’*F’*ÁÅ+ÁÅ+¼ÕuS=ä »_=Ô`T½•Àã=¨ æ¾ ±/>­"4 > U@?VÌ>F `B>?CrY\C+]Þ>“ü7>)·œÂ=ùÞAžàpC€€=²ÁC€}‹=­„àÄO@ÇD^·z?‰Œç?uÁ#‹;¼³š;»»¬F’*F’*ÁÅ+ÁÅ+¼×^Î=äÖ»Œë¨=ÔF(½•)=§^ü¾ £ž>­© >6™‘@?lGF HÐBfbC’¢—CO_•>“ü7>)úÂ=ÿEAžëÈC€€=ÃùC€}N=¾ë5ÄcïDuQ?‰–6?zë’ÁØ;æG‰;äÑÆF’*F’*ÁÅ+ÁÅ+¼× )=äË»•|=ÔV`½•l =§­Ð¾ ­Í>­< >;t@?óãF öýB‹6ˆC±vdCzøF>“ü7>)Ç{Â>ÔAžïLC€þ=טÏC€} =ÒÐÄz¿ûD†Æá?‰™H?€|CÁÖƒ< Ë < ¬ôF’*G5ö´ÁÅ+Á:ºŸ¼Õø=äó»´=Ô\T½•¢½=§ê»¾ ¯ô>­u >jöq@?Žò­FXãB¨ÈªC×(tC˜#Ã>“IÕ=ë7ÁÂ=dÇA]3C€ö=£ïÄC€|¼=ŸûLÄÇÝzDŽY’?6T¯?ƒ•kÁ¢<,á-<+3”F’*G5ö´ÁÅ+Á:ºŸ¼—rb=ší×»]¢k=’¼Î½Eú)=N$¾^c >4€ ?Öý@?>%F±B¦Î{CÔ£2C–[>wcA=… Â7'eAþC€€Ô=BoC€~¶=A'KÅ(éÞD`|r>ª­?†Á²Áµ<0t}<.¼¬G5ö´G5ö´Á:ºŸÁ:ºŸ¼,‹Ô="šÀ»´µI=àß¼·o5SD©=(Î*Â;Ô @Ó+IC€€ÿ=ZC€Z=–ÅH£}Dèß>JÁÆ?ŠÁ,]<þ~<ÌÖG5ö´G5ö´Á:ºŸÁ:ºŸ»‡ T<ĨW»‹N<½|¼€:6<+¸<½….Œ<ƒTÏ > Ê1@?¾C„Eëü;BNxdCƒ™xC:>Géù<íº¿Â:d@œ,ÒC€:<ËôC€R<ÈõRÅMV$CãB> ªM?U}Á;ô";òYÓGrâ‡G’®Á?¾áÁC‘ »!º<ˆ¦ñ»ñ<ƒ[¼¼Rç\< ŵ½-wÓ<#} ={Ò@?ÑJEEÜ6MB*Á,CY«”Cê…>C$ô<·Ù~Â9ï+@vÓ3C€€è<¬ >C€(<ªu¹ÅHŲC¥Ëí=ÓgB?½öÁën;Ø-®;ÖßyGrâ‡G’®Á?¾áÁC‘ »o‘™<ýi<„^Â8òÇ@6‚ÿC€&<‡A`C€S<†C=Å=šùCPªÃ=ŒÞG?”;xÁ‚Ì;­Ì;¬¿‰G’®G’®ÁC‘ÁC‘ »€æ€<¡gºËj<Ò¼ ‘B;°u¼¬{®;­ðB =Âý@?ý=µE¹@A’ÍB»"·B„S*>5K<·IÂ7Î’?Ö¿•C€<<'ÓC€Ñ<&ªÅ2ÈC\Ë=O|¡?—Î…ÁæÚ;\Ìh;\^G’®G’®ÁC‘ÁC‘ »¾Í±;˜b:;—5T»½Yµ;‡>U»²1; =L\@@ HWE¦:àAªBØÈB™Iþ>*ý;áËG’®G»+¿ÁC‘ÁGB¥ ¼W<;Ûã3ºêÀe;Ù_qºmåj;†ð<‡¡^;ˆD @u`˜@@5úE˜úŸAþôBK·«B Ì>5 ;×[ñÂçXº@@(ˆ-EŒãÕA-5ÿBm-ÇB'µô>?ñ<Â1Ûê?®§˜C€€.<@¢wC€€üC®#@@9b˜EËì@ôR1B2úAû×#>Dœ;Ÿê(Â6X§?WXŽC€é<’ C€€f<:ÈÄ®¢·Aþm¼<º|?¦üÆÁ¼;êÒ;¯çH ª3H?QÁN5ÁNåv9ο™;=xº³;5 º©:K;ÿ»ÍÃ;3 >ÝH@@KìtEiÐà@}AÔ;¬A–K>AM|;Y øÂ3 Ÿ?ÌâC€ý;Á¬?C€ù;Áí‹Ä‰ A´8<¨K?«›Áëv:¨[ :¨ ½H ª3H9´ÁN5ÁSI%¸£âš:ùÛu¸¨¢:ù~Í:½Ñ::ç˜;T§:æì& =¿Ùç@@`PæEȘ@¡'mAþÎŽA´,ø>A˱;‹¡(Â8¶ˆ?97rC€€š< =GC€ù< gmÄWÿA’ <­M?¯#KÁ 3¨:Ó—):ÓcH,ëÃH9´ÁQíÁSI!%ºy]Í;#EÕ¹ƒ‰;!C`ºK(t;àÐ;Žãf;, >Lìß@@v¿dEAC@'¢jAŠþÁAD‘>F ;M Â4¼K>¿Ã)C€º; Å±C€ý; žiÄ=ÐøA»JF÷o:Ö­àÂ18>ŠkZC€€;€!ßC€€;€žLÄ&¼@Ùf²<'Í?·¯§Á â®:<°v:<ËHEãŽHn$ÁTDÔÁW|J)9:P(:}}¸ 6ù:{µ<:cÒ(:j*óº½F´:h–H =U@@•HFE`@ qAld_A''‰>G²Æ; ;ŽÂ3îl>°[@C€ü;¶ýC€€;¶¾Ä…ô@¢ßÝ<"6?¼‘Á Ð:gÅì:gf™HtDKH…”ÁWí4ÁYjŽ;E9ìŽ:£÷E¹À·Á:¥3Ÿ7"B°:”ªö;'‹:•І2?S€@@¤5çE–œ?Š™Aðé@º—Ä>I­7:”v™Â36 >=>ÀC€ò;W*ýC€>;WfÚÃä-@HIp:ŒTFÂ2ë¥>4\ôC€€S;_«zC€€I;_þÒö×Ü@ ¾;Å Ý?ÅLÁn'9þ]ª9þmêH“¸5H e˜Á[;Á\©9Sa9´ô:)f¹×*:'^ž¹Fß:3:²ÄŸ:‡u =8ä‡@@ƱÞDæ6ú?$}ñ@¬¡@sIq>JdÂ:R”fÂ49¡>ƒ C€€¢;8¯wC€º;8 šÃ›Vk? 
º;„q5?ÊÁ›ã9Æßu9ÆWêH¡çH´^Á\ºÁ^³ ayº2Ë;9÷ø˜8=>¥9ö¯è¸4ç`9澺‚# 9ää3# <=º@@ÚuDÍýÕ>ÈsÛ@]K+@zm>I¸: ÜfÂ3¾É=¶Ž?C€€+; yðC€½; …ŽÃ}æP?;R0;<Þá?ÎíSÁ­u9†þ‚9‡uÌH³ÌHÆgãÁ^žÁ`Zúw‘ºLô9¦¦’¹=à9£Â·š‹9¡Mc9öN9ž‹u' ;Ú71@@ðk´D¸{i>^£_?ÿf?´[‡>Jë©9©&JÂ4’µ=TK‚C€å:²ù¢C€ù:²Ó7ÃRnÛ?ÖÌ;¼Ü?ÓêµÀý†9'ž?9'ÌNHÂÖùHÛ´=Á` QÁb q‹±9‘à9KÝ`7M°9Lxý¸Þ¨¥9@x;¹Ê9A * ‰€A@$ÛŸ?é$ï>KeW9æ26Â3“¦=‘²C€€;ª[C€×;ÀÃ.B>Ì; W?ÙåÀù™,9hÞ9gàHÝÎÞHõzÀÁbJÔÁd Ƴݹ¤5B9‰G©7mZ9Š> 9Œwÿ9òa¹ÿ˜d9‚èŸ. ;r{ö@At]D’ÀŠ>;|+?ì°1?§]&>K 9¯PMÂ3à‘=\´C€é:à¸gC€µ:à‰à °Ï>ƒ»4:î~?ÞB Àõ’î91'91žØHðËKI(äÁc¸Áex%Ó¹gÿ9PÜ9='9OÞhµp¼9IÁÖ9®¾9I²/3 ;*I±@A D‚¬X>d–?Á¨Ý?ˆð#>KF·9—ÿÂ4ó==¡C€ð:ÔÖÖC€€:ÔÕžÂè€>:Z:Í…E?ãž©ÀñŠÉ9ÎH9ЉI¥IFáÁeECÁg÷ý=8q-98Ïô·ã¹Q98þÏ67£G947b9”±94w8:‰JØ@A0Dhh„=Ù&?—!?Uº–>K°M9tÄ/Â4C&=kßC€€:¾`œC€€:¾N¾Â½© =ûCã:©“š?éÈÀíw9°ó9ú°I¬YIÍåÁfÊÁh‡e/y86-Î9s§¸çÍ'9•¥¸„©9ÕÛ8š¿Š9L> ;;k@AA™šDNXy=¤ÂÍ?p-¤?)Ôê>KÅú9PˆÂ44=ÐdC€€ :±»C€ÿ:±µ#™ÈÓ=§õ„:‹ÌH?îºÉÀéU8ݼ8ÞwI÷¯I/9ÁhS¨Áj8°mѸ™@9]í·“§:8ÿýݸŸ‰Á8ø#¿9{ß›8÷ƒŽD :ðß^@ATõÃD7{=l·E?3ü_>þ‰Ô>KÖ|9.#WÂ4)r<Ø›C€€5:¢æ…C€ë:¢ÜUÂyBz=Ph}:V 7?ô|}Àå-Õ8³V8´I,) I=PÁiî”Ák™h½)¹i8Ò½a·Þf8Ôh7¸"9-8Éþ9{Tñ8ˬŸJ :psÔ@AjAŠD"b¬=”/>×ng>˜U9>Lô8×FcÂ47<†ÌýC€÷:^F8C€ü:^CåÂIÓ<éÂ:°?úaºÀá18e„)8fë„I²Ã²>|Ϥ>LMX8¬(ÇÂ4ˆ”é´>R˜;>LD¢8ºÜÞÂ4ßYëã>þ>L¤8~óXÂ4<(êC€Ù:/*ðC€ü:/(ãÁÏÄt<uE9¶ì@w«ÀÔƒÅ8ñÑ8pIqÁtI…j¨ÁoÔXÁq‹Cµ±8—KÓ8m·:58M ¸V:8/o¸¸”£8rÂm :LI}@A«|³CÉ`F;é§þ=â<–=Ÿù6>L‰8Q”QÂ49<ípC€ü:vBC€€:rÁÁ§ ?#i°<ú`¿@ µÀÀÐg-7¢7 êI„5{Iœ^ÁqbÓÁrñ’©4謰8î·Y˜8‘¶©£¿7ûYG¸±·Ì7üMx 9Úc@A¼¢ÅC³2w;æI?=éÛN=¥\”>L…m8@Â4†;»¼ÅC€õ9ùê"C€ô9ùé”Á„Û%>Ô:™<ÌxÁ@ ÙÀÌYP7°µè7´ |IIœµ%ÁrÂ~ÁtVÖ{Õ5Ç´7º‘=¶Cc7º©í·RãF7ºHÄ6£“ž7ºo[„ : -@AÏÙCŸâJ;ì*4=û[â=±¼Ó>L 8÷Â3þÀ;³‘kC€Ü:¢’C€ß:¢ÏÁKes>”tF<ºÙ2@nrÀÈc7Ë&O7Ï8I›ßI©*tÁt'Áu« -¶ l¯7²\~7ya7²´»6¾„Ì7®K©¸_éÒ7®ù'‘ 9“ÝÙ@Aä?ÕCä>L©~7ÛqžÂ4´;ˆg5C€€9Üi¯C€ô9ÜiÁ#Êæ>JóÉ<žš6@ê Àćs´,âu4,âuI¨I¸[ÎÁuŒrÁw)‚ý ·QZA7‡š¶$l'7ˆB>6i³¨7ƒ–¦¸Lî7„†&Ÿ 9M–@AûC€‡;f¤¹=†×=>²>L«7´ÛwÂ3ý³;`ö˜C€ó9ÇßCC€ò9ÇßòÁ Ú> E<‰Ò@{ ÀÀÍY7|U7ub I¶XÆIÇãdÁvø¶Áx‘- ¯ µÏBd7`.L¶ˆˆ7a(Ö6‰Ê7[¡ý¸Xé7\㯠9HÓÖ@B BCgô¡;LË´=zÒ=1[¹>Lª7‹ó¯Â4;-žÞC€€9©ï1C€û9©î$ÀÆcñ=¬è<^@";À½;~7xY'7rò¦IÅëÝIÙ"ôÁxe3ÁzB »¡·ªÄ¨7*è·2•7,ζ™!Ô7*qO7ÀT7+µ5À 8…±@BæbCRi°>L°Ç7_îÂ3þË; lÈC€€9•Í×C€€ 9•ÎÀšYŒ=€ËoL¼7;ÆÂÂ3ý—:é–ŠC€ü9Š!ÞC€ý9Š"\Àpã°=ïò<(è<@"´£À¶©Í6¼É[6ÁfËIéÎJqÁ{L¼­7)¬Â3ý]:¶Ø{C€þ9n C€ÿ9n À7ö<Ò׿<³ß@&¡ À³³6Ê&©6ÉoƒIý™âJ .Á|³ŸÁ~n$«Õ6Œ¤S6¶Q¿¶J¡m6·CÅ6žP6³•O·m»6´¡J 8Zé@BJ-ÈC#o:Lã~<˜¯L¾è6ÞÃEÂ4¸:Š•‡C€ö9FN®C€ó9FNxÀBD<¬9<Cg@*¥¨À°ú6®ðu6¬æþJ aŠJz˜Á~8IÁ€f ]5Ÿì‡6Špõ5­SÊ6ŠöEµçó*6‰ ·ir6‰· 8kT@B^eCÊ$:{ŸÚ<ÃË.<Šrq>L¿Ý6¶Ñ†Â3ÿ:cPÚC€€92ø“C€ù92ø±¿Ù&¿>LÂã6˜òüÂ4°:>»zC€ü9$ìC€û9$ëó¿¤×<\è;ÈII@2ùçÀ¬Eå3“nˆ³“nˆJ&.J9€GÁ€³2Á§Ä$Ë-ñ¶¿R6=m´[Ç56=à„µŠÂ³6=Uö^6=¯§V 7»š@B†ŒÏC~>LÃá6pEæÂ3ÿÑ:»UC€þ9v%C€€9v/¿w’”;¤®ß;ªIø@7J¾ÀªK¾2áϲáÏJ6Š„JM8{ÁƒÿÁ‚ˆg,_7©6²ã6 ¶}Ñ6YµW¼6ül4Ý¢;6VQx 7r×@B”JBÿâh:„oÍ<íä*<¨6ë>LÄð6RÂ3ÿò:»ÏC€€9ðTC€€9ðW¿;é;Yì4;•@;¶9À¨¥7»}7 JI¬}Jc†ÜÁ‚a£ÁƒmÖ5·C)3•dX6Íß4§)C6A?4d¦P6Ɖ4YY˜6FÌ 7Þ}@B¢Î8Bõ ô9Ï‘LÆY6õAÂ4e9Æ ¦C€ÿ8äÞC€€8伿 |;Ä‚;‚ýÀ@@<ýÀ§•6z³46])©J_ÏçJ~µÁƒI<Á„cA Qi³w15Æ/¿µ‰Ä…5Æö4„65ÂÇ ¶Á´D5Ã>ÝÇ 7@y4@B³ Bì˜>LÇ76 ÏÔÂ3ÿÑ9«˜gC€ÿ8ÙmC€ü8ÙmŸ¾ÑƒÃ:·øí;`Êb@Dß²À¥Ä€3Ôxw³ÔxwJy‰-JŽetÁ„;'Á…aN¿biµRr5«ƒŠµÉj 5¬òµ®éè5« 85­Ôë5«¯ßô 7eM¿@BÄþ¦BäÍÚ>LÈ5Üw+Â4"9‰@ûC€€8¿NûC€ÿ8¿Nñ¾š…œ:hÃe;@Ïç@IŸÀ¤¬º4yLÍ´yLÍJ‹ÏÃJ ¬_Á…8IÁ†m‰_Uw=².Ž5‰bëµæ5‰È«´þ'5ˆ‡h60Ô÷5ˆúè& 6Ø| @Bر·BÞÙ>LÈ¿5½9ßÂ49k›¿C€€8´žûC€ÿ8´žø¾c=h:XL;$Ýã@N{¦À£Â4HR´HRJc¡J¶ !Á†?Á‡ƒ'sG)5Zl5kz>µ’ïº5l(‰´…ƒ5jîÆ5¹žÀ5k´] 6¦$À@Bî]BÚ¿>LÉÊ5šQ6Â49?þbC€ÿ8¡ù¿C€€8¡ù½¾&Ř9½ý¸;Ò5@SvJÀ¢ÿi³¯u3¯uJ²)&JÏdEÁ‡SCÁˆ¥‹k®M4¯d;5@!ݵ=âÛ5@Õ ³ùëè5@ ¦4Æ€œ5@Ó¨™ 6N.@C™BÖ":.ºú<Ðì <“»W>LÊa5„Â49$_ØC€þ8˜|C€ý8˜|½ó‡9zä;Þy@XªÀ¢_d6éÅQ6ÙïmJÊáwJíÅzÁˆt-Á‰Õ ¨¹Òý3Ah5$YÒµ‰dÚ5$ÖR09 5#ç_µž?^5$s§Ü 60Ø@C5ÂBÓt>LÊ£5^.­Â49 EüC€þ8‹C€ÿ8‰½¯øy9"³:ÝS6@]È…À¡Ý4ÐÓ´ÐÓJèQKö€Á‰¡tÁ‹ÉÌ1ÿ5³¼uU5 tdµK5 åé4ŒQ5 å{µ¨©ð5 dI% 6>ÙP@C¡‰BЊÆ9Ñ}O<‰¶ 
LË5B™æÂ48ò¯C€€8‡íNC€ÿ8‡íM½}Tæ8Ÿ¼þ:¡k¹@c!À¡sþ6‡îI6KK·=K¨NÁŠÚoÁŒV¼÷4Ñ´q}°4ò>4µ0BÜ4ó'ù´â™4ñ©¤µmð4òªžu 5Íš[@C.~}BΔz9qçÒ<&Ó;ëìý>LË5#ûÂ3ÿû8̲C€ÿ8| 7C€þ8| :½5ƒ8–n:Ô@±@h›¸À¡ê6*ŸÂ6JNK±ºK8´ˆÁŒ|Á¨À*Óuµ1‰nÊ4ÌNݵx 4Ìù2”ô^4Ìí5à¦4ÌÔ×Î 5«©§@C?ñŠBÍ m9-CI;ú—@;±1É>LË}5ÖÂ48²ózC€ý8s%ñC€þ8s%ñ½§8^#:Ý`!@n7¢À Ý95÷5åœuK3öKWø¶ÁnåÁ}i³Ä43rÑ4³Où´5z4³ön³—ˆ24²ã}µ@ï4³›L/ 4ÕP~@CS#KBËÔ®>LË’5ÈWÂ48 5tC€ÿ8ovUC€€8ovT¼µÂ[@sö-À ¨À³„33„3KROLK}¬XÁŽÉkÁjCµ¹#4c¾ó4 u‚´´ÊÐ4¡ é³Zå”4 lø³ª4¡ÿš 5eˆ@Ch@lBÊäõ>LËÆ4ä*¹Â3ÿý8ñ×C€ÿ8i\C€þ8i\¼~Ór7¯ÿ×:°Ïl@yØ-À Ì4û´ûKvÊÙK•”hÁ-Á‘Ù—–)´“E94Žà´ÁÓM4Ž¡/´³,4Ž ­´h0A4Ž›Ä 4&ll@CzBÊ,:]à<ãë< Ý×>LÌ4ОoÂ3ÿý8ºNC€ÿ8j¨ÉC€þ8j¨Ê¼1bN7ªÎµ:ö‚=@Þ~À `6Á«l6´GnK‘dmK°óPÁ‘™ÝÁ“N§€Ñ! ´š14ëj´›n04‚o±²dú~4×X´ 14‚hG‘ 4i(ÿ@CŒƒ#BÉžm>LÌ4¼ÊâÂ3ÿú8qkC€ÿ8lÙ@C€þ8lÙB»õŒH7vý;ÀD@ƒÀ G´²ÖZ’2ÖZ’K«éLÌ5üÂ3ÿú9ÄVC€ÿ8ñk„C€þ8ñk‹»¨=@†-ÌÀ 5³† 3† KÁºKÓ›(Á”ßÁ”Ü[rŸÑc³Ì#4ôµ´á:ª4µgO´lc&4Ï,³µ˜c4Ê\,‘5„›@XTENSION= 'BINTABLE' / binary table extension BITPIX = 8 / 8-bit bytes NAXIS = 2 / 2-dimensional binary table NAXIS1 = 160 / width of table in bytes NAXIS2 = 69 PCOUNT = 0 / size of special data area GCOUNT = 1 / one data group (required keyword) TFIELDS = 40 TTYPE1 = 'SMA ' / label for field 1 TFORM1 = '1E ' / data format of field: 4-byte REAL TUNIT1 = 'pixel ' / physical unit of field TTYPE2 = 'INTENS ' / label for field 2 TFORM2 = '1E ' / data format of field: 4-byte REAL TTYPE3 = 'INT_ERR ' / label for field 3 TFORM3 = '1E ' / data format of field: 4-byte REAL TTYPE4 = 'PIX_VAR ' / label for field 4 TFORM4 = '1E ' / data format of field: 4-byte REAL TTYPE5 = 'RMS ' / label for field 5 TFORM5 = '1E ' / data format of field: 4-byte REAL TTYPE6 = 'ELLIP ' / label for field 6 TFORM6 = '1E ' / data format of field: 4-byte REAL TTYPE7 = 'ELLIP_ERR' / label for field 7 TFORM7 = '1E ' / data format of field: 4-byte REAL TTYPE8 = 'PA ' / label for field 8 TFORM8 = '1E ' / data format of field: 4-byte REAL TUNIT8 = 'degrees ' / physical unit of field TTYPE9 = 'PA_ERR ' / label for field 9 TFORM9 = '1E ' / data format of field: 4-byte REAL TUNIT9 = 'degrees ' / physical unit of field TTYPE10 = 'X0 ' / label for field 10 TFORM10 = '1E ' / data format of field: 4-byte REAL TUNIT10 = 'pixel ' / physical unit of field TTYPE11 = 'X0_ERR ' / label for field 11 TFORM11 = '1E ' / data format of field: 4-byte REAL TUNIT11 = 'pixel ' / physical unit of field TTYPE12 = 'Y0 ' / label for field 12 TFORM12 = '1E ' / data format of field: 4-byte REAL TUNIT12 = 'pixel ' / physical unit of field TTYPE13 = 'Y0_ERR ' / label for field 13 TFORM13 = '1E ' / data format of field: 4-byte REAL TUNIT13 = 'pixel ' / physical unit of field TTYPE14 = 'GRAD ' / label for field 14 TFORM14 = '1E ' / data format of field: 4-byte REAL TTYPE15 = 'GRAD_ERR' / label for field 15 TFORM15 = '1E ' / data format of field: 4-byte REAL TTYPE16 = 'GRAD_R_ERR' / label for field 16 TFORM16 = '1E ' / data format of field: 4-byte REAL TTYPE17 = 'RSMA ' / label for field 17 TFORM17 = '1E ' / data format of field: 4-byte REAL TUNIT17 = 'pixel**1/4' / physical unit of field TTYPE18 = 'MAG ' / label for field 18 TFORM18 = '1E ' / data format of field: 4-byte REAL TTYPE19 = 'MAG_LERR' / label for field 19 TFORM19 = '1E ' / data format of field: 4-byte REAL TTYPE20 = 'MAG_UERR' / label for field 20 TFORM20 = '1E ' / data format of field: 4-byte REAL TTYPE21 = 'TFLUX_E ' / label for field 21 TFORM21 = '1E ' / data format of field: 4-byte REAL TTYPE22 = 'TFLUX_C ' / label for field 22 TFORM22 = '1E ' / data format of field: 4-byte REAL TTYPE23 = 'TMAG_E ' / label for field 23 TFORM23 = '1E ' / data format of field: 4-byte REAL 
TTYPE24 = 'TMAG_C ' / label for field 24 TFORM24 = '1E ' / data format of field: 4-byte REAL TTYPE25 = 'NPIX_E ' / label for field 25 TFORM25 = '1J ' / data format of field: 4-byte INTEGER TTYPE26 = 'NPIX_C ' / label for field 26 TFORM26 = '1J ' / data format of field: 4-byte INTEGER TTYPE27 = 'A3 ' / label for field 27 TFORM27 = '1E ' / data format of field: 4-byte REAL TTYPE28 = 'A3_ERR ' / label for field 28 TFORM28 = '1E ' / data format of field: 4-byte REAL TTYPE29 = 'B3 ' / label for field 29 TFORM29 = '1E ' / data format of field: 4-byte REAL TTYPE30 = 'B3_ERR ' / label for field 30 TFORM30 = '1E ' / data format of field: 4-byte REAL TTYPE31 = 'A4 ' / label for field 31 TFORM31 = '1E ' / data format of field: 4-byte REAL TTYPE32 = 'A4_ERR ' / label for field 32 TFORM32 = '1E ' / data format of field: 4-byte REAL TTYPE33 = 'B4 ' / label for field 33 TFORM33 = '1E ' / data format of field: 4-byte REAL TTYPE34 = 'B4_ERR ' / label for field 34 TFORM34 = '1E ' / data format of field: 4-byte REAL TTYPE35 = 'NDATA ' / label for field 35 TFORM35 = '1J ' / data format of field: 4-byte INTEGER TTYPE36 = 'NFLAG ' / label for field 36 TFORM36 = '1J ' / data format of field: 4-byte INTEGER TTYPE37 = 'NITER ' / label for field 37 TFORM37 = '1J ' / data format of field: 4-byte INTEGER TTYPE38 = 'STOP ' / label for field 38 TFORM38 = '1J ' / data format of field: 4-byte INTEGER TTYPE39 = 'A_BIG ' / label for field 39 TFORM39 = '1E ' / data format of field: 4-byte REAL TTYPE40 = 'SAREA ' / label for field 40 TFORM40 = '1E ' / data format of field: 4-byte REAL TUNIT40 = 'pixel ' / physical unit of field TDISP1 = 'F7.2 ' / display format TDISP2 = 'G10.3 ' / display format TDISP3 = 'G10.3 ' / display format TDISP4 = 'G9.3 ' / display format TDISP5 = 'G9.3 ' / display format TDISP6 = 'F6.4 ' / display format TDISP7 = 'F6.4 ' / display format TDISP8 = 'F6.2 ' / display format TDISP9 = 'F6.2 ' / display format TDISP10 = 'F7.2 ' / display format TDISP11 = 'F6.2 ' / display format TDISP12 = 'F7.2 ' / display format TDISP13 = 'F6.2 ' / display format TDISP14 = 'G8.3 ' / display format TDISP15 = 'G6.3 ' / display format TDISP16 = 'G6.3 ' / display format TDISP17 = 'F7.5 ' / display format TDISP18 = 'G7.3 ' / display format TDISP19 = 'G7.3 ' / display format TDISP20 = 'G7.3 ' / display format TDISP21 = 'G12.5 ' / display format TDISP22 = 'G12.5 ' / display format TDISP23 = 'G7.3 ' / display format TDISP24 = 'G7.3 ' / display format TDISP25 = 'I6 ' / display format TNULL25 = -2147483647 / undefined value for column TDISP26 = 'I6 ' / display format TNULL26 = -2147483647 / undefined value for column TDISP27 = 'G9.3 ' / display format TDISP28 = 'G7.3 ' / display format TDISP29 = 'G9.3 ' / display format TDISP30 = 'G7.3 ' / display format TDISP31 = 'G9.3 ' / display format TDISP32 = 'G7.3 ' / display format TDISP33 = 'G9.3 ' / display format TDISP34 = 'G7.3 ' / display format TDISP35 = 'I5 ' / display format TNULL35 = -2147483647 / undefined value for column TDISP36 = 'I5 ' / display format TNULL36 = -2147483647 / undefined value for column TDISP37 = 'I3 ' / display format TNULL37 = -2147483647 / undefined value for column TDISP38 = 'I2 ' / display format TNULL38 = -2147483647 / undefined value for column TDISP39 = 'G9.3 ' / display format TDISP40 = 'F5.1 ' / display format IMAGE = 'synth.fits' END F’*ÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿC€€ÿÿÿÿC€~zÿÿÿÿÃvôÿÿÿÿÿÿÿÿÁÅ+ÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿ€€ÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿ€€ÿÿÿÿÿÿÿÿ?__F‘^A’˜ÃBºßùB„#ø>“ü7>)Ä›Â=ýœAžì›C€€=]?€C€~z=W‹(ĨPD R$?‰0?Y~`Á‡Œ;‹; ÉF’*F’*ÁÅ+ÁÅ+¼× 
h=䨻‹z«=ÔV½•O.=§—Ô¾ °>­#^ =s z@?µµF%²A±q™Bâ2BŸò>“ü7>)À Â=þ AžèUC€€=sYC€~T=mÄ ŠÍD$P?‰•å?^¼üÁz€;,w®;,üF’*F’*ÁÅ+ÁÅ+¼× a=ä+»Ë =ÔSP½•Uª=§•«¾ «5>­ =Šƒf@?!azF£gAÖ´qCÙBÁˆG>“ü7>)ÊBÂ>¥AžñæC€€=…àC€~*=‚jÄ­D'Zù?‰š[?dùÁj©;Q|;PäcF’*F’*ÁÅ+ÁÅ+¼ÖSû=ä5»ŽQ¾=Ô`,½•­ÿ=§÷¾ ²!>­" =ÊŽ@?1„ÓF¿BÙC%†&Bê>“ü7>)¿Â>üAžçkC€€=“9ùC€}ú=kÒÄ+D\D87?‰G?iœÁWk;~‘s;}­PF’*F’*ÁÅ+ÁÅ+¼Öžj=䑻޲:=ÔQ뽕›ï=§ÖÀ¾ ¨Z>­ Þ =ï+¿@?CEOF G B)#CHWcC ©©>“ü7>)ÉåÂ>ïAžñC€þ=¡ýµC€}É=ÍÄ6Á@;šî2;šDoF’*F’*ÁÅ+ÁÅ+¼ÕuS=ä »_=Ô`T½•Àã=¨ æ¾ ±/>­"4 > U@?VÌ>F `B>?CrY\C+]Þ>“ü7>)·œÂ=ùÞAžàpC€€=²ÁC€}‹=­„àÄO@ÇD^·z?‰Œç?uÁ#‹;¼³š;»»¬F’*F’*ÁÅ+ÁÅ+¼×^Î=äÖ»Œë¨=ÔF(½•)=§^ü¾ £ž>­© >6™‘@?lGF HÐBfbC’¢—CO_•>“ü7>)úÂ=ÿEAžëÈC€€=ÃùC€}N=¾ë5ÄcïDuQ?‰–6?zë’ÁØ;æG‰;äÑÆF’*F’*ÁÅ+ÁÅ+¼× )=äË»•|=ÔV`½•l =§­Ð¾ ­Í>­< >;t@?óãF öýB‹6ˆC±vdCzøF>“ü7>)Ç{Â>ÔAžïLC€þ=טÏC€} =ÒÐÄz¿ûD†Æá?‰™H?€|CÁÖƒ< Ë < ¬ôF’*G5ö´ÁÅ+Á:ºŸ¼Õø=äó»´=Ô\T½•¢½=§ê»¾ ¯ô>­u >jöq@?Žò­FXãB¨ÈªC×(tC˜#Ã>“IÕ=ë7ÁÂ=dÇA]3C€ö=£ïÄC€|¼=ŸûLÄÇÝzDŽY’?6T¯?ƒ•kÁ¢<,á-<+3”F’*G5ö´ÁÅ+Á:ºŸ¼—rb=ší×»]¢k=’¼Î½Eú)=N$¾^c >4€ ?Öý@?>%F±B¦Î{CÔ£2C–[>wcA=… Â7'eAþC€€Ô=BoC€~¶=A'KÅ(éÞD`|r>ª­?†Á²Áµ<0t}<.¼¬G5ö´G5ö´Á:ºŸÁ:ºŸ¼,‹Ô="šÀ»´µI=àß¼·o5SD©=(Î*Â;Ô @Ó+IC€€ÿ=ZC€Z=–ÅH£}Dèß>JÁÆ?ŠÁ,]<þ~<ÌÖG5ö´G5ö´Á:ºŸÁ:ºŸ»‡ T<ĨW»‹N<½|¼€:6<+¸<½….Œ<ƒTÏ > Ê1@?¾C„Eëü;BNxdCƒ™xC:>Géù<íº¿Â:d@œ,ÒC€:<ËôC€R<ÈõRÅMV$CãB> ªM?U}Á;ô";òYÓGrâ‡G’®Á?¾áÁC‘ »!º<ˆ¦ñ»ñ<ƒ[¼¼Rç\< ŵ½-wÓ<#} ={Ò@?ÑJEEÜ6MB*Á,CY«”Cê…>C$ô<·Ù~Â9ï+@vÓ3C€€è<¬ >C€(<ªu¹ÅHŲC¥Ëí=ÓgB?½öÁën;Ø-®;ÖßyGrâ‡G’®Á?¾áÁC‘ »o‘™<ýi<„^Â8òÇ@6‚ÿC€&<‡A`C€S<†C=Å=šùCPªÃ=ŒÞG?”;xÁ‚Ì;­Ì;¬¿‰G’®G’®ÁC‘ÁC‘ »€æ€<¡gºËj<Ò¼ ‘B;°u¼¬{®;­ðB =Âý@?ý=µE¹@A’ÍB»"·B„S*>5K<·IÂ7Î’?Ö¿•C€<<'ÓC€Ñ<&ªÅ2ÈC\Ë=O|¡?—Î…ÁæÚ;\Ìh;\^G’®G’®ÁC‘ÁC‘ »¾Í±;˜b:;—5T»½Yµ;‡>U»²1; =L\@@ HWE¦:àAªBØÈB™Iþ>*ý;áËG’®G»+¿ÁC‘ÁGB¥ ¼W<;Ûã3ºêÀe;Ù_qºmåj;†ð<‡¡^;ˆD @u`˜@@5úE˜úŸAþôBK·«B Ì>5 ;×[ñÂçXº@@(ˆ-EŒãÕA-5ÿBm-ÇB'µô>?ñ<Â1Ûê?®§˜C€€.<@¢wC€€üC®#@@9b˜EËì@ôR1B2úAû×#>Dœ;Ÿê(Â6X§?WXŽC€é<’ C€€f<:ÈÄ®¢·Aþm¼<º|?¦üÆÁ¼;êÒ;¯çH ª3H?QÁN5ÁNåv9ο™;=xº³;5 º©:K;ÿ»ÍÃ;3 >ÝH@@KìtEiÐà@}AÔ;¬A–K>AM|;Y øÂ3 Ÿ?ÌâC€ý;Á¬?C€ù;Áí‹Ä‰ A´8<¨K?«›Áëv:¨[ :¨ ½H ª3H9´ÁN5ÁSI%¸£âš:ùÛu¸¨¢:ù~Í:½Ñ::ç˜;T§:æì& =¿Ùç@@`PæEȘ@¡'mAþÎŽA´,ø>A˱;‹¡(Â8¶ˆ?97rC€€š< =GC€ù< gmÄWÿA’ <­M?¯#KÁ 3¨:Ó—):ÓcH,ëÃH9´ÁQíÁSI!%ºy]Í;#EÕ¹ƒ‰;!C`ºK(t;àÐ;Žãf;, >Lìß@@v¿dEAC@'¢jAŠþÁAD‘>F ;M Â4¼K>¿Ã)C€º; Å±C€ý; žiÄ=ÐøA»JF÷o:Ö­àÂ18>ŠkZC€€;€!ßC€€;€žLÄ&¼@Ùf²<'Í?·¯§Á â®:<°v:<ËHEãŽHn$ÁTDÔÁW|J)9:P(:}}¸ 6ù:{µ<:cÒ(:j*óº½F´:h–H =U@@•HFE`@ qAld_A''‰>G²Æ; ;ŽÂ3îl>°[@C€ü;¶ýC€€;¶¾Ä…ô@¢ßÝ<"6?¼‘Á Ð:gÅì:gf™HtDKH…”ÁWí4ÁYjŽ;E9ìŽ:£÷E¹À·Á:¥3Ÿ7"B°:”ªö;'‹:•І2?S€@@¤5çE–œ?Š™Aðé@º—Ä>I­7:”v™Â36 >=>ÀC€ò;W*ýC€>;WfÚÃä-@HIp:ŒTFÂ2ë¥>4\ôC€€S;_«zC€€I;_þÒö×Ü@ ¾;Å Ý?ÅLÁn'9þ]ª9þmêH“¸5H e˜Á[;Á\©9Sa9´ô:)f¹×*:'^ž¹Fß:3:²ÄŸ:‡u =8ä‡@@ƱÞDæ6ú?$}ñ@¬¡@sIq>JdÂ:R”fÂ49¡>ƒ C€€¢;8¯wC€º;8 šÃ›Vk? º;„q5?ÊÁ›ã9Æßu9ÆWêH¡çH´^Á\ºÁ^³ ayº2Ë;9÷ø˜8=>¥9ö¯è¸4ç`9澺‚# 9ää3# <=º@@ÚuDÍýÕ>ÈsÛ@]K+@zm>I¸: ÜfÂ3¾É=¶Ž?C€€+; yðC€½; …ŽÃ}æP?;R0;<Þá?ÎíSÁ­u9†þ‚9‡uÌH³ÌHÆgãÁ^žÁ`Zúw‘ºLô9¦¦’¹=à9£Â·š‹9¡Mc9öN9ž‹u' ;Ú71@@ðk´D¸{i>^£_?ÿf?´[‡>Jë©9©&JÂ4’µ=TK‚C€å:²ù¢C€ù:²Ó7ÃRnÛ?ÖÌ;¼Ü?ÓêµÀý†9'ž?9'ÌNHÂÖùHÛ´=Á` QÁb q‹±9‘à9KÝ`7M°9Lxý¸Þ¨¥9@x;¹Ê9A * ‰€A@$ÛŸ?é$ï>KeW9æ26Â3“¦=‘²C€€;ª[C€×;ÀÃ.B>Ì; W?ÙåÀù™,9hÞ9gàHÝÎÞHõzÀÁbJÔÁd Ƴݹ¤5B9‰G©7mZ9Š> 9Œwÿ9òa¹ÿ˜d9‚èŸ. 
;r{ö@At]D’ÀŠ>;|+?ì°1?§]&>K 9¯PMÂ3à‘=\´C€é:à¸gC€µ:à‰à °Ï>ƒ»4:î~?ÞB Àõ’î91'91žØHðËKI(äÁc¸Áex%Ó¹gÿ9PÜ9='9OÞhµp¼9IÁÖ9®¾9I²/3 ;*I±@A D‚¬X>d–?Á¨Ý?ˆð#>KF·9—ÿÂ4ó==¡C€ð:ÔÖÖC€€:ÔÕžÂè€>:Z:Í…E?ãž©ÀñŠÉ9ÎH9ЉI¥IFáÁeECÁg÷ý=8q-98Ïô·ã¹Q98þÏ67£G947b9”±94w8:‰JØ@A0Dhh„=Ù&?—!?Uº–>K°M9tÄ/Â4C&=kßC€€:¾`œC€€:¾N¾Â½© =ûCã:©“š?éÈÀíw9°ó9ú°I¬YIÍåÁfÊÁh‡e/y86-Î9s§¸çÍ'9•¥¸„©9ÕÛ8š¿Š9L> ;;k@AA™šDNXy=¤ÂÍ?p-¤?)Ôê>KÅú9PˆÂ44=ÐdC€€ :±»C€ÿ:±µ#™ÈÓ=§õ„:‹ÌH?îºÉÀéU8ݼ8ÞwI÷¯I/9ÁhS¨Áj8°mѸ™@9]í·“§:8ÿýݸŸ‰Á8ø#¿9{ß›8÷ƒŽD :ðß^@ATõÃD7{=l·E?3ü_>þ‰Ô>KÖ|9.#WÂ4)r<Ø›C€€5:¢æ…C€ë:¢ÜUÂyBz=Ph}:V 7?ô|}Àå-Õ8³V8´I,) I=PÁiî”Ák™h½)¹i8Ò½a·Þf8Ôh7¸"9-8Éþ9{Tñ8ˬŸJ :psÔ@AjAŠD"b¬=”/>×ng>˜U9>Lô8×FcÂ47<†ÌýC€÷:^F8C€ü:^CåÂIÓ<éÂ:°?úaºÀá18e„)8fë„I²Ã²>|Ϥ>LMX8¬(ÇÂ4ˆ”é´>R˜;>LD¢8ºÜÞÂ4ßYëã>þ>L¤8~óXÂ4<(êC€Ù:/*ðC€ü:/(ãÁÏÄt<uE9¶ì@w«ÀÔƒÅ8ñÑ8pIqÁtI…j¨ÁoÔXÁq‹Cµ±8—KÓ8m·:58M ¸V:8/o¸¸”£8rÂm :LI}@A«|³CÉ`F;é§þ=â<–=Ÿù6>L‰8Q”QÂ49<ípC€ü:vBC€€:rÁÁ§ ?#i°<ú`¿@ µÀÀÐg-7¢7 êI„5{Iœ^ÁqbÓÁrñ’©4謰8î·Y˜8‘¶©£¿7ûYG¸±·Ì7üMx 9Úc@A¼¢ÅC³2w;æI?=éÛN=¥\”>L…m8@Â4†;»¼ÅC€õ9ùê"C€ô9ùé”Á„Û%>Ô:™<ÌxÁ@ ÙÀÌYP7°µè7´ |IIœµ%ÁrÂ~ÁtVÖ{Õ5Ç´7º‘=¶Cc7º©í·RãF7ºHÄ6£“ž7ºo[„ : -@AÏÙCŸâJ;ì*4=û[â=±¼Ó>L 8÷Â3þÀ;³‘kC€Ü:¢’C€ß:¢ÏÁKes>”tF<ºÙ2@nrÀÈc7Ë&O7Ï8I›ßI©*tÁt'Áu« -¶ l¯7²\~7ya7²´»6¾„Ì7®K©¸_éÒ7®ù'‘ 9“ÝÙ@Aä?ÕCä>L©~7ÛqžÂ4´;ˆg5C€€9Üi¯C€ô9ÜiÁ#Êæ>JóÉ<žš6@ê Àćs´,âu4,âuI¨I¸[ÎÁuŒrÁw)‚ý ·QZA7‡š¶$l'7ˆB>6i³¨7ƒ–¦¸Lî7„†&Ÿ 9M–@AûC€‡;f¤¹=†×=>²>L«7´ÛwÂ3ý³;`ö˜C€ó9ÇßCC€ò9ÇßòÁ Ú> E<‰Ò@{ ÀÀÍY7|U7ub I¶XÆIÇãdÁvø¶Áx‘- ¯ µÏBd7`.L¶ˆˆ7a(Ö6‰Ê7[¡ý¸Xé7\㯠9HÓÖ@B BCgô¡;LË´=zÒ=1[¹>Lª7‹ó¯Â4;-žÞC€€9©ï1C€û9©î$ÀÆcñ=¬è<^@";À½;~7xY'7rò¦IÅëÝIÙ"ôÁxe3ÁzB »¡·ªÄ¨7*è·2•7,ζ™!Ô7*qO7ÀT7+µ5À 8…±@BæbCRi°>L°Ç7_îÂ3þË; lÈC€€9•Í×C€€ 9•ÎÀšYŒ=€ËoL¼7;ÆÂÂ3ý—:é–ŠC€ü9Š!ÞC€ý9Š"\Àpã°=ïò<(è<@"´£À¶©Í6¼É[6ÁfËIéÎJqÁ{L¼­7)¬Â3ý]:¶Ø{C€þ9n C€ÿ9n À7ö<Ò׿<³ß@&¡ À³³6Ê&©6ÉoƒIý™âJ .Á|³ŸÁ~n$«Õ6Œ¤S6¶Q¿¶J¡m6·CÅ6žP6³•O·m»6´¡J 8Zé@BJ-ÈC#o:Lã~<˜¯L¾è6ÞÃEÂ4¸:Š•‡C€ö9FN®C€ó9FNxÀBD<¬9<Cg@*¥¨À°ú6®ðu6¬æþJ aŠJz˜Á~8IÁ€f ]5Ÿì‡6Špõ5­SÊ6ŠöEµçó*6‰ ·ir6‰· 8kT@B^eCÊ$:{ŸÚ<ÃË.<Šrq>L¿Ý6¶Ñ†Â3ÿ:cPÚC€€92ø“C€ù92ø±¿Ù&¿>LÂã6˜òüÂ4°:>»zC€ü9$ìC€û9$ëó¿¤×<\è;ÈII@2ùçÀ¬Eå3“nˆ³“nˆJ&.J9€GÁ€³2Á§Ä$Ë-ñ¶¿R6=m´[Ç56=à„µŠÂ³6=Uö^6=¯§V 7»š@B†ŒÏC~>LÃá6pEæÂ3ÿÑ:»UC€þ9v%C€€9v/¿w’”;¤®ß;ªIø@7J¾ÀªK¾2áϲáÏJ6Š„JM8{ÁƒÿÁ‚ˆg,_7©6²ã6 ¶}Ñ6YµW¼6ül4Ý¢;6VQx 7r×@B”JBÿâh:„oÍ<íä*<¨6ë>LÄð6RÂ3ÿò:»ÏC€€9ðTC€€9ðW¿;é;Yì4;•@;¶9À¨¥7»}7 JI¬}Jc†ÜÁ‚a£ÁƒmÖ5·C)3•dX6Íß4§)C6A?4d¦P6Ɖ4YY˜6FÌ 7Þ}@B¢Î8Bõ ô9Ï‘LÆY6õAÂ4e9Æ ¦C€ÿ8äÞC€€8伿 |;Ä‚;‚ýÀ@@<ýÀ§•6z³46])©J_ÏçJ~µÁƒI<Á„cA Qi³w15Æ/¿µ‰Ä…5Æö4„65ÂÇ ¶Á´D5Ã>ÝÇ 7@y4@B³ Bì˜>LÇ76 ÏÔÂ3ÿÑ9«˜gC€ÿ8ÙmC€ü8ÙmŸ¾ÑƒÃ:·øí;`Êb@Dß²À¥Ä€3Ôxw³ÔxwJy‰-JŽetÁ„;'Á…aN¿biµRr5«ƒŠµÉj 5¬òµ®éè5« 85­Ôë5«¯ßô 7eM¿@BÄþ¦BäÍÚ>LÈ5Üw+Â4"9‰@ûC€€8¿NûC€ÿ8¿Nñ¾š…œ:hÃe;@Ïç@IŸÀ¤¬º4yLÍ´yLÍJ‹ÏÃJ ¬_Á…8IÁ†m‰_Uw=².Ž5‰bëµæ5‰È«´þ'5ˆ‡h60Ô÷5ˆúè& 6Ø| @Bر·BÞÙ>LÈ¿5½9ßÂ49k›¿C€€8´žûC€ÿ8´žø¾c=h:XL;$Ýã@N{¦À£Â4HR´HRJc¡J¶ !Á†?Á‡ƒ'sG)5Zl5kz>µ’ïº5l(‰´…ƒ5jîÆ5¹žÀ5k´] 6¦$À@Bî]BÚ¿>LÉÊ5šQ6Â49?þbC€ÿ8¡ù¿C€€8¡ù½¾&Ř9½ý¸;Ò5@SvJÀ¢ÿi³¯u3¯uJ²)&JÏdEÁ‡SCÁˆ¥‹k®M4¯d;5@!ݵ=âÛ5@Õ ³ùëè5@ ¦4Æ€œ5@Ó¨™ 6N.@C™BÖ":.ºú<Ðì <“»W>LÊa5„Â49$_ØC€þ8˜|C€ý8˜|½ó‡9zä;Þy@XªÀ¢_d6éÅQ6ÙïmJÊáwJíÅzÁˆt-Á‰Õ ¨¹Òý3Ah5$YÒµ‰dÚ5$ÖR09 5#ç_µž?^5$s§Ü 60Ø@C5ÂBÓt>LÊ£5^.­Â49 EüC€þ8‹C€ÿ8‰½¯øy9"³:ÝS6@]È…À¡Ý4ÐÓ´ÐÓJèQKö€Á‰¡tÁ‹ÉÌ1ÿ5³¼uU5 tdµK5 åé4ŒQ5 å{µ¨©ð5 dI% 6>ÙP@C¡‰BЊÆ9Ñ}O<‰¶ LË5B™æÂ48ò¯C€€8‡íNC€ÿ8‡íM½}Tæ8Ÿ¼þ:¡k¹@c!À¡sþ6‡îI6KK·=K¨NÁŠÚoÁŒV¼÷4Ñ´q}°4ò>4µ0BÜ4ó'ù´â™4ñ©¤µmð4òªžu 5Íš[@C.~}BΔz9qçÒ<&Ó;ëìý>LË5#ûÂ3ÿû8̲C€ÿ8| 7C€þ8| :½5ƒ8–n:Ô@±@h›¸À¡ê6*ŸÂ6JNK±ºK8´ˆÁŒ|Á¨À*Óuµ1‰nÊ4ÌNݵx 4Ìù2”ô^4Ìí5à¦4ÌÔ×Î 5«©§@C?ñŠBÍ m9-CI;ú—@;±1É>LË}5ÖÂ48²ózC€ý8s%ñC€þ8s%ñ½§8^#:Ý`!@n7¢À Ý95÷5åœuK3öKWø¶ÁnåÁ}i³Ä43rÑ4³Où´5z4³ön³—ˆ24²ã}µ@ï4³›L/ 4ÕP~@CS#KBËÔ®>LË’5ÈWÂ48 5tC€ÿ8ovUC€€8ovT¼µÂ[@sö-À ¨À³„33„3KROLK}¬XÁŽÉkÁjCµ¹#4c¾ó4 u‚´´ÊÐ4¡ é³Zå”4 lø³ª4¡ÿš 5eˆ@Ch@lBÊäõ>LËÆ4ä*¹Â3ÿý8ñ×C€ÿ8i\C€þ8i\¼~Ór7¯ÿ×:°Ïl@yØ-À 
Ì4û´ûKvÊÙK•”hÁ-Á‘Ù—–)´“E94Žà´ÁÓM4Ž¡/´³,4Ž ­´h0A4Ž›Ä 4&ll@CzBÊ,:]à<ãë< Ý×>LÌ4ОoÂ3ÿý8ºNC€ÿ8j¨ÉC€þ8j¨Ê¼1bN7ªÎµ:ö‚=@Þ~À `6Á«l6´GnK‘dmK°óPÁ‘™ÝÁ“N§€Ñ! ´š14ëj´›n04‚o±²dú~4×X´ 14‚hG‘ 4i(ÿ@CŒƒ#BÉžm>LÌ4¼ÊâÂ3ÿú8qkC€ÿ8lÙ@C€þ8lÙB»õŒH7vý;ÀD@ƒÀ G´²ÖZ’2ÖZ’K«éLÌ5üÂ3ÿú9ÄVC€ÿ8ñk„C€þ8ñk‹»¨=@†-ÌÀ 5³† 3† KÁºKÓ›(Á”ßÁ”Ü[rŸÑc³Ì#4ôµ´á:ª4µgO´lc&4Ï,³µ˜c4Ê\,‘5„›@XTENSION= 'BINTABLE' / binary table extension BITPIX = 8 / 8-bit bytes NAXIS = 2 / 2-dimensional binary table NAXIS1 = 160 / width of table in bytes NAXIS2 = 69 PCOUNT = 0 / size of special data area GCOUNT = 1 / one data group (required keyword) TFIELDS = 40 TTYPE1 = 'SMA ' / label for field 1 TFORM1 = '1E ' / data format of field: 4-byte REAL TUNIT1 = 'pixel ' / physical unit of field TTYPE2 = 'INTENS ' / label for field 2 TFORM2 = '1E ' / data format of field: 4-byte REAL TTYPE3 = 'INT_ERR ' / label for field 3 TFORM3 = '1E ' / data format of field: 4-byte REAL TTYPE4 = 'PIX_VAR ' / label for field 4 TFORM4 = '1E ' / data format of field: 4-byte REAL TTYPE5 = 'RMS ' / label for field 5 TFORM5 = '1E ' / data format of field: 4-byte REAL TTYPE6 = 'ELLIP ' / label for field 6 TFORM6 = '1E ' / data format of field: 4-byte REAL TTYPE7 = 'ELLIP_ERR' / label for field 7 TFORM7 = '1E ' / data format of field: 4-byte REAL TTYPE8 = 'PA ' / label for field 8 TFORM8 = '1E ' / data format of field: 4-byte REAL TUNIT8 = 'degrees ' / physical unit of field TTYPE9 = 'PA_ERR ' / label for field 9 TFORM9 = '1E ' / data format of field: 4-byte REAL TUNIT9 = 'degrees ' / physical unit of field TTYPE10 = 'X0 ' / label for field 10 TFORM10 = '1E ' / data format of field: 4-byte REAL TUNIT10 = 'pixel ' / physical unit of field TTYPE11 = 'X0_ERR ' / label for field 11 TFORM11 = '1E ' / data format of field: 4-byte REAL TUNIT11 = 'pixel ' / physical unit of field TTYPE12 = 'Y0 ' / label for field 12 TFORM12 = '1E ' / data format of field: 4-byte REAL TUNIT12 = 'pixel ' / physical unit of field TTYPE13 = 'Y0_ERR ' / label for field 13 TFORM13 = '1E ' / data format of field: 4-byte REAL TUNIT13 = 'pixel ' / physical unit of field TTYPE14 = 'GRAD ' / label for field 14 TFORM14 = '1E ' / data format of field: 4-byte REAL TTYPE15 = 'GRAD_ERR' / label for field 15 TFORM15 = '1E ' / data format of field: 4-byte REAL TTYPE16 = 'GRAD_R_ERR' / label for field 16 TFORM16 = '1E ' / data format of field: 4-byte REAL TTYPE17 = 'RSMA ' / label for field 17 TFORM17 = '1E ' / data format of field: 4-byte REAL TUNIT17 = 'pixel**1/4' / physical unit of field TTYPE18 = 'MAG ' / label for field 18 TFORM18 = '1E ' / data format of field: 4-byte REAL TTYPE19 = 'MAG_LERR' / label for field 19 TFORM19 = '1E ' / data format of field: 4-byte REAL TTYPE20 = 'MAG_UERR' / label for field 20 TFORM20 = '1E ' / data format of field: 4-byte REAL TTYPE21 = 'TFLUX_E ' / label for field 21 TFORM21 = '1E ' / data format of field: 4-byte REAL TTYPE22 = 'TFLUX_C ' / label for field 22 TFORM22 = '1E ' / data format of field: 4-byte REAL TTYPE23 = 'TMAG_E ' / label for field 23 TFORM23 = '1E ' / data format of field: 4-byte REAL TTYPE24 = 'TMAG_C ' / label for field 24 TFORM24 = '1E ' / data format of field: 4-byte REAL TTYPE25 = 'NPIX_E ' / label for field 25 TFORM25 = '1J ' / data format of field: 4-byte INTEGER TTYPE26 = 'NPIX_C ' / label for field 26 TFORM26 = '1J ' / data format of field: 4-byte INTEGER TTYPE27 = 'A3 ' / label for field 27 TFORM27 = '1E ' / data format of field: 4-byte REAL TTYPE28 = 'A3_ERR ' / label for field 28 TFORM28 = '1E ' / data format of field: 4-byte REAL TTYPE29 = 'B3 ' / label for field 29 
TFORM29 = '1E ' / data format of field: 4-byte REAL TTYPE30 = 'B3_ERR ' / label for field 30 TFORM30 = '1E ' / data format of field: 4-byte REAL TTYPE31 = 'A4 ' / label for field 31 TFORM31 = '1E ' / data format of field: 4-byte REAL TTYPE32 = 'A4_ERR ' / label for field 32 TFORM32 = '1E ' / data format of field: 4-byte REAL TTYPE33 = 'B4 ' / label for field 33 TFORM33 = '1E ' / data format of field: 4-byte REAL TTYPE34 = 'B4_ERR ' / label for field 34 TFORM34 = '1E ' / data format of field: 4-byte REAL TTYPE35 = 'NDATA ' / label for field 35 TFORM35 = '1J ' / data format of field: 4-byte INTEGER TTYPE36 = 'NFLAG ' / label for field 36 TFORM36 = '1J ' / data format of field: 4-byte INTEGER TTYPE37 = 'NITER ' / label for field 37 TFORM37 = '1J ' / data format of field: 4-byte INTEGER TTYPE38 = 'STOP ' / label for field 38 TFORM38 = '1J ' / data format of field: 4-byte INTEGER TTYPE39 = 'A_BIG ' / label for field 39 TFORM39 = '1E ' / data format of field: 4-byte REAL TTYPE40 = 'SAREA ' / label for field 40 TFORM40 = '1E ' / data format of field: 4-byte REAL TUNIT40 = 'pixel ' / physical unit of field TDISP1 = 'F7.2 ' / display format TDISP2 = 'G10.3 ' / display format TDISP3 = 'G10.3 ' / display format TDISP4 = 'G9.3 ' / display format TDISP5 = 'G9.3 ' / display format TDISP6 = 'F6.4 ' / display format TDISP7 = 'F6.4 ' / display format TDISP8 = 'F6.2 ' / display format TDISP9 = 'F6.2 ' / display format TDISP10 = 'F7.2 ' / display format TDISP11 = 'F6.2 ' / display format TDISP12 = 'F7.2 ' / display format TDISP13 = 'F6.2 ' / display format TDISP14 = 'G8.3 ' / display format TDISP15 = 'G6.3 ' / display format TDISP16 = 'G6.3 ' / display format TDISP17 = 'F7.5 ' / display format TDISP18 = 'G7.3 ' / display format TDISP19 = 'G7.3 ' / display format TDISP20 = 'G7.3 ' / display format TDISP21 = 'G12.5 ' / display format TDISP22 = 'G12.5 ' / display format TDISP23 = 'G7.3 ' / display format TDISP24 = 'G7.3 ' / display format TDISP25 = 'I6 ' / display format TNULL25 = -2147483647 / undefined value for column TDISP26 = 'I6 ' / display format TNULL26 = -2147483647 / undefined value for column TDISP27 = 'G9.3 ' / display format TDISP28 = 'G7.3 ' / display format TDISP29 = 'G9.3 ' / display format TDISP30 = 'G7.3 ' / display format TDISP31 = 'G9.3 ' / display format TDISP32 = 'G7.3 ' / display format TDISP33 = 'G9.3 ' / display format TDISP34 = 'G7.3 ' / display format TDISP35 = 'I5 ' / display format TNULL35 = -2147483647 / undefined value for column TDISP36 = 'I5 ' / display format TNULL36 = -2147483647 / undefined value for column TDISP37 = 'I3 ' / display format TNULL37 = -2147483647 / undefined value for column TDISP38 = 'I2 ' / display format TNULL38 = -2147483647 / undefined value for column TDISP39 = 'G9.3 ' / display format TDISP40 = 'F5.1 ' / display format IMAGE = 'synth.fits' END F’*ÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿC€€ÿÿÿÿC€~zÿÿÿÿÃvôÿÿÿÿÿÿÿÿÁÅ+ÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿ€€ÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿ€€ÿÿÿÿÿÿÿÿ?__F‘^A’˜ÃBºßùB„#ø>“ü7>)Ä›Â=ýœAžì›C€€=]?€C€~z=W‹(ĨPD R$?‰0?Y~`Á‡Œ;‹; ÉF’*F’*ÁÅ+ÁÅ+¼× h=䨻‹z«=ÔV½•O.=§—Ô¾ °>­#^ =s z@?µµF%²A±q™Bâ2BŸò>“ü7>)À Â=þ AžèUC€€=sYC€~T=mÄ ŠÍD$P?‰•å?^¼üÁz€;,w®;,üF’*F’*ÁÅ+ÁÅ+¼× a=ä+»Ë =ÔSP½•Uª=§•«¾ «5>­ =Šƒf@?!azF£gAÖ´qCÙBÁˆG>“ü7>)ÊBÂ>¥AžñæC€€=…àC€~*=‚jÄ­D'Zù?‰š[?dùÁj©;Q|;PäcF’*F’*ÁÅ+ÁÅ+¼ÖSû=ä5»ŽQ¾=Ô`,½•­ÿ=§÷¾ ²!>­" =ÊŽ@?1„ÓF¿BÙC%†&Bê>“ü7>)¿Â>üAžçkC€€=“9ùC€}ú=kÒÄ+D\D87?‰G?iœÁWk;~‘s;}­PF’*F’*ÁÅ+ÁÅ+¼Öžj=䑻޲:=ÔQ뽕›ï=§ÖÀ¾ ¨Z>­ Þ =ï+¿@?CEOF G B)#CHWcC ©©>“ü7>)ÉåÂ>ïAžñC€þ=¡ýµC€}É=ÍÄ6Á@;šî2;šDoF’*F’*ÁÅ+ÁÅ+¼ÕuS=ä »_=Ô`T½•Àã=¨ æ¾ ±/>­"4 > U@?VÌ>F 
`B>?CrY\C+]Þ>“ü7>)·œÂ=ùÞAžàpC€€=²ÁC€}‹=­„àÄO@ÇD^·z?‰Œç?uÁ#‹;¼³š;»»¬F’*F’*ÁÅ+ÁÅ+¼×^Î=äÖ»Œë¨=ÔF(½•)=§^ü¾ £ž>­© >6™‘@?lGF HÐBfbC’¢—CO_•>“ü7>)úÂ=ÿEAžëÈC€€=ÃùC€}N=¾ë5ÄcïDuQ?‰–6?zë’ÁØ;æG‰;äÑÆF’*F’*ÁÅ+ÁÅ+¼× )=äË»•|=ÔV`½•l =§­Ð¾ ­Í>­< >;t@?óãF öýB‹6ˆC±vdCzøF>“ü7>)Ç{Â>ÔAžïLC€þ=טÏC€} =ÒÐÄz¿ûD†Æá?‰™H?€|CÁÖƒ< Ë < ¬ôF’*G5ö´ÁÅ+Á:ºŸ¼Õø=äó»´=Ô\T½•¢½=§ê»¾ ¯ô>­u >jöq@?Žò­FXãB¨ÈªC×(tC˜#Ã>“IÕ=ë7ÁÂ=dÇA]3C€ö=£ïÄC€|¼=ŸûLÄÇÝzDŽY’?6T¯?ƒ•kÁ¢<,á-<+3”F’*G5ö´ÁÅ+Á:ºŸ¼—rb=ší×»]¢k=’¼Î½Eú)=N$¾^c >4€ ?Öý@?>%F±B¦Î{CÔ£2C–[>wcA=… Â7'eAþC€€Ô=BoC€~¶=A'KÅ(éÞD`|r>ª­?†Á²Áµ<0t}<.¼¬G5ö´G5ö´Á:ºŸÁ:ºŸ¼,‹Ô="šÀ»´µI=àß¼·o5SD©=(Î*Â;Ô @Ó+IC€€ÿ=ZC€Z=–ÅH£}Dèß>JÁÆ?ŠÁ,]<þ~<ÌÖG5ö´G5ö´Á:ºŸÁ:ºŸ»‡ T<ĨW»‹N<½|¼€:6<+¸<½….Œ<ƒTÏ > Ê1@?¾C„Eëü;BNxdCƒ™xC:>Géù<íº¿Â:d@œ,ÒC€:<ËôC€R<ÈõRÅMV$CãB> ªM?U}Á;ô";òYÓGrâ‡G’®Á?¾áÁC‘ »!º<ˆ¦ñ»ñ<ƒ[¼¼Rç\< ŵ½-wÓ<#} ={Ò@?ÑJEEÜ6MB*Á,CY«”Cê…>C$ô<·Ù~Â9ï+@vÓ3C€€è<¬ >C€(<ªu¹ÅHŲC¥Ëí=ÓgB?½öÁën;Ø-®;ÖßyGrâ‡G’®Á?¾áÁC‘ »o‘™<ýi<„^Â8òÇ@6‚ÿC€&<‡A`C€S<†C=Å=šùCPªÃ=ŒÞG?”;xÁ‚Ì;­Ì;¬¿‰G’®G’®ÁC‘ÁC‘ »€æ€<¡gºËj<Ò¼ ‘B;°u¼¬{®;­ðB =Âý@?ý=µE¹@A’ÍB»"·B„S*>5K<·IÂ7Î’?Ö¿•C€<<'ÓC€Ñ<&ªÅ2ÈC\Ë=O|¡?—Î…ÁæÚ;\Ìh;\^G’®G’®ÁC‘ÁC‘ »¾Í±;˜b:;—5T»½Yµ;‡>U»²1; =L\@@ HWE¦:àAªBØÈB™Iþ>*ý;áËG’®G»+¿ÁC‘ÁGB¥ ¼W<;Ûã3ºêÀe;Ù_qºmåj;†ð<‡¡^;ˆD @u`˜@@5úE˜úŸAþôBK·«B Ì>5 ;×[ñÂçXº@@(ˆ-EŒãÕA-5ÿBm-ÇB'µô>?ñ<Â1Ûê?®§˜C€€.<@¢wC€€üC®#@@9b˜EËì@ôR1B2úAû×#>Dœ;Ÿê(Â6X§?WXŽC€é<’ C€€f<:ÈÄ®¢·Aþm¼<º|?¦üÆÁ¼;êÒ;¯çH ª3H?QÁN5ÁNåv9ο™;=xº³;5 º©:K;ÿ»ÍÃ;3 >ÝH@@KìtEiÐà@}AÔ;¬A–K>AM|;Y øÂ3 Ÿ?ÌâC€ý;Á¬?C€ù;Áí‹Ä‰ A´8<¨K?«›Áëv:¨[ :¨ ½H ª3H9´ÁN5ÁSI%¸£âš:ùÛu¸¨¢:ù~Í:½Ñ::ç˜;T§:æì& =¿Ùç@@`PæEȘ@¡'mAþÎŽA´,ø>A˱;‹¡(Â8¶ˆ?97rC€€š< =GC€ù< gmÄWÿA’ <­M?¯#KÁ 3¨:Ó—):ÓcH,ëÃH9´ÁQíÁSI!%ºy]Í;#EÕ¹ƒ‰;!C`ºK(t;àÐ;Žãf;, >Lìß@@v¿dEAC@'¢jAŠþÁAD‘>F ;M Â4¼K>¿Ã)C€º; Å±C€ý; žiÄ=ÐøA»JF÷o:Ö­àÂ18>ŠkZC€€;€!ßC€€;€žLÄ&¼@Ùf²<'Í?·¯§Á â®:<°v:<ËHEãŽHn$ÁTDÔÁW|J)9:P(:}}¸ 6ù:{µ<:cÒ(:j*óº½F´:h–H =U@@•HFE`@ qAld_A''‰>G²Æ; ;ŽÂ3îl>°[@C€ü;¶ýC€€;¶¾Ä…ô@¢ßÝ<"6?¼‘Á Ð:gÅì:gf™HtDKH…”ÁWí4ÁYjŽ;E9ìŽ:£÷E¹À·Á:¥3Ÿ7"B°:”ªö;'‹:•І2?S€@@¤5çE–œ?Š™Aðé@º—Ä>I­7:”v™Â36 >=>ÀC€ò;W*ýC€>;WfÚÃä-@HIp:ŒTFÂ2ë¥>4\ôC€€S;_«zC€€I;_þÒö×Ü@ ¾;Å Ý?ÅLÁn'9þ]ª9þmêH“¸5H e˜Á[;Á\©9Sa9´ô:)f¹×*:'^ž¹Fß:3:²ÄŸ:‡u =8ä‡@@ƱÞDæ6ú?$}ñ@¬¡@sIq>JdÂ:R”fÂ49¡>ƒ C€€¢;8¯wC€º;8 šÃ›Vk? º;„q5?ÊÁ›ã9Æßu9ÆWêH¡çH´^Á\ºÁ^³ ayº2Ë;9÷ø˜8=>¥9ö¯è¸4ç`9澺‚# 9ää3# <=º@@ÚuDÍýÕ>ÈsÛ@]K+@zm>I¸: ÜfÂ3¾É=¶Ž?C€€+; yðC€½; …ŽÃ}æP?;R0;<Þá?ÎíSÁ­u9†þ‚9‡uÌH³ÌHÆgãÁ^žÁ`Zúw‘ºLô9¦¦’¹=à9£Â·š‹9¡Mc9öN9ž‹u' ;Ú71@@ðk´D¸{i>^£_?ÿf?´[‡>Jë©9©&JÂ4’µ=TK‚C€å:²ù¢C€ù:²Ó7ÃRnÛ?ÖÌ;¼Ü?ÓêµÀý†9'ž?9'ÌNHÂÖùHÛ´=Á` QÁb q‹±9‘à9KÝ`7M°9Lxý¸Þ¨¥9@x;¹Ê9A * ‰€A@$ÛŸ?é$ï>KeW9æ26Â3“¦=‘²C€€;ª[C€×;ÀÃ.B>Ì; W?ÙåÀù™,9hÞ9gàHÝÎÞHõzÀÁbJÔÁd Ƴݹ¤5B9‰G©7mZ9Š> 9Œwÿ9òa¹ÿ˜d9‚èŸ. 
;r{ö@At]D’ÀŠ>;|+?ì°1?§]&>K 9¯PMÂ3à‘=\´C€é:à¸gC€µ:à‰à °Ï>ƒ»4:î~?ÞB Àõ’î91'91žØHðËKI(äÁc¸Áex%Ó¹gÿ9PÜ9='9OÞhµp¼9IÁÖ9®¾9I²/3 ;*I±@A D‚¬X>d–?Á¨Ý?ˆð#>KF·9—ÿÂ4ó==¡C€ð:ÔÖÖC€€:ÔÕžÂè€>:Z:Í…E?ãž©ÀñŠÉ9ÎH9ЉI¥IFáÁeECÁg÷ý=8q-98Ïô·ã¹Q98þÏ67£G947b9”±94w8:‰JØ@A0Dhh„=Ù&?—!?Uº–>K°M9tÄ/Â4C&=kßC€€:¾`œC€€:¾N¾Â½© =ûCã:©“š?éÈÀíw9°ó9ú°I¬YIÍåÁfÊÁh‡e/y86-Î9s§¸çÍ'9•¥¸„©9ÕÛ8š¿Š9L> ;;k@AA™šDNXy=¤ÂÍ?p-¤?)Ôê>KÅú9PˆÂ44=ÐdC€€ :±»C€ÿ:±µ#™ÈÓ=§õ„:‹ÌH?îºÉÀéU8ݼ8ÞwI÷¯I/9ÁhS¨Áj8°mѸ™@9]í·“§:8ÿýݸŸ‰Á8ø#¿9{ß›8÷ƒŽD :ðß^@ATõÃD7{=l·E?3ü_>þ‰Ô>KÖ|9.#WÂ4)r<Ø›C€€5:¢æ…C€ë:¢ÜUÂyBz=Ph}:V 7?ô|}Àå-Õ8³V8´I,) I=PÁiî”Ák™h½)¹i8Ò½a·Þf8Ôh7¸"9-8Éþ9{Tñ8ˬŸJ :psÔ@AjAŠD"b¬=”/>×ng>˜U9>Lô8×FcÂ47<†ÌýC€÷:^F8C€ü:^CåÂIÓ<éÂ:°?úaºÀá18e„)8fë„I²Ã²>|Ϥ>LMX8¬(ÇÂ4ˆ”é´>R˜;>LD¢8ºÜÞÂ4ßYëã>þ>L¤8~óXÂ4<(êC€Ù:/*ðC€ü:/(ãÁÏÄt<uE9¶ì@w«ÀÔƒÅ8ñÑ8pIqÁtI…j¨ÁoÔXÁq‹Cµ±8—KÓ8m·:58M ¸V:8/o¸¸”£8rÂm :LI}@A«|³CÈýë<,ß|=Ø[=˜Ì±>Mÿj8’’Â4 ï<6KMC€ø:]XÆC€€:]VOÁ¤ª·?$Ê/=v@ µÀÀÐV17ñ97ív I„5{Iœ^ÁqbÓÁrñ’©¸ÅG81˜a7‰y81ûû7]4Ì8$ÔZ¸à* 8&š2 :7Nå@A¼¢ÅC²áu<.=»Ÿz=„«b>N·8~yòÂ4Ì<¿C€ð:TZC€ÿ:TQÁuÙ>Òtó<Ð4@ ÙÀÌI˜7è½·7ég±IIœµ%ÁrÂ~ÁtVÖ{Õ5Þ!—8`ÿ5’ë8½W8^ËÛ8¢Ê¸x¶;84“2 9ª¸@AÏÙCŸŸ >$ÔT@Ov?‘°£>M«–:—@HÂ4Ì><ïC€ð<Š˜wC€ÿ<Š—ÉÁG‚Ì>‘‘@<ºÈs@nrÀÈTi:‹Ë:~ëI›ßI©*tÁt'Áu« -5Äad:6uø:m‰Ö:7i¹À?9:'+ã:ö:(:ž22>|be@LÍÚAä?ÕCŽ‹x>jÝ@Žñƒ?Ï—”>J¥‡:ôþÂ4Ì>šfèC€<õï¾C€€î<õî‡Á!ú >Ujª<¨¥º@ê ÀÄf:e•:dÞ/I¨HþI¸[ÎÁu“ÒÁw)‚ º^Râ:”M繆Úr:•D!9ޏY:}j:ý_“:¶'2 >9\ä@òÂzAûC€Õ> Þ @; Î?yI>J¥‡:ªÖdÂ4Ì>WU§C€€Â<¼¥BC€-<¼¤RÀú÷…>º–<’œ€@{ ÀÀ®h:Ý·:Ø…I¶XÆIÇãdÁvø¶Áx‘- ¯ :qÞú:Må9áö:Nk§9Oa:LT¹ÀRf:Mñ22>K‘ AnÞB BCg€Š>b—@_%?‹i&>Lgc:Öw8Â3}>†¤^C€Ì=C€€#=®`ÀÂnÄ=Ã×+<€íC@";À½*:4¤E:4|IIÅúYIÙ"ôÁxfxÁzB ½¡¹sÖ]:ƒ ã8§ã7:„b¹Oùa:rÉ»:Uƒ:sTy7 =°¡UA#íÓBæbCQÜ]=Æ,T@!±Æ?Ay>Lœ›:¥xÂ3×#>QTñC€Ì<Þ±gC€€#<ÞºÀ•Î=Šv,¥:>BIÖ›Ië‘ÁyÍÁ{k'¹oñÖ:M;­¹Eo:L9\@:L(.º $:KH= =[¶A2Ï9B'C?®Œ=]A”?Ƨî>âaê>MIm:TgŠÂ4Ô>âjC€G¶­_>MIm:A9ÝÂ4Ô=ñISC€€µ<œ¢×C€ <œ #À4êÙ<ÜÐé<:@&¡ À³ª 9‡¹9†æ©Iý™âJ .Á|³ŸÁ~n$«Õ·±ì„9î1&¹"ã09ïO|9 eP9ëv"º “79ìà«I =ªrs>LÛg:I%pÂ3ö4=ý®™C€ž<´ûC€õ<´­À …™<²<ˆ@*¥¨À°íÚ9T”9YˆJ aŠJz˜Á~8IÁ€f ]·Õl–9úk½·X199ùb¸‹‡9úAl7fÕ9ù±zQ :âÜæAo†ƒB^eCý=´5?¡TC>žÙl>Lôœ:UÆìÂ3ö4>¦C€ž<Ò[vC€õ<Ò]t¿ÔÜY<_ }<y[@.ÃÀ®uº9w=9v¹JpuJ(w•ÁÉNÁ€Ñ–]%ù8SM:B7¬{:ÉW8x¼:4úºɵ: hY <³“ÒA„Bt¢ìC<2<ˆî?5(:>)q9>Lôœ:õÂ3ö4=¢¾,C€ž<‹Ì¤C€õ<‹Íô¿ Æ$< Qž;Ú§@2ùçÀ¬=•9×g9¡ïJ& ÀJ9€GÁ€²Á§Ä$Å-ñ8ŸÁŸ9 þ󏻨F9 o”8Ÿ T9 Éo¸©09 lUb2=Vç˜A’O¥B†ŒÏCnÄ>/>Mcë: ÐËÂ3ö4=­‡C€ž<¥zâC€õ<¥|¢¿sÎÙ;¶mï;¿V@7J¾ÀªGÈ8ý·8üJ6qKJM8{Á‚ÌÁ‚ˆg,S7©8´x9¬ŽM´êò9­ï˜7P-z9«™H9ÅD9¬·Ýj <¯Þ9A£€UB”JBÿª·÷¥>KðÁ: »óÂ3ö4=®L’C€€0<·C€c<·S¿8 ;n-Ï;¥—S@;¶9À¨ˆ8ÜßB8ÜÈrJIÌyJc†ÜÁ‚cÁƒmÖ5ÇC)¸8Ûÿ9­)¤·=÷E9®óÜ6%í»9­Ä9%9­úòq ;Î4A¸Ê˜B¢Î8BôËÚ<…? ó=Μ`>L˜:«=Â3ö4=žäaC€€j<¸*SC€¡<¸-¿ DT;#€;˜´N@@<ýÀ§¡8¬ @8«ý™J_æßJ~µÁƒJ Á„cAQi¶ ÷9Ÿ'z8—Ù9žùI8(šÂ9˜0:€×d9—4*w ;¶AÔI%B³ Bëò®;¾û¶>¸a=„aµ>LÆ£9ÄÔ9Â3ö4=qL¶C€€j<š|C€¡<š~ɾÌÁ:ÊFÂ;|æÿ@Dß²À¥¾/8a«08`T„Jy‰-JŽetÁ„;'Á…aN¿bi8™~y9r£Ù¶ª6…9qœa¸¬IŠ9ož*9«Nq9pTÀ{ ;dçÀAøLÏBÄþ¦Bä«{;}¬:>†ì™=1÷„>L®]9“øÂ3ö4=8‘ÅC€[<¼C€€®< ¾—",:wÃ;X]?@IŸÀ¤§8(8J‹ÓVJ ¬_Á…8‚Á†m‰_Yw=8‡-k97à 8>› 97EH8øÛ97Co¹€96Åè~ ;¨aDB$ÄBر·BÞ¾Ú;#}ë>?NÙ<åf">L®]9‘fÂ3ö4=5\úC€€Y<‹p)C€±<‹r¿¾^Y:;60@N{¦À£¾ 7Íèe7Ê!ìJc¡J¶ !Á†?Á‡ƒ'sG)¸ êc94§1¸"غ94+‘·þÚ«94–h¸.žC94¡~ ;š B2 ÙBî]BÙó/:úEr>! 
:<¯”œ>M%9‚©ãÂ4:D="¿4C€€Y<‰è9C€±<‰Øþ¾#ŒÎ9Ñ:Ž;#¿ñ@SvJÀ¢üt7 ºm7žßJ²…JÏdEÁ‡R™Áˆ¥‹[®M¶s{9"u¢·¨=9"v7;»9"K…¸ +Ã9!ÜN~ <÷€BWX)C™BÖò:ø÷5>079<®ª>M%9uuÂ3ä™=ÝC€€Y<Žp¿C€±<Žx&½îo69„à&;ª#@XªÀ¢\Û7¢7¡;£JÊÚÆJíÅzÁˆsäÁ‰Õ ¨±Òý·àÖ39wŽ8ƒÄW9:8ðB9‹à9 -p9$~ ;F¢õB‚H›C5ÂBÒ÷ò:–¿¥=ê LÝ¥9®ÏÂ4È<ÇsC€€YLÚ,9fÂ3û­<ÄQ®C€S<].éC€€·<]0¸½wñ8Õ -:ÛùÊ@c!À¡rA2–½²–½K¶lK¨NÁŠÚaÁŒV¼÷ 4Ñ8¿8ðÈ5cçI8Ã*Ûµ½mÛ8ÀÛb¹7ƒ8ÀWq~ :†©B¾ÈvC.~}BÎ<:îb0>`–Ž<§=”>Lç§9 é[Â3û­<È„ÈC€€ -_LÁÀ8ëäÂ3û­<’Ü„C€€ ¸»<(EV>LËp8ã²"Â3û­<ðEC€€ U÷)Láæ8ÒØÂ3þÉ<‚èC€ä@yØ-À 87hpe7lYKvÂìK•”hÁ,ÖÁ‘Ùƒ–)7 8‚”j5+oü8‚; ¶Ét`8‚>8N Ž8äñ~ 8ZÚÞCLyqCzBÊ):®½>pÝÏLÝ¥8š¨Â3þÉL¿â8¥1DÂ4<–ÌC€õ<}XTC€Û<}UÑ»ðÙÓ@ƒÀ GY³ñýL3ñýLK«êÏKÆ …Á“|Á”I ]ŒK¶ì7l8‹ì5û*8OšÓ·nDl8m]Ö8)k8lzqK3 7óRQC•¶Cš@BÉ1';/ ‡>¨°ƒ<Ì­>L¿â8€ ‹Â4<¯[C€õ<‹òC€Û<‹ïý»¥`…8µ2ª<Œ>Ä@†-ÌÀ 4Ú7ð?¯7óG{KÁçKÓ›(Á”íÁ”Ü[r¥Ñc¶f 8Hòê7¼V8H= 1.: aux = saux return (abs(a ** 2 * (1. - eps) / 2. * math.acos(aux))) def test_angles(phi_min=0.05, phi_max=0.2): a = 40. astep = 1.1 eps = 0.1 # r = a a1 = a * (1. - ((1. - 1. / astep) / 2.)) a2 = a * (1. + (astep - 1.) / 2.) r3 = a2 r4 = a1 aux = min((a2 - a1), 3.) sarea = (a2 - a1) * aux dphi = max(min((aux / a), phi_max), phi_min) phi = dphi / 2. phi2 = phi - dphi / 2. aux = 1. - eps r3 = a2 * aux / np.sqrt((aux * np.cos(phi2))**2 + (np.sin(phi2))**2) r4 = a1 * aux / np.sqrt((aux * np.cos(phi2))**2 + (np.sin(phi2))**2) ncount = 0 while phi < np.pi*2: phi1 = phi2 r1 = r4 r2 = r3 phi2 = phi + dphi / 2. aux = 1. - eps r3 = a2 * aux / np.sqrt((aux * np.cos(phi2))**2 + (np.sin(phi2))**2) r4 = a1 * aux / np.sqrt((aux * np.cos(phi2))**2 + (np.sin(phi2))**2) sa1 = sector_area(a1, eps, phi1, r1) sa2 = sector_area(a2, eps, phi1, r2) sa3 = sector_area(a2, eps, phi2, r3) sa4 = sector_area(a1, eps, phi2, r4) area = abs((sa3 - sa2) - (sa4 - sa1)) # Compute step to next sector and its angular span dphi = max(min((sarea / (r3 - r4) / r4), phi_max), phi_min) phistep = dphi / 2. + phi2 - phi ncount += 1 assert area > 11.0 and area < 12.4 phi = phi + min(phistep, 0.5) # r = (a * (1. - eps) / np.sqrt(((1. - eps) * np.cos(phi))**2 + # (np.sin(phi))**2)) assert ncount == 72 photutils-0.4/photutils/isophote/tests/test_ellipse.py0000644000214200020070000001072713175634532025710 0ustar lbradleySTSCI\science00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst from __future__ import (absolute_import, division, print_function, unicode_literals) import numpy as np import pytest from astropy.io import fits from astropy.tests.helper import remote_data from .make_test_data import make_test_image from ..ellipse import Ellipse from ..geometry import EllipseGeometry from ..isophote import Isophote, IsophoteList from ...datasets import get_path try: import scipy # noqa HAS_SCIPY = True except ImportError: HAS_SCIPY = False # define an off-center position and a tilted sma POS = 384 PA = 10. / 180. * np.pi # build off-center test data. It's fine to have a single np array to use # in all tests that need it, but do not use a single instance of # EllipseGeometry. The code may eventually modify it's contents. The safe # bet is to build it wherever it's needed. The cost is negligible. OFFSET_GALAXY = make_test_image(x0=POS, y0=POS, pa=PA, noise=1.e-12, random_state=123) @pytest.mark.skipif('not HAS_SCIPY') class TestEllipse(object): def setup_class(self): # centered, tilted galaxy. 
        self.data = make_test_image(pa=PA, random_state=123)

    @remote_data
    def test_find_center(self):
        path = get_path('isophote/M51.fits', location='photutils-datasets',
                        cache=True)
        hdu = fits.open(path)
        data = hdu[0].data
        hdu.close()

        geometry = EllipseGeometry(252, 253, 10., 0.2, np.pi/2)
        geometry.find_center(data)

        assert geometry.x0 == 257.
        assert geometry.y0 == 258.

    def test_basic(self):
        ellipse = Ellipse(self.data)
        isophote_list = ellipse.fit_image()

        assert isinstance(isophote_list, IsophoteList)
        assert len(isophote_list) > 1
        assert isinstance(isophote_list[0], Isophote)

        # verify that the list is properly sorted in semi-major axis length
        assert isophote_list[-1] > isophote_list[0]

        # the fit should stop where the gradient loses reliability.
        assert len(isophote_list) == 67
        assert isophote_list[-1].stop_code == 5

    def test_fit_one_ellipse(self):
        ellipse = Ellipse(self.data)
        isophote = ellipse.fit_isophote(40.)

        assert isinstance(isophote, Isophote)
        assert isophote.valid

    def test_offcenter_fail(self):
        # A first guess ellipse that is centered in the image frame.
        # This should result in failure since the real galaxy
        # image is off-center by a large offset.
        ellipse = Ellipse(OFFSET_GALAXY)
        isophote_list = ellipse.fit_image()

        assert len(isophote_list) == 0

    def test_offcenter_fit(self):
        # A first guess ellipse that is roughly centered on the
        # offset galaxy image.
        g = EllipseGeometry(POS+5, POS+5, 10., eps=0.2, pa=PA, astep=0.1)
        ellipse = Ellipse(OFFSET_GALAXY, geometry=g)
        isophote_list = ellipse.fit_image()

        # the fit should stop when too many potential sample
        # points fall outside the image frame.
        assert len(isophote_list) == 63
        assert isophote_list[-1].stop_code == 1

    def test_offcenter_go_beyond_frame(self):
        # Same as before, but now force the fit to go
        # beyond the image frame limits.
        g = EllipseGeometry(POS+5, POS+5, 10., eps=0.2, pa=PA, astep=0.1)
        ellipse = Ellipse(OFFSET_GALAXY, geometry=g)
        isophote_list = ellipse.fit_image(maxsma=400.)

        # the fit should go to maxsma, but with fixed geometry
        assert len(isophote_list) == 71
        assert isophote_list[-1].stop_code == 4

        # check that no zero-valued intensities were left behind
        # in the sample arrays when sampling outside the image.
for iso in isophote_list: assert not np.any(iso.sample.values[2] == 0) @remote_data @pytest.mark.skipif('not HAS_SCIPY') class TestEllipseOnRealData(object): def test_basic(self): path = get_path('isophote/M105-S001-RGB.fits', location='photutils-datasets', cache=True) hdu = fits.open(path) data = hdu[0].data[0] hdu.close() g = EllipseGeometry(530., 511, 30., 0.2, 20./180.*3.14) ellipse = Ellipse(data, geometry=g) isophote_list = ellipse.fit_image() assert len(isophote_list) == 57 # check that isophote at about sma=70 got an uneventful fit assert isophote_list.get_closest(70.).stop_code == 0 photutils-0.4/photutils/isophote/tests/test_fitter.py0000644000214200020070000001674413175634532025555 0ustar lbradleySTSCI\science00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst from __future__ import (absolute_import, division, print_function, unicode_literals) import numpy as np import pytest from astropy.io import fits from astropy.tests.helper import remote_data from .make_test_data import make_test_image from ..fitter import EllipseFitter, CentralEllipseFitter from ..geometry import EllipseGeometry from ..harmonics import fit_first_and_second_harmonics from ..integrator import MEAN from ..isophote import Isophote from ..sample import EllipseSample, CentralEllipseSample from ...datasets import get_path try: import scipy # noqa HAS_SCIPY = True except ImportError: HAS_SCIPY = False DATA = make_test_image(random_state=123) DEFAULT_POS = 256 def test_gradient(): sample = EllipseSample(DATA, 40.) sample.update() assert sample.mean == pytest.approx(200.02, abs=0.01) assert sample.gradient == pytest.approx(-4.222, abs=0.001) assert sample.gradient_error == pytest.approx(0.0003, abs=0.0001) assert sample.gradient_relative_error == pytest.approx(7.45e-05, abs=1.e-5) assert sample.sector_area == pytest.approx(2.00, abs=0.01) @pytest.mark.skipif('not HAS_SCIPY') def test_fitting_raw(): """ This test performs a raw (no EllipseFitter), 1-step correction in one single ellipse coefficient. """ # pick first guess ellipse that is off in just # one of the parameters (eps). sample = EllipseSample(DATA, 40., eps=2*0.2) sample.update() s = sample.extract() harmonics = fit_first_and_second_harmonics(s[0], s[2]) y0, a1, b1, a2, b2 = harmonics[0] # when eps is off, b2 is the largest (in absolute value). assert abs(b2) > abs(a1) assert abs(b2) > abs(b1) assert abs(b2) > abs(a2) correction = (b2 * 2. * (1. - sample.geometry.eps) / sample.geometry.sma / sample.gradient) new_eps = sample.geometry.eps - correction # got closer to test data (eps=0.2) assert new_eps == pytest.approx(0.21, abs=0.01) @pytest.mark.skipif('not HAS_SCIPY') def test_fitting_small_radii(): sample = EllipseSample(DATA, 2.) 
    fitter = EllipseFitter(sample)
    isophote = fitter.fit()

    assert isinstance(isophote, Isophote)
    assert isophote.ndata == 13


@pytest.mark.skipif('not HAS_SCIPY')
def test_fitting_eps():
    # initial guess is off in the eps parameter
    sample = EllipseSample(DATA, 40., eps=2*0.2)
    fitter = EllipseFitter(sample)
    isophote = fitter.fit()

    assert isinstance(isophote, Isophote)
    g = isophote.sample.geometry
    assert g.eps >= 0.19
    assert g.eps <= 0.21


@pytest.mark.skipif('not HAS_SCIPY')
def test_fitting_pa():
    data = make_test_image(pa=np.pi/4, noise=0.01, random_state=123)

    # initial guess is off in the pa parameter
    sample = EllipseSample(data, 40)
    fitter = EllipseFitter(sample)
    isophote = fitter.fit()

    g = isophote.sample.geometry
    assert g.pa >= (np.pi/4 - 0.05)
    assert g.pa <= (np.pi/4 + 0.05)


@pytest.mark.skipif('not HAS_SCIPY')
def test_fitting_xy():
    pos = DEFAULT_POS - 5
    data = make_test_image(x0=pos, y0=pos, random_state=123)

    # initial guess is off in the x0 and y0 parameters
    sample = EllipseSample(data, 40)
    fitter = EllipseFitter(sample)
    isophote = fitter.fit()

    g = isophote.sample.geometry
    assert g.x0 >= (pos - 1)
    assert g.x0 <= (pos + 1)
    assert g.y0 >= (pos - 1)
    assert g.y0 <= (pos + 1)


@pytest.mark.skipif('not HAS_SCIPY')
def test_fitting_all():
    # build test image that is off from the defaults
    # assumed by the EllipseSample constructor.
    POS = DEFAULT_POS - 5
    ANGLE = np.pi / 4
    EPS = 2 * 0.2
    data = make_test_image(x0=POS, y0=POS, eps=EPS, pa=ANGLE,
                           random_state=123)
    sma = 60.

    # initial guess is off in all parameters. We find that the initial
    # guesses, especially for position angle, must be kinda close to the
    # actual value. 20% off max seems to work in this case of high SNR.
    sample = EllipseSample(data, sma, position_angle=(1.2 * ANGLE))
    fitter = EllipseFitter(sample)
    isophote = fitter.fit()

    assert isophote.stop_code == 0

    g = isophote.sample.geometry
    assert g.x0 >= (POS - 1.5)    # position within 1.5 pixel
    assert g.x0 <= (POS + 1.5)
    assert g.y0 >= (POS - 1.5)
    assert g.y0 <= (POS + 1.5)
    assert g.eps >= (EPS - 0.01)  # eps within 0.01
    assert g.eps <= (EPS + 0.01)
    assert g.pa >= (ANGLE - 0.05)  # pa within 5 deg
    assert g.pa <= (ANGLE + 0.05)

    sample_m = EllipseSample(data, sma, position_angle=(1.2 * ANGLE),
                             integrmode=MEAN)
    fitter_m = EllipseFitter(sample_m)
    isophote_m = fitter_m.fit()

    assert isophote_m.stop_code == 0


@remote_data
@pytest.mark.skipif('not HAS_SCIPY')
class TestM51(object):
    def setup_class(self):
        path = get_path('isophote/M51.fits', location='photutils-datasets',
                        cache=True)
        hdu = fits.open(path)
        self.data = hdu[0].data
        hdu.close()

    def test_m51(self):
        # here we evaluate the detailed convergence behavior
        # for a particular ellipse where we can see the eps
        # parameter jumping back and forth.
        # sample = EllipseSample(self.data, 13.31000001, eps=0.16,
        #                        position_angle=((-37.5+90)/180.*np.pi))
        # sample.update()
        # fitter = EllipseFitter(sample)
        # isophote = fitter.fit()

        # we start the fit with initial values taken from
        # previous isophote, as determined by the old code.

        # sample taken in high SNR region
        sample = EllipseSample(self.data, 21.44, eps=0.18,
                               position_angle=(36./180.*np.pi))
        fitter = EllipseFitter(sample)
        isophote = fitter.fit()

        assert isophote.ndata == 119
        assert isophote.intens == pytest.approx(685.4, abs=0.1)

        # last sample taken by the original code, before turning inwards.
sample = EllipseSample(self.data, 61.16, eps=0.219, position_angle=((77.5+90)/180*np.pi)) fitter = EllipseFitter(sample) isophote = fitter.fit() assert isophote.ndata == 382 assert isophote.intens == pytest.approx(155.0, abs=0.1) def test_m51_outer(self): # sample taken at the outskirts of the image, so many # data points lay outside the image frame. This checks # for the presence of gaps in the sample arrays. sample = EllipseSample(self.data, 330., eps=0.2, position_angle=((90)/180*np.pi), integrmode='median') fitter = EllipseFitter(sample) isophote = fitter.fit() assert not np.any(isophote.sample.values[2] == 0) def test_m51_central(self): # this code finds central x and y offset by about 0.1 pixel wrt the # spp code. In here we use as input the position computed by this # code, thus this test is checking just the extraction algorithm. g = EllipseGeometry(257.02, 258.1, 0.0, 0.0, 0.0, 0.1, False) sample = CentralEllipseSample(self.data, 0.0, geometry=g) fitter = CentralEllipseFitter(sample) isophote = fitter.fit() # the central pixel intensity is about 3% larger than # found by the spp code. assert isophote.ndata == 1 assert isophote.intens <= 7560. assert isophote.intens >= 7550. photutils-0.4/photutils/isophote/tests/test_geometry.py0000644000214200020070000001535013175634532026103 0ustar lbradleySTSCI\science00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst from __future__ import (absolute_import, division, print_function, unicode_literals) import numpy as np import pytest from ..geometry import EllipseGeometry @pytest.mark.parametrize('astep, linear_growth', [(0.2, False), (20., True)]) def test_geometry(astep, linear_growth): geometry = EllipseGeometry(255., 255., 100., 0.4, np.pi/2, astep, linear_growth) sma1, sma2 = geometry.bounding_ellipses() assert (sma1, sma2) == pytest.approx((90.0, 110.0), abs=0.01) # using an arbitrary angle of 0.5 rad. This is to avoid a polar # vector that sits on top of one of the ellipse's axis. vertex_x, vertex_y = geometry.initialize_sector_geometry(0.6) assert geometry.sector_angular_width == pytest.approx(0.0571, abs=0.01) assert geometry.sector_area == pytest.approx(63.83, abs=0.01) assert vertex_x[0] == pytest.approx(215.4, abs=0.1) assert vertex_x[1] == pytest.approx(206.6, abs=0.1) assert vertex_x[2] == pytest.approx(213.5, abs=0.1) assert vertex_x[3] == pytest.approx(204.3, abs=0.1) assert vertex_y[0] == pytest.approx(316.1, abs=0.1) assert vertex_y[1] == pytest.approx(329.7, abs=0.1) assert vertex_y[2] == pytest.approx(312.5, abs=0.1) assert vertex_y[3] == pytest.approx(325.3, abs=0.1) def test_to_polar(): # trivial case of a circle centered in (0.,0.) geometry = EllipseGeometry(0., 0., 100., 0.0, 0., 0.2, False) r, p = geometry.to_polar(100., 0.) assert (r, p) == pytest.approx((100., 0.), abs=(0.1, 0.0001)) r, p = geometry.to_polar(0., 100.) assert (r, p) == pytest.approx((100., np.pi/2.), abs=(0.1, 0.0001)) # vector with length 100. at 45 deg angle r, p = geometry.to_polar(70.71, 70.71) # these have to be tested separately. For some unknown reason, using # a combined assert statement as above raises an TypeError: # unorderable types: tuple() < int() # assert (r, p) == pytest.approx((100., np.pi/4.), abs=(0.1, 0.0001)) assert r == pytest.approx(100., abs=0.1) assert p == pytest.approx(np.pi/4., abs=0.0001) # position angle tilted 45 deg from X axis geometry = EllipseGeometry(0., 0., 100., 0.0, np.pi/4., 0.2, False) r, p = geometry.to_polar(100., 0.) 
    assert (r, p) == pytest.approx((100., np.pi*7./4), abs=(0.1, 0.0001))

    r, p = geometry.to_polar(0., 100.)
    assert (r, p) == pytest.approx((100., np.pi/4.), abs=(0.1, 0.0001))

    # vector with length 100. at 45 deg angle
    r, p = geometry.to_polar(70.71, 70.71)
    # same error as above
    # assert (r, p) == pytest.approx((100., np.pi*2.), abs=(0.1, 0.0001))
    assert r == pytest.approx(100., abs=0.1)
    assert p == pytest.approx(np.pi*2., abs=0.0001)


def test_area():
    # circle with center at origin
    geometry = EllipseGeometry(0., 0., 100., 0.0, 0., 0.2, False)

    # sector at 45 deg on circle
    vertex_x, vertex_y = geometry.initialize_sector_geometry(45./180.*np.pi)
    assert vertex_x[0] == pytest.approx(65.21, abs=0.01)
    assert vertex_x[1] == pytest.approx(79.70, abs=0.01)
    assert vertex_x[2] == pytest.approx(62.03, abs=0.01)
    assert vertex_x[3] == pytest.approx(75.81, abs=0.01)
    assert vertex_y[0] == pytest.approx(62.03, abs=0.01)
    assert vertex_y[1] == pytest.approx(75.81, abs=0.01)
    assert vertex_y[2] == pytest.approx(65.21, abs=0.01)
    assert vertex_y[3] == pytest.approx(79.70, abs=0.01)

    # sector at 0 deg on circle
    vertex_x, vertex_y = geometry.initialize_sector_geometry(0)
    assert vertex_x[0] == pytest.approx(89.97, abs=0.01)
    assert vertex_x[1] == pytest.approx(109.97, abs=0.01)
    assert vertex_x[2] == pytest.approx(89.97, abs=0.01)
    assert vertex_x[3] == pytest.approx(109.96, abs=0.01)
    assert vertex_y[0] == pytest.approx(-2.25, abs=0.01)
    assert vertex_y[1] == pytest.approx(-2.75, abs=0.01)
    assert vertex_y[2] == pytest.approx(2.25, abs=0.01)
    assert vertex_y[3] == pytest.approx(2.75, abs=0.01)


def test_area2():
    # circle with center at 100.,100.
    geometry = EllipseGeometry(100., 100., 100., 0.0, 0., 0.2, False)

    # sector at 45 deg on circle
    vertex_x, vertex_y = geometry.initialize_sector_geometry(45./180.*np.pi)
    assert vertex_x[0] == pytest.approx(165.21, abs=0.01)
    assert vertex_x[1] == pytest.approx(179.70, abs=0.01)
    assert vertex_x[2] == pytest.approx(162.03, abs=0.01)
    assert vertex_x[3] == pytest.approx(175.81, abs=0.01)
    assert vertex_y[0] == pytest.approx(162.03, abs=0.01)
    assert vertex_y[1] == pytest.approx(175.81, abs=0.01)
    assert vertex_y[2] == pytest.approx(165.21, abs=0.01)
    assert vertex_y[3] == pytest.approx(179.70, abs=0.01)

    # sector at 225 deg on circle
    vertex_x, vertex_y = geometry.initialize_sector_geometry(225./180.*np.pi)
    assert vertex_x[0] == pytest.approx(34.791, abs=0.01)
    assert vertex_x[1] == pytest.approx(20.30, abs=0.01)
    assert vertex_x[2] == pytest.approx(37.97, abs=0.01)
    assert vertex_x[3] == pytest.approx(24.19, abs=0.01)
    assert vertex_y[0] == pytest.approx(37.97, abs=0.01)
    assert vertex_y[1] == pytest.approx(24.19, abs=0.01)
    assert vertex_y[2] == pytest.approx(34.79, abs=0.01)
    assert vertex_y[3] == pytest.approx(20.30, abs=0.01)


def test_reset_sma():
    geometry = EllipseGeometry(0., 0., 100., 0.0, 0., 0.2, False)
    sma, step = geometry.reset_sma(0.2)
    assert sma == pytest.approx(83.33, abs=0.01)
    assert step == pytest.approx(-0.1666, abs=0.001)

    geometry = EllipseGeometry(0., 0., 100., 0.0, 0., 20., True)
    sma, step = geometry.reset_sma(20.)
    assert sma == pytest.approx(80.0, abs=0.01)
    assert step == pytest.approx(-20.0, abs=0.01)


def test_update_sma():
    geometry = EllipseGeometry(0., 0., 100., 0.0, 0., 0.2, False)
    sma = geometry.update_sma(0.2)
    assert sma == pytest.approx(120.0, abs=0.01)

    geometry = EllipseGeometry(0., 0., 100., 0.0, 0., 20., True)
    sma = geometry.update_sma(20.)
assert sma == pytest.approx(120.0, abs=0.01) def test_polar_angle_sector_limits(): geometry = EllipseGeometry(0., 0., 100., 0.3, np.pi/4, 0.2, False) geometry.initialize_sector_geometry(np.pi/3) phi1, phi2 = geometry.polar_angle_sector_limits() assert phi1 == pytest.approx(1.022198, abs=0.0001) assert phi2 == pytest.approx(1.072198, abs=0.0001) def test_bounding_ellipses(): geometry = EllipseGeometry(0., 0., 100., 0.3, np.pi/4, 0.2, False) sma1, sma2 = geometry.bounding_ellipses() assert (sma1, sma2) == pytest.approx((90.0, 110.0), abs=0.01) def test_radius(): geometry = EllipseGeometry(0., 0., 100., 0.3, np.pi/4, 0.2, False) r = geometry.radius(0.0) assert r == pytest.approx(100.0, abs=0.01) r = geometry.radius(np.pi/2) assert r == pytest.approx(70.0, abs=0.01) photutils-0.4/photutils/isophote/tests/test_harmonics.py0000644000214200020070000001364413175634532026237 0ustar lbradleySTSCI\science00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst from __future__ import (absolute_import, division, print_function, unicode_literals) import numpy as np import pytest from .make_test_data import make_test_image from ..harmonics import (fit_first_and_second_harmonics, fit_upper_harmonic, first_and_second_harmonic_function) from ..sample import EllipseSample try: from scipy.optimize import leastsq # noqa HAS_SCIPY = True except ImportError: HAS_SCIPY = False @pytest.mark.skipif('not HAS_SCIPY') def test_harmonics_1(): # this is an almost as-is example taken from stackoverflow N = 100 # number of data points t = np.linspace(0, 4*np.pi, N) # create artificial data with noise: # mean = 0.5, amplitude = 3., phase = 0.1, noise-std = 0.01 data = 3.0 * np.sin(t + 0.1) + 0.5 + 0.01 * np.random.randn(N) # first guesses for harmonic parameters guess_mean = np.mean(data) guess_std = 3 * np.std(data) / 2**0.5 guess_phase = 0 # Minimize the difference between the actual data and our "guessed" # parameters # optimize_func = lambda x: x[0] * np.sin(t + x[1]) + x[2] - data def optimize_func(x): return x[0] * np.sin(t + x[1]) + x[2] - data est_std, est_phase, est_mean = leastsq( optimize_func, [guess_std, guess_phase, guess_mean])[0] # recreate the fitted curve using the optimized parameters data_fit = est_std * np.sin(t + est_phase) + est_mean residual = data - data_fit assert np.mean(residual) == pytest.approx(0.00, abs=0.001) assert np.std(residual) == pytest.approx(0.01, abs=0.01) @pytest.mark.skipif('not HAS_SCIPY') def test_harmonics_2(): # this uses the actual functional form used for fitting ellipses N = 100 E = np.linspace(0, 4*np.pi, N) y0_0 = 100. a1_0 = 10. b1_0 = 5. a2_0 = 8. b2_0 = 2. data = (y0_0 + a1_0*np.sin(E) + b1_0*np.cos(E) + a2_0*np.sin(2*E) + b2_0*np.cos(2*E) + 0.01*np.random.randn(N)) harmonics = fit_first_and_second_harmonics(E, data) y0, a1, b1, a2, b2 = harmonics[0] data_fit = (y0 + a1*np.sin(E) + b1*np.cos(E) + a2*np.sin(2*E) + b2*np.cos(2*E) + 0.01*np.random.randn(N)) residual = data - data_fit assert np.mean(residual) == pytest.approx(0.00, abs=0.01) assert np.std(residual) == pytest.approx(0.015, abs=0.01) @pytest.mark.skipif('not HAS_SCIPY') def test_harmonics_3(): """Tests an upper harmonic fit.""" N = 100 E = np.linspace(0, 4*np.pi, N) y0_0 = 100. a1_0 = 10. b1_0 = 5. 
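    # build and fit a single upper harmonic of order n=3: a constant
    # term plus sin(3*E) and cos(3*E) components.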
order = 3 data = (y0_0 + a1_0*np.sin(order*E) + b1_0*np.cos(order*E) + 0.01*np.random.randn(N)) harmonic = fit_upper_harmonic(E, data, order) y0, a1, b1 = harmonic[0] data_fit = (y0 + a1*np.sin(order*E) + b1*np.cos(order*E) + 0.01*np.random.randn(N)) residual = data - data_fit assert np.mean(residual) == pytest.approx(0.00, abs=0.01) assert np.std(residual) == pytest.approx(0.015, abs=0.01) @pytest.mark.skipif('not HAS_SCIPY') class TestFitEllipseSamples(object): def setup_class(self): # major axis parallel to X image axis self.data1 = make_test_image(random_state=123) # major axis tilted 45 deg wrt X image axis self.data2 = make_test_image(pa=np.pi/4, random_state=123) def test_fit_ellipsesample_1(self): sample = EllipseSample(self.data1, 40.) s = sample.extract() harmonics = fit_first_and_second_harmonics(s[0], s[2]) y0, a1, b1, a2, b2 = harmonics[0] assert np.mean(y0) == pytest.approx(200.019, abs=0.001) assert np.mean(a1) == pytest.approx(-0.000138, abs=0.001) assert np.mean(b1) == pytest.approx(0.000254, abs=0.001) assert np.mean(a2) == pytest.approx(-5.658e-05, abs=0.001) assert np.mean(b2) == pytest.approx(-0.00911, abs=0.001) # check that harmonics subtract nicely model = first_and_second_harmonic_function( s[0], np.array([y0, a1, b1, a2, b2])) residual = s[2] - model assert np.mean(residual) == pytest.approx(0.00, abs=0.001) assert np.std(residual) == pytest.approx(0.015, abs=0.01) def test_fit_ellipsesample_2(self): # initial guess is rounder than actual image sample = EllipseSample(self.data1, 40., eps=0.1) s = sample.extract() harmonics = fit_first_and_second_harmonics(s[0], s[2]) y0, a1, b1, a2, b2 = harmonics[0] assert np.mean(y0) == pytest.approx(188.686, abs=0.001) assert np.mean(a1) == pytest.approx(0.000283, abs=0.001) assert np.mean(b1) == pytest.approx(0.00692, abs=0.001) assert np.mean(a2) == pytest.approx(-0.000215, abs=0.001) assert np.mean(b2) == pytest.approx(10.153, abs=0.001) def test_fit_ellipsesample_3(self): # initial guess for center is offset sample = EllipseSample(self.data1, x0=220., y0=210., sma=40.) 
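        # an offset initial center shows up in the extracted sample as
        # large first-harmonic amplitudes (a1, b1) in the fit below.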
s = sample.extract() harmonics = fit_first_and_second_harmonics(s[0], s[2]) y0, a1, b1, a2, b2 = harmonics[0] assert np.mean(y0) == pytest.approx(152.660, abs=0.001) assert np.mean(a1) == pytest.approx(55.338, abs=0.001) assert np.mean(b1) == pytest.approx(33.091, abs=0.001) assert np.mean(a2) == pytest.approx(33.036, abs=0.001) assert np.mean(b2) == pytest.approx(-14.306, abs=0.001) def test_fit_ellipsesample_4(self): sample = EllipseSample(self.data2, 40., eps=0.4) s = sample.extract() harmonics = fit_first_and_second_harmonics(s[0], s[2]) y0, a1, b1, a2, b2 = harmonics[0] assert np.mean(y0) == pytest.approx(245.102, abs=0.001) assert np.mean(a1) == pytest.approx(-0.003108, abs=0.001) assert np.mean(b1) == pytest.approx(-0.0578, abs=0.001) assert np.mean(a2) == pytest.approx(28.781, abs=0.001) assert np.mean(b2) == pytest.approx(-63.184, abs=0.001) photutils-0.4/photutils/isophote/tests/test_integrator.py0000644000214200020070000001245213175634532026426 0ustar lbradleySTSCI\science00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst from __future__ import (absolute_import, division, print_function, unicode_literals) import numpy as np import numpy.ma as ma import pytest from astropy.io import fits from astropy.tests.helper import remote_data from ..sample import EllipseSample from ..integrator import NEAREST_NEIGHBOR, BILINEAR, MEAN, MEDIAN from ...datasets import get_path @remote_data class TestData(object): def setup_class(self): path = get_path('isophote/synth_highsnr.fits', location='photutils-datasets', cache=True) hdu = fits.open(path) self.data = hdu[0].data hdu.close() def make_sample(self, masked=False, sma=40., integrmode=BILINEAR): if masked: data = ma.masked_values(self.data, 200., atol=10.0, rtol=0.) else: data = self.data sample = EllipseSample(data, sma, integrmode=integrmode) s = sample.extract() assert len(s) == 3 assert len(s[0]) == len(s[1]) assert len(s[0]) == len(s[2]) return s, sample @remote_data class TestUnmasked(TestData): def test_bilinear(self): s, sample = self.make_sample() assert len(s[0]) == 225 # intensities assert np.mean(s[2]) == pytest.approx(200.76, abs=0.01) assert np.std(s[2]) == pytest.approx(21.55, abs=0.01) # radii assert np.max(s[1]) == pytest.approx(40.0, abs=0.01) assert np.min(s[1]) == pytest.approx(32.0, abs=0.01) assert sample.total_points == 225 assert sample.actual_points == 225 def test_bilinear_small(self): # small radius forces sub-pixel sampling s, sample = self.make_sample(sma=10.) 
# intensities assert np.mean(s[2]) == pytest.approx(1045.4, abs=0.1) assert np.std(s[2]) == pytest.approx(143.0, abs=0.1) # radii assert np.max(s[1]) == pytest.approx(10.0, abs=0.1) assert np.min(s[1]) == pytest.approx(8.0, abs=0.1) assert sample.total_points == 57 assert sample.actual_points == 57 def test_nearest_neighbor(self): s, sample = self.make_sample(integrmode=NEAREST_NEIGHBOR) assert len(s[0]) == 225 # intensities assert np.mean(s[2]) == pytest.approx(201.1, abs=0.1) assert np.std(s[2]) == pytest.approx(21.8, abs=0.1) # radii assert np.max(s[1]) == pytest.approx(40.0, abs=0.01) assert np.min(s[1]) == pytest.approx(32.0, abs=0.01) assert sample.total_points == 225 assert sample.actual_points == 225 def test_mean(self): s, sample = self.make_sample(integrmode=MEAN) assert len(s[0]) == 64 # intensities assert np.mean(s[2]) == pytest.approx(199.9, abs=0.1) assert np.std(s[2]) == pytest.approx(21.3, abs=0.1) # radii assert np.max(s[1]) == pytest.approx(40.0, abs=0.01) assert np.min(s[1]) == pytest.approx(32.0, abs=0.01) assert sample.sector_area == pytest.approx(12.4, abs=0.1) assert sample.total_points == 64 assert sample.actual_points == 64 def test_mean_small(self): s, sample = self.make_sample(sma=5., integrmode=MEAN) assert len(s[0]) == 29 # intensities assert np.mean(s[2]) == pytest.approx(2339.0, abs=0.1) assert np.std(s[2]) == pytest.approx(284.7, abs=0.1) # radii assert np.max(s[1]) == pytest.approx(5.0, abs=0.01) assert np.min(s[1]) == pytest.approx(4.0, abs=0.01) assert sample.sector_area == pytest.approx(2.0, abs=0.1) assert sample.total_points == 29 assert sample.actual_points == 29 def test_median(self): s, sample = self.make_sample(integrmode=MEDIAN) assert len(s[0]) == 64 # intensities assert np.mean(s[2]) == pytest.approx(199.9, abs=0.1) assert np.std(s[2]) == pytest.approx(21.3, abs=0.1) # radii assert np.max(s[1]) == pytest.approx(40.0, abs=0.01) assert np.min(s[1]) == pytest.approx(32.01, abs=0.01) assert sample.sector_area == pytest.approx(12.4, abs=0.1) assert sample.total_points == 64 assert sample.actual_points == 64 @remote_data class TestMasked(TestData): def test_bilinear(self): s, sample = self.make_sample(masked=True, integrmode=BILINEAR) assert len(s[0]) == 157 # intensities assert np.mean(s[2]) == pytest.approx(201.52, abs=0.01) assert np.std(s[2]) == pytest.approx(25.21, abs=0.01) # radii assert np.max(s[1]) == pytest.approx(40.0, abs=0.01) assert np.min(s[1]) == pytest.approx(32.0, abs=0.01) assert sample.total_points == 225 assert sample.actual_points == 157 def test_mean(self): s, sample = self.make_sample(masked=True, integrmode=MEAN) assert len(s[0]) == 51 # intensities assert np.mean(s[2]) == pytest.approx(199.9, abs=0.1) assert np.std(s[2]) == pytest.approx(24.12, abs=0.1) # radii assert np.max(s[1]) == pytest.approx(40.0, abs=0.01) assert np.min(s[1]) == pytest.approx(32.0, abs=0.01) assert sample.sector_area == pytest.approx(12.4, abs=0.1) assert sample.total_points == 64 assert sample.actual_points == 51 photutils-0.4/photutils/isophote/tests/test_isophote.py0000644000214200020070000002061113175634532026076 0ustar lbradleySTSCI\science00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst from __future__ import (absolute_import, division, print_function, unicode_literals) import numpy as np import pytest from astropy.io import fits from astropy.tests.helper import remote_data from .make_test_data import make_test_image from ..fitter import EllipseFitter from ..isophote import Isophote, IsophoteList from ..sample import 
EllipseSample from ...datasets import get_path try: import scipy # noqa HAS_SCIPY = True except ImportError: HAS_SCIPY = False @remote_data @pytest.mark.skipif('not HAS_SCIPY') class TestIsophote(object): def setup_class(self): path = get_path('isophote/M51.fits', location='photutils-datasets', cache=True) hdu = fits.open(path) self.data = hdu[0].data hdu.close() def test_fit(self): # low noise image, fitted perfectly by sample data = make_test_image(noise=1.e-10, random_state=123) sample = EllipseSample(data, 40) fitter = EllipseFitter(sample) iso = fitter.fit(maxit=400) assert iso.valid assert iso.stop_code == 0 or iso.stop_code == 2 # fitted values assert iso.intens <= 201. assert iso.intens >= 199. assert iso.int_err <= 0.0010 assert iso.int_err >= 0.0009 assert iso.pix_stddev <= 0.03 assert iso.pix_stddev >= 0.02 assert abs(iso.grad) <= 4.25 assert abs(iso.grad) >= 4.20 # integrals assert iso.tflux_e <= 1.85E6 assert iso.tflux_e >= 1.82E6 assert iso.tflux_c <= 2.025E6 assert iso.tflux_c >= 2.022E6 # deviations from perfect ellipticity assert abs(iso.a3) <= 0.01 assert abs(iso.b3) <= 0.01 assert abs(iso.a4) <= 0.01 assert abs(iso.b4) <= 0.01 def test_m51(self): sample = EllipseSample(self.data, 21.44) fitter = EllipseFitter(sample) iso = fitter.fit() assert iso.valid assert iso.stop_code == 0 or iso.stop_code == 2 # geometry g = iso.sample.geometry assert g.x0 >= (257 - 1.5) # position within 1.5 pixel assert g.x0 <= (257 + 1.5) assert g.y0 >= (259 - 1.5) assert g.y0 <= (259 + 2.0) assert g.eps >= (0.19 - 0.05) # eps within 0.05 assert g.eps <= (0.19 + 0.05) assert g.pa >= (0.62 - 0.05) # pa within 5 deg assert g.pa <= (0.62 + 0.05) # fitted values assert iso.intens == pytest.approx(682.9, abs=0.1) assert iso.rms == pytest.approx(83.27, abs=0.01) assert iso.int_err == pytest.approx(7.63, abs=0.01) assert iso.pix_stddev == pytest.approx(117.8, abs=0.1) assert iso.grad == pytest.approx(-36.08, abs=0.1) # integrals assert iso.tflux_e <= 1.20e6 assert iso.tflux_e >= 1.19e6 assert iso.tflux_c <= 1.38e6 assert iso.tflux_c >= 1.36e6 # deviations from perfect ellipticity assert abs(iso.a3) <= 0.05 assert abs(iso.b3) <= 0.05 assert abs(iso.a4) <= 0.05 assert abs(iso.b4) <= 0.05 def test_m51_niter(self): # compares with old STSDAS task. In this task, the # default for the starting value of SMA is 10; it # fits with 20 iterations. sample = EllipseSample(self.data, 10) fitter = EllipseFitter(sample) iso = fitter.fit() assert iso.valid assert iso.niter == 50 class TestIsophoteList(object): def setup_class(self): data = make_test_image(random_state=123) self.slen = 5 self.isolist_sma10 = self.build_list(data, sma0=10., slen=self.slen) self.isolist_sma100 = self.build_list(data, sma0=100., slen=self.slen) self.isolist_sma200 = self.build_list(data, sma0=200., slen=self.slen) @staticmethod def build_list(data, sma0, slen=5): iso_list = [] for k in range(slen): sample = EllipseSample(data, float(k + sma0)) sample.update() iso_list.append(Isophote(sample, k, True, 0)) result = IsophoteList(iso_list) return result def test_basic_list(self): # make sure it can be indexed as a list. result = self.isolist_sma10[:] assert isinstance(result[0], Isophote) # make sure the important arrays contain floats. # especially the sma array, which is derived # from a property in the Isophote class. 
assert isinstance(result.sma, np.ndarray) assert isinstance(result.sma[0], float) assert isinstance(result.intens, np.ndarray) assert isinstance(result.intens[0], float) assert isinstance(result.rms, np.ndarray) assert isinstance(result.int_err, np.ndarray) assert isinstance(result.pix_stddev, np.ndarray) assert isinstance(result.grad, np.ndarray) assert isinstance(result.grad_error, np.ndarray) assert isinstance(result.grad_r_error, np.ndarray) assert isinstance(result.sarea, np.ndarray) assert isinstance(result.niter, np.ndarray) assert isinstance(result.ndata, np.ndarray) assert isinstance(result.nflag, np.ndarray) assert isinstance(result.valid, np.ndarray) assert isinstance(result.stop_code, np.ndarray) assert isinstance(result.tflux_c, np.ndarray) assert isinstance(result.tflux_e, np.ndarray) assert isinstance(result.npix_c, np.ndarray) assert isinstance(result.npix_e, np.ndarray) assert isinstance(result.a3, np.ndarray) assert isinstance(result.a4, np.ndarray) assert isinstance(result.b3, np.ndarray) assert isinstance(result.b4, np.ndarray) samples = result.sample assert isinstance(samples, list) assert isinstance(samples[0], EllipseSample) iso = result.get_closest(13.6) assert isinstance(iso, Isophote) assert iso.sma == pytest.approx(14., abs=0.000001) def test_extend(self): # the extend method shouldn't return anything, # and should modify the first list in place. inner_list = self.isolist_sma10[:] outer_list = self.isolist_sma100[:] assert len(inner_list) == self.slen assert len(outer_list) == self.slen dummy = inner_list.extend(outer_list) assert not dummy assert len(inner_list) == 2 * self.slen # the __iadd__ operator should behave like the # extend method. inner_list = self.isolist_sma10[:] outer_list = self.isolist_sma100[:] inner_list += outer_list assert len(inner_list) == 2 * self.slen # the __add__ operator should create a new IsophoteList # instance with the result, and should not modify # the operands. inner_list = self.isolist_sma10[:] outer_list = self.isolist_sma100[:] result = inner_list + outer_list assert isinstance(result, IsophoteList) assert len(inner_list) == self.slen assert len(outer_list) == self.slen assert len(result) == 2 * self.slen def test_slicing(self): iso_list = self.isolist_sma10[:] assert len(iso_list) == self.slen assert len(iso_list[1:-1]) == self.slen - 2 assert len(iso_list[2:-2]) == self.slen - 4 def test_combined(self): # combine extend with slicing. inner_list = self.isolist_sma10[:] outer_list = self.isolist_sma100[:] sublist = inner_list[2:-2] dummy = sublist.extend(outer_list) assert not dummy assert len(sublist) == 2*self.slen - 4 # try one more slice. even_outer_list = self.isolist_sma200 sublist.extend(even_outer_list[1:-1]) assert len(sublist) == 2*self.slen - 4 + 3 # combine __add__ with slicing. 
sublist = inner_list[2:-2] result = sublist + outer_list assert isinstance(result, IsophoteList) assert len(sublist) == self.slen - 4 assert len(result) == 2*self.slen - 4 result = inner_list[2:-2] + outer_list assert isinstance(result, IsophoteList) assert len(result) == 2*self.slen - 4 def test_sort(self): inner_list = self.isolist_sma10[:] outer_list = self.isolist_sma100[:] result = outer_list[2:-2] + inner_list assert result[-1].sma < result[0].sma result.sort() assert result[-1].sma > result[0].sma photutils-0.4/photutils/isophote/tests/test_model.py0000644000214200020070000000345413175634532025352 0ustar lbradleySTSCI\science00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst from __future__ import (absolute_import, division, print_function, unicode_literals) import numpy as np import pytest from astropy.io import fits from astropy.tests.helper import remote_data from .make_test_data import make_test_image from ..ellipse import Ellipse from ..geometry import EllipseGeometry from ..model import build_ellipse_model from ...datasets import get_path try: import scipy # noqa HAS_SCIPY = True except ImportError: HAS_SCIPY = False @remote_data @pytest.mark.skipif('not HAS_SCIPY') def test_model(): path = get_path('isophote/M105-S001-RGB.fits', location='photutils-datasets', cache=True) hdu = fits.open(path) data = hdu[0].data[0] hdu.close() g = EllipseGeometry(530., 511, 10., 0.1, 10./180.*np.pi) ellipse = Ellipse(data, geometry=g, threshold=1.e5) isophote_list = ellipse.fit_image() model = build_ellipse_model(data.shape, isophote_list, fill=np.mean(data[10:100, 10:100])) assert data.shape == model.shape residual = data - model assert np.mean(residual) <= 5.0 assert np.mean(residual) >= -5.0 @pytest.mark.skipif('not HAS_SCIPY') def test_model_simulated_data(): data = make_test_image(eps=0.5, pa=np.pi/3., noise=1.e-2, random_state=123) g = EllipseGeometry(256., 256., 10., 0.5, np.pi/3.) ellipse = Ellipse(data, geometry=g, threshold=1.e5) isophote_list = ellipse.fit_image() model = build_ellipse_model(data.shape, isophote_list, fill=np.mean(data[0:50, 0:50])) assert data.shape == model.shape residual = data - model assert np.mean(residual) <= 5.0 assert np.mean(residual) >= -5.0 photutils-0.4/photutils/isophote/tests/test_regression.py0000644000214200020070000001756513175634532026442 0ustar lbradleySTSCI\science00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst """ Despite being cast as a unit test, this code implements regression testing of the Ellipse algorithm, against results obtained by the stsdas$analysis/isophote task 'ellipse'. The stsdas task was run on test images and results were stored in tables. The code here runs the Ellipse algorithm on the same images, producing a list of Isophote instances. The contents of this list then get compared with the contents of the corresponding table. Some quantities are compared in assert statements. These were designed to be executed only when the synth_highsnr.fits image is used as input. That way, we are mainly checking numerical differences that originate in the algorithms themselves, and not caused by noise. The quantities compared this way are: - mean intensity: less than 1% diff. for sma > 3 pixels, 5% otherwise - ellipticity: less than 1% diff. for sma > 3 pixels, 20% otherwise - position angle: less than 1 deg. diff. for sma > 3 pixels, 20 deg. otherwise - X and Y position: less than 0.2 pixel diff. 
For the M51 image we have mostly good agreement with the SPP code in most of the parameters (mean isophotal intensity agrees within a fraction of 1% mostly), but every now and then the ellipticity and position angle of the semi-major axis may differ by a large amount from what the SPP code measures. The code also stops prematurely wrt the larger sma values measured by the SPP code. This is caused by a difference in the way the gradient relative error is measured in each case, and suggests that the SPP code may have a bug. The not-so-good behavior observed in the case of the M51 image is to be expected though. This image is exactly the type of galaxy image for which the algorithm *wasn't* designed for. It has an almost negligible smooth ellipsoidal component, and a lot of lumpy spiral structure that causes the radial gradient computation to go berserk. On top of that, the ellipticity is small (roundish isophotes) throughout the image, causing large relative errors and instability in the fitting algorithm. For now, we can only check the bilinear integration mode. The mean and median modes cannot be checked since the original 'ellipse' task has a bug that causes the creation of erroneous output tables. A partial comparison could be made if we write new code that reads the standard output of 'ellipse' instead, captured from screen, and use it as reference for the regression. """ from __future__ import (absolute_import, division, print_function, unicode_literals) import math import numpy as np import os.path as op import pytest from astropy.io import fits from astropy.table import Table from astropy.tests.helper import remote_data from ..ellipse import Ellipse from ..integrator import BILINEAR from ...datasets import get_path try: import scipy # noqa HAS_SCIPY = True except ImportError: HAS_SCIPY = False @remote_data @pytest.mark.skipif('not HAS_SCIPY') # @pytest.mark.parametrize('name', ['M51', 'synth', 'synth_lowsnr', # 'synth_highsnr']) @pytest.mark.parametrize('name', ['synth_highsnr']) def test_regression(name, integrmode=BILINEAR, verbose=False): """ NOTE: The original code in SPP won't create the right table for the MEAN integration moder, so use the screen output at synth_table_mean.txt to compare results visually with synth_table_mean.fits. """ filename = '{0}_table.fits'.format(name) path = op.join(op.dirname(op.abspath(__file__)), 'data', filename) table = Table.read(path) nrows = len(table['SMA']) path = get_path('isophote/{0}.fits'.format(name), location='photutils-datasets', cache=True) hdu = fits.open(path) data = hdu[0].data hdu.close() ellipse = Ellipse(data) isophote_list = ellipse.fit_image() # isophote_list = ellipse.fit_image(sclip=2., nclip=3) fmt = ("%5.2f %6.1f %8.3f %8.3f %8.3f %9.5f %6.2f " "%6.2f %6.2f %5.2f %4d %3d %3d %2d") for row in range(nrows): try: iso = isophote_list[row] except IndexError: # skip non-existent rows in isophote list, if that's the case. break # data from Isophote sma_i = iso.sample.geometry.sma intens_i = iso.intens int_err_i = iso.int_err if iso.int_err else 0. pix_stddev_i = iso.pix_stddev if iso.pix_stddev else 0. rms_i = iso.rms if iso.rms else 0. ellip_i = iso.sample.geometry.eps if iso.sample.geometry.eps else 0. pa_i = iso.sample.geometry.pa if iso.sample.geometry.pa else 0. x0_i = iso.sample.geometry.x0 y0_i = iso.sample.geometry.y0 rerr_i = (iso.sample.gradient_relative_error if iso.sample.gradient_relative_error else 0.) 
ndata_i = iso.ndata nflag_i = iso.nflag niter_i = iso.niter stop_i = iso.stop_code # convert to old code reference system pa_i = (pa_i - np.pi/2) / np.pi * 180. x0_i += 1 y0_i += 1 # ref data from table sma_t = table['SMA'][row] intens_t = table['INTENS'][row] int_err_t = table['INT_ERR'][row] pix_stddev_t = table['PIX_VAR'][row] rms_t = table['RMS'][row] ellip_t = table['ELLIP'][row] pa_t = table['PA'][row] x0_t = table['X0'][row] y0_t = table['Y0'][row] rerr_t = table['GRAD_R_ERR'][row] ndata_t = table['NDATA'][row] nflag_t = table['NFLAG'][row] niter_t = table['NITER'][row] if table['NITER'][row] else 0 stop_t = table['STOP'][row] if table['STOP'][row] else -1 # relative differences sma_d = (sma_i - sma_t) / sma_t * 100. if sma_t > 0. else 0. intens_d = (intens_i - intens_t) / intens_t * 100. int_err_d = ((int_err_i - int_err_t) / int_err_t * 100. if int_err_t > 0. else 0.) pix_stddev_d = ((pix_stddev_i - pix_stddev_t) / pix_stddev_t * 100. if pix_stddev_t > 0. else 0.) rms_d = (rms_i - rms_t) / rms_t * 100. if rms_t > 0. else 0. ellip_d = (ellip_i - ellip_t) / ellip_t * 100. pa_d = pa_i - pa_t # diff in angle is absolute x0_d = x0_i - x0_t # diff in position is absolute y0_d = y0_i - y0_t rerr_d = rerr_i - rerr_t # diff in relative error is absolute ndata_d = (ndata_i - ndata_t) / ndata_t * 100. nflag_d = 0 niter_d = 0 stop_d = 0 if stop_i == stop_t else -1 if verbose: print("* data " + fmt % (sma_i, intens_i, int_err_i, pix_stddev_i, rms_i, ellip_i, pa_i, x0_i, y0_i, rerr_i, ndata_i, nflag_i, niter_i, stop_i)) print(" ref " + fmt % (sma_t, intens_t, int_err_t, pix_stddev_t, rms_t, ellip_t, pa_t, x0_t, y0_t, rerr_t, ndata_t, nflag_t, niter_t, stop_t)) print(" diff " + fmt % (sma_d, intens_d, int_err_d, pix_stddev_d, rms_d, ellip_d, pa_d, x0_d, y0_d, rerr_d, ndata_d, nflag_d, niter_d, stop_d)) print() if name == "synth_highsnr" and integrmode == BILINEAR: assert abs(x0_d) <= 0.21 assert abs(y0_d) <= 0.21 if sma_i > 3.: assert abs(intens_d) <= 1. else: assert abs(intens_d) <= 5. if not math.isnan(ellip_d): if sma_i > 3.: assert abs(ellip_d) <= 1. # 1% else: assert abs(ellip_d) <= 20. # 20% if not math.isnan(pa_d): if sma_i > 3.: assert abs(pa_d) <= 1. # 1 deg. else: assert abs(pa_d) <= 20. # 20 deg. photutils-0.4/photutils/isophote/tests/test_sample.py0000644000214200020070000000324513175634532025531 0ustar lbradleySTSCI\science00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst from __future__ import (absolute_import, division, print_function, unicode_literals) import numpy as np import pytest from .make_test_data import make_test_image from ..integrator import MEDIAN, MEAN, BILINEAR, NEAREST_NEIGHBOR from ..isophote import Isophote from ..sample import EllipseSample DATA = make_test_image(background=100., i0=0., noise=10., random_state=123) # the median is not so good at estimating rms @pytest.mark.parametrize('integrmode, amin, amax', [(NEAREST_NEIGHBOR, 7., 15.), (BILINEAR, 7., 15.), (MEAN, 7., 15.), (MEDIAN, 6., 15.)]) def test_scatter(integrmode, amin, amax): """ Check that the pixel standard deviation can be reliably estimated from the rms scatter and the sector area. The test data is just a flat image with noise, no galaxy. We define the noise rms and then compare how close the pixel std dev estimated at extraction matches this input noise. 
""" sample = EllipseSample(DATA, 50., astep=0.2, integrmode=integrmode) sample.update() iso = Isophote(sample, 0, True, 0) assert iso.pix_stddev < amax assert iso.pix_stddev > amin def test_coordinates(): sample = EllipseSample(DATA, 50.) sample.update() x, y = sample.coordinates() assert isinstance(x, np.ndarray) assert isinstance(y, np.ndarray) def test_sclip(): sample = EllipseSample(DATA, 50., nclip=3) sample.update() x, y = sample.coordinates() assert isinstance(x, np.ndarray) assert isinstance(y, np.ndarray) photutils-0.4/photutils/morphology/0000755000214200020070000000000013175654702022037 5ustar lbradleySTSCI\science00000000000000photutils-0.4/photutils/morphology/__init__.py0000644000214200020070000000040413175634532024145 0ustar lbradleySTSCI\science00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst """ This subpackage contains tools for measuring morphological properties of objects in an astronomical image. """ from .core import * # noqa from .non_parametric import * # noqa photutils-0.4/photutils/morphology/core.py0000644000214200020070000000342413174502576023344 0ustar lbradleySTSCI\science00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst """ Functions for measuring morphological properties of objects in an astronomical image using image moments. """ from __future__ import (absolute_import, division, print_function, unicode_literals) import numpy as np from ..segmentation import SourceProperties __all__ = ['data_properties'] def data_properties(data, mask=None, background=None): """ Calculate the morphological properties (and centroid) of a 2D array (e.g. an image cutout of an object) using image moments. Parameters ---------- data : array_like or `~astropy.units.Quantity` The 2D array of the image. mask : array_like (bool), optional A boolean mask, with the same shape as ``data``, where a `True` value indicates the corresponding element of ``data`` is masked. Masked data are excluded from all calculations. background : float, array_like, or `~astropy.units.Quantity`, optional The background level that was previously present in the input ``data``. ``background`` may either be a scalar value or a 2D image with the same shape as the input ``data``. Inputting the ``background`` merely allows for its properties to be measured within each source segment. The input ``background`` does *not* get subtracted from the input ``data``, which should already be background-subtracted. Returns ------- result : `~photutils.segmentation.SourceProperties` instance A `~photutils.segmentation.SourceProperties` object. """ segment_image = np.ones(data.shape, dtype=np.int) return SourceProperties(data, segment_image, label=1, mask=mask, background=background) photutils-0.4/photutils/morphology/non_parametric.py0000644000214200020070000000355213055576313025415 0ustar lbradleySTSCI\science00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst """ Functions for measuring non-parametric morphology. """ from __future__ import (absolute_import, division, print_function, unicode_literals) import numpy as np def gini(data): """ Calculate the `Gini coefficient `_ of a 2D array. The Gini coefficient is calculated using the prescription from `Lotz et al. 2004 `_ as: .. math:: G = \\frac{1}{\\left | \\bar{x} \\right | n (n - 1)} \\sum^{n}_{i} (2i - n - 1) \\left | x_i \\right | where :math:`\\bar{x}` is the mean over all pixel values :math:`x_i`. The Gini coefficient is a way of measuring the inequality in a given set of values. 
In the context of galaxy morphology, it measures how the light of a galaxy image is distributed among its pixels. A ``G`` value of 0 corresponds to a galaxy image with the light evenly distributed over all pixels while a ``G`` value of 1 represents a galaxy image with all its light concentrated in just one pixel. Usually Gini's measurement needs some sort of preprocessing for defining the galaxy region in the image based on the quality of the input data. As there is not a general standard for doing this, this is left for the user. Parameters ---------- data : array-like The 2D data array or object that can be converted to an array. Returns ------- gini : `float` The Gini coefficient of the input 2D array. """ flattened = np.sort(np.ravel(data)) N = np.size(flattened) normalization = 1. / (np.abs(np.mean(flattened)) * N * (N - 1)) kernel = (2 * np.arange(1, N + 1) - N - 1) * np.abs(flattened) G = normalization * np.sum(kernel) return G photutils-0.4/photutils/morphology/tests/0000755000214200020070000000000013175654702023201 5ustar lbradleySTSCI\science00000000000000photutils-0.4/photutils/morphology/tests/__init__.py0000644000214200020070000000017113055576313025307 0ustar lbradleySTSCI\science00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst """ This packages contains affiliated package tests. """ photutils-0.4/photutils/morphology/tests/test_core.py0000644000214200020070000000214113175634532025537 0ustar lbradleySTSCI\science00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst from __future__ import (absolute_import, division, print_function, unicode_literals) import numpy as np from numpy.testing import assert_allclose import pytest from ..core import data_properties try: import skimage # noqa HAS_SKIMAGE = True except ImportError: HAS_SKIMAGE = False XCS = [25.7] YCS = [26.2] XSTDDEVS = [3.2, 4.0] YSTDDEVS = [5.7, 4.1] THETAS = np.array([30., 45.]) * np.pi / 180. DATA = np.zeros((3, 3)) DATA[0:2, 1] = 1. DATA[1, 0:2] = 1. DATA[1, 1] = 2. @pytest.mark.skipif('not HAS_SKIMAGE') def test_data_properties(): data = np.ones((2, 2)).astype(np.float) mask = np.array([[False, False], [True, True]]) props = data_properties(data, mask=None) props2 = data_properties(data, mask=mask) properties = ['xcentroid', 'ycentroid', 'area'] result = [props[i].value for i in properties] result2 = [props2[i].value for i in properties] assert_allclose([0.5, 0.5, 4.0], result, rtol=0, atol=1.e-6) assert_allclose([0.5, 0.0, 2.0], result2, rtol=0, atol=1.e-6) photutils-0.4/photutils/morphology/tests/test_non_parametric.py0000644000214200020070000000100313055576313027603 0ustar lbradleySTSCI\science00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst from __future__ import (absolute_import, division, print_function, unicode_literals) import numpy as np from ..non_parametric import gini def test_gini(): """ Test Gini coefficient measurement. """ data_evenly_distributed = np.ones((100, 100)) data_point_like = np.zeros((100, 100)) data_point_like[50, 50] = 1 assert gini(data_evenly_distributed) == 0. assert gini(data_point_like) == 1. photutils-0.4/photutils/psf/0000755000214200020070000000000013175654702020430 5ustar lbradleySTSCI\science00000000000000photutils-0.4/photutils/psf/__init__.py0000644000214200020070000000056513175634532022546 0ustar lbradleySTSCI\science00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst """ This subpackage contains modules and packages for point spread function photometry. 
""" from .funcs import * # noqa from .groupstars import * # noqa from .matching import * # noqa from .models import * # noqa from .groupstars import * # noqa from .photometry import * # noqa photutils-0.4/photutils/psf/funcs.py0000644000214200020070000000774013175634532022127 0ustar lbradleySTSCI\science00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst """ Models and functions for doing PSF/PRF fitting photometry on image data. """ from __future__ import division import numpy as np from astropy.table import Table from astropy.nddata.utils import add_array, extract_array __all__ = ['subtract_psf'] def _extract_psf_fitting_names(psf): """ Determine the names of the x coordinate, y coordinate, and flux from a model. Returns (xname, yname, fluxname) """ if hasattr(psf, 'xname'): xname = psf.xname elif 'x_0' in psf.param_names: xname = 'x_0' else: raise ValueError('Could not determine x coordinate name for ' 'psf_photometry.') if hasattr(psf, 'yname'): yname = psf.yname elif 'y_0' in psf.param_names: yname = 'y_0' else: raise ValueError('Could not determine y coordinate name for ' 'psf_photometry.') if hasattr(psf, 'fluxname'): fluxname = psf.fluxname elif 'flux' in psf.param_names: fluxname = 'flux' else: raise ValueError('Could not determine flux name for psf_photometry.') return xname, yname, fluxname def _call_fitter(fitter, psf, x, y, data, weights): """ Not all fitters have to support a weight array. This function includes the weight in the fitter call only if really needed. """ if np.all(weights == 1.): return fitter(psf, x, y, data) else: return fitter(psf, x, y, data, weights=weights) def subtract_psf(data, psf, posflux, subshape=None): """ Subtract PSF/PRFs from an image. Parameters ---------- data : `~astropy.nddata.NDData` or array (must be 2D) Image data. psf : `astropy.modeling.Fittable2DModel` instance PSF/PRF model to be substracted from the data. posflux : Array-like of shape (3, N) or `~astropy.table.Table` Positions and fluxes for the objects to subtract. If an array, it is interpreted as ``(x, y, flux)`` If a table, the columns 'x_fit', 'y_fit', and 'flux_fit' must be present. subshape : length-2 or None The shape of the region around the center of the location to subtract the PSF from. If None, subtract from the whole image. Returns ------- subdata : same shape and type as ``data`` The image with the PSF subtracted """ if data.ndim != 2: raise ValueError('{0}-d array not supported. 
Only 2-d arrays can be ' 'passed to subtract_psf.'.format(data.ndim)) # translate array input into table if hasattr(posflux, 'colnames'): if 'x_fit' not in posflux.colnames: raise ValueError('Input table does not have x_fit') if 'y_fit' not in posflux.colnames: raise ValueError('Input table does not have y_fit') if 'flux_fit' not in posflux.colnames: raise ValueError('Input table does not have flux_fit') else: posflux = Table(names=['x_fit', 'y_fit', 'flux_fit'], data=posflux) # Set up contstants across the loop psf = psf.copy() xname, yname, fluxname = _extract_psf_fitting_names(psf) indices = np.indices(data.shape) subbeddata = data.copy() if subshape is None: indicies_reversed = indices[::-1] for row in posflux: getattr(psf, xname).value = row['x_fit'] getattr(psf, yname).value = row['y_fit'] getattr(psf, fluxname).value = row['flux_fit'] subbeddata -= psf(*indicies_reversed) else: for row in posflux: x_0, y_0 = row['x_fit'], row['y_fit'] y = extract_array(indices[0], subshape, (y_0, x_0)) x = extract_array(indices[1], subshape, (y_0, x_0)) getattr(psf, xname).value = x_0 getattr(psf, yname).value = y_0 getattr(psf, fluxname).value = row['flux_fit'] subbeddata = add_array(subbeddata, -psf(x, y), (y_0, x_0)) return subbeddata photutils-0.4/photutils/psf/groupstars.py0000644000214200020070000002243613175634532023221 0ustar lbradleySTSCI\science00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst """Module which provides classes to perform source grouping.""" from __future__ import division import abc import six import numpy as np from astropy.table import Column __all__ = ['DAOGroup', 'DBSCANGroup', 'GroupStarsBase'] @six.add_metaclass(abc.ABCMeta) class GroupStarsBase(object): """ This base class provides the basic interface for subclasses that are capable of classifying stars in groups. """ def __call__(self, starlist): """ Classify stars into groups. Parameters ---------- starlist : `~astropy.table.Table` List of star positions. Columns named as ``x_0`` and ``y_0``, which corresponds to the centroid coordinates of the sources, must be provided. Returns ------- group_starlist : `~astropy.table.Table` ``starlist`` with an additional column named ``group_id`` whose unique values represent groups of mutually overlapping stars. """ return self.group_stars(starlist) class DAOGroup(GroupStarsBase): """ This is class implements the DAOGROUP algorithm presented by Stetson (1987). The method ``group_stars`` divides an entire starlist into sets of distinct, self-contained groups of mutually overlapping stars. It accepts as input a list of stars and determines which stars are close enough to be capable of adversely influencing each others' profile fits. Parameters ---------- crit_separation : float or int Distance, in units of pixels, such that any two stars separated by less than this distance will be placed in the same group. Notes ----- Assuming the psf fwhm to be known, ``crit_separation`` may be set to k*fwhm, for some positive real k. See Also -------- photutils.DAOStarFinder References ---------- [1] Stetson, Astronomical Society of the Pacific, Publications, (ISSN 0004-6280), vol. 99, March 1987, p. 191-222. 
Available at: http://adsabs.harvard.edu/abs/1987PASP...99..191S """ def __init__(self, crit_separation): self.crit_separation = crit_separation @property def crit_separation(self): return self._crit_separation @crit_separation.setter def crit_separation(self, crit_separation): if not isinstance(crit_separation, (float, int)): raise ValueError('crit_separation is expected to be either ' 'float or int. Received {}.' .format(type(crit_separation))) elif crit_separation < 0.0: raise ValueError('crit_separation is expected to be a positive ' 'real number. Got {}'.format(crit_separation)) else: self._crit_separation = crit_separation def group_stars(self, starlist): """ Classify stars into groups. Parameters ---------- starlist : `~astropy.table.Table` List of star positions. Columns named as ``x_0`` and ``y_0``, which corresponds to the centroid coordinates of the sources, must be provided. Returns ------- group_starlist : `~astropy.table.Table` ``starlist`` with an additional column named ``group_id`` whose unique values represent groups of mutually overlapping stars. """ cstarlist = starlist.copy() if 'id' not in cstarlist.colnames: cstarlist.add_column(Column(name='id', data=np.arange(len(cstarlist)) + 1)) cstarlist.add_column(Column(name='group_id', data=np.zeros(len(cstarlist), dtype=np.int))) if not np.array_equal(cstarlist['id'], np.arange(len(cstarlist)) + 1): raise ValueError('id colum must be an integer-valued ' + 'sequence starting from 1. ' + 'Got {}'.format(cstarlist['id'])) n = 1 while (cstarlist['group_id'] == 0).sum() > 0: init_star = cstarlist[np.where(cstarlist['group_id'] == 0)[0][0]] index = self.find_group(init_star, cstarlist[cstarlist['group_id'] == 0]) cstarlist['group_id'][index-1] = n k = 1 K = len(index) while k < K: init_star = cstarlist[cstarlist['id'] == index[k]] tmp_index = self.find_group( init_star, cstarlist[cstarlist['group_id'] == 0]) if len(tmp_index) > 0: cstarlist['group_id'][tmp_index-1] = n index = np.append(index, tmp_index) K = len(index) k += 1 n += 1 return cstarlist def find_group(self, star, starlist): """ Find the ids of those stars in ``starlist`` which are at a distance less than ``crit_separation`` from ``star``. Parameters ---------- star : `~astropy.table.Row` Star which will be either the head of a cluster or an isolated one. starlist : `~astropy.table.Table` List of star positions. Columns named as ``x_0`` and ``y_0``, which corresponds to the centroid coordinates of the sources, must be provided. Returns ------- Array containing the ids of those stars which are at a distance less than ``crit_separation`` from ``star``. """ star_distance = np.hypot(star['x_0'] - starlist['x_0'], star['y_0'] - starlist['y_0']) distance_criteria = star_distance < self.crit_separation return np.asarray(starlist[distance_criteria]['id']) class DBSCANGroup(GroupStarsBase): """ Class to create star groups according to a distance criteria using the Density-based Spatial Clustering of Applications with Noise (DBSCAN) from scikit-learn. Parameters ---------- crit_separation : float or int Distance, in units of pixels, such that any two stars separated by less than this distance will be placed in the same group. min_samples : int, optional (default=1) Minimum number of stars necessary to form a group. metric : string or callable (default='euclidean') The metric to use when calculating distance between each pair of stars. algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional The algorithm to be used to actually find nearest neighbors. 
leaf_size : int, optional (default = 30) Leaf size passed to BallTree or cKDTree. References ---------- [1] Scikit Learn DBSCAN. http://scikit-learn.org/stable/modules/generated/sklearn.cluster.DBSCAN.html#sklearn.cluster.DBSCAN Notes ----- * The attribute ``crit_separation`` corresponds to ``eps`` in `sklearn.cluster.DBSCAN `_. * This class provides more general algorithms than `photutils.psf.DAOGroup`. More precisely, `photutils.psf.DAOGroup` is a special case of `photutils.psf.DBSCANGroup` when ``min_samples=1`` and ``metric=euclidean``. Additionally, `photutils.psf.DBSCANGroup` may be faster than `photutils.psf.DAOGroup`. """ def __init__(self, crit_separation, min_samples=1, metric='euclidean', algorithm='auto', leaf_size=30): self.crit_separation = crit_separation self.min_samples = min_samples self.metric = metric self.algorithm = algorithm self.leaf_size = leaf_size def group_stars(self, starlist): """ Classify stars into groups. Parameters ---------- starlist : `~astropy.table.Table` List of star positions. Columns named as ``x_0`` and ``y_0``, which corresponds to the centroid coordinates of the sources, must be provided. Returns ------- group_starlist : `~astropy.table.Table` ``starlist`` with an additional column named ``group_id`` whose unique values represent groups of mutually overlapping stars. """ from sklearn.cluster import DBSCAN cstarlist = starlist.copy() if 'id' not in cstarlist.colnames: cstarlist.add_column(Column(name='id', data=np.arange(len(cstarlist)) + 1)) if not np.array_equal(cstarlist['id'], np.arange(len(cstarlist)) + 1): raise ValueError('id colum must be an integer-valued ' + 'sequence starting from 1. ' + 'Got {}'.format(cstarlist['id'])) pos_stars = list(zip(cstarlist['x_0'], cstarlist['y_0'])) dbscan = DBSCAN(eps=self.crit_separation, min_samples=self.min_samples, metric=self.metric, algorithm=self.algorithm, leaf_size=self.leaf_size) cstarlist['group_id'] = (dbscan.fit(pos_stars).labels_ + np.ones(len(cstarlist), dtype=np.int)) return cstarlist photutils-0.4/photutils/psf/matching/0000755000214200020070000000000013175654702022222 5ustar lbradleySTSCI\science00000000000000photutils-0.4/photutils/psf/matching/__init__.py0000644000214200020070000000036213055576313024332 0ustar lbradleySTSCI\science00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst """ This subpackage contains modules and packages to generate kernels for matching point spread functions. """ from .fourier import * # noqa from .windows import * # noqa photutils-0.4/photutils/psf/matching/fourier.py0000644000214200020070000000640713055576313024254 0ustar lbradleySTSCI\science00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst """ Tools for matching PSFs using Fourier methods. """ from __future__ import division import numpy as np from numpy.fft import fft2, ifft2, fftshift, ifftshift __all__ = ['resize_psf', 'create_matching_kernel'] def resize_psf(psf, input_pixel_scale, output_pixel_scale, order=3): """ Resize a PSF using spline interpolation of the requested order. Parameters ---------- psf : 2D `~numpy.ndarray` The 2D data array of the PSF. input_pixel_scale : float The pixel scale of the input ``psf``. The units must match ``output_pixel_scale``. output_pixel_scale : float The pixel scale of the output ``psf``. The units must match ``input_pixel_scale``. order : float, optional The order of the spline interpolation (0-5). The default is 3. Returns ------- result : 2D `~numpy.ndarray` The resampled/interpolated 2D data array. 
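    Examples
    --------
    A minimal, illustrative sketch (the input array is arbitrary and
    `scipy` must be available, since the resampling is done with
    `scipy.ndimage.zoom`):

    >>> import numpy as np
    >>> from photutils.psf.matching import resize_psf
    >>> psf = np.ones((5, 5))
    >>> resize_psf(psf, 0.1, 0.05).shape
    (10, 10)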
""" from scipy.ndimage import zoom ratio = input_pixel_scale / output_pixel_scale return zoom(psf, ratio, order=order) / ratio**2 def create_matching_kernel(source_psf, target_psf, window=None): """ Create a kernel to match 2D point spread functions (PSF) using the ratio of Fourier transforms. Parameters ---------- source_psf : 2D `~numpy.ndarray` The source PSF. The source PSF should have higher resolution (i.e. narrower) than the target PSF. ``source_psf`` and ``target_psf`` must have the same shape and pixel scale. target_psf : 2D `~numpy.ndarray` The target PSF. The target PSF should have lower resolution (i.e. broader) than the source PSF. ``source_psf`` and ``target_psf`` must have the same shape and pixel scale. window : callable, optional The window (or taper) function or callable class instance used to remove high frequency noise from the PSF matching kernel. Some examples include: * `~photutils.psf.matching.HanningWindow` * `~photutils.psf.matching.TukeyWindow` * `~photutils.psf.matching.CosineBellWindow` * `~photutils.psf.matching.SplitCosineBellWindow` * `~photutils.psf.matching.TopHatWindow` For more information on window functions and example usage, see :ref:`psf_matching`. Returns ------- kernel : 2D `~numpy.ndarray` The matching kernel to go from ``source_psf`` to ``target_psf``. The output matching kernel is normalized such that it sums to 1. """ source_psf = np.asanyarray(source_psf) target_psf = np.asanyarray(target_psf) if source_psf.shape != target_psf.shape: raise ValueError('source_psf and target_psf must have the same shape ' '(i.e. registered with the same pixel scale).') # ensure input PSFs are normalized source_psf /= source_psf.sum() target_psf /= target_psf.sum() source_otf = fftshift(fft2(source_psf)) target_otf = fftshift(fft2(target_psf)) ratio = target_otf / source_otf # apply a window function in frequency space if window is not None: ratio *= window(target_psf.shape) kernel = np.real(fftshift((ifft2(ifftshift(ratio))))) return kernel / kernel.sum() photutils-0.4/photutils/psf/matching/tests/0000755000214200020070000000000013175654702023364 5ustar lbradleySTSCI\science00000000000000photutils-0.4/photutils/psf/matching/tests/__init__.py0000644000214200020070000000017513055576313025476 0ustar lbradleySTSCI\science00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst """ This packages contains tests for the psf subpackage. 
""" photutils-0.4/photutils/psf/matching/tests/test_fourier.py0000644000214200020070000000250213175634532026446 0ustar lbradleySTSCI\science00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst from __future__ import (absolute_import, division, print_function, unicode_literals) import pytest import numpy as np from numpy.testing import assert_allclose from astropy.modeling.models import Gaussian2D from ..fourier import resize_psf, create_matching_kernel from ..windows import TopHatWindow try: import scipy # noqa HAS_SCIPY = True except ImportError: HAS_SCIPY = False @pytest.mark.skipif('not HAS_SCIPY') def test_resize_psf(): psf1 = np.ones((5, 5)) psf2 = resize_psf(psf1, 0.1, 0.05) assert psf2.shape == (10, 10) def test_create_matching_kernel(): """Test with noiseless 2D Gaussians.""" y, x = np.mgrid[0:101, 0:101] gm1 = Gaussian2D(100, 50, 50, 3, 3) gm2 = Gaussian2D(100, 50, 50, 4, 4) gm3 = Gaussian2D(100, 50, 50, 5, 5) g1 = gm1(x, y) g2 = gm2(x, y) g3 = gm3(x, y) g1 /= g1.sum() g2 /= g2.sum() g3 /= g3.sum() window = TopHatWindow(32./101) k = create_matching_kernel(g1, g3, window=window) assert_allclose(k, g3, atol=1.e-2) def test_create_matching_kernel_shapes(): """Test with wrong PSF shapes.""" with pytest.raises(ValueError): psf1 = np.ones((5, 5)) psf2 = np.ones((3, 3)) create_matching_kernel(psf1, psf2) photutils-0.4/photutils/psf/matching/tests/test_windows.py0000644000214200020070000000410413175634532026465 0ustar lbradleySTSCI\science00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst from __future__ import (absolute_import, division, print_function, unicode_literals) import pytest import numpy as np from numpy.testing import assert_allclose from ..windows import (HanningWindow, TukeyWindow, CosineBellWindow, SplitCosineBellWindow, TopHatWindow) try: import scipy # noqa HAS_SCIPY = True except ImportError: HAS_SCIPY = False def test_hanning(): win = HanningWindow() data = win((5, 5)) ref = [0., 0.19715007, 0.5, 0.19715007, 0.] assert_allclose(data[1, :], ref) def test_hanning_numpy(): """Test Hanning window against 1D numpy version.""" size = 101 cen = (size - 1) // 2 shape = (size, size) win = HanningWindow() data = win(shape) ref1d = np.hanning(shape[0]) assert_allclose(data[cen, :], ref1d) def test_tukey(): win = TukeyWindow(0.5) data = win((5, 5)) ref = [0., 0.63312767, 1., 0.63312767, 0.] assert_allclose(data[1, :], ref) @pytest.mark.skipif('not HAS_SCIPY') def test_tukey_scipy(): """Test Tukey window against 1D scipy version.""" # scipy.signal.tukey was introduced in Scipy v0.16.0 from scipy.signal import tukey size = 101 cen = (size - 1) // 2 shape = (size, size) alpha = 0.4 win = TukeyWindow(alpha=alpha) data = win(shape) ref1d = tukey(shape[0], alpha=alpha) assert_allclose(data[cen, :], ref1d) def test_cosine_bell(): win = CosineBellWindow(alpha=0.8) data = win((7, 7)) ref = [0., 0., 0.19715007, 0.5, 0.19715007, 0., 0.] assert_allclose(data[2, :], ref) def test_split_cosine_bell(): win = SplitCosineBellWindow(alpha=0.8, beta=0.2) data = win((5, 5)) ref = [0., 0.3454915, 1., 0.3454915, 0.] assert_allclose(data[2, :], ref) def test_tophat(): win = TopHatWindow(beta=0.5) data = win((5, 5)) ref = [0., 1., 1., 1., 0.] 
assert_allclose(data[2, :], ref) def test_invalid_shape(): with pytest.raises(ValueError): win = HanningWindow() win((5,)) photutils-0.4/photutils/psf/matching/windows.py0000644000214200020070000001517013055576313024270 0ustar lbradleySTSCI\science00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst """ Window (or tapering) functions for matching PSFs using Fourier methods. """ from __future__ import division import numpy as np __all__ = ['SplitCosineBellWindow', 'HanningWindow', 'TukeyWindow', 'CosineBellWindow', 'TopHatWindow'] def _radial_distance(shape): """ Return an array where each value is the Euclidean distance from the array center. Parameters ---------- shape : tuple of int The size of the output array along each axis. Returns ------- result : `~numpy.ndarray` An array containing the Euclidian radial distances from the array center. """ if len(shape) != 2: raise ValueError('shape must have only 2 elements') position = (np.asarray(shape) - 1) / 2. x = np.arange(shape[1]) - position[1] y = np.arange(shape[0]) - position[0] xx, yy = np.meshgrid(x, y) return np.sqrt(xx**2 + yy**2) class SplitCosineBellWindow(object): """ Class to define a 2D split cosine bell taper function. Parameters ---------- alpha : float, optional The percentage of array values that are tapered. beta : float, optional The inner diameter as a fraction of the array size beyond which the taper begins. ``beta`` must be less or equal to 1.0. Examples -------- .. plot:: :include-source: import matplotlib.pyplot as plt from photutils import SplitCosineBellWindow taper = SplitCosineBellWindow(alpha=0.4, beta=0.3) data = taper((101, 101)) plt.imshow(data, cmap='viridis', origin='lower') plt.colorbar() A 1D cut across the image center: .. plot:: :include-source: import matplotlib.pyplot as plt from photutils import SplitCosineBellWindow taper = SplitCosineBellWindow(alpha=0.4, beta=0.3) data = taper((101, 101)) plt.plot(data[50, :]) """ def __init__(self, alpha, beta): self.alpha = alpha self.beta = beta def __call__(self, shape): """ Return a 2D split cosine bell. Parameters ---------- shape : tuple of int The size of the output array along each axis. Returns ------- result : `~numpy.ndarray` A 2D array containing the cosine bell values. """ radial_dist = _radial_distance(shape) npts = (np.array(shape).min() - 1.) / 2. r_inner = self.beta * npts r = radial_dist - r_inner r_taper = int(np.floor(self.alpha * npts)) if r_taper != 0: f = 0.5 * (1.0 + np.cos(np.pi * r / r_taper)) else: f = np.ones(shape) f[radial_dist < r_inner] = 1. r_cut = r_inner + r_taper f[radial_dist > r_cut] = 0. return f class HanningWindow(SplitCosineBellWindow): """ Class to define a 2D `Hanning (or Hann) window `_ function. The Hann window is a taper formed by using a raised cosine with ends that touch zero. Examples -------- .. plot:: :include-source: import matplotlib.pyplot as plt from photutils import HanningWindow taper = HanningWindow() data = taper((101, 101)) plt.imshow(data, cmap='viridis', origin='lower') plt.colorbar() A 1D cut across the image center: .. plot:: :include-source: import matplotlib.pyplot as plt from photutils import HanningWindow taper = HanningWindow() data = taper((101, 101)) plt.plot(data[50, :]) """ def __init__(self): self.alpha = 1.0 self.beta = 0.0 class TukeyWindow(SplitCosineBellWindow): """ Class to define a 2D `Tukey window `_ function. The Tukey window is a taper formed by using a split cosine bell function with ends that touch zero. 
Parameters ---------- alpha : float, optional The percentage of array values that are tapered. Examples -------- .. plot:: :include-source: import matplotlib.pyplot as plt from photutils import TukeyWindow taper = TukeyWindow(alpha=0.4) data = taper((101, 101)) plt.imshow(data, cmap='viridis', origin='lower') plt.colorbar() A 1D cut across the image center: .. plot:: :include-source: import matplotlib.pyplot as plt from photutils import TukeyWindow taper = TukeyWindow(alpha=0.4) data = taper((101, 101)) plt.plot(data[50, :]) """ def __init__(self, alpha): self.alpha = alpha self.beta = 1. - self.alpha class CosineBellWindow(SplitCosineBellWindow): """ Class to define a 2D cosine bell window function. Parameters ---------- alpha : float, optional The percentage of array values that are tapered. Examples -------- .. plot:: :include-source: import matplotlib.pyplot as plt from photutils import CosineBellWindow taper = CosineBellWindow(alpha=0.3) data = taper((101, 101)) plt.imshow(data, cmap='viridis', origin='lower') plt.colorbar() A 1D cut across the image center: .. plot:: :include-source: import matplotlib.pyplot as plt from photutils import CosineBellWindow taper = CosineBellWindow(alpha=0.3) data = taper((101, 101)) plt.plot(data[50, :]) """ def __init__(self, alpha): self.alpha = alpha self.beta = 0.0 class TopHatWindow(SplitCosineBellWindow): """ Class to define a 2D top hat window function. Parameters ---------- beta : float, optional The inner diameter as a fraction of the array size beyond which the taper begins. ``beta`` must be less or equal to 1.0. Examples -------- .. plot:: :include-source: import matplotlib.pyplot as plt from photutils import TopHatWindow taper = TopHatWindow(beta=0.4) data = taper((101, 101)) plt.imshow(data, cmap='viridis', origin='lower', interpolation='nearest') plt.colorbar() A 1D cut across the image center: .. plot:: :include-source: import matplotlib.pyplot as plt from photutils import TopHatWindow taper = TopHatWindow(beta=0.4) data = taper((101, 101)) plt.plot(data[50, :]) """ def __init__(self, beta): self.alpha = 0.0 self.beta = beta photutils-0.4/photutils/psf/models.py0000644000214200020070000007002613175634734022275 0ustar lbradleySTSCI\science00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst """ Models for doing PSF/PRF fitting photometry on image data. """ from __future__ import division import warnings import copy import numpy as np from astropy.modeling import models, Parameter, Fittable2DModel from astropy.utils.exceptions import AstropyWarning __all__ = ['FittableImageModel', 'NonNormalizable', 'IntegratedGaussianPRF', 'PRFAdapter', 'prepare_psf_model', 'get_grouped_psf_model'] class NonNormalizable(AstropyWarning): """ Used to indicate that a :py:class:`FittableImageModel` model is non-normalizable. """ pass class FittableImageModel(Fittable2DModel): """ A fittable 2D model of an image allowing for image intensity scaling and image translations. This class takes 2D image data and computes the values of the model at arbitrary locations (including at intra-pixel, fractional positions) within this image using spline interpolation provided by :py:class:`~scipy.interpolate.RectBivariateSpline`. The fittable model provided by this class has three model parameters: an image intensity scaling factor (`flux`) which is applied to (normalized) image, and two positional parameters (`x_0` and `y_0`) indicating the location of a feature in the coordinate grid on which the model is to be evaluated. 
If this class is initialized with `flux` (intensity scaling factor) set to `None`, then `flux` is going to be estimated as ``sum(data)``. Parameters ---------- data : numpy.ndarray Array containing 2D image. origin : tuple, None, optional A reference point in the input image ``data`` array. When origin is `None`, origin will be set at the middle of the image array. If `origin` represents the location of a feature (e.g., the position of an intensity peak) in the input ``data``, then model parameters `x_0` and `y_0` show the location of this peak in an another target image to which this model was fitted. Fundamentally, it is the coordinate in the model's image data that should map to coordinate (`x_0`, `y_0`) of the output coordinate system on which the model is evaluated. Alternatively, when `origin` is set to ``(0,0)``, then model parameters `x_0` and `y_0` are shifts by which model's image should be translated in order to match a target image. normalize : bool, optional Indicates whether or not the model should be build on normalized input image data. If true, then the normalization constant (*N*) is computed so that .. math:: N \\cdot C \\cdot \\Sigma_{i,j}D_{i,j} = 1, where *N* is the normalization constant, *C* is correction factor given by the parameter ``normalization_correction``, and :math:`D_{i,j}` are the elements of the input image ``data`` array. normalization_correction : float, optional A strictly positive number that represents correction that needs to be applied to model's data normalization (see *C* in the equation in the comments to ``normalize`` for more details). A possible application for this parameter is to account for aperture correction. Assuming model's data represent a PSF to be fitted to some target star, we set ``normalization_correction`` to the aperture correction that needs to be applied to the model. That is, ``normalization_correction`` in this case should be set to the ratio between the total flux of the PSF (including flux outside model's data) to the flux of model's data. Then, best fitted value of the `flux` model parameter will represent an aperture-corrected flux of the target star. fill_value : float, optional The value to be returned by the `evaluate` or ``astropy.modeling.Model.__call__`` methods when evaluation is performed outside the definition domain of the model. ikwargs : dict, optional Additional optional keyword arguments to be passed directly to the `compute_interpolator` method. See `compute_interpolator` for more details. 
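    Examples
    --------
    A minimal usage sketch (the small synthetic ``psf_data`` array and the
    chosen positions below are made up purely for illustration; any 2D
    array of finite values works)::

        import numpy as np
        from photutils.psf import FittableImageModel

        # build a tiny synthetic image of a peaked source
        yy, xx = np.mgrid[0:11, 0:11]
        psf_data = np.exp(-((xx - 5.)**2 + (yy - 5.)**2) / (2. * 2.**2))

        # normalized model whose feature (the peak) maps to (x_0, y_0);
        # origin defaults to the center of the data array
        model = FittableImageModel(psf_data, x_0=5., y_0=5., normalize=True)

        # evaluate at (possibly fractional) positions on an output grid
        values = model(xx, yy)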
""" flux = Parameter(description='Intensity scaling factor for image data.', default=1.0) x_0 = Parameter(description='X-position of a feature in the image in ' 'the output coordinate grid on which the model is ' 'evaluated.', default=0.0) y_0 = Parameter(description='Y-position of a feature in the image in ' 'the output coordinate grid on which the model is ' 'evaluated.', default=0.0) def __init__(self, data, flux=flux.default, x_0=x_0.default, y_0=y_0.default, normalize=False, normalization_correction=1.0, origin=None, oversampling=1, fill_value=0.0, ikwargs={}): self._fill_value = fill_value self._img_norm = None self._normalization_status = 0 if normalize else 2 self._store_interpolator_kwargs(ikwargs) self._set_oversampling(oversampling) if normalization_correction <= 0: raise ValueError("'normalization_correction' must be strictly " "positive.") self._normalization_correction = normalization_correction self._data = np.array(data, copy=True, dtype=np.float64) if not np.all(np.isfinite(self._data)): raise ValueError("All elements of input 'data' must be finite.") # set input image related parameters: self._ny, self._nx = self._data.shape self._shape = self._data.shape if self._data.size < 1: raise ValueError("Image data array cannot be zero-sized.") # set the origin of the coordinate system in image's pixel grid: self.origin = origin if flux is None: if self._img_norm is None: self._img_norm = self._compute_raw_image_norm(self._data) flux = self._img_norm self._compute_normalization(normalize) super(FittableImageModel, self).__init__(flux, x_0, y_0) # initialize interpolator: self.compute_interpolator(ikwargs) def _compute_raw_image_norm(self, data): """ Helper function that computes the uncorrected inverse normalization factor of input image data. This quantity is computed as the *sum of all pixel values*. .. note:: This function is intended to be overriden in a subclass if one desires to change the way the normalization factor is computed. """ return np.sum(self._data, dtype=np.float64) def _compute_normalization(self, normalize): """ Helper function that computes (corrected) normalization factor of the original image data. This quantity is computed as the inverse "raw image norm" (or total "flux" of model's image) corrected by the ``normalization_correction``: .. math:: N = 1/(\\Phi * C), where :math:`\\Phi` is the "total flux" of model's image as computed by `_compute_raw_image_norm` and *C* is the normalization correction factor. :math:`\\Phi` is computed only once if it has not been previously computed. Otherwise, the existing (stored) value of :math:`\\Phi` is not modified as :py:class:`FittableImageModel` does not allow image data to be modified after the object is created. .. note:: Normally, this function should not be called by the end-user. It is intended to be overriden in a subclass if one desires to change the way the normalization factor is computed. """ self._normalization_constant = 1.0 / self._normalization_correction if normalize: # compute normalization constant so that # N*C*sum(data) = 1: if self._img_norm is None: self._img_norm = self._compute_raw_image_norm(self._data) if self._img_norm != 0.0 and np.isfinite(self._img_norm): self._normalization_constant /= self._img_norm self._normalization_status = 0 else: self._normalization_constant = 1.0 self._normalization_status = 1 warnings.warn("Overflow encountered while computing " "normalization constant. 
Normalization " "constant will be set to 1.", NonNormalizable) else: self._normalization_status = 2 @property def oversampling(self): """ The factor by which the stored image is oversampled. I.e., an input to this model is multipled by this factor to yield the index into the stored image. """ return self._oversampling def _set_oversampling(self, value): """ This is a private method because it's used in the initializer but the ``oversampling`` """ try: value = float(value) except ValueError: raise ValueError('Oversampling factor must be a scalar') if value <= 0: raise ValueError('Oversampling factor must be greater than 0') self._oversampling = value @property def data(self): """ Get original image data. """ return self._data @property def normalized_data(self): """ Get normalized and/or intensity-corrected image data. """ return (self._normalization_constant * self._data) @property def normalization_constant(self): """ Get normalization constant. """ return self._normalization_constant @property def normalization_status(self): """ Get normalization status. Possible status values are: - 0: **Performed**. Model has been successfuly normalized at user's request. - 1: **Failed**. Attempt to normalize has failed. - 2: **NotRequested**. User did not request model to be normalized. """ return self._normalization_status @property def normalization_correction(self): """ Set/Get flux correction factor. .. note:: When setting correction factor, model's flux will be adjusted accordingly such that if this model was a good fit to some target image before, then it will remain a good fit after correction factor change. """ return self._normalization_correction @normalization_correction.setter def normalization_correction(self, normalization_correction): old_cf = self._normalization_correction self._normalization_correction = normalization_correction self._compute_normalization(normalize=self._normalization_status != 2) # adjust model's flux so that if this model was a good fit to some # target image, then it will remain a good fit after correction factor # change: self.flux *= normalization_correction / old_cf @property def shape(self): """A tuple of dimensions of the data array in numpy style (ny, nx).""" return self._shape @property def nx(self): """Number of columns in the data array.""" return self._nx @property def ny(self): """Number of rows in the data array.""" return self._ny @property def origin(self): """ A tuple of ``x`` and ``y`` coordinates of the origin of the coordinate system in terms of pixels of model's image. When setting the coordinate system origin, a tuple of two `int` or `float` may be used. If origin is set to `None`, the origin of the coordinate system will be set to the middle of the data array (``(npix-1)/2.0``). .. warning:: Modifying `origin` will not adjust (modify) model's parameters `x_0` and `y_0`. 
""" return (self._x_origin, self._y_origin) @origin.setter def origin(self, origin): if origin is None: self._x_origin = (self._nx - 1) / 2.0 self._y_origin = (self._ny - 1) / 2.0 elif hasattr(origin, '__iter__') and len(origin) == 2: self._x_origin, self._y_origin = origin else: raise TypeError("Parameter 'origin' must be either None or an " "iterable with two elements.") @property def x_origin(self): """X-coordinate of the origin of the coordinate system.""" return self._x_origin @property def y_origin(self): """Y-coordinate of the origin of the coordinate system.""" return self._y_origin @property def fill_value(self): """Fill value to be returned for coordinates outside of the domain of definition of the interpolator. If ``fill_value`` is `None`, then values outside of the domain of definition are the ones returned by the interpolator. """ return self._fill_value @fill_value.setter def fill_value(self, fill_value): self._fill_value = fill_value def _store_interpolator_kwargs(self, ikwargs): """ This function should be called in a subclass whenever model's interpolator is (re-)computed. """ self._interpolator_kwargs = copy.deepcopy(ikwargs) @property def interpolator_kwargs(self): """ Get current interpolator's arguments used when interpolator was created. """ return self._interpolator_kwargs def compute_interpolator(self, ikwargs={}): """ Compute/define the interpolating spline. This function can be overriden in a subclass to define custom interpolators. Parameters ---------- ikwargs : dict, optional Additional optional keyword arguments. Possible values are: - **degree** : int, tuple, optional Degree of the interpolating spline. A tuple can be used to provide different degrees for the X- and Y-axes. Default value is degree=3. - **s** : float, optional Non-negative smoothing factor. Default value s=0 corresponds to interpolation. See :py:class:`~scipy.interpolate.RectBivariateSpline` for more details. Notes ----- * When subclassing :py:class:`FittableImageModel` for the purpose of overriding :py:func:`compute_interpolator`, the :py:func:`evaluate` may need to overriden as well depending on the behavior of the new interpolator. In addition, for improved future compatibility, make sure that the overriding method stores keyword arguments ``ikwargs`` by calling ``_store_interpolator_kwargs`` method. * Use caution when modifying interpolator's degree or smoothness in a computationally intensive part of the code as it may decrease code performance due to the need to recompute interpolator. """ from scipy.interpolate import RectBivariateSpline if 'degree' in ikwargs: degree = ikwargs['degree'] if hasattr(degree, '__iter__') and len(degree) == 2: degx = int(degree[0]) degy = int(degree[1]) else: degx = int(degree) degy = int(degree) if degx < 0 or degy < 0: raise ValueError("Interpolator degree must be a non-negative " "integer") else: degx = 3 degy = 3 if 's' in ikwargs: smoothness = ikwargs['s'] else: smoothness = 0 x = np.arange(self._nx, dtype=np.float) y = np.arange(self._ny, dtype=np.float) self.interpolator = RectBivariateSpline( x, y, self._data.T, kx=degx, ky=degx, s=smoothness ) self._store_interpolator_kwargs(ikwargs) def evaluate(self, x, y, flux, x_0, y_0): """ Evaluate the model on some input variables and provided model parameters. 
""" xi = self._oversampling * (np.asarray(x) - x_0) + self._x_origin yi = self._oversampling * (np.asarray(y) - y_0) + self._y_origin f = flux * self._normalization_constant evaluated_model = f * self.interpolator.ev(xi, yi) if self._fill_value is not None: # find indices of pixels that are outside the input pixel grid and # set these pixels to the 'fill_value': invalid = (((xi < 0) | (xi > self._nx - 1)) | ((yi < 0) | (yi > self._ny - 1))) evaluated_model[invalid] = self._fill_value return evaluated_model class IntegratedGaussianPRF(Fittable2DModel): r""" Circular Gaussian model integrated over pixels. Because it is integrated, this model is considered a PRF, *not* a PSF (see :ref:`psf-terminology` for more about the terminology used here.) This model is a Gaussian *integrated* over an area of ``1`` (in units of the model input coordinates, e.g. 1 pixel). This is in contrast to the apparently similar `astropy.modeling.functional_models.Gaussian2D`, which is the value of a 2D Gaussian *at* the input coordinates, with no integration. So this model is equivalent to assuming the PSF is Gaussian at a *sub-pixel* level. Parameters ---------- sigma : float Width of the Gaussian PSF. flux : float (default 1) Total integrated flux over the entire PSF x_0 : float (default 0) Position of the peak in x direction. y_0 : float (default 0) Position of the peak in y direction. Notes ----- This model is evaluated according to the following formula: .. math:: f(x, y) = \frac{F}{4} \left[ {\rm erf} \left(\frac{x - x_0 + 0.5} {\sqrt{2} \sigma} \right) - {\rm erf} \left(\frac{x - x_0 - 0.5} {\sqrt{2} \sigma} \right) \right] \left[ {\rm erf} \left(\frac{y - y_0 + 0.5} {\sqrt{2} \sigma} \right) - {\rm erf} \left(\frac{y - y_0 - 0.5} {\sqrt{2} \sigma} \right) \right] where ``erf`` denotes the error function and ``F`` the total integrated flux. """ flux = Parameter(default=1) x_0 = Parameter(default=0) y_0 = Parameter(default=0) sigma = Parameter(default=1, fixed=True) _erf = None fit_deriv = None @property def bounding_box(self): halfwidth = 4 * self.sigma return ((int(self.y_0 - halfwidth), int(self.y_0 + halfwidth)), (int(self.x_0 - halfwidth), int(self.x_0 + halfwidth))) def __init__(self, sigma=sigma.default, x_0=x_0.default, y_0=y_0.default, flux=flux.default, **kwargs): if self._erf is None: from scipy.special import erf self.__class__._erf = erf super(IntegratedGaussianPRF, self).__init__(n_models=1, sigma=sigma, x_0=x_0, y_0=y_0, flux=flux, **kwargs) def evaluate(self, x, y, flux, x_0, y_0, sigma): """Model function Gaussian PSF model.""" return (flux / 4 * ((self._erf((x - x_0 + 0.5) / (np.sqrt(2) * sigma)) - self._erf((x - x_0 - 0.5) / (np.sqrt(2) * sigma))) * (self._erf((y - y_0 + 0.5) / (np.sqrt(2) * sigma)) - self._erf((y - y_0 - 0.5) / (np.sqrt(2) * sigma))))) class PRFAdapter(Fittable2DModel): """ A model that adapts a supplied PSF model to act as a PRF. It integrates the PSF model over pixel "boxes". A critical built-in assumption is that the PSF model scale and location parameters are in *pixel* units. Parameters ---------- psfmodel : a 2D model The model to assume as representative of the PSF renormalize_psf : bool If True, the model will be integrated from -inf to inf and re-scaled so that the total integrates to 1. Note that this renormalization only occurs *once*, so if the total flux of ``psfmodel`` depends on position, this will *not* be correct. xname : str or None The name of the ``psfmodel`` parameter that corresponds to the x-axis center of the PSF. 
        If None, the model will be assumed to be centered at x=0.
    yname : str or None
        The name of the ``psfmodel`` parameter that corresponds to the
        y-axis center of the PSF. If None, the model will be assumed to be
        centered at y=0.
    fluxname : str or None
        The name of the ``psfmodel`` parameter that corresponds to the total
        flux of the star. If None, a scaling factor will be applied by the
        ``PRFAdapter`` instead of modifying the ``psfmodel``.

    Notes
    -----
    The current implementation of this class (using numerical integration
    for each pixel) is extremely slow, and only suited for experimentation
    over relatively few small regions.
    """

    flux = Parameter(default=1)
    x_0 = Parameter(default=0)
    y_0 = Parameter(default=0)

    def __init__(self, psfmodel, renormalize_psf=True, flux=flux.default,
                 x_0=x_0.default, y_0=y_0.default, xname=None, yname=None,
                 fluxname=None, **kwargs):

        self.psfmodel = psfmodel.copy()

        if renormalize_psf:
            from scipy.integrate import dblquad
            self._psf_scale_factor = 1. / dblquad(self.psfmodel,
                                                  -np.inf, np.inf,
                                                  lambda x: -np.inf,
                                                  lambda x: np.inf)[0]
        else:
            self._psf_scale_factor = 1

        self.xname = xname
        self.yname = yname
        self.fluxname = fluxname

        # these can be used to adjust the integration behavior. Might be
        # used in the future to expose how the integration happens
        self._dblquadkwargs = {}

        super(PRFAdapter, self).__init__(n_models=1, x_0=x_0, y_0=y_0,
                                         flux=flux, **kwargs)

    def evaluate(self, x, y, flux, x_0, y_0):
        """The evaluation function for PRFAdapter."""
        if self.xname is None:
            dx = x - x_0
        else:
            dx = x
            setattr(self.psfmodel, self.xname, x_0)

        if self.yname is None:
            dy = y - y_0
        else:
            dy = y
            setattr(self.psfmodel, self.yname, y_0)

        if self.fluxname is None:
            return (flux * self._psf_scale_factor *
                    self._integrated_psfmodel(dx, dy))
        else:
            setattr(self.psfmodel, self.fluxname,
                    flux * self._psf_scale_factor)
            return self._integrated_psfmodel(dx, dy)

    def _integrated_psfmodel(self, dx, dy):
        from scipy.integrate import dblquad

        # infer type/shape from the PSF model.  Seems wasteful, but the
        # integration step is a *lot* more expensive so it's just peanuts
        out = np.empty_like(self.psfmodel(dx, dy))
        outravel = out.ravel()
        for i, (xi, yi) in enumerate(zip(dx.ravel(), dy.ravel())):
            outravel[i] = dblquad(self.psfmodel,
                                  xi-0.5, xi+0.5,
                                  lambda x: yi-0.5, lambda x: yi+0.5,
                                  **self._dblquadkwargs)[0]
        return out


def prepare_psf_model(psfmodel, xname=None, yname=None, fluxname=None,
                      renormalize_psf=True):
    """
    Convert a 2D PSF model to one suitable for use with
    `BasicPSFPhotometry` or its subclasses.

    The resulting model may be a composite model, but should have only the
    x, y, and flux related parameters un-fixed.

    Parameters
    ----------
    psfmodel : a 2D model
        The model to assume as representative of the PSF.
    xname : str or None
        The name of the ``psfmodel`` parameter that corresponds to the
        x-axis center of the PSF. If None, the model will be assumed to be
        centered at x=0, and a new parameter will be added for the offset.
    yname : str or None
        The name of the ``psfmodel`` parameter that corresponds to the
        y-axis center of the PSF. If None, the model will be assumed to be
        centered at y=0, and a new parameter will be added for the offset.
    fluxname : str or None
        The name of the ``psfmodel`` parameter that corresponds to the total
        flux of the star. If None, a scaling factor will be added to the
        model.
    renormalize_psf : bool
        If True, the model will be integrated from -inf to inf and re-scaled
        so that the total integrates to 1.
Note that this renormalization only occurs *once*, so if the total flux of ``psfmodel`` depends on position, this will *not* be correct. Returns ------- outmod : a model A new model ready to be passed into `BasicPSFPhotometry` or its subclasses. """ if xname is None: xinmod = models.Shift(0, name='x_offset') xname = 'offset_0' else: xinmod = models.Identity(1) xname = xname + '_2' xinmod.fittable = True if yname is None: yinmod = models.Shift(0, name='y_offset') yname = 'offset_1' else: yinmod = models.Identity(1) yname = yname + '_2' yinmod.fittable = True outmod = (xinmod & yinmod) | psfmodel if fluxname is None: outmod = outmod * models.Const2D(1, name='flux_scaling') fluxname = 'amplitude_3' else: fluxname = fluxname + '_2' if renormalize_psf: # we do the import here because other machinery works w/o scipy from scipy import integrate integrand = integrate.dblquad(psfmodel, -np.inf, np.inf, lambda x: -np.inf, lambda x: np.inf)[0] normmod = models.Const2D(1./integrand, name='renormalize_scaling') outmod = outmod * normmod # final setup of the output model - fix all the non-offset/scale # parameters for pnm in outmod.param_names: outmod.fixed[pnm] = pnm not in (xname, yname, fluxname) # and set the names so that BasicPSFPhotometry knows what to do outmod.xname = xname outmod.yname = yname outmod.fluxname = fluxname # now some convenience aliases if reasonable outmod.psfmodel = outmod[2] if 'x_0' not in outmod.param_names and 'y_0' not in outmod.param_names: outmod.x_0 = getattr(outmod, xname) outmod.y_0 = getattr(outmod, yname) if 'flux' not in outmod.param_names: outmod.flux = getattr(outmod, fluxname) return outmod def get_grouped_psf_model(template_psf_model, star_group, pars_to_set): """ Construct a joint PSF model which consists of a sum of PSF's templated on a specific model, but whose parameters are given by a table of objects. Parameters ---------- template_psf_model : `astropy.modeling.Fittable2DModel` instance The model to use for *individual* objects. Must have parameters named ``x_0``, ``y_0``, and ``flux``. star_group : `~astropy.table.Table` Table of stars for which the compound PSF will be constructed. It must have columns named ``x_0``, ``y_0``, and ``flux_0``. Returns ------- group_psf An `astropy.modeling` ``CompoundModel`` instance which is a sum of the given PSF models. """ group_psf = None for star in star_group: psf_to_add = template_psf_model.copy() for param_tab_name, param_name in pars_to_set.items(): setattr(psf_to_add, param_name, star[param_tab_name]) if group_psf is None: # this is the first one only group_psf = psf_to_add else: group_psf += psf_to_add return group_psf photutils-0.4/photutils/psf/photometry.py0000644000214200020070000011746213175634565023234 0ustar lbradleySTSCI\science00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst """Module which provides classes to perform PSF Photometry""" from __future__ import division import numpy as np import warnings from astropy.modeling.fitting import LevMarLSQFitter from astropy.nddata.utils import overlap_slices from astropy.stats import gaussian_sigma_to_fwhm, SigmaClip from astropy.table import Table, Column, vstack, hstack from astropy.utils import deprecated_renamed_argument from astropy.utils.exceptions import AstropyUserWarning from . 
import DAOGroup from .funcs import subtract_psf, _extract_psf_fitting_names from .models import get_grouped_psf_model from ..aperture import CircularAperture, aperture_photometry from ..background import MMMBackground from ..detection import DAOStarFinder __all__ = ['BasicPSFPhotometry', 'IterativelySubtractedPSFPhotometry', 'DAOPhotPSFPhotometry'] class BasicPSFPhotometry(object): """ This class implements a PSF photometry algorithm that can find sources in an image, group overlapping sources into a single model, fit the model to the sources, and subtracting the models from the image. This is roughly equivalent to the DAOPHOT routines FIND, GROUP, NSTAR, and SUBTRACT. This implementation allows a flexible and customizable interface to perform photometry. For instance, one is able to use different implementations for grouping and finding sources by using ``group_maker`` and ``finder`` respectivelly. In addition, sky background estimation is performed by ``bkg_estimator``. Parameters ---------- group_maker : callable or `~photutils.psf.GroupStarsBase` ``group_maker`` should be able to decide whether a given star overlaps with any other and label them as beloging to the same group. ``group_maker`` receives as input an `~astropy.table.Table` object with columns named as ``id``, ``x_0``, ``y_0``, in which ``x_0`` and ``y_0`` have the same meaning of ``xcentroid`` and ``ycentroid``. This callable must return an `~astropy.table.Table` with columns ``id``, ``x_0``, ``y_0``, and ``group_id``. The column ``group_id`` should cotain integers starting from ``1`` that indicate which group a given source belongs to. See, e.g., `~photutils.psf.DAOGroup`. bkg_estimator : callable, instance of any `~photutils.BackgroundBase` subclass, or None ``bkg_estimator`` should be able to compute either a scalar background or a 2D background of a given 2D image. See, e.g., `~photutils.background.MedianBackground`. If None, no background subtraction is performed. psf_model : `astropy.modeling.Fittable2DModel` instance PSF or PRF model to fit the data. Could be one of the models in this package like `~photutils.psf.sandbox.DiscretePRF`, `~photutils.psf.IntegratedGaussianPRF`, or any other suitable 2D model. This object needs to identify three parameters (position of center in x and y coordinates and the flux) in order to set them to suitable starting values for each fit. The names of these parameters should be given as ``x_0``, ``y_0`` and ``flux``. `~photutils.psf.prepare_psf_model` can be used to prepare any 2D model to match this assumption. fitshape : int or length-2 array-like Rectangular shape around the center of a star which will be used to collect the data to do the fitting. Can be an integer to be the same along both axes. E.g., 5 is the same as (5, 5), which means to fit only at the following relative pixel positions: [-2, -1, 0, 1, 2]. Each element of ``fitshape`` must be an odd number. finder : callable or instance of any `~photutils.detection.StarFinderBase` subclasses or None ``finder`` should be able to identify stars, i.e. compute a rough estimate of the centroids, in a given 2D image. ``finder`` receives as input a 2D image and returns an `~astropy.table.Table` object which contains columns with names: ``id``, ``xcentroid``, ``ycentroid``, and ``flux``. In which ``id`` is an integer-valued column starting from ``1``, ``xcentroid`` and ``ycentroid`` are center position estimates of the sources and ``flux`` contains flux estimates of the sources. See, e.g., `~photutils.detection.DAOStarFinder`. 
If ``finder`` is ``None``, initial guesses for positions of objects must be provided. fitter : `~astropy.modeling.fitting.Fitter` instance Fitter object used to compute the optimized centroid positions and/or flux of the identified sources. See `~astropy.modeling.fitting` for more details on fitters. aperture_radius : float or None The radius (in units of pixels) used to compute initial estimates for the fluxes of sources. If ``None``, one FWHM will be used if it can be determined from the ```psf_model``. Notes ----- Note that an ambiguity arises whenever ``finder`` and ``init_guesses`` (keyword argument for ``do_photometry``) are both not ``None``. In this case, ``finder`` is ignored and initial guesses are taken from ``init_guesses``. In addition, an warning is raised to remaind the user about this behavior. If there are problems with fitting large groups, change the parameters of the grouping algorithm to reduce the number of sources in each group or input a ``star_groups`` table that only includes the groups that are relevant (e.g. manually remove all entries that coincide with artifacts). References ---------- [1] Stetson, Astronomical Society of the Pacific, Publications, (ISSN 0004-6280), vol. 99, March 1987, p. 191-222. Available at: http://adsabs.harvard.edu/abs/1987PASP...99..191S """ def __init__(self, group_maker, bkg_estimator, psf_model, fitshape, finder=None, fitter=LevMarLSQFitter(), aperture_radius=None): self.group_maker = group_maker self.bkg_estimator = bkg_estimator self.psf_model = psf_model self.fitter = fitter self.fitshape = fitshape self.finder = finder self.aperture_radius = aperture_radius self._pars_to_set = None self._pars_to_output = None self._residual_image = None @property def fitshape(self): return self._fitshape @fitshape.setter def fitshape(self, value): value = np.asarray(value) # assume a lone value should mean both axes if value.shape == (): value = np.array((value, value)) if value.size == 2: if np.all(value) > 0: if np.all(value % 2) == 1: self._fitshape = tuple(value) else: raise ValueError('fitshape must be odd integer-valued, ' 'received fitshape = {}'.format(value)) else: raise ValueError('fitshape must have positive elements, ' 'received fitshape = {}'.format(value)) else: raise ValueError('fitshape must have two dimensions, ' 'received fitshape = {}'.format(value)) @property def aperture_radius(self): return self._aperture_radius @aperture_radius.setter def aperture_radius(self, value): if isinstance(value, (int, float)) and value > 0: self._aperture_radius = value elif value is None: self._aperture_radius = value else: raise ValueError('aperture_radius must be a real-valued ' 'number, received aperture_radius = {}' .format(value)) def get_residual_image(self): """ Returns an image that is the result of the subtraction between the original image and the fitted sources. Returns ------- residual_image : 2D array-like, `~astropy.io.fits.ImageHDU`, `~astropy.io.fits.HDUList` """ return self._residual_image @deprecated_renamed_argument('positions', 'init_guesses', '0.4') def __call__(self, image, init_guesses=None): """ Performs PSF photometry. See `do_photometry` for more details including the `__call__` signature. """ return self.do_photometry(image, init_guesses) @deprecated_renamed_argument('positions', 'init_guesses', '0.4') def do_photometry(self, image, init_guesses=None): """ Perform PSF photometry in ``image``. This method assumes that ``psf_model`` has centroids and flux parameters which will be fitted to the data provided in ``image``. 
A compound model, in fact a sum of ``psf_model``, will be fitted to groups of stars automatically identified by ``group_maker``. Also, ``image`` is not assumed to be background subtracted. If ``init_guesses`` are not ``None`` then this method uses ``init_guesses`` as initial guesses for the centroids. If the centroid positions are set as ``fixed`` in the PSF model ``psf_model``, then the optimizer will only consider the flux as a variable. Parameters ---------- image : 2D array-like, `~astropy.io.fits.ImageHDU`, `~astropy.io.fits.HDUList` Image to perform photometry. init_guesses: `~astropy.table.Table` Table which contains the initial guesses (estimates) for the set of parameters. Columns 'x_0' and 'y_0' which represent the positions (in pixel coordinates) for each object must be present. 'flux_0' can also be provided to set initial fluxes. If 'flux_0' is not provided, aperture photometry is used to estimate initial values for the fluxes. Additional columns of the form '_0' will be used to set the initial guess for any parameters of the ``psf_model`` model that are not fixed. Returns ------- output_tab : `~astropy.table.Table` or None Table with the photometry results, i.e., centroids and fluxes estimations and the initial estimates used to start the fitting process. Uncertainties on the fitted parameters are reported as columns called ``_unc`` provided that the fitter object contains a dictionary called ``fit_info`` with the key ``param_cov``, which contains the covariance matrix. If ``param_cov`` is not present, uncertanties are not reported. """ if self.bkg_estimator is not None: image = image - self.bkg_estimator(image) if self.aperture_radius is None: if hasattr(self.psf_model, 'fwhm'): self.aperture_radius = self.psf_model.fwhm.value elif hasattr(self.psf_model, 'sigma'): self.aperture_radius = (self.psf_model.sigma.value * gaussian_sigma_to_fwhm) if init_guesses is not None: # make sure the code does not modify user's input init_guesses = init_guesses.copy() if self.aperture_radius is None: if 'flux_0' not in init_guesses.colnames: raise ValueError('aperture_radius is None and could not ' 'be determined by psf_model. Please, ' 'either provided a value for ' 'aperture_radius or define fwhm/sigma ' 'at psf_model.') if self.finder is not None: warnings.warn('Both init_guesses and finder are different ' 'than None, which is ambiguous. 
finder is ' 'going to be ignored.', AstropyUserWarning) if 'flux_0' not in init_guesses.colnames: apertures = CircularAperture((init_guesses['x_0'], init_guesses['y_0']), r=self.aperture_radius) init_guesses['flux_0'] = aperture_photometry( image, apertures)['aperture_sum'] else: if self.finder is None: raise ValueError('Finder cannot be None if init_guesses are ' 'not given.') sources = self.finder(image) if len(sources) > 0: apertures = CircularAperture((sources['xcentroid'], sources['ycentroid']), r=self.aperture_radius) sources['aperture_flux'] = aperture_photometry( image, apertures)['aperture_sum'] init_guesses = Table(names=['x_0', 'y_0', 'flux_0'], data=[sources['xcentroid'], sources['ycentroid'], sources['aperture_flux']]) self._define_fit_param_names() for p0, param in self._pars_to_set.items(): if p0 not in init_guesses.colnames: init_guesses[p0] = (len(init_guesses) * [getattr(self.psf_model, param).value]) star_groups = self.group_maker(init_guesses) output_tab, self._residual_image = self.nstar(image, star_groups) star_groups = star_groups.group_by('group_id') output_tab = hstack([star_groups, output_tab]) return output_tab def nstar(self, image, star_groups): """ Fit, as appropriate, a compound or single model to the given ``star_groups``. Groups are fitted sequentially from the smallest to the biggest. In each iteration, ``image`` is subtracted by the previous fitted group. Parameters ---------- image : numpy.ndarray Background-subtracted image. star_groups : `~astropy.table.Table` This table must contain the following columns: ``id``, ``group_id``, ``x_0``, ``y_0``, ``flux_0``. ``x_0`` and ``y_0`` are initial estimates of the centroids and ``flux_0`` is an initial estimate of the flux. Additionally, columns named as ``_0`` are required if any other parameter in the psf model is free (i.e., the ``fixed`` attribute of that parameter is ``False``). Returns ------- result_tab : `~astropy.table.Table` Astropy table that contains photometry results. image : numpy.ndarray Residual image. 
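        Examples
        --------
        A minimal sketch of the ``star_groups`` table expected by this
        method (the identifiers, positions, and fluxes below are
        placeholders used only to show the required columns)::

            from astropy.table import Table

            star_groups = Table(
                names=['id', 'group_id', 'x_0', 'y_0', 'flux_0'],
                data=[[1, 2], [1, 1], [10.2, 12.7], [21.5, 22.1],
                      [150., 310.]])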
""" result_tab = Table() for param_tab_name in self._pars_to_output.keys(): result_tab.add_column(Column(name=param_tab_name)) unc_tab = Table() for param, isfixed in self.psf_model.fixed.items(): if not isfixed: unc_tab.add_column(Column(name=param + "_unc")) y, x = np.indices(image.shape) star_groups = star_groups.group_by('group_id') for n in range(len(star_groups.groups)): group_psf = get_grouped_psf_model(self.psf_model, star_groups.groups[n], self._pars_to_set) usepixel = np.zeros_like(image, dtype=np.bool) for row in star_groups.groups[n]: usepixel[overlap_slices(large_array_shape=image.shape, small_array_shape=self.fitshape, position=(row['y_0'], row['x_0']), mode='trim')[0]] = True fit_model = self.fitter(group_psf, x[usepixel], y[usepixel], image[usepixel]) param_table = self._model_params2table(fit_model, len(star_groups.groups[n])) result_tab = vstack([result_tab, param_table]) if 'param_cov' in self.fitter.fit_info.keys(): unc_tab = vstack([unc_tab, self._get_uncertainties( len(star_groups.groups[n]))]) try: from astropy.nddata.utils import NoOverlapError except ImportError: raise ImportError("astropy 1.1 or greater is required in " "order to use this class.") # do not subtract if the fitting did not go well try: image = subtract_psf(image, self.psf_model, param_table, subshape=self.fitshape) except NoOverlapError: pass if 'param_cov' in self.fitter.fit_info.keys(): result_tab = hstack([result_tab, unc_tab]) return result_tab, image def _define_fit_param_names(self): """ Convenience function to define mappings between the names of the columns in the initial guess table (and the name of the fitted parameters) and the actual name of the parameters in the model. This method sets the following parameters on the ``self`` object: * ``pars_to_set`` : Dict which maps the names of the parameters initial guesses to the actual name of the parameter in the model. * ``pars_to_output`` : Dict which maps the names of the fitted parameters to the actual name of the parameter in the model. """ xname, yname, fluxname = _extract_psf_fitting_names(self.psf_model) self._pars_to_set = {'x_0': xname, 'y_0': yname, 'flux_0': fluxname} self._pars_to_output = {'x_fit': xname, 'y_fit': yname, 'flux_fit': fluxname} for p, isfixed in self.psf_model.fixed.items(): p0 = p + '_0' pfit = p + '_fit' if p not in (xname, yname, fluxname) and not isfixed: self._pars_to_set[p0] = p self._pars_to_output[pfit] = p def _get_uncertainties(self, star_group_size): """ Retrieve uncertainties on fitted parameters from the fitter object. Parameters ---------- star_group_size : int Number of stars in the given group. Returns ------- unc_tab : `~astropy.table.Table` Table which contains uncertainties on the fitted parameters. The uncertainties are reported as one standard deviation. """ unc_tab = Table() for param_name in self.psf_model.param_names: if not self.psf_model.fixed[param_name]: unc_tab.add_column(Column(name=param_name + "_unc", data=np.empty(star_group_size))) if 'param_cov' in self.fitter.fit_info.keys(): if self.fitter.fit_info['param_cov'] is not None: k = 0 n_fit_params = len(unc_tab.colnames) for i in range(star_group_size): unc_tab[i] = np.sqrt(np.diag( self.fitter.fit_info['param_cov']) )[k: k + n_fit_params] k = k + n_fit_params return unc_tab def _model_params2table(self, fit_model, star_group_size): """ Place fitted parameters into an astropy table. Parameters ---------- fit_model : `astropy.modeling.Fittable2DModel` instance PSF or PRF model to fit the data. 
Could be one of the models in this package like `~photutils.psf.sandbox.DiscretePRF`, `~photutils.psf.IntegratedGaussianPRF`, or any other suitable 2D model. star_group_size : int Number of stars in the given group. Returns ------- param_tab : `~astropy.table.Table` Table that contains the fitted parameters. """ param_tab = Table() for param_tab_name in self._pars_to_output.keys(): param_tab.add_column(Column(name=param_tab_name, data=np.empty(star_group_size))) if star_group_size > 1: for i in range(star_group_size): for param_tab_name, param_name in self._pars_to_output.items(): param_tab[param_tab_name][i] = getattr(fit_model, param_name + '_' + str(i)).value else: for param_tab_name, param_name in self._pars_to_output.items(): param_tab[param_tab_name] = getattr(fit_model, param_name).value return param_tab class IterativelySubtractedPSFPhotometry(BasicPSFPhotometry): """ This class implements an iterative algorithm to perform point spread function photometry in crowded fields. This consists of applying a loop of find sources, make groups, fit groups, subtract groups, and then repeat until no more stars are detected or a given number of iterations is reached. Parameters ---------- group_maker : callable or `~photutils.psf.GroupStarsBase` ``group_maker`` should be able to decide whether a given star overlaps with any other and label them as beloging to the same group. ``group_maker`` receives as input an `~astropy.table.Table` object with columns named as ``id``, ``x_0``, ``y_0``, in which ``x_0`` and ``y_0`` have the same meaning of ``xcentroid`` and ``ycentroid``. This callable must return an `~astropy.table.Table` with columns ``id``, ``x_0``, ``y_0``, and ``group_id``. The column ``group_id`` should cotain integers starting from ``1`` that indicate which group a given source belongs to. See, e.g., `~photutils.psf.DAOGroup`. bkg_estimator : callable, instance of any `~photutils.BackgroundBase` subclass, or None ``bkg_estimator`` should be able to compute either a scalar background or a 2D background of a given 2D image. See, e.g., `~photutils.background.MedianBackground`. If None, no background subtraction is performed. psf_model : `astropy.modeling.Fittable2DModel` instance PSF or PRF model to fit the data. Could be one of the models in this package like `~photutils.psf.sandbox.DiscretePRF`, `~photutils.psf.IntegratedGaussianPRF`, or any other suitable 2D model. This object needs to identify three parameters (position of center in x and y coordinates and the flux) in order to set them to suitable starting values for each fit. The names of these parameters should be given as ``x_0``, ``y_0`` and ``flux``. `~photutils.psf.prepare_psf_model` can be used to prepare any 2D model to match this assumption. fitshape : int or length-2 array-like Rectangular shape around the center of a star which will be used to collect the data to do the fitting. Can be an integer to be the same along both axes. E.g., 5 is the same as (5, 5), which means to fit only at the following relative pixel positions: [-2, -1, 0, 1, 2]. Each element of ``fitshape`` must be an odd number. finder : callable or instance of any `~photutils.detection.StarFinderBase` subclasses ``finder`` should be able to identify stars, i.e. compute a rough estimate of the centroids, in a given 2D image. ``finder`` receives as input a 2D image and returns an `~astropy.table.Table` object which contains columns with names: ``id``, ``xcentroid``, ``ycentroid``, and ``flux``. 
In which ``id`` is an integer-valued column starting from ``1``, ``xcentroid`` and ``ycentroid`` are center position estimates of the sources and ``flux`` contains flux estimates of the sources. See, e.g., `~photutils.detection.DAOStarFinder` or `~photutils.detection.IRAFStarFinder`. fitter : `~astropy.modeling.fitting.Fitter` instance Fitter object used to compute the optimized centroid positions and/or flux of the identified sources. See `~astropy.modeling.fitting` for more details on fitters. aperture_radius : float The radius (in units of pixels) used to compute initial estimates for the fluxes of sources. If ``None``, one FWHM will be used if it can be determined from the ```psf_model``. niters : int or None Number of iterations to perform of the loop FIND, GROUP, SUBTRACT, NSTAR. If None, iterations will proceed until no more stars remain. Note that in this case it is *possible* that the loop will never end if the PSF has structure that causes subtraction to create new sources infinitely. Notes ----- If there are problems with fitting large groups, change the parameters of the grouping algorithm to reduce the number of sources in each group or input a ``star_groups`` table that only includes the groups that are relevant (e.g. manually remove all entries that coincide with artifacts). References ---------- [1] Stetson, Astronomical Society of the Pacific, Publications, (ISSN 0004-6280), vol. 99, March 1987, p. 191-222. Available at: http://adsabs.harvard.edu/abs/1987PASP...99..191S """ def __init__(self, group_maker, bkg_estimator, psf_model, fitshape, finder, fitter=LevMarLSQFitter(), niters=3, aperture_radius=None): super(IterativelySubtractedPSFPhotometry, self).__init__( group_maker, bkg_estimator, psf_model, fitshape, finder, fitter, aperture_radius) self.niters = niters @property def niters(self): return self._niters @niters.setter def niters(self, value): if value is None: self._niters = None else: try: if value <= 0: raise ValueError('niters must be positive.') else: self._niters = int(value) except ValueError: raise ValueError('niters must be None or an integer or ' 'convertable into an integer.') @property def finder(self): return self._finder @finder.setter def finder(self, value): if value is None: raise ValueError("finder cannot be None for " "IterativelySubtractedPSFPhotometry - you may " "want to use BasicPSFPhotometry. Please see the " "Detection section on photutils documentation.") else: self._finder = value @deprecated_renamed_argument('positions', 'init_guesses', '0.4') def do_photometry(self, image, init_guesses=None): """ Perform PSF photometry in ``image``. This method assumes that ``psf_model`` has centroids and flux parameters which will be fitted to the data provided in ``image``. A compound model, in fact a sum of ``psf_model``, will be fitted to groups of stars automatically identified by ``group_maker``. Also, ``image`` is not assumed to be background subtracted. If ``init_guesses`` are not ``None`` then this method uses ``init_guesses`` as initial guesses for the centroids. If the centroid positions are set as ``fixed`` in the PSF model ``psf_model``, then the optimizer will only consider the flux as a variable. Parameters ---------- image : 2D array-like, `~astropy.io.fits.ImageHDU`, `~astropy.io.fits.HDUList` Image to perform photometry. init_guesses: `~astropy.table.Table` Table which contains the initial guesses (estimates) for the set of parameters. 
Columns 'x_0' and 'y_0' which represent the positions (in pixel coordinates) for each object must be present. 'flux_0' can also be provided to set initial fluxes. If 'flux_0' is not provided, aperture photometry is used to estimate initial values for the fluxes. Additional columns of the form '_0' will be used to set the initial guess for any parameters of the ``psf_model`` model that are not fixed. Returns ------- output_table : `~astropy.table.Table` or None Table with the photometry results, i.e., centroids and fluxes estimations and the initial estimates used to start the fitting process. Uncertainties on the fitted parameters are reported as columns called ``_unc`` provided that the fitter object contains a dictionary called ``fit_info`` with the key ``param_cov``, which contains the covariance matrix. """ if init_guesses is not None: table = super(IterativelySubtractedPSFPhotometry, self).do_photometry(image, init_guesses) table['iter_detected'] = np.ones(table['x_fit'].shape, dtype=np.int32) # n_start = 2 because it starts in the second iteration # since the first iteration is above output_table = self._do_photometry(init_guesses.colnames, n_start=2) output_table = vstack([table, output_table]) else: if self.bkg_estimator is not None: self._residual_image = image - self.bkg_estimator(image) if self.aperture_radius is None: if hasattr(self.psf_model, 'fwhm'): self.aperture_radius = self.psf_model.fwhm.value elif hasattr(self.psf_model, 'sigma'): self.aperture_radius = (self.psf_model.sigma.value * gaussian_sigma_to_fwhm) output_table = self._do_photometry(['x_0', 'y_0', 'flux_0']) return output_table def _do_photometry(self, param_tab, n_start=1): """ Helper function which performs the iterations of the photometry process. Parameters ---------- param_names : list Names of the columns which represent the initial guesses. For example, ['x_0', 'y_0', 'flux_0'], for intial guesses on the center positions and the flux. n_start : int Integer representing the start index of the iteration. It is 1 if init_guesses are None, and 2 otherwise. Returns ------- output_table : `~astropy.table.Table` or None Table with the photometry results, i.e., centroids and fluxes estimations and the initial estimates used to start the fitting process. 
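        Examples
        --------
        This helper is normally driven through the public interface rather
        than called directly; a minimal, illustrative setup (the numerical
        values and the ``some_image`` 2D array below are placeholders, not
        recommendations) might look like::

            from astropy.modeling.fitting import LevMarLSQFitter
            from photutils.background import MMMBackground
            from photutils.detection import DAOStarFinder
            from photutils.psf import (DAOGroup, IntegratedGaussianPRF,
                                       IterativelySubtractedPSFPhotometry)

            photometry = IterativelySubtractedPSFPhotometry(
                group_maker=DAOGroup(crit_separation=8.),
                bkg_estimator=MMMBackground(),
                psf_model=IntegratedGaussianPRF(sigma=2.),
                fitshape=(7, 7),
                finder=DAOStarFinder(threshold=5., fwhm=4.7),
                fitter=LevMarLSQFitter(),
                niters=2,
                aperture_radius=5.)

            # ``some_image`` is a background-unsubtracted 2D array
            result_tab = photometry(image=some_image)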
""" output_table = Table() self._define_fit_param_names() for (init_parname, fit_parname) in zip(self._pars_to_set.keys(), self._pars_to_output.keys()): output_table.add_column(Column(name=init_parname)) output_table.add_column(Column(name=fit_parname)) sources = self.finder(self._residual_image) n = n_start while(len(sources) > 0 and (self.niters is None or n <= self.niters)): apertures = CircularAperture((sources['xcentroid'], sources['ycentroid']), r=self.aperture_radius) sources['aperture_flux'] = aperture_photometry( self._residual_image, apertures)['aperture_sum'] init_guess_tab = Table(names=['id', 'x_0', 'y_0', 'flux_0'], data=[sources['id'], sources['xcentroid'], sources['ycentroid'], sources['aperture_flux']]) for param_tab_name, param_name in self._pars_to_set.items(): if param_tab_name not in (['x_0', 'y_0', 'flux_0']): init_guess_tab.add_column( Column(name=param_tab_name, data=(getattr(self.psf_model, param_name) * np.ones(len(sources))))) star_groups = self.group_maker(init_guess_tab) table, self._residual_image = super( IterativelySubtractedPSFPhotometry, self).nstar( self._residual_image, star_groups) star_groups = star_groups.group_by('group_id') table = hstack([star_groups, table]) table['iter_detected'] = n*np.ones(table['x_fit'].shape, dtype=np.int32) output_table = vstack([output_table, table]) # do not warn if no sources are found beyond the first iteration with warnings.catch_warnings(): warnings.simplefilter('ignore', AstropyUserWarning) sources = self.finder(self._residual_image) n += 1 return output_table class DAOPhotPSFPhotometry(IterativelySubtractedPSFPhotometry): """ This class implements an iterative algorithm based on the DAOPHOT algorithm presented by Stetson (1987) to perform point spread function photometry in crowded fields. This consists of applying a loop of find sources, make groups, fit groups, subtract groups, and then repeat until no more stars are detected or a given number of iterations is reached. Basically, this classes uses `~photutils.psf.IterativelySubtractedPSFPhotometry`, but with grouping, finding, and background estimation routines defined a priori. More precisely, this class uses `~photutils.psf.DAOGroup` for grouping, `~photutils.detection.DAOStarFinder` for finding sources, and `~photutils.background.MMMBackground` for background estimation. Those classes are based on GROUP, FIND, and SKY routines used in DAOPHOT, respectively. The parameter ``crit_separation`` is associated with `~photutils.psf.DAOGroup`. ``sigma_clip`` is associated with `~photutils.background.MMMBackground`. ``threshold`` and ``fwhm`` are associated with `~photutils.detection.DAOStarFinder`. Parameters from ``ratio`` to ``roundhi`` are also associated with `~photutils.detection.DAOStarFinder`. Parameters ---------- crit_separation : float or int Distance, in units of pixels, such that any two stars separated by less than this distance will be placed in the same group. threshold : float The absolute image value above which to select sources. fwhm : float The full-width half-maximum (FWHM) of the major axis of the Gaussian kernel in units of pixels. psf_model : `astropy.modeling.Fittable2DModel` instance PSF or PRF model to fit the data. Could be one of the models in this package like `~photutils.psf.sandbox.DiscretePRF`, `~photutils.psf.IntegratedGaussianPRF`, or any other suitable 2D model. This object needs to identify three parameters (position of center in x and y coordinates and the flux) in order to set them to suitable starting values for each fit. 
The names of these parameters should be given as ``x_0``, ``y_0`` and ``flux``. `~photutils.psf.prepare_psf_model` can be used to prepare any 2D model to match this assumption. fitshape : int or length-2 array-like Rectangular shape around the center of a star which will be used to collect the data to do the fitting. Can be an integer to be the same along both axes. E.g., 5 is the same as (5, 5), which means to fit only at the following relative pixel positions: [-2, -1, 0, 1, 2]. Each element of ``fitshape`` must be an odd number. sigma : float, optional Number of standard deviations used to perform sigma clip with a `astropy.stats.SigmaClip` object. ratio : float, optional The ratio of the minor to major axis standard deviations of the Gaussian kernel. ``ratio`` must be strictly positive and less than or equal to 1.0. The default is 1.0 (i.e., a circular Gaussian kernel). theta : float, optional The position angle (in degrees) of the major axis of the Gaussian kernel measured counter-clockwise from the positive x axis. sigma_radius : float, optional The truncation radius of the Gaussian kernel in units of sigma (standard deviation) [``1 sigma = FWHM / (2.0*sqrt(2.0*log(2.0)))``]. sharplo : float, optional The lower bound on sharpness for object detection. sharphi : float, optional The upper bound on sharpness for object detection. roundlo : float, optional The lower bound on roundess for object detection. roundhi : float, optional The upper bound on roundess for object detection. fitter : `~astropy.modeling.fitting.Fitter` instance Fitter object used to compute the optimized centroid positions and/or flux of the identified sources. See `~astropy.modeling.fitting` for more details on fitters. niters : int or None Number of iterations to perform of the loop FIND, GROUP, SUBTRACT, NSTAR. If None, iterations will proceed until no more stars remain. Note that in this case it is *possible* that the loop will never end if the PSF has structure that causes subtraction to create new sources infinitely. aperture_radius : float The radius (in units of pixels) used to compute initial estimates for the fluxes of sources. If ``None``, one FWHM will be used if it can be determined from the ```psf_model``. Notes ----- If there are problems with fitting large groups, change the parameters of the grouping algorithm to reduce the number of sources in each group or input a ``star_groups`` table that only includes the groups that are relevant (e.g. manually remove all entries that coincide with artifacts). References ---------- [1] Stetson, Astronomical Society of the Pacific, Publications, (ISSN 0004-6280), vol. 99, March 1987, p. 191-222. 
Available at: http://adsabs.harvard.edu/abs/1987PASP...99..191S """ def __init__(self, crit_separation, threshold, fwhm, psf_model, fitshape, sigma=3., ratio=1.0, theta=0.0, sigma_radius=1.5, sharplo=0.2, sharphi=1.0, roundlo=-1.0, roundhi=1.0, fitter=LevMarLSQFitter(), niters=3, aperture_radius=None): self.crit_separation = crit_separation self.threshold = threshold self.fwhm = fwhm self.sigma = sigma self.ratio = ratio self.theta = theta self.sigma_radius = sigma_radius self.sharplo = sharplo self.sharphi = sharphi self.roundlo = roundlo self.roundhi = roundhi group_maker = DAOGroup(crit_separation=self.crit_separation) bkg_estimator = MMMBackground(sigma_clip=SigmaClip(sigma=self.sigma)) finder = DAOStarFinder(threshold=self.threshold, fwhm=self.fwhm, ratio=self.ratio, theta=self.theta, sigma_radius=self.sigma_radius, sharplo=self.sharplo, sharphi=self.sharphi, roundlo=self.roundlo, roundhi=self.roundhi) super(DAOPhotPSFPhotometry, self).__init__( group_maker=group_maker, bkg_estimator=bkg_estimator, psf_model=psf_model, fitshape=fitshape, finder=finder, fitter=fitter, niters=niters, aperture_radius=aperture_radius) photutils-0.4/photutils/psf/sandbox.py0000644000214200020070000003553513175634565022460 0ustar lbradleySTSCI\science00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst """ This module stores work related to photutils.psf that is not quite ready for prime-time (i.e., is not considered a stable public API), but is included either for experimentation or as legacy code. """ from __future__ import (absolute_import, unicode_literals, division, print_function) import numpy as np from astropy.table import Table from astropy.modeling import Parameter, Fittable2DModel from astropy.modeling.fitting import LevMarLSQFitter from astropy.nddata.utils import subpixel_indices, extract_array from astropy import wcs as fitswcs from ..utils import mask_to_mirrored_num __all__ = ['DiscretePRF', 'Reproject'] __doctest_requires__ = {('Reproject'): ['gwcs']} class DiscretePRF(Fittable2DModel): """ A discrete Pixel Response Function (PRF) model. The discrete PRF model stores images of the PRF at different subpixel positions or offsets as a lookup table. The resolution is given by the subsampling parameter, which states in how many subpixels a pixel is divided. In the typical case of wanting to create a PRF from an image with many point sources, use the `~DiscretePRF.create_from_image` method, rather than directly initializing this class. The discrete PRF model class in initialized with a 4 dimensional array, that contains the PRF images at different subpixel positions. The definition of the axes is as following: 1. Axis: y subpixel position 2. Axis: x subpixel position 3. Axis: y direction of the PRF image 4. Axis: x direction of the PRF image The total array therefore has the following shape (subsampling, subsampling, prf_size, prf_size) Parameters ---------- prf_array : ndarray Array containing PRF images. normalize : bool Normalize PRF images to unity. Equivalent to saying there is *no* flux outside the bounds of the PRF images. subsampling : int, optional Factor of subsampling. Default = 1. Notes ----- See :ref:`psf-terminology` for more details on the distinction between PSF and PRF as used in this module. 
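    Examples
    --------
    A minimal sketch of building a PRF from image data (the synthetic image
    and the source positions below are made up for illustration)::

        import numpy as np
        from photutils.psf.sandbox import DiscretePRF

        # synthetic image containing two identical Gaussian-like "stars"
        yy, xx = np.mgrid[0:40, 0:40]
        image = np.zeros((40, 40))
        for xc, yc in [(10, 10), (30, 28)]:
            image += np.exp(-((xx - xc)**2 + (yy - yc)**2) / (2. * 1.5**2))

        # (x, y) positions of the stars used to estimate the PRF
        positions = [(10, 10), (30, 28)]
        prf = DiscretePRF.create_from_image(image, positions, size=7)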
""" flux = Parameter('flux') x_0 = Parameter('x_0') y_0 = Parameter('y_0') def __init__(self, prf_array, normalize=True, subsampling=1): # Array shape and dimension check if subsampling == 1: if prf_array.ndim == 2: prf_array = np.array([[prf_array]]) if prf_array.ndim != 4: raise TypeError('Array must have 4 dimensions.') if prf_array.shape[:2] != (subsampling, subsampling): raise TypeError('Incompatible subsampling and array size') if np.isnan(prf_array).any(): raise Exception("Array contains NaN values. Can't create PRF.") # Normalize if requested if normalize: for i in range(prf_array.shape[0]): for j in range(prf_array.shape[1]): prf_array[i, j] /= prf_array[i, j].sum() # Set PRF asttributes self._prf_array = prf_array self.subsampling = subsampling constraints = {'fixed': {'x_0': True, 'y_0': True}} x_0 = 0 y_0 = 0 flux = 1 super(DiscretePRF, self).__init__(n_models=1, x_0=x_0, y_0=y_0, flux=flux, **constraints) self.fitter = LevMarLSQFitter() @property def prf_shape(self): """Shape of the PRF image.""" return self._prf_array.shape[-2:] def evaluate(self, x, y, flux, x_0, y_0): """ Discrete PRF model evaluation. Given a certain position and flux the corresponding image of the PSF is chosen and scaled to the flux. If x and y are outside the boundaries of the image, zero will be returned. Parameters ---------- x : float x coordinate array in pixel coordinates. y : float y coordinate array in pixel coordinates. flux : float Model flux. x_0 : float x position of the center of the PRF. y_0 : float y position of the center of the PRF. """ # Convert x and y to index arrays x = (x - x_0 + 0.5 + self.prf_shape[1] // 2).astype('int') y = (y - y_0 + 0.5 + self.prf_shape[0] // 2).astype('int') # Get subpixel indices y_sub, x_sub = subpixel_indices((y_0, x_0), self.subsampling) # Out of boundary masks x_bound = np.logical_or(x < 0, x >= self.prf_shape[1]) y_bound = np.logical_or(y < 0, y >= self.prf_shape[0]) out_of_bounds = np.logical_or(x_bound, y_bound) # Set out of boundary indices to zero x[x_bound] = 0 y[y_bound] = 0 result = flux * self._prf_array[int(y_sub), int(x_sub)][y, x] # Set out of boundary values to zero result[out_of_bounds] = 0 return result @classmethod def create_from_image(cls, imdata, positions, size, fluxes=None, mask=None, mode='mean', subsampling=1, fix_nan=False): """ Create a discrete point response function (PRF) from image data. Given a list of positions and size this function estimates an image of the PRF by extracting and combining the individual PRFs from the given positions. NaN values are either ignored by passing a mask or can be replaced by the mirrored value with respect to the center of the PRF. Note that if fluxes are *not* specified explicitly, it will be flux estimated from an aperture of the same size as the PRF image. This does *not* account for aperture corrections so often will *not* be what you want for anything other than quick-look needs. Parameters ---------- imdata : array Data array with the image to extract the PRF from positions : List or array or `~astropy.table.Table` List of pixel coordinate source positions to use in creating the PRF. If this is a `~astropy.table.Table` it must have columns called ``x_0`` and ``y_0``. size : odd int Size of the quadratic PRF image in pixels. mask : bool array, optional Boolean array to mask out bad values. fluxes : array, optional Object fluxes to normalize extracted PRFs. If not given (or None), the flux is estimated from an aperture of the same size as the PRF image. 
mode : {'mean', 'median'} One of the following modes to combine the extracted PRFs: * 'mean': Take the pixelwise mean of the extracted PRFs. * 'median': Take the pixelwise median of the extracted PRFs. subsampling : int Factor of subsampling of the PRF (default = 1). fix_nan : bool Fix NaN values in the data by replacing it with the mirrored value. Assuming that the PRF is symmetrical. Returns ------- prf : `photutils.psf.sandbox.DiscretePRF` Discrete PRF model estimated from data. """ # Check input array type and dimension. if np.iscomplexobj(imdata): raise TypeError('Complex type not supported') if imdata.ndim != 2: raise ValueError('{0}-d array not supported. ' 'Only 2-d arrays supported.'.format(imdata.ndim)) if size % 2 == 0: raise TypeError("Size must be odd.") if fluxes is not None and len(fluxes) != len(positions): raise TypeError('Position and flux arrays must be of equal ' 'length.') if mask is None: mask = np.isnan(imdata) if isinstance(positions, (list, tuple)): positions = np.array(positions) if isinstance(positions, Table) or \ (isinstance(positions, np.ndarray) and positions.dtype.names is not None): # One can do clever things like # positions['x_0', 'y_0'].as_array().view((positions['x_0'].dtype, # 2)) # but that requires positions['x_0'].dtype is # positions['y_0'].dtype. # Better do something simple to allow type promotion if required. pos = np.empty((len(positions), 2)) pos[:, 0] = positions['x_0'] pos[:, 1] = positions['y_0'] positions = pos if isinstance(fluxes, (list, tuple)): fluxes = np.array(fluxes) if mode == 'mean': combine = np.ma.mean elif mode == 'median': combine = np.ma.median else: raise Exception('Invalid mode to combine prfs.') data_internal = np.ma.array(data=imdata, mask=mask) prf_model = np.ndarray(shape=(subsampling, subsampling, size, size)) positions_subpixel_indices = \ np.array([subpixel_indices(_, subsampling) for _ in positions], dtype=np.int) for i in range(subsampling): for j in range(subsampling): extracted_sub_prfs = [] sub_prf_indices = np.all(positions_subpixel_indices == [j, i], axis=1) if not sub_prf_indices.any(): raise ValueError('The source coordinates do not sample ' 'all sub-pixel positions. Reduce the ' 'value of the subsampling parameter.') positions_sub_prfs = positions[sub_prf_indices] for k, position in enumerate(positions_sub_prfs): x, y = position extracted_prf = extract_array(data_internal, (size, size), (y, x)) # Check shape to exclude incomplete PRFs at the boundaries # of the image if (extracted_prf.shape == (size, size) and np.ma.sum(extracted_prf) != 0): # Replace NaN values by mirrored value, with respect # to the prf's center if fix_nan: prf_nan = extracted_prf.mask if prf_nan.any(): if (prf_nan.sum() > 3 or prf_nan[size // 2, size // 2]): continue else: extracted_prf = mask_to_mirrored_num( extracted_prf, prf_nan, (size // 2, size // 2)) # Normalize and add extracted PRF to data cube if fluxes is None: extracted_prf_norm = (np.ma.copy(extracted_prf) / np.ma.sum(extracted_prf)) else: fluxes_sub_prfs = fluxes[sub_prf_indices] extracted_prf_norm = (np.ma.copy(extracted_prf) / fluxes_sub_prfs[k]) extracted_sub_prfs.append(extracted_prf_norm) else: continue prf_model[i, j] = np.ma.getdata( combine(np.ma.dstack(extracted_sub_prfs), axis=2)) return cls(prf_model, subsampling=subsampling) class Reproject(object): """ Class to reproject pixel coordinates between unrectified and rectified images. 
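    The forward (pixel-to-world) transformation of the original WCS is
    composed with the inverse (world-to-pixel) transformation of the
    rectified WCS.  Illustrative usage, assuming ``wcs_original`` and
    ``wcs_rectified`` are valid WCS objects and ``x``, ``y`` are pixel
    coordinates::

        rp = Reproject(wcs_original, wcs_rectified)
        xr, yr = rp.to_rectified(x, y)     # original -> rectified pixels
        xo, yo = rp.to_original(xr, yr)    # rectified -> original pixels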
Parameters ---------- wcs_original, wcs_rectified : `~astropy.wcs.WCS` or `~gwcs.wcs.WCS` The WCS objects for the original (unrectified) and rectified images. origin : {0, 1} Whether to use 0- or 1-based pixel coordinates. """ def __init__(self, wcs_original, wcs_rectified): self.wcs_original = wcs_original self.wcs_rectified = wcs_rectified @staticmethod def _reproject(wcs1, wcs2): """ Perform the forward transformation of ``wcs1`` followed by the inverse transformation of ``wcs2``. Parameters ---------- wcs1, wcs2 : `~astropy.wcs.WCS` or `~gwcs.wcs.WCS` The WCS objects. Returns ------- result : func Function to compute the transformations. It takes x, y positions in ``wcs1`` and returns x, y positions in ``wcs2``. The input and output x, y positions are zero indexed. """ import gwcs forward_origin = [] if isinstance(wcs1, fitswcs.WCS): forward = wcs1.all_pix2world forward_origin = [0] elif isinstance(wcs2, gwcs.wcs.WCS): forward = wcs1.forward_transform else: raise ValueError('wcs1 must be an astropy.wcs.WCS or ' 'gwcs.wcs.WCS object.') inverse_origin = [] if isinstance(wcs2, fitswcs.WCS): inverse = wcs2.all_world2pix inverse_origin = [0] elif isinstance(wcs2, gwcs.wcs.WCS): inverse = wcs2.forward_transform.inverse else: raise ValueError('wcs2 must be an astropy.wcs.WCS or ' 'gwcs.wcs.WCS object.') def _reproject_func(x, y): forward_args = [x, y] + forward_origin sky = forward(*forward_args) inverse_args = sky + inverse_origin return inverse(*inverse_args) return _reproject_func def to_rectified(self, x, y): """ Convert the input (x, y) positions from the original (unrectified) image to the rectified image. Parameters ---------- x, y: float or array-like of float The zero-index pixel coordinates in the original (unrectified) image. Returns ------- x, y: float or array-like The zero-index pixel coordinates in the rectified image. """ return self._reproject(self.wcs_original, self.wcs_rectified)(x, y) def to_original(self, x, y): """ Convert the input (x, y) positions from the rectified image to the original (unrectified) image. Parameters ---------- x, y: float or array-like of float The zero-index pixel coordinates in the rectified image. Returns ------- x, y: float or array-like The zero-index pixel coordinates in the original (unrectified) image. """ return self._reproject(self.wcs_rectified, self.wcs_original)(x, y) photutils-0.4/photutils/psf/tests/0000755000214200020070000000000013175654702021572 5ustar lbradleySTSCI\science00000000000000photutils-0.4/photutils/psf/tests/__init__.py0000644000214200020070000000017513055576313023704 0ustar lbradleySTSCI\science00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst """ This packages contains tests for the psf subpackage. """ photutils-0.4/photutils/psf/tests/test_funcs.py0000644000214200020070000000331513175634532024322 0ustar lbradleySTSCI\science00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst from __future__ import division import numpy as np from numpy.testing import assert_allclose import pytest from astropy.modeling.models import Gaussian2D from astropy.convolution.utils import discretize_model from astropy.table import Table from .. import subtract_psf from ..sandbox import DiscretePRF try: import scipy # noqa HAS_SCIPY = True except ImportError: HAS_SCIPY = False PSF_SIZE = 11 GAUSSIAN_WIDTH = 1. 
IMAGE_SIZE = 101 # Position and FLUXES of test sources INTAB = Table([[50., 23, 12, 86], [50., 83, 80, 84], [np.pi * 10, 3.654, 20., 80 / np.sqrt(3)]], names=['x_0', 'y_0', 'flux_0']) # Create test psf psf_model = Gaussian2D(1. / (2 * np.pi * GAUSSIAN_WIDTH ** 2), PSF_SIZE // 2, PSF_SIZE // 2, GAUSSIAN_WIDTH, GAUSSIAN_WIDTH) test_psf = discretize_model(psf_model, (0, PSF_SIZE), (0, PSF_SIZE), mode='oversample') # Set up grid for test image image = np.zeros((IMAGE_SIZE, IMAGE_SIZE)) # Add sources to test image for x, y, flux in INTAB: model = Gaussian2D(flux / (2 * np.pi * GAUSSIAN_WIDTH ** 2), x, y, GAUSSIAN_WIDTH, GAUSSIAN_WIDTH) image += discretize_model(model, (0, IMAGE_SIZE), (0, IMAGE_SIZE), mode='oversample') @pytest.mark.skipif('not HAS_SCIPY') def test_subtract_psf(): """Test subtract_psf.""" prf = DiscretePRF(test_psf, subsampling=1) posflux = INTAB.copy() for n in posflux.colnames: posflux.rename_column(n, n.split('_')[0] + '_fit') residuals = subtract_psf(image, prf, posflux) assert_allclose(residuals, np.zeros_like(image), atol=1E-4) photutils-0.4/photutils/psf/tests/test_groupstars.py0000644000214200020070000004373213175634532025424 0ustar lbradleySTSCI\science00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst from __future__ import division import numpy as np import pytest from numpy.testing import assert_array_equal from astropy.table import Table, vstack from ..groupstars import DAOGroup, DBSCANGroup try: import sklearn.cluster # noqa HAS_SKLEARN = True except ImportError: HAS_SKLEARN = False class TestDAOGROUP(object): def test_daogroup_one(self): """ +---------+--------+---------+---------+--------+---------+ | * * * * | | | 0.2 + + | | | | | | 0 + * * + | | | | | | -0.2 + + | | | * * * * | +---------+--------+---------+---------+--------+---------+ 0 0.5 1 1.5 2 x and y axis are in pixel coordinates. Each asterisk represents the centroid of a star. 
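        With ``crit_separation=0.6`` the five stars on the left and the
        five stars on the right are expected to form two separate groups.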
""" x_0 = np.array([0, np.sqrt(2)/4, np.sqrt(2)/4, -np.sqrt(2)/4, -np.sqrt(2)/4]) y_0 = np.array([0, np.sqrt(2)/4, -np.sqrt(2)/4, np.sqrt(2)/4, -np.sqrt(2)/4]) x_1 = x_0 + 2.0 first_group = Table([x_0, y_0, np.arange(len(x_0)) + 1, np.ones(len(x_0), dtype=np.int)], names=('x_0', 'y_0', 'id', 'group_id')) second_group = Table([x_1, y_0, len(x_0) + np.arange(len(x_0)) + 1, 2*np.ones(len(x_0), dtype=np.int)], names=('x_0', 'y_0', 'id', 'group_id')) starlist = vstack([first_group, second_group]) daogroup = DAOGroup(crit_separation=0.6) test_starlist = daogroup(starlist['x_0', 'y_0', 'id']) assert_array_equal(starlist, test_starlist) def test_daogroup_two(self): """ +--------------+--------------+-------------+--------------+ 3 + * + | * | 2.5 + * + | * | 2 + * + | | 1.5 + + | | 1 + * + | * | 0.5 + * + | * | 0 + * + +--------------+--------------+-------------+--------------+ -1 -0.5 0 0.5 1 """ first_group = Table([np.zeros(5), np.linspace(0, 1, 5), np.arange(5) + 1, np.ones(5, dtype=np.int)], names=('x_0', 'y_0', 'id', 'group_id')) second_group = Table([np.zeros(5), np.linspace(2, 3, 5), 6 + np.arange(5), 2*np.ones(5, dtype=np.int)], names=('x_0', 'y_0', 'id', 'group_id')) starlist = vstack([first_group, second_group]) daogroup = DAOGroup(crit_separation=0.3) test_starlist = daogroup(starlist['x_0', 'y_0', 'id']) assert_array_equal(starlist, test_starlist) def test_daogroup_three(self): """ 1 +--+-------+--------+--------+--------+-------+--------+--+ | | | | | | 0.5 + + | | | | 0 + * * * * * * * * * * + | | | | -0.5 + + | | | | | | -1 +--+-------+--------+--------+--------+-------+--------+--+ 0 0.5 1 1.5 2 2.5 3 """ first_group = Table([np.linspace(0, 1, 5), np.zeros(5), np.arange(5) + 1, np.ones(5, dtype=np.int)], names=('x_0', 'y_0', 'id', 'group_id')) second_group = Table([np.linspace(2, 3, 5), np.zeros(5), 6 + np.arange(5), 2*np.ones(5, dtype=np.int)], names=('x_0', 'y_0', 'id', 'group_id')) starlist = vstack([first_group, second_group]) daogroup = DAOGroup(crit_separation=0.3) test_starlist = daogroup(starlist['x_0', 'y_0', 'id']) assert_array_equal(starlist, test_starlist) def test_daogroup_four(self): """ +-+---------+---------+---------+---------+-+ 1 + * + | * * | | | | | 0.5 + + | | | | | | 0 + * * + | | | | -0.5 + + | | | | | * * | -1 + * + +-+---------+---------+---------+---------+-+ -1 -0.5 0 0.5 1 """ x = np.linspace(-1., 1., 5) y = np.sqrt(1. 
- x**2) xx = np.hstack((x, x)) yy = np.hstack((y, -y)) starlist = Table([xx, yy, np.arange(10) + 1, np.ones(10, dtype=np.int)], names=('x_0', 'y_0', 'id', 'group_id')) daogroup = DAOGroup(crit_separation=2.5) test_starlist = daogroup(starlist['x_0', 'y_0', 'id']) assert_array_equal(starlist, test_starlist) def test_daogroup_five(self): """ +--+--------+--------+-------+--------+--------+--------+--+ 3 + * + | * | 2.5 + * + | * | 2 + * + | | 1.5 + * * * * * * * * * * + | | 1 + * + | * | 0.5 + * + | * | 0 + * + +--+--------+--------+-------+--------+--------+--------+--+ 0 0.5 1 1.5 2 2.5 3 """ first_group = Table([1.5*np.ones(5), np.linspace(0, 1, 5), np.arange(5) + 1, np.ones(5, dtype=np.int)], names=('x_0', 'y_0', 'id', 'group_id')) second_group = Table([1.5*np.ones(5), np.linspace(2, 3, 5), 6 + np.arange(5), 2*np.ones(5, dtype=np.int)], names=('x_0', 'y_0', 'id', 'group_id')) third_group = Table([np.linspace(0, 1, 5), 1.5*np.ones(5), 11 + np.arange(5), 3*np.ones(5, dtype=np.int)], names=('x_0', 'y_0', 'id', 'group_id')) fourth_group = Table([np.linspace(2, 3, 5), 1.5*np.ones(5), 16 + np.arange(5), 4*np.ones(5, dtype=np.int)], names=('x_0', 'y_0', 'id', 'group_id')) starlist = vstack([first_group, second_group, third_group, fourth_group]) daogroup = DAOGroup(crit_separation=0.3) test_starlist = daogroup(starlist['x_0', 'y_0', 'id']) assert_array_equal(starlist, test_starlist) def test_daogroup_six(self): """ +------+----------+----------+----------+----------+------+ | * * * * * * | | | 0.2 + + | | | | | | 0 + * * * + | | | | | | -0.2 + + | | | * * * * * * | +------+----------+----------+----------+----------+------+ 0 1 2 3 4 """ x_0 = np.array([0, np.sqrt(2)/4, np.sqrt(2)/4, -np.sqrt(2)/4, -np.sqrt(2)/4]) y_0 = np.array([0, np.sqrt(2)/4, -np.sqrt(2)/4, np.sqrt(2)/4, -np.sqrt(2)/4]) x_1 = x_0 + 2.0 x_2 = x_0 + 4.0 first_group = Table([x_0, y_0, np.arange(5) + 1, np.ones(5, dtype=np.int)], names=('x_0', 'y_0', 'id', 'group_id')) second_group = Table([x_1, y_0, 6 + np.arange(5), 2*np.ones(5, dtype=np.int)], names=('x_0', 'y_0', 'id', 'group_id')) third_group = Table([x_2, y_0, 11 + np.arange(5), 3*np.ones(5, dtype=np.int)], names=('x_0', 'y_0', 'id', 'group_id')) starlist = vstack([first_group, second_group, third_group]) daogroup = DAOGroup(crit_separation=0.6) test_starlist = daogroup(starlist['x_0', 'y_0', 'id']) assert_array_equal(starlist, test_starlist) def test_isolated_sources(self): """ Test case when all sources are isolated. 
""" x_0 = np.array([0, np.sqrt(2)/4, np.sqrt(2)/4, -np.sqrt(2)/4, -np.sqrt(2)/4]) y_0 = np.array([0, np.sqrt(2)/4, -np.sqrt(2)/4, np.sqrt(2)/4, -np.sqrt(2)/4]) starlist = Table([x_0, y_0, np.arange(len(x_0)) + 1, np.arange(len(x_0)) + 1], names=('x_0', 'y_0', 'id', 'group_id')) daogroup = DAOGroup(crit_separation=0.01) test_starlist = daogroup(starlist['x_0', 'y_0', 'id']) assert_array_equal(starlist, test_starlist) def test_id_column(self): x_0 = np.array([0, np.sqrt(2)/4, np.sqrt(2)/4, -np.sqrt(2)/4, -np.sqrt(2)/4]) y_0 = np.array([0, np.sqrt(2)/4, -np.sqrt(2)/4, np.sqrt(2)/4, -np.sqrt(2)/4]) starlist = Table([x_0, y_0, np.arange(len(x_0)) + 1, np.arange(len(x_0)) + 1], names=('x_0', 'y_0', 'id', 'group_id')) daogroup = DAOGroup(crit_separation=0.01) test_starlist = daogroup(starlist['x_0', 'y_0']) assert_array_equal(starlist, test_starlist) def test_id_column_raise_error(self): x_0 = np.array([0, np.sqrt(2)/4, np.sqrt(2)/4, -np.sqrt(2)/4, -np.sqrt(2)/4]) y_0 = np.array([0, np.sqrt(2)/4, -np.sqrt(2)/4, np.sqrt(2)/4, -np.sqrt(2)/4]) starlist = Table([x_0, y_0, np.arange(len(x_0)), np.arange(len(x_0)) + 1], names=('x_0', 'y_0', 'id', 'group_id')) daogroup = DAOGroup(crit_separation=0.01) with pytest.raises(ValueError): daogroup(starlist['x_0', 'y_0', 'id']) @pytest.mark.skipif('not HAS_SKLEARN') class TestDBSCANGroup(object): def test_group_stars_one(object): x_0 = np.array([0, np.sqrt(2)/4, np.sqrt(2)/4, -np.sqrt(2)/4, -np.sqrt(2)/4]) y_0 = np.array([0, np.sqrt(2)/4, -np.sqrt(2)/4, np.sqrt(2)/4, -np.sqrt(2)/4]) x_1 = x_0 + 2.0 first_group = Table([x_0, y_0, np.arange(len(x_0)) + 1, np.ones(len(x_0), dtype=np.int)], names=('x_0', 'y_0', 'id', 'group_id')) second_group = Table([x_1, y_0, len(x_0) + np.arange(len(x_0)) + 1, 2*np.ones(len(x_0), dtype=np.int)], names=('x_0', 'y_0', 'id', 'group_id')) starlist = vstack([first_group, second_group]) dbscan = DBSCANGroup(crit_separation=0.6) test_starlist = dbscan(starlist['x_0', 'y_0', 'id']) assert_array_equal(starlist, test_starlist) def test_group_stars_two(object): first_group = Table([1.5*np.ones(5), np.linspace(0, 1, 5), np.arange(5) + 1, np.ones(5, dtype=np.int)], names=('x_0', 'y_0', 'id', 'group_id')) second_group = Table([1.5*np.ones(5), np.linspace(2, 3, 5), 6 + np.arange(5), 2*np.ones(5, dtype=np.int)], names=('x_0', 'y_0', 'id', 'group_id')) third_group = Table([np.linspace(0, 1, 5), 1.5*np.ones(5), 11 + np.arange(5), 3*np.ones(5, dtype=np.int)], names=('x_0', 'y_0', 'id', 'group_id')) fourth_group = Table([np.linspace(2, 3, 5), 1.5*np.ones(5), 16 + np.arange(5), 4*np.ones(5, dtype=np.int)], names=('x_0', 'y_0', 'id', 'group_id')) starlist = vstack([first_group, second_group, third_group, fourth_group]) dbscan = DBSCANGroup(crit_separation=0.3) test_starlist = dbscan(starlist['x_0', 'y_0', 'id']) assert_array_equal(starlist, test_starlist) def test_isolated_sources(self): """ Test case when all sources are isolated. 
""" x_0 = np.array([0, np.sqrt(2)/4, np.sqrt(2)/4, -np.sqrt(2)/4, -np.sqrt(2)/4]) y_0 = np.array([0, np.sqrt(2)/4, -np.sqrt(2)/4, np.sqrt(2)/4, -np.sqrt(2)/4]) starlist = Table([x_0, y_0, np.arange(len(x_0)) + 1, np.arange(len(x_0)) + 1], names=('x_0', 'y_0', 'id', 'group_id')) dbscan = DBSCANGroup(crit_separation=0.01) test_starlist = dbscan(starlist['x_0', 'y_0', 'id']) assert_array_equal(starlist, test_starlist) def test_id_column(self): x_0 = np.array([0, np.sqrt(2)/4, np.sqrt(2)/4, -np.sqrt(2)/4, -np.sqrt(2)/4]) y_0 = np.array([0, np.sqrt(2)/4, -np.sqrt(2)/4, np.sqrt(2)/4, -np.sqrt(2)/4]) starlist = Table([x_0, y_0, np.arange(len(x_0)) + 1, np.arange(len(x_0)) + 1], names=('x_0', 'y_0', 'id', 'group_id')) dbscan = DBSCANGroup(crit_separation=0.01) test_starlist = dbscan(starlist['x_0', 'y_0']) assert_array_equal(starlist, test_starlist) def test_id_column_raise_error(self): x_0 = np.array([0, np.sqrt(2)/4, np.sqrt(2)/4, -np.sqrt(2)/4, -np.sqrt(2)/4]) y_0 = np.array([0, np.sqrt(2)/4, -np.sqrt(2)/4, np.sqrt(2)/4, -np.sqrt(2)/4]) starlist = Table([x_0, y_0, np.arange(len(x_0)), np.arange(len(x_0)) + 1], names=('x_0', 'y_0', 'id', 'group_id')) dbscan = DBSCANGroup(crit_separation=0.01) with pytest.raises(ValueError): dbscan(starlist['x_0', 'y_0', 'id']) photutils-0.4/photutils/psf/tests/test_imagemodel.py0000644000214200020070000000646013175634532025313 0ustar lbradleySTSCI\science00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst from __future__ import division import numpy as np from astropy.modeling.models import Gaussian2D import pytest from .. import FittableImageModel try: import scipy # noqa HAS_SCIPY = True except ImportError: HAS_SCIPY = False @pytest.mark.skipif('not HAS_SCIPY') def test_image_model(): gm = Gaussian2D(x_stddev=3, y_stddev=3) xg, yg = np.mgrid[-2:3, -2:3] imod_nonorm = FittableImageModel(gm(xg, yg)) assert np.allclose(imod_nonorm(0, 0), gm(0, 0)) assert np.allclose(imod_nonorm(1, 1), gm(1, 1)) assert np.allclose(imod_nonorm(-2, 1), gm(-2, 1)) # now sub-pixel should *not* match, but be reasonably close assert not np.allclose(imod_nonorm(0.5, 0.5), gm(0.5, 0.5)) # in this case good to ~0.1% seems to be fine assert np.allclose(imod_nonorm(0.5, 0.5), gm(0.5, 0.5), rtol=.001) assert np.allclose(imod_nonorm(-0.5, 1.75), gm(-0.5, 1.75), rtol=.001) imod_norm = FittableImageModel(gm(xg, yg), normalize=True) assert not np.allclose(imod_norm(0, 0), gm(0, 0)) assert np.allclose(np.sum(imod_norm(xg, yg)), 1) imod_norm2 = FittableImageModel(gm(xg, yg), normalize=True, normalization_correction=2) assert not np.allclose(imod_norm2(0, 0), gm(0, 0)) assert np.allclose(imod_norm(0, 0), imod_norm2(0, 0)*2) assert np.allclose(np.sum(imod_norm2(xg, yg)), 0.5) @pytest.mark.skipif('not HAS_SCIPY') def test_image_model_oversampling(): gm = Gaussian2D(x_stddev=3, y_stddev=3) osa = 3 # oversampling factor xg, yg = np.mgrid[-3:3.00001:(1/osa), -3:3.00001:(1/osa)] im = gm(xg, yg) # should be obvious, but at least ensures the test is right: assert im.shape[0] > 7 imod_oversampled = FittableImageModel(im, oversampling=osa) assert np.allclose(imod_oversampled(0, 0), gm(0, 0)) assert np.allclose(imod_oversampled(1, 1), gm(1, 1)) assert np.allclose(imod_oversampled(-2, 1), gm(-2, 1)) assert np.allclose(imod_oversampled(0.5, 0.5), gm(0.5, 0.5), rtol=.001) assert np.allclose(imod_oversampled(-0.5, 1.75), gm(-0.5, 1.75), rtol=.001) imod_wrongsampled = FittableImageModel(im) # now make sure that all *fails* without the oversampling # except for at the origin assert 
np.allclose(imod_wrongsampled(0, 0), gm(0, 0)) assert not np.allclose(imod_wrongsampled(1, 1), gm(1, 1)) assert not np.allclose(imod_wrongsampled(-2, 1), gm(-2, 1)) assert not np.allclose(imod_wrongsampled(0.5, 0.5), gm(0.5, 0.5), rtol=.001) assert not np.allclose(imod_wrongsampled(-0.5, 1.75), gm(-0.5, 1.75), rtol=.001) @pytest.mark.skipif('not HAS_SCIPY') def test_centering_oversampled(): gm = Gaussian2D(x_stddev=2, y_stddev=3) osa = 3 # oversampling factor xg, yg = np.mgrid[-3:3.00001:(1/osa), -3:3.00001:(1/osa)] imod_oversampled = FittableImageModel(gm(xg, yg), oversampling=osa) valcen = gm(0, 0) val36 = gm(0.66, 0.66) assert np.allclose(valcen, imod_oversampled(0, 0)) assert np.allclose(val36, imod_oversampled(0.66, 0.66)) imod_oversampled.x_0 = 2.5 imod_oversampled.y_0 = -3.5 assert np.allclose(valcen, imod_oversampled(2.5, -3.5)) assert np.allclose(val36, imod_oversampled(2.5 + 0.66, -3.5 + 0.66)) photutils-0.4/photutils/psf/tests/test_misc.py0000644000214200020070000001227113175634532024140 0ustar lbradleySTSCI\science00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst """ Miscellaneous tests for psf functionality that doesn't have another obvious place to go """ from __future__ import division import numpy as np from numpy.testing import assert_allclose import pytest from astropy.table import Table from .. import IntegratedGaussianPRF, prepare_psf_model, get_grouped_psf_model try: import scipy # noqa HAS_SCIPY = True except ImportError: HAS_SCIPY = False widths = [0.001, 0.01, 0.1, 1] sigmas = [0.5, 1., 2., 10., 12.34] @pytest.mark.skipif('not HAS_SCIPY') @pytest.mark.parametrize('width', widths) def test_subpixel_gauss_psf(width): """ Test subpixel accuracy of Gaussian PSF by checking the sum of pixels. """ gauss_psf = IntegratedGaussianPRF(width) y, x = np.mgrid[-10:11, -10:11] assert_allclose(gauss_psf(x, y).sum(), 1) @pytest.mark.skipif('not HAS_SCIPY') @pytest.mark.parametrize('sigma', sigmas) def test_gaussian_psf_integral(sigma): """ Test if Gaussian PSF integrates to unity on larger scales. """ psf = IntegratedGaussianPRF(sigma=sigma) y, x = np.mgrid[-100:101, -100:101] assert_allclose(psf(y, x).sum(), 1) @pytest.fixture(scope="module") def moffimg(): """ This fixture requires scipy so don't call it from non-scipy tests """ from scipy import integrate from astropy.modeling.models import Moffat2D mof = Moffat2D(alpha=4.8) # this is the analytic value needed to get a total flux of 1 mof.amplitude = (mof.alpha-1)/(np.pi*mof.gamma**2) # first make sure it really is normalized assert (1 - integrate.dblquad(mof, -10, 10, lambda x: -10, lambda x: 10)[0]) < 1e-6 # now create an "image" of the PSF xg, yg = np.meshgrid(*([np.linspace(-2, 2, 100)]*2)) return mof, (xg, yg, mof(xg, yg)) @pytest.mark.skipif('not HAS_SCIPY') def test_moffat_fitting(moffimg): """ Test that the Moffat to be fit in test_psf_adapter is behaving correctly """ from astropy.modeling.fitting import LevMarLSQFitter from astropy.modeling.models import Moffat2D mof, (xg, yg, img) = moffimg # a closeish-but-wrong "guessed Moffat" guess_moffat = Moffat2D(x_0=.1, y_0=-.05, gamma=1.05, amplitude=mof.amplitude*1.06, alpha=4.75) f = LevMarLSQFitter() fit_mof = f(guess_moffat, xg, yg, img) assert_allclose(fit_mof.parameters, mof.parameters, rtol=.01, atol=.0005) # we set the tolerances in flux to be 2-3% because the shape paraameters of # the guessed version are known to be wrong. 
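# The cases below exercise prepare_psf_model with different choices for the
# position/flux parameter names; each ``tols`` entry is (xy_tol, flux_tol),
# where None means that quantity is not checked.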
@pytest.mark.parametrize("prepkwargs,tols", [ (dict(xname='x_0', yname='y_0', fluxname=None, renormalize_psf=True), (1e-3, .02)), (dict(xname=None, yname=None, fluxname=None, renormalize_psf=True), (1e-3, .02)), (dict(xname=None, yname=None, fluxname=None, renormalize_psf=False), (1e-3, .03)), (dict(xname='x_0', yname='y_0', fluxname='amplitude', renormalize_psf=False), (1e-3, None)), ]) @pytest.mark.skipif('not HAS_SCIPY') def test_psf_adapter(moffimg, prepkwargs, tols): """ Test that the PSF adapter behaves as expected for fitting (don't worry about full-on psf photometry for now) """ from astropy.modeling.fitting import LevMarLSQFitter from astropy.modeling.models import Moffat2D mof, (xg, yg, img) = moffimg f = LevMarLSQFitter() # a close-but-wrong "guessed Moffat" guess_moffat = Moffat2D(x_0=.1, y_0=-.05, gamma=1.01, amplitude=mof.amplitude*1.01, alpha=4.79) if prepkwargs['renormalize_psf']: # definitely very wrong, so this ensures the re-normalization # stuff works guess_moffat.amplitude = 5. if prepkwargs['xname'] is None: guess_moffat.x_0 = 0 if prepkwargs['yname'] is None: guess_moffat.y_0 = 0 psfmod = prepare_psf_model(guess_moffat, **prepkwargs) xytol, fluxtol = tols fit_psfmod = f(psfmod, xg, yg, img) if xytol is not None: assert np.abs(getattr(fit_psfmod, fit_psfmod.xname)) < xytol assert np.abs(getattr(fit_psfmod, fit_psfmod.yname)) < xytol if fluxtol is not None: assert np.abs(1 - getattr(fit_psfmod, fit_psfmod.fluxname)) < fluxtol # ensure the amplitude and shape parameters did *not* change assert fit_psfmod.psfmodel.gamma == guess_moffat.gamma assert fit_psfmod.psfmodel.alpha == guess_moffat.alpha if prepkwargs['fluxname'] is None: assert fit_psfmod.psfmodel.amplitude == guess_moffat.amplitude @pytest.mark.skipif('not HAS_SCIPY') def test_get_grouped_psf_model(): igp = IntegratedGaussianPRF(sigma=1.2) tab = Table(names=['x_0', 'y_0', 'flux_0'], data=[[1, 2], [3, 4], [0.5, 1]]) pars_to_set = {'x_0': 'x_0', 'y_0': 'y_0', 'flux_0': 'flux'} gpsf = get_grouped_psf_model(igp, tab, pars_to_set) assert gpsf.x_0_0 == 1 assert gpsf.y_0_1 == 4 assert gpsf.flux_0 == 0.5 assert gpsf.flux_1 == 1 assert gpsf.sigma_0 == gpsf.sigma_1 == 1.2 photutils-0.4/photutils/psf/tests/test_photometry.py0000644000214200020070000005432113175635000025410 0ustar lbradleySTSCI\science00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst from __future__ import division import pytest import numpy as np from numpy.testing import assert_allclose, assert_array_equal, assert_equal from astropy.table import Table from astropy.stats import gaussian_sigma_to_fwhm, SigmaClip from astropy.modeling import Parameter, Fittable2DModel from astropy.modeling.fitting import LevMarLSQFitter from astropy.modeling.models import Gaussian2D, Moffat2D from astropy.convolution.utils import discretize_model from astropy.tests.helper import catch_warnings from astropy.utils.exceptions import AstropyUserWarning from ..groupstars import DAOGroup from ..models import IntegratedGaussianPRF, prepare_psf_model from ..photometry import (DAOPhotPSFPhotometry, BasicPSFPhotometry, IterativelySubtractedPSFPhotometry) from ..sandbox import DiscretePRF from ...background import StdBackgroundRMS, MMMBackground from ...datasets import make_gaussian_sources_image, make_noise_image from ...detection import DAOStarFinder try: import scipy # noqa HAS_SCIPY = True except ImportError: HAS_SCIPY = False def make_psf_photometry_objs(std=1, sigma_psf=1): """ Produces baseline photometry objects which are then modified as-needed in 
specific tests below """ daofind = DAOStarFinder(threshold=5.0*std, fwhm=sigma_psf*gaussian_sigma_to_fwhm) daogroup = DAOGroup(1.5*sigma_psf*gaussian_sigma_to_fwhm) threshold = 5. * std fwhm = sigma_psf * gaussian_sigma_to_fwhm crit_separation = 1.5 * sigma_psf * gaussian_sigma_to_fwhm daofind = DAOStarFinder(threshold=threshold, fwhm=fwhm) daogroup = DAOGroup(crit_separation) mode_bkg = MMMBackground() psf_model = IntegratedGaussianPRF(sigma=sigma_psf) fitter = LevMarLSQFitter() basic_phot_obj = BasicPSFPhotometry(finder=daofind, group_maker=daogroup, bkg_estimator=mode_bkg, psf_model=psf_model, fitter=fitter, fitshape=(11, 11)) iter_phot_obj = IterativelySubtractedPSFPhotometry(finder=daofind, group_maker=daogroup, bkg_estimator=mode_bkg, psf_model=psf_model, fitter=fitter, niters=1, fitshape=(11, 11)) dao_phot_obj = DAOPhotPSFPhotometry(crit_separation=crit_separation, threshold=threshold, fwhm=fwhm, psf_model=psf_model, fitshape=(11, 11), niters=1) return (basic_phot_obj, iter_phot_obj, dao_phot_obj) sigma_psfs = [] # A group of two overlapped stars and an isolated one sigma_psfs.append(2) sources1 = Table() sources1['flux'] = [800, 1000, 1200] sources1['x_mean'] = [13, 18, 25] sources1['y_mean'] = [16, 16, 25] sources1['x_stddev'] = [sigma_psfs[-1]] * 3 sources1['y_stddev'] = sources1['x_stddev'] sources1['theta'] = [0] * 3 sources1['id'] = [1, 2, 3] sources1['group_id'] = [1, 1, 2] # one single group with four stars. sigma_psfs.append(2) sources2 = Table() sources2['flux'] = [700, 800, 700, 800] sources2['x_mean'] = [12, 17, 12, 17] sources2['y_mean'] = [15, 15, 20, 20] sources2['x_stddev'] = [sigma_psfs[-1]] * 4 sources2['y_stddev'] = sources2['x_stddev'] sources2['theta'] = [0] * 4 sources2['id'] = [1, 2, 3, 4] sources2['group_id'] = [1, 1, 1, 1] # one faint star and one brither companion # although they are in the same group, the detection algorithm # is not able to detect the fainter star, hence photometry should # be performed with niters > 1 or niters=None sigma_psfs.append(2) sources3 = Table() sources3['flux'] = [10000, 1000] sources3['x_mean'] = [18, 13] sources3['y_mean'] = [17, 19] sources3['x_stddev'] = [sigma_psfs[-1]] * 2 sources3['y_stddev'] = sources3['x_stddev'] sources3['theta'] = [0] * 2 sources3['id'] = [1] * 2 sources3['group_id'] = [1] * 2 sources3['iter_detected'] = [1, 2] @pytest.mark.xfail('not HAS_SCIPY') @pytest.mark.parametrize("sigma_psf, sources", [(sigma_psfs[2], sources3)]) def test_psf_photometry_niters(sigma_psf, sources): img_shape = (32, 32) # generate image with read-out noise (Gaussian) and # background noise (Poisson) image = (make_gaussian_sources_image(img_shape, sources) + make_noise_image(img_shape, type='poisson', mean=6., random_state=1) + make_noise_image(img_shape, type='gaussian', mean=0., stddev=2., random_state=1)) cp_image = image.copy() sigma_clip = SigmaClip(sigma=3.) 
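    # estimate the background RMS (with sigma clipping); it sets the
    # detection threshold used by the photometry objects built below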
bkgrms = StdBackgroundRMS(sigma_clip) std = bkgrms(image) phot_obj = make_psf_photometry_objs(std, sigma_psf)[1:3] for iter_phot_obj in phot_obj: iter_phot_obj.niters = None result_tab = iter_phot_obj(image) residual_image = iter_phot_obj.get_residual_image() assert (result_tab['x_0_unc'] < 1.96 * sigma_psf / np.sqrt(sources['flux'])).all() assert (result_tab['y_0_unc'] < 1.96 * sigma_psf / np.sqrt(sources['flux'])).all() assert (result_tab['flux_unc'] < 1.96 * np.sqrt(sources['flux'])).all() assert_allclose(result_tab['x_fit'], sources['x_mean'], rtol=1e-1) assert_allclose(result_tab['y_fit'], sources['y_mean'], rtol=1e-1) assert_allclose(result_tab['flux_fit'], sources['flux'], rtol=1e-1) assert_array_equal(result_tab['id'], sources['id']) assert_array_equal(result_tab['group_id'], sources['group_id']) assert_array_equal(result_tab['iter_detected'], sources['iter_detected']) assert_allclose(np.mean(residual_image), 0.0, atol=1e1) # make sure image is note overwritten assert_array_equal(cp_image, image) @pytest.mark.xfail('not HAS_SCIPY') @pytest.mark.parametrize("sigma_psf, sources", [(sigma_psfs[0], sources1), (sigma_psfs[1], sources2), # these ensure that the test *fails* if the model # PSFs are the wrong shape pytest.param(sigma_psfs[0]/1.2, sources1, marks=pytest.mark.xfail()), pytest.param(sigma_psfs[1]*1.2, sources2, marks=pytest.mark.xfail())]) def test_psf_photometry_oneiter(sigma_psf, sources): """ Tests in an image with a group of two overlapped stars and an isolated one. """ img_shape = (32, 32) # generate image with read-out noise (Gaussian) and # background noise (Poisson) image = (make_gaussian_sources_image(img_shape, sources) + make_noise_image(img_shape, type='poisson', mean=6., random_state=1) + make_noise_image(img_shape, type='gaussian', mean=0., stddev=2., random_state=1)) cp_image = image.copy() sigma_clip = SigmaClip(sigma=3.) 
bkgrms = StdBackgroundRMS(sigma_clip) std = bkgrms(image) phot_objs = make_psf_photometry_objs(std, sigma_psf) for phot_proc in phot_objs: result_tab = phot_proc(image) residual_image = phot_proc.get_residual_image() assert (result_tab['x_0_unc'] < 1.96 * sigma_psf / np.sqrt(sources['flux'])).all() assert (result_tab['y_0_unc'] < 1.96 * sigma_psf / np.sqrt(sources['flux'])).all() assert (result_tab['flux_unc'] < 1.96 * np.sqrt(sources['flux'])).all() assert_allclose(result_tab['x_fit'], sources['x_mean'], rtol=1e-1) assert_allclose(result_tab['y_fit'], sources['y_mean'], rtol=1e-1) assert_allclose(result_tab['flux_fit'], sources['flux'], rtol=1e-1) assert_array_equal(result_tab['id'], sources['id']) assert_array_equal(result_tab['group_id'], sources['group_id']) assert_allclose(np.mean(residual_image), 0.0, atol=1e1) # test fixed photometry phot_proc.psf_model.x_0.fixed = True phot_proc.psf_model.y_0.fixed = True pos = Table(names=['x_0', 'y_0'], data=[sources['x_mean'], sources['y_mean']]) cp_pos = pos.copy() result_tab = phot_proc(image, pos) residual_image = phot_proc.get_residual_image() assert 'x_0_unc' not in result_tab.colnames assert 'y_0_unc' not in result_tab.colnames assert (result_tab['flux_unc'] < 1.96 * np.sqrt(sources['flux'])).all() assert_array_equal(result_tab['x_fit'], sources['x_mean']) assert_array_equal(result_tab['y_fit'], sources['y_mean']) assert_allclose(result_tab['flux_fit'], sources['flux'], rtol=1e-1) assert_array_equal(result_tab['id'], sources['id']) assert_array_equal(result_tab['group_id'], sources['group_id']) assert_allclose(np.mean(residual_image), 0.0, atol=1e1) # make sure image is not overwritten assert_array_equal(cp_image, image) # make sure initial guess table is not modified assert_array_equal(cp_pos, pos) # resets fixed positions phot_proc.psf_model.x_0.fixed = False phot_proc.psf_model.y_0.fixed = False @pytest.mark.xfail('not HAS_SCIPY') def test_niters_errors(): iter_phot_obj = make_psf_photometry_objs()[1] # tests that niters is set to an integer even if the user inputs # a float iter_phot_obj.niters = 1.1 assert_equal(iter_phot_obj.niters, 1) # test that a ValueError is raised if niters <= 0 with pytest.raises(ValueError): iter_phot_obj.niters = 0 # test that it's OK to set niters to None iter_phot_obj.niters = None @pytest.mark.xfail('not HAS_SCIPY') def test_fitshape_erros(): basic_phot_obj = make_psf_photometry_objs()[0] # first make sure setting to a scalar does the right thing (and makes # no errors) basic_phot_obj.fitshape = 11 assert np.all(basic_phot_obj.fitshape == (11, 11)) # test that a ValuError is raised if fitshape has even components with pytest.raises(ValueError): basic_phot_obj.fitshape = (2, 2) with pytest.raises(ValueError): basic_phot_obj.fitshape = 2 # test that a ValueError is raised if fitshape has non positive # components with pytest.raises(ValueError): basic_phot_obj.fitshape = (-1, 0) # test that a ValueError is raised if fitshape has more than two # dimensions with pytest.raises(ValueError): basic_phot_obj.fitshape = (3, 3, 3) @pytest.mark.xfail('not HAS_SCIPY') def test_aperture_radius_errors(): basic_phot_obj = make_psf_photometry_objs()[0] # test that aperture_radius was set to None by default assert_equal(basic_phot_obj.aperture_radius, None) # test that a ValueError is raised if aperture_radius is non positive with pytest.raises(ValueError): basic_phot_obj.aperture_radius = -3 @pytest.mark.xfail('not HAS_SCIPY') def test_finder_erros(): iter_phot_obj = make_psf_photometry_objs()[1] with 
pytest.raises(ValueError): iter_phot_obj.finder = None with pytest.raises(ValueError): iter_phot_obj = IterativelySubtractedPSFPhotometry( finder=None, group_maker=DAOGroup(1), bkg_estimator=MMMBackground(), psf_model=IntegratedGaussianPRF(1), fitshape=(11, 11)) @pytest.mark.xfail('not HAS_SCIPY') def test_finder_positions_warning(): basic_phot_obj = make_psf_photometry_objs(sigma_psf=2)[0] positions = Table() positions['x_0'] = [12.8, 18.2, 25.3] positions['y_0'] = [15.7, 16.5, 25.1] image = (make_gaussian_sources_image((32, 32), sources1) + make_noise_image((32, 32), type='poisson', mean=6., random_state=1)) with catch_warnings(AstropyUserWarning): result_tab = basic_phot_obj(image=image, init_guesses=positions) assert_array_equal(result_tab['x_0'], positions['x_0']) assert_array_equal(result_tab['y_0'], positions['y_0']) assert_allclose(result_tab['x_fit'], positions['x_0'], rtol=1e-1) assert_allclose(result_tab['y_fit'], positions['y_0'], rtol=1e-1) with pytest.raises(ValueError): basic_phot_obj.finder = None result_tab = basic_phot_obj(image=image) @pytest.mark.xfail('not HAS_SCIPY') def test_aperture_radius(): img_shape = (32, 32) # generate image with read-out noise (Gaussian) and # background noise (Poisson) image = (make_gaussian_sources_image(img_shape, sources1) + make_noise_image(img_shape, type='poisson', mean=6., random_state=1) + make_noise_image(img_shape, type='gaussian', mean=0., stddev=2., random_state=1)) basic_phot_obj = make_psf_photometry_objs()[0] # test that aperture radius is properly set whenever the PSF model has # a `fwhm` attribute class PSFModelWithFWHM(Fittable2DModel): x_0 = Parameter(default=1) y_0 = Parameter(default=1) flux = Parameter(default=1) fwhm = Parameter(default=5) def __init__(self, fwhm=fwhm.default): super(PSFModelWithFWHM, self).__init__(fwhm=fwhm) def evaluate(self, x, y, x_0, y_0, flux, fwhm): return flux / (fwhm * (x - x_0)**2 * (y - y_0)**2) psf_model = PSFModelWithFWHM() basic_phot_obj.psf_model = psf_model basic_phot_obj(image) assert_equal(basic_phot_obj.aperture_radius, psf_model.fwhm.value) PARS_TO_SET_0 = {'x_0': 'x_0', 'y_0': 'y_0', 'flux_0': 'flux'} PARS_TO_OUTPUT_0 = {'x_fit': 'x_0', 'y_fit': 'y_0', 'flux_fit': 'flux'} PARS_TO_SET_1 = PARS_TO_SET_0.copy() PARS_TO_SET_1['sigma_0'] = 'sigma' PARS_TO_OUTPUT_1 = PARS_TO_OUTPUT_0.copy() PARS_TO_OUTPUT_1['sigma_fit'] = 'sigma' @pytest.mark.parametrize("actual_pars_to_set, actual_pars_to_output," "is_sigma_fixed", [(PARS_TO_SET_0, PARS_TO_OUTPUT_0, True), (PARS_TO_SET_1, PARS_TO_OUTPUT_1, False)]) @pytest.mark.skipif('not HAS_SCIPY') def test_define_fit_param_names(actual_pars_to_set, actual_pars_to_output, is_sigma_fixed): psf_model = IntegratedGaussianPRF() psf_model.sigma.fixed = is_sigma_fixed basic_phot_obj = make_psf_photometry_objs()[0] basic_phot_obj.psf_model = psf_model basic_phot_obj._define_fit_param_names() assert_equal(basic_phot_obj._pars_to_set, actual_pars_to_set) assert_equal(basic_phot_obj._pars_to_output, actual_pars_to_output) # tests previously written to psf_photometry PSF_SIZE = 11 GAUSSIAN_WIDTH = 1. IMAGE_SIZE = 101 # Position and FLUXES of test sources INTAB = Table([[50., 23, 12, 86], [50., 83, 80, 84], [np.pi * 10, 3.654, 20., 80 / np.sqrt(3)]], names=['x_0', 'y_0', 'flux_0']) # Create test psf psf_model = Gaussian2D(1. 
/ (2 * np.pi * GAUSSIAN_WIDTH ** 2), PSF_SIZE // 2, PSF_SIZE // 2, GAUSSIAN_WIDTH, GAUSSIAN_WIDTH) test_psf = discretize_model(psf_model, (0, PSF_SIZE), (0, PSF_SIZE), mode='oversample') # Set up grid for test image image = np.zeros((IMAGE_SIZE, IMAGE_SIZE)) # Add sources to test image for x, y, flux in INTAB: model = Gaussian2D(flux / (2 * np.pi * GAUSSIAN_WIDTH ** 2), x, y, GAUSSIAN_WIDTH, GAUSSIAN_WIDTH) image += discretize_model(model, (0, IMAGE_SIZE), (0, IMAGE_SIZE), mode='oversample') # Some tests require an image with wider sources. WIDE_GAUSSIAN_WIDTH = 3. WIDE_INTAB = Table([[50, 23.2], [50.5, 1], [10, 20]], names=['x_0', 'y_0', 'flux_0']) wide_image = np.zeros((IMAGE_SIZE, IMAGE_SIZE)) # Add sources to test image for x, y, flux in WIDE_INTAB: model = Gaussian2D(flux / (2 * np.pi * WIDE_GAUSSIAN_WIDTH ** 2), x, y, WIDE_GAUSSIAN_WIDTH, WIDE_GAUSSIAN_WIDTH) wide_image += discretize_model(model, (0, IMAGE_SIZE), (0, IMAGE_SIZE), mode='oversample') @pytest.mark.skipif('not HAS_SCIPY') def test_psf_photometry_discrete(): """ Test psf_photometry with discrete PRF model. """ prf = DiscretePRF(test_psf, subsampling=1) basic_phot = BasicPSFPhotometry(group_maker=DAOGroup(2), bkg_estimator=None, psf_model=prf, fitshape=7) f = basic_phot(image=image, init_guesses=INTAB) for n in ['x', 'y', 'flux']: assert_allclose(f[n + '_0'], f[n + '_fit'], rtol=1e-6) @pytest.mark.skipif('not HAS_SCIPY') def test_tune_coordinates(): """ Test psf_photometry with discrete PRF model and coordinates that need to be adjusted in the fit. """ prf = DiscretePRF(test_psf, subsampling=1) prf.x_0.fixed = False prf.y_0.fixed = False # Shift all sources by 0.3 pixels intab = INTAB.copy() intab['x_0'] += 0.3 basic_phot = BasicPSFPhotometry(group_maker=DAOGroup(2), bkg_estimator=None, psf_model=prf, fitshape=7) f = basic_phot(image=image, init_guesses=intab) for n in ['x', 'y', 'flux']: assert_allclose(f[n + '_0'], f[n + '_fit'], rtol=1e-3) @pytest.mark.skipif('not HAS_SCIPY') def test_psf_boundary(): """ Test psf_photometry with discrete PRF model at the boundary of the data. """ prf = DiscretePRF(test_psf, subsampling=1) basic_phot = BasicPSFPhotometry(group_maker=DAOGroup(2), bkg_estimator=None, psf_model=prf, fitshape=7, aperture_radius=5.5) intab = Table(data=[[1], [1]], names=['x_0', 'y_0']) f = basic_phot(image=image, init_guesses=intab) assert_allclose(f['flux_fit'], 0, atol=1e-8) @pytest.mark.skipif('not HAS_SCIPY') def test_aperture_radius_value_error(): """ Test psf_photometry with discrete PRF model at the boundary of the data. """ prf = DiscretePRF(test_psf, subsampling=1) basic_phot = BasicPSFPhotometry(group_maker=DAOGroup(2), bkg_estimator=None, psf_model=prf, fitshape=7) intab = Table(data=[[1], [1]], names=['x_0', 'y_0']) with pytest.raises(ValueError) as err: basic_phot(image=image, init_guesses=intab) assert 'aperture_radius is None' in str(err.value) @pytest.mark.skipif('not HAS_SCIPY') def test_psf_boundary_gaussian(): """ Test psf_photometry with discrete PRF model at the boundary of the data. """ psf = IntegratedGaussianPRF(GAUSSIAN_WIDTH) basic_phot = BasicPSFPhotometry(group_maker=DAOGroup(2), bkg_estimator=None, psf_model=psf, fitshape=7) intab = Table(data=[[1], [1]], names=['x_0', 'y_0']) f = basic_phot(image=image, init_guesses=intab) assert_allclose(f['flux_fit'], 0, atol=1e-8) @pytest.mark.skipif('not HAS_SCIPY') def test_psf_photometry_gaussian(): """ Test psf_photometry with Gaussian PSF model. 
""" psf = IntegratedGaussianPRF(sigma=GAUSSIAN_WIDTH) basic_phot = BasicPSFPhotometry(group_maker=DAOGroup(2), bkg_estimator=None, psf_model=psf, fitshape=7) f = basic_phot(image=image, init_guesses=INTAB) for n in ['x', 'y', 'flux']: assert_allclose(f[n + '_0'], f[n + '_fit'], rtol=1e-3) @pytest.mark.skipif('not HAS_SCIPY') @pytest.mark.parametrize("renormalize_psf", (True, False)) def test_psf_photometry_gaussian2(renormalize_psf): """ Test psf_photometry with Gaussian PSF model from Astropy. """ psf = Gaussian2D(1. / (2 * np.pi * GAUSSIAN_WIDTH ** 2), PSF_SIZE // 2, PSF_SIZE // 2, GAUSSIAN_WIDTH, GAUSSIAN_WIDTH) psf = prepare_psf_model(psf, xname='x_mean', yname='y_mean', renormalize_psf=renormalize_psf) basic_phot = BasicPSFPhotometry(group_maker=DAOGroup(2), bkg_estimator=None, psf_model=psf, fitshape=7) f = basic_phot(image=image, init_guesses=INTAB) for n in ['x', 'y']: assert_allclose(f[n + '_0'], f[n + '_fit'], rtol=1e-3) # flux error worse, because of integration scheme ? assert_allclose(f['flux_0'], f['flux_fit'], rtol=1) @pytest.mark.skipif('not HAS_SCIPY') def test_psf_photometry_moffat(): """ Test psf_photometry with Moffat PSF model from Astropy. """ psf = Moffat2D(1. / (2 * np.pi * GAUSSIAN_WIDTH ** 2), PSF_SIZE // 2, PSF_SIZE // 2, 1, 1) psf = prepare_psf_model(psf, xname='x_0', yname='y_0', renormalize_psf=False) basic_phot = BasicPSFPhotometry(group_maker=DAOGroup(2), bkg_estimator=None, psf_model=psf, fitshape=7) f = basic_phot(image=image, init_guesses=INTAB) f.pprint(max_width=-1) for n in ['x', 'y']: assert_allclose(f[n + '_0'], f[n + '_fit'], rtol=1e-3) # image was created with a gaussian, so flux won't match exactly assert_allclose(f['flux_0'], f['flux_fit'], rtol=1) @pytest.mark.skipif('not HAS_SCIPY') def test_psf_fitting_data_on_edge(): """ No mask is input explicitly here, but source 2 is so close to the edge that the subarray that's extracted gets a mask internally. """ psf_guess = IntegratedGaussianPRF(flux=1, sigma=WIDE_GAUSSIAN_WIDTH) psf_guess.flux.fixed = psf_guess.x_0.fixed = psf_guess.y_0.fixed = False basic_phot = BasicPSFPhotometry(group_maker=DAOGroup(2), bkg_estimator=None, psf_model=psf_guess, fitshape=7) outtab = basic_phot(image=wide_image, init_guesses=WIDE_INTAB) for n in ['x', 'y', 'flux']: assert_allclose(outtab[n + '_0'], outtab[n + '_fit'], rtol=0.05, atol=0.1) photutils-0.4/photutils/psf/tests/test_prf_adapter.py0000644000214200020070000000604513175634532025476 0ustar lbradleySTSCI\science00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst from __future__ import division import numpy as np from numpy.testing import assert_allclose from .. 
import PRFAdapter import pytest from astropy.modeling.models import Moffat2D try: HAS_SCIPY = True # noqa except ImportError: HAS_SCIPY = False def normalize_moffat(mof): # this is the analytic value needed to get a total flux of 1 mof = mof.copy() mof.amplitude = (mof.alpha-1)/(np.pi*mof.gamma**2) return mof @pytest.mark.parametrize("adapterkwargs", [ dict(xname='x_0', yname='y_0', fluxname=None, renormalize_psf=False), dict(xname=None, yname=None, fluxname=None, renormalize_psf=False), dict(xname='x_0', yname='y_0', fluxname='amplitude', renormalize_psf=False)]) @pytest.mark.skipif('not HAS_SCIPY') def test_create_eval_prfadapter(adapterkwargs): mof = Moffat2D(gamma=1, alpha=4.8) prf = PRFAdapter(mof, **adapterkwargs) # make sure these can be set without anything freaking out prf.x_0 = 0.5 prf.y_0 = -0.5 prf.flux = 1.2 prf(0, 0) # just make sure it runs at all @pytest.mark.parametrize("adapterkwargs", [ dict(xname='x_0', yname='y_0', fluxname=None, renormalize_psf=True), dict(xname='x_0', yname='y_0', fluxname=None, renormalize_psf=False), dict(xname=None, yname=None, fluxname=None, renormalize_psf=False) ]) @pytest.mark.skipif('not HAS_SCIPY') def test_prfadapter_integrates(adapterkwargs): from scipy.integrate import dblquad mof = Moffat2D(gamma=1.5, alpha=4.8) if not adapterkwargs['renormalize_psf']: mof = normalize_moffat(mof) prf1 = PRFAdapter(mof, **adapterkwargs) # first check that the PRF over a central grid ends up summing to the # integrand over the whole PSF xg, yg = np.meshgrid(*([(-1, 0, 1)]*2)) evalmod = prf1(xg, yg) if adapterkwargs['renormalize_psf']: mof = normalize_moffat(mof) integrando, itol = dblquad(mof, -1.5, 1.5, lambda x: -1.5, lambda x: 1.5) assert_allclose(np.sum(evalmod), integrando, atol=itol * 10) @pytest.mark.parametrize("adapterkwargs", [ dict(xname='x_0', yname='y_0', fluxname=None, renormalize_psf=False), dict(xname=None, yname=None, fluxname=None, renormalize_psf=False)]) @pytest.mark.skipif('not HAS_SCIPY') def test_prfadapter_sizematch(adapterkwargs): from scipy.integrate import dblquad mof1 = normalize_moffat(Moffat2D(gamma=1, alpha=4.8)) prf1 = PRFAdapter(mof1, **adapterkwargs) # now try integrating over differently-sampled PRFs # and check that they match mof2 = normalize_moffat(Moffat2D(gamma=2, alpha=4.8)) prf2 = PRFAdapter(mof2, **adapterkwargs) xg1, yg1 = np.meshgrid(*([(-0.5, 0.5)]*2)) xg2, yg2 = np.meshgrid(*([(-1.5, -0.5, 0.5, 1.5)]*2)) eval11 = prf1(xg1, yg1) eval22 = prf2(xg2, yg2) integrand, itol = dblquad(mof1, -2, 2, lambda x: -2, lambda x: 2) # it's a bit of a guess that the above itol is appropriate, but it should # be a similar ballpark assert_allclose(np.sum(eval11), np.sum(eval22), atol=itol*100) photutils-0.4/photutils/psf/tests/test_sandbox.py0000644000214200020070000000747613175634532024656 0ustar lbradleySTSCI\science00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst from __future__ import division import numpy as np from numpy.testing import assert_allclose from astropy.modeling.models import Gaussian2D from astropy.convolution.utils import discretize_model from astropy.table import Table import pytest from ..sandbox import DiscretePRF PSF_SIZE = 11 GAUSSIAN_WIDTH = 1. IMAGE_SIZE = 101 # Position and FLUXES of test sources INTAB = Table([[50., 23, 12, 86], [50., 83, 80, 84], [np.pi * 10, 3.654, 20., 80 / np.sqrt(3)]], names=['x_0', 'y_0', 'flux_0']) # Create test psf psf_model = Gaussian2D(1. 
/ (2 * np.pi * GAUSSIAN_WIDTH ** 2), PSF_SIZE // 2, PSF_SIZE // 2, GAUSSIAN_WIDTH, GAUSSIAN_WIDTH) test_psf = discretize_model(psf_model, (0, PSF_SIZE), (0, PSF_SIZE), mode='oversample') # Set up grid for test image image = np.zeros((IMAGE_SIZE, IMAGE_SIZE)) # Add sources to test image for x, y, flux in INTAB: model = Gaussian2D(flux / (2 * np.pi * GAUSSIAN_WIDTH ** 2), x, y, GAUSSIAN_WIDTH, GAUSSIAN_WIDTH) image += discretize_model(model, (0, IMAGE_SIZE), (0, IMAGE_SIZE), mode='oversample') # Some tests require an image with wider sources. WIDE_GAUSSIAN_WIDTH = 3. WIDE_INTAB = Table([[50, 23.2], [50.5, 1], [10, 20]], names=['x_0', 'y_0', 'flux_0']) wide_image = np.zeros((IMAGE_SIZE, IMAGE_SIZE)) # Add sources to test image for x, y, flux in WIDE_INTAB: model = Gaussian2D(flux / (2 * np.pi * WIDE_GAUSSIAN_WIDTH ** 2), x, y, WIDE_GAUSSIAN_WIDTH, WIDE_GAUSSIAN_WIDTH) wide_image += discretize_model(model, (0, IMAGE_SIZE), (0, IMAGE_SIZE), mode='oversample') def test_create_prf_mean(): """ Check if create_prf works correctly on simulated data. Position input format: list """ prf = DiscretePRF.create_from_image(image, list(INTAB['x_0', 'y_0'].as_array()), PSF_SIZE, subsampling=1, mode='mean') assert_allclose(prf._prf_array[0, 0], test_psf, atol=1E-8) def test_create_prf_median(): """ Check if create_prf works correctly on simulated data. Position input format: astropy.table.Table """ prf = DiscretePRF.create_from_image(image, np.array(INTAB['x_0', 'y_0']), PSF_SIZE, subsampling=1, mode='median') assert_allclose(prf._prf_array[0, 0], test_psf, atol=1E-8) def test_create_prf_nan(): """ Check if create_prf deals correctly with nan values. """ image_nan = image.copy() image_nan[52, 52] = np.nan image_nan[52, 48] = np.nan prf = DiscretePRF.create_from_image(image, np.array(INTAB['x_0', 'y_0']), PSF_SIZE, subsampling=1, fix_nan=True) assert not np.isnan(prf._prf_array[0, 0]).any() def test_create_prf_flux(): """ Check if create_prf works correctly when FLUXES are specified. """ prf = DiscretePRF.create_from_image(image, np.array(INTAB['x_0', 'y_0']), PSF_SIZE, subsampling=1, mode='median', fluxes=INTAB['flux_0']) assert_allclose(prf._prf_array[0, 0].sum(), 1) assert_allclose(prf._prf_array[0, 0], test_psf, atol=1E-8) def test_create_prf_excessive_subsampling(): """ Check if a helpful error is raised if the subsampling parameter is too high. """ with pytest.raises(ValueError) as exc: DiscretePRF.create_from_image(image, list(INTAB['x_0', 'y_0'].as_array()), PSF_SIZE, subsampling=999) assert('subsampling' in exc.value.args[0]) photutils-0.4/photutils/segmentation/0000755000214200020070000000000013175654702022335 5ustar lbradleySTSCI\science00000000000000photutils-0.4/photutils/segmentation/__init__.py0000644000214200020070000000056013175634532024446 0ustar lbradleySTSCI\science00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst """ This subpackage contains tools for detecting sources using image segmentation and measuring their centroids, photometry, and morphological properties. 
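A typical (illustrative) workflow is to build a segmentation image with
``detect_sources`` and then measure the detected segments with
``source_properties``; here ``data`` is a 2D image array and ``threshold``
is the per-pixel detection threshold::

    from photutils import detect_sources, source_properties

    segm = detect_sources(data, threshold, npixels=5)
    props = source_properties(data, segm)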
""" from .core import * # noqa from .deblend import * # noqa from .detect import * # noqa from .properties import * # noqa photutils-0.4/photutils/segmentation/core.py0000644000214200020070000005206213175634532023643 0ustar lbradleySTSCI\science00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst from __future__ import (absolute_import, division, print_function, unicode_literals) from copy import deepcopy import numpy as np from astropy.utils import lazyproperty from ..utils.colormaps import random_cmap __all__ = ['SegmentationImage'] __doctest_requires__ = {('SegmentationImage', 'SegmentationImage.*'): ['scipy', 'skimage']} class SegmentationImage(object): """ Class for a segmentation image. Parameters ---------- data : array_like (int) A 2D segmentation image where sources are labeled by different positive integer values. A value of zero is reserved for the background. """ def __init__(self, data): self.data = np.asanyarray(data, dtype=np.int) @property def data(self): """ The 2D segmentation image. """ return self._data @data.setter def data(self, value): if np.min(value) < 0: raise ValueError('The segmentation image cannot contain ' 'negative integers.') self._data = value # be sure to delete any lazy properties to reset their values. del (self.data_masked, self.shape, self.labels, self.nlabels, self.max, self.slices, self.areas, self.is_sequential) @property def array(self): """ The 2D segmentation image. """ return self._data def __array__(self): """ Array representation of the segmentation image (e.g., for matplotlib). """ return self._data @lazyproperty def data_masked(self): """ A `~numpy.ma.MaskedArray` version of the segmentation image where the background (label = 0) has been masked. """ return np.ma.masked_where(self.data == 0, self.data) @staticmethod def _labels(data): """ Return a sorted array of the non-zero labels in the segmentation image. Parameters ---------- data : array_like (int) A 2D segmentation image where sources are labeled by different positive integer values. A value of zero is reserved for the background. Returns ------- result : `~numpy.ndarray` An array of non-zero label numbers. Notes ----- This is a separate static method so it can be used on masked versions of the segmentation image (cf. ``~photutils.SegmentationImage.remove_masked_labels``. Examples -------- >>> from photutils import SegmentationImage >>> segm = SegmentationImage([[1, 1, 0, 0, 4, 4], ... [0, 0, 0, 0, 0, 4], ... [0, 0, 3, 3, 0, 0], ... [7, 0, 0, 0, 0, 5], ... [7, 7, 0, 5, 5, 5], ... [7, 7, 0, 0, 5, 5]]) >>> segm._labels(segm.data) array([1, 3, 4, 5, 7]) """ return np.unique(data[data != 0]) @lazyproperty def shape(self): """ The shape of the 2D segmentation image. """ return self._data.shape @lazyproperty def labels(self): """The sorted non-zero labels in the segmentation image.""" return self._labels(self.data) @lazyproperty def nlabels(self): """The number of non-zero labels in the segmentation image.""" return len(self.labels) @lazyproperty def max(self): """The maximum non-zero label in the segmentation image.""" return np.max(self.data) @lazyproperty def slices(self): """The minimal bounding box slices for each labeled region.""" from scipy.ndimage import find_objects return find_objects(self._data) @lazyproperty def areas(self): """The areas (in pixel**2) of all labeled regions.""" return np.bincount(self.data.ravel()) def area(self, labels): """ The areas (in pixel**2) of the regions for the input labels. 
Parameters ---------- labels : int, array-like (1D, int) The label(s) for which to return areas. Returns ------- areas : `~numpy.ndarray` The areas of the labeled regions. """ labels = np.atleast_1d(labels) for label in labels: self.check_label(label, allow_zero=True) return self.areas[labels] @lazyproperty def is_sequential(self): """ Determine whether or not the non-zero labels in the segmenation image are sequential (with no missing values). """ if (self.labels[-1] - self.labels[0] + 1) == self.nlabels: return True else: return False def copy(self): """ Return a deep copy of this class instance. Deep copy is used so that all attributes and values are copied. """ return deepcopy(self) def check_label(self, label, allow_zero=False): """ Check for a valid label label number within the segmentation image. Parameters ---------- label : int The label number to check. allow_zero : bool If `True` then a label of 0 is valid, otherwise 0 is invalid. Raises ------ ValueError If the input ``label`` is invalid. """ if label == 0: if allow_zero: return else: raise ValueError('label "0" is reserved for the background') if label < 0: raise ValueError('label must be a positive integer, got ' '"{0}"'.format(label)) if label not in self.labels: raise ValueError('label "{0}" is not in the segmentation ' 'image'.format(label)) def cmap(self, background_color='#000000', random_state=None): """ A matplotlib colormap consisting of random (muted) colors. This is very useful for plotting the segmentation image. Parameters ---------- background_color : str or `None`, optional A hex string in the "#rrggbb" format defining the first color in the colormap. This color will be used as the background color (label = 0) when plotting the segmentation image. The default is black. random_state : int or `~numpy.random.RandomState`, optional The pseudo-random number generator state used for random sampling. Separate function calls with the same ``random_state`` will generate the same colormap. """ from matplotlib import colors cmap = random_cmap(self.max + 1, random_state=random_state) if background_color is not None: cmap.colors[0] = colors.hex2color(background_color) return cmap def outline_segments(self, mask_background=False): """ Outline the labeled segments. The "outlines" represent the pixels *just inside* the segments, leaving the background pixels unmodified. This corresponds to the ``mode='inner'`` in `skimage.segmentation.find_boundaries`. Parameters ---------- mask_background : bool, optional Set to `True` to mask the background pixels (labels = 0) in the returned image. This is useful for overplotting the segment outlines on an image. The default is `False`. Returns ------- boundaries : 2D `~numpy.ndarray` or `~numpy.ma.MaskedArray` An image with the same shape of the segmenation image containing only the outlines of the labeled segments. The pixel values in the outlines correspond to the labels in the segmentation image. If ``mask_background`` is `True`, then a `~numpy.ma.MaskedArray` is returned. Examples -------- >>> from photutils import SegmentationImage >>> segm = SegmentationImage([[0, 0, 0, 0, 0, 0], ... [0, 2, 2, 2, 2, 0], ... [0, 2, 2, 2, 2, 0], ... [0, 2, 2, 2, 2, 0], ... [0, 2, 2, 2, 2, 0], ... 
[0, 0, 0, 0, 0, 0]]) >>> segm.outline_segments() array([[0, 0, 0, 0, 0, 0], [0, 2, 2, 2, 2, 0], [0, 2, 0, 0, 2, 0], [0, 2, 0, 0, 2, 0], [0, 2, 2, 2, 2, 0], [0, 0, 0, 0, 0, 0]]) """ # requires scikit-image >= 0.11 from skimage.segmentation import find_boundaries outlines = self.data * find_boundaries(self.data, mode='inner') if mask_background: outlines = np.ma.masked_where(outlines == 0, outlines) return outlines def relabel(self, labels, new_label): """ Relabel one or more label numbers. The input ``labels`` will all be relabeled to ``new_label``. Parameters ---------- labels : int, array-like (1D, int) The label numbers(s) to relabel. new_label : int The relabeled label number. Examples -------- >>> from photutils import SegmentationImage >>> segm = SegmentationImage([[1, 1, 0, 0, 4, 4], ... [0, 0, 0, 0, 0, 4], ... [0, 0, 3, 3, 0, 0], ... [7, 0, 0, 0, 0, 5], ... [7, 7, 0, 5, 5, 5], ... [7, 7, 0, 0, 5, 5]]) >>> segm.relabel(labels=[1, 7], new_label=2) >>> segm.data array([[2, 2, 0, 0, 4, 4], [0, 0, 0, 0, 0, 4], [0, 0, 3, 3, 0, 0], [2, 0, 0, 0, 0, 5], [2, 2, 0, 5, 5, 5], [2, 2, 0, 0, 5, 5]]) """ labels = np.atleast_1d(labels) for label in labels: data = self.data data[np.where(data == label)] = new_label self.data = data # needed to call the data setter def relabel_sequential(self, start_label=1): """ Relabel the label numbers sequentially, such that there are no missing label numbers (up to the maximum label number). Parameters ---------- start_label : int, optional The starting label number, which should be a positive integer. The default is 1. Examples -------- >>> from photutils import SegmentationImage >>> segm = SegmentationImage([[1, 1, 0, 0, 4, 4], ... [0, 0, 0, 0, 0, 4], ... [0, 0, 3, 3, 0, 0], ... [7, 0, 0, 0, 0, 5], ... [7, 7, 0, 5, 5, 5], ... [7, 7, 0, 0, 5, 5]]) >>> segm.relabel_sequential() >>> segm.data array([[1, 1, 0, 0, 3, 3], [0, 0, 0, 0, 0, 3], [0, 0, 2, 2, 0, 0], [5, 0, 0, 0, 0, 4], [5, 5, 0, 4, 4, 4], [5, 5, 0, 0, 4, 4]]) """ if start_label <= 0: raise ValueError('start_label must be > 0.') if self.is_sequential and (self.labels[0] == start_label): return forward_map = np.zeros(self.max + 1, dtype=np.int) forward_map[self.labels] = np.arange(self.nlabels) + start_label self.data = forward_map[self.data] def keep_labels(self, labels, relabel=False): """ Keep only the specified label numbers. Parameters ---------- labels : int, array-like (1D, int) The label number(s) to keep. Labels of zero and those not in the segmentation image will be ignored. relabel : bool, optional If `True`, then the segmentation image will be relabeled such that the labels are in sequential order starting from 1. Examples -------- >>> from photutils import SegmentationImage >>> segm = SegmentationImage([[1, 1, 0, 0, 4, 4], ... [0, 0, 0, 0, 0, 4], ... [0, 0, 3, 3, 0, 0], ... [7, 0, 0, 0, 0, 5], ... [7, 7, 0, 5, 5, 5], ... [7, 7, 0, 0, 5, 5]]) >>> segm.keep_labels(labels=3) >>> segm.data array([[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 3, 3, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]]) >>> segm = SegmentationImage([[1, 1, 0, 0, 4, 4], ... [0, 0, 0, 0, 0, 4], ... [0, 0, 3, 3, 0, 0], ... [7, 0, 0, 0, 0, 5], ... [7, 7, 0, 5, 5, 5], ... 
[7, 7, 0, 0, 5, 5]]) >>> segm.keep_labels(labels=[5, 3]) >>> segm.data array([[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 3, 3, 0, 0], [0, 0, 0, 0, 0, 5], [0, 0, 0, 5, 5, 5], [0, 0, 0, 0, 5, 5]]) """ labels = np.atleast_1d(labels) labels_tmp = list(set(self.labels) - set(labels)) self.remove_labels(labels_tmp, relabel=relabel) def remove_labels(self, labels, relabel=False): """ Remove one or more label numbers. Parameters ---------- labels : int, array-like (1D, int) The label number(s) to remove. Labels of zero and those not in the segmentation image will be ignored. relabel : bool, optional If `True`, then the segmentation image will be relabeled such that the labels are in sequential order starting from 1. Examples -------- >>> from photutils import SegmentationImage >>> segm = SegmentationImage([[1, 1, 0, 0, 4, 4], ... [0, 0, 0, 0, 0, 4], ... [0, 0, 3, 3, 0, 0], ... [7, 0, 0, 0, 0, 5], ... [7, 7, 0, 5, 5, 5], ... [7, 7, 0, 0, 5, 5]]) >>> segm.remove_labels(labels=5) >>> segm.data array([[1, 1, 0, 0, 4, 4], [0, 0, 0, 0, 0, 4], [0, 0, 3, 3, 0, 0], [7, 0, 0, 0, 0, 0], [7, 7, 0, 0, 0, 0], [7, 7, 0, 0, 0, 0]]) >>> segm = SegmentationImage([[1, 1, 0, 0, 4, 4], ... [0, 0, 0, 0, 0, 4], ... [0, 0, 3, 3, 0, 0], ... [7, 0, 0, 0, 0, 5], ... [7, 7, 0, 5, 5, 5], ... [7, 7, 0, 0, 5, 5]]) >>> segm.remove_labels(labels=[5, 3]) >>> segm.data array([[1, 1, 0, 0, 4, 4], [0, 0, 0, 0, 0, 4], [0, 0, 0, 0, 0, 0], [7, 0, 0, 0, 0, 0], [7, 7, 0, 0, 0, 0], [7, 7, 0, 0, 0, 0]]) """ self.relabel(labels, new_label=0) if relabel: self.relabel_sequential() def remove_border_labels(self, border_width, partial_overlap=True, relabel=False): """ Remove labeled segments near the image border. Labels within the defined border region will be removed. Parameters ---------- border_width : int The width of the border region in pixels. partial_overlap : bool, optional If this is set to `True` (the default), a segment that partially extends into the border region will be removed. Segments that are completely within the border region are always removed. relabel : bool, optional If `True`, then the segmentation image will be relabeled such that the labels are in sequential order starting from 1. Examples -------- >>> from photutils import SegmentationImage >>> segm = SegmentationImage([[1, 1, 0, 0, 4, 4], ... [0, 0, 0, 0, 0, 4], ... [0, 0, 3, 3, 0, 0], ... [7, 0, 0, 0, 0, 5], ... [7, 7, 0, 5, 5, 5], ... [7, 7, 0, 0, 5, 5]]) >>> segm.remove_border_labels(border_width=1) >>> segm.data array([[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 3, 3, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]]) >>> segm = SegmentationImage([[1, 1, 0, 0, 4, 4], ... [0, 0, 0, 0, 0, 4], ... [0, 0, 3, 3, 0, 0], ... [7, 0, 0, 0, 0, 5], ... [7, 7, 0, 5, 5, 5], ... [7, 7, 0, 0, 5, 5]]) >>> segm.remove_border_labels(border_width=1, ... partial_overlap=False) >>> segm.data array([[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 3, 3, 0, 0], [7, 0, 0, 0, 0, 5], [7, 7, 0, 5, 5, 5], [7, 7, 0, 0, 5, 5]]) """ if border_width >= min(self.shape) / 2: raise ValueError('border_width must be smaller than half the ' 'image size in either dimension') border = np.zeros(self.shape, dtype=np.bool) border[:border_width, :] = True border[-border_width:, :] = True border[:, :border_width] = True border[:, -border_width:] = True self.remove_masked_labels(border, partial_overlap=partial_overlap, relabel=relabel) def remove_masked_labels(self, mask, partial_overlap=True, relabel=False): """ Remove labeled segments located within a masked region. 
Parameters ---------- mask : array_like (bool) A boolean mask, with the same shape as the segmentation image (``.data``), where `True` values indicate masked pixels. partial_overlap : bool, optional If this is set to `True` (the default), a segment that partially extends into a masked region will also be removed. Segments that are completely within a masked region are always removed. relabel : bool, optional If `True`, then the segmentation image will be relabeled such that the labels are in sequential order starting from 1. Examples -------- >>> from photutils import SegmentationImage >>> segm = SegmentationImage([[1, 1, 0, 0, 4, 4], ... [0, 0, 0, 0, 0, 4], ... [0, 0, 3, 3, 0, 0], ... [7, 0, 0, 0, 0, 5], ... [7, 7, 0, 5, 5, 5], ... [7, 7, 0, 0, 5, 5]]) >>> mask = np.zeros_like(segm.data, dtype=np.bool) >>> mask[0, :] = True # mask the first row >>> segm.remove_masked_labels(mask) >>> segm.data array([[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 3, 3, 0, 0], [7, 0, 0, 0, 0, 5], [7, 7, 0, 5, 5, 5], [7, 7, 0, 0, 5, 5]]) >>> segm = SegmentationImage([[1, 1, 0, 0, 4, 4], ... [0, 0, 0, 0, 0, 4], ... [0, 0, 3, 3, 0, 0], ... [7, 0, 0, 0, 0, 5], ... [7, 7, 0, 5, 5, 5], ... [7, 7, 0, 0, 5, 5]]) >>> segm.remove_masked_labels(mask, partial_overlap=False) >>> segm.data array([[0, 0, 0, 0, 4, 4], [0, 0, 0, 0, 0, 4], [0, 0, 3, 3, 0, 0], [7, 0, 0, 0, 0, 5], [7, 7, 0, 5, 5, 5], [7, 7, 0, 0, 5, 5]]) """ if mask.shape != self.shape: raise ValueError('mask must have the same shape as the ' 'segmentation image') remove_labels = self._labels(self.data[mask]) if not partial_overlap: interior_labels = self._labels(self.data[~mask]) remove_labels = list(set(remove_labels) - set(interior_labels)) self.remove_labels(remove_labels, relabel=relabel) photutils-0.4/photutils/segmentation/deblend.py0000644000214200020070000002744513175634532024317 0ustar lbradleySTSCI\science00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst from __future__ import (absolute_import, division, print_function, unicode_literals) from copy import deepcopy import warnings import numpy as np from astropy.utils.exceptions import AstropyUserWarning from .core import SegmentationImage from .detect import detect_sources from ..utils.convolution import filter_data __all__ = ['deblend_sources'] def deblend_sources(data, segment_img, npixels, filter_kernel=None, labels=None, nlevels=32, contrast=0.001, mode='exponential', connectivity=8, relabel=True): """ Deblend overlapping sources labeled in a segmentation image. Sources are deblended using a combination of multi-thresholding and `watershed segmentation `_. In order to deblend sources, they must be separated enough such that there is a saddle between them. .. note:: This function is experimental. Please report any issues on the `Photutils GitHub issue tracker `_ Parameters ---------- data : array_like The 2D array of the image. segment_img : `~photutils.segmentation.SegmentationImage` or array_like (int) A 2D segmentation image, either as a `~photutils.segmentation.SegmentationImage` object or an `~numpy.ndarray`, with the same shape as ``data`` where sources are labeled by different positive integer values. A value of zero is reserved for the background. npixels : int The number of connected pixels, each greater than ``threshold``, that an object must have to be detected. ``npixels`` must be a positive integer. filter_kernel : array-like (2D) or `~astropy.convolution.Kernel2D`, optional The 2D array of the kernel used to filter the image before thresholding. 
Filtering the image will smooth the noise and maximize detectability of objects with a shape similar to the kernel. labels : int or array-like of int, optional The label numbers to deblend. If `None` (default), then all labels in the segmentation image will be deblended. nlevels : int, optional The number of multi-thresholding levels to use. Each source will be re-thresholded at ``nlevels``, spaced exponentially or linearly (see the ``mode`` keyword), between its minimum and maximum values within the source segment. contrast : float, optional The fraction of the total (blended) source flux that a local peak must have to be considered as a separate object. ``contrast`` must be between 0 and 1, inclusive. If ``contrast = 0`` then every local peak will be made a separate object (maximum deblending). If ``contrast = 1`` then no deblending will occur. The default is 0.001, which will deblend sources with a magnitude differences of about 7.5. mode : {'exponential', 'linear'}, optional The mode used in defining the spacing between the multi-thresholding levels (see the ``nlevels`` keyword). connectivity : {4, 8}, optional The type of pixel connectivity used in determining how pixels are grouped into a detected source. The options are 4 or 8 (default). 4-connected pixels touch along their edges. 8-connected pixels touch along their edges or corners. For reference, SExtractor uses 8-connected pixels. relabel : bool If `True` (default), then the segmentation image will be relabeled such that the labels are in sequential order starting from 1. Returns ------- segment_image : `~photutils.segmentation.SegmentationImage` A 2D segmentation image, with the same shape as ``data``, where sources are marked by different positive integer values. A value of zero is reserved for the background. See Also -------- :func:`photutils.detect_sources` """ if not isinstance(segment_img, SegmentationImage): segment_img = SegmentationImage(segment_img) if segment_img.shape != data.shape: raise ValueError('The data and segmentation image must have ' 'the same shape') if labels is None: labels = segment_img.labels labels = np.atleast_1d(labels) data = filter_data(data, filter_kernel, mode='constant', fill_value=0.0) last_label = segment_img.max segm_deblended = deepcopy(segment_img) for label in labels: segment_img.check_label(label) source_slice = segment_img.slices[label - 1] source_data = data[source_slice] source_segm = SegmentationImage(np.copy( segment_img.data[source_slice])) source_segm.keep_labels(label) # include only one label source_deblended = _deblend_source( source_data, source_segm, npixels, nlevels=nlevels, contrast=contrast, mode=mode, connectivity=connectivity) if not np.array_equal(source_deblended.data.astype(bool), source_segm.data.astype(bool)): raise ValueError('Deblending failed for source "{0}". Please ' 'ensure you used the same pixel connectivity ' 'in detect_sources and deblend_sources. 
If ' 'this issue persists, then please inform the ' 'developers.'.format(label)) if source_deblended.nlabels > 1: # replace the original source with the deblended source source_mask = (source_deblended.data > 0) segm_tmp = segm_deblended.data segm_tmp[source_slice][source_mask] = ( source_deblended.data[source_mask] + last_label) segm_deblended.data = segm_tmp # needed to call data setter last_label += source_deblended.nlabels if relabel: segm_deblended.relabel_sequential() return segm_deblended def _deblend_source(data, segment_img, npixels, nlevels=32, contrast=0.001, mode='exponential', connectivity=8): """ Deblend a single labeled source. Parameters ---------- data : array_like The 2D array of the image. The should be a cutout for a single source. ``data`` should already be smoothed by the same filter used in :func:`~photutils.detect_sources`, if applicable. segment_img : `~photutils.segmentation.SegmentationImage` A cutout `~photutils.segmentation.SegmentationImage` object with the same shape as ``data``. ``segment_img`` should contain only *one* source label. npixels : int The number of connected pixels, each greater than ``threshold``, that an object must have to be detected. ``npixels`` must be a positive integer. nlevels : int, optional The number of multi-thresholding levels to use. Each source will be re-thresholded at ``nlevels``, spaced exponentially or linearly (see the ``mode`` keyword), between its minimum and maximum values within the source segment. contrast : float, optional The fraction of the total (blended) source flux that a local peak must have to be considered as a separate object. ``contrast`` must be between 0 and 1, inclusive. If ``contrast = 0`` then every local peak will be made a separate object (maximum deblending). If ``contrast = 1`` then no deblending will occur. The default is 0.001, which will deblend sources with a magnitude differences of about 7.5. mode : {'exponential', 'linear'}, optional The mode used in defining the spacing between the multi-thresholding levels (see the ``nlevels`` keyword). connectivity : {4, 8}, optional The type of pixel connectivity used in determining how pixels are grouped into a detected source. The options are 4 or 8 (default). 4-connected pixels touch along their edges. 8-connected pixels touch along their edges or corners. For reference, SExtractor uses 8-connected pixels. Returns ------- segment_image : `~photutils.segmentation.SegmentationImage` A 2D segmentation image, with the same shape as ``data``, where sources are marked by different positive integer values. A value of zero is reserved for the background. """ from scipy import ndimage from skimage.morphology import watershed if nlevels < 1: raise ValueError('nlevels must be >= 1, got "{0}"'.format(nlevels)) if contrast < 0 or contrast > 1: raise ValueError('contrast must be >= 0 or <= 1, got ' '"{0}"'.format(contrast)) if connectivity == 4: selem = ndimage.generate_binary_structure(2, 1) elif connectivity == 8: selem = ndimage.generate_binary_structure(2, 2) else: raise ValueError('Invalid connectivity={0}. 
' 'Options are 4 or 8'.format(connectivity)) segm_mask = (segment_img.data > 0) source_values = data[segm_mask] source_min = np.min(source_values) source_max = np.max(source_values) if source_min == source_max: return segment_img # no deblending if source_min < 0: warnings.warn('Source "{0}" contains negative values, setting ' 'deblending mode to "linear"'.format( segment_img.labels[0]), AstropyUserWarning) mode = 'linear' source_sum = float(np.sum(source_values)) steps = np.arange(1., nlevels + 1) if mode == 'exponential': if source_min == 0: source_min = source_max * 0.01 thresholds = source_min * ((source_max / source_min) ** (steps / (nlevels + 1))) elif mode == 'linear': thresholds = source_min + ((source_max - source_min) / (nlevels + 1)) * steps else: raise ValueError('"{0}" is an invalid mode; mode must be ' '"exponential" or "linear"') # create top-down tree of local peaks segm_tree = [] for level in thresholds[::-1]: segm_tmp = detect_sources(data, level, npixels=npixels, connectivity=connectivity) if segm_tmp.nlabels >= 2: fluxes = [] for i in segm_tmp.labels: fluxes.append(np.sum(data[segm_tmp == i])) idx = np.where((np.array(fluxes) / source_sum) >= contrast)[0] if len(idx >= 2): segm_tree.append(segm_tmp) nbranch = len(segm_tree) if nbranch == 0: return segment_img else: for j in np.arange(nbranch - 1, 0, -1): intersect_mask = (segm_tree[j].data * segm_tree[j - 1].data).astype(bool) intersect_labels = np.unique(segm_tree[j].data[intersect_mask]) if segm_tree[j - 1].nlabels <= len(intersect_labels): segm_tree[j - 1] = segm_tree[j] else: # If a higher tree level has more peaks than in the # intersected label(s) with the level below, then remove # the intersected label(s) in the lower level, add the # higher level, and relabel. segm_tree[j].remove_labels(intersect_labels) new_segments = segm_tree[j].data + segm_tree[j - 1].data new_segm, nsegm = ndimage.label(new_segments) segm_tree[j - 1] = SegmentationImage(new_segm) return SegmentationImage(watershed(-data, segm_tree[0].data, mask=segment_img.data, connectivity=selem)) photutils-0.4/photutils/segmentation/detect.py0000644000214200020070000002206413175634532024162 0ustar lbradleySTSCI\science00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst from __future__ import (absolute_import, division, print_function, unicode_literals) import numpy as np from astropy.stats import gaussian_fwhm_to_sigma from astropy.convolution import Gaussian2DKernel from .core import SegmentationImage from ..detection import detect_threshold from ..utils.convolution import filter_data __all__ = ['detect_sources', 'make_source_mask'] def detect_sources(data, threshold, npixels, filter_kernel=None, connectivity=8): """ Detect sources above a specified threshold value in an image and return a `~photutils.segmentation.SegmentationImage` object. Detected sources must have ``npixels`` connected pixels that are each greater than the ``threshold`` value. If the filtering option is used, then the ``threshold`` is applied to the filtered image. This function does not deblend overlapping sources. First use this function to detect sources followed by :func:`~photutils.segmentation.deblend_sources` to deblend sources. Parameters ---------- data : array_like The 2D array of the image. threshold : float or array-like The data value or pixel-wise data values to be used for the detection threshold. A 2D ``threshold`` must have the same shape as ``data``. See `~photutils.detection.detect_threshold` for one way to create a ``threshold`` image. 
npixels : int The number of connected pixels, each greater than ``threshold``, that an object must have to be detected. ``npixels`` must be a positive integer. filter_kernel : array-like (2D) or `~astropy.convolution.Kernel2D`, optional The 2D array of the kernel used to filter the image before thresholding. Filtering the image will smooth the noise and maximize detectability of objects with a shape similar to the kernel. connectivity : {4, 8}, optional The type of pixel connectivity used in determining how pixels are grouped into a detected source. The options are 4 or 8 (default). 4-connected pixels touch along their edges. 8-connected pixels touch along their edges or corners. For reference, SExtractor uses 8-connected pixels. Returns ------- segment_image : `~photutils.segmentation.SegmentationImage` A 2D segmentation image, with the same shape as ``data``, where sources are marked by different positive integer values. A value of zero is reserved for the background. See Also -------- :func:`photutils.detection.detect_threshold`, :class:`photutils.segmentation.SegmentationImage`, :func:`photutils.segmentation.source_properties` :func:`photutils.segmentation.deblend_sources` Examples -------- .. plot:: :include-source: # make a table of Gaussian sources from astropy.table import Table table = Table() table['amplitude'] = [50, 70, 150, 210] table['x_mean'] = [160, 25, 150, 90] table['y_mean'] = [70, 40, 25, 60] table['x_stddev'] = [15.2, 5.1, 3., 8.1] table['y_stddev'] = [2.6, 2.5, 3., 4.7] table['theta'] = np.array([145., 20., 0., 60.]) * np.pi / 180. # make an image of the sources with Gaussian noise from photutils.datasets import make_gaussian_sources_image from photutils.datasets import make_noise_image shape = (100, 200) sources = make_gaussian_sources_image(shape, table) noise = make_noise_image(shape, type='gaussian', mean=0., stddev=5., random_state=12345) image = sources + noise # detect the sources from photutils import detect_threshold, detect_sources threshold = detect_threshold(image, snr=3) from astropy.convolution import Gaussian2DKernel sigma = 3.0 / (2.0 * np.sqrt(2.0 * np.log(2.0))) # FWHM = 3 kernel = Gaussian2DKernel(sigma, x_size=3, y_size=3) kernel.normalize() segm = detect_sources(image, threshold, npixels=5, filter_kernel=kernel) # plot the image and the segmentation image import matplotlib.pyplot as plt fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(8, 8)) ax1.imshow(image, origin='lower', interpolation='nearest') ax2.imshow(segm.data, origin='lower', interpolation='nearest') """ from scipy import ndimage if (npixels <= 0) or (int(npixels) != npixels): raise ValueError('npixels must be a positive integer, got ' '"{0}"'.format(npixels)) image = (filter_data(data, filter_kernel, mode='constant', fill_value=0.0, check_normalization=True) > threshold) if connectivity == 4: selem = ndimage.generate_binary_structure(2, 1) elif connectivity == 8: selem = ndimage.generate_binary_structure(2, 2) else: raise ValueError('Invalid connectivity={0}. 
' 'Options are 4 or 8'.format(connectivity)) objlabels, nobj = ndimage.label(image, structure=selem) objslices = ndimage.find_objects(objlabels) # remove objects with less than npixels for objslice in objslices: objlabel = objlabels[objslice] obj_npix = len(np.where(objlabel.ravel() != 0)[0]) if obj_npix < npixels: objlabels[objslice] = 0 # relabel to make sequential label indices objlabels, nobj = ndimage.label(objlabels, structure=selem) return SegmentationImage(objlabels) def make_source_mask(data, snr, npixels, mask=None, mask_value=None, filter_fwhm=None, filter_size=3, filter_kernel=None, sigclip_sigma=3.0, sigclip_iters=5, dilate_size=11): """ Make a source mask using source segmentation and binary dilation. Parameters ---------- data : array_like The 2D array of the image. snr : float The signal-to-noise ratio per pixel above the ``background`` for which to consider a pixel as possibly being part of a source. npixels : int The number of connected pixels, each greater than ``threshold``, that an object must have to be detected. ``npixels`` must be a positive integer. mask : array_like, bool, optional A boolean mask with the same shape as ``data``, where a `True` value indicates the corresponding element of ``data`` is masked. Masked pixels are ignored when computing the image background statistics. mask_value : float, optional An image data value (e.g., ``0.0``) that is ignored when computing the image background statistics. ``mask_value`` will be ignored if ``mask`` is input. filter_fwhm : float, optional The full-width at half-maximum (FWHM) of the Gaussian kernel to filter the image before thresholding. ``filter_fwhm`` and ``filter_size`` are ignored if ``filter_kernel`` is defined. filter_size : float, optional The size of the square Gaussian kernel image. Used only if ``filter_fwhm`` is defined. ``filter_fwhm`` and ``filter_size`` are ignored if ``filter_kernel`` is defined. filter_kernel : array-like (2D) or `~astropy.convolution.Kernel2D`, optional The 2D array of the kernel used to filter the image before thresholding. Filtering the image will smooth the noise and maximize detectability of objects with a shape similar to the kernel. ``filter_kernel`` overrides ``filter_fwhm`` and ``filter_size``. sigclip_sigma : float, optional The number of standard deviations to use as the clipping limit when calculating the image background statistics. sigclip_iters : int, optional The number of iterations to perform sigma clipping, or `None` to clip until convergence is achieved (i.e., continue until the last iteration clips nothing) when calculating the image background statistics. dilate_size : int, optional The size of the square array used to dilate the segmentation image. Returns ------- mask : 2D `~numpy.ndarray`, bool A 2D boolean image containing the source mask. 
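Examples
--------
A minimal usage sketch (the input array below is a hypothetical,
background-only test image, not data shipped with photutils; the
parameter values are illustrative)::

    import numpy as np
    from photutils import make_source_mask

    data = np.random.normal(0., 5., size=(64, 64))  # hypothetical image
    mask = make_source_mask(data, snr=3, npixels=5, dilate_size=7)
    # ``mask`` is a 2D boolean array; True marks pixels belonging to the
    # (dilated) detected source segments, suitable for excluding sources
    # when estimating background statistics.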
""" from scipy import ndimage threshold = detect_threshold(data, snr, background=None, error=None, mask=mask, mask_value=None, sigclip_sigma=sigclip_sigma, sigclip_iters=sigclip_iters) kernel = None if filter_kernel is not None: kernel = filter_kernel if filter_fwhm is not None: sigma = filter_fwhm * gaussian_fwhm_to_sigma kernel = Gaussian2DKernel(sigma, x_size=filter_size, y_size=filter_size) if kernel is not None: kernel.normalize() segm = detect_sources(data, threshold, npixels, filter_kernel=kernel) selem = np.ones((dilate_size, dilate_size)) return ndimage.binary_dilation(segm.data.astype(np.bool), selem) photutils-0.4/photutils/segmentation/properties.py0000644000214200020070000016446413175634532025121 0ustar lbradleySTSCI\science00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst from __future__ import (absolute_import, division, print_function, unicode_literals) import warnings import numpy as np import astropy.units as u from astropy.coordinates import SkyCoord from astropy.table import QTable from astropy.utils import deprecated, lazyproperty from astropy.utils.exceptions import AstropyDeprecationWarning from astropy.wcs.utils import pixel_to_skycoord from .core import SegmentationImage from ..utils.convolution import filter_data __all__ = ['SourceProperties', 'source_properties', 'SourceCatalog', 'properties_table'] __doctest_requires__ = {('SourceProperties', 'SourceProperties.*', 'SourceCatalog', 'SourceCatalog.*', 'source_properties', 'properties_table'): ['scipy', 'skimage']} class SourceProperties(object): """ Class to calculate photometry and morphological properties of a single labeled source. Parameters ---------- data : array_like or `~astropy.units.Quantity` The 2D array from which to calculate the source photometry and properties. If ``filtered_data`` is input, then it will be used instead of ``data`` to calculate the source centroid and morphological properties. Source photometry is always measured from ``data``. For accurate source properties and photometry, ``data`` should be background-subtracted. segment_img : `SegmentationImage` or array_like (int) A 2D segmentation image, either as a `SegmentationImage` object or an `~numpy.ndarray`, with the same shape as ``data`` where sources are labeled by different positive integer values. A value of zero is reserved for the background. label : int The label number of the source whose properties to calculate. filtered_data : array-like or `~astropy.units.Quantity`, optional The filtered version of the background-subtracted ``data`` from which to calculate the source centroid and morphological properties. The kernel used to perform the filtering should be the same one used in defining the source segments (e.g., see :func:`~photutils.detect_sources`). If `None`, then the unfiltered ``data`` will be used instead. Note that SExtractor's centroid and morphological parameters are calculated from the filtered "detection" image. error : array_like or `~astropy.units.Quantity`, optional The pixel-wise Gaussian 1-sigma errors of the input ``data``. ``error`` is assumed to include *all* sources of error, including the Poisson error of the sources (see `~photutils.utils.calc_total_error`) . ``error`` must have the same shape as the input ``data``. See the Notes section below for details on the error propagation. mask : array_like (bool), optional A boolean mask with the same shape as ``data`` where a `True` value indicates the corresponding element of ``data`` is masked. 
Masked data are excluded from all calculations. background : float, array_like, or `~astropy.units.Quantity`, optional The background level that was *previously* present in the input ``data``. ``background`` may either be a scalar value or a 2D image with the same shape as the input ``data``. Inputting the ``background`` merely allows for its properties to be measured within each source segment. The input ``background`` does *not* get subtracted from the input ``data``, which should already be background-subtracted. wcs : `~astropy.wcs.WCS` The WCS transformation to use. If `None`, then any sky-based properties will be set to `None`. Notes ----- `SExtractor`_'s centroid and morphological parameters are always calculated from the filtered "detection" image. The usual downside of the filtering is the sources will be made more circular than they actually are. If you wish to reproduce `SExtractor`_ results, then use the ``filtered_data`` input. If ``filtered_data`` is `None`, then the unfiltered ``data`` will be used for the source centroid and morphological parameters. Negative (background-subtracted) data values within the source segment are set to zero when measuring morphological properties based on image moments. This could occur, for example, if the segmentation image was defined from a different image (e.g., different bandpass) or if the background was oversubtracted. Note that `~photutils.SourceProperties.source_sum` includes the contribution of negative (background-subtracted) data values. The input ``error`` is assumed to include *all* sources of error, including the Poisson error of the sources. `~photutils.SourceProperties.source_sum_err` is simply the quadrature sum of the pixel-wise total errors over the non-masked pixels within the source segment: .. math:: \\Delta F = \\sqrt{\\sum_{i \\in S} \\sigma_{\\mathrm{tot}, i}^2} where :math:`\\Delta F` is `~photutils.SourceProperties.source_sum_err`, :math:`S` are the non-masked pixels in the source segment, and :math:`\\sigma_{\\mathrm{tot}, i}` is the input ``error`` array. Custom errors for source segments can be calculated using the `~photutils.SourceProperties.error_cutout_ma` and `~photutils.SourceProperties.background_cutout_ma` properties, which are 2D `~numpy.ma.MaskedArray` cutout versions of the input ``error`` and ``background``. The mask is `True` for both pixels outside of the source segment and masked pixels from the ``mask`` input. .. 
_SExtractor: http://www.astromatic.net/software/sextractor """ def __init__(self, data, segment_img, label, filtered_data=None, error=None, mask=None, background=None, wcs=None): if not isinstance(segment_img, SegmentationImage): segment_img = SegmentationImage(segment_img) if segment_img.shape != data.shape: raise ValueError('segment_img and data must have the same shape.') if error is not None: error = np.atleast_1d(error) if len(error) == 1: error = np.zeros(data.shape) + error if error.shape != data.shape: raise ValueError('error and data must have the same shape.') if mask is np.ma.nomask: mask = np.zeros(data.shape).astype(bool) if mask is not None: if mask.shape != data.shape: raise ValueError('mask and data must have the same shape.') if background is not None: background = np.atleast_1d(background) if len(background) == 1: background = np.zeros(data.shape) + background if background.shape != data.shape: raise ValueError('background and data must have the same ' 'shape.') # data and filtered_data should be background-subtracted # for accurate source photometry and properties self._data = data if filtered_data is None: self._filtered_data = data else: self._filtered_data = filtered_data self._error = error # total error; 2D array self._background = background # 2D array segment_img.check_label(label) self.label = label self._slice = segment_img.slices[label - 1] self._segment_img = segment_img self._mask = mask self._wcs = wcs def __getitem__(self, key): return getattr(self, key, None) def make_cutout(self, data, masked_array=False): """ Create a (masked) cutout array from the input ``data`` using the minimal bounding box of the source segment. Parameters ---------- data : array-like (2D) The data array from which to create the masked cutout array. ``data`` must have the same shape as the segmentation image input into `SourceProperties`. masked_array : bool, optional If `True` then a `~numpy.ma.MaskedArray` will be created where the mask is `True` for both pixels outside of the source segment and any masked pixels. If `False`, then a `~numpy.ndarray` will be generated. Returns ------- result : `~numpy.ndarray` or `~numpy.ma.MaskedArray` (2D) The 2D cutout array or masked array. """ if data is None: return None data = np.asanyarray(data) if data.shape != self._data.shape: raise ValueError('data must have the same shape as the ' 'segmentation image input to SourceProperties') if masked_array: return np.ma.masked_array(data[self._slice], mask=self._cutout_total_mask) else: return data[self._slice] def to_table(self, columns=None, exclude_columns=None): """ Create a `~astropy.table.QTable` of properties. If ``columns`` or ``exclude_columns`` are not input, then the `~astropy.table.QTable` will include all scalar-valued properties. Multi-dimensional properties, e.g. `~photutils.SourceProperties.data_cutout`, can be included in the ``columns`` input. Parameters ---------- columns : str or list of str, optional Names of columns, in order, to include in the output `~astropy.table.QTable`. The allowed column names are any of the attributes of `SourceProperties`. exclude_columns : str or list of str, optional Names of columns to exclude from the default properties list in the output `~astropy.table.QTable`. The default properties are those with scalar values. Returns ------- table : `~astropy.table.QTable` A single-row table of properties of the source. 
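Examples
--------
A minimal sketch of typical use; ``image`` and ``segm`` below are
placeholders for a background-subtracted image and its segmentation
image (e.g., from ``detect_sources``), not actual data::

    from photutils import source_properties

    props = source_properties(image, segm)
    # single-row table for the first detected source, restricted to a
    # few scalar-valued columns
    tbl = props[0].to_table(columns=['id', 'xcentroid', 'ycentroid',
                                     'source_sum'])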
""" return _properties_table(self, columns=columns, exclude_columns=exclude_columns) @lazyproperty def _cutout_segment_bool(self): """ _cutout_segment_bool is `True` only for pixels in the source segment of interest. Pixels from other sources within the rectangular cutout are not included. """ return self._segment_img.data[self._slice] == self.label @lazyproperty def _cutout_total_mask(self): """ _cutout_total_mask is `True` for regions outside of the source segment or where the input mask is `True`. """ mask = ~self._cutout_segment_bool if self._mask is not None: mask |= self._mask[self._slice] return mask @lazyproperty def data_cutout(self): """ A 2D cutout from the (background-subtracted) data of the source segment. """ return self.make_cutout(self._data, masked_array=False) @lazyproperty def data_cutout_ma(self): """ A 2D `~numpy.ma.MaskedArray` cutout from the (background-subtracted) data, where the mask is `True` for both pixels outside of the source segment and masked pixels. """ return self.make_cutout(self._data, masked_array=True) @lazyproperty def _data_cutout_maskzeroed_double(self): """ A 2D cutout from the (background-subtracted) (filtered) data, where pixels outside of the source segment and masked pixels are set to zero. Invalid values (e.g. NaNs or infs) are set to zero. Negative data values are also set to zero because negative pixels (especially at large radii) can result in image moments that result in negative variances. The cutout image is double precision, which is required for scikit-image's Cython-based moment functions. """ cutout = self.make_cutout(self._filtered_data, masked_array=False) cutout = np.where(np.isfinite(cutout), cutout, 0.) cutout = np.where(cutout > 0, cutout, 0.) # negative pixels -> 0 return (cutout * ~self._cutout_total_mask).astype(np.float64) @lazyproperty def error_cutout_ma(self): """ A 2D `~numpy.ma.MaskedArray` cutout from the input ``error`` image, where the mask is `True` for both pixels outside of the source segment and masked pixels. If ``error`` is `None`, then ``error_cutout_ma`` is also `None`. """ return self.make_cutout(self._error, masked_array=True) @lazyproperty def background_cutout_ma(self): """ A 2D `~numpy.ma.MaskedArray` cutout from the input ``background``, where the mask is `True` for both pixels outside of the source segment and masked pixels. If ``background`` is `None`, then ``background_cutout_ma`` is also `None`. """ return self.make_cutout(self._background, masked_array=True) @lazyproperty def coords(self): """ A tuple of `~numpy.ndarray` containing the ``y`` and ``x`` pixel coordinates of the source segment. Masked pixels are not included. """ yy, xx = np.nonzero(self.data_cutout_ma) coords = (yy + self._slice[0].start, xx + self._slice[1].start) return coords @lazyproperty def values(self): """ A `~numpy.ndarray` of the (background-subtracted) pixel values within the source segment. Masked pixels are not included. """ return self.data_cutout[~self._cutout_total_mask] @lazyproperty def moments(self): """Spatial moments up to 3rd order of the source.""" from skimage.measure import moments return moments(self._data_cutout_maskzeroed_double, 3) @lazyproperty def moments_central(self): """ Central moments (translation invariant) of the source up to 3rd order. 
""" from skimage.measure import moments_central ycentroid, xcentroid = self.cutout_centroid.value return moments_central(self._data_cutout_maskzeroed_double, ycentroid, xcentroid, 3) @lazyproperty def id(self): """ The source identification number corresponding to the object label in the segmentation image. """ return self.label @lazyproperty def cutout_centroid(self): """ The ``(y, x)`` coordinate, relative to the `data_cutout`, of the centroid within the source segment. """ m = self.moments if m[0, 0] != 0: ycentroid = m[0, 1] / m[0, 0] xcentroid = m[1, 0] / m[0, 0] return (ycentroid, xcentroid) * u.pix else: return (np.nan, np.nan) * u.pix @lazyproperty def centroid(self): """ The ``(y, x)`` coordinate of the centroid within the source segment. """ ycen, xcen = self.cutout_centroid.value return (ycen + self._slice[0].start, xcen + self._slice[1].start) * u.pix @lazyproperty def xcentroid(self): """ The ``x`` coordinate of the centroid within the source segment. """ return self.centroid[1] @lazyproperty def ycentroid(self): """ The ``y`` coordinate of the centroid within the source segment. """ return self.centroid[0] @lazyproperty def sky_centroid(self): """ The sky coordinates of the centroid within the source segment, returned as a `~astropy.coordinates.SkyCoord` object. The output coordinate frame is the same as the input WCS. """ if self._wcs is not None: return pixel_to_skycoord(self.xcentroid.value, self.ycentroid.value, self._wcs, origin=0) else: return None @lazyproperty def sky_centroid_icrs(self): """ The sky coordinates, in the International Celestial Reference System (ICRS) frame, of the centroid within the source segment, returned as a `~astropy.coordinates.SkyCoord` object. """ if self._wcs is not None: return self.sky_centroid.icrs else: return None @lazyproperty @deprecated(0.4, alternative='sky_centroid_icrs') def icrs_centroid(self): """ The sky coordinates, in the International Celestial Reference System (ICRS) frame, of the centroid within the source segment, returned as a `~astropy.coordinates.SkyCoord` object. """ return self.sky_centroid_icrs @lazyproperty @deprecated(0.4, alternative='sky_centroid_icrs.ra') def ra_icrs_centroid(self): """ The ICRS Right Ascension coordinate (in degrees) of the centroid within the source segment. """ if self._wcs is not None: return self.sky_centroid_icrs.ra.degree * u.deg else: return None @lazyproperty @deprecated(0.4, alternative='sky_centroid_icrs.dec') def dec_icrs_centroid(self): """ The ICRS Declination coordinate (in degrees) of the centroid within the source segment. """ if self._wcs is not None: return self.sky_centroid_icrs.dec.degree * u.deg else: return None @lazyproperty def bbox(self): """ The bounding box ``(ymin, xmin, ymax, xmax)`` of the minimal rectangular region containing the source segment. """ # (stop - 1) to return the max pixel location, not the slice index return (self._slice[0].start, self._slice[1].start, self._slice[0].stop - 1, self._slice[1].stop - 1) * u.pix @lazyproperty def xmin(self): """ The minimum ``x`` pixel location of the minimal bounding box (`~photutils.SourceProperties.bbox`) of the source segment. """ return self.bbox[1] @lazyproperty def xmax(self): """ The maximum ``x`` pixel location of the minimal bounding box (`~photutils.SourceProperties.bbox`) of the source segment. """ return self.bbox[3] @lazyproperty def ymin(self): """ The minimum ``y`` pixel location of the minimal bounding box (`~photutils.SourceProperties.bbox`) of the source segment. 
""" return self.bbox[0] @lazyproperty def ymax(self): """ The maximum ``y`` pixel location of the minimal bounding box (`~photutils.SourceProperties.bbox`) of the source segment. """ return self.bbox[2] @lazyproperty def sky_bbox_ll(self): """ The sky coordinates of the lower-left vertex of the minimal bounding box of the source segment, returned as a `~astropy.coordinates.SkyCoord` object. The bounding box encloses all of the source segment pixels in their entirety, thus the vertices are at the pixel *corners*. """ if self._wcs is not None: return pixel_to_skycoord(self.xmin.value - 0.5, self.ymin.value - 0.5, self._wcs, origin=0) else: return None @lazyproperty def sky_bbox_ul(self): """ The sky coordinates of the upper-left vertex of the minimal bounding box of the source segment, returned as a `~astropy.coordinates.SkyCoord` object. The bounding box encloses all of the source segment pixels in their entirety, thus the vertices are at the pixel *corners*. """ if self._wcs is not None: return pixel_to_skycoord(self.xmin.value - 0.5, self.ymax.value + 0.5, self._wcs, origin=0) else: return None @lazyproperty def sky_bbox_lr(self): """ The sky coordinates of the lower-right vertex of the minimal bounding box of the source segment, returned as a `~astropy.coordinates.SkyCoord` object. The bounding box encloses all of the source segment pixels in their entirety, thus the vertices are at the pixel *corners*. """ if self._wcs is not None: return pixel_to_skycoord(self.xmax.value + 0.5, self.ymin.value - 0.5, self._wcs, origin=0) else: return None @lazyproperty def sky_bbox_ur(self): """ The sky coordinates of the upper-right vertex of the minimal bounding box of the source segment, returned as a `~astropy.coordinates.SkyCoord` object. The bounding box encloses all of the source segment pixels in their entirety, thus the vertices are at the pixel *corners*. """ if self._wcs is not None: return pixel_to_skycoord(self.xmax.value + 0.5, self.ymax.value + 0.5, self._wcs, origin=0) else: return None @lazyproperty def min_value(self): """ The minimum pixel value of the (background-subtracted) data within the source segment. """ return np.min(self.values) @lazyproperty def max_value(self): """ The maximum pixel value of the (background-subtracted) data within the source segment. """ return np.max(self.values) @lazyproperty def minval_cutout_pos(self): """ The ``(y, x)`` coordinate, relative to the `data_cutout`, of the minimum pixel value of the (background-subtracted) data. """ return np.argwhere(self.data_cutout_ma == self.min_value)[0] * u.pix @lazyproperty def maxval_cutout_pos(self): """ The ``(y, x)`` coordinate, relative to the `data_cutout`, of the maximum pixel value of the (background-subtracted) data. """ return np.argwhere(self.data_cutout_ma == self.max_value)[0] * u.pix @lazyproperty def minval_pos(self): """ The ``(y, x)`` coordinate of the minimum pixel value of the (background-subtracted) data. """ yp, xp = np.array(self.minval_cutout_pos) return (yp + self._slice[0].start, xp + self._slice[1].start) * u.pix @lazyproperty def maxval_pos(self): """ The ``(y, x)`` coordinate of the maximum pixel value of the (background-subtracted) data. """ yp, xp = np.array(self.maxval_cutout_pos) return (yp + self._slice[0].start, xp + self._slice[1].start) * u.pix @lazyproperty def minval_xpos(self): """ The ``x`` coordinate of the minimum pixel value of the (background-subtracted) data. 
""" return self.minval_pos[1] @lazyproperty def minval_ypos(self): """ The ``y`` coordinate of the minimum pixel value of the (background-subtracted) data. """ return self.minval_pos[0] @lazyproperty def maxval_xpos(self): """ The ``x`` coordinate of the maximum pixel value of the (background-subtracted) data. """ return self.maxval_pos[1] @lazyproperty def maxval_ypos(self): """ The ``y`` coordinate of the maximum pixel value of the (background-subtracted) data. """ return self.maxval_pos[0] @lazyproperty def area(self): """The area of the source segment in units of pixels**2.""" return len(self.values) * u.pix**2 @lazyproperty def equivalent_radius(self): """ The radius of a circle with the same `area` as the source segment. """ return np.sqrt(self.area / np.pi) @lazyproperty def perimeter(self): """ The perimeter of the source segment, approximated lines through the centers of the border pixels using a 4-connectivity. """ from skimage.measure import perimeter return perimeter(self._cutout_segment_bool, 4) * u.pix @lazyproperty def inertia_tensor(self): """ The inertia tensor of the source for the rotation around its center of mass. """ mu = self.moments_central a = mu[2, 0] b = -mu[1, 1] c = mu[0, 2] return np.array([[a, b], [b, c]]) * u.pix**2 @lazyproperty def covariance(self): """ The covariance matrix of the 2D Gaussian function that has the same second-order moments as the source. """ mu = self.moments_central if mu[0, 0] != 0: m = mu / mu[0, 0] covariance = self._check_covariance( np.array([[m[2, 0], m[1, 1]], [m[1, 1], m[0, 2]]])) return covariance * u.pix**2 else: return np.empty((2, 2)) * np.nan * u.pix**2 @staticmethod def _check_covariance(covariance): """ Check and modify the covariance matrix in the case of "infinitely" thin detections. This follows SExtractor's prescription of incrementally increasing the diagonal elements by 1/12. """ p = 1. / 12 # arbitrary SExtractor value val = (covariance[0, 0] * covariance[1, 1]) - covariance[0, 1]**2 if val >= p**2: return covariance else: covar = np.copy(covariance) while val < p**2: covar[0, 0] += p covar[1, 1] += p val = (covar[0, 0] * covar[1, 1]) - covar[0, 1]**2 return covar @lazyproperty def covariance_eigvals(self): """ The two eigenvalues of the `covariance` matrix in decreasing order. """ if not np.isnan(np.sum(self.covariance)): eigvals = np.linalg.eigvals(self.covariance) if np.any(eigvals < 0): # negative variance return (np.nan, np.nan) * u.pix**2 return (np.max(eigvals), np.min(eigvals)) * u.pix**2 else: return (np.nan, np.nan) * u.pix**2 @lazyproperty def semimajor_axis_sigma(self): """ The 1-sigma standard deviation along the semimajor axis of the 2D Gaussian function that has the same second-order central moments as the source. """ # this matches SExtractor's A parameter return np.sqrt(self.covariance_eigvals[0]) @lazyproperty def semiminor_axis_sigma(self): """ The 1-sigma standard deviation along the semiminor axis of the 2D Gaussian function that has the same second-order central moments as the source. """ # this matches SExtractor's B parameter return np.sqrt(self.covariance_eigvals[1]) @lazyproperty def eccentricity(self): """ The eccentricity of the 2D Gaussian function that has the same second-order moments as the source. The eccentricity is the fraction of the distance along the semimajor axis at which the focus lies. .. math:: e = \\sqrt{1 - \\frac{b^2}{a^2}} where :math:`a` and :math:`b` are the lengths of the semimajor and semiminor axes, respectively. 
""" l1, l2 = self.covariance_eigvals if l1 == 0: return 0. return np.sqrt(1. - (l2 / l1)) @lazyproperty def orientation(self): """ The angle in radians between the ``x`` axis and the major axis of the 2D Gaussian function that has the same second-order moments as the source. The angle increases in the counter-clockwise direction. """ a, b, b, c = self.covariance.flat if a < 0 or c < 0: # negative variance return np.nan * u.rad return 0.5 * np.arctan2(2. * b, (a - c)) @lazyproperty def elongation(self): """ The ratio of the lengths of the semimajor and semiminor axes: .. math:: \\mathrm{elongation} = \\frac{a}{b} where :math:`a` and :math:`b` are the lengths of the semimajor and semiminor axes, respectively. Note that this is the same as `SExtractor`_'s elongation parameter. """ return self.semimajor_axis_sigma / self.semiminor_axis_sigma @lazyproperty def ellipticity(self): """ ``1`` minus the ratio of the lengths of the semimajor and semiminor axes (or ``1`` minus the `elongation`): .. math:: \\mathrm{ellipticity} = 1 - \\frac{b}{a} where :math:`a` and :math:`b` are the lengths of the semimajor and semiminor axes, respectively. Note that this is the same as `SExtractor`_'s ellipticity parameter. """ return 1.0 - (self.semiminor_axis_sigma / self.semimajor_axis_sigma) @lazyproperty def covar_sigx2(self): """ The ``(0, 0)`` element of the `covariance` matrix, representing :math:`\\sigma_x^2`, in units of pixel**2. Note that this is the same as `SExtractor`_'s X2 parameter. """ return self.covariance[0, 0] @lazyproperty def covar_sigy2(self): """ The ``(1, 1)`` element of the `covariance` matrix, representing :math:`\\sigma_y^2`, in units of pixel**2. Note that this is the same as `SExtractor`_'s Y2 parameter. """ return self.covariance[1, 1] @lazyproperty def covar_sigxy(self): """ The ``(0, 1)`` and ``(1, 0)`` elements of the `covariance` matrix, representing :math:`\\sigma_x \\sigma_y`, in units of pixel**2. Note that this is the same as `SExtractor`_'s XY parameter. """ return self.covariance[0, 1] @lazyproperty def cxx(self): """ `SExtractor`_'s CXX ellipse parameter in units of pixel**(-2). The ellipse is defined as .. math:: cxx (x - \\bar{x})^2 + cxy (x - \\bar{x}) (y - \\bar{y}) + cyy (y - \\bar{y})^2 = R^2 where :math:`R` is a parameter which scales the ellipse (in units of the axes lengths). `SExtractor`_ reports that the isophotal limit of a source is well represented by :math:`R \\approx 3`. """ return ((np.cos(self.orientation) / self.semimajor_axis_sigma)**2 + (np.sin(self.orientation) / self.semiminor_axis_sigma)**2) @lazyproperty def cyy(self): """ `SExtractor`_'s CYY ellipse parameter in units of pixel**(-2). The ellipse is defined as .. math:: cxx (x - \\bar{x})^2 + cxy (x - \\bar{x}) (y - \\bar{y}) + cyy (y - \\bar{y})^2 = R^2 where :math:`R` is a parameter which scales the ellipse (in units of the axes lengths). `SExtractor`_ reports that the isophotal limit of a source is well represented by :math:`R \\approx 3`. """ return ((np.sin(self.orientation) / self.semimajor_axis_sigma)**2 + (np.cos(self.orientation) / self.semiminor_axis_sigma)**2) @lazyproperty def cxy(self): """ `SExtractor`_'s CXY ellipse parameter in units of pixel**(-2). The ellipse is defined as .. math:: cxx (x - \\bar{x})^2 + cxy (x - \\bar{x}) (y - \\bar{y}) + cyy (y - \\bar{y})^2 = R^2 where :math:`R` is a parameter which scales the ellipse (in units of the axes lengths). `SExtractor`_ reports that the isophotal limit of a source is well represented by :math:`R \\approx 3`. """ return (2. 
* np.cos(self.orientation) * np.sin(self.orientation) * ((1. / self.semimajor_axis_sigma**2) - (1. / self.semiminor_axis_sigma**2))) @lazyproperty def source_sum(self): """ The sum of the non-masked (background-subtracted) data values within the source segment. .. math:: F = \\sum_{i \\in S} (I_i - B_i) where :math:`F` is ``source_sum``, :math:`(I_i - B_i)` is the background-subtracted input ``data``, and :math:`S` are the non-masked pixels in the source segment. """ return np.sum(np.ma.masked_array(self._data[self._slice], mask=self._cutout_total_mask)) @lazyproperty def source_sum_err(self): """ The uncertainty of `~photutils.SourceProperties.source_sum`, propagated from the input ``error`` array. ``source_sum_err`` is the quadrature sum of the total errors over the non-masked pixels within the source segment: .. math:: \\Delta F = \\sqrt{\\sum_{i \\in S} \\sigma_{\\mathrm{tot}, i}^2} where :math:`\\Delta F` is ``source_sum_err``, :math:`\\sigma_{\\mathrm{tot, i}}` are the pixel-wise total errors, and :math:`S` are the non-masked pixels in the source segment. """ if self._error is not None: # power doesn't work here, see astropy #2968 # return np.sqrt(np.sum(self.error_cutout_ma**2)) return np.sqrt(np.sum( np.ma.masked_array(self.error_cutout_ma.data**2, mask=self.error_cutout_ma.mask))) else: return None @lazyproperty def background_sum(self): """The sum of ``background`` values within the source segment.""" if self._background is not None: return np.sum(self.background_cutout_ma) else: return None @lazyproperty def background_mean(self): """The mean of ``background`` values within the source segment.""" if self._background is not None: return np.mean(self.background_cutout_ma) else: return None @lazyproperty def background_at_centroid(self): """ The value of the ``background`` at the position of the source centroid. Fractional position values are determined using bilinear interpolation. """ from scipy.ndimage import map_coordinates if self._background is None: return None else: value = map_coordinates(self._background, [[self.ycentroid.value], [self.xcentroid.value]])[0] if isinstance(self._background, u.Quantity): value *= self._background.unit return value def source_properties(data, segment_img, error=None, mask=None, background=None, filter_kernel=None, wcs=None, labels=None): """ Calculate photometry and morphological properties of sources defined by a labeled segmentation image. Parameters ---------- data : array_like or `~astropy.units.Quantity` The 2D array from which to calculate the source photometry and properties. ``data`` should be background-subtracted. segment_img : `SegmentationImage` or array_like (int) A 2D segmentation image, either as a `SegmentationImage` object or an `~numpy.ndarray`, with the same shape as ``data`` where sources are labeled by different positive integer values. A value of zero is reserved for the background. error : array_like or `~astropy.units.Quantity`, optional The pixel-wise Gaussian 1-sigma errors of the input ``data``. ``error`` is assumed to include *all* sources of error, including the Poisson error of the sources (see `~photutils.utils.calc_total_error`) . ``error`` must have the same shape as the input ``data``. See the Notes section below for details on the error propagation. mask : array_like (bool), optional A boolean mask with the same shape as ``data`` where a `True` value indicates the corresponding element of ``data`` is masked. Masked data are excluded from all calculations. 
background : float, array_like, or `~astropy.units.Quantity`, optional The background level that was *previously* present in the input ``data``. ``background`` may either be a scalar value or a 2D image with the same shape as the input ``data``. Inputting the ``background`` merely allows for its properties to be measured within each source segment. The input ``background`` does *not* get subtracted from the input ``data``, which should already be background-subtracted. filter_kernel : array-like (2D) or `~astropy.convolution.Kernel2D`, optional The 2D array of the kernel used to filter the data prior to calculating the source centroid and morphological parameters. The kernel should be the same one used in defining the source segments (e.g., see :func:`~photutils.detect_sources`). If `None`, then the unfiltered ``data`` will be used instead. Note that `SExtractor`_'s centroid and morphological parameters are calculated from the filtered "detection" image. wcs : `~astropy.wcs.WCS` The WCS transformation to use. If `None`, then any sky-based properties will be set to `None`. labels : int, array-like (1D, int) Subset of segmentation labels for which to calculate the properties. If `None`, then the properties will be calculated for all labeled sources (the default). Returns ------- output : list of `SourceProperties` objects A list of `SourceProperties` objects, one for each source. The properties can be accessed as attributes or keys. Notes ----- `SExtractor`_'s centroid and morphological parameters are always calculated from the filtered "detection" image. The usual downside of the filtering is the sources will be made more circular than they actually are. If you wish to reproduce `SExtractor`_ results, then use the ``filtered_data`` input. If ``filtered_data`` is `None`, then the unfiltered ``data`` will be used for the source centroid and morphological parameters. Negative (background-subtracted) data values within the source segment are set to zero when measuring morphological properties based on image moments. This could occur, for example, if the segmentation image was defined from a different image (e.g., different bandpass) or if the background was oversubtracted. Note that `~photutils.SourceProperties.source_sum` includes the contribution of negative (background-subtracted) data values. The input ``error`` is assumed to include *all* sources of error, including the Poisson error of the sources. `~photutils.SourceProperties.source_sum_err` is simply the quadrature sum of the pixel-wise total errors over the non-masked pixels within the source segment: .. math:: \\Delta F = \\sqrt{\\sum_{i \\in S} \\sigma_{\\mathrm{tot}, i}^2} where :math:`\\Delta F` is `~photutils.SourceProperties.source_sum_err`, :math:`S` are the non-masked pixels in the source segment, and :math:`\\sigma_{\\mathrm{tot}, i}` is the input ``error`` array. .. _SExtractor: http://www.astromatic.net/software/sextractor See Also -------- SegmentationImage, SourceProperties, detect_sources Examples -------- >>> import numpy as np >>> from photutils import SegmentationImage, source_properties >>> image = np.arange(16.).reshape(4, 4) >>> print(image) [[ 0. 1. 2. 3.] [ 4. 5. 6. 7.] [ 8. 9. 10. 11.] [ 12. 13. 14. 15.]] >>> segm = SegmentationImage([[1, 1, 0, 0], ... [1, 0, 0, 2], ... [0, 0, 2, 2], ... 
[0, 2, 2, 0]]) >>> props = source_properties(image, segm) Print some properties of the first object (labeled with ``1`` in the segmentation image): >>> props[0].id # id corresponds to segment label number 1 >>> props[0].centroid # doctest: +FLOAT_CMP >>> props[0].source_sum # doctest: +FLOAT_CMP 5.0 >>> props[0].area # doctest: +FLOAT_CMP >>> props[0].max_value # doctest: +FLOAT_CMP 4.0 Print some properties of the second object (labeled with ``2`` in the segmentation image): >>> props[1].id # id corresponds to segment label number 2 >>> props[1].centroid # doctest: +FLOAT_CMP >>> props[1].perimeter # doctest: +FLOAT_CMP >>> props[1].orientation # doctest: +FLOAT_CMP """ if not isinstance(segment_img, SegmentationImage): segment_img = SegmentationImage(segment_img) if segment_img.shape != data.shape: raise ValueError('segment_img and data must have the same shape.') # filter the data once, instead of repeating for each source if filter_kernel is not None: filtered_data = filter_data(data, filter_kernel, mode='constant', fill_value=0.0, check_normalization=True) else: filtered_data = None if labels is None: labels = segment_img.labels labels = np.atleast_1d(labels) sources_props = [] for label in labels: if label not in segment_img.labels: continue # skip invalid labels (without warnings) sources_props.append(SourceProperties( data, segment_img, label, filtered_data=filtered_data, error=error, mask=mask, background=background, wcs=wcs)) return SourceCatalog(sources_props, wcs=wcs) class SourceCatalog(object): """ Class to hold source catalogs. """ def __init__(self, properties_list, wcs=None): if isinstance(properties_list, SourceProperties): self._data = [properties_list] elif isinstance(properties_list, list): self._data = properties_list else: raise ValueError('invalid input.') self.wcs = wcs self._cache = {} def __len__(self): return len(self._data) def __getitem__(self, index): return self._data[index] # python 2 only def __getslice__(self, i, j): return self.__getitem__(slice(i, j)) def __delitem__(self, index): del self._data[index] def __iter__(self): for i in self._data: yield i def __getattr__(self, attr): exclude = ['sky_centroid', 'sky_centroid_icrs', 'icrs_centroid', 'ra_icrs_centroid', 'dec_icrs_centroid', 'sky_bbox_ll', 'sky_bbox_ul', 'sky_bbox_lr', 'sky_bbox_ur'] if attr not in exclude: if attr not in self._cache: values = [getattr(p, attr) for p in self._data] if isinstance(values[0], u.Quantity): # turn list of Quantities into a Quantity array values = u.Quantity(values) if isinstance(values[0], SkyCoord): # pragma: no cover # turn list of SkyCoord into a SkyCoord array values = SkyCoord(values) self._cache[attr] = values return self._cache[attr] @lazyproperty def sky_centroid(self): if self.wcs is not None: # For a large catalog, it's much faster to calculate world # coordinates using the complete list of (x, y) instead of # looping through the individual (x, y). It's also much # faster to recalculate the world coordinates than to create a # SkyCoord array from a loop-generated SkyCoord list. The # assumption here is that the wcs is the same for each # SourceProperties instance. 
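        # The (x, y) centroids are 0-based (NumPy) pixel positions, so
        # origin=0 is used in the pixel-to-sky conversion below.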
return pixel_to_skycoord(self.xcentroid, self.ycentroid, self.wcs, origin=0) else: return None @lazyproperty def sky_centroid_icrs(self): if self.wcs is not None: return self.sky_centroid.icrs else: return None @lazyproperty @deprecated(0.4, alternative='sky_centroid_icrs') def icrs_centroid(self): if self.wcs is not None: return self.sky_centroid_icrs else: return None @lazyproperty @deprecated(0.4, alternative='sky_centroid_icrs.ra') def ra_icrs_centroid(self): if self.wcs is not None: return self.sky_centroid_icrs.ra.deg * u.deg else: return None @lazyproperty @deprecated(0.4, alternative='sky_centroid_icrs.dec') def dec_icrs_centroid(self): if self.wcs is not None: return self.sky_centroid_icrs.dec.deg * u.deg else: return None @lazyproperty def sky_bbox_ll(self): if self.wcs is not None: return pixel_to_skycoord(self.xmin.value - 0.5, self.ymin.value - 0.5, self.wcs, origin=0) else: return None @lazyproperty def sky_bbox_ul(self): if self.wcs is not None: return pixel_to_skycoord(self.xmin.value - 0.5, self.ymin.value + 0.5, self.wcs, origin=0) else: return None @lazyproperty def sky_bbox_lr(self): if self.wcs is not None: return pixel_to_skycoord(self.xmin.value + 0.5, self.ymin.value - 0.5, self.wcs, origin=0) else: return None @lazyproperty def sky_bbox_ur(self): if self.wcs is not None: return pixel_to_skycoord(self.xmin.value + 0.5, self.ymin.value + 0.5, self.wcs, origin=0) else: return None def to_table(self, columns=None, exclude_columns=None): """ Construct a `~astropy.table.QTable` of source properties from a `SourceCatalog` object. If ``columns`` or ``exclude_columns`` are not input, then the `~astropy.table.QTable` will include most scalar-valued source properties. Multi-dimensional properties, e.g. `~photutils.SourceProperties.data_cutout`, can be included in the ``columns`` input, but they will not be preserved when writing the table to a file. This is a limitation of multi-dimensional columns in astropy tables. Parameters ---------- columns : str or list of str, optional Names of columns, in order, to include in the output `~astropy.table.QTable`. The allowed column names are any of the attributes of `SourceProperties`. exclude_columns : str or list of str, optional Names of columns to exclude from the default properties list in the output `~astropy.table.QTable`. The default properties are those with scalar values: 'id', 'xcentroid', 'ycentroid', 'sky_centroid', 'sky_centroid_icrs', 'source_sum', 'source_sum_err', 'background_sum', 'background_mean', 'background_at_centroid', 'xmin', 'xmax', 'ymin', 'ymax', 'min_value', 'max_value', 'minval_xpos', 'minval_ypos', 'maxval_xpos', 'maxval_ypos', 'area', 'equivalent_radius', 'perimeter', 'semimajor_axis_sigma', 'semiminor_axis_sigma', 'eccentricity', 'orientation', 'ellipticity', 'elongation', 'covar_sigx2', 'covar_sigxy', 'covar_sigy2', 'cxx', 'cxy', 'cyy' Returns ------- table : `~astropy.table.QTable` A table of source properties with one row per source. See Also -------- SegmentationImage, SourceProperties, source_properties, detect_sources Examples -------- >>> import numpy as np >>> from photutils import source_properties >>> image = np.arange(16.).reshape(4, 4) >>> print(image) [[ 0. 1. 2. 3.] [ 4. 5. 6. 7.] [ 8. 9. 10. 11.] [ 12. 13. 14. 15.]] >>> segm = SegmentationImage([[1, 1, 0, 0], ... [1, 0, 0, 2], ... [0, 0, 2, 2], ... 
[0, 2, 2, 0]]) >>> cat = source_properties(image, segm) >>> columns = ['id', 'xcentroid', 'ycentroid', 'source_sum'] >>> tbl = cat.to_table(columns=columns) >>> tbl['xcentroid'].info.format = '.10f' # optional format >>> tbl['ycentroid'].info.format = '.10f' # optional format >>> print(tbl) id xcentroid ycentroid source_sum pix pix --- ------------ ------------ ---------- 1 0.2000000000 0.8000000000 5.0 2 2.0909090909 2.3636363636 55.0 """ return _properties_table(self, columns=columns, exclude_columns=exclude_columns) def _properties_table(obj, columns=None, exclude_columns=None): if isinstance(obj, SourceCatalog) and len(obj) == 0: raise ValueError('SourceCatalog contains no sources.') # all scalar-valued properties columns_all = ['id', 'xcentroid', 'ycentroid', 'sky_centroid', 'sky_centroid_icrs', 'source_sum', 'source_sum_err', 'background_sum', 'background_mean', 'background_at_centroid', 'xmin', 'xmax', 'ymin', 'ymax', 'min_value', 'max_value', 'minval_xpos', 'minval_ypos', 'maxval_xpos', 'maxval_ypos', 'area', 'equivalent_radius', 'perimeter', 'semimajor_axis_sigma', 'semiminor_axis_sigma', 'eccentricity', 'orientation', 'ellipticity', 'elongation', 'covar_sigx2', 'covar_sigxy', 'covar_sigy2', 'cxx', 'cxy', 'cyy'] table_columns = None if exclude_columns is not None: table_columns = [s for s in columns_all if s not in exclude_columns] if columns is not None: table_columns = np.atleast_1d(columns) if table_columns is None: table_columns = columns_all tbl = QTable() for column in table_columns: values = getattr(obj, column) if isinstance(obj, SourceProperties): values = np.atleast_1d(values) if isinstance(values[0], u.Quantity): # turn list of Quantities into a Quantity array values = u.Quantity(values) if isinstance(values[0], SkyCoord): # pragma: no cover # turn list of SkyCoord into a SkyCoord array values = SkyCoord(values) if isinstance(obj, SourceCatalog) and values is None: values = [None] * len(obj) tbl[column] = values return tbl @deprecated(0.4, alternative='SourceCatalog.to_table()') def properties_table(source_props, columns=None, exclude_columns=None): """ Construct a `~astropy.table.QTable` of properties from a list of `SourceProperties` objects. If ``columns`` or ``exclude_columns`` are not input, then the `~astropy.table.QTable` will include most scalar-valued source properties. Multi-dimensional properties, e.g. `~photutils.SourceProperties.data_cutout`, can be included in the ``columns`` input. Parameters ---------- source_props : `SourceProperties` or list of `SourceProperties` A `SourceProperties` object or list of `SourceProperties` objects, one for each source. columns : str or list of str, optional Names of columns, in order, to include in the output `~astropy.table.QTable`. The allowed column names are any of the attributes of `SourceProperties`. exclude_columns : str or list of str, optional Names of columns to exclude from the default properties list in the output `~astropy.table.QTable`. The default properties are those with scalar values. Returns ------- table : `~astropy.table.QTable` A table of properties of the segmented sources, one row per source. 
See Also -------- SegmentationImage, SourceProperties, source_properties, detect_sources """ if ((isinstance(source_props, list) or isinstance(source_props, SourceCatalog)) and len(source_props) == 0): raise ValueError('source_props is an empty list') source_props = np.atleast_1d(source_props) # all scalar-valued properties columns_all = ['id', 'xcentroid', 'ycentroid', 'sky_centroid', 'sky_centroid_icrs', 'source_sum', 'source_sum_err', 'background_sum', 'background_mean', 'background_at_centroid', 'xmin', 'xmax', 'ymin', 'ymax', 'min_value', 'max_value', 'minval_xpos', 'minval_ypos', 'maxval_xpos', 'maxval_ypos', 'area', 'equivalent_radius', 'perimeter', 'semimajor_axis_sigma', 'semiminor_axis_sigma', 'eccentricity', 'orientation', 'ellipticity', 'elongation', 'covar_sigx2', 'covar_sigxy', 'covar_sigy2', 'cxx', 'cxy', 'cyy'] table_columns = None if exclude_columns is not None: table_columns = [s for s in columns_all if s not in exclude_columns] if columns is not None: table_columns = np.atleast_1d(columns) if table_columns is None: table_columns = columns_all # For a large catalog, it's much faster to calculate world # coordinates using the complete list of (x, y) instead of looping # through the individual (x, y). It's also much faster to recalculate # world coordinates than to create a SkyCoord array from a # loop-generated SkyCoord list. The assumption here is that the wcs # is the same for each element of source_props. sky_colnames = ['sky_centroid', 'sky_centroid_icrs', 'icrs_centroid', 'ra_icrs_centroid', 'dec_icrs_centroid'] calc_skycoords = any(sky_colname in table_columns for sky_colname in sky_colnames) if calc_skycoords: if source_props[0]._wcs is not None: xcentroid = [props.xcentroid.value for props in source_props] ycentroid = [props.ycentroid.value for props in source_props] sky_centroid = pixel_to_skycoord( xcentroid, ycentroid, source_props[0]._wcs, origin=0) sky_centroid_icrs = sky_centroid.icrs icrs_centroid = sky_centroid_icrs ra_icrs_centroid = sky_centroid_icrs.ra.deg * u.deg dec_icrs_centroid = sky_centroid_icrs.dec.deg * u.deg else: nprops = len(source_props) sky_centroid = sky_centroid_icrs = [None] * nprops icrs_centroid = [None] * nprops ra_icrs_centroid = dec_icrs_centroid = [None] * nprops bbox_colnames = ['sky_bbox_ll', 'sky_bbox_ul', 'sky_bbox_lr', 'sky_bbox_ur'] calc_bboxcoords = any(bbox_colname in table_columns for bbox_colname in bbox_colnames) if calc_bboxcoords: if source_props[0]._wcs is not None: xmin = np.array([props.xmin.value for props in source_props]) ymin = np.array([props.ymin.value for props in source_props]) xmax = np.array([props.xmax.value for props in source_props]) ymax = np.array([props.ymax.value for props in source_props]) wcs = source_props[0]._wcs sky_bbox_ll = pixel_to_skycoord(xmin - 0.5, ymin - 0.5, wcs, origin=0) sky_bbox_ul = pixel_to_skycoord(xmin - 0.5, ymax + 0.5, wcs, origin=0) sky_bbox_lr = pixel_to_skycoord(xmax + 0.5, ymin - 0.5, wcs, origin=0) sky_bbox_ur = pixel_to_skycoord(xmax + 0.5, ymax + 0.5, wcs, origin=0) else: nprops = len(source_props) sky_bbox_ll = sky_bbox_ul = [None] * nprops sky_bbox_lr = sky_bbox_ur = [None] * nprops props_table = QTable() for column in table_columns: if column == 'sky_centroid': props_table[column] = sky_centroid elif column == 'sky_centroid_icrs': props_table[column] = sky_centroid_icrs elif column == 'icrs_centroid': warnings.warn('The icrs_centroid property is deprecated and ' 'may be removed in a future version. 
Use ' 'sky_centroid_icrs instead', AstropyDeprecationWarning) props_table[column] = icrs_centroid elif column == 'ra_icrs_centroid': warnings.warn('The ra_icrs_centroid property is deprecated and ' 'may be removed in a future version. Use ' 'sky_centroid_icrs.ra instead', AstropyDeprecationWarning) props_table[column] = ra_icrs_centroid elif column == 'dec_icrs_centroid': warnings.warn('The dec_icrs_centroid property is deprecated and ' 'may be removed in a future version. Use ' 'sky_centroid_icrs.dec instead', AstropyDeprecationWarning) props_table[column] = dec_icrs_centroid elif column == 'sky_bbox_ll': props_table[column] = sky_bbox_ll elif column == 'sky_bbox_ul': props_table[column] = sky_bbox_ul elif column == 'sky_bbox_lr': props_table[column] = sky_bbox_lr elif column == 'sky_bbox_ur': props_table[column] = sky_bbox_ur else: values = [getattr(props, column) for props in source_props] if isinstance(values[0], u.Quantity): # turn list of Quantities into a Quantity array values = u.Quantity(values) if isinstance(values[0], SkyCoord): # pragma: no cover # turn list of SkyCoord into a SkyCoord array values = SkyCoord(values) props_table[column] = values return props_table photutils-0.4/photutils/segmentation/tests/0000755000214200020070000000000013175654702023477 5ustar lbradleySTSCI\science00000000000000photutils-0.4/photutils/segmentation/tests/__init__.py0000644000214200020070000000017013055576313025604 0ustar lbradleySTSCI\science00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst """ This package contains affiliated package tests. """ photutils-0.4/photutils/segmentation/tests/test_core.py0000644000214200020070000002143213175634532026041 0ustar lbradleySTSCI\science00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst from __future__ import (absolute_import, division, print_function, unicode_literals) import numpy as np from numpy.testing import assert_allclose import pytest from ..core import SegmentationImage try: import scipy # noqa HAS_SCIPY = True except ImportError: HAS_SCIPY = False try: import skimage # noqa HAS_SKIMAGE = True except ImportError: HAS_SKIMAGE = False @pytest.mark.skipif('not HAS_SKIMAGE') @pytest.mark.skipif('not HAS_SCIPY') class TestSegmentationImage(object): def setup_class(self): self.data = [[1, 1, 0, 0, 4, 4], [0, 0, 0, 0, 0, 4], [0, 0, 3, 3, 0, 0], [7, 0, 0, 0, 0, 5], [7, 7, 0, 5, 5, 5], [7, 7, 0, 0, 5, 5]] self.segm = SegmentationImage(self.data) def test_array(self): assert_allclose(self.segm.data, self.segm.array) assert_allclose(self.segm.data, self.segm.__array__()) def test_copy(self): segm = SegmentationImage(self.data) segm2 = segm.copy() assert segm.data is not segm2.data assert segm.labels is not segm2.labels segm.data[0, 0] = 100. 
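        # Modifying the original segmentation data must not propagate to
        # the copy.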
assert segm.data[0, 0] != segm2.data[0, 0] def test_negative_data(self): data = np.arange(-1, 8).reshape(3, 3) with pytest.raises(ValueError): SegmentationImage(data) def test_zero_label(self): with pytest.raises(ValueError): self.segm.check_label(0) def test_negative_label(self): with pytest.raises(ValueError): self.segm.check_label(-1) def test_invalid_label(self): with pytest.raises(ValueError): self.segm.check_label(2) def test_data_masked(self): assert isinstance(self.segm.data_masked, np.ma.MaskedArray) assert np.ma.count(self.segm.data_masked) == 18 assert np.ma.count_masked(self.segm.data_masked) == 18 def test_labels(self): assert_allclose(self.segm.labels, [1, 3, 4, 5, 7]) def test_nlabels(self): assert self.segm.nlabels == 5 def test_max(self): assert self.segm.max == 7 def test_areas(self): expected = np.array([18, 2, 0, 2, 3, 6, 0, 5]) assert_allclose(self.segm.areas, expected) def test_area(self): expected = np.array([18, 2, 0, 2, 3, 6, 0, 5]) assert self.segm.area(0) == expected[0] labels = [3, 1, 4] assert_allclose(self.segm.area(labels), expected[labels]) def test_outline_segments(self): segm_array = np.zeros((5, 5)).astype(int) segm_array[1:4, 1:4] = 2 segm = SegmentationImage(segm_array) segm_array_ref = np.copy(segm_array) segm_array_ref[2, 2] = 0 assert_allclose(segm.outline_segments(), segm_array_ref) def test_outline_segments_masked_background(self): segm_array = np.zeros((5, 5)).astype(int) segm_array[1:4, 1:4] = 2 segm = SegmentationImage(segm_array) segm_array_ref = np.copy(segm_array) segm_array_ref[2, 2] = 0 segm_outlines = segm.outline_segments(mask_background=True) assert isinstance(segm_outlines, np.ma.MaskedArray) assert np.ma.count(segm_outlines) == 8 assert np.ma.count_masked(segm_outlines) == 17 def test_relabel(self): segm = SegmentationImage(self.data) segm.relabel(labels=[1, 7], new_label=2) ref_data = np.array([[2, 2, 0, 0, 4, 4], [0, 0, 0, 0, 0, 4], [0, 0, 3, 3, 0, 0], [2, 0, 0, 0, 0, 5], [2, 2, 0, 5, 5, 5], [2, 2, 0, 0, 5, 5]]) assert_allclose(segm.data, ref_data) assert segm.nlabels == len(segm.slices) - segm.slices.count(None) @pytest.mark.parametrize('start_label', [1, 5]) def test_relabel_sequential(self, start_label): segm = SegmentationImage(self.data) ref_data = np.array([[1, 1, 0, 0, 3, 3], [0, 0, 0, 0, 0, 3], [0, 0, 2, 2, 0, 0], [5, 0, 0, 0, 0, 4], [5, 5, 0, 4, 4, 4], [5, 5, 0, 0, 4, 4]]) ref_data[ref_data != 0] += (start_label - 1) segm.relabel_sequential(start_label=start_label) assert_allclose(segm.data, ref_data) # relabel_sequential should do nothing if already sequential segm.relabel_sequential(start_label=start_label) assert_allclose(segm.data, ref_data) assert segm.nlabels == len(segm.slices) - segm.slices.count(None) @pytest.mark.parametrize('start_label', [0, -1]) def test_relabel_sequential_start_invalid(self, start_label): with pytest.raises(ValueError): segm = SegmentationImage(self.data) segm.relabel_sequential(start_label=start_label) def test_keep_labels(self): ref_data = np.array([[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 3, 3, 0, 0], [0, 0, 0, 0, 0, 5], [0, 0, 0, 5, 5, 5], [0, 0, 0, 0, 5, 5]]) segm = SegmentationImage(self.data) segm.keep_labels([5, 3]) assert_allclose(segm.data, ref_data) def test_keep_labels_relabel(self): ref_data = np.array([[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 1, 1, 0, 0], [0, 0, 0, 0, 0, 2], [0, 0, 0, 2, 2, 2], [0, 0, 0, 0, 2, 2]]) segm = SegmentationImage(self.data) segm.keep_labels([5, 3], relabel=True) assert_allclose(segm.data, ref_data) def test_remove_labels(self): ref_data = 
np.array([[1, 1, 0, 0, 4, 4], [0, 0, 0, 0, 0, 4], [0, 0, 0, 0, 0, 0], [7, 0, 0, 0, 0, 0], [7, 7, 0, 0, 0, 0], [7, 7, 0, 0, 0, 0]]) segm = SegmentationImage(self.data) segm.remove_labels(labels=[5, 3]) assert_allclose(segm.data, ref_data) def test_remove_labels_relabel(self): ref_data = np.array([[1, 1, 0, 0, 2, 2], [0, 0, 0, 0, 0, 2], [0, 0, 0, 0, 0, 0], [3, 0, 0, 0, 0, 0], [3, 3, 0, 0, 0, 0], [3, 3, 0, 0, 0, 0]]) segm = SegmentationImage(self.data) segm.remove_labels(labels=[5, 3], relabel=True) assert_allclose(segm.data, ref_data) def test_remove_border_labels(self): ref_data = np.array([[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 3, 3, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]]) segm = SegmentationImage(self.data) segm.remove_border_labels(border_width=1) assert_allclose(segm.data, ref_data) def test_remove_border_labels_border_width(self): with pytest.raises(ValueError): segm = SegmentationImage(self.data) segm.remove_border_labels(border_width=3) def test_remove_masked_labels(self): ref_data = np.array([[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 3, 3, 0, 0], [7, 0, 0, 0, 0, 5], [7, 7, 0, 5, 5, 5], [7, 7, 0, 0, 5, 5]]) segm = SegmentationImage(self.data) mask = np.zeros_like(segm.data, dtype=np.bool) mask[0, :] = True segm.remove_masked_labels(mask) assert_allclose(segm.data, ref_data) def test_remove_masked_labels_without_partial_overlap(self): ref_data = np.array([[0, 0, 0, 0, 4, 4], [0, 0, 0, 0, 0, 4], [0, 0, 3, 3, 0, 0], [7, 0, 0, 0, 0, 5], [7, 7, 0, 5, 5, 5], [7, 7, 0, 0, 5, 5]]) segm = SegmentationImage(self.data) mask = np.zeros_like(segm.data, dtype=np.bool) mask[0, :] = True segm.remove_masked_labels(mask, partial_overlap=False) assert_allclose(segm.data, ref_data) def test_remove_masked_segments_mask_shape(self): segm = SegmentationImage(np.ones((5, 5))) mask = np.zeros((3, 3), dtype=np.bool) with pytest.raises(ValueError): segm.remove_masked_labels(mask) photutils-0.4/photutils/segmentation/tests/test_deblend.py0000644000214200020070000001344113175634532026507 0ustar lbradleySTSCI\science00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst from __future__ import (absolute_import, division, print_function, unicode_literals) import numpy as np from numpy.testing import assert_allclose import pytest from astropy.tests.helper import catch_warnings from astropy.utils.exceptions import AstropyUserWarning from astropy.modeling import models from ..core import SegmentationImage from ..deblend import deblend_sources from ..detect import detect_sources try: import scipy # noqa HAS_SCIPY = True except ImportError: HAS_SCIPY = False try: import skimage # noqa HAS_SKIMAGE = True except ImportError: HAS_SKIMAGE = False @pytest.mark.skipif('not HAS_SCIPY') @pytest.mark.skipif('not HAS_SKIMAGE') class TestDeblendSources(object): def setup_class(self): g1 = models.Gaussian2D(100, 50, 50, 5, 5) g2 = models.Gaussian2D(100, 35, 50, 5, 5) g3 = models.Gaussian2D(30, 70, 50, 5, 5) y, x = np.mgrid[0:100, 0:100] self.x = x self.y = y self.data = g1(x, y) + g2(x, y) self.data3 = self.data + g3(x, y) self.threshold = 10 self.npixels = 5 self.segm = detect_sources(self.data, self.threshold, self.npixels) self.segm3 = detect_sources(self.data3, self.threshold, self.npixels) @pytest.mark.parametrize('mode', ['exponential', 'linear']) def test_deblend_sources(self, mode): result = deblend_sources(self.data, self.segm, self.npixels, mode=mode) assert result.nlabels == 2 assert result.nlabels == len(result.slices) mask1 = (result.data == 1) mask2 = 
(result.data == 2) assert_allclose(len(result.data[mask1]), len(result.data[mask2])) assert_allclose(np.sum(self.data[mask1]), np.sum(self.data[mask2])) assert_allclose(np.nonzero(self.segm), np.nonzero(result)) def test_deblend_multiple_sources(self): g4 = models.Gaussian2D(100, 50, 15, 5, 5) g5 = models.Gaussian2D(100, 35, 15, 5, 5) g6 = models.Gaussian2D(100, 50, 85, 5, 5) g7 = models.Gaussian2D(100, 35, 85, 5, 5) x = self.x y = self.y data = self.data + g4(x, y) + g5(x, y) + g6(x, y) + g7(x, y) segm = detect_sources(data, self.threshold, self.npixels) result = deblend_sources(data, segm, self.npixels) assert result.nlabels == 6 assert result.nlabels == len(result.slices) assert result.area(1) == result.area(2) assert result.area(1) == result.area(3) assert result.area(1) == result.area(4) assert result.area(1) == result.area(5) assert result.area(1) == result.area(6) @pytest.mark.parametrize('mode', ['exponential', 'linear']) def test_deblend_sources_norelabel(self, mode): result = deblend_sources(self.data, self.segm, self.npixels, mode=mode, relabel=False) assert result.nlabels == 2 assert len(result.slices) <= result.max assert len(result.slices) == 3 # label 1 is None assert_allclose(np.nonzero(self.segm), np.nonzero(result)) @pytest.mark.parametrize('mode', ['exponential', 'linear']) def test_deblend_three_sources(self, mode): result = deblend_sources(self.data3, self.segm3, self.npixels, mode=mode) assert result.nlabels == 3 assert_allclose(np.nonzero(self.segm3), np.nonzero(result)) def test_deblend_sources_segm_array(self): result = deblend_sources(self.data, self.segm.data, self.npixels) assert result.nlabels == 2 def test_segment_img_badshape(self): segm_wrong = np.zeros((2, 2)) with pytest.raises(ValueError): deblend_sources(self.data, segm_wrong, self.npixels) def test_invalid_nlevels(self): with pytest.raises(ValueError): deblend_sources(self.data, self.segm, self.npixels, nlevels=0) def test_invalid_contrast(self): with pytest.raises(ValueError): deblend_sources(self.data, self.segm, self.npixels, contrast=-1) def test_invalid_mode(self): with pytest.raises(ValueError): deblend_sources(self.data, self.segm, self.npixels, mode='invalid') def test_invalid_connectivity(self): with pytest.raises(ValueError): deblend_sources(self.data, self.segm, self.npixels, connectivity='invalid') def test_constant_source(self): data = self.data.copy() data[data.nonzero()] = 1. result = deblend_sources(data, self.segm, self.npixels) assert_allclose(result, self.segm) def test_source_with_negval(self): data = self.data.copy() data -= 20 with catch_warnings(AstropyUserWarning) as warning_lines: deblend_sources(data, self.segm, self.npixels) assert ('contains negative values' in str(warning_lines[0].message)) def test_source_zero_min(self): data = self.data.copy() data -= data[self.segm.data > 0].min() result1 = deblend_sources(self.data, self.segm, self.npixels) result2 = deblend_sources(data, self.segm, self.npixels) assert_allclose(result1, result2) def test_connectivity(self): """Regression test for #341.""" data = np.zeros((3, 3)) data[0, 0] = 2 data[1, 1] = 2 data[2, 2] = 1 segm = np.zeros_like(data) segm[data.nonzero()] = 1 segm = SegmentationImage(segm) data = data * 100. 
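        # The source pixels touch only at corners: they form one source
        # under 8-connectivity, but are disconnected under 4-connectivity,
        # for which deblend_sources raises ValueError (asserted below).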
segm_deblend = deblend_sources(data, segm, npixels=1, connectivity=8) assert segm_deblend.nlabels == 1 with pytest.raises(ValueError): deblend_sources(data, segm, npixels=1, connectivity=4) photutils-0.4/photutils/segmentation/tests/test_detect.py0000644000214200020070000001213413175634532026360 0ustar lbradleySTSCI\science00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst from __future__ import (absolute_import, division, print_function, unicode_literals) import numpy as np from numpy.testing import assert_array_equal, assert_allclose import pytest from astropy.tests.helper import catch_warnings from astropy.utils.exceptions import AstropyUserWarning from astropy.convolution import Gaussian2DKernel from astropy.stats import gaussian_fwhm_to_sigma from ..detect import detect_sources, make_source_mask from ...datasets import make_4gaussians_image try: import scipy # noqa HAS_SCIPY = True except ImportError: HAS_SCIPY = False try: import skimage # noqa HAS_SKIMAGE = True except ImportError: HAS_SKIMAGE = False @pytest.mark.skipif('not HAS_SCIPY') class TestDetectSources(object): def setup_class(self): self.data = np.array([[0, 1, 0], [0, 2, 0], [0, 0, 0]]).astype(np.float) self.ref1 = np.array([[0, 0, 0], [0, 0, 0], [0, 0, 0]]) self.ref2 = np.array([[0, 1, 0], [0, 1, 0], [0, 0, 0]]) fwhm2sigma = 1.0 / (2.0 * np.sqrt(2.0 * np.log(2.0))) filter_kernel = Gaussian2DKernel(2. * fwhm2sigma, x_size=3, y_size=3) filter_kernel.normalize() self.filter_kernel = filter_kernel def test_detection(self): """Test basic detection.""" segm = detect_sources(self.data, threshold=0.9, npixels=2) assert_array_equal(segm.data, self.ref2) def test_small_sources(self): """Test detection where sources are smaller than npixels size.""" segm = detect_sources(self.data, threshold=0.9, npixels=5) assert_array_equal(segm.data, self.ref1) def test_zerothresh(self): """Test detection with zero threshold.""" segm = detect_sources(self.data, threshold=0., npixels=2) assert_array_equal(segm.data, self.ref2) def test_zerodet(self): """Test detection with large snr_threshold giving no detections.""" segm = detect_sources(self.data, threshold=7, npixels=2) assert_array_equal(segm.data, self.ref1) def test_8connectivity(self): """Test detection with connectivity=8.""" data = np.eye(3) segm = detect_sources(data, threshold=0.9, npixels=1, connectivity=8) assert_array_equal(segm.data, data) def test_4connectivity(self): """Test detection with connectivity=4.""" data = np.eye(3) ref = np.diag([1, 2, 3]) segm = detect_sources(data, threshold=0.9, npixels=1, connectivity=4) assert_array_equal(segm.data, ref) def test_basic_filter_kernel(self): """Test detection with filter_kernel.""" kernel = np.ones((3, 3)) / 9. 
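        # A normalized 3x3 boxcar kernel: smoothing spreads the flux so
        # that, with the threshold below, every pixel except the empty
        # bottom row is detected.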
threshold = 0.3 expected = np.ones((3, 3)) expected[2] = 0 segm = detect_sources(self.data, threshold, npixels=1, filter_kernel=kernel) assert_array_equal(segm.data, expected) def test_npixels_nonint(self): """Test if error raises if npixel is non-integer.""" with pytest.raises(ValueError): detect_sources(self.data, threshold=1, npixels=0.1) def test_npixels_negative(self): """Test if error raises if npixel is negative.""" with pytest.raises(ValueError): detect_sources(self.data, threshold=1, npixels=-1) def test_connectivity_invalid(self): """Test if error raises if connectivity is invalid.""" with pytest.raises(ValueError): detect_sources(self.data, threshold=1, npixels=1, connectivity=10) def test_filter_kernel_array(self): segm = detect_sources(self.data, 0.1, npixels=1, filter_kernel=self.filter_kernel.array) assert_array_equal(segm.data, np.ones((3, 3))) def test_filter_kernel(self): segm = detect_sources(self.data, 0.1, npixels=1, filter_kernel=self.filter_kernel) assert_array_equal(segm.data, np.ones((3, 3))) def test_unnormalized_filter_kernel(self): with catch_warnings(AstropyUserWarning) as warning_lines: detect_sources(self.data, 0.1, npixels=1, filter_kernel=self.filter_kernel*10.) assert warning_lines[0].category == AstropyUserWarning assert ('The kernel is not normalized.' in str(warning_lines[0].message)) @pytest.mark.skipif('not HAS_SCIPY') class TestMakeSourceMask(object): def setup_class(self): self.data = make_4gaussians_image() def test_dilate_size(self): mask1 = make_source_mask(self.data, 5, 10) mask2 = make_source_mask(self.data, 5, 10, dilate_size=20) assert np.count_nonzero(mask2) > np.count_nonzero(mask1) def test_kernel(self): mask1 = make_source_mask(self.data, 5, 10, filter_fwhm=2, filter_size=3) sigma = 2 * gaussian_fwhm_to_sigma kernel = Gaussian2DKernel(sigma, x_size=3, y_size=3) mask2 = make_source_mask(self.data, 5, 10, filter_kernel=kernel) assert_allclose(mask1, mask2) photutils-0.4/photutils/segmentation/tests/test_properties.py0000644000214200020070000004015313175634532027306 0ustar lbradleySTSCI\science00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst from __future__ import (absolute_import, division, print_function, unicode_literals) import itertools import numpy as np from numpy.testing import assert_allclose import pytest from astropy.tests.helper import assert_quantity_allclose from astropy.modeling import models from astropy.table import QTable import astropy.units as u from astropy.utils.misc import isiterable import astropy.wcs as WCS from ..properties import (SourceProperties, source_properties, SourceCatalog, properties_table) try: import scipy # noqa HAS_SCIPY = True except ImportError: HAS_SCIPY = False try: import skimage # noqa HAS_SKIMAGE = True except ImportError: HAS_SKIMAGE = False XCEN = 51. YCEN = 52.7 MAJOR_SIG = 8. MINOR_SIG = 3. THETA = np.pi / 6. 
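# Build a single elliptical Gaussian source for the tests below; SEGM
# flags every pixel at or above THRESHOLD as belonging to label 1.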
g = models.Gaussian2D(111., XCEN, YCEN, MAJOR_SIG, MINOR_SIG, theta=THETA) y, x = np.mgrid[0:100, 0:100] IMAGE = g(x, y) THRESHOLD = 0.1 SEGM = (IMAGE >= THRESHOLD).astype(np.int) ERR_VALS = [0., 2.5] BACKGRD_VALS = [None, 0., 1., 3.5] @pytest.mark.skipif('not HAS_SKIMAGE') @pytest.mark.skipif('not HAS_SCIPY') class TestSourceProperties(object): def test_segment_shape(self): with pytest.raises(ValueError): SourceProperties(IMAGE, np.zeros((2, 2)), label=1) @pytest.mark.parametrize('label', (0, -1)) def test_label_invalid(self, label): with pytest.raises(ValueError): SourceProperties(IMAGE, SEGM, label=label) @pytest.mark.parametrize('label', (0, -1)) def test_label_missing(self, label): segm = SEGM.copy() segm[0:2, 0:2] = 3 # skip label 2 with pytest.raises(ValueError): SourceProperties(IMAGE, segm, label=2) def test_wcs(self): mywcs = WCS.WCS(naxis=2) rho = np.pi / 3. scale = 0.1 / 3600. mywcs.wcs.cd = [[scale*np.cos(rho), -scale*np.sin(rho)], [scale*np.sin(rho), scale*np.cos(rho)]] mywcs.wcs.ctype = ['RA---TAN', 'DEC--TAN'] props = SourceProperties(IMAGE, SEGM, wcs=mywcs, label=1) assert props.sky_centroid_icrs is not None assert props.sky_bbox_ll is not None assert props.sky_bbox_ul is not None assert props.sky_bbox_lr is not None assert props.sky_bbox_ur is not None def test_nowcs(self): props = SourceProperties(IMAGE, SEGM, wcs=None, label=1) assert props.sky_centroid_icrs is None def test_to_table(self): props = SourceProperties(IMAGE, SEGM, label=1) t1 = props.to_table() t2 = properties_table(props) assert isinstance(t1, QTable) assert isinstance(t2, QTable) assert len(t1) == 1 props = ['xcentroid', 'ycentroid', 'source_sum'] for prop in props: assert t1[prop] == t2[prop] @pytest.mark.skipif('not HAS_SKIMAGE') @pytest.mark.skipif('not HAS_SCIPY') class TestSourcePropertiesFunctionInputs(object): def test_segment_shape(self): wrong_shape = np.zeros((2, 2)) with pytest.raises(ValueError): source_properties(IMAGE, wrong_shape) def test_error_shape(self): wrong_shape = np.zeros((2, 2)) with pytest.raises(ValueError): source_properties(IMAGE, SEGM, error=wrong_shape) def test_background_shape(self): wrong_shape = np.zeros((2, 2)) with pytest.raises(ValueError): source_properties(IMAGE, SEGM, background=wrong_shape) def test_mask_shape(self): wrong_shape = np.zeros((2, 2)) with pytest.raises(ValueError): source_properties(IMAGE, SEGM, mask=wrong_shape) def test_labels(self): props = source_properties(IMAGE, SEGM, labels=1) assert props[0].id == 1 def test_invalidlabels(self): props = source_properties(IMAGE, SEGM, labels=-1) assert len(props) == 0 @pytest.mark.skipif('not HAS_SKIMAGE') @pytest.mark.skipif('not HAS_SCIPY') class TestSourcePropertiesFunction(object): def test_properties(self): props = source_properties(IMAGE, SEGM) assert props[0].id == 1 assert_quantity_allclose(props[0].xcentroid, XCEN*u.pix, rtol=1.e-2) assert_quantity_allclose(props[0].ycentroid, YCEN*u.pix, rtol=1.e-2) assert_allclose(props[0].source_sum, IMAGE[IMAGE >= THRESHOLD].sum()) assert_quantity_allclose(props[0].semimajor_axis_sigma, MAJOR_SIG*u.pix, rtol=1.e-2) assert_quantity_allclose(props[0].semiminor_axis_sigma, MINOR_SIG*u.pix, rtol=1.e-2) assert_quantity_allclose(props[0].orientation, THETA*u.rad, rtol=1.e-3) assert_allclose(props[0].bbox.value, [35, 25, 70, 77]) assert_quantity_allclose(props[0].area, 1058.0*u.pix**2) assert_allclose(len(props[0].values), props[0].area.value) assert_allclose(len(props[0].coords), 2) assert_allclose(len(props[0].coords[0]), props[0].area.value) properties = 
['background_at_centroid', 'background_mean', 'eccentricity', 'ellipticity', 'elongation', 'equivalent_radius', 'max_value', 'maxval_xpos', 'maxval_ypos', 'min_value', 'minval_xpos', 'minval_ypos', 'perimeter', 'cxx', 'cxy', 'cyy', 'covar_sigx2', 'covar_sigxy', 'covar_sigy2', 'xmax', 'xmin', 'ymax', 'ymin'] for propname in properties: assert not isiterable(getattr(props[0], propname)) properties = ['centroid', 'covariance_eigvals', 'cutout_centroid', 'maxval_cutout_pos', 'minval_cutout_pos'] shapes = [getattr(props[0], p).shape for p in properties] for shape in shapes: assert shape == (2,) properties = ['covariance', 'inertia_tensor'] shapes = [getattr(props[0], p).shape for p in properties] for shape in shapes: assert shape == (2, 2) properties = ['moments', 'moments_central'] shapes = [getattr(props[0], p).shape for p in properties] for shape in shapes: assert shape == (4, 4) def test_properties_background_notNone(self): value = 1. props = source_properties(IMAGE, SEGM, background=value) assert props[0].background_mean == value assert_allclose(props[0].background_at_centroid, value) def test_properties_error_background_None(self): props = source_properties(IMAGE, SEGM) assert props[0].background_cutout_ma is None assert props[0].error_cutout_ma is None def test_cutout_shapes(self): error = np.ones_like(IMAGE) * 1. props = source_properties(IMAGE, SEGM, error=error, background=1.) bbox = props[0].bbox.value true_shape = (bbox[2] - bbox[0] + 1, bbox[3] - bbox[1] + 1) properties = ['background_cutout_ma', 'data_cutout', 'data_cutout_ma', 'error_cutout_ma'] shapes = [getattr(props[0], p).shape for p in properties] for shape in shapes: assert shape == true_shape def test_make_cutout(self): props = source_properties(IMAGE, SEGM) data = np.ones((2, 2)) with pytest.raises(ValueError): props[0].make_cutout(data) @pytest.mark.parametrize(('error_value', 'background'), list(itertools.product(ERR_VALS, BACKGRD_VALS))) def test_segmentation_inputs(self, error_value, background): error = np.ones_like(IMAGE) * error_value props = source_properties(IMAGE, SEGM, error=error, background=background) assert_quantity_allclose(props[0].xcentroid, XCEN*u.pix, rtol=1.e-2) assert_quantity_allclose(props[0].ycentroid, YCEN*u.pix, rtol=1.e-2) assert_quantity_allclose(props[0].semimajor_axis_sigma, MAJOR_SIG*u.pix, rtol=1.e-2) assert_quantity_allclose(props[0].semiminor_axis_sigma, MINOR_SIG*u.pix, rtol=1.e-2) assert_quantity_allclose(props[0].orientation, THETA*u.rad, rtol=1.e-3) assert_allclose(props[0].bbox.value, [35, 25, 70, 77]) area = props[0].area.value assert_allclose(area, 1058.0) if background is not None: assert_allclose(props[0].background_sum, area * background) true_sum = IMAGE[IMAGE >= THRESHOLD].sum() assert_allclose(props[0].source_sum, true_sum) true_error = np.sqrt(props[0].area.value) * error_value assert_allclose(props[0].source_sum_err, true_error) def test_data_allzero(self): props = source_properties(IMAGE*0., SEGM) proplist = ['xcentroid', 'ycentroid', 'semimajor_axis_sigma', 'semiminor_axis_sigma', 'eccentricity', 'orientation', 'ellipticity', 'elongation', 'cxx', 'cxy', 'cyy'] for prop in proplist: assert np.isnan(getattr(props[0], prop)) def test_mask(self): data = np.zeros((3, 3)) data[0, 1] = 1. data[1, 1] = 1. 
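        # The mask defined below excludes the (0, 1) pixel, so only the
        # (1, 1) pixel contributes to the measured properties.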
mask = np.zeros_like(data, dtype=np.bool) mask[0, 1] = True segm = data.astype(np.int) props = source_properties(data, segm, mask=mask) assert_allclose(props[0].xcentroid.value, 1) assert_allclose(props[0].ycentroid.value, 1) assert_allclose(props[0].source_sum, 1) assert_allclose(props[0].area.value, 1) def test_single_pixel_segment(self): segm = np.zeros_like(SEGM) segm[50, 50] = 1 props = source_properties(IMAGE, segm) assert props[0].eccentricity == 0 def test_filtering(self): from astropy.convolution import Gaussian2DKernel FWHM2SIGMA = 1.0 / (2.0 * np.sqrt(2.0 * np.log(2.0))) filter_kernel = Gaussian2DKernel(2.*FWHM2SIGMA, x_size=3, y_size=3) error = np.sqrt(IMAGE) props1 = source_properties(IMAGE, SEGM, error=error) props2 = source_properties(IMAGE, SEGM, error=error, filter_kernel=filter_kernel.array) p1, p2 = props1[0], props2[0] keys = ['source_sum', 'source_sum_err'] for key in keys: assert p1[key] == p2[key] keys = ['semimajor_axis_sigma', 'semiminor_axis_sigma'] for key in keys: assert p1[key] != p2[key] def test_filtering_kernel(self): data = np.zeros((3, 3)) data[1, 1] = 1. from astropy.convolution import Gaussian2DKernel FWHM2SIGMA = 1.0 / (2.0 * np.sqrt(2.0 * np.log(2.0))) filter_kernel = Gaussian2DKernel(2.*FWHM2SIGMA, x_size=3, y_size=3) error = np.sqrt(IMAGE) props1 = source_properties(IMAGE, SEGM, error=error) props2 = source_properties(IMAGE, SEGM, error=error, filter_kernel=filter_kernel) p1, p2 = props1[0], props2[0] keys = ['source_sum', 'source_sum_err'] for key in keys: assert p1[key] == p2[key] keys = ['semimajor_axis_sigma', 'semiminor_axis_sigma'] for key in keys: assert p1[key] != p2[key] @pytest.mark.skipif('not HAS_SKIMAGE') @pytest.mark.skipif('not HAS_SCIPY') class TestSourceCatalog(object): def test_basic(self): segm = np.zeros(IMAGE.shape) x = y = np.arange(0, 100, 10) segm[y, x] = np.arange(10) cat = source_properties(IMAGE, segm) assert len(cat) == 9 cat2 = cat[0:5] assert len(cat2) == 5 cat3 = SourceCatalog(cat2) del cat3[4] assert len(cat3) == 4 def test_inputs(self): cat = source_properties(IMAGE, SEGM) cat2 = SourceCatalog(cat[0]) assert len(cat) == 1 assert len(cat2) == 1 with pytest.raises(ValueError): SourceCatalog('a') def test_table(self): cat = source_properties(IMAGE, SEGM) t = cat.to_table() assert isinstance(t, QTable) assert len(t) == 1 def test_table_include(self): cat = source_properties(IMAGE, SEGM) columns = ['id', 'xcentroid'] t = cat.to_table(columns=columns) assert isinstance(t, QTable) assert len(t) == 1 assert t.colnames == columns def test_table_include_invalidname(self): cat = source_properties(IMAGE, SEGM) columns = ['idzz', 'xcentroidzz'] with pytest.raises(AttributeError): cat.to_table(columns=columns) def test_table_exclude(self): cat = source_properties(IMAGE, SEGM) exclude = ['id', 'xcentroid'] t = cat.to_table(exclude_columns=exclude) assert isinstance(t, QTable) assert len(t) == 1 with pytest.raises(KeyError): t['id'] def test_table_empty_props(self): cat = source_properties(IMAGE, SEGM, labels=-1) with pytest.raises(ValueError): cat.to_table() def test_table_wcs(self): mywcs = WCS.WCS(naxis=2) rho = np.pi / 3. scale = 0.1 / 3600. 
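        # A TAN-projection WCS with a 0.1 arcsec/pixel scale, rotated by
        # rho (60 deg).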
mywcs.wcs.cd = [[scale*np.cos(rho), -scale*np.sin(rho)], [scale*np.sin(rho), scale*np.cos(rho)]] mywcs.wcs.ctype = ['RA---TAN', 'DEC--TAN'] cat = source_properties(IMAGE, SEGM, wcs=mywcs) columns = ['sky_centroid', 'sky_centroid_icrs', 'icrs_centroid', 'ra_icrs_centroid', 'dec_icrs_centroid', 'sky_bbox_ll', 'sky_bbox_ul', 'sky_bbox_lr', 'sky_bbox_ur'] t = cat.to_table(columns=columns) assert t[0]['sky_centroid'] is not None assert t.colnames == columns cat = source_properties(IMAGE, SEGM) columns = ['sky_centroid', 'sky_centroid_icrs', 'icrs_centroid', 'ra_icrs_centroid', 'dec_icrs_centroid', 'sky_bbox_ll', 'sky_bbox_ul', 'sky_bbox_lr', 'sky_bbox_ur'] t = cat.to_table(columns=columns) assert t[0]['sky_centroid'] is None assert t.colnames == columns @pytest.mark.skipif('not HAS_SKIMAGE') @pytest.mark.skipif('not HAS_SCIPY') class TestPropertiesTable(object): def test_properties_table(self): props = source_properties(IMAGE, SEGM) t = properties_table(props) assert isinstance(t, QTable) assert len(t) == 1 def test_properties_table_include(self): props = source_properties(IMAGE, SEGM) columns = ['id', 'xcentroid'] t = properties_table(props, columns=columns) assert isinstance(t, QTable) assert len(t) == 1 assert t.colnames == columns def test_properties_table_include_invalidname(self): props = source_properties(IMAGE, SEGM) columns = ['idzz', 'xcentroidzz'] with pytest.raises(AttributeError): properties_table(props, columns=columns) def test_properties_table_exclude(self): props = source_properties(IMAGE, SEGM) exclude = ['id', 'xcentroid'] t = properties_table(props, exclude_columns=exclude) assert isinstance(t, QTable) assert len(t) == 1 with pytest.raises(KeyError): t['id'] def test_properties_table_empty_props(self): props = source_properties(IMAGE, SEGM, labels=-1) with pytest.raises(ValueError): properties_table(props) def test_properties_table_empty_list(self): with pytest.raises(ValueError): properties_table([]) def test_properties_table_wcs(self): mywcs = WCS.WCS(naxis=2) rho = np.pi / 3. scale = 0.1 / 3600. mywcs.wcs.cd = [[scale*np.cos(rho), -scale*np.sin(rho)], [scale*np.sin(rho), scale*np.cos(rho)]] mywcs.wcs.ctype = ['RA---TAN', 'DEC--TAN'] props = source_properties(IMAGE, SEGM, wcs=mywcs) columns = ['sky_centroid', 'sky_centroid_icrs', 'icrs_centroid', 'ra_icrs_centroid', 'dec_icrs_centroid', 'sky_bbox_ll', 'sky_bbox_ul', 'sky_bbox_lr', 'sky_bbox_ur'] t = properties_table(props, columns=columns) assert t.colnames == columns assert t[0]['sky_centroid_icrs'] is not None photutils-0.4/photutils/tests/0000755000214200020070000000000013175654702021002 5ustar lbradleySTSCI\science00000000000000photutils-0.4/photutils/tests/__init__.py0000644000214200020070000000017012444404542023102 0ustar lbradleySTSCI\science00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst """ This package contains affiliated package tests. 
""" photutils-0.4/photutils/tests/coveragerc0000644000214200020070000000124512444404542023040 0ustar lbradleySTSCI\science00000000000000[run] source = photutils omit = photutils/*__init__* photutils/_astropy_init.py photutils/conftest* photutils/cython_version* photutils/*setup* photutils/*tests/* photutils/version* photutils/extern/* [report] exclude_lines = # Have to re-enable the standard pragma pragma: no cover # Don't complain about packages we have installed except ImportError # Don't complain if tests don't hit assertions raise AssertionError raise NotImplementedError # Don't complain about script hooks def main\(.*\): # Ignore branches that don't pertain to this version of Python pragma: py{ignore_python_version} photutils-0.4/photutils/tests/setup_package.py0000644000214200020070000000015213175634532024164 0ustar lbradleySTSCI\science00000000000000def get_package_data(): return { _ASTROPY_PACKAGE_NAME_ + '.tests': ['coveragerc']} # noqa photutils-0.4/photutils/utils/0000755000214200020070000000000013175654702021000 5ustar lbradleySTSCI\science00000000000000photutils-0.4/photutils/utils/__init__.py0000644000214200020070000000071413175634532023112 0ustar lbradleySTSCI\science00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst """ General-purpose utility functions. """ from .check_random_state import * # noqa from .colormaps import * # noqa from .convolution import * # noqa from .cutouts import * # noqa from .errors import * # noqa from .interpolation import * # noqa from .misc import * # noqa from .stats import * # noqa photutils-0.4/photutils/utils/check_random_state.py0000644000214200020070000000255413055576313025173 0ustar lbradleySTSCI\science00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst from __future__ import (absolute_import, division, print_function, unicode_literals) import numbers import numpy as np __all__ = ['check_random_state'] def check_random_state(seed): """ Turn seed into a `numpy.random.RandomState` instance. Parameters ---------- seed : `None`, int, or `numpy.random.RandomState` If ``seed`` is `None`, return the `~numpy.random.RandomState` singleton used by ``numpy.random``. If ``seed`` is an `int`, return a new `~numpy.random.RandomState` instance seeded with ``seed``. If ``seed`` is already a `~numpy.random.RandomState`, return it. Otherwise raise ``ValueError``. Returns ------- random_state : `numpy.random.RandomState` RandomState object. Notes ----- This routine is from scikit-learn. See http://scikit-learn.org/stable/developers/utilities.html#validation-tools. """ if seed is None or seed is np.random: return np.random.mtrand._rand if isinstance(seed, (numbers.Integral, np.integer)): return np.random.RandomState(seed) if isinstance(seed, np.random.RandomState): return seed raise ValueError('{0!r} cannot be used to seed a numpy.random.RandomState' ' instance'.format(seed)) photutils-0.4/photutils/utils/colormaps.py0000644000214200020070000000246113175634532023353 0ustar lbradleySTSCI\science00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst from __future__ import (absolute_import, division, print_function, unicode_literals) import numpy as np from .check_random_state import check_random_state __all__ = ['random_cmap'] def random_cmap(ncolors=256, random_state=None): """ Generate a matplotlib colormap consisting of random (muted) colors. A random colormap is very useful for plotting segmentation images. 
Parameters ---------- ncolors : int, optional The number of colors in the colormap. The default is 256. random_state : int or `~numpy.random.RandomState`, optional The pseudo-random number generator state used for random sampling. Separate function calls with the same ``random_state`` will generate the same colormap. Returns ------- cmap : `matplotlib.colors.Colormap` The matplotlib colormap with random colors. """ from matplotlib import colors prng = check_random_state(random_state) h = prng.uniform(low=0.0, high=1.0, size=ncolors) s = prng.uniform(low=0.2, high=0.7, size=ncolors) v = prng.uniform(low=0.5, high=1.0, size=ncolors) hsv = np.dstack((h, s, v)) rgb = np.squeeze(colors.hsv_to_rgb(hsv)) return colors.ListedColormap(rgb) photutils-0.4/photutils/utils/convolution.py0000644000214200020070000000420513055576313023730 0ustar lbradleySTSCI\science00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst from __future__ import (absolute_import, division, print_function, unicode_literals) import warnings import numpy as np from astropy.convolution import Kernel2D from astropy.utils.exceptions import AstropyUserWarning __all__ = ['filter_data'] def filter_data(data, kernel, mode='constant', fill_value=0.0, check_normalization=False): """ Convolve a 2D image with a 2D kernel. The kernel may either be a 2D `~numpy.ndarray` or a `~astropy.convolution.Kernel2D` object. Parameters ---------- data : array_like The 2D array of the image. kernel : array-like (2D) or `~astropy.convolution.Kernel2D` The 2D kernel used to filter the input ``data``. Filtering the ``data`` will smooth the noise and maximize detectability of objects with a shape similar to the kernel. mode : {'constant', 'reflect', 'nearest', 'mirror', 'wrap'}, optional The ``mode`` determines how the array borders are handled. For the ``'constant'`` mode, values outside the array borders are set to ``fill_value``. The default is ``'constant'``. fill_value : scalar, optional Value to fill data values beyond the array borders if ``mode`` is ``'constant'``. The default is ``0.0``. check_normalization : bool, optional If `True` then a warning will be issued if the kernel is not normalized to 1. """ from scipy import ndimage if kernel is not None: if isinstance(kernel, Kernel2D): kernel_array = kernel.array else: kernel_array = kernel if check_normalization: if not np.allclose(np.sum(kernel_array), 1.0): warnings.warn('The kernel is not normalized.', AstropyUserWarning) # NOTE: astropy.convolution.convolve fails with zero-sum # kernels (used in findstars) (cf. astropy #1647) return ndimage.convolve(data, kernel_array, mode=mode, cval=fill_value) else: return data photutils-0.4/photutils/utils/cutouts.py0000644000214200020070000000673313063003335023053 0ustar lbradleySTSCI\science00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst from __future__ import (absolute_import, division, print_function, unicode_literals) import collections import numpy as np from astropy.nddata.utils import overlap_slices __all__ = ['cutout_footprint'] def cutout_footprint(data, position, box_size=3, footprint=None, mask=None, error=None): """ Cut out a region from data (and optional mask and error) centered at specified (x, y) position. The size of the region is specified via the ``box_size`` or ``footprint`` keywords. The output mask for the cutout region represents the combination of the input mask and footprint mask. Parameters ---------- data : array_like The 2D array of the image. 
position : 2 tuple The ``(x, y)`` pixel coordinate of the center of the region. box_size : scalar or tuple, optional The size of the region to cutout from ``data``. If ``box_size`` is a scalar then a square box of size ``box_size`` will be used. If ``box_size`` has two elements, they should be in ``(ny, nx)`` order. Either ``box_size`` or ``footprint`` must be defined. If they are both defined, then ``footprint`` overrides ``box_size``. footprint : `~numpy.ndarray` of bools, optional A boolean array where `True` values describe the local footprint region. ``box_size=(n, m)`` is equivalent to ``footprint=np.ones((n, m))``. Either ``box_size`` or ``footprint`` must be defined. If they are both defined, then ``footprint`` overrides ``box_size``. mask : array_like, bool, optional A boolean mask with the same shape as ``data``, where a `True` value indicates the corresponding element of ``data`` is masked. error : array_like, optional The 2D array of the 1-sigma errors of the input ``data``. Returns ------- region_data : `~numpy.ndarray` The ``data`` cutout. region_mask : `~numpy.ndarray` The ``mask`` cutout. region_error : `~numpy.ndarray` The ``error`` cutout. slices : tuple of slices Slices in each dimension of the ``data`` array used to define the cutout region. """ if len(position) != 2: raise ValueError('position must have a length of 2') if footprint is None: if box_size is None: raise ValueError('box_size or footprint must be defined.') if not isinstance(box_size, collections.Iterable): shape = (box_size, box_size) else: if len(box_size) != 2: raise ValueError('box_size must have a length of 2') shape = box_size footprint = np.ones(shape, dtype=bool) else: footprint = np.asanyarray(footprint, dtype=bool) slices_large, slices_small = overlap_slices(data.shape, footprint.shape, position[::-1]) region_data = data[slices_large] if error is not None: region_error = error[slices_large] else: region_error = None if mask is not None: region_mask = mask[slices_large] else: region_mask = np.zeros_like(region_data, dtype=bool) footprint_mask = ~footprint footprint_mask = footprint_mask[slices_small] # trim if necessary region_mask = np.logical_or(region_mask, footprint_mask) return region_data, region_mask, region_error, slices_large photutils-0.4/photutils/utils/errors.py0000644000214200020070000001326213175634532022671 0ustar lbradleySTSCI\science00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst from __future__ import (absolute_import, division, print_function, unicode_literals) import numpy as np import astropy.units as u from astropy.utils.misc import isiterable __all__ = ['calc_total_error'] def calc_total_error(data, bkg_error, effective_gain): """ Calculate a total error array, combining a background-only error array with the Poisson noise of sources. Parameters ---------- data : array_like or `~astropy.units.Quantity` The data array. bkg_error : array_like or `~astropy.units.Quantity` The pixel-wise Gaussian 1-sigma background-only errors of the input ``data``. ``bkg_error`` should include all sources of "background" error but *exclude* the Poisson error of the sources. ``bkg_error`` must have the same shape as ``data``. If ``data`` and ``bkg_error`` are `~astropy.units.Quantity` objects, then they must have the same units. effective_gain : float, array-like, or `~astropy.units.Quantity` Ratio of counts (e.g., electrons or photons) to the units of ``data`` used to calculate the Poisson error of the sources. 
Returns ------- total_error : `~numpy.ndarray` or `~astropy.units.Quantity` The total error array. If ``data``, ``bkg_error``, and ``effective_gain`` are all `~astropy.units.Quantity` objects, then ``total_error`` will also be returned as a `~astropy.units.Quantity` object with the same units as the input ``data``. Otherwise, a `~numpy.ndarray` will be returned. Notes ----- To use units, ``data``, ``bkg_error``, and ``effective_gain`` must *all* be `~astropy.units.Quantity` objects. ``data`` and ``bkg_error`` must have the same units. A `ValueError` will be raised if only some of the inputs are `~astropy.units.Quantity` objects or if the ``data`` and ``bkg_error`` units differ. The total error array, :math:`\\sigma_{\\mathrm{tot}}` is: .. math:: \\sigma_{\\mathrm{tot}} = \\sqrt{\\sigma_{\\mathrm{b}}^2 + \\frac{I}{g}} where :math:`\\sigma_b`, :math:`I`, and :math:`g` are the background ``bkg_error`` image, ``data`` image, and ``effective_gain``, respectively. Pixels where ``data`` (:math:`I_i)` is negative do not contribute additional Poisson noise to the total error, i.e. :math:`\\sigma_{\\mathrm{tot}, i} = \\sigma_{\\mathrm{b}, i}`. Note that this is different from `SExtractor`_, which sums the total variance in the segment, including pixels where :math:`I_i` is negative. In such cases, `SExtractor`_ underestimates the total errors. Also note that SExtractor computes Poisson errors from background-subtracted data, which also results in an underestimation of the Poisson noise. ``effective_gain`` can either be a scalar value or a 2D image with the same shape as the ``data``. A 2D image is useful with mosaic images that have variable depths (i.e., exposure times) across the field. For example, one should use an exposure-time map as the ``effective_gain`` for a variable depth mosaic image in count-rate units. As an example, if your input ``data`` are in units of ADU, then ``effective_gain`` should be in units of electrons/ADU (or photons/ADU). If your input ``data`` are in units of electrons/s then ``effective_gain`` should be the exposure time or an exposure time map (e.g., for mosaics with non-uniform exposure times). .. _SExtractor: http://www.astromatic.net/software/sextractor """ data = np.asanyarray(data) bkg_error = np.asanyarray(bkg_error) inputs = [data, bkg_error, effective_gain] has_unit = [hasattr(x, 'unit') for x in inputs] use_units = all(has_unit) if any(has_unit) and not use_units: raise ValueError('If any of data, bkg_error, or effective_gain has ' 'units, then they all must all have units.') if use_units: if data.unit != bkg_error.unit: raise ValueError('data and bkg_error must have the same units.') count_units = [u.electron, u.photon] datagain_unit = data.unit * effective_gain.unit if datagain_unit not in count_units: raise u.UnitsError('(data * effective_gain) has units of "{0}", ' 'but it must have count units (e.g. ' 'u.electron or u.photon).' .format(datagain_unit)) if not isiterable(effective_gain): effective_gain = np.zeros(data.shape) + effective_gain else: effective_gain = np.asanyarray(effective_gain) if effective_gain.shape != data.shape: raise ValueError('If input effective_gain is 2D, then it must ' 'have the same shape as the input data.') if np.any(effective_gain <= 0): raise ValueError('effective_gain must be strictly positive ' 'everywhere.') # This calculation assumes that data and bkg_error have the same # units. source_variance is calculated to have units of # (data.unit)**2 so that it can be added with bkg_error**2 below. 
The # final returned error will have units of data.unit. np.maximum is # used to ensure that negative data values do not contribute to the # Poisson noise. if use_units: unit = data.unit data = data.value effective_gain = effective_gain.value source_variance = np.maximum(data / effective_gain, 0) * unit**2 else: source_variance = np.maximum(data / effective_gain, 0) return np.sqrt(bkg_error**2 + source_variance) photutils-0.4/photutils/utils/interpolation.py0000644000214200020070000004152013175647744024253 0ustar lbradleySTSCI\science00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst from __future__ import (absolute_import, division, print_function, unicode_literals) import warnings import numpy as np from astropy.utils.decorators import deprecated from astropy.utils.exceptions import AstropyUserWarning __all__ = ['ShepardIDWInterpolator', 'interpolate_masked_data', 'mask_to_mirrored_num'] __doctest_requires__ = {('ShepardIDWInterpolator'): ['scipy']} class ShepardIDWInterpolator(object): """ Class to perform Inverse Distance Weighted (IDW) interpolation. This interpolator uses a modified version of `Shepard's method `_ (see the Notes section for details). Parameters ---------- coordinates : float, 1D array-like, or NxM-array-like Coordinates of the known data points. In general, it is expected that these coordinates are in a form of a NxM-like array where N is the number of points and M is dimension of the coordinate space. When M=1 (1D space), then the ``coordinates`` parameter may be entered as a 1D array or, if only one data point is available, ``coordinates`` can be a scalar number representing the 1D coordinate of the data point. .. note:: If the dimensionality of ``coordinates`` is larger than 2, e.g., if it is of the form N1 x N2 x N3 x ... x Nn x M, then it will be flattened to form an array of size NxM where N = N1 * N2 * ... * Nn. values : float or 1D array-like Values of the data points corresponding to each coordinate provided in ``coordinates``. In general a 1D array is expected. When a single data point is available, then ``values`` can be a scalar number. .. note:: If the dimensionality of ``values`` is larger than 1 then it will be flattened. weights : float or 1D array-like, optional Weights to be associated with each data value. These weights, if provided, will be combined with inverse distance weights (see the Notes section for details). When ``weights`` is `None` (default), then only inverse distance weights will be used. When provided, this input parameter must have the same form as ``values``. leafsize : float, optional The number of points at which the k-d tree algorithm switches over to brute-force. ``leafsize`` must be positive. See `scipy.spatial.cKDTree` for further information. Notes ----- This interpolator uses a slightly modified version of `Shepard's method `_. The essential difference is the introduction of a "regularization" parameter (``reg``) that is used when computing the inverse distance weights: .. math:: w_i = 1 / (d(x, x_i)^{power} + r) By supplying a positive regularization parameter one can avoid singularities at the locations of the data points as well as control the "smoothness" of the interpolation (e.g., make the weights of the neighbors less varied). The "smoothness" of interpolation can also be controlled by the power parameter (``power``). 
Examples -------- This class can can be instantiated using the following syntax:: >>> from photutils.utils import ShepardIDWInterpolator as idw Example of interpolating 1D data:: >>> import numpy as np >>> np.random.seed(123) >>> x = np.random.random(100) >>> y = np.sin(x) >>> f = idw(x, y) >>> f(0.4) # doctest: +FLOAT_CMP 0.38862424043228855 >>> np.sin(0.4) # doctest: +FLOAT_CMP 0.38941834230865052 >>> xi = np.random.random(4) >>> xi array([ 0.51312815, 0.66662455, 0.10590849, 0.13089495]) >>> f(xi) # doctest: +FLOAT_CMP array([ 0.49086423, 0.62647862, 0.1056854 , 0.13048335]) >>> np.sin(xi) array([ 0.49090493, 0.6183367 , 0.10571061, 0.13052149]) NOTE: In the last example, ``xi`` may be a ``Nx1`` array instead of a 1D vector. Example of interpolating 2D data:: >>> pos = np.random.rand(1000, 2) >>> val = np.sin(pos[:, 0] + pos[:, 1]) >>> f = idw(pos, val) >>> f([0.5, 0.6]) # doctest: +FLOAT_CMP 0.89312649587405657 >>> np.sin(0.5 + 0.6) 0.89120736006143542 """ def __init__(self, coordinates, values, weights=None, leafsize=10): from scipy.spatial import cKDTree coordinates = np.atleast_2d(coordinates) if coordinates.shape[0] == 1: coordinates = np.transpose(coordinates) if coordinates.ndim != 2: coordinates = np.reshape(coordinates, (-1, coordinates.shape[-1])) values = np.asanyarray(values).ravel() ncoords = coordinates.shape[0] if ncoords < 1: raise ValueError('You must enter at least one data point.') if values.shape[0] != ncoords: raise ValueError('The number of values must match the number ' 'of coordinates.') if weights is not None: weights = np.asanyarray(weights).ravel() if weights.shape[0] != ncoords: raise ValueError('The number of weights must match the ' 'number of coordinates.') if np.any(weights < 0.0): raise ValueError('All weight values must be non-negative ' 'numbers.') self.coordinates = coordinates self.ncoords = ncoords self.coords_ndim = coordinates.shape[1] self.values = values self.weights = weights self.kdtree = cKDTree(coordinates, leafsize=leafsize) def __call__(self, positions, n_neighbors=8, eps=0.0, power=1.0, reg=0.0, conf_dist=1e-12, dtype=np.float): """ Evaluate the interpolator at the given positions. Parameters ---------- positions : float, 1D array-like, or NxM-array-like Coordinates of the position(s) at which the interpolator should be evaluated. In general, it is expected that these coordinates are in a form of a NxM-like array where N is the number of points and M is dimension of the coordinate space. When M=1 (1D space), then the ``positions`` parameter may be input as a 1D-like array or, if only one data point is available, ``positions`` can be a scalar number representing the 1D coordinate of the data point. .. note:: If the dimensionality of the ``positions`` argument is larger than 2, e.g., if it is of the form N1 x N2 x N3 x ... x Nn x M, then it will be flattened to form an array of size NxM where N = N1 * N2 * ... * Nn. .. warning:: The dimensionality of ``positions`` must match the dimensionality of the ``coordinates`` used during the initialization of the interpolator. n_neighbors : int, optional The maximum number of nearest neighbors to use during the interpolation. eps : float, optional Set to use approximate nearest neighbors; the kth neighbor is guaranteed to be no further than (1 + ``eps``) times the distance to the real *k*-th nearest neighbor. See `scipy.spatial.cKDTree.query` for further information. power : float, optional The power of the inverse distance used for the interpolation weights. See the Notes section for more details. 
reg : float, optional The regularization parameter. It may be used to control the smoothness of the interpolator. See the Notes section for more details. conf_dist : float, optional The confusion distance below which the interpolator should use the value of the closest data point instead of attempting to interpolate. This is used to avoid singularities at the known data points, especially if ``reg`` is 0.0. dtype : data-type The data type of the output interpolated values. If `None` then the type will be inferred from the type of the ``values`` parameter used during the initialization of the interpolator. """ n_neighbors = int(n_neighbors) if n_neighbors < 1: raise ValueError('n_neighbors must be a positive integer') if conf_dist is not None and conf_dist <= 0.0: conf_dist = None positions = np.asanyarray(positions) if positions.ndim == 0: # assume we have a single 1D coordinate if self.coords_ndim != 1: raise ValueError('The dimensionality of the input position ' 'does not match the dimensionality of the ' 'coordinates used to initialize the ' 'interpolator.') elif positions.ndim == 1: # assume we have a single point if (self.coords_ndim != 1 and (positions.shape[-1] != self.coords_ndim)): raise ValueError('The input position was provided as a 1D ' 'array, but its length does not match the ' 'dimensionality of the coordinates used ' 'to initialize the interpolator.') elif positions.ndim != 2: raise ValueError('The input positions must be an array-like ' 'object of dimensionality no larger than 2.') positions = np.reshape(positions, (-1, self.coords_ndim)) npositions = positions.shape[0] distances, idx = self.kdtree.query(positions, k=n_neighbors, eps=eps) if n_neighbors == 1: return self.values[idx] if dtype is None: dtype = self.values.dtype interp_values = np.zeros(npositions, dtype=dtype) for k in range(npositions): valid_idx = np.isfinite(distances[k]) idk = idx[k][valid_idx] dk = distances[k][valid_idx] if dk.shape[0] == 0: interp_values[k] = np.nan continue if conf_dist is not None: # check if we are close to a known data point confused = (dk <= conf_dist) if np.any(confused): interp_values[k] = self.values[idk[confused][0]] continue w = 1.0 / ((dk ** power) + reg) if self.weights is not None: w *= self.weights[idk] wtot = np.sum(w) if wtot > 0.0: interp_values[k] = np.dot(w, self.values[idk]) / wtot else: interp_values[k] = np.nan if len(interp_values) == 1: return interp_values[0] else: return interp_values @deprecated('0.4') def interpolate_masked_data(data, mask, error=None, background=None): """ Interpolate over masked pixels in data and optional error or background images. The value of masked pixels are replaced by the mean value of the connected neighboring non-masked pixels. This function is intended for single, isolated masked pixels (e.g. hot/warm pixels). Parameters ---------- data : array_like or `~astropy.units.Quantity` The data array. mask : array_like (bool) A boolean mask, with the same shape as ``data``, where a `True` value indicates the corresponding element of ``data`` is masked. error : array_like or `~astropy.units.Quantity`, optional The pixel-wise Gaussian 1-sigma errors of the input ``data``. ``error`` must have the same shape as ``data``. background : array_like, or `~astropy.units.Quantity`, optional The pixel-wise background level of the input ``data``. ``background`` must have the same shape as ``data``. Returns ------- data : `~numpy.ndarray` or `~astropy.units.Quantity` Input ``data`` with interpolated masked pixels. 
error : `~numpy.ndarray` or `~astropy.units.Quantity` Input ``error`` with interpolated masked pixels. `None` if input ``error`` is not input. background : `~numpy.ndarray` or `~astropy.units.Quantity` Input ``background`` with interpolated masked pixels. `None` if input ``background`` is not input. """ if data.shape != mask.shape: raise ValueError('data and mask must have the same shape') data_out = np.copy(data) # do not alter input data mask_idx = mask.nonzero() if mask_idx[0].size == 0: raise ValueError('All items in data are masked') for x in zip(*mask_idx): X = np.array([[max(x[i] - 1, 0), min(x[i] + 1, data.shape[i] - 1)] for i in range(len(data.shape))]) goodpix = ~mask[X] if not np.any(goodpix): warnings.warn('The masked pixel at "{}" is completely ' 'surrounded by (connected) masked pixels, ' 'thus unable to interpolate'.format(x,), AstropyUserWarning) continue data_out[x] = np.mean(data[X][goodpix]) if background is not None: if background.shape != data.shape: raise ValueError('background and data must have the same ' 'shape') background_out = np.copy(background) background_out[x] = np.mean(background[X][goodpix]) else: background_out = None if error is not None: if error.shape != data.shape: raise ValueError('error and data must have the same ' 'shape') error_out = np.copy(error) error_out[x] = np.sqrt(np.mean(error[X][goodpix]**2)) else: error_out = None return data_out, error_out, background_out def mask_to_mirrored_num(image, mask_image, center_position, bbox=None): """ Replace masked pixels with the value of the pixel mirrored across a given ``center_position``. If the mirror pixel is unavailable (i.e. itself masked or outside of the image), then the masked pixel value is set to zero. Parameters ---------- image : `numpy.ndarray`, 2D The 2D array of the image. mask_image : array-like, bool A boolean mask with the same shape as ``image``, where a `True` value indicates the corresponding element of ``image`` is considered bad. center_position : 2-tuple (x, y) center coordinates around which masked pixels will be mirrored. bbox : list, tuple, `numpy.ndarray`, optional The bounding box (x_min, x_max, y_min, y_max) over which to replace masked pixels. Returns ------- result : `numpy.ndarray`, 2D A 2D array with replaced masked pixels. Examples -------- >>> import numpy as np >>> from photutils.utils import mask_to_mirrored_num >>> image = np.arange(16).reshape(4, 4) >>> mask = np.zeros_like(image, dtype=bool) >>> mask[0, 0] = True >>> mask[1, 1] = True >>> mask_to_mirrored_num(image, mask, (1.5, 1.5)) array([[15, 1, 2, 3], [ 4, 10, 6, 7], [ 8, 9, 10, 11], [12, 13, 14, 15]]) """ if bbox is None: ny, nx = image.shape bbox = [0, nx, 0, ny] subdata = np.copy(image[bbox[2]:bbox[3]+1, bbox[0]:bbox[1]+1]) submask = mask_image[bbox[2]:bbox[3]+1, bbox[0]:bbox[1]+1] y_masked, x_masked = np.nonzero(submask) x_mirror = (2 * (center_position[0] - bbox[0]) - x_masked + 0.5).astype('int32') y_mirror = (2 * (center_position[1] - bbox[2]) - y_masked + 0.5).astype('int32') # Reset mirrored pixels that go out of the image. outofimage = ((x_mirror < 0) | (y_mirror < 0) | (x_mirror >= subdata.shape[1]) | (y_mirror >= subdata.shape[0])) if outofimage.any(): x_mirror[outofimage] = x_masked[outofimage].astype('int32') y_mirror[outofimage] = y_masked[outofimage].astype('int32') subdata[y_masked, x_masked] = subdata[y_mirror, x_mirror] # Set pixels that mirrored to another masked pixel to zero. # This will also set to zero any pixels that mirrored out of # the image. 
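    # (Out-of-image mirrors were redirected above to point back at the
    # masked pixels themselves, so the submask lookup below flags those
    # pixels as well.)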
mirror_is_masked = submask[y_mirror, x_mirror] x_bad = x_masked[mirror_is_masked] y_bad = y_masked[mirror_is_masked] subdata[y_bad, x_bad] = 0.0 outimage = np.copy(image) outimage[bbox[2]:bbox[3]+1, bbox[0]:bbox[1]+1] = subdata return outimage photutils-0.4/photutils/utils/misc.py0000644000214200020070000000122213067540023022270 0ustar lbradleySTSCI\science00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst from __future__ import (absolute_import, division, print_function, unicode_literals) __all__ = ['get_version_info'] def get_version_info(): """ Return astropy and photutils versions. Returns ------- result : str The astropy and photutils versions. """ from astropy import __version__ astropy_version = __version__ from photutils import __version__ photutils_version = __version__ return 'astropy: {0}, photutils: {1}'.format(astropy_version, photutils_version) photutils-0.4/photutils/utils/stats.py0000644000214200020070000000530413055576313022510 0ustar lbradleySTSCI\science00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst from __future__ import (absolute_import, division, print_function, unicode_literals) import numpy as np __all__ = ['std_blocksum'] def _mesh_values(data, box_size): """ Extract all the data values in boxes of size ``box_size``. Values from incomplete boxes, either because of the image edges or masked pixels, are not returned. Parameters ---------- data : 2D `~numpy.ma.MaskedArray` The input masked array. box_size : int The box size. Returns ------- result : 2D `~numpy.ndarray` A 2D array containing the data values in the boxes (along the x axis). """ data = np.ma.asanyarray(data) ny, nx = data.shape nyboxes = ny // box_size nxboxes = nx // box_size # include only complete boxes ny_crop = nyboxes * box_size nx_crop = nxboxes * box_size data = data[0:ny_crop, 0:nx_crop] # a reshaped 2D masked array with mesh data along the x axis data = np.ma.swapaxes(data.reshape( nyboxes, box_size, nxboxes, box_size), 1, 2).reshape( nyboxes * nxboxes, box_size * box_size) # include only boxes without any masked pixels idx = np.where(np.ma.count_masked(data, axis=1) == 0) return data[idx] def std_blocksum(data, block_sizes, mask=None): """ Calculate the standard deviation of block-summed data values at sizes of ``block_sizes``. Values from incomplete blocks, either because of the image edges or masked pixels, are not included. Parameters ---------- data : array-like The 2D array to block sum. block_sizes : int, array-like of int An array of integer (square) block sizes. mask : array-like (bool), optional A boolean mask, with the same shape as ``data``, where a `True` value indicates the corresponding element of ``data`` is masked. Blocks that contain *any* masked data are excluded from calculations. Returns ------- result : `~numpy.ndarray` An array of the standard deviations of the block-summed array for the input ``block_sizes``. 
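    Examples
    --------
    A minimal usage sketch (the noise image below is an arbitrary
    illustration, so no output values are shown):

    >>> import numpy as np
    >>> from photutils.utils.stats import std_blocksum
    >>> data = np.random.normal(0., 1., (100, 100))
    >>> stds = std_blocksum(data, [4, 8, 16])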
""" data = np.ma.asanyarray(data) if mask is not None and mask is not np.ma.nomask: mask = np.asanyarray(mask) if data.shape != mask.shape: raise ValueError('data and mask must have the same shape.') data.mask |= mask stds = [] block_sizes = np.atleast_1d(block_sizes) for block_size in block_sizes: mesh_values = _mesh_values(data, block_size) block_sums = np.sum(mesh_values, axis=1) stds.append(np.std(block_sums)) return np.array(stds) photutils-0.4/photutils/utils/tests/0000755000214200020070000000000013175654702022142 5ustar lbradleySTSCI\science00000000000000photutils-0.4/photutils/utils/tests/__init__.py0000644000214200020070000000017013055576313024247 0ustar lbradleySTSCI\science00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst """ This package contains affiliated package tests. """ photutils-0.4/photutils/utils/tests/test_colormaps.py0000644000214200020070000000116113175634532025550 0ustar lbradleySTSCI\science00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst from __future__ import (absolute_import, division, print_function, unicode_literals) from numpy.testing import assert_allclose import pytest from ..colormaps import random_cmap try: import matplotlib # noqa HAS_MATPLOTLIB = True except ImportError: HAS_MATPLOTLIB = False @pytest.mark.skipif('not HAS_MATPLOTLIB') def test_colormap(): ncolors = 100 cmap = random_cmap(ncolors, random_state=12345) assert len(cmap.colors) == ncolors assert_allclose(cmap.colors[0], [0.9234715, 0.64837165, 0.76454726]) photutils-0.4/photutils/utils/tests/test_cutouts.py0000644000214200020070000000415313175634532025263 0ustar lbradleySTSCI\science00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst from __future__ import (absolute_import, division, print_function, unicode_literals) import numpy as np from numpy.testing import assert_allclose import pytest from ..cutouts import cutout_footprint XCS = [25.7] YCS = [26.2] XSTDDEVS = [3.2, 4.0] YSTDDEVS = [5.7, 4.1] THETAS = np.array([30., 45.]) * np.pi / 180. DATA = np.zeros((3, 3)) DATA[0:2, 1] = 1. DATA[1, 0:2] = 1. DATA[1, 1] = 2. 
class TestCutoutFootprint(object): def test_dataonly(self): data = np.ones((5, 5)) position = (2, 2) result1 = cutout_footprint(data, position, 3) result2 = cutout_footprint(data, position, footprint=np.ones((3, 3))) assert_allclose(result1[:-2], result2[:-2]) assert result1[-2] is None assert result2[-2] is None assert result1[-1] == result2[-1] def test_mask_error(self): data = error = np.ones((5, 5)) mask = np.zeros_like(data, dtype=bool) position = (2, 2) box_size1 = 3 box_size2 = (3, 3) footprint = np.ones((3, 3)) result1 = cutout_footprint(data, position, box_size1, mask=mask, error=error) result2 = cutout_footprint(data, position, box_size2, mask=mask, error=error) result3 = cutout_footprint(data, position, box_size1, footprint=footprint, mask=mask, error=error) assert_allclose(result1[:-1], result2[:-1]) assert_allclose(result1[:-1], result3[:-1]) assert result1[-1] == result2[-1] def test_position_len(self): with pytest.raises(ValueError): cutout_footprint(np.ones((3, 3)), [1]) def test_nofootprint(self): with pytest.raises(ValueError): cutout_footprint(np.ones((3, 3)), (1, 1), box_size=None, footprint=None) def test_wrongboxsize(self): with pytest.raises(ValueError): cutout_footprint(np.ones((3, 3)), (1, 1), box_size=(1, 2, 3)) photutils-0.4/photutils/utils/tests/test_errors.py0000644000214200020070000000472513175634532025076 0ustar lbradleySTSCI\science00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst from __future__ import (absolute_import, division, print_function, unicode_literals) import numpy as np from numpy.testing import assert_allclose import pytest import astropy.units as u from ..errors import calc_total_error SHAPE = (5, 5) DATAVAL = 2. DATA = np.ones(SHAPE) * DATAVAL MASK = np.zeros_like(DATA, dtype=bool) MASK[2, 2] = True BKG_ERROR = np.ones(SHAPE) EFFGAIN = np.ones(SHAPE) * DATAVAL BACKGROUND = np.ones(SHAPE) WRONG_SHAPE = np.ones((2, 2)) class TestCalculateTotalError(object): def test_error_shape(self): with pytest.raises(ValueError): calc_total_error(DATA, WRONG_SHAPE, EFFGAIN) def test_gain_shape(self): with pytest.raises(ValueError): calc_total_error(DATA, BKG_ERROR, WRONG_SHAPE) @pytest.mark.parametrize('effective_gain', (0, -1)) def test_gain_le_zero(self, effective_gain): with pytest.raises(ValueError): calc_total_error(DATA, BKG_ERROR, effective_gain) def test_gain_scalar(self): error_tot = calc_total_error(DATA, BKG_ERROR, 2.) assert_allclose(error_tot, np.sqrt(2.) * BKG_ERROR) def test_gain_array(self): error_tot = calc_total_error(DATA, BKG_ERROR, EFFGAIN) assert_allclose(error_tot, np.sqrt(2.) 
* BKG_ERROR) def test_units(self): units = u.electron / u.s error_tot1 = calc_total_error(DATA * units, BKG_ERROR * units, EFFGAIN * u.s) assert error_tot1.unit == units error_tot2 = calc_total_error(DATA, BKG_ERROR, EFFGAIN) assert_allclose(error_tot1.value, error_tot2) def test_error_units(self): units = u.electron / u.s with pytest.raises(ValueError): calc_total_error(DATA * units, BKG_ERROR * u.electron, EFFGAIN * u.s) def test_effgain_units(self): units = u.electron / u.s with pytest.raises(u.UnitsError): calc_total_error(DATA * units, BKG_ERROR * units, EFFGAIN * u.km) def test_missing_bkgerror_units(self): units = u.electron / u.s with pytest.raises(ValueError): calc_total_error(DATA * units, BKG_ERROR, EFFGAIN * u.s) def test_missing_effgain_units(self): units = u.electron / u.s with pytest.raises(ValueError): calc_total_error(DATA * units, BKG_ERROR * units, EFFGAIN) photutils-0.4/photutils/utils/tests/test_interpolation.py0000644000214200020070000001611513175647744026456 0ustar lbradleySTSCI\science00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst from __future__ import (absolute_import, division, print_function, unicode_literals) import warnings import numpy as np from numpy.testing import assert_allclose import pytest from astropy.utils.exceptions import AstropyDeprecationWarning from .. import ShepardIDWInterpolator as idw from .. import interpolate_masked_data, mask_to_mirrored_num try: import scipy # noqa HAS_SCIPY = True except ImportError: HAS_SCIPY = False SHAPE = (5, 5) DATA = np.ones(SHAPE) * 2.0 MASK = np.zeros_like(DATA, dtype=bool) MASK[2, 2] = True ERROR = np.ones(SHAPE) BACKGROUND = np.ones(SHAPE) WRONG_SHAPE = np.ones((2, 2)) @pytest.mark.skipif('not HAS_SCIPY') class TestShepardIDWInterpolator(object): def setup_class(self): np.random.seed(123) self.x = np.random.random(100) self.y = np.sin(self.x) self.f = idw(self.x, self.y) @pytest.mark.parametrize('positions', [0.4, np.arange(2, 5)*0.1]) def test_idw_1d(self, positions): f = idw(self.x, self.y) assert_allclose(f(positions), np.sin(positions), atol=1e-2) def test_idw_weights(self): weights = self.y * 0.1 f = idw(self.x, self.y, weights=weights) pos = 0.4 assert_allclose(f(pos), np.sin(pos), atol=1e-2) def test_idw_2d(self): pos = np.random.rand(1000, 2) val = np.sin(pos[:, 0] + pos[:, 1]) f = idw(pos, val) x = 0.5 y = 0.6 assert_allclose(f([x, y]), np.sin(x + y), atol=1e-2) def test_idw_3d(self): val = np.ones((3, 3, 3)) pos = np.indices(val.shape) f = idw(pos, val) assert_allclose(f([0.5, 0.5, 0.5]), 1.0) def test_no_coordinates(self): with pytest.raises(ValueError): idw([], 0) def test_values_invalid_shape(self): with pytest.raises(ValueError): idw(self.x, 0) def test_weights_invalid_shape(self): with pytest.raises(ValueError): idw(self.x, self.y, weights=10) def test_weights_negative(self): with pytest.raises(ValueError): idw(self.x, self.y, weights=-self.y) def test_n_neighbors_one(self): assert_allclose(self.f(0.5, n_neighbors=1), 0.48103656) def test_n_neighbors_negative(self): with pytest.raises(ValueError): self.f(0.5, n_neighbors=-1) def test_conf_dist_negative(self): assert_allclose(self.f(0.5, conf_dist=-1), self.f(0.5, conf_dist=None)) def test_dtype_none(self): result = self.f(0.5, dtype=None) assert result.dtype.type == np.float64 def test_positions_0d_nomatch(self): """test when position ndim doesn't match coordinates ndim""" pos = np.random.rand(10, 2) val = np.sin(pos[:, 0] + pos[:, 1]) f = idw(pos, val) with pytest.raises(ValueError): f(0.5) def 
test_positions_1d_nomatch(self): """test when position ndim doesn't match coordinates ndim""" pos = np.random.rand(10, 2) val = np.sin(pos[:, 0] + pos[:, 1]) f = idw(pos, val) with pytest.raises(ValueError): f([0.5]) def test_positions_3d(self): with pytest.raises(ValueError): self.f(np.ones((3, 3, 3))) class TestInterpolateMaskedData(object): def setup_class(cls): """Ignore all deprecation warnings here.""" warnings.simplefilter('ignore', AstropyDeprecationWarning) def teardown_class(cls): warnings.resetwarnings() def test_mask_shape(self): with pytest.raises(ValueError): interpolate_masked_data(DATA, WRONG_SHAPE) def test_error_shape(self): with pytest.raises(ValueError): interpolate_masked_data(DATA, MASK, error=WRONG_SHAPE) def test_background_shape(self): with pytest.raises(ValueError): interpolate_masked_data(DATA, MASK, background=WRONG_SHAPE) def test_interpolation(self): data2 = DATA.copy() data2[2, 2] = 100. error2 = ERROR.copy() error2[2, 2] = 100. background2 = BACKGROUND.copy() background2[2, 2] = 100. data, error, background = interpolate_masked_data( data2, MASK, error=error2, background=background2) assert_allclose(data, DATA) assert_allclose(error, ERROR) assert_allclose(background, BACKGROUND) def test_interpolation_larger_mask(self): data2 = DATA.copy() data2[2, 2] = 100. error2 = ERROR.copy() error2[2, 2] = 100. background2 = BACKGROUND.copy() background2[2, 2] = 100. mask2 = MASK.copy() mask2[1:4, 1:4] = True data, error, background = interpolate_masked_data( data2, MASK, error=error2, background=background2) assert_allclose(data, DATA) assert_allclose(error, ERROR) assert_allclose(background, BACKGROUND) class TestMaskToMirroredNum(object): def test_mask_to_mirrored_num(self): """ Test mask_to_mirrored_num. """ center = (1.5, 1.5) data = np.arange(16).reshape(4, 4) mask = np.zeros_like(data, dtype=bool) mask[0, 0] = True mask[1, 1] = True data_ref = data.copy() data_ref[0, 0] = data[3, 3] data_ref[1, 1] = data[2, 2] mirror_data = mask_to_mirrored_num(data, mask, center) assert_allclose(mirror_data, data_ref, rtol=0, atol=1.e-6) def test_mask_to_mirrored_num_range(self): """ Test mask_to_mirrored_num when mirrored pixels are outside of the image. """ center = (2.5, 2.5) data = np.arange(16).reshape(4, 4) mask = np.zeros_like(data, dtype=bool) mask[0, 0] = True mask[1, 1] = True data_ref = data.copy() data_ref[0, 0] = 0. data_ref[1, 1] = 0. mirror_data = mask_to_mirrored_num(data, mask, center) assert_allclose(mirror_data, data_ref, rtol=0, atol=1.e-6) def test_mask_to_mirrored_num_masked(self): """ Test mask_to_mirrored_num when mirrored pixels are also masked. """ center = (0.5, 0.5) data = np.arange(16).reshape(4, 4) data[0, 0] = 100 mask = np.zeros_like(data, dtype=bool) mask[0, 0] = True mask[1, 1] = True data_ref = data.copy() data_ref[0, 0] = 0. data_ref[1, 1] = 0. mirror_data = mask_to_mirrored_num(data, mask, center) assert_allclose(mirror_data, data_ref, rtol=0, atol=1.e-6) def test_mask_to_mirrored_num_bbox(self): """ Test mask_to_mirrored_num with a bounding box. 
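        Only the masked pixel that falls inside the bbox, (1, 1), should
        be replaced; the masked pixel at (0, 0) lies outside the bbox and
        is expected to be left unchanged.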
""" center = (1.5, 1.5) data = np.arange(16).reshape(4, 4) data[0, 0] = 100 mask = np.zeros_like(data, dtype=bool) mask[0, 0] = True mask[1, 1] = True data_ref = data.copy() data_ref[1, 1] = data[2, 2] bbox = (1, 2, 1, 2) mirror_data = mask_to_mirrored_num(data, mask, center, bbox=bbox) assert_allclose(mirror_data, data_ref, rtol=0, atol=1.e-6) photutils-0.4/photutils/utils/tests/test_random_state.py0000644000214200020070000000112613175634532026232 0ustar lbradleySTSCI\science00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst from __future__ import (absolute_import, division, print_function, unicode_literals) import numpy as np import pytest from .. import check_random_state @pytest.mark.parametrize('seed', [None, np.random, 1, np.random.RandomState(1)]) def test_seed(seed): assert isinstance(check_random_state(seed), np.random.RandomState) @pytest.mark.parametrize('seed', [1., [1, 2]]) def test_invalid_seed(seed): with pytest.raises(ValueError): check_random_state(seed) photutils-0.4/photutils/utils/tests/test_stats.py0000644000214200020070000000202213175634532024704 0ustar lbradleySTSCI\science00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst from __future__ import (absolute_import, division, print_function, unicode_literals) import numpy as np from numpy.testing import assert_allclose import pytest from ..stats import std_blocksum from ...datasets import make_noise_image def test_std_blocksum(): stddev = 5 data = make_noise_image((100, 100), mean=0, stddev=stddev, random_state=12345) block_sizes = np.array([5, 7, 10]) stds = std_blocksum(data, block_sizes) expected = np.array([stddev, stddev, stddev]) assert_allclose(stds / block_sizes, expected, atol=0.2) mask = np.zeros_like(data, dtype=np.bool) mask[25:50, 25:50] = True stds2 = std_blocksum(data, block_sizes, mask=mask) assert_allclose(stds2 / block_sizes, expected, atol=0.3) def test_std_blocksum_mask_shape(): with pytest.raises(ValueError): data = np.ones((10, 10)) mask = np.ones((2, 2)) std_blocksum(data, 10, mask=mask) photutils-0.4/photutils/utils/wcs_helpers.py0000644000214200020070000001015213067540023023655 0ustar lbradleySTSCI\science00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst from __future__ import (absolute_import, division, print_function, unicode_literals) import numpy as np from astropy import units as u from astropy.coordinates import UnitSphericalRepresentation from astropy.wcs.utils import skycoord_to_pixel, pixel_to_skycoord def pixel_scale_angle_at_skycoord(skycoord, wcs, offset=1. * u.arcsec): """ Calculate the pixel scale and WCS rotation angle at the position of a SkyCoord coordinate. Parameters ---------- skycoord : `~astropy.coordinates.SkyCoord` The SkyCoord coordinate. wcs : `~astropy.wcs.WCS` The world coordinate system (WCS) transformation to use. offset : `~astropy.units.Quantity` A small angular offset to use to compute the pixel scale and position angle. Returns ------- scale : `~astropy.units.Quantity` The pixel scale in arcsec/pixel. angle : `~astropy.units.Quantity` The angle (in degrees) measured counterclockwise from the positive x axis to the "North" axis of the celestial coordinate system. Notes ----- If distortions are present in the image, the x and y pixel scales likely differ. This function computes a single pixel scale along the North/South axis. 
""" # We take a point directly "above" (in latitude) the input position # and convert it to pixel coordinates, then we use the pixel deltas # between the input and offset point to calculate the pixel scale and # angle. # Find the coordinates as a representation object coord = skycoord.represent_as('unitspherical') # Add a a small perturbation in the latitude direction (since longitude # is more difficult because it is not directly an angle) coord_new = UnitSphericalRepresentation(coord.lon, coord.lat + offset) coord_offset = skycoord.realize_frame(coord_new) # Find pixel coordinates of offset coordinates and pixel deltas x_offset, y_offset = skycoord_to_pixel(coord_offset, wcs, mode='all') x, y = skycoord_to_pixel(skycoord, wcs, mode='all') dx = x_offset - x dy = y_offset - y scale = offset.to(u.arcsec) / (np.hypot(dx, dy) * u.pixel) angle = (np.arctan2(dy, dx) * u.radian).to(u.deg) return scale, angle def assert_angle_or_pixel(name, q): """ Check that ``q`` is either an angular or a pixel :class:`~astropy.units.Quantity`. """ if isinstance(q, u.Quantity): if q.unit.physical_type == 'angle' or q.unit is u.pixel: pass else: raise ValueError("{0} should have angular or pixel " "units".format(name)) else: raise TypeError("{0} should be a Quantity instance".format(name)) def assert_angle(name, q): """ Check that ``q`` is an angular :class:`~astropy.units.Quantity`. """ if isinstance(q, u.Quantity): if q.unit.physical_type == 'angle': pass else: raise ValueError("{0} should have angular units".format(name)) else: raise TypeError("{0} should be a Quantity instance".format(name)) def pixel_to_icrs_coords(x, y, wcs): """ Convert pixel coordinates to ICRS Right Ascension and Declination. This is merely a convenience function to extract RA and Dec. from a `~astropy.coordinates.SkyCoord` instance so they can be put in separate columns in a `~astropy.table.Table`. Parameters ---------- x : float or array-like The x pixel coordinate. y : float or array-like The y pixel coordinate. wcs : `~astropy.wcs.WCS` The WCS transformation to use to convert from pixel coordinates to ICRS world coordinates. `~astropy.table.Table`. Returns ------- ra : `~astropy.units.Quantity` The ICRS Right Ascension in degrees. dec : `~astropy.units.Quantity` The ICRS Declination in degrees. """ icrs_coords = pixel_to_skycoord(x, y, wcs).icrs icrs_ra = icrs_coords.ra.degree * u.deg icrs_dec = icrs_coords.dec.degree * u.deg return icrs_ra, icrs_dec photutils-0.4/photutils/version.py0000644000214200020070000001621213175654676021713 0ustar lbradleySTSCI\science00000000000000# Autogenerated by Astropy-affiliated package photutils's setup.py on 2017-10-30 16:56:30 from __future__ import unicode_literals import datetime import locale import os import subprocess import warnings def _decode_stdio(stream): try: stdio_encoding = locale.getdefaultlocale()[1] or 'utf-8' except ValueError: stdio_encoding = 'utf-8' try: text = stream.decode(stdio_encoding) except UnicodeDecodeError: # Final fallback text = stream.decode('latin1') return text def update_git_devstr(version, path=None): """ Updates the git revision string if and only if the path is being imported directly from a git working copy. This ensures that the revision number in the version string is accurate. 
""" try: # Quick way to determine if we're in git or not - returns '' if not devstr = get_git_devstr(sha=True, show_warning=False, path=path) except OSError: return version if not devstr: # Probably not in git so just pass silently return version if 'dev' in version: # update to the current git revision version_base = version.split('.dev', 1)[0] devstr = get_git_devstr(sha=False, show_warning=False, path=path) return version_base + '.dev' + devstr else: # otherwise it's already the true/release version return version def get_git_devstr(sha=False, show_warning=True, path=None): """ Determines the number of revisions in this repository. Parameters ---------- sha : bool If True, the full SHA1 hash will be returned. Otherwise, the total count of commits in the repository will be used as a "revision number". show_warning : bool If True, issue a warning if git returns an error code, otherwise errors pass silently. path : str or None If a string, specifies the directory to look in to find the git repository. If `None`, the current working directory is used, and must be the root of the git repository. If given a filename it uses the directory containing that file. Returns ------- devversion : str Either a string with the revision number (if `sha` is False), the SHA1 hash of the current commit (if `sha` is True), or an empty string if git version info could not be identified. """ if path is None: path = os.getcwd() if not os.path.isdir(path): path = os.path.abspath(os.path.dirname(path)) if sha: # Faster for getting just the hash of HEAD cmd = ['rev-parse', 'HEAD'] else: cmd = ['rev-list', '--count', 'HEAD'] def run_git(cmd): try: p = subprocess.Popen(['git'] + cmd, cwd=path, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE) stdout, stderr = p.communicate() except OSError as e: if show_warning: warnings.warn('Error running git: ' + str(e)) return (None, b'', b'') if p.returncode == 128: if show_warning: warnings.warn('No git repository present at {0!r}! Using ' 'default dev version.'.format(path)) return (p.returncode, b'', b'') if p.returncode == 129: if show_warning: warnings.warn('Your git looks old (does it support {0}?); ' 'consider upgrading to v1.7.2 or ' 'later.'.format(cmd[0])) return (p.returncode, stdout, stderr) elif p.returncode != 0: if show_warning: warnings.warn('Git failed while determining revision ' 'count: {0}'.format(_decode_stdio(stderr))) return (p.returncode, stdout, stderr) return p.returncode, stdout, stderr returncode, stdout, stderr = run_git(cmd) if not sha and returncode == 128: # git returns 128 if the command is not run from within a git # repository tree. In this case, a warning is produced above but we # return the default dev version of '0'. 
return '0' elif not sha and returncode == 129: # git returns 129 if a command option failed to parse; in # particular this could happen in git versions older than 1.7.2 # where the --count option is not supported # Also use --abbrev-commit and --abbrev=0 to display the minimum # number of characters needed per-commit (rather than the full hash) cmd = ['rev-list', '--abbrev-commit', '--abbrev=0', 'HEAD'] returncode, stdout, stderr = run_git(cmd) # Fall back on the old method of getting all revisions and counting # the lines if returncode == 0: return str(stdout.count(b'\n')) else: return '' elif sha: return _decode_stdio(stdout)[:40] else: return _decode_stdio(stdout).strip() # This function is tested but it is only ever executed within a subprocess when # creating a fake package, so it doesn't get picked up by coverage metrics. def _get_repo_path(pathname, levels=None): # pragma: no cover """ Given a file or directory name, determine the root of the git repository this path is under. If given, this won't look any higher than ``levels`` (that is, if ``levels=0`` then the given path must be the root of the git repository and is returned if so. Returns `None` if the given path could not be determined to belong to a git repo. """ if os.path.isfile(pathname): current_dir = os.path.abspath(os.path.dirname(pathname)) elif os.path.isdir(pathname): current_dir = os.path.abspath(pathname) else: return None current_level = 0 while levels is None or current_level <= levels: if os.path.exists(os.path.join(current_dir, '.git')): return current_dir current_level += 1 if current_dir == os.path.dirname(current_dir): break current_dir = os.path.dirname(current_dir) return None _packagename = "photutils" _last_generated_version = "0.4" _last_githash = "9d4e35caa1a00526432395beafb379b9914e866d" # Determine where the source code for this module # lives. If __file__ is not a filesystem path then # it is assumed not to live in a git repo at all. if _get_repo_path(__file__, levels=len(_packagename.split('.'))): version = update_git_devstr(_last_generated_version, path=__file__) githash = get_git_devstr(sha=True, show_warning=False, path=__file__) or _last_githash else: # The file does not appear to live in a git repo so don't bother # invoking git version = _last_generated_version githash = _last_githash major = 0 minor = 4 bugfix = 0 release = True timestamp = datetime.datetime(2017, 10, 30, 16, 56, 30) debug = False try: from ._compiler import compiler except ImportError: compiler = "unknown" try: from .cython_version import cython_version except ImportError: cython_version = "unknown" photutils-0.4/PKG-INFO0000644000214200020070000000263213175654702016705 0ustar lbradleySTSCI\science00000000000000Metadata-Version: 1.1 Name: photutils Version: 0.4 Summary: An Astropy package for photometry Home-page: http://photutils.readthedocs.io/ Author: The Photutils Developers Author-email: photutils.team@gmail.com License: BSD Description-Content-Type: UNKNOWN Description: * Code: https://github.com/astropy/photutils * Docs: https://photutils.readthedocs.io/ **Photutils** is an `affiliated package `_ of `Astropy `_ to provide tools for detecting and performing photometry of astronomical sources. It is an open source (BSD licensed) Python package. Contributions welcome! 
Platform: UNKNOWN Classifier: Development Status :: 4 - Beta Classifier: Intended Audience :: Science/Research Classifier: License :: OSI Approved :: BSD License Classifier: Operating System :: OS Independent Classifier: Programming Language :: C Classifier: Programming Language :: Cython Classifier: Programming Language :: Python :: 2 Classifier: Programming Language :: Python :: 2.7 Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 3.3 Classifier: Programming Language :: Python :: 3.4 Classifier: Programming Language :: Python :: 3.5 Classifier: Programming Language :: Python :: Implementation :: CPython Classifier: Topic :: Scientific/Engineering :: Astronomy photutils-0.4/README.rst0000644000214200020070000000374213175634532017301 0ustar lbradleySTSCI\science00000000000000Photutils ========= .. image:: http://img.shields.io/pypi/v/photutils.svg?text=version :target: https://pypi.python.org/pypi/photutils/ :alt: Latest release .. image:: https://anaconda.org/astropy/photutils/badges/version.svg :target: https://anaconda.org/astropy/photutils .. image:: https://anaconda.org/astropy/photutils/badges/downloads.svg :target: https://anaconda.org/astropy/photutils .. image:: http://img.shields.io/badge/powered%20by-AstroPy-orange.svg?style=flat :target: http://www.astropy.org/ .. raw:: html
Photutils is an `AstroPy`_ affiliated package to provide tools for detecting and performing photometry of astronomical sources. Project Status -------------- Documentation: .. image:: https://readthedocs.org/projects/photutils/badge/?version=stable :target: http://photutils.readthedocs.io/en/stable/ :alt: Stable Documentation Status .. image:: https://readthedocs.org/projects/photutils/badge/?version=latest :target: http://photutils.readthedocs.io/en/latest/ :alt: Latest Documentation Status Tests: .. image:: https://travis-ci.org/astropy/photutils.svg?branch=master :target: https://travis-ci.org/astropy/photutils .. image:: https://coveralls.io/repos/astropy/photutils/badge.svg?branch=master :target: https://coveralls.io/r/astropy/photutils .. image:: https://ci.appveyor.com/api/projects/status/by27a71echj18b4f/branch/master?svg=true :target: https://ci.appveyor.com/project/Astropy/photutils/branch/master .. image:: https://img.shields.io/badge/benchmarked%20by-asv-green.svg?style=flat :target: http://astropy.org/photutils-benchmarks/ Citing Photutils ---------------- .. image:: https://zenodo.org/badge/2640766.svg :target: https://zenodo.org/badge/latestdoi/2640766 If you use Photutils, please cite the package via its Zenodo record. License ------- Photutils is licensed under a 3-clause BSD style license. Please see the ``LICENSE.rst`` file. .. _AstroPy: http://www.astropy.org/ photutils-0.4/setup.cfg0000644000214200020070000000116313175634532017426 0ustar lbradleySTSCI\science00000000000000[build_sphinx] source-dir = docs build-dir = docs/_build all_files = 1 [upload_docs] upload-dir = docs/_build/html show-response = 1 [tool:pytest] minversion = 3.1 norecursedirs = "docs[\/]_build" "photutils[\/]extern" doctest_plus = enabled addopts = --pyargs -p no:warnings [ah_bootstrap] auto_use = True [metadata] package_name = photutils description = An Astropy package for photometry author = The Photutils Developers author_email = photutils.team@gmail.com license = BSD url = http://photutils.readthedocs.io/ edit_on_github = False github_project = astropy/photutils install_requires = astropy six [entry_points] photutils-0.4/setup.py0000744000214200020070000001121713175652637017327 0ustar lbradleySTSCI\science00000000000000#!/usr/bin/env python # Licensed under a 3-clause BSD style license - see LICENSE.rst import glob import os import sys import ah_bootstrap from setuptools import setup # A dirty hack to get around some early import/configurations ambiguities if sys.version_info[0] >= 3: import builtins else: import __builtin__ as builtins builtins._ASTROPY_SETUP_ = True from astropy_helpers.setup_helpers import ( register_commands, get_debug_option, get_package_info) from astropy_helpers.git_helpers import get_git_devstr from astropy_helpers.version_helpers import generate_version_py # Get some values from the setup.cfg try: from ConfigParser import ConfigParser except ImportError: from configparser import ConfigParser conf = ConfigParser() conf.read(['setup.cfg']) metadata = dict(conf.items('metadata')) PACKAGENAME = metadata.get('package_name', 'packagename') DESCRIPTION = metadata.get('description', 'Astropy affiliated package') AUTHOR = metadata.get('author', '') AUTHOR_EMAIL = metadata.get('author_email', '') LICENSE = metadata.get('license', 'unknown') URL = metadata.get('url', 'http://astropy.org') # Get the long description from the package's docstring LONG_DESCRIPTION = open('LONG_DESCRIPTION.rst').read() # Store the package name in a built-in variable so it's easy # to get from other 
parts of the setup infrastructure builtins._ASTROPY_PACKAGE_NAME_ = PACKAGENAME # VERSION should be PEP440 compatible (http://www.python.org/dev/peps/pep-0440) VERSION = '0.4' # Indicates if this version is a release version RELEASE = 'dev' not in VERSION if not RELEASE: VERSION += get_git_devstr(False) # Populate the dict of setup command overrides; this should be done before # invoking any other functionality from distutils since it can potentially # modify distutils' behavior. cmdclassd = register_commands(PACKAGENAME, VERSION, RELEASE) # Freeze build information in version.py generate_version_py(PACKAGENAME, VERSION, RELEASE, get_debug_option(PACKAGENAME)) # Treat everything in scripts except README* as a script to be installed scripts = [fname for fname in glob.glob(os.path.join('scripts', '*')) if not os.path.basename(fname).startswith('README')] # Get configuration information from all of the various subpackages. # See the docstring for setup_helpers.update_package_files for more # details. package_info = get_package_info() # Add the project-global data package_info['package_data'].setdefault(PACKAGENAME, []) package_info['package_data'][PACKAGENAME].append('data/*') # Define entry points for command-line scripts entry_points = {'console_scripts': []} entry_point_list = conf.items('entry_points') for entry_point in entry_point_list: entry_points['console_scripts'].append('{0} = {1}'.format(entry_point[0], entry_point[1])) # Include all .c files, recursively, including those generated by # Cython, since we can not do this in MANIFEST.in with a "dynamic" # directory name. c_files = [] for root, dirs, files in os.walk(PACKAGENAME): for filename in files: if filename.endswith('.c'): c_files.append( os.path.join( os.path.relpath(root, PACKAGENAME), filename)) package_info['package_data'][PACKAGENAME].extend(c_files) # Note that requires and provides should not be included in the call to # ``setup``, since these are now deprecated. See this link for more details: # https://groups.google.com/forum/#!topic/astropy-dev/urYO8ckB2uM setup(name=PACKAGENAME, version=VERSION, description=DESCRIPTION, scripts=scripts, install_requires=metadata.get('install_requires', 'astropy').strip().split(), author=AUTHOR, author_email=AUTHOR_EMAIL, license=LICENSE, url=URL, long_description=LONG_DESCRIPTION, classifiers=[ 'Development Status :: 4 - Beta', 'Intended Audience :: Science/Research', 'License :: OSI Approved :: BSD License', 'Operating System :: OS Independent', 'Programming Language :: C', 'Programming Language :: Cython', 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.3', 'Programming Language :: Python :: 3.4', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: Implementation :: CPython', 'Topic :: Scientific/Engineering :: Astronomy', ], cmdclass=cmdclassd, zip_safe=False, use_2to3=False, entry_points=entry_points, **package_info )
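# Typical invocations of this script (illustrative; the exact behaviour
# depends on the local Python and setuptools/astropy-helpers environment):
#
#     python setup.py build
#     python setup.py install
#
# or, from the unpacked source directory:
#
#     pip install .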