spectral-cube-0.3.1/
spectral-cube-0.3.1/ah_bootstrap.py
"""
This bootstrap module contains code for ensuring that the astropy_helpers
package will be importable by the time the setup.py script runs.  It also
includes some workarounds to ensure that a recent-enough version of setuptools
is being used for the installation.

This module should be the first thing imported in the setup.py of
distributions that make use of the utilities in astropy_helpers.  If the
distribution ships with its own copy of astropy_helpers, this module will
first attempt to import from the shipped copy.  However, it will also check
PyPI to see if there are any bug-fix releases on top of the current version
that may be useful to get past platform-specific bugs that have been fixed.
When running setup.py, use the ``--offline`` command-line option to disable
the auto-upgrade checks.

When this module is imported or otherwise executed it automatically calls a
main function that attempts to read the project's setup.cfg file, which it
checks for a configuration section called ``[ah_bootstrap]``; the presence of
that section, and the options therein, determines the next step taken.  If it
contains an option called ``auto_use`` with a value of ``True``, this module's
main function, `use_astropy_helpers`, is called automatically (see that
function's docstring for full details).  Otherwise no further action is taken
(however, ``ah_bootstrap.use_astropy_helpers`` may be called manually from
within the setup.py script).

Additional options in the ``[ah_bootstrap]`` section of setup.cfg have the
same names as the arguments to `use_astropy_helpers`, and can be used to
configure the bootstrap script when ``auto_use = True``.

See https://github.com/astropy/astropy-helpers for more details, and for the
latest version of this module.
"""
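
# For reference, a minimal ``setup.cfg`` section consumed by this module
# (values are illustrative; any of the options listed in CFG_OPTIONS below
# may appear here):
#
#     [ah_bootstrap]
#     auto_use = True
#     path = astropy_helpers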

import contextlib
import errno
import imp
import io
import locale
import os
import re
import subprocess as sp
import sys

try:
    from ConfigParser import ConfigParser, RawConfigParser
except ImportError:
    from configparser import ConfigParser, RawConfigParser

if sys.version_info[0] < 3:
    _str_types = (str, unicode)
    _text_type = unicode
    PY3 = False
else:
    _str_types = (str, bytes)
    _text_type = str
    PY3 = True


# What follows are several import statements meant to deal with install-time
# issues with either missing or misbehaving packages (including making sure
# setuptools itself is installed):


# Some pre-setuptools checks to ensure that either distribute or setuptools >=
# 0.7 is used (over pre-distribute setuptools) if it is available on the path;
# otherwise the latest setuptools will be downloaded and bootstrapped with
# ``ez_setup.py``.  This used to be included in a separate file called
# setuptools_bootstrap.py; but it was combined into ah_bootstrap.py
try:
    import pkg_resources
    _setuptools_req = pkg_resources.Requirement.parse('setuptools>=0.7')
    # This may raise a DistributionNotFound in which case no version of
    # setuptools or distribute is properly installed
    _setuptools = pkg_resources.get_distribution('setuptools')
    if _setuptools not in _setuptools_req:
        # Older version of setuptools; check if we have distribute; again if
        # this results in DistributionNotFound we want to give up
        _distribute = pkg_resources.get_distribution('distribute')
        if _setuptools != _distribute:
            # It's possible on some pathological systems to have an old
            # version of setuptools and distribute on sys.path
            # simultaneously; make sure distribute is the one that's used
            sys.path.insert(1, _distribute.location)
            _distribute.activate()
            imp.reload(pkg_resources)
except:
    # There are several types of exceptions that can occur here; if all else
    # fails bootstrap and use the bootstrapped version
    from ez_setup import use_setuptools
    use_setuptools()


# Note: The following import is required as a workaround to
# https://github.com/astropy/astropy-helpers/issues/89; if we don't import
# this module now, it will get cleaned up after `run_setup` is called, but
# that will later cause the TemporaryDirectory class defined in it to stop
# working when used later on by setuptools
try:
    import setuptools.py31compat
except ImportError:
    pass


# matplotlib can cause problems if it is imported from within a call of
# run_setup(), because in some circumstances it will try to write to the
# user's home directory, resulting in a SandboxViolation.  See
# https://github.com/matplotlib/matplotlib/pull/4165
# Making sure matplotlib, if it is available, is imported early in the setup
# process can mitigate this (note importing matplotlib.pyplot has the same
# issue)
try:
    import matplotlib
    matplotlib.use('Agg')
    import matplotlib.pyplot
except:
    # Ignore if this fails for *any* reason
    pass

# End compatibility imports...


# In case it didn't successfully import before the ez_setup checks
import pkg_resources

from setuptools import Distribution
from setuptools.package_index import PackageIndex
from setuptools.sandbox import run_setup

from distutils import log
from distutils.debug import DEBUG


# TODO: Maybe enable checking for a specific version of astropy_helpers?
DIST_NAME = 'astropy-helpers'
PACKAGE_NAME = 'astropy_helpers'

# Defaults for other options
DOWNLOAD_IF_NEEDED = True
INDEX_URL = 'https://pypi.python.org/simple'
USE_GIT = True
OFFLINE = False
AUTO_UPGRADE = True

# A list of all the configuration options and their required types
CFG_OPTIONS = [
    ('auto_use', bool), ('path', str), ('download_if_needed', bool),
    ('index_url', str), ('use_git', bool), ('offline', bool),
    ('auto_upgrade', bool)
]


class _Bootstrapper(object):
    """
    Bootstrapper implementation.  See ``use_astropy_helpers`` for parameter
    documentation.
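
    Typically this class is used via ``_Bootstrapper.main()``, which merges
    options from the ``[ah_bootstrap]`` section of ``setup.cfg`` with any
    recognized command-line flags and, when ``auto_use`` is set, calls
    ``run()`` automatically.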
""" def __init__(self, path=None, index_url=None, use_git=None, offline=None, download_if_needed=None, auto_upgrade=None): if path is None: path = PACKAGE_NAME if not (isinstance(path, _str_types) or path is False): raise TypeError('path must be a string or False') if PY3 and not isinstance(path, _text_type): fs_encoding = sys.getfilesystemencoding() path = path.decode(fs_encoding) # path to unicode self.path = path # Set other option attributes, using defaults where necessary self.index_url = index_url if index_url is not None else INDEX_URL self.offline = offline if offline is not None else OFFLINE # If offline=True, override download and auto-upgrade if self.offline: download_if_needed = False auto_upgrade = False self.download = (download_if_needed if download_if_needed is not None else DOWNLOAD_IF_NEEDED) self.auto_upgrade = (auto_upgrade if auto_upgrade is not None else AUTO_UPGRADE) # If this is a release then the .git directory will not exist so we # should not use git. git_dir_exists = os.path.exists(os.path.join(os.path.dirname(__file__), '.git')) if use_git is None and not git_dir_exists: use_git = False self.use_git = use_git if use_git is not None else USE_GIT # Declared as False by default--later we check if astropy-helpers can be # upgraded from PyPI, but only if not using a source distribution (as in # the case of import from a git submodule) self.is_submodule = False @classmethod def main(cls, argv=None): if argv is None: argv = sys.argv config = cls.parse_config() config.update(cls.parse_command_line(argv)) auto_use = config.pop('auto_use', False) bootstrapper = cls(**config) if auto_use: # Run the bootstrapper, otherwise the setup.py is using the old # use_astropy_helpers() interface, in which case it will run the # bootstrapper manually after reconfiguring it. bootstrapper.run() return bootstrapper @classmethod def parse_config(cls): if not os.path.exists('setup.cfg'): return {} cfg = ConfigParser() try: cfg.read('setup.cfg') except Exception as e: if DEBUG: raise log.error( "Error reading setup.cfg: {0!r}\n{1} will not be " "automatically bootstrapped and package installation may fail." "\n{2}".format(e, PACKAGE_NAME, _err_help_msg)) return {} if not cfg.has_section('ah_bootstrap'): return {} config = {} for option, type_ in CFG_OPTIONS: if not cfg.has_option('ah_bootstrap', option): continue if type_ is bool: value = cfg.getboolean('ah_bootstrap', option) else: value = cfg.get('ah_bootstrap', option) config[option] = value return config @classmethod def parse_command_line(cls, argv=None): if argv is None: argv = sys.argv config = {} # For now we just pop recognized ah_bootstrap options out of the # arg list. This is imperfect; in the unlikely case that a setup.py # custom command or even custom Distribution class defines an argument # of the same name then we will break that. However there's a catch22 # here that we can't just do full argument parsing right here, because # we don't yet know *how* to parse all possible command-line arguments. 
        if '--no-git' in argv:
            config['use_git'] = False
            argv.remove('--no-git')

        if '--offline' in argv:
            config['offline'] = True
            argv.remove('--offline')

        return config

    def run(self):
        strategies = ['local_directory', 'local_file', 'index']
        dist = None

        # First, remove any previously imported versions of astropy_helpers;
        # this is necessary for nested installs where one package's installer
        # is installing another package via setuptools.sandbox.run_setup, as
        # in the case of setup_requires
        for key in list(sys.modules):
            try:
                if key == PACKAGE_NAME or key.startswith(PACKAGE_NAME + '.'):
                    del sys.modules[key]
            except AttributeError:
                # Sometimes mysterious non-string things can turn up in
                # sys.modules
                continue

        # Check to see if the path is a submodule
        self.is_submodule = self._check_submodule()

        for strategy in strategies:
            method = getattr(self, 'get_{0}_dist'.format(strategy))
            dist = method()
            if dist is not None:
                break
        else:
            raise _AHBootstrapSystemExit(
                "No source found for the {0!r} package; {0} must be "
                "available and importable as a prerequisite to building "
                "or installing this package.".format(PACKAGE_NAME))

        # This is a bit hacky, but if astropy_helpers was loaded from a
        # directory/submodule its Distribution object gets a "precedence" of
        # "DEVELOP_DIST", while in other cases it gets a precedence of
        # "EGG_DIST".  When activating the distribution it will only be placed
        # early on sys.path if it is treated as an EGG_DIST, so always do that
        dist = dist.clone(precedence=pkg_resources.EGG_DIST)

        # Now that we've found a version of astropy-helpers, we're done; just
        # activate the found distribution on sys.path--if we did a download
        # this usually happens automatically but it doesn't hurt to do it
        # again
        # Note: Adding the dist to the global working set also activates it
        # (makes it importable on sys.path) by default.
        try:
            pkg_resources.working_set.add(dist, replace=True)
        except TypeError:
            # Some (much) older versions of setuptools do not have the
            # replace=True option here.  These versions are old enough that
            # all bets may be off anyways, but it's easy enough to work around
            # just in case...
            if dist.key in pkg_resources.working_set.by_key:
                del pkg_resources.working_set.by_key[dist.key]
            pkg_resources.working_set.add(dist)

    @property
    def config(self):
        """
        A `dict` containing the options this `_Bootstrapper` was configured
        with.
        """

        return dict((optname, getattr(self, optname))
                    for optname, _ in CFG_OPTIONS if hasattr(self, optname))

    def get_local_directory_dist(self):
        """
        Handle importing a vendored package from a subdirectory of the source
        distribution.
        """

        if not os.path.isdir(self.path):
            return

        log.info('Attempting to import astropy_helpers from {0} {1!r}'.format(
                 'submodule' if self.is_submodule else 'directory',
                 self.path))

        dist = self._directory_import()

        if dist is None:
            log.warn(
                'The requested path {0!r} for importing {1} does not '
                'exist, or does not contain a copy of the {1} '
                'package.'.format(self.path, PACKAGE_NAME))
        elif self.auto_upgrade and not self.is_submodule:
            # A version of astropy-helpers was found on the available path,
            # but check to see if a bugfix release is available on PyPI
            upgrade = self._do_upgrade(dist)
            if upgrade is not None:
                dist = upgrade

        return dist

    def get_local_file_dist(self):
        """
        Handle importing from a source archive; this also uses setup_requires
        but points easy_install directly to the source archive.
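
        For example, a ``path`` of ``'astropy_helpers-1.0.tar.gz'`` (a
        hypothetical archive name) is handed to easy_install through its
        ``find_links`` option, so the archive is unpacked and used instead
        of downloading from the package index.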
""" if not os.path.isfile(self.path): return log.info('Attempting to unpack and import astropy_helpers from ' '{0!r}'.format(self.path)) try: dist = self._do_download(find_links=[self.path]) except Exception as e: if DEBUG: raise log.warn( 'Failed to import {0} from the specified archive {1!r}: ' '{2}'.format(PACKAGE_NAME, self.path, str(e))) dist = None if dist is not None and self.auto_upgrade: # A version of astropy-helpers was found on the available path, but # check to see if a bugfix release is available on PyPI upgrade = self._do_upgrade(dist) if upgrade is not None: dist = upgrade return dist def get_index_dist(self): if not self.download: log.warn('Downloading {0!r} disabled.'.format(DIST_NAME)) return False log.warn( "Downloading {0!r}; run setup.py with the --offline option to " "force offline installation.".format(DIST_NAME)) try: dist = self._do_download() except Exception as e: if DEBUG: raise log.warn( 'Failed to download and/or install {0!r} from {1!r}:\n' '{2}'.format(DIST_NAME, self.index_url, str(e))) dist = None # No need to run auto-upgrade here since we've already presumably # gotten the most up-to-date version from the package index return dist def _directory_import(self): """ Import astropy_helpers from the given path, which will be added to sys.path. Must return True if the import succeeded, and False otherwise. """ # Return True on success, False on failure but download is allowed, and # otherwise raise SystemExit path = os.path.abspath(self.path) # Use an empty WorkingSet rather than the man # pkg_resources.working_set, since on older versions of setuptools this # will invoke a VersionConflict when trying to install an upgrade ws = pkg_resources.WorkingSet([]) ws.add_entry(path) dist = ws.by_key.get(DIST_NAME) if dist is None: # We didn't find an egg-info/dist-info in the given path, but if a # setup.py exists we can generate it setup_py = os.path.join(path, 'setup.py') if os.path.isfile(setup_py): with _silence(): run_setup(os.path.join(path, 'setup.py'), ['egg_info']) for dist in pkg_resources.find_distributions(path, True): # There should be only one... 
                    return dist

        return dist

    def _do_download(self, version='', find_links=None):
        if find_links:
            allow_hosts = ''
            index_url = None
        else:
            allow_hosts = None
            index_url = self.index_url

        # Annoyingly, setuptools will not handle other arguments to
        # Distribution (such as options) before handling setup_requires, so
        # it is not straightforward to programmatically augment the arguments
        # which are passed to easy_install
        class _Distribution(Distribution):
            def get_option_dict(self, command_name):
                opts = Distribution.get_option_dict(self, command_name)
                if command_name == 'easy_install':
                    if find_links is not None:
                        opts['find_links'] = ('setup script', find_links)
                    if index_url is not None:
                        opts['index_url'] = ('setup script', index_url)
                    if allow_hosts is not None:
                        opts['allow_hosts'] = ('setup script', allow_hosts)
                return opts

        if version:
            req = '{0}=={1}'.format(DIST_NAME, version)
        else:
            req = DIST_NAME

        attrs = {'setup_requires': [req]}

        try:
            if DEBUG:
                _Distribution(attrs=attrs)
            else:
                with _silence():
                    _Distribution(attrs=attrs)

            # If the setup_requires succeeded it will have added the new dist
            # to the main working_set
            return pkg_resources.working_set.by_key.get(DIST_NAME)
        except Exception as e:
            if DEBUG:
                raise

            msg = 'Error retrieving {0} from {1}:\n{2}'
            if find_links:
                source = find_links[0]
            elif index_url != INDEX_URL:
                source = index_url
            else:
                source = 'PyPI'

            raise Exception(msg.format(DIST_NAME, source, repr(e)))

    def _do_upgrade(self, dist):
        # Build up a requirement for a higher bugfix release that is still
        # below the next minor release (so API compatibility is guaranteed)
        next_version = _next_version(dist.parsed_version)

        req = pkg_resources.Requirement.parse(
            '{0}>{1},<{2}'.format(DIST_NAME, dist.version, next_version))

        package_index = PackageIndex(index_url=self.index_url)

        upgrade = package_index.obtain(req)

        if upgrade is not None:
            return self._do_download(version=upgrade.version)

    def _check_submodule(self):
        """
        Check if the given path is a git submodule.

        See the docstrings for ``_check_submodule_using_git`` and
        ``_check_submodule_no_git`` for further details.
        """

        if (self.path is None or
                (os.path.exists(self.path) and not os.path.isdir(self.path))):
            return False

        if self.use_git:
            return self._check_submodule_using_git()
        else:
            return self._check_submodule_no_git()

    def _check_submodule_using_git(self):
        """
        Check if the given path is a git submodule.  If so, attempt to
        initialize and/or update the submodule if needed.

        This function makes calls to the ``git`` command in subprocesses.  The
        ``_check_submodule_no_git`` option uses pure Python to check if the
        given path looks like a git submodule, but it cannot perform updates.
        """

        cmd = ['git', 'submodule', 'status', '--', self.path]

        try:
            log.info('Running `{0}`; use the --no-git option to disable git '
                     'commands'.format(' '.join(cmd)))
            returncode, stdout, stderr = run_cmd(cmd)
        except _CommandNotFound:
            # The git command simply wasn't found; this is most likely the
            # case on user systems that don't have git and are simply
            # trying to install the package from PyPI or a source
            # distribution.  Silently ignore this case and simply don't try
            # to use submodules
            return False

        stderr = stderr.strip()

        if returncode != 0 and stderr:
            # Unfortunately the return code alone cannot be relied on, as
            # earlier versions of git returned 0 even if the requested
            # submodule does not exist

            # This is a warning that occurs in perl (from running git
            # submodule) which only occurs with a malformed locale setting,
            # which can happen sometimes on OSX.
            # See https://github.com/astropy/astropy/issues/2749
            perl_warning = ('perl: warning: Falling back to the standard '
                            'locale ("C").')
            if not stderr.strip().endswith(perl_warning):
                # Some other unknown error condition occurred
                log.warn('git submodule command failed '
                         'unexpectedly:\n{0}'.format(stderr))
                return False

        # Output of `git submodule status` is as follows:
        #
        # 1. Status indicator: '-' if the submodule is uninitialized, '+' if
        #    the submodule is initialized but is not at the commit currently
        #    indicated in .gitmodules (and thus needs to be updated), or 'U'
        #    if the submodule is in an unstable state (i.e. has merge
        #    conflicts)
        #
        # 2. SHA-1 hash of the current commit of the submodule (we don't
        #    really need this information but it's useful for checking that
        #    the output is correct)
        #
        # 3. The output of `git describe` for the submodule's current commit
        #    hash (this includes for example what branches the commit is on)
        #    but only if the submodule is initialized.  We ignore this
        #    information for now
        _git_submodule_status_re = re.compile(
            '^(?P<status>[+-U ])(?P<sha1>[0-9a-f]{40}) '
            '(?P<submodule>\S+)( .*)?$')

        # The stdout should only contain one line--the status of the
        # requested submodule
        m = _git_submodule_status_re.match(stdout)
        if m:
            # Yes, the path *is* a git submodule
            self._update_submodule(m.group('submodule'), m.group('status'))
            return True
        else:
            log.warn(
                'Unexpected output from `git submodule status`:\n{0}\n'
                'Will attempt import from {1!r} regardless.'.format(
                    stdout, self.path))
            return False

    def _check_submodule_no_git(self):
        """
        Like ``_check_submodule_using_git``, but simply parses the .gitmodules
        file to determine if the supplied path is a git submodule, and does
        not exec any subprocesses.

        This can only determine if a path is a submodule--it does not perform
        updates, etc.  This function may need to be updated if the format of
        the .gitmodules file is changed between git versions.
        """

        gitmodules_path = os.path.abspath('.gitmodules')

        if not os.path.isfile(gitmodules_path):
            return False

        # This is a minimal reader for gitconfig-style files.  It handles a
        # few of the quirks that make gitconfig files incompatible with
        # ConfigParser-style files, but does not support the full gitconfig
        # syntax (just enough needed to read a .gitmodules file).
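        # For reference, a typical .gitmodules file (hypothetical name, path,
        # and URL) looks like the following; note the leading whitespace and
        # the '#'/';' comment characters that stock ConfigParser cannot
        # digest without the preprocessing below:
        #
        #     [submodule "astropy_helpers"]
        #         path = astropy_helpers
        #         url = https://github.com/astropy/astropy-helpers.git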
        gitmodules_fileobj = io.StringIO()

        # Must use io.open for cross-Python-compatible behavior wrt unicode
        with io.open(gitmodules_path) as f:
            for line in f:
                # gitconfig files are more flexible with leading whitespace;
                # just go ahead and remove it
                line = line.lstrip()

                # comments can start with either # or ;
                if line and line[0] in ('#', ';'):
                    continue

                gitmodules_fileobj.write(line)

        gitmodules_fileobj.seek(0)

        cfg = RawConfigParser()

        try:
            cfg.readfp(gitmodules_fileobj)
        except Exception as exc:
            log.warn('Malformed .gitmodules file: {0}\n'
                     '{1} cannot be assumed to be a git submodule.'.format(
                         exc, self.path))
            return False

        for section in cfg.sections():
            if not cfg.has_option(section, 'path'):
                continue

            submodule_path = cfg.get(section, 'path').rstrip(os.sep)

            if submodule_path == self.path.rstrip(os.sep):
                return True

        return False

    def _update_submodule(self, submodule, status):
        if status == ' ':
            # The submodule is up to date; no action necessary
            return
        elif status == '-':
            if self.offline:
                raise _AHBootstrapSystemExit(
                    "Cannot initialize the {0} submodule in --offline mode; "
                    "this requires being able to clone the submodule from an "
                    "online repository.".format(submodule))
            cmd = ['update', '--init']
            action = 'Initializing'
        elif status == '+':
            cmd = ['update']
            action = 'Updating'
            if self.offline:
                cmd.append('--no-fetch')
        elif status == 'U':
            raise _AHBootstrapSystemExit(
                'Error: Submodule {0} contains unresolved merge conflicts.  '
                'Please complete or abandon any changes in the submodule so '
                'that it is in a usable state, then try again.'.format(
                    submodule))
        else:
            log.warn('Unknown status {0!r} for git submodule {1!r}.  Will '
                     'attempt to use the submodule as-is, but try to ensure '
                     'that the submodule is in a clean state and contains no '
                     'conflicts or errors.\n{2}'.format(status, submodule,
                                                        _err_help_msg))
            return

        err_msg = None

        cmd = ['git', 'submodule'] + cmd + ['--', submodule]
        log.warn('{0} {1} submodule with: `{2}`'.format(
            action, submodule, ' '.join(cmd)))

        try:
            log.info('Running `{0}`; use the --no-git option to disable git '
                     'commands'.format(' '.join(cmd)))
            returncode, stdout, stderr = run_cmd(cmd)
        except OSError as e:
            err_msg = str(e)
        else:
            if returncode != 0:
                err_msg = stderr

        if err_msg is not None:
            log.warn('An unexpected error occurred updating the git '
                     'submodule {0!r}:\n{1}\n{2}'.format(submodule, err_msg,
                                                         _err_help_msg))


class _CommandNotFound(OSError):
    """
    An exception raised when a command run with run_cmd is not found on the
    system.
    """


def run_cmd(cmd):
    """
    Run a command in a subprocess, given as a list of command-line arguments.

    Returns a ``(returncode, stdout, stderr)`` tuple.
    """

    try:
        p = sp.Popen(cmd, stdout=sp.PIPE, stderr=sp.PIPE)
        # XXX: May block if either stdout or stderr fill their buffers;
        # however for the commands this is currently used for that is
        # unlikely (they should have very brief output)
        stdout, stderr = p.communicate()
    except OSError as e:
        if DEBUG:
            raise

        if e.errno == errno.ENOENT:
            msg = 'Command not found: `{0}`'.format(' '.join(cmd))
            raise _CommandNotFound(msg, cmd)
        else:
            raise _AHBootstrapSystemExit(
                'An unexpected error occurred when running the '
                '`{0}` command:\n{1}'.format(' '.join(cmd), str(e)))

    # Can fail if the default locale is not configured properly.  See
    # https://github.com/astropy/astropy/issues/2749.  For the purposes under
    # consideration 'latin1' is an acceptable fallback.
    try:
        stdio_encoding = locale.getdefaultlocale()[1] or 'latin1'
    except ValueError:
        # Due to an OSX oddity locale.getdefaultlocale() can also crash
        # depending on the user's locale/language settings.  See:
        # http://bugs.python.org/issue18378
        stdio_encoding = 'latin1'

    # Unlikely to fail at this point but even then let's be flexible
    if not isinstance(stdout, _text_type):
        stdout = stdout.decode(stdio_encoding, 'replace')

    if not isinstance(stderr, _text_type):
        stderr = stderr.decode(stdio_encoding, 'replace')

    return (p.returncode, stdout, stderr)


def _next_version(version):
    """
    Given a parsed version from pkg_resources.parse_version, returns a new
    version string with the next minor version.

    Examples
    ========
    >>> _next_version(pkg_resources.parse_version('1.2.3'))
    '1.3.0'
    """

    if hasattr(version, 'base_version'):
        # New version parsing from setuptools >= 8.0
        if version.base_version:
            parts = version.base_version.split('.')
        else:
            parts = []
    else:
        parts = []
        for part in version:
            if part.startswith('*'):
                break

            parts.append(part)

    parts = [int(p) for p in parts]

    if len(parts) < 3:
        parts += [0] * (3 - len(parts))

    major, minor, micro = parts[:3]

    return '{0}.{1}.{2}'.format(major, minor + 1, 0)


class _DummyFile(object):
    """A noop writeable object."""

    errors = ''  # Required for Python 3.x
    encoding = 'utf-8'

    def write(self, s):
        pass

    def flush(self):
        pass


@contextlib.contextmanager
def _silence():
    """A context manager that silences sys.stdout and sys.stderr."""

    old_stdout = sys.stdout
    old_stderr = sys.stderr
    sys.stdout = _DummyFile()
    sys.stderr = _DummyFile()
    exception_occurred = False
    try:
        yield
    except:
        exception_occurred = True
        # Go ahead and clean up so that exception handling can work normally
        sys.stdout = old_stdout
        sys.stderr = old_stderr
        raise

    if not exception_occurred:
        sys.stdout = old_stdout
        sys.stderr = old_stderr


_err_help_msg = """
If the problem persists consider installing astropy_helpers manually using pip
(`pip install astropy_helpers`) or by manually downloading the source archive,
extracting it, and installing by running `python setup.py install` from the
root of the extracted source code.
"""


class _AHBootstrapSystemExit(SystemExit):
    def __init__(self, *args):
        if not args:
            msg = 'An unknown problem occurred bootstrapping astropy_helpers.'
        else:
            msg = args[0]

        msg += '\n' + _err_help_msg

        super(_AHBootstrapSystemExit, self).__init__(msg, *args[1:])


if sys.version_info[:2] < (2, 7):
    # In Python 2.6 the distutils log does not log warnings, errors, etc. to
    # stderr so we have to wrap it to ensure consistency at least in this
    # module
    import distutils

    class log(object):
        def __getattr__(self, attr):
            return getattr(distutils.log, attr)

        def warn(self, msg, *args):
            self._log_to_stderr(distutils.log.WARN, msg, *args)

        def error(self, msg, *args):
            self._log_to_stderr(distutils.log.ERROR, msg, *args)

        def fatal(self, msg, *args):
            self._log_to_stderr(distutils.log.FATAL, msg, *args)

        def log(self, level, msg, *args):
            if level in (distutils.log.WARN, distutils.log.ERROR,
                         distutils.log.FATAL):
                self._log_to_stderr(level, msg, *args)
            else:
                distutils.log.log(level, msg, *args)

        def _log_to_stderr(self, level, msg, *args):
            # This is the only truly 'public' way to get the current threshold
            # of the log
            current_threshold = distutils.log.set_threshold(
                distutils.log.WARN)
            distutils.log.set_threshold(current_threshold)
            if level >= current_threshold:
                if args:
                    msg = msg % args
                sys.stderr.write('%s\n' % msg)
                sys.stderr.flush()

    log = log()


BOOTSTRAPPER = _Bootstrapper.main()


def use_astropy_helpers(**kwargs):
    """
    Ensure that the `astropy_helpers` module is available and is importable.
    This supports automatic submodule initialization if astropy_helpers is
    included in a project as a git submodule, or will download it from PyPI if
    necessary.

    Parameters
    ----------

    path : str or None, optional
        A filesystem path relative to the root of the project's source code
        that should be added to `sys.path` so that `astropy_helpers` can be
        imported from that path.

        If the path is a git submodule it will automatically be initialized
        and/or updated.

        The path may also be to a ``.tar.gz`` archive of the astropy_helpers
        source distribution.  In this case the archive is automatically
        unpacked and made temporarily available on `sys.path` as a ``.egg``
        archive.

        If `None` skip straight to downloading.

    download_if_needed : bool, optional
        If the provided filesystem path is not found an attempt will be made
        to download astropy_helpers from PyPI.  It will then be made
        temporarily available on `sys.path` as a ``.egg`` archive (using the
        ``setup_requires`` feature of setuptools).  If the ``--offline``
        option is given at the command line the value of this argument is
        overridden to `False`.

    index_url : str, optional
        If provided, use a different URL for the Python package index than the
        main PyPI server.

    use_git : bool, optional
        If `False` no git commands will be used--this effectively disables
        support for git submodules.  If the ``--no-git`` option is given at
        the command line the value of this argument is overridden to `False`.

    auto_upgrade : bool, optional
        By default, when installing a package from a non-development source
        distribution ah_bootstrap will try to automatically check for patch
        releases to astropy-helpers on PyPI and use the patched version over
        any bundled versions.  Setting this to `False` will disable that
        functionality.  If the ``--offline`` option is given at the command
        line the value of this argument is overridden to `False`.

    offline : bool, optional
        If `True`, disable all actions that require an internet connection,
        including downloading packages from the package index and fetching
        updates to any git submodule.  Defaults to `False`.
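
    Examples
    --------
    A minimal, hypothetical invocation from a project's ``setup.py``,
    assuming the helpers are vendored in an ``astropy_helpers/``
    subdirectory::

        import ah_bootstrap
        ah_bootstrap.use_astropy_helpers(path='astropy_helpers')

        # astropy_helpers is now importable for the rest of setup.py
        import astropy_helpers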
""" global BOOTSTRAPPER config = BOOTSTRAPPER.config config.update(**kwargs) # Create a new bootstrapper with the updated configuration and run it BOOTSTRAPPER = _Bootstrapper(**config) BOOTSTRAPPER.run() spectral-cube-0.3.1/astropy_helpers/0000755000077000000240000000000012654610601017511 5ustar adamstaff00000000000000spectral-cube-0.3.1/astropy_helpers/.coveragerc0000644000077000000240000000063312340434262021633 0ustar adamstaff00000000000000[run] source = astropy_helpers ah_bootstrap omit = astropy_helpers/tests* [report] exclude_lines = # Have to re-enable the standard pragma pragma: no cover # Don't complain about packages we have installed except ImportError # Don't complain if tests don't hit assertions raise AssertionError raise NotImplementedError # Don't complain about script hooks def main\(.*\): spectral-cube-0.3.1/astropy_helpers/.travis.yml0000644000077000000240000000343112533471373021632 0ustar adamstaff00000000000000# We set the language to c because python isn't supported on the MacOS X nodes # on Travis. However, the language ends up being irrelevant anyway, since we # install Python ourselves using conda. language: c os: - osx - linux env: matrix: - PYTHON_VERSION=2.6 - PYTHON_VERSION=2.7 - PYTHON_VERSION=3.3 - PYTHON_VERSION=3.4 global: - SETUPTOOLS_VERSION=stable matrix: include: - os: linux env: PYTHON_VERSION=2.7 SETUPTOOLS_VERSION=dev before_install: # Use utf8 encoding. Should be default, but this is insurance against # future changes - export PYTHONIOENCODING=UTF8 # Install conda - source continuous-integration/travis/install_conda_$TRAVIS_OS_NAME.sh # Install graphviz - source continuous-integration/travis/install_graphviz_$TRAVIS_OS_NAME.sh install: - conda create --yes -n test python=$PYTHON_VERSION - source activate test - conda install --yes pip "pytest<2.6" sphinx cython numpy - pip install coveralls pytest-cov # We cannot install the developer version of setuptools using pip because # pip tries to remove the previous version of setuptools before the # installation is complete, which causes issues. Instead, we just install # setuptools manually. - if [[ $SETUPTOOLS_VERSION == dev ]]; then conda install --yes mercurial mock; fi - if [[ $SETUPTOOLS_VERSION == dev ]]; then hg clone https://bitbucket.org/pypa/setuptools; cd setuptools; python setup.py install; cd ..; fi before_script: # Some of the tests use git commands that require a user to be configured - git config --global user.name "A U Thor" - git config --global user.email "author@example.com" script: - py.test --cov astropy_helpers astropy_helpers after_success: - coveralls spectral-cube-0.3.1/astropy_helpers/ah_bootstrap.py0000644000077000000240000010650312533471373022564 0ustar adamstaff00000000000000""" This bootstrap module contains code for ensuring that the astropy_helpers package will be importable by the time the setup.py script runs. It also includes some workarounds to ensure that a recent-enough version of setuptools is being used for the installation. This module should be the first thing imported in the setup.py of distributions that make use of the utilities in astropy_helpers. If the distribution ships with its own copy of astropy_helpers, this module will first attempt to import from the shipped copy. However, it will also check PyPI to see if there are any bug-fix releases on top of the current version that may be useful to get past platform-specific bugs that have been fixed. When running setup.py, use the ``--offline`` command-line option to disable the auto-upgrade checks. 
When this module is imported or otherwise executed it automatically calls a main function that attempts to read the project's setup.cfg file, which it checks for a configuration section called ``[ah_bootstrap]`` the presences of that section, and options therein, determine the next step taken: If it contains an option called ``auto_use`` with a value of ``True``, it will automatically call the main function of this module called `use_astropy_helpers` (see that function's docstring for full details). Otherwise no further action is taken (however, ``ah_bootstrap.use_astropy_helpers`` may be called manually from within the setup.py script). Additional options in the ``[ah_boostrap]`` section of setup.cfg have the same names as the arguments to `use_astropy_helpers`, and can be used to configure the bootstrap script when ``auto_use = True``. See https://github.com/astropy/astropy-helpers for more details, and for the latest version of this module. """ import contextlib import errno import imp import io import locale import os import re import subprocess as sp import sys try: from ConfigParser import ConfigParser, RawConfigParser except ImportError: from configparser import ConfigParser, RawConfigParser if sys.version_info[0] < 3: _str_types = (str, unicode) _text_type = unicode PY3 = False else: _str_types = (str, bytes) _text_type = str PY3 = True # What follows are several import statements meant to deal with install-time # issues with either missing or misbehaving pacakges (including making sure # setuptools itself is installed): # Some pre-setuptools checks to ensure that either distribute or setuptools >= # 0.7 is used (over pre-distribute setuptools) if it is available on the path; # otherwise the latest setuptools will be downloaded and bootstrapped with # ``ez_setup.py``. This used to be included in a separate file called # setuptools_bootstrap.py; but it was combined into ah_bootstrap.py try: import pkg_resources _setuptools_req = pkg_resources.Requirement.parse('setuptools>=0.7') # This may raise a DistributionNotFound in which case no version of # setuptools or distribute is properly installed _setuptools = pkg_resources.get_distribution('setuptools') if _setuptools not in _setuptools_req: # Older version of setuptools; check if we have distribute; again if # this results in DistributionNotFound we want to give up _distribute = pkg_resources.get_distribution('distribute') if _setuptools != _distribute: # It's possible on some pathological systems to have an old version # of setuptools and distribute on sys.path simultaneously; make # sure distribute is the one that's used sys.path.insert(1, _distribute.location) _distribute.activate() imp.reload(pkg_resources) except: # There are several types of exceptions that can occur here; if all else # fails bootstrap and use the bootstrapped version from ez_setup import use_setuptools use_setuptools() # Note: The following import is required as a workaround to # https://github.com/astropy/astropy-helpers/issues/89; if we don't import this # module now, it will get cleaned up after `run_setup` is called, but that will # later cause the TemporaryDirectory class defined in it to stop working when # used later on by setuptools try: import setuptools.py31compat except ImportError: pass # matplotlib can cause problems if it is imported from within a call of # run_setup(), because in some circumstances it will try to write to the user's # home directory, resulting in a SandboxViolation. 
See # https://github.com/matplotlib/matplotlib/pull/4165 # Making sure matplotlib, if it is available, is imported early in the setup # process can mitigate this (note importing matplotlib.pyplot has the same # issue) try: import matplotlib matplotlib.use('Agg') import matplotlib.pyplot except: # Ignore if this fails for *any* reason* pass # End compatibility imports... # In case it didn't successfully import before the ez_setup checks import pkg_resources from setuptools import Distribution from setuptools.package_index import PackageIndex from setuptools.sandbox import run_setup from distutils import log from distutils.debug import DEBUG # TODO: Maybe enable checking for a specific version of astropy_helpers? DIST_NAME = 'astropy-helpers' PACKAGE_NAME = 'astropy_helpers' # Defaults for other options DOWNLOAD_IF_NEEDED = True INDEX_URL = 'https://pypi.python.org/simple' USE_GIT = True OFFLINE = False AUTO_UPGRADE = True # A list of all the configuration options and their required types CFG_OPTIONS = [ ('auto_use', bool), ('path', str), ('download_if_needed', bool), ('index_url', str), ('use_git', bool), ('offline', bool), ('auto_upgrade', bool) ] class _Bootstrapper(object): """ Bootstrapper implementation. See ``use_astropy_helpers`` for parameter documentation. """ def __init__(self, path=None, index_url=None, use_git=None, offline=None, download_if_needed=None, auto_upgrade=None): if path is None: path = PACKAGE_NAME if not (isinstance(path, _str_types) or path is False): raise TypeError('path must be a string or False') if PY3 and not isinstance(path, _text_type): fs_encoding = sys.getfilesystemencoding() path = path.decode(fs_encoding) # path to unicode self.path = path # Set other option attributes, using defaults where necessary self.index_url = index_url if index_url is not None else INDEX_URL self.offline = offline if offline is not None else OFFLINE # If offline=True, override download and auto-upgrade if self.offline: download_if_needed = False auto_upgrade = False self.download = (download_if_needed if download_if_needed is not None else DOWNLOAD_IF_NEEDED) self.auto_upgrade = (auto_upgrade if auto_upgrade is not None else AUTO_UPGRADE) # If this is a release then the .git directory will not exist so we # should not use git. git_dir_exists = os.path.exists(os.path.join(os.path.dirname(__file__), '.git')) if use_git is None and not git_dir_exists: use_git = False self.use_git = use_git if use_git is not None else USE_GIT # Declared as False by default--later we check if astropy-helpers can be # upgraded from PyPI, but only if not using a source distribution (as in # the case of import from a git submodule) self.is_submodule = False @classmethod def main(cls, argv=None): if argv is None: argv = sys.argv config = cls.parse_config() config.update(cls.parse_command_line(argv)) auto_use = config.pop('auto_use', False) bootstrapper = cls(**config) if auto_use: # Run the bootstrapper, otherwise the setup.py is using the old # use_astropy_helpers() interface, in which case it will run the # bootstrapper manually after reconfiguring it. bootstrapper.run() return bootstrapper @classmethod def parse_config(cls): if not os.path.exists('setup.cfg'): return {} cfg = ConfigParser() try: cfg.read('setup.cfg') except Exception as e: if DEBUG: raise log.error( "Error reading setup.cfg: {0!r}\n{1} will not be " "automatically bootstrapped and package installation may fail." 
"\n{2}".format(e, PACKAGE_NAME, _err_help_msg)) return {} if not cfg.has_section('ah_bootstrap'): return {} config = {} for option, type_ in CFG_OPTIONS: if not cfg.has_option('ah_bootstrap', option): continue if type_ is bool: value = cfg.getboolean('ah_bootstrap', option) else: value = cfg.get('ah_bootstrap', option) config[option] = value return config @classmethod def parse_command_line(cls, argv=None): if argv is None: argv = sys.argv config = {} # For now we just pop recognized ah_bootstrap options out of the # arg list. This is imperfect; in the unlikely case that a setup.py # custom command or even custom Distribution class defines an argument # of the same name then we will break that. However there's a catch22 # here that we can't just do full argument parsing right here, because # we don't yet know *how* to parse all possible command-line arguments. if '--no-git' in argv: config['use_git'] = False argv.remove('--no-git') if '--offline' in argv: config['offline'] = True argv.remove('--offline') return config def run(self): strategies = ['local_directory', 'local_file', 'index'] dist = None # First, remove any previously imported versions of astropy_helpers; # this is necessary for nested installs where one package's installer # is installing another package via setuptools.sandbox.run_setup, as in # the case of setup_requires for key in list(sys.modules): try: if key == PACKAGE_NAME or key.startswith(PACKAGE_NAME + '.'): del sys.modules[key] except AttributeError: # Sometimes mysterious non-string things can turn up in # sys.modules continue # Check to see if the path is a submodule self.is_submodule = self._check_submodule() for strategy in strategies: method = getattr(self, 'get_{0}_dist'.format(strategy)) dist = method() if dist is not None: break else: raise _AHBootstrapSystemExit( "No source found for the {0!r} package; {0} must be " "available and importable as a prerequisite to building " "or installing this package.".format(PACKAGE_NAME)) # This is a bit hacky, but if astropy_helpers was loaded from a # directory/submodule its Distribution object gets a "precedence" of # "DEVELOP_DIST". However, in other cases it gets a precedence of # "EGG_DIST". However, when activing the distribution it will only be # placed early on sys.path if it is treated as an EGG_DIST, so always # do that dist = dist.clone(precedence=pkg_resources.EGG_DIST) # Otherwise we found a version of astropy-helpers, so we're done # Just active the found distribution on sys.path--if we did a # download this usually happens automatically but it doesn't hurt to # do it again # Note: Adding the dist to the global working set also activates it # (makes it importable on sys.path) by default. try: pkg_resources.working_set.add(dist, replace=True) except TypeError: # Some (much) older versions of setuptools do not have the # replace=True option here. These versions are old enough that all # bets may be off anyways, but it's easy enough to work around just # in case... if dist.key in pkg_resources.working_set.by_key: del pkg_resources.working_set.by_key[dist.key] pkg_resources.working_set.add(dist) @property def config(self): """ A `dict` containing the options this `_Bootstrapper` was configured with. """ return dict((optname, getattr(self, optname)) for optname, _ in CFG_OPTIONS if hasattr(self, optname)) def get_local_directory_dist(self): """ Handle importing a vendored package from a subdirectory of the source distribution. 
""" if not os.path.isdir(self.path): return log.info('Attempting to import astropy_helpers from {0} {1!r}'.format( 'submodule' if self.is_submodule else 'directory', self.path)) dist = self._directory_import() if dist is None: log.warn( 'The requested path {0!r} for importing {1} does not ' 'exist, or does not contain a copy of the {1} ' 'package.'.format(self.path, PACKAGE_NAME)) elif self.auto_upgrade and not self.is_submodule: # A version of astropy-helpers was found on the available path, but # check to see if a bugfix release is available on PyPI upgrade = self._do_upgrade(dist) if upgrade is not None: dist = upgrade return dist def get_local_file_dist(self): """ Handle importing from a source archive; this also uses setup_requires but points easy_install directly to the source archive. """ if not os.path.isfile(self.path): return log.info('Attempting to unpack and import astropy_helpers from ' '{0!r}'.format(self.path)) try: dist = self._do_download(find_links=[self.path]) except Exception as e: if DEBUG: raise log.warn( 'Failed to import {0} from the specified archive {1!r}: ' '{2}'.format(PACKAGE_NAME, self.path, str(e))) dist = None if dist is not None and self.auto_upgrade: # A version of astropy-helpers was found on the available path, but # check to see if a bugfix release is available on PyPI upgrade = self._do_upgrade(dist) if upgrade is not None: dist = upgrade return dist def get_index_dist(self): if not self.download: log.warn('Downloading {0!r} disabled.'.format(DIST_NAME)) return False log.warn( "Downloading {0!r}; run setup.py with the --offline option to " "force offline installation.".format(DIST_NAME)) try: dist = self._do_download() except Exception as e: if DEBUG: raise log.warn( 'Failed to download and/or install {0!r} from {1!r}:\n' '{2}'.format(DIST_NAME, self.index_url, str(e))) dist = None # No need to run auto-upgrade here since we've already presumably # gotten the most up-to-date version from the package index return dist def _directory_import(self): """ Import astropy_helpers from the given path, which will be added to sys.path. Must return True if the import succeeded, and False otherwise. """ # Return True on success, False on failure but download is allowed, and # otherwise raise SystemExit path = os.path.abspath(self.path) # Use an empty WorkingSet rather than the man # pkg_resources.working_set, since on older versions of setuptools this # will invoke a VersionConflict when trying to install an upgrade ws = pkg_resources.WorkingSet([]) ws.add_entry(path) dist = ws.by_key.get(DIST_NAME) if dist is None: # We didn't find an egg-info/dist-info in the given path, but if a # setup.py exists we can generate it setup_py = os.path.join(path, 'setup.py') if os.path.isfile(setup_py): with _silence(): run_setup(os.path.join(path, 'setup.py'), ['egg_info']) for dist in pkg_resources.find_distributions(path, True): # There should be only one... 
return dist return dist def _do_download(self, version='', find_links=None): if find_links: allow_hosts = '' index_url = None else: allow_hosts = None index_url = self.index_url # Annoyingly, setuptools will not handle other arguments to # Distribution (such as options) before handling setup_requires, so it # is not straightforward to programmatically augment the arguments which # are passed to easy_install class _Distribution(Distribution): def get_option_dict(self, command_name): opts = Distribution.get_option_dict(self, command_name) if command_name == 'easy_install': if find_links is not None: opts['find_links'] = ('setup script', find_links) if index_url is not None: opts['index_url'] = ('setup script', index_url) if allow_hosts is not None: opts['allow_hosts'] = ('setup script', allow_hosts) return opts if version: req = '{0}=={1}'.format(DIST_NAME, version) else: req = DIST_NAME attrs = {'setup_requires': [req]} try: if DEBUG: _Distribution(attrs=attrs) else: with _silence(): _Distribution(attrs=attrs) # If the setup_requires succeeded it will have added the new dist to # the main working_set return pkg_resources.working_set.by_key.get(DIST_NAME) except Exception as e: if DEBUG: raise msg = 'Error retrieving {0} from {1}:\n{2}' if find_links: source = find_links[0] elif index_url != INDEX_URL: source = index_url else: source = 'PyPI' raise Exception(msg.format(DIST_NAME, source, repr(e))) def _do_upgrade(self, dist): # Build up a requirement for a higher bugfix release but a lower minor # release (so API compatibility is guaranteed) next_version = _next_version(dist.parsed_version) req = pkg_resources.Requirement.parse( '{0}>{1},<{2}'.format(DIST_NAME, dist.version, next_version)) package_index = PackageIndex(index_url=self.index_url) upgrade = package_index.obtain(req) if upgrade is not None: return self._do_download(version=upgrade.version) def _check_submodule(self): """ Check if the given path is a git submodule. See the docstrings for ``_check_submodule_using_git`` and ``_check_submodule_no_git`` for further details. """ if (self.path is None or (os.path.exists(self.path) and not os.path.isdir(self.path))): return False if self.use_git: return self._check_submodule_using_git() else: return self._check_submodule_no_git() def _check_submodule_using_git(self): """ Check if the given path is a git submodule. If so, attempt to initialize and/or update the submodule if needed. This function makes calls to the ``git`` command in subprocesses. The ``_check_submodule_no_git`` option uses pure Python to check if the given path looks like a git submodule, but it cannot perform updates. """ cmd = ['git', 'submodule', 'status', '--', self.path] try: log.info('Running `{0}`; use the --no-git option to disable git ' 'commands'.format(' '.join(cmd))) returncode, stdout, stderr = run_cmd(cmd) except _CommandNotFound: # The git command simply wasn't found; this is most likely the # case on user systems that don't have git and are simply # trying to install the package from PyPI or a source # distribution. Silently ignore this case and simply don't try # to use submodules return False stderr = stderr.strip() if returncode != 0 and stderr: # Unfortunately the return code alone cannot be relied on, as # earlier versions of git returned 0 even if the requested submodule # does not exist # This is a warning that occurs in perl (from running git submodule) # which only occurs with a malformatted locale setting which can # happen sometimes on OSX. 
See again # https://github.com/astropy/astropy/issues/2749 perl_warning = ('perl: warning: Falling back to the standard locale ' '("C").') if not stderr.strip().endswith(perl_warning): # Some other unknown error condition occurred log.warn('git submodule command failed ' 'unexpectedly:\n{0}'.format(stderr)) return False # Output of `git submodule status` is as follows: # # 1: Status indicator: '-' for submodule is uninitialized, '+' if # submodule is initialized but is not at the commit currently indicated # in .gitmodules (and thus needs to be updated), or 'U' if the # submodule is in an unstable state (i.e. has merge conflicts) # # 2. SHA-1 hash of the current commit of the submodule (we don't really # need this information but it's useful for checking that the output is # correct) # # 3. The output of `git describe` for the submodule's current commit # hash (this includes for example what branches the commit is on) but # only if the submodule is initialized. We ignore this information for # now _git_submodule_status_re = re.compile( '^(?P[+-U ])(?P[0-9a-f]{40}) ' '(?P\S+)( .*)?$') # The stdout should only contain one line--the status of the # requested submodule m = _git_submodule_status_re.match(stdout) if m: # Yes, the path *is* a git submodule self._update_submodule(m.group('submodule'), m.group('status')) return True else: log.warn( 'Unexpected output from `git submodule status`:\n{0}\n' 'Will attempt import from {1!r} regardless.'.format( stdout, self.path)) return False def _check_submodule_no_git(self): """ Like ``_check_submodule_using_git``, but simply parses the .gitmodules file to determine if the supplied path is a git submodule, and does not exec any subprocesses. This can only determine if a path is a submodule--it does not perform updates, etc. This function may need to be updated if the format of the .gitmodules file is changed between git versions. """ gitmodules_path = os.path.abspath('.gitmodules') if not os.path.isfile(gitmodules_path): return False # This is a minimal reader for gitconfig-style files. It handles a few of # the quirks that make gitconfig files incompatible with ConfigParser-style # files, but does not support the full gitconfig syntax (just enough # needed to read a .gitmodules file). 
gitmodules_fileobj = io.StringIO() # Must use io.open for cross-Python-compatible behavior wrt unicode with io.open(gitmodules_path) as f: for line in f: # gitconfig files are more flexible with leading whitespace; just # go ahead and remove it line = line.lstrip() # comments can start with either # or ; if line and line[0] in (':', ';'): continue gitmodules_fileobj.write(line) gitmodules_fileobj.seek(0) cfg = RawConfigParser() try: cfg.readfp(gitmodules_fileobj) except Exception as exc: log.warn('Malformatted .gitmodules file: {0}\n' '{1} cannot be assumed to be a git submodule.'.format( exc, self.path)) return False for section in cfg.sections(): if not cfg.has_option(section, 'path'): continue submodule_path = cfg.get(section, 'path').rstrip(os.sep) if submodule_path == self.path.rstrip(os.sep): return True return False def _update_submodule(self, submodule, status): if status == ' ': # The submodule is up to date; no action necessary return elif status == '-': if self.offline: raise _AHBootstrapSystemExit( "Cannot initialize the {0} submodule in --offline mode; " "this requires being able to clone the submodule from an " "online repository.".format(submodule)) cmd = ['update', '--init'] action = 'Initializing' elif status == '+': cmd = ['update'] action = 'Updating' if self.offline: cmd.append('--no-fetch') elif status == 'U': raise _AHBoostrapSystemExit( 'Error: Submodule {0} contains unresolved merge conflicts. ' 'Please complete or abandon any changes in the submodule so that ' 'it is in a usable state, then try again.'.format(submodule)) else: log.warn('Unknown status {0!r} for git submodule {1!r}. Will ' 'attempt to use the submodule as-is, but try to ensure ' 'that the submodule is in a clean state and contains no ' 'conflicts or errors.\n{2}'.format(status, submodule, _err_help_msg)) return err_msg = None cmd = ['git', 'submodule'] + cmd + ['--', submodule] log.warn('{0} {1} submodule with: `{2}`'.format( action, submodule, ' '.join(cmd))) try: log.info('Running `{0}`; use the --no-git option to disable git ' 'commands'.format(' '.join(cmd))) returncode, stdout, stderr = run_cmd(cmd) except OSError as e: err_msg = str(e) else: if returncode != 0: err_msg = stderr if err_msg is not None: log.warn('An unexpected error occurred updating the git submodule ' '{0!r}:\n{1}\n{2}'.format(submodule, err_msg, _err_help_msg)) class _CommandNotFound(OSError): """ An exception raised when a command run with run_cmd is not found on the system. """ def run_cmd(cmd): """ Run a command in a subprocess, given as a list of command-line arguments. Returns a ``(returncode, stdout, stderr)`` tuple. """ try: p = sp.Popen(cmd, stdout=sp.PIPE, stderr=sp.PIPE) # XXX: May block if either stdout or stderr fill their buffers; # however for the commands this is currently used for that is # unlikely (they should have very brief output) stdout, stderr = p.communicate() except OSError as e: if DEBUG: raise if e.errno == errno.ENOENT: msg = 'Command not found: `{0}`'.format(' '.join(cmd)) raise _CommandNotFound(msg, cmd) else: raise _AHBoostrapSystemExit( 'An unexpected error occurred when running the ' '`{0}` command:\n{1}'.format(' '.join(cmd), str(e))) # Can fail of the default locale is not configured properly. See # https://github.com/astropy/astropy/issues/2749. For the purposes under # consideration 'latin1' is an acceptable fallback. 
try: stdio_encoding = locale.getdefaultlocale()[1] or 'latin1' except ValueError: # Due to an OSX oddity locale.getdefaultlocale() can also crash # depending on the user's locale/language settings. See: # http://bugs.python.org/issue18378 stdio_encoding = 'latin1' # Unlikely to fail at this point but even then let's be flexible if not isinstance(stdout, _text_type): stdout = stdout.decode(stdio_encoding, 'replace') if not isinstance(stderr, _text_type): stderr = stderr.decode(stdio_encoding, 'replace') return (p.returncode, stdout, stderr) def _next_version(version): """ Given a parsed version from pkg_resources.parse_version, returns a new version string with the next minor version. Examples ======== >>> _next_version(pkg_resources.parse_version('1.2.3')) '1.3.0' """ if hasattr(version, 'base_version'): # New version parsing from setuptools >= 8.0 if version.base_version: parts = version.base_version.split('.') else: parts = [] else: parts = [] for part in version: if part.startswith('*'): break parts.append(part) parts = [int(p) for p in parts] if len(parts) < 3: parts += [0] * (3 - len(parts)) major, minor, micro = parts[:3] return '{0}.{1}.{2}'.format(major, minor + 1, 0) class _DummyFile(object): """A noop writeable object.""" errors = '' # Required for Python 3.x encoding = 'utf-8' def write(self, s): pass def flush(self): pass @contextlib.contextmanager def _silence(): """A context manager that silences sys.stdout and sys.stderr.""" old_stdout = sys.stdout old_stderr = sys.stderr sys.stdout = _DummyFile() sys.stderr = _DummyFile() exception_occurred = False try: yield except: exception_occurred = True # Go ahead and clean up so that exception handling can work normally sys.stdout = old_stdout sys.stderr = old_stderr raise if not exception_occurred: sys.stdout = old_stdout sys.stderr = old_stderr _err_help_msg = """ If the problem persists consider installing astropy_helpers manually using pip (`pip install astropy_helpers`) or by manually downloading the source archive, extracting it, and installing by running `python setup.py install` from the root of the extracted source code. """ class _AHBootstrapSystemExit(SystemExit): def __init__(self, *args): if not args: msg = 'An unknown problem occurred bootstrapping astropy_helpers.' else: msg = args[0] msg += '\n' + _err_help_msg super(_AHBootstrapSystemExit, self).__init__(msg, *args[1:]) if sys.version_info[:2] < (2, 7): # In Python 2.6 the distutils log does not log warnings, errors, etc. 
to # stderr so we have to wrap it to ensure consistency at least in this
    # module

    import distutils

    class log(object):
        def __getattr__(self, attr):
            return getattr(distutils.log, attr)

        def warn(self, msg, *args):
            self._log_to_stderr(distutils.log.WARN, msg, *args)

        def error(self, msg, *args):
            self._log_to_stderr(distutils.log.ERROR, msg, *args)

        def fatal(self, msg, *args):
            self._log_to_stderr(distutils.log.FATAL, msg, *args)

        def log(self, level, msg, *args):
            if level in (distutils.log.WARN, distutils.log.ERROR,
                         distutils.log.FATAL):
                self._log_to_stderr(level, msg, *args)
            else:
                distutils.log.log(level, msg, *args)

        def _log_to_stderr(self, level, msg, *args):
            # This is the only truly 'public' way to get the current threshold
            # of the log
            current_threshold = distutils.log.set_threshold(distutils.log.WARN)
            distutils.log.set_threshold(current_threshold)
            if level >= current_threshold:
                if args:
                    msg = msg % args
                sys.stderr.write('%s\n' % msg)
                sys.stderr.flush()

    log = log()


BOOTSTRAPPER = _Bootstrapper.main()


def use_astropy_helpers(**kwargs):
    """
    Ensure that the `astropy_helpers` module is available and is importable.
    This supports automatic submodule initialization if astropy_helpers is
    included in a project as a git submodule, or will download it from PyPI if
    necessary.

    Parameters
    ----------

    path : str or None, optional
        A filesystem path relative to the root of the project's source code
        that should be added to `sys.path` so that `astropy_helpers` can be
        imported from that path.

        If the path is a git submodule it will automatically be initialized
        and/or updated.

        The path may also be to a ``.tar.gz`` archive of the astropy_helpers
        source distribution.  In this case the archive is automatically
        unpacked and made temporarily available on `sys.path` as a ``.egg``
        archive.

        If `None` skip straight to downloading.

    download_if_needed : bool, optional
        If the provided filesystem path is not found an attempt will be made
        to download astropy_helpers from PyPI.  It will then be made
        temporarily available on `sys.path` as a ``.egg`` archive (using the
        ``setup_requires`` feature of setuptools).  If the ``--offline``
        option is given at the command line the value of this argument is
        overridden to `False`.

    index_url : str, optional
        If provided, use a different URL for the Python package index than the
        main PyPI server.

    use_git : bool, optional
        If `False` no git commands will be used--this effectively disables
        support for git submodules. If the ``--no-git`` option is given at the
        command line the value of this argument is overridden to `False`.

    auto_upgrade : bool, optional
        By default, when installing a package from a non-development source
        distribution ah_bootstrap will try to automatically check for patch
        releases to astropy-helpers on PyPI and use the patched version over
        any bundled versions.  Setting this to `False` will disable that
        functionality. If the ``--offline`` option is given at the command
        line the value of this argument is overridden to `False`.

    offline : bool, optional
        If `True` disable all actions that require an internet connection,
        including downloading packages from the package index and fetching
        updates to any git submodule.  Defaults to `False`.
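
    Examples
    --------
    A minimal sketch of the typical call from a project's ``setup.py`` (the
    submodule path ``'astropy_helpers'`` is an illustrative choice, not
    something this function mandates)::

        import ah_bootstrap
        ah_bootstrap.use_astropy_helpers(path='astropy_helpers',
                                         download_if_needed=True)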
""" global BOOTSTRAPPER config = BOOTSTRAPPER.config config.update(**kwargs) # Create a new bootstrapper with the updated configuration and run it BOOTSTRAPPER = _Bootstrapper(**config) BOOTSTRAPPER.run() spectral-cube-0.3.1/astropy_helpers/appveyor.yml0000644000077000000240000000301612533471373022110 0ustar adamstaff00000000000000# AppVeyor.com is a Continuous Integration service to build and run tests under # Windows environment: global: PYTHON: "C:\\conda" MINICONDA_VERSION: "3.5.5" CMD_IN_ENV: "cmd /E:ON /V:ON /C .\\continuous-integration\\appveyor\\windows_sdk.cmd" PYTHON_ARCH: "64" # needs to be set for CMD_IN_ENV to succeed. If a mix # of 32 bit and 64 bit builds are needed, move this # to the matrix section. matrix: - PYTHON_VERSION: "2.6" - PYTHON_VERSION: "2.7" - PYTHON_VERSION: "3.4" platform: -x64 install: # Install miniconda using a powershell script. - "powershell continuous-integration/appveyor/install-miniconda.ps1" - "SET PATH=%PYTHON%;%PYTHON%\\Scripts;%PATH%" # Install the build and runtime dependencies of the project. - "conda update --yes conda" # Create a conda environment - "conda create -q --yes -n test python=%PYTHON_VERSION%" - "activate test" # Check that we have the expected version of Python - "python --version" # Install specified version of numpy and dependencies - "conda install -q --yes numpy Cython sphinx pytest" # Some of the tests use git commands that require a user to be configured - git config --global user.name "A U Thor" - git config --global user.email "author@example.com" # Install graphviz - cinst graphviz.portable # Not a .NET project, we build SunPy in the install step instead build: false test_script: - "%CMD_IN_ENV% py.test" spectral-cube-0.3.1/astropy_helpers/astropy_helpers/0000755000077000000240000000000012654610601022734 5ustar adamstaff00000000000000spectral-cube-0.3.1/astropy_helpers/astropy_helpers/__init__.py0000644000077000000240000000250212533471373025053 0ustar adamstaff00000000000000try: from .version import version as __version__ from .version import githash as __githash__ except ImportError: __version__ = '' __githash__ = '' # If we've made it as far as importing astropy_helpers, we don't need # ah_bootstrap in sys.modules anymore. Getting rid of it is actually necessary # if the package we're installing has a setup_requires of another package that # uses astropy_helpers (and possibly a different version at that) # See https://github.com/astropy/astropy/issues/3541 import sys if 'ah_bootstrap' in sys.modules: del sys.modules['ah_bootstrap'] # Note, this is repeated from ah_bootstrap.py, but is here too in case this # astropy-helpers was upgraded to from an older version that did not have this # check in its ah_bootstrap. # matplotlib can cause problems if it is imported from within a call of # run_setup(), because in some circumstances it will try to write to the user's # home directory, resulting in a SandboxViolation. 
See # https://github.com/matplotlib/matplotlib/pull/4165 # Making sure matplotlib, if it is available, is imported early in the setup # process can mitigate this (note importing matplotlib.pyplot has the same # issue) try: import matplotlib matplotlib.use('Agg') import matplotlib.pyplot except: # Ignore if this fails for *any* reason* pass spectral-cube-0.3.1/astropy_helpers/astropy_helpers/commands/0000755000077000000240000000000012654610601024535 5ustar adamstaff00000000000000spectral-cube-0.3.1/astropy_helpers/astropy_helpers/commands/__init__.py0000644000077000000240000000000012533471373026643 0ustar adamstaff00000000000000spectral-cube-0.3.1/astropy_helpers/astropy_helpers/commands/build_ext.py0000644000077000000240000001623312533471373027102 0ustar adamstaff00000000000000import errno import os import shutil from distutils.core import Extension from setuptools.command.build_ext import build_ext as SetuptoolsBuildExt from ..utils import get_numpy_include_path, invalidate_caches from ..version_helpers import get_pkg_version_module def should_build_with_cython(package, release=None): """Returns the previously used Cython version (or 'unknown' if not previously built) if Cython should be used to build extension modules from pyx files. If the ``release`` parameter is not specified an attempt is made to determine the release flag from `astropy.version`. """ from ..setup_helpers import _module_state try: version_module = __import__(package + '.cython_version', fromlist=['release', 'cython_version']) except ImportError: version_module = None if release is None and version_module is not None: try: release = version_module.release except AttributeError: pass try: cython_version = version_module.cython_version except AttributeError: cython_version = 'unknown' # Only build with Cython if, of course, Cython is installed, we're in a # development version (i.e. not release) or the Cython-generated source # files haven't been created yet (cython_version == 'unknown'). The latter # case can happen even when release is True if checking out a release tag # from the repository if (_module_state['have_cython'] and (not release or cython_version == 'unknown')): return cython_version else: return False # TODO: I think this can be reworked without having to create the class # programmatically. def generate_build_ext_command(packagename, release): """ Creates a custom 'build_ext' command that allows for manipulating some of the C extension options at build time. We use a function to build the class since the base class for build_ext may be different depending on certain build-time parameters (for example, we may use Cython's build_ext instead of the default version in distutils). Uses the default distutils.command.build_ext by default. 
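
    A hedged sketch of how the generated class is typically wired up (the
    package name ``'mypackage'`` is hypothetical; in practice
    ``astropy_helpers.setup_helpers.register_commands`` does this)::

        build_ext_cls = generate_build_ext_command('mypackage',
                                                   release=False)
        # setup(..., cmdclass={'build_ext': build_ext_cls})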
""" uses_cython = should_build_with_cython(packagename, release) if uses_cython: from Cython.Distutils import build_ext as basecls else: basecls = SetuptoolsBuildExt attrs = dict(basecls.__dict__) orig_run = getattr(basecls, 'run', None) orig_finalize = getattr(basecls, 'finalize_options', None) def finalize_options(self): # Add a copy of the _compiler.so module as well, but only if there are # in fact C modules to compile (otherwise there's no reason to include # a record of the compiler used) # Note, self.extensions may not be set yet, but # self.distribution.ext_modules is where any extension modules passed # to setup() can be found extensions = self.distribution.ext_modules if extensions: src_path = os.path.relpath( os.path.join(os.path.dirname(__file__), 'src')) shutil.copy2(os.path.join(src_path, 'compiler.c'), os.path.join(self.package_name, '_compiler.c')) ext = Extension(self.package_name + '._compiler', [os.path.join(self.package_name, '_compiler.c')]) extensions.insert(0, ext) if orig_finalize is not None: orig_finalize(self) # Generate if self.uses_cython: try: from Cython import __version__ as cython_version except ImportError: # This shouldn't happen if we made it this far cython_version = None if (cython_version is not None and cython_version != self.uses_cython): self.force_rebuild = True # Update the used cython version self.uses_cython = cython_version # Regardless of the value of the '--force' option, force a rebuild if # the debug flag changed from the last build if self.force_rebuild: self.force = True def run(self): # For extensions that require 'numpy' in their include dirs, replace # 'numpy' with the actual paths np_include = get_numpy_include_path() for extension in self.extensions: if 'numpy' in extension.include_dirs: idx = extension.include_dirs.index('numpy') extension.include_dirs.insert(idx, np_include) extension.include_dirs.remove('numpy') # Replace .pyx with C-equivalents, unless c files are missing for jdx, src in enumerate(extension.sources): if src.endswith('.pyx'): pyxfn = src cfn = src[:-4] + '.c' elif src.endswith('.c'): pyxfn = src[:-2] + '.pyx' cfn = src if not os.path.isfile(pyxfn): continue if self.uses_cython: extension.sources[jdx] = pyxfn else: if os.path.isfile(cfn): extension.sources[jdx] = cfn else: msg = ( 'Could not find C file {0} for Cython file {1} ' 'when building extension {2}. Cython must be ' 'installed to build from a git checkout.'.format( cfn, pyxfn, extension.name)) raise IOError(errno.ENOENT, msg, cfn) if orig_run is not None: # This should always be the case for a correctly implemented # distutils command. 
orig_run(self) # Update cython_version.py if building with Cython try: cython_version = get_pkg_version_module( packagename, fromlist=['cython_version'])[0] except (AttributeError, ImportError): cython_version = 'unknown' if self.uses_cython and self.uses_cython != cython_version: package_dir = os.path.relpath(packagename) cython_py = os.path.join(package_dir, 'cython_version.py') with open(cython_py, 'w') as f: f.write('# Generated file; do not modify\n') f.write('cython_version = {0!r}\n'.format(self.uses_cython)) if os.path.isdir(self.build_lib): # The build/lib directory may not exist if the build_py command # was not previously run, which may sometimes be the case self.copy_file(cython_py, os.path.join(self.build_lib, cython_py), preserve_mode=False) invalidate_caches() attrs['run'] = run attrs['finalize_options'] = finalize_options attrs['force_rebuild'] = False attrs['uses_cython'] = uses_cython attrs['package_name'] = packagename attrs['user_options'] = basecls.user_options[:] attrs['boolean_options'] = basecls.boolean_options[:] return type('build_ext', (basecls, object), attrs) spectral-cube-0.3.1/astropy_helpers/astropy_helpers/commands/build_py.py0000644000077000000240000000265612533471373026736 0ustar adamstaff00000000000000from setuptools.command.build_py import build_py as SetuptoolsBuildPy from ..utils import _get_platlib_dir class AstropyBuildPy(SetuptoolsBuildPy): user_options = SetuptoolsBuildPy.user_options[:] boolean_options = SetuptoolsBuildPy.boolean_options[:] def finalize_options(self): # Update build_lib settings from the build command to always put # build files in platform-specific subdirectories of build/, even # for projects with only pure-Python source (this is desirable # specifically for support of multiple Python version). build_cmd = self.get_finalized_command('build') platlib_dir = _get_platlib_dir(build_cmd) build_cmd.build_purelib = platlib_dir build_cmd.build_lib = platlib_dir self.build_lib = platlib_dir SetuptoolsBuildPy.finalize_options(self) def run_2to3(self, files, doctests=False): # Filter the files to exclude things that shouldn't be 2to3'd skip_2to3 = self.distribution.skip_2to3 filtered_files = [] for filename in files: for package in skip_2to3: if filename[len(self.build_lib) + 1:].startswith(package): break else: filtered_files.append(filename) SetuptoolsBuildPy.run_2to3(self, filtered_files, doctests) def run(self): # first run the normal build_py SetuptoolsBuildPy.run(self) spectral-cube-0.3.1/astropy_helpers/astropy_helpers/commands/build_sphinx.py0000644000077000000240000002214712533471373027614 0ustar adamstaff00000000000000import inspect import os import pkgutil import re import shutil import subprocess import sys import textwrap from distutils import log from distutils.cmd import DistutilsOptionError import sphinx from sphinx.setup_command import BuildDoc as SphinxBuildDoc from ..utils import minversion PY3 = sys.version_info[0] >= 3 class AstropyBuildSphinx(SphinxBuildDoc): """ A version of the ``build_sphinx`` command that uses the version of Astropy that is built by the setup ``build`` command, rather than whatever is installed on the system. To build docs against the installed version, run ``make html`` in the ``astropy/docs`` directory. This also automatically creates the docs/_static directories--this is needed because GitHub won't create the _static dir because it has no tracked files. 
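
    For example, to fail the build when Sphinx emits warnings and then open
    the freshly built docs (a sketch based on the options defined below)::

        python setup.py build_sphinx --warnings-returncode --open-docs-in-browser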
""" description = 'Build Sphinx documentation for Astropy environment' user_options = SphinxBuildDoc.user_options[:] user_options.append(('warnings-returncode', 'w', 'Parses the sphinx output and sets the return code to 1 if there ' 'are any warnings. Note that this will cause the sphinx log to ' 'only update when it completes, rather than continuously as is ' 'normally the case.')) user_options.append(('clean-docs', 'l', 'Completely clean previous builds, including ' 'automodapi-generated files before building new ones')) user_options.append(('no-intersphinx', 'n', 'Skip intersphinx, even if conf.py says to use it')) user_options.append(('open-docs-in-browser', 'o', 'Open the docs in a browser (using the webbrowser module) if the ' 'build finishes successfully.')) boolean_options = SphinxBuildDoc.boolean_options[:] boolean_options.append('warnings-returncode') boolean_options.append('clean-docs') boolean_options.append('no-intersphinx') boolean_options.append('open-docs-in-browser') _self_iden_rex = re.compile(r"self\.([^\d\W][\w]+)", re.UNICODE) def initialize_options(self): SphinxBuildDoc.initialize_options(self) self.clean_docs = False self.no_intersphinx = False self.open_docs_in_browser = False self.warnings_returncode = False def finalize_options(self): #Clear out previous sphinx builds, if requested if self.clean_docs: dirstorm = [os.path.join(self.source_dir, 'api')] if self.build_dir is None: dirstorm.append('docs/_build') else: dirstorm.append(self.build_dir) for d in dirstorm: if os.path.isdir(d): log.info('Cleaning directory ' + d) shutil.rmtree(d) else: log.info('Not cleaning directory ' + d + ' because ' 'not present or not a directory') SphinxBuildDoc.finalize_options(self) def run(self): # TODO: Break this method up into a few more subroutines and # document them better import webbrowser if PY3: from urllib.request import pathname2url else: from urllib import pathname2url # This is used at the very end of `run` to decide if sys.exit should # be called. If it's None, it won't be. retcode = None # If possible, create the _static dir if self.build_dir is not None: # the _static dir should be in the same place as the _build dir # for Astropy basedir, subdir = os.path.split(self.build_dir) if subdir == '': # the path has a trailing /... basedir, subdir = os.path.split(basedir) staticdir = os.path.join(basedir, '_static') if os.path.isfile(staticdir): raise DistutilsOptionError( 'Attempted to build_sphinx in a location where' + staticdir + 'is a file. Must be a directory.') self.mkpath(staticdir) # Now make sure Astropy is built and determine where it was built build_cmd = self.reinitialize_command('build') build_cmd.inplace = 0 self.run_command('build') build_cmd = self.get_finalized_command('build') build_cmd_path = os.path.abspath(build_cmd.build_lib) ah_importer = pkgutil.get_importer('astropy_helpers') ah_path = os.path.abspath(ah_importer.path) # Now generate the source for and spawn a new process that runs the # command. 
This is needed to get the correct imports for the built # version runlines, runlineno = inspect.getsourcelines(SphinxBuildDoc.run) subproccode = textwrap.dedent(""" from sphinx.setup_command import * os.chdir({srcdir!r}) sys.path.insert(0, {build_cmd_path!r}) sys.path.insert(0, {ah_path!r}) """).format(build_cmd_path=build_cmd_path, ah_path=ah_path, srcdir=self.source_dir) # runlines[1:] removes 'def run(self)' on the first line subproccode += textwrap.dedent(''.join(runlines[1:])) # All "self.foo" in the subprocess code needs to be replaced by the # values taken from the current self in *this* process subproccode = self._self_iden_rex.split(subproccode) for i in range(1, len(subproccode), 2): iden = subproccode[i] val = getattr(self, iden) if iden.endswith('_dir'): # Directories should be absolute, because the `chdir` call # in the new process moves to a different directory subproccode[i] = repr(os.path.abspath(val)) else: subproccode[i] = repr(val) subproccode = ''.join(subproccode) # This is a quick gross hack, but it ensures that the code grabbed from # SphinxBuildDoc.run will work in Python 2 if it uses the print # function if minversion(sphinx, '1.3'): subproccode = 'from __future__ import print_function' + subproccode if self.no_intersphinx: # the confoverrides variable in sphinx.setup_command.BuildDoc can # be used to override the conf.py ... but this could well break # if future versions of sphinx change the internals of BuildDoc, # so remain vigilant! subproccode = subproccode.replace('confoverrides = {}', 'confoverrides = {\'intersphinx_mapping\':{}}') log.debug('Starting subprocess of {0} with python code:\n{1}\n' '[CODE END])'.format(sys.executable, subproccode)) # To return the number of warnings, we need to capture stdout. This # prevents a continuous updating at the terminal, but there's no # apparent way around this. if self.warnings_returncode: proc = subprocess.Popen([sys.executable], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) stdo, stde = proc.communicate(subproccode.encode('utf-8')) print(stdo) stdolines = stdo.splitlines() if b'build succeeded.' in stdolines: retcode = 0 else: retcode = 1 if retcode != 0: if os.environ.get('TRAVIS', None) == 'true': #this means we are in the travis build, so customize #the message appropriately. msg = ('The build_sphinx travis build FAILED ' 'because sphinx issued documentation ' 'warnings (scroll up to see the warnings).') else: # standard failure message msg = ('build_sphinx returning a non-zero exit ' 'code because sphinx issued documentation ' 'warnings.') log.warn(msg) else: proc = subprocess.Popen([sys.executable], stdin=subprocess.PIPE) proc.communicate(subproccode.encode('utf-8')) if proc.returncode == 0: if self.open_docs_in_browser: if self.builder == 'html': absdir = os.path.abspath(self.builder_target_dir) index_path = os.path.join(absdir, 'index.html') fileurl = 'file://' + pathname2url(index_path) webbrowser.open(fileurl) else: log.warn('open-docs-in-browser option was given, but ' 'the builder is not html! Ignoring.') else: log.warn('Sphinx Documentation subprocess failed with return ' 'code ' + str(proc.returncode)) if retcode is not None: # this is potentially dangerous in that there might be something # after the call to `setup` in `setup.py`, and exiting here will # prevent that from running. But there's no other apparent way # to signal what the return code should be. 
sys.exit(retcode) spectral-cube-0.3.1/astropy_helpers/astropy_helpers/commands/install.py0000644000077000000240000000074612533471373026573 0ustar adamstaff00000000000000from setuptools.command.install import install as SetuptoolsInstall from ..utils import _get_platlib_dir class AstropyInstall(SetuptoolsInstall): user_options = SetuptoolsInstall.user_options[:] boolean_options = SetuptoolsInstall.boolean_options[:] def finalize_options(self): build_cmd = self.get_finalized_command('build') platlib_dir = _get_platlib_dir(build_cmd) self.build_lib = platlib_dir SetuptoolsInstall.finalize_options(self) spectral-cube-0.3.1/astropy_helpers/astropy_helpers/commands/install_lib.py0000644000077000000240000000100012533471373027401 0ustar adamstaff00000000000000from setuptools.command.install_lib import install_lib as SetuptoolsInstallLib from ..utils import _get_platlib_dir class AstropyInstallLib(SetuptoolsInstallLib): user_options = SetuptoolsInstallLib.user_options[:] boolean_options = SetuptoolsInstallLib.boolean_options[:] def finalize_options(self): build_cmd = self.get_finalized_command('build') platlib_dir = _get_platlib_dir(build_cmd) self.build_dir = platlib_dir SetuptoolsInstallLib.finalize_options(self) spectral-cube-0.3.1/astropy_helpers/astropy_helpers/commands/register.py0000644000077000000240000000454712533471373026754 0ustar adamstaff00000000000000from setuptools.command.register import register as SetuptoolsRegister class AstropyRegister(SetuptoolsRegister): """Extends the built in 'register' command to support a ``--hidden`` option to make the registered version hidden on PyPI by default. The result of this is that when a version is registered as "hidden" it can still be downloaded from PyPI, but it does not show up in the list of actively supported versions under http://pypi.python.org/pypi/astropy, and is not set as the most recent version. Although this can always be set through the web interface it may be more convenient to be able to specify via the 'register' command. Hidden may also be considered a safer default when running the 'register' command, though this command uses distutils' normal behavior if the ``--hidden`` option is omitted. """ user_options = SetuptoolsRegister.user_options + [ ('hidden', None, 'mark this release as hidden on PyPI by default') ] boolean_options = SetuptoolsRegister.boolean_options + ['hidden'] def initialize_options(self): SetuptoolsRegister.initialize_options(self) self.hidden = False def build_post_data(self, action): data = SetuptoolsRegister.build_post_data(self, action) if action == 'submit' and self.hidden: data['_pypi_hidden'] = '1' return data def _set_config(self): # The original register command is buggy--if you use .pypirc with a # server-login section *at all* the repository you specify with the -r # option will be overwritten with either the repository in .pypirc or # with the default, # If you do not have a .pypirc using the -r option will just crash. 
        # Way to go distutils.
        # If we don't set self.repository back to a default value _set_config
        # can crash if there was a user-supplied value for this option; don't
        # worry, we'll get the real value back afterwards
        self.repository = 'pypi'
        SetuptoolsRegister._set_config(self)
        options = self.distribution.get_option_dict('register')
        if 'repository' in options:
            source, value = options['repository']
            # Really anything that came from setup.cfg or the command line
            # should override whatever was in .pypirc
            self.repository = value
spectral-cube-0.3.1/astropy_helpers/astropy_helpers/commands/setup_package.py0000644000077000000240000000016712533471373027735 0ustar adamstaff00000000000000from os.path import join


def get_package_data():
    return {'astropy_helpers.commands': [join('src', 'compiler.c')]}
spectral-cube-0.3.1/astropy_helpers/astropy_helpers/commands/src/0000755000077000000240000000000012654610601025324 5ustar adamstaff00000000000000spectral-cube-0.3.1/astropy_helpers/astropy_helpers/commands/src/compiler.c0000644000077000000240000000573112533471373027317 0ustar adamstaff00000000000000#include <Python.h>

/***************************************************************************
 * Macros for determining the compiler version.
 *
 * These are borrowed from boost, and majorly abridged to include only
 * the compilers we care about.
 ***************************************************************************/

#ifndef PY3K
#if PY_MAJOR_VERSION >= 3
#define PY3K 1
#else
#define PY3K 0
#endif
#endif

#define STRINGIZE(X) DO_STRINGIZE(X)
#define DO_STRINGIZE(X) #X

#if defined __clang__
/* Clang C++ emulates GCC, so it has to appear early. */
# define COMPILER "Clang version " __clang_version__

#elif defined(__INTEL_COMPILER) || defined(__ICL) || defined(__ICC) || defined(__ECC)
/* Intel */
# if defined(__INTEL_COMPILER)
#  define INTEL_VERSION __INTEL_COMPILER
# elif defined(__ICL)
#  define INTEL_VERSION __ICL
# elif defined(__ICC)
#  define INTEL_VERSION __ICC
# elif defined(__ECC)
#  define INTEL_VERSION __ECC
# endif
# define COMPILER "Intel C compiler version " STRINGIZE(INTEL_VERSION)

#elif defined(__GNUC__)
/* gcc */
# define COMPILER "GCC version " __VERSION__

#elif defined(__SUNPRO_CC)
/* Sun Workshop Compiler */
# define COMPILER "Sun compiler version " STRINGIZE(__SUNPRO_CC)

#elif defined(_MSC_VER)
/* Microsoft Visual C/C++
   Must be last since other compilers define _MSC_VER for compatibility as well */
# if _MSC_VER < 1200
#  define COMPILER_VERSION 5.0
# elif _MSC_VER < 1300
#  define COMPILER_VERSION 6.0
# elif _MSC_VER == 1300
#  define COMPILER_VERSION 7.0
# elif _MSC_VER == 1310
#  define COMPILER_VERSION 7.1
# elif _MSC_VER == 1400
#  define COMPILER_VERSION 8.0
# elif _MSC_VER == 1500
#  define COMPILER_VERSION 9.0
# elif _MSC_VER == 1600
#  define COMPILER_VERSION 10.0
# else
#  define COMPILER_VERSION _MSC_VER
# endif
# define COMPILER "Microsoft Visual C++ version " STRINGIZE(COMPILER_VERSION)

#else
/* Fallback */
# define COMPILER "Unknown compiler"

#endif

/***************************************************************************
 * Module-level
 ***************************************************************************/

struct module_state {
/* The Sun compiler can't handle empty structs */
#if defined(__SUNPRO_C) || defined(_MSC_VER)
    int _dummy;
#endif
};

#if PY3K
static struct PyModuleDef moduledef = {
    PyModuleDef_HEAD_INIT,
    "_compiler",
    NULL,
    sizeof(struct module_state),
    NULL,
    NULL,
    NULL,
    NULL,
    NULL
};

#define INITERROR return NULL

PyMODINIT_FUNC
PyInit__compiler(void)

#else
#define INITERROR return
PyMODINIT_FUNC init_compiler(void) #endif { PyObject* m; #if PY3K m = PyModule_Create(&moduledef); #else m = Py_InitModule3("_compiler", NULL, NULL); #endif if (m == NULL) INITERROR; PyModule_AddStringConstant(m, "compiler", COMPILER); #if PY3K return m; #endif } spectral-cube-0.3.1/astropy_helpers/astropy_helpers/compat/0000755000077000000240000000000012654610601024217 5ustar adamstaff00000000000000spectral-cube-0.3.1/astropy_helpers/astropy_helpers/compat/__init__.py0000644000077000000240000000056012340434262026330 0ustar adamstaff00000000000000def _fix_user_options(options): """ This is for Python 2.x and 3.x compatibility. distutils expects Command options to all be byte strings on Python 2 and Unicode strings on Python 3. """ def to_str_or_none(x): if x is None: return None return str(x) return [tuple(to_str_or_none(x) for x in y) for y in options] spectral-cube-0.3.1/astropy_helpers/astropy_helpers/compat/_subprocess_py2/0000755000077000000240000000000012654610601027340 5ustar adamstaff00000000000000spectral-cube-0.3.1/astropy_helpers/astropy_helpers/compat/_subprocess_py2/__init__.py0000644000077000000240000000243212340434262031451 0ustar adamstaff00000000000000from __future__ import absolute_import from subprocess import * def check_output(*popenargs, **kwargs): r"""Run command with arguments and return its output as a byte string. If the exit code was non-zero it raises a CalledProcessError. The CalledProcessError object will have the return code in the returncode attribute and output in the output attribute. The arguments are the same as for the Popen constructor. Example:: >>> check_output(["ls", "-l", "/dev/null"]) 'crw-rw-rw- 1 root root 1, 3 Oct 18 2007 /dev/null\n' The stdout argument is not allowed as it is used internally. To capture standard error in the result, use stderr=STDOUT.:: >>> check_output(["/bin/sh", "-c", ... "ls -l non_existent_file ; exit 0"], ... stderr=STDOUT) 'ls: non_existent_file: No such file or directory\n' """ if 'stdout' in kwargs: raise ValueError('stdout argument not allowed, it will be overridden.') process = Popen(stdout=PIPE, *popenargs, **kwargs) output, unused_err = process.communicate() retcode = process.poll() if retcode: cmd = kwargs.get("args") if cmd is None: cmd = popenargs[0] raise CalledProcessError(retcode, cmd) return output spectral-cube-0.3.1/astropy_helpers/astropy_helpers/compat/subprocess.py0000644000077000000240000000105012340434262026754 0ustar adamstaff00000000000000""" A replacement wrapper around the subprocess module that adds check_output (which was only added to Python in 2.7. Instead of importing subprocess, other modules should use this as follows:: from astropy.utils.compat import subprocess This module is safe to import from anywhere within astropy. """ from __future__ import absolute_import, print_function import subprocess # python2.7 and later provide a check_output method if not hasattr(subprocess, 'check_output'): from ._subprocess_py2 import check_output from subprocess import * spectral-cube-0.3.1/astropy_helpers/astropy_helpers/distutils_helpers.py0000644000077000000240000001735512533471373027076 0ustar adamstaff00000000000000""" This module contains various utilities for introspecting the distutils module and the setup process. Some of these utilities require the `astropy_helpers.setup_helpers.register_commands` function to be called first, as it will affect introspection of setuptools command-line arguments. Other utilities in this module do not have that restriction. 
""" import os import sys from distutils import ccompiler from distutils.dist import Distribution from distutils.errors import DistutilsError from .utils import silence # This function, and any functions that call it, require the setup in # `astropy_helpers.setup_helpers.register_commands` to be run first. def get_dummy_distribution(): """ Returns a distutils Distribution object used to instrument the setup environment before calling the actual setup() function. """ from .setup_helpers import _module_state if _module_state['registered_commands'] is None: raise RuntimeError( 'astropy_helpers.setup_helpers.register_commands() must be ' 'called before using ' 'astropy_helpers.setup_helpers.get_dummy_distribution()') # Pre-parse the Distutils command-line options and config files to if # the option is set. dist = Distribution({'script_name': os.path.basename(sys.argv[0]), 'script_args': sys.argv[1:]}) dist.cmdclass.update(_module_state['registered_commands']) with silence(): try: dist.parse_config_files() dist.parse_command_line() except (DistutilsError, AttributeError, SystemExit): # Let distutils handle DistutilsErrors itself AttributeErrors can # get raise for ./setup.py --help SystemExit can be raised if a # display option was used, for example pass return dist def get_distutils_option(option, commands): """ Returns the value of the given distutils option. Parameters ---------- option : str The name of the option commands : list of str The list of commands on which this option is available Returns ------- val : str or None the value of the given distutils option. If the option is not set, returns None. """ dist = get_dummy_distribution() for cmd in commands: cmd_opts = dist.command_options.get(cmd) if cmd_opts is not None and option in cmd_opts: return cmd_opts[option][1] else: return None def get_distutils_build_option(option): """ Returns the value of the given distutils build option. Parameters ---------- option : str The name of the option Returns ------- val : str or None The value of the given distutils build option. If the option is not set, returns None. """ return get_distutils_option(option, ['build', 'build_ext', 'build_clib']) def get_distutils_install_option(option): """ Returns the value of the given distutils install option. Parameters ---------- option : str The name of the option Returns ------- val : str or None The value of the given distutils build option. If the option is not set, returns None. """ return get_distutils_option(option, ['install']) def get_distutils_build_or_install_option(option): """ Returns the value of the given distutils build or install option. Parameters ---------- option : str The name of the option Returns ------- val : str or None The value of the given distutils build or install option. If the option is not set, returns None. """ return get_distutils_option(option, ['build', 'build_ext', 'build_clib', 'install']) def get_compiler_option(): """ Determines the compiler that will be used to build extension modules. Returns ------- compiler : str The compiler option specified for the build, build_ext, or build_clib command; or the default compiler for the platform if none was specified. """ compiler = get_distutils_build_option('compiler') if compiler is None: return ccompiler.get_default_compiler() return compiler def add_command_option(command, name, doc, is_bool=False): """ Add a custom option to a setup command. Issues a warning if the option already exists on that command. 
Parameters ---------- command : str The name of the command as given on the command line name : str The name of the build option doc : str A short description of the option, for the `--help` message is_bool : bool, optional When `True`, the option is a boolean option and doesn't require an associated value. """ dist = get_dummy_distribution() cmdcls = dist.get_command_class(command) if (hasattr(cmdcls, '_astropy_helpers_options') and name in cmdcls._astropy_helpers_options): return attr = name.replace('-', '_') if hasattr(cmdcls, attr): raise RuntimeError( '{0!r} already has a {1!r} class attribute, barring {2!r} from ' 'being usable as a custom option name.'.format(cmdcls, attr, name)) for idx, cmd in enumerate(cmdcls.user_options): if cmd[0] == name: log.warn('Overriding existing {0!r} option ' '{1!r}'.format(command, name)) del cmdcls.user_options[idx] if name in cmdcls.boolean_options: cmdcls.boolean_options.remove(name) break cmdcls.user_options.append((name, None, doc)) if is_bool: cmdcls.boolean_options.append(name) # Distutils' command parsing requires that a command object have an # attribute with the same name as the option (with '-' replaced with '_') # in order for that option to be recognized as valid setattr(cmdcls, attr, None) # This caches the options added through add_command_option so that if it is # run multiple times in the same interpreter repeated adds are ignored # (this way we can still raise a RuntimeError if a custom option overrides # a built-in option) if not hasattr(cmdcls, '_astropy_helpers_options'): cmdcls._astropy_helpers_options = set([name]) else: cmdcls._astropy_helpers_options.add(name) def get_distutils_display_options(): """ Returns a set of all the distutils display options in their long and short forms. These are the setup.py arguments such as --name or --version which print the project's metadata and then exit. Returns ------- opts : set The long and short form display option arguments, including the - or -- """ short_display_opts = set('-' + o[1] for o in Distribution.display_options if o[1]) long_display_opts = set('--' + o[0] for o in Distribution.display_options) # Include -h and --help which are not explicitly listed in # Distribution.display_options (as they are handled by optparse) short_display_opts.add('-h') long_display_opts.add('--help') # This isn't the greatest approach to hardcode these commands. # However, there doesn't seem to be a good way to determine # whether build *will be* run as part of the command at this # phase. display_commands = set([ 'clean', 'register', 'setopt', 'saveopts', 'egg_info', 'alias']) return short_display_opts.union(long_display_opts.union(display_commands)) def is_distutils_display_option(): """ Returns True if sys.argv contains any of the distutils display options such as --version or --name. """ display_options = get_distutils_display_options() return bool(set(sys.argv[1:]).intersection(display_options)) spectral-cube-0.3.1/astropy_helpers/astropy_helpers/git_helpers.py0000644000077000000240000001372712533471373025634 0ustar adamstaff00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst """ Utilities for retrieving revision information from a project's git repository. 
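
For example (a sketch; the outputs shown are hypothetical and depend
entirely on the state of the repository)::

    >>> from astropy_helpers.git_helpers import get_git_devstr
    >>> get_git_devstr()           # doctest: +SKIP
    '1024'
    >>> get_git_devstr(sha=True)   # doctest: +SKIP
    'deadbeefdeadbeefdeadbeefdeadbeefdeadbeef'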
""" # Do not remove the following comment; it is used by # astropy_helpers.version_helpers to determine the beginning of the code in # this module # BEGIN import locale import os import subprocess import warnings def _decode_stdio(stream): try: stdio_encoding = locale.getdefaultlocale()[1] or 'utf-8' except ValueError: stdio_encoding = 'utf-8' try: text = stream.decode(stdio_encoding) except UnicodeDecodeError: # Final fallback text = stream.decode('latin1') return text def update_git_devstr(version, path=None): """ Updates the git revision string if and only if the path is being imported directly from a git working copy. This ensures that the revision number in the version string is accurate. """ try: # Quick way to determine if we're in git or not - returns '' if not devstr = get_git_devstr(sha=True, show_warning=False, path=path) except OSError: return version if not devstr: # Probably not in git so just pass silently return version if 'dev' in version: # update to the current git revision version_base = version.split('.dev', 1)[0] devstr = get_git_devstr(sha=False, show_warning=False, path=path) return version_base + '.dev' + devstr else: #otherwise it's already the true/release version return version def get_git_devstr(sha=False, show_warning=True, path=None): """ Determines the number of revisions in this repository. Parameters ---------- sha : bool If True, the full SHA1 hash will be returned. Otherwise, the total count of commits in the repository will be used as a "revision number". show_warning : bool If True, issue a warning if git returns an error code, otherwise errors pass silently. path : str or None If a string, specifies the directory to look in to find the git repository. If `None`, the current working directory is used, and must be the root of the git repository. If given a filename it uses the directory containing that file. Returns ------- devversion : str Either a string with the revision number (if `sha` is False), the SHA1 hash of the current commit (if `sha` is True), or an empty string if git version info could not be identified. """ if path is None: path = os.getcwd() if not _get_repo_path(path, levels=0): return '' if not os.path.isdir(path): path = os.path.abspath(os.path.dirname(path)) if sha: # Faster for getting just the hash of HEAD cmd = ['rev-parse', 'HEAD'] else: cmd = ['rev-list', '--count', 'HEAD'] def run_git(cmd): try: p = subprocess.Popen(['git'] + cmd, cwd=path, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE) stdout, stderr = p.communicate() except OSError as e: if show_warning: warnings.warn('Error running git: ' + str(e)) return (None, b'', b'') if p.returncode == 128: if show_warning: warnings.warn('No git repository present at {0!r}! 
Using ' 'default dev version.'.format(path)) return (p.returncode, b'', b'') if p.returncode == 129: if show_warning: warnings.warn('Your git looks old (does it support {0}?); ' 'consider upgrading to v1.7.2 or ' 'later.'.format(cmd[0])) return (p.returncode, stdout, stderr) elif p.returncode != 0: if show_warning: warnings.warn('Git failed while determining revision ' 'count: {0}'.format(_decode_stdio(stderr))) return (p.returncode, stdout, stderr) return p.returncode, stdout, stderr returncode, stdout, stderr = run_git(cmd) if not sha and returncode == 129: # git returns 129 if a command option failed to parse; in # particular this could happen in git versions older than 1.7.2 # where the --count option is not supported # Also use --abbrev-commit and --abbrev=0 to display the minimum # number of characters needed per-commit (rather than the full hash) cmd = ['rev-list', '--abbrev-commit', '--abbrev=0', 'HEAD'] returncode, stdout, stderr = run_git(cmd) # Fall back on the old method of getting all revisions and counting # the lines if returncode == 0: return str(stdout.count(b'\n')) else: return '' elif sha: return _decode_stdio(stdout)[:40] else: return _decode_stdio(stdout).strip() def _get_repo_path(pathname, levels=None): """ Given a file or directory name, determine the root of the git repository this path is under. If given, this won't look any higher than ``levels`` (that is, if ``levels=0`` then the given path must be the root of the git repository and is returned if so. Returns `None` if the given path could not be determined to belong to a git repo. """ if os.path.isfile(pathname): current_dir = os.path.abspath(os.path.dirname(pathname)) elif os.path.isdir(pathname): current_dir = os.path.abspath(pathname) else: return None current_level = 0 while levels is None or current_level <= levels: if os.path.exists(os.path.join(current_dir, '.git')): return current_dir current_level += 1 if current_dir == os.path.dirname(current_dir): break current_dir = os.path.dirname(current_dir) return None spectral-cube-0.3.1/astropy_helpers/astropy_helpers/setup_helpers.py0000644000077000000240000007367112533471373026215 0ustar adamstaff00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst """ This module contains a number of utilities for use during setup/build/packaging that are useful to astropy as a whole. 
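
A typical affiliated-package ``setup.py`` uses this module roughly as follows
(a minimal sketch; the package name and version are hypothetical)::

    from astropy_helpers.setup_helpers import (
        register_commands, get_debug_option, get_package_info)

    cmdclass = register_commands('mypackage', '0.1.dev', release=False)
    package_info = get_package_info()
    # setup(name='mypackage', version='0.1.dev', cmdclass=cmdclass,
    #       **package_info)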
""" from __future__ import absolute_import, print_function import collections import os import re import shlex import shutil import subprocess import sys import textwrap import traceback from distutils import log, ccompiler, sysconfig from distutils.dist import Distribution from distutils.errors import DistutilsOptionError, DistutilsModuleError from distutils.core import Extension from distutils.core import Command from distutils.command.sdist import sdist as DistutilsSdist from setuptools import find_packages as _find_packages from .distutils_helpers import * from .version_helpers import get_pkg_version_module from .test_helpers import AstropyTest from .utils import (silence, walk_skip_hidden, import_file, extends_doc, resolve_name) from .commands.build_ext import generate_build_ext_command from .commands.build_py import AstropyBuildPy from .commands.install import AstropyInstall from .commands.install_lib import AstropyInstallLib from .commands.register import AstropyRegister # This import is not used in this module, but it is included for backwards # compat with version 0.4, which included this function in the public API # for this module from .utils import get_numpy_include_path, write_if_different from .commands.build_ext import should_build_with_cython _module_state = { 'adjusted_compiler': False, 'registered_commands': None, 'have_cython': False, 'have_sphinx': False, 'package_cache': None, 'compiler_version_cache': {} } try: import Cython _module_state['have_cython'] = True except ImportError: pass try: import sphinx _module_state['have_sphinx'] = True except ValueError as e: # This can occur deep in the bowels of Sphinx's imports by way of docutils # and an occurrence of this bug: http://bugs.python.org/issue18378 # In this case sphinx is effectively unusable if 'unknown locale' in e.args[0]: log.warn( "Possible misconfiguration of one of the environment variables " "LC_ALL, LC_CTYPES, LANG, or LANGUAGE. For an example of how to " "configure your system's language environment on OSX see " "http://blog.remibergsma.com/2012/07/10/" "setting-locales-correctly-on-mac-osx-terminal-application/") except ImportError: pass except SyntaxError: # occurs if markupsafe is recent version, which doesn't support Python 3.2 pass PY3 = sys.version_info[0] >= 3 # This adds a new keyword to the setup() function Distribution.skip_2to3 = [] def adjust_compiler(package): """ This function detects broken compilers and switches to another. If the environment variable CC is explicitly set, or a compiler is specified on the commandline, no override is performed -- the purpose here is to only override a default compiler. The specific compilers with problems are: * The default compiler in XCode-4.2, llvm-gcc-4.2, segfaults when compiling wcslib. The set of broken compilers can be updated by changing the compiler_mapping variable. It is a list of 2-tuples where the first in the pair is a regular expression matching the version of the broken compiler, and the second is the compiler to change to. """ compiler_mapping = [ (b'i686-apple-darwin[0-9]*-llvm-gcc-4.2', 'clang') ] if _module_state['adjusted_compiler']: return # Whatever the result of this function is, it only needs to be run once _module_state['adjusted_compiler'] = True if 'CC' in os.environ: # Check that CC is not set to llvm-gcc-4.2 c_compiler = os.environ['CC'] try: version = get_compiler_version(c_compiler) except OSError: msg = textwrap.dedent( """ The C compiler set by the CC environment variable: {compiler:s} cannot be found or executed. 
""".format(compiler=c_compiler)) log.warn(msg) sys.exit(1) for broken, fixed in compiler_mapping: if re.match(broken, version): msg = textwrap.dedent( """Compiler specified by CC environment variable ({compiler:s}:{version:s}) will fail to compile {pkg:s}. Please set CC={fixed:s} and try again. You can do this, for example, by running: CC={fixed:s} python setup.py where is the command you ran. """.format(compiler=c_compiler, version=version, pkg=package, fixed=fixed)) log.warn(msg) sys.exit(1) # If C compiler is set via CC, and isn't broken, we are good to go. We # should definitely not try accessing the compiler specified by # ``sysconfig.get_config_var('CC')`` lower down, because this may fail # if the compiler used to compile Python is missing (and maybe this is # why the user is setting CC). For example, the official Python 2.7.3 # MacOS X binary was compiled with gcc-4.2, which is no longer available # in XCode 4. return if get_distutils_build_option('compiler'): return compiler_type = ccompiler.get_default_compiler() if compiler_type == 'unix': # We have to get the compiler this way, as this is the one that is # used if os.environ['CC'] is not set. It is actually read in from # the Python Makefile. Note that this is not necessarily the same # compiler as returned by ccompiler.new_compiler() c_compiler = sysconfig.get_config_var('CC') try: version = get_compiler_version(c_compiler) except OSError: msg = textwrap.dedent( """ The C compiler used to compile Python {compiler:s}, and which is normally used to compile C extensions, is not available. You can explicitly specify which compiler to use by setting the CC environment variable, for example: CC=gcc python setup.py or if you are using MacOS X, you can try: CC=clang python setup.py """.format(compiler=c_compiler)) log.warn(msg) sys.exit(1) for broken, fixed in compiler_mapping: if re.match(broken, version): os.environ['CC'] = fixed break def get_compiler_version(compiler): if compiler in _module_state['compiler_version_cache']: return _module_state['compiler_version_cache'][compiler] # Different flags to try to get the compiler version # TODO: It might be worth making this configurable to support # arbitrary odd compilers; though all bets may be off in such # cases anyway flags = ['--version', '--Version', '-version', '-Version', '-v', '-V'] def try_get_version(flag): process = subprocess.Popen( shlex.split(compiler) + [flag], stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout, stderr = process.communicate() if process.returncode != 0: return 'unknown' output = stdout.strip() if not output: # Some compilers return their version info on stderr output = stderr.strip() if not output: output = 'unknown' return output for flag in flags: version = try_get_version(flag) if version != 'unknown': break # Cache results to speed up future calls _module_state['compiler_version_cache'][compiler] = version return version def get_debug_option(packagename): """ Determines if the build is in debug mode. Returns ------- debug : bool True if the current build was started with the debug option, False otherwise. """ try: current_debug = get_pkg_version_module(packagename, fromlist=['debug'])[0] except (ImportError, AttributeError): current_debug = None # Only modify the debug flag if one of the build commands was explicitly # run (i.e. 
not as a sub-command of something else) dist = get_dummy_distribution() if any(cmd in dist.commands for cmd in ['build', 'build_ext']): debug = bool(get_distutils_build_option('debug')) else: debug = bool(current_debug) if current_debug is not None and current_debug != debug: build_ext_cmd = dist.get_command_class('build_ext') build_ext_cmd.force_rebuild = True return debug def register_commands(package, version, release, srcdir='.'): if _module_state['registered_commands'] is not None: return _module_state['registered_commands'] if _module_state['have_sphinx']: from .commands.build_sphinx import AstropyBuildSphinx else: AstropyBuildSphinx = FakeBuildSphinx _module_state['registered_commands'] = registered_commands = { 'test': generate_test_command(package), # Use distutils' sdist because it respects package_data. # setuptools/distributes sdist requires duplication of information in # MANIFEST.in 'sdist': DistutilsSdist, # The exact form of the build_ext command depends on whether or not # we're building a release version 'build_ext': generate_build_ext_command(package, release), # We have a custom build_py to generate the default configuration file 'build_py': AstropyBuildPy, # Since install can (in some circumstances) be run without # first building, we also need to override install and # install_lib. See #2223 'install': AstropyInstall, 'install_lib': AstropyInstallLib, 'register': AstropyRegister, 'build_sphinx': AstropyBuildSphinx } # Need to override the __name__ here so that the commandline options are # presented as being related to the "build" command, for example; normally # this wouldn't be necessary since commands also have a command_name # attribute, but there is a bug in distutils' help display code that it # uses __name__ instead of command_name. Yay distutils! for name, cls in registered_commands.items(): cls.__name__ = name # Add a few custom options; more of these can be added by specific packages # later for option in [ ('use-system-libraries', "Use system libraries whenever possible", True)]: add_command_option('build', *option) add_command_option('install', *option) add_command_hooks(registered_commands, srcdir=srcdir) return registered_commands def add_command_hooks(commands, srcdir='.'): """ Look through setup_package.py modules for functions with names like ``pre__hook`` and ``post__hook`` where ```` is the name of a ``setup.py`` command (e.g. build_ext). If either hook is present this adds a wrapped version of that command to the passed in ``commands`` `dict`. ``commands`` may be pre-populated with other custom distutils command classes that should be wrapped if there are hooks for them (e.g. `AstropyBuildPy`). 
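
    For example, a ``setup_package.py`` module could provide (a hedged
    sketch; the hook body is hypothetical)::

        def post_build_ext_hook(cmd_obj):
            # cmd_obj is the finalized build_ext command object
            print('extensions built into', cmd_obj.build_lib)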
""" hook_re = re.compile(r'^(pre|post)_(.+)_hook$') # Distutils commands have a method of the same name, but it is not a # *classmethod* (which probably didn't exist when distutils was first # written) def get_command_name(cmdcls): if hasattr(cmdcls, 'command_name'): return cmdcls.command_name else: return cmdcls.__name__ packages = filter_packages(find_packages(srcdir)) dist = get_dummy_distribution() hooks = collections.defaultdict(dict) for setuppkg in iter_setup_packages(srcdir, packages): for name, obj in vars(setuppkg).items(): match = hook_re.match(name) if not match: continue hook_type = match.group(1) cmd_name = match.group(2) cmd_cls = dist.get_command_class(cmd_name) if hook_type not in hooks[cmd_name]: hooks[cmd_name][hook_type] = [] hooks[cmd_name][hook_type].append((setuppkg.__name__, obj)) for cmd_name, cmd_hooks in hooks.items(): commands[cmd_name] = generate_hooked_command( cmd_name, dist.get_command_class(cmd_name), cmd_hooks) def generate_hooked_command(cmd_name, cmd_cls, hooks): """ Returns a generated subclass of ``cmd_cls`` that runs the pre- and post-command hooks for that command before and after the ``cmd_cls.run`` method. """ def run(self, orig_run=cmd_cls.run): self.run_command_hooks('pre_hooks') orig_run(self) self.run_command_hooks('post_hooks') return type(cmd_name, (cmd_cls, object), {'run': run, 'run_command_hooks': run_command_hooks, 'pre_hooks': hooks.get('pre', []), 'post_hooks': hooks.get('post', [])}) def run_command_hooks(cmd_obj, hook_kind): """Run hooks registered for that command and phase. *cmd_obj* is a finalized command object; *hook_kind* is either 'pre_hook' or 'post_hook'. """ hooks = getattr(cmd_obj, hook_kind, None) if not hooks: return for modname, hook in hooks: if isinstance(hook, str): try: hook_obj = resolve_name(hook) except ImportError as exc: raise DistutilsModuleError( 'cannot find hook {0}: {1}'.format(hook, err)) else: hook_obj = hook if not callable(hook_obj): raise DistutilsOptionError('hook {0!r} is not callable' % hook) log.info('running {0} from {1} for {2} command'.format( hook_kind.rstrip('s'), modname, cmd_obj.get_command_name())) try : hook_obj(cmd_obj) except Exception as exc: log.error('{0} command hook {1} raised an exception: %s\n'.format( hook_obj.__name__, cmd_obj.get_command_name())) log.error(traceback.format_exc()) sys.exit(1) def generate_test_command(package_name): """ Creates a custom 'test' command for the given package which sets the command's ``package_name`` class attribute to the name of the package being tested. """ return type(package_name.title() + 'Test', (AstropyTest,), {'package_name': package_name}) def update_package_files(srcdir, extensions, package_data, packagenames, package_dirs): """ This function is deprecated and maintained for backward compatibility with affiliated packages. Affiliated packages should update their setup.py to use `get_package_info` instead. """ info = get_package_info(srcdir) extensions.extend(info['ext_modules']) package_data.update(info['package_data']) packagenames = list(set(packagenames + info['packages'])) package_dirs.update(info['package_dir']) def get_package_info(srcdir='.', exclude=()): """ Collates all of the information for building all subpackages subpackages and returns a dictionary of keyword arguments that can be passed directly to `distutils.setup`. 
The purpose of this function is to allow subpackages to update the arguments to the package's ``setup()`` function in its setup.py script, rather than having to specify all extensions/package data directly in the ``setup.py``. See Astropy's own ``setup.py`` for example usage and the Astropy development docs for more details. This function obtains that information by iterating through all packages in ``srcdir`` and locating a ``setup_package.py`` module. This module can contain the following functions: ``get_extensions()``, ``get_package_data()``, ``get_build_options()``, ``get_external_libraries()``, and ``requires_2to3()``. Each of those functions take no arguments. - ``get_extensions`` returns a list of `distutils.extension.Extension` objects. - ``get_package_data()`` returns a dict formatted as required by the ``package_data`` argument to ``setup()``. - ``get_build_options()`` returns a list of tuples describing the extra build options to add. - ``get_external_libraries()`` returns a list of libraries that can optionally be built using external dependencies. - ``get_entry_points()`` returns a dict formatted as required by the ``entry_points`` argument to ``setup()``. - ``requires_2to3()`` should return `True` when the source code requires `2to3` processing to run on Python 3.x. If ``requires_2to3()`` is missing, it is assumed to return `True`. """ ext_modules = [] packages = [] package_data = {} package_dir = {} skip_2to3 = [] # Use the find_packages tool to locate all packages and modules packages = filter_packages(find_packages(srcdir, exclude=exclude)) # For each of the setup_package.py modules, extract any # information that is needed to install them. The build options # are extracted first, so that their values will be available in # subsequent calls to `get_extensions`, etc. for setuppkg in iter_setup_packages(srcdir, packages): if hasattr(setuppkg, 'get_build_options'): options = setuppkg.get_build_options() for option in options: add_command_option('build', *option) if hasattr(setuppkg, 'get_external_libraries'): libraries = setuppkg.get_external_libraries() for library in libraries: add_external_library(library) if hasattr(setuppkg, 'requires_2to3'): requires_2to3 = setuppkg.requires_2to3() else: requires_2to3 = True if not requires_2to3: skip_2to3.append( os.path.dirname(setuppkg.__file__)) for setuppkg in iter_setup_packages(srcdir, packages): # get_extensions must include any Cython extensions by their .pyx # filename. if hasattr(setuppkg, 'get_extensions'): ext_modules.extend(setuppkg.get_extensions()) if hasattr(setuppkg, 'get_package_data'): package_data.update(setuppkg.get_package_data()) # Locate any .pyx files not already specified, and add their extensions in. # The default include dirs include numpy to facilitate numerical work. ext_modules.extend(get_cython_extensions(srcdir, packages, ext_modules, ['numpy'])) # Now remove extensions that have the special name 'skip_cython', as they # exist Only to indicate that the cython extensions shouldn't be built for i, ext in reversed(list(enumerate(ext_modules))): if ext.name == 'skip_cython': del ext_modules[i] # On Microsoft compilers, we need to pass the '/MANIFEST' # commandline argument. This was the default on MSVC 9.0, but is # now required on MSVC 10.0, but it doesn't seem to hurt to add # it unconditionally. 
    if get_compiler_option() == 'msvc':
        for ext in ext_modules:
            ext.extra_link_args.append('/MANIFEST')

    return {
        'ext_modules': ext_modules,
        'packages': packages,
        'package_dir': package_dir,
        'package_data': package_data,
        'skip_2to3': skip_2to3
        }


def iter_setup_packages(srcdir, packages):
    """ A generator that finds and imports all of the ``setup_package.py``
    modules in the source packages.

    Returns
    -------
    modgen : generator
        A generator that yields the imported ``setup_package.py`` modules,
        each under a module name of the form
        ``<packagename>.setup_package``.
    """

    for packagename in packages:
        package_parts = packagename.split('.')
        package_path = os.path.join(srcdir, *package_parts)
        setup_package = os.path.relpath(
            os.path.join(package_path, 'setup_package.py'))

        if os.path.isfile(setup_package):
            module = import_file(setup_package,
                                 name=packagename + '.setup_package')
            yield module


def iter_pyx_files(package_dir, package_name):
    """
    A generator that yields Cython source files (ending in '.pyx') in the
    source packages.

    Returns
    -------
    pyxgen : generator
        A generator that yields (extmod, fullfn) where `extmod` is the
        full name of the module that the .pyx file would live in based
        on the source directory structure, and `fullfn` is the path to
        the .pyx file.
    """
    for dirpath, dirnames, filenames in walk_skip_hidden(package_dir):
        for fn in filenames:
            if fn.endswith('.pyx'):
                fullfn = os.path.relpath(os.path.join(dirpath, fn))
                # Package must match file name
                extmod = '.'.join([package_name, fn[:-4]])
                yield (extmod, fullfn)

        break  # Don't recurse into subdirectories


def get_cython_extensions(srcdir, packages, prevextensions=tuple(),
                          extincludedirs=None):
    """
    Looks for Cython files and generates Extensions if needed.

    Parameters
    ----------
    srcdir : str
        Path to the root of the source directory to search.
    packages : list of str
        The packages to search for Cython source files.
    prevextensions : list of `~distutils.core.Extension` objects
        The extensions that are already defined.  Any .pyx files already
        here will be ignored.
    extincludedirs : list of str or None
        Directories to include as the `include_dirs` argument to the
        generated `~distutils.core.Extension` objects.

    Returns
    -------
    exts : list of `~distutils.core.Extension` objects
        The new extensions that are needed to compile all .pyx files
        (does not include any already in `prevextensions`).
    """

    # Vanilla setuptools and old versions of distribute include Cython files
    # as .c files in the sources, not .pyx, so we cannot simply look for
    # existing .pyx sources in the previous sources, but we should also check
    # for .c files with the same remaining filename. So we look for .pyx and
    # .c files, and we strip the extension.
    prevsourcepaths = []
    ext_modules = []

    for ext in prevextensions:
        for s in ext.sources:
            if s.endswith(('.pyx', '.c')):
                sourcepath = os.path.realpath(os.path.splitext(s)[0])
                prevsourcepaths.append(sourcepath)

    for package_name in packages:
        package_parts = package_name.split('.')
        package_path = os.path.join(srcdir, *package_parts)

        for extmod, pyxfn in iter_pyx_files(package_path, package_name):
            sourcepath = os.path.realpath(os.path.splitext(pyxfn)[0])
            if sourcepath not in prevsourcepaths:
                ext_modules.append(Extension(extmod, [pyxfn],
                                             include_dirs=extincludedirs))

    return ext_modules


class DistutilsExtensionArgs(collections.defaultdict):
    """
    A special dictionary whose default values are the empty list.

    This is useful for building up a set of arguments for
    `distutils.Extension` without worrying whether the entry is
    already present.
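    For example (an illustrative sketch)::

        args = DistutilsExtensionArgs()
        args['libraries'].append('m')       # no KeyError on first access
        args.update({'libraries': ['z']})   # extends rather than replaces
        # args['libraries'] is now ['m', 'z']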
""" def __init__(self, *args, **kwargs): def default_factory(): return [] super(DistutilsExtensionArgs, self).__init__( default_factory, *args, **kwargs) def update(self, other): for key, val in other.items(): self[key].extend(val) def pkg_config(packages, default_libraries, executable='pkg-config'): """ Uses pkg-config to update a set of distutils Extension arguments to include the flags necessary to link against the given packages. If the pkg-config lookup fails, default_libraries is applied to libraries. Parameters ---------- packages : list of str A list of pkg-config packages to look up. default_libraries : list of str A list of library names to use if the pkg-config lookup fails. Returns ------- config : dict A dictionary containing keyword arguments to `distutils.Extension`. These entries include: - ``include_dirs``: A list of include directories - ``library_dirs``: A list of library directories - ``libraries``: A list of libraries - ``define_macros``: A list of macro defines - ``undef_macros``: A list of macros to undefine - ``extra_compile_args``: A list of extra arguments to pass to the compiler """ flag_map = {'-I': 'include_dirs', '-L': 'library_dirs', '-l': 'libraries', '-D': 'define_macros', '-U': 'undef_macros'} command = "{0} --libs --cflags {1}".format(executable, ' '.join(packages)), result = DistutilsExtensionArgs() try: pipe = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE) output = pipe.communicate()[0].strip() except subprocess.CalledProcessError as e: lines = [ "{0} failed. This may cause the build to fail below.".format(executable), " command: {0}".format(e.cmd), " returncode: {0}".format(e.returncode), " output: {0}".format(e.output) ] log.warn('\n'.join(lines)) result['libraries'].extend(default_libraries) else: if pipe.returncode != 0: lines = [ "pkg-config could not lookup up package(s) {0}.".format( ", ".join(packages)), "This may cause the build to fail below." ] log.warn('\n'.join(lines)) result['libraries'].extend(default_libraries) else: for token in output.split(): # It's not clear what encoding the output of # pkg-config will come to us in. It will probably be # some combination of pure ASCII (for the compiler # flags) and the filesystem encoding (for any argument # that includes directories or filenames), but this is # just conjecture, as the pkg-config documentation # doesn't seem to address it. arg = token[:2].decode('ascii') value = token[2:].decode(sys.getfilesystemencoding()) if arg in flag_map: if arg == '-D': value = tuple(value.split('=', 1)) result[flag_map[arg]].append(value) else: result['extra_compile_args'].append(value) return result def add_external_library(library): """ Add a build option for selecting the internal or system copy of a library. Parameters ---------- library : str The name of the library. If the library is `foo`, the build option will be called `--use-system-foo`. """ for command in ['build', 'build_ext', 'install']: add_command_option(command, str('use-system-' + library), 'Use the system {0} library'.format(library), is_bool=True) def use_system_library(library): """ Returns `True` if the build configuration indicates that the given library should use the system copy of the library rather than the internal one. For the given library `foo`, this will be `True` if `--use-system-foo` or `--use-system-libraries` was provided at the commandline or in `setup.cfg`. 
    Parameters
    ----------
    library : str
        The name of the library

    Returns
    -------
    use_system : bool
        `True` if the build should use the system copy of the library.
    """
    return (
        get_distutils_build_or_install_option('use_system_{0}'.format(library))
        or get_distutils_build_or_install_option('use_system_libraries'))


@extends_doc(_find_packages)
def find_packages(where='.', exclude=(), invalidate_cache=False):
    """
    This version of ``find_packages`` caches previous results to speed up
    subsequent calls.  Use ``invalidate_cache=True`` to ignore cached
    results from previous ``find_packages`` calls, and repeat the package
    search.
    """

    if not invalidate_cache and _module_state['package_cache'] is not None:
        return _module_state['package_cache']

    packages = _find_packages(where=where, exclude=exclude)
    _module_state['package_cache'] = packages

    return packages


def filter_packages(packagenames):
    """
    Removes some packages from the package list that shouldn't be
    installed on the current version of Python.
    """

    if PY3:
        exclude = '_py2'
    else:
        exclude = '_py3'

    return [x for x in packagenames if not x.endswith(exclude)]


class FakeBuildSphinx(Command):
    """
    A dummy build_sphinx command that is called if Sphinx is not
    installed and displays a relevant error message.
    """

    # user options inherited from sphinx.setup_command.BuildDoc
    user_options = [
        ('fresh-env', 'E', ''),
        ('all-files', 'a', ''),
        ('source-dir=', 's', ''),
        ('build-dir=', None, ''),
        ('config-dir=', 'c', ''),
        ('builder=', 'b', ''),
        ('project=', None, ''),
        ('version=', None, ''),
        ('release=', None, ''),
        ('today=', None, ''),
        ('link-index', 'i', ''),
    ]

    # user options appended in astropy.setup_helpers.AstropyBuildSphinx
    user_options.append(('warnings-returncode', 'w', ''))
    user_options.append(('clean-docs', 'l', ''))
    user_options.append(('no-intersphinx', 'n', ''))
    user_options.append(('open-docs-in-browser', 'o', ''))

    def initialize_options(self):
        log.error('error: Sphinx must be installed for build_sphinx')
        sys.exit(1)
spectral-cube-0.3.1/astropy_helpers/astropy_helpers/sphinx/0000755000077000000240000000000012654610601024245 5ustar adamstaff00000000000000spectral-cube-0.3.1/astropy_helpers/astropy_helpers/sphinx/__init__.py0000644000077000000240000000041412340434262026354 0ustar adamstaff00000000000000"""
This package contains utilities and extensions for the Astropy sphinx
documentation.  In particular, the `astropy.sphinx.conf` should be imported
by the sphinx ``conf.py`` file for affiliated packages that wish to make use
of the Astropy documentation format.
"""
spectral-cube-0.3.1/astropy_helpers/astropy_helpers/sphinx/conf.py0000644000077000000240000002477212533471373025563 0ustar adamstaff00000000000000# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
#
# Astropy shared Sphinx settings.  These settings are shared between
# astropy itself and affiliated packages.
#
# Note that not all possible configuration values are present in this file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.

import warnings
from os import path

# -- General configuration ----------------------------------------------------

# The version check in Sphinx itself can only compare the major and
# minor parts of the version number, not the micro.
To do a more
# specific version check, call check_sphinx_version("x.y.z") from
# your project's conf.py
needs_sphinx = '1.2'

def check_sphinx_version(expected_version):
    import sphinx
    from distutils import version
    sphinx_version = version.LooseVersion(sphinx.__version__)
    expected_version = version.LooseVersion(expected_version)
    if sphinx_version < expected_version:
        raise RuntimeError(
            "At least Sphinx version {0} is required to build this "
            "documentation. Found {1}.".format(
                expected_version, sphinx_version))

# Configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
    'python': ('http://docs.python.org/', None),
    'python3': ('http://docs.python.org/3/',
                path.abspath(path.join(path.dirname(__file__),
                                       'local/python3links.inv'))),
    'numpy': ('http://docs.scipy.org/doc/numpy/', None),
    'scipy': ('http://docs.scipy.org/doc/scipy/reference/', None),
    'matplotlib': ('http://matplotlib.org/', None),
    'astropy': ('http://docs.astropy.org/en/stable/', None),
    'h5py': ('http://docs.h5py.org/en/latest/', None)
    }

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']

# Add any paths that contain templates here, relative to this directory.
# templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# The reST default role (used for this markup: `text`) to use for all
# documents. Set to the "smart" one.
default_role = 'obj'

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None

# This is added to the end of RST files - a good place to put substitutions to
# be used globally.
rst_epilog = """
.. _Astropy: http://astropy.org
"""

# -- Project information ------------------------------------------------------

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'

# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
#pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []

# -- Settings for extensions and extension options ----------------------------

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [ 'sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'sphinx.ext.todo', 'sphinx.ext.coverage', 'sphinx.ext.pngmath', 'sphinx.ext.inheritance_diagram', 'astropy_helpers.sphinx.ext.numpydoc', 'astropy_helpers.sphinx.ext.astropyautosummary', 'astropy_helpers.sphinx.ext.autodoc_enhancements', 'astropy_helpers.sphinx.ext.automodsumm', 'astropy_helpers.sphinx.ext.automodapi', 'astropy_helpers.sphinx.ext.tocdepthfix', 'astropy_helpers.sphinx.ext.doctest', 'astropy_helpers.sphinx.ext.changelog_links', 'astropy_helpers.sphinx.ext.viewcode', # Use patched version of viewcode 'astropy_helpers.sphinx.ext.smart_resolver' ] # Above, we use a patched version of viewcode rather than 'sphinx.ext.viewcode' # This can be changed to the sphinx version once the following issue is fixed # in sphinx: # https://bitbucket.org/birkenfeld/sphinx/issue/623/ # extension-viewcode-fails-with-function try: import matplotlib.sphinxext.plot_directive extensions += [matplotlib.sphinxext.plot_directive.__name__] # AttributeError is checked here in case matplotlib is installed but # Sphinx isn't. Note that this module is imported by the config file # generator, even if we're not building the docs. except (ImportError, AttributeError): warnings.warn( "matplotlib's plot_directive could not be imported. " + "Inline plots will not be included in the output") # Don't show summaries of the members in each class along with the # class' docstring numpydoc_show_class_members = False autosummary_generate = True automodapi_toctreedirnm = 'api' # Class documentation should contain *both* the class docstring and # the __init__ docstring autoclass_content = "both" # Render inheritance diagrams in SVG graphviz_output_format = "svg" # -- Options for HTML output ------------------------------------------------- # Add any paths that contain custom themes here, relative to this directory. html_theme_path = [path.abspath(path.join(path.dirname(__file__), 'themes'))] # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'bootstrap-astropy' # Custom sidebar templates, maps document names to template names. html_sidebars = { '**': ['localtoc.html'], 'search': [], 'genindex': [], 'py-modindex': [], } # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. # included in the bootstrap-astropy theme html_favicon = path.join(html_theme_path[0], html_theme, 'static', 'astropy_logo.ico') # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. html_last_updated_fmt = '%d %b %Y' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. #html_theme_options = {} # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. 
#html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. #html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # -- Options for LaTeX output ------------------------------------------------ # The paper size ('letter' or 'a4'). #latex_paper_size = 'letter' # The font size ('10pt', '11pt' or '12pt'). #latex_font_size = '10pt' # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. latex_use_parts = True # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. #latex_show_urls = False # Additional stuff for the LaTeX preamble. latex_preamble = r""" % Use a more modern-looking monospace font \usepackage{inconsolata} % The enumitem package provides unlimited nesting of lists and enums. % Sphinx may use this in the future, in which case this can be removed. % See https://bitbucket.org/birkenfeld/sphinx/issue/777/latex-output-too-deeply-nested \usepackage{enumitem} \setlistdepth{15} % In the parameters section, place a newline after the Parameters % header. (This is stolen directly from Numpy's conf.py, since it % affects Numpy-style docstrings). \usepackage{expdlist} \let\latexdescription=\description \def\description{\latexdescription{}{} \breaklabel} % Support the superscript Unicode numbers used by the "unicode" units % formatter \DeclareUnicodeCharacter{2070}{\ensuremath{^0}} \DeclareUnicodeCharacter{00B9}{\ensuremath{^1}} \DeclareUnicodeCharacter{00B2}{\ensuremath{^2}} \DeclareUnicodeCharacter{00B3}{\ensuremath{^3}} \DeclareUnicodeCharacter{2074}{\ensuremath{^4}} \DeclareUnicodeCharacter{2075}{\ensuremath{^5}} \DeclareUnicodeCharacter{2076}{\ensuremath{^6}} \DeclareUnicodeCharacter{2077}{\ensuremath{^7}} \DeclareUnicodeCharacter{2078}{\ensuremath{^8}} \DeclareUnicodeCharacter{2079}{\ensuremath{^9}} \DeclareUnicodeCharacter{207B}{\ensuremath{^-}} \DeclareUnicodeCharacter{00B0}{\ensuremath{^{\circ}}} \DeclareUnicodeCharacter{2032}{\ensuremath{^{\prime}}} \DeclareUnicodeCharacter{2033}{\ensuremath{^{\prime\prime}}} % Make the "warning" and "notes" sections use a sans-serif font to % make them stand out more. \renewenvironment{notice}[2]{ \def\py@noticetype{#1} \csname py@noticestart@#1\endcsname \textsf{\textbf{#2}} }{\csname py@noticeend@\py@noticetype\endcsname} """ # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_domain_indices = True # The name of an image file (relative to this directory) to place at the top of # the title page. 
#latex_logo = None


# -- Options for the linkcheck builder ----------------------------------------

# A timeout value, in seconds, for the linkcheck builder
linkcheck_timeout = 60
spectral-cube-0.3.1/astropy_helpers/astropy_helpers/sphinx/ext/0000755000077000000240000000000012654610601025045 5ustar adamstaff00000000000000spectral-cube-0.3.1/astropy_helpers/astropy_helpers/sphinx/ext/__init__.py0000644000077000000240000000013612412505144027153 0ustar adamstaff00000000000000from __future__ import division, absolute_import, print_function
from .numpydoc import setup
spectral-cube-0.3.1/astropy_helpers/astropy_helpers/sphinx/ext/astropyautosummary.py0000644000077000000240000000761712533471373031441 0ustar adamstaff00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This sphinx extension builds off of `sphinx.ext.autosummary` to clean up
some issues it presents in the Astropy docs.

The main issue this fixes is the summary tables getting cut off before the
end of the sentence in some cases.

Note: Sphinx 1.2 appears to have fixed the main issues in the stock
autosummary extension that are addressed by this extension.  So use of this
extension with newer versions of Sphinx is deprecated.
"""

import re

from distutils.version import LooseVersion

import sphinx
from sphinx.ext.autosummary import Autosummary

from ...utils import deprecated

# used in AstropyAutosummary.get_items
_itemsummrex = re.compile(r'^([A-Z].*?\.(?:\s|$))')


@deprecated('1.0', message='AstropyAutosummary is only needed when used '
                           'with Sphinx versions less than 1.2')
class AstropyAutosummary(Autosummary):
    def get_items(self, names):
        """Try to import the given names, and return a list of
        ``[(name, signature, summary_string, real_name), ...]``.
        """
        from sphinx.ext.autosummary import (get_import_prefixes_from_env,
                                            import_by_name, get_documenter,
                                            mangle_signature)

        env = self.state.document.settings.env
        prefixes = get_import_prefixes_from_env(env)

        items = []

        max_item_chars = 50

        for name in names:
            display_name = name
            if name.startswith('~'):
                name = name[1:]
                display_name = name.split('.')[-1]

            try:
                import_by_name_values = import_by_name(name, prefixes=prefixes)
            except ImportError:
                self.warn('[astropyautosummary] failed to import %s' % name)
                items.append((name, '', '', name))
                continue

            # to accommodate Sphinx v1.2.2 and v1.2.3
            if len(import_by_name_values) == 3:
                real_name, obj, parent = import_by_name_values
            elif len(import_by_name_values) == 4:
                real_name, obj, parent, module_name = import_by_name_values

            # NB.
using real_name here is important, since Documenters
            # handle module prefixes slightly differently
            documenter = get_documenter(obj, parent)(self, real_name)
            if not documenter.parse_name():
                self.warn('[astropyautosummary] failed to parse name %s' %
                          real_name)
                items.append((display_name, '', '', real_name))
                continue

            if not documenter.import_object():
                self.warn('[astropyautosummary] failed to import object %s' %
                          real_name)
                items.append((display_name, '', '', real_name))
                continue

            # -- Grab the signature
            sig = documenter.format_signature()
            if not sig:
                sig = ''
            else:
                max_chars = max(10, max_item_chars - len(display_name))
                sig = mangle_signature(sig, max_chars=max_chars)
                sig = sig.replace('*', r'\*')

            # -- Grab the summary
            doc = list(documenter.process_doc(documenter.get_doc()))

            while doc and not doc[0].strip():
                doc.pop(0)
            m = _itemsummrex.search(" ".join(doc).strip())
            if m:
                summary = m.group(1).strip()
            elif doc:
                summary = doc[0].strip()
            else:
                summary = ''

            items.append((display_name, sig, summary, real_name))

        return items


def setup(app):
    # need autosummary, of course
    app.setup_extension('sphinx.ext.autosummary')

    # Don't make the replacement if Sphinx is at least 1.2
    if LooseVersion(sphinx.__version__) < LooseVersion('1.2.0'):
        # this replaces the default autosummary with the astropy one
        app.add_directive('autosummary', AstropyAutosummary)
spectral-cube-0.3.1/astropy_helpers/astropy_helpers/sphinx/ext/autodoc_enhancements.py0000644000077000000240000000401412533471373031613 0ustar adamstaff00000000000000"""
Miscellaneous enhancements to help autodoc along.
"""

# See
# https://github.com/astropy/astropy-helpers/issues/116#issuecomment-71254836
# for further background on this.
def type_object_attrgetter(obj, attr, *defargs):
    """
    This implements an improved attrgetter for type objects (i.e. classes)
    that can handle class attributes that are implemented as properties on
    a metaclass.

    Normally `getattr` on a class with a `property` (say, "foo"), would
    return the `property` object itself.  However, if the class has a
    metaclass which *also* defines a `property` named "foo",
    ``getattr(cls, 'foo')`` will find the "foo" property on the metaclass
    and resolve it.  For the purposes of autodoc we just want to document
    the "foo" property defined on the class, not on the metaclass.

    For example::

        >>> class Meta(type):
        ...     @property
        ...     def foo(cls):
        ...         return 'foo'
        ...
        >>> class MyClass(metaclass=Meta):
        ...     @property
        ...     def foo(self):
        ...         \"\"\"Docstring for MyClass.foo property.\"\"\"
        ...         return 'myfoo'
        ...
        >>> getattr(MyClass, 'foo')
        'foo'
        >>> type_object_attrgetter(MyClass, 'foo')
        <property object at 0x...>
        >>> type_object_attrgetter(MyClass, 'foo').__doc__
        'Docstring for MyClass.foo property.'

    The last line of the example shows the desired behavior for the purposes
    of autodoc.
    """
    for base in obj.__mro__:
        if attr in base.__dict__:
            if isinstance(base.__dict__[attr], property):
                # Note, this should only be used for properties--for any other
                # type of descriptor (classmethod, for example) this can mess
                # up existing expectations of what getattr(cls, ...)
returns
                return base.__dict__[attr]
            break
    return getattr(obj, attr, *defargs)


def setup(app):
    app.add_autodoc_attrgetter(type, type_object_attrgetter)
spectral-cube-0.3.1/astropy_helpers/astropy_helpers/sphinx/ext/automodapi.py0000644000077000000240000003164712533471373027573 0ustar adamstaff00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This sphinx extension adds tools to simplify generating the API
documentation for Astropy packages and affiliated packages.

.. _automodapi:

========================
automodapi directive
========================

This directive takes a single argument that must be a module or package. It
will produce a block of documentation that includes the docstring for the
package, an :ref:`automodsumm` directive, and an :ref:`automod-diagram` if
there are any classes in the module.

If only the main docstring of the module/package is desired in the
documentation, use `automodule`_ instead of `automodapi`_.

It accepts the following options:

* ``:no-inheritance-diagram:``
    If present, the inheritance diagram will not be shown even if
    the module/package has classes.

* ``:skip: str``
    This option results in the specified object being skipped; that is, the
    object will *not* be included in the generated documentation. This
    option may appear any number of times to skip multiple objects.

* ``:no-main-docstr:``
    If present, the docstring for the module/package will not be generated.
    The function and class tables will still be used, however.

* ``:headings: str``
    Specifies the characters (in one string) used as the heading levels
    used for the generated section. This must have at least 2 characters
    (any after 2 will be ignored). This also *must* match the rest of the
    documentation on this page for sphinx to be happy. Defaults to "-^",
    which matches the convention used for Python's documentation, assuming
    the automodapi call is inside a top-level section (which usually uses
    '=').

* ``:no-heading:``
    If specified do not create a top level heading for the section. That
    is, do not create a title heading with text like "packagename
    Package".  The actual docstring for the package/module will still be
    shown, though, unless ``:no-main-docstr:`` is given.

* ``:allowed-package-names: str``
    Specifies the packages that functions/classes documented here are
    allowed to be from, as comma-separated list of package names. If not
    given, only objects that are actually in a subpackage of the package
    currently being documented are included.

This extension also adds two sphinx configuration options:

* ``automodapi_toctreedirnm``
    This must be a string that specifies the name of the directory the
    automodsumm generated documentation ends up in. This directory path
    should be relative to the documentation root (e.g., same place as
    ``index.rst``). Defaults to ``'api'``.

* ``automodapi_writereprocessed``
    Should be a bool, and if `True`, will cause `automodapi`_ to write files
    with any `automodapi`_ sections replaced with the content Sphinx
    processes after `automodapi`_ has run.  The output files are not
    actually used by sphinx, so this option is only for figuring out the
    cause of sphinx warnings or other debugging.  Defaults to `False`.

.. _automodule: http://sphinx-doc.org/latest/ext/autodoc.html?highlight=automodule#directive-automodule
"""

# Implementation note:
# The 'automodapi' directive is not actually implemented as a docutils
# directive.
Instead, this extension searches for the 'automodapi' text in
# all sphinx documents, and replaces it where necessary from a template built
# into this extension. This is necessary because automodsumm (and autosummary)
# use the "builder-inited" event, which comes before the directives are
# actually built.

import inspect
import os
import re
import sys

from .utils import find_mod_objs

if sys.version_info[0] == 3:
    text_type = str
else:
    text_type = unicode

automod_templ_modheader = """
{modname} {pkgormod}
{modhds}{pkgormodhds}

{automoduleline}
"""

automod_templ_classes = """
Classes
{clshds}

.. automodsumm:: {modname}
    :classes-only:
    {clsfuncoptions}
"""

automod_templ_funcs = """
Functions
{funchds}

.. automodsumm:: {modname}
    :functions-only:
    {clsfuncoptions}
"""

automod_templ_inh = """
Class Inheritance Diagram
{clsinhsechds}

.. automod-diagram:: {modname}
    :private-bases:
    :parts: 1
    {allowedpkgnms}
"""

_automodapirex = re.compile(r'^(?:\s*\.\.\s+automodapi::\s*)([A-Za-z0-9_.]+)'
                            r'\s*$((?:\n\s+:[a-zA-Z_\-]+:.*$)*)',
                            flags=re.MULTILINE)
# the last group of the above regex is intended to go into findall with the
# below
_automodapiargsrex = re.compile(r':([a-zA-Z_\-]+):(.*)$', flags=re.MULTILINE)


def automodapi_replace(sourcestr, app, dotoctree=True, docname=None,
                       warnings=True):
    """
    Replaces `sourcestr`'s entries of ".. automodapi::" with the
    automodapi template form based on provided options.

    This is used with the sphinx event 'source-read' to replace
    `automodapi`_ entries before sphinx actually processes them, as
    automodsumm needs the code to be present to generate stub
    documentation.

    Parameters
    ----------
    sourcestr : str
        The string with sphinx source to be checked for automodapi
        replacement.
    app : `sphinx.application.Application`
        The sphinx application.
    dotoctree : bool
        If `True`, a ":toctree:" option will be added in the
        ".. automodsumm::" sections of the template, pointing to the
        appropriate "generated" directory based on the Astropy convention
        (e.g. in ``docs/api``)
    docname : str
        The name of the file for this `sourcestr` (if known - if not, it
        can be `None`). If not provided and `dotoctree` is `True`, the
        generated files may end up in the wrong place.
    warnings : bool
        If `False`, all warnings that would normally be issued are
        silenced.

    Returns
    -------
    newstr : str
        The string with automodapi entries replaced with the correct
        sphinx markup.
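    For example (illustrative; ``mypackage.utils`` is a hypothetical
    module), a source line such as::

        .. automodapi:: mypackage.utils

    is replaced by a block containing an ``automodule`` directive for
    the module docstring, ``automodsumm`` tables for the module's
    functions and classes, and an ``automod-diagram`` if the module
    defines any classes.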
""" spl = _automodapirex.split(sourcestr) if len(spl) > 1: # automodsumm is in this document if dotoctree: toctreestr = ':toctree: ' dirnm = app.config.automodapi_toctreedirnm if not dirnm.endswith("/"): dirnm += "/" if docname is not None: toctreestr += '../' * docname.count('/') + dirnm else: toctreestr += dirnm else: toctreestr = '' newstrs = [spl[0]] for grp in range(len(spl) // 3): modnm = spl[grp * 3 + 1] # find where this is in the document for warnings if docname is None: location = None else: location = (docname, spl[0].count('\n')) # initialize default options toskip = [] inhdiag = maindocstr = top_head = True hds = '-^' allowedpkgnms = [] # look for actual options unknownops = [] for opname, args in _automodapiargsrex.findall(spl[grp * 3 + 2]): if opname == 'skip': toskip.append(args.strip()) elif opname == 'no-inheritance-diagram': inhdiag = False elif opname == 'no-main-docstr': maindocstr = False elif opname == 'headings': hds = args elif opname == 'no-heading': top_head = False elif opname == 'allowed-package-names': allowedpkgnms.append(args.strip()) else: unknownops.append(opname) #join all the allowedpkgnms if len(allowedpkgnms) == 0: allowedpkgnms = '' onlylocals = True else: allowedpkgnms = ':allowed-package-names: ' + ','.join(allowedpkgnms) onlylocals = allowedpkgnms # get the two heading chars if len(hds) < 2: msg = 'Not enough headings (got {0}, need 2), using default -^' if warnings: app.warn(msg.format(len(hds)), location) hds = '-^' h1, h2 = hds.lstrip()[:2] # tell sphinx that the remaining args are invalid. if len(unknownops) > 0 and app is not None: opsstrs = ','.join(unknownops) msg = 'Found additional options ' + opsstrs + ' in automodapi.' if warnings: app.warn(msg, location) ispkg, hascls, hasfuncs = _mod_info(modnm, toskip, onlylocals=onlylocals) # add automodule directive only if no-main-docstr isn't present if maindocstr: automodline = '.. automodule:: {modname}'.format(modname=modnm) else: automodline = '' if top_head: newstrs.append(automod_templ_modheader.format(modname=modnm, modhds=h1 * len(modnm), pkgormod='Package' if ispkg else 'Module', pkgormodhds=h1 * (8 if ispkg else 7), automoduleline=automodline)) else: newstrs.append(automod_templ_modheader.format( modname='', modhds='', pkgormod='', pkgormodhds='', automoduleline=automodline)) #construct the options for the class/function sections #start out indented at 4 spaces, but need to keep the indentation. 
clsfuncoptions = [] if toctreestr: clsfuncoptions.append(toctreestr) if toskip: clsfuncoptions.append(':skip: ' + ','.join(toskip)) if allowedpkgnms: clsfuncoptions.append(allowedpkgnms) clsfuncoptionstr = '\n '.join(clsfuncoptions) if hasfuncs: newstrs.append(automod_templ_funcs.format( modname=modnm, funchds=h2 * 9, clsfuncoptions=clsfuncoptionstr)) if hascls: newstrs.append(automod_templ_classes.format( modname=modnm, clshds=h2 * 7, clsfuncoptions=clsfuncoptionstr)) if inhdiag and hascls: # add inheritance diagram if any classes are in the module newstrs.append(automod_templ_inh.format( modname=modnm, clsinhsechds=h2 * 25, allowedpkgnms=allowedpkgnms)) newstrs.append(spl[grp * 3 + 3]) newsourcestr = ''.join(newstrs) if app.config.automodapi_writereprocessed: # sometimes they are unicode, sometimes not, depending on how # sphinx has processed things if isinstance(newsourcestr, text_type): ustr = newsourcestr else: ustr = newsourcestr.decode(app.config.source_encoding) if docname is None: with open(os.path.join(app.srcdir, 'unknown.automodapi'), 'a') as f: f.write('\n**NEW DOC**\n\n') f.write(ustr) else: env = app.builder.env # Determine the filename associated with this doc (specifically # the extension) filename = docname + os.path.splitext(env.doc2path(docname))[1] filename += '.automodapi' with open(os.path.join(app.srcdir, filename), 'w') as f: f.write(ustr) return newsourcestr else: return sourcestr def _mod_info(modname, toskip=[], onlylocals=True): """ Determines if a module is a module or a package and whether or not it has classes or functions. """ hascls = hasfunc = False for localnm, fqnm, obj in zip(*find_mod_objs(modname, onlylocals=onlylocals)): if localnm not in toskip: hascls = hascls or inspect.isclass(obj) hasfunc = hasfunc or inspect.isroutine(obj) if hascls and hasfunc: break # find_mod_objs has already imported modname # TODO: There is probably a cleaner way to do this, though this is pretty # reliable for all Python versions for most cases that we care about. pkg = sys.modules[modname] ispkg = (hasattr(pkg, '__file__') and isinstance(pkg.__file__, str) and os.path.split(pkg.__file__)[1].startswith('__init__.py')) return ispkg, hascls, hasfunc def process_automodapi(app, docname, source): source[0] = automodapi_replace(source[0], app, True, docname) def setup(app): # need automodsumm for automodapi app.setup_extension('astropy_helpers.sphinx.ext.automodsumm') app.connect('source-read', process_automodapi) app.add_config_value('automodapi_toctreedirnm', 'api', True) app.add_config_value('automodapi_writereprocessed', False, True) spectral-cube-0.3.1/astropy_helpers/astropy_helpers/sphinx/ext/automodsumm.py0000644000077000000240000005574712533471373030022 0ustar adamstaff00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst """ This sphinx extension adds two directives for summarizing the public members of a module or package. These directives are primarily for use with the `automodapi`_ extension, but can be used independently. .. _automodsumm: ======================= automodsumm directive ======================= This directive will produce an "autosummary"-style table for public attributes of a specified module. See the `sphinx.ext.autosummary`_ extension for details on this process. The main difference from the `autosummary`_ directive is that `autosummary`_ requires manually inputting all attributes that appear in the table, while this captures the entries automatically. 
This directive requires a single argument that must be a module or
package.  It also accepts any options supported by the `autosummary`_
directive -- see `sphinx.ext.autosummary`_ for details.  It also accepts
the following additional options:

* ``:classes-only:``
    If present, the autosummary table will only contain entries for
    classes. This cannot be used at the same time with
    ``:functions-only:``.

* ``:functions-only:``
    If present, the autosummary table will only contain entries for
    functions. This cannot be used at the same time with
    ``:classes-only:``.

* ``:skip: obj1, [obj2, obj3, ...]``
    If present, specifies that the listed objects should be skipped
    and not have their documentation generated, nor be included in
    the summary table.

* ``:allowed-package-names: pkgormod1, [pkgormod2, pkgormod3, ...]``
    Specifies the packages that functions/classes documented here are
    allowed to be from, as comma-separated list of package names. If not
    given, only objects that are actually in a subpackage of the package
    currently being documented are included.

This extension also adds one sphinx configuration option:

* ``automodsumm_writereprocessed``
    Should be a bool, and if True, will cause `automodsumm`_ to write files
    with any ``automodsumm`` sections replaced with the content Sphinx
    processes after ``automodsumm`` has run.  The output files are not
    actually used by sphinx, so this option is only for figuring out the
    cause of sphinx warnings or other debugging.  Defaults to `False`.

.. _sphinx.ext.autosummary: http://sphinx-doc.org/latest/ext/autosummary.html
.. _autosummary: http://sphinx-doc.org/latest/ext/autosummary.html#directive-autosummary

.. _automod-diagram:

===========================
automod-diagram directive
===========================

This directive will produce an inheritance diagram like that of the
`sphinx.ext.inheritance_diagram`_ extension.

This directive requires a single argument that must be a module or
package.  It accepts no options.

.. note::
    Like 'inheritance-diagram', 'automod-diagram' requires `graphviz
    <http://www.graphviz.org>`_ to generate the inheritance diagram.

.. _sphinx.ext.inheritance_diagram: http://sphinx-doc.org/latest/ext/inheritance.html
"""

import inspect
import os
import re

from distutils.version import LooseVersion

import sphinx
from sphinx.ext.autosummary import Autosummary
from sphinx.ext.inheritance_diagram import InheritanceDiagram
from docutils.parsers.rst.directives import flag

from .utils import find_mod_objs
from .astropyautosummary import AstropyAutosummary

# Don't use AstropyAutosummary with newer versions of Sphinx
# See https://github.com/astropy/astropy-helpers/pull/129
if LooseVersion(sphinx.__version__) < LooseVersion('1.2.0'):
    BaseAutosummary = AstropyAutosummary
else:
    BaseAutosummary = Autosummary


def _str_list_converter(argument):
    """
    A directive option conversion function that converts the option into a
    list of strings. Used for 'skip' option.
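    For example::

        >>> _str_list_converter('foo, bar, baz')
        ['foo', 'bar', 'baz']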
""" if argument is None: return [] else: return [s.strip() for s in argument.split(',')] class Automodsumm(BaseAutosummary): required_arguments = 1 optional_arguments = 0 final_argument_whitespace = False has_content = False option_spec = dict(Autosummary.option_spec) option_spec['functions-only'] = flag option_spec['classes-only'] = flag option_spec['skip'] = _str_list_converter option_spec['allowed-package-names'] = _str_list_converter def run(self): env = self.state.document.settings.env modname = self.arguments[0] self.warnings = [] nodelist = [] try: localnames, fqns, objs = find_mod_objs(modname) except ImportError: self.warnings = [] self.warn("Couldn't import module " + modname) return self.warnings try: # set self.content to trick the Autosummary internals. # Be sure to respect functions-only and classes-only. funconly = 'functions-only' in self.options clsonly = 'classes-only' in self.options skipnames = [] if 'skip' in self.options: option_skipnames = set(self.options['skip']) for lnm in localnames: if lnm in option_skipnames: option_skipnames.remove(lnm) skipnames.append(lnm) if len(option_skipnames) > 0: self.warn('Tried to skip objects {objs} in module {mod}, ' 'but they were not present. Ignoring.'.format( objs=option_skipnames, mod=modname)) if funconly and not clsonly: cont = [] for nm, obj in zip(localnames, objs): if nm not in skipnames and inspect.isroutine(obj): cont.append(nm) elif clsonly: cont = [] for nm, obj in zip(localnames, objs): if nm not in skipnames and inspect.isclass(obj): cont.append(nm) else: if clsonly and funconly: self.warning('functions-only and classes-only both ' 'defined. Skipping.') cont = [nm for nm in localnames if nm not in skipnames] self.content = cont # for some reason, even though ``currentmodule`` is substituted in, # sphinx doesn't necessarily recognize this fact. 
So we just force
            # it internally, and that seems to fix things
            env.temp_data['py:module'] = modname

            # can't use super because Sphinx/docutils has trouble with
            # return super(Autosummary,self).run()
            nodelist.extend(Autosummary.run(self))

            return self.warnings + nodelist
        finally:  # has_content = False for the Automodsumm
            self.content = []

    def get_items(self, names):
        self.genopt['imported-members'] = True
        return Autosummary.get_items(self, names)


#<-------------------automod-diagram stuff------------------------------------>
class Automoddiagram(InheritanceDiagram):

    option_spec = dict(InheritanceDiagram.option_spec)
    option_spec['allowed-package-names'] = _str_list_converter

    def run(self):
        try:
            ols = self.options.get('allowed-package-names', [])
            ols = True if len(ols) == 0 else ols  # if none are given, assume only local

            nms, objs = find_mod_objs(self.arguments[0], onlylocals=ols)[1:]
        except ImportError:
            self.warnings = []
            self.warn("Couldn't import module " + self.arguments[0])
            return self.warnings

        clsnms = []
        for n, o in zip(nms, objs):
            if inspect.isclass(o):
                clsnms.append(n)

        oldargs = self.arguments
        try:
            if len(clsnms) > 0:
                self.arguments = [' '.join(clsnms)]
            return InheritanceDiagram.run(self)
        finally:
            self.arguments = oldargs


#<---------------------automodsumm generation stuff--------------------------->
def process_automodsumm_generation(app):
    env = app.builder.env

    filestosearch = []
    for docname in env.found_docs:
        filename = env.doc2path(docname)
        if os.path.isfile(filename):
            filestosearch.append(docname + os.path.splitext(filename)[1])

    liness = []
    for sfn in filestosearch:
        lines = automodsumm_to_autosummary_lines(sfn, app)
        liness.append(lines)
        if app.config.automodsumm_writereprocessed:
            if lines:  # empty list means no automodsumm entry is in the file
                outfn = os.path.join(app.srcdir, sfn) + '.automodsumm'
                with open(outfn, 'w') as f:
                    for l in lines:
                        f.write(l)
                        f.write('\n')

    for sfn, lines in zip(filestosearch, liness):
        suffix = os.path.splitext(sfn)[1]
        if len(lines) > 0:
            generate_automodsumm_docs(lines, sfn, builder=app.builder,
                                      warn=app.warn, info=app.info,
                                      suffix=suffix, base_path=app.srcdir)

#_automodsummrex = re.compile(r'^(\s*)\.\. automodsumm::\s*([A-Za-z0-9_.]+)\s*'
#                             r'\n\1(\s*)(\S|$)', re.MULTILINE)
_lineendrex = r'(?:\n|$)'
_hdrex = r'^\n?(\s*)\.\. automodsumm::\s*(\S+)\s*' + _lineendrex
_oprex1 = r'(?:\1(\s+)\S.*' + _lineendrex + ')'
_oprex2 = r'(?:\1\4\S.*' + _lineendrex + ')'
_automodsummrex = re.compile(_hdrex + '(' + _oprex1 + '?' + _oprex2 + '*)',
                             re.MULTILINE)


def automodsumm_to_autosummary_lines(fn, app):
    """
    Generates lines from a file with an "automodsumm" entry suitable for
    feeding into "autosummary".

    Searches the provided file for `automodsumm` directives and returns
    a list of lines specifying the `autosummary` commands for the modules
    requested.  This does *not* return the whole file contents - just an
    autosummary section in place of any :automodsumm: entries. Note that
    any options given for `automodsumm` are also included in the
    generated `autosummary` section.

    Parameters
    ----------
    fn : str
        The name of the file to search for `automodsumm` entries.
    app : sphinx.application.Application
        The sphinx Application object

    Returns
    -------
    lines : list of str
        Lines for all `automodsumm` entries with the entries replaced by
        `autosummary` and the module's members added.
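    For example (an illustrative sketch, assuming a hypothetical module
    ``mypackage.utils`` whose public members are the functions ``func_a``
    and ``func_b``), an entry like::

        .. automodsumm:: mypackage.utils
            :functions-only:

    is converted into lines equivalent to::

        .. currentmodule:: mypackage.utils

        .. autosummary::
            func_a
            func_b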
""" fullfn = os.path.join(app.builder.env.srcdir, fn) with open(fullfn) as fr: if 'astropy_helpers.sphinx.ext.automodapi' in app._extensions: from astropy_helpers.sphinx.ext.automodapi import automodapi_replace # Must do the automodapi on the source to get the automodsumm # that might be in there docname = os.path.splitext(fn)[0] filestr = automodapi_replace(fr.read(), app, True, docname, False) else: filestr = fr.read() spl = _automodsummrex.split(filestr) #0th entry is the stuff before the first automodsumm line indent1s = spl[1::5] mods = spl[2::5] opssecs = spl[3::5] indent2s = spl[4::5] remainders = spl[5::5] # only grab automodsumm sections and convert them to autosummary with the # entries for all the public objects newlines = [] #loop over all automodsumms in this document for i, (i1, i2, modnm, ops, rem) in enumerate(zip(indent1s, indent2s, mods, opssecs, remainders)): allindent = i1 + ('' if i2 is None else i2) #filter out functions-only and classes-only options if present oplines = ops.split('\n') toskip = [] allowedpkgnms = [] funcsonly = clssonly = False for i, ln in reversed(list(enumerate(oplines))): if ':functions-only:' in ln: funcsonly = True del oplines[i] if ':classes-only:' in ln: clssonly = True del oplines[i] if ':skip:' in ln: toskip.extend(_str_list_converter(ln.replace(':skip:', ''))) del oplines[i] if ':allowed-package-names:' in ln: allowedpkgnms.extend(_str_list_converter(ln.replace(':allowed-package-names:', ''))) del oplines[i] if funcsonly and clssonly: msg = ('Defined both functions-only and classes-only options. ' 'Skipping this directive.') lnnum = sum([spl[j].count('\n') for j in range(i * 5 + 1)]) app.warn('[automodsumm]' + msg, (fn, lnnum)) continue # Use the currentmodule directive so we can just put the local names # in the autosummary table. Note that this doesn't always seem to # actually "take" in Sphinx's eyes, so in `Automodsumm.run`, we have to # force it internally, as well. newlines.extend([i1 + '.. currentmodule:: ' + modnm, '', '.. autosummary::']) newlines.extend(oplines) ols = True if len(allowedpkgnms) == 0 else allowedpkgnms for nm, fqn, obj in zip(*find_mod_objs(modnm, onlylocals=ols)): if nm in toskip: continue if funcsonly and not inspect.isroutine(obj): continue if clssonly and not inspect.isclass(obj): continue newlines.append(allindent + nm) # add one newline at the end of the autosummary block newlines.append('') return newlines def generate_automodsumm_docs(lines, srcfn, suffix='.rst', warn=None, info=None, base_path=None, builder=None, template_dir=None): """ This function is adapted from `sphinx.ext.autosummary.generate.generate_autosummmary_docs` to generate source for the automodsumm directives that should be autosummarized. Unlike generate_autosummary_docs, this function is called one file at a time. """ from sphinx.jinja2glue import BuiltinTemplateLoader from sphinx.ext.autosummary import import_by_name, get_documenter from sphinx.ext.autosummary.generate import (find_autosummary_in_lines, _simple_info, _simple_warn) from sphinx.util.osutil import ensuredir from sphinx.util.inspect import safe_getattr from jinja2 import FileSystemLoader, TemplateNotFound from jinja2.sandbox import SandboxedEnvironment if info is None: info = _simple_info if warn is None: warn = _simple_warn #info('[automodsumm] generating automodsumm for: ' + srcfn) # Create our own templating environment - here we use Astropy's # templates rather than the default autosummary templates, in order to # allow docstrings to be shown for methods. 
template_dirs = [os.path.join(os.path.dirname(__file__), 'templates'), os.path.join(base_path, '_templates')] if builder is not None: # allow the user to override the templates template_loader = BuiltinTemplateLoader() template_loader.init(builder, dirs=template_dirs) else: if template_dir: template_dirs.insert(0, template_dir) template_loader = FileSystemLoader(template_dirs) template_env = SandboxedEnvironment(loader=template_loader) # read #items = find_autosummary_in_files(sources) items = find_autosummary_in_lines(lines, filename=srcfn) if len(items) > 0: msg = '[automodsumm] {1}: found {0} automodsumm entries to generate' info(msg.format(len(items), srcfn)) # gennms = [item[0] for item in items] # if len(gennms) > 20: # gennms = gennms[:10] + ['...'] + gennms[-10:] # info('[automodsumm] generating autosummary for: ' + ', '.join(gennms)) # remove possible duplicates items = dict([(item, True) for item in items]).keys() # keep track of new files new_files = [] # write for name, path, template_name in sorted(items): if path is None: # The corresponding autosummary:: directive did not have # a :toctree: option continue path = os.path.abspath(path) ensuredir(path) try: import_by_name_values = import_by_name(name) except ImportError as e: warn('[automodsumm] failed to import %r: %s' % (name, e)) continue # if block to accommodate Sphinx's v1.2.2 and v1.2.3 respectively if len(import_by_name_values) == 3: name, obj, parent = import_by_name_values elif len(import_by_name_values) == 4: name, obj, parent, module_name = import_by_name_values fn = os.path.join(path, name + suffix) # skip it if it exists if os.path.isfile(fn): continue new_files.append(fn) f = open(fn, 'w') try: doc = get_documenter(obj, parent) if template_name is not None: template = template_env.get_template(template_name) else: tmplstr = 'autosummary/%s.rst' try: template = template_env.get_template(tmplstr % doc.objtype) except TemplateNotFound: template = template_env.get_template(tmplstr % 'base') def get_members_mod(obj, typ, include_public=[]): """ typ = None -> all """ items = [] for name in dir(obj): try: documenter = get_documenter(safe_getattr(obj, name), obj) except AttributeError: continue if typ is None or documenter.objtype == typ: items.append(name) public = [x for x in items if x in include_public or not x.startswith('_')] return public, items def get_members_class(obj, typ, include_public=[], include_base=False): """ typ = None -> all include_base -> include attrs that are from a base class """ items = [] # using dir gets all of the attributes, including the elements # from the base class, otherwise use __slots__ or __dict__ if include_base: names = dir(obj) else: if hasattr(obj, '__slots__'): names = tuple(getattr(obj, '__slots__')) else: names = getattr(obj, '__dict__').keys() for name in names: try: documenter = get_documenter(safe_getattr(obj, name), obj) except AttributeError: continue if typ is None or documenter.objtype == typ: items.append(name) public = [x for x in items if x in include_public or not x.startswith('_')] return public, items ns = {} if doc.objtype == 'module': ns['members'] = get_members_mod(obj, None) ns['functions'], ns['all_functions'] = \ get_members_mod(obj, 'function') ns['classes'], ns['all_classes'] = \ get_members_mod(obj, 'class') ns['exceptions'], ns['all_exceptions'] = \ get_members_mod(obj, 'exception') elif doc.objtype == 'class': api_class_methods = ['__init__', '__call__'] ns['members'] = get_members_class(obj, None) ns['methods'], ns['all_methods'] = \ 
get_members_class(obj, 'method', api_class_methods) ns['attributes'], ns['all_attributes'] = \ get_members_class(obj, 'attribute') ns['methods'].sort() ns['attributes'].sort() parts = name.split('.') if doc.objtype in ('method', 'attribute'): mod_name = '.'.join(parts[:-2]) cls_name = parts[-2] obj_name = '.'.join(parts[-2:]) ns['class'] = cls_name else: mod_name, obj_name = '.'.join(parts[:-1]), parts[-1] ns['fullname'] = name ns['module'] = mod_name ns['objname'] = obj_name ns['name'] = parts[-1] ns['objtype'] = doc.objtype ns['underline'] = len(name) * '=' # We now check whether a file for reference footnotes exists for # the module being documented. We first check if the # current module is a file or a directory, as this will give a # different path for the reference file. For example, if # documenting astropy.wcs then the reference file is at # ../wcs/references.txt, while if we are documenting # astropy.config.logging_helper (which is at # astropy/config/logging_helper.py) then the reference file is set # to ../config/references.txt if '.' in mod_name: mod_name_dir = mod_name.replace('.', '/').split('/', 1)[1] else: mod_name_dir = mod_name if not os.path.isdir(os.path.join(base_path, mod_name_dir)) \ and os.path.isdir(os.path.join(base_path, mod_name_dir.rsplit('/', 1)[0])): mod_name_dir = mod_name_dir.rsplit('/', 1)[0] # We then have to check whether it exists, and if so, we pass it # to the template. if os.path.exists(os.path.join(base_path, mod_name_dir, 'references.txt')): # An important subtlety here is that the path we pass in has # to be relative to the file being generated, so we have to # figure out the right number of '..'s ndirsback = path.replace(base_path, '').count('/') ref_file_rel_segments = ['..'] * ndirsback ref_file_rel_segments.append(mod_name_dir) ref_file_rel_segments.append('references.txt') ns['referencefile'] = os.path.join(*ref_file_rel_segments) rendered = template.render(**ns) f.write(rendered) finally: f.close() def setup(app): # need our autosummary app.setup_extension('astropy_helpers.sphinx.ext.astropyautosummary') # need inheritance-diagram for automod-diagram app.setup_extension('sphinx.ext.inheritance_diagram') app.add_directive('automod-diagram', Automoddiagram) app.add_directive('automodsumm', Automodsumm) app.connect('builder-inited', process_automodsumm_generation) app.add_config_value('automodsumm_writereprocessed', False, True) spectral-cube-0.3.1/astropy_helpers/astropy_helpers/sphinx/ext/changelog_links.py0000644000077000000240000000542012412505144030544 0ustar adamstaff00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst """ This sphinx extension makes the issue numbers in the changelog into links to GitHub issues. """ from __future__ import print_function import re from docutils.nodes import Text, reference BLOCK_PATTERN = re.compile('\[#.+\]', flags=re.DOTALL) ISSUE_PATTERN = re.compile('#[0-9]+') def process_changelog_links(app, doctree, docname): for rex in app.changelog_links_rexes: if rex.match(docname): break else: # if the doc doesn't match any of the changelog regexes, don't process return app.info('[changelog_links] Adding changelog links to "{0}"'.format(docname)) for item in doctree.traverse(): if not isinstance(item, Text): continue # We build a new list of items to replace the current item. If # a link is found, we need to use a 'reference' item. children = [] # First cycle through blocks of issues (delimited by []) then # iterate inside each one to find the individual issues. 
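        # For example (illustrative), given the changelog text
        #     "Fixed a bug [#123, #456]"
        # this produces Text nodes for the surrounding text plus
        # reference nodes for "#123" and "#456" whose refuri is
        # github_issues_url + '123' and github_issues_url + '456'.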
prev_block_end = 0
        for block in BLOCK_PATTERN.finditer(item):

            block_start, block_end = block.start(), block.end()

            children.append(Text(item[prev_block_end:block_start]))

            block = item[block_start:block_end]

            prev_end = 0
            for m in ISSUE_PATTERN.finditer(block):

                start, end = m.start(), m.end()

                children.append(Text(block[prev_end:start]))

                issue_number = block[start:end]
                refuri = app.config.github_issues_url + issue_number[1:]
                children.append(reference(text=issue_number,
                                          name=issue_number,
                                          refuri=refuri))

                prev_end = end

            prev_block_end = block_end

            # If no issues were found, this adds the whole item,
            # otherwise it adds the remaining text.
            children.append(Text(block[prev_end:block_end]))

        # If no blocks were found, this adds the whole item, otherwise
        # it adds the remaining text.
        children.append(Text(item[prev_block_end:]))

        # Replace item by the new list of items we have generated,
        # which may contain links.
        item.parent.replace(item, children)


def setup_patterns_rexes(app):
    app.changelog_links_rexes = [re.compile(pat) for pat
                                 in app.config.changelog_links_docpattern]


def setup(app):
    app.connect('doctree-resolved', process_changelog_links)
    app.connect('builder-inited', setup_patterns_rexes)

    app.add_config_value('github_issues_url', None, True)
    app.add_config_value('changelog_links_docpattern',
                         ['.*changelog.*', 'whatsnew/.*'], True)
spectral-cube-0.3.1/astropy_helpers/astropy_helpers/sphinx/ext/comment_eater.py0000644000077000000240000001245112412505144030241 0ustar adamstaff00000000000000from __future__ import division, absolute_import, print_function

import sys
if sys.version_info[0] >= 3:
    from io import StringIO
else:
    from StringIO import StringIO
import compiler
import inspect
import textwrap
import tokenize

from .compiler_unparse import unparse


class Comment(object):
    """ A comment block.
    """
    is_comment = True

    def __init__(self, start_lineno, end_lineno, text):
        # int : The first line number in the block. 1-indexed.
        self.start_lineno = start_lineno
        # int : The last line number. Inclusive!
        self.end_lineno = end_lineno
        # str : The text block including '#' character but not any leading spaces.
        self.text = text

    def add(self, string, start, end, line):
        """ Add a new comment line.
        """
        self.start_lineno = min(self.start_lineno, start[0])
        self.end_lineno = max(self.end_lineno, end[0])
        self.text += string

    def __repr__(self):
        return '%s(%r, %r, %r)' % (self.__class__.__name__,
                                   self.start_lineno, self.end_lineno,
                                   self.text)


class NonComment(object):
    """ A non-comment block of code.
    """
    is_comment = False

    def __init__(self, start_lineno, end_lineno):
        self.start_lineno = start_lineno
        self.end_lineno = end_lineno

    def add(self, string, start, end, line):
        """ Add lines to the block.
        """
        if string.strip():
            # Only add if not entirely whitespace.
            self.start_lineno = min(self.start_lineno, start[0])
            self.end_lineno = max(self.end_lineno, end[0])

    def __repr__(self):
        return '%s(%r, %r)' % (self.__class__.__name__, self.start_lineno,
                               self.end_lineno)


class CommentBlocker(object):
    """ Pull out contiguous comment blocks.
    """
    def __init__(self):
        # Start with a dummy.
        self.current_block = NonComment(0, 0)

        # All of the blocks seen so far.
        self.blocks = []

        # The index mapping lines of code to their associated comment blocks.
        self.index = {}

    def process_file(self, file):
        """ Process a file object.
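        For example (an illustrative sketch)::

            cb = CommentBlocker()
            with open('module.py') as f:
                cb.process_file(f)
            # text of the comment block just above line 10, if any
            print(cb.search_for_comment(10))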
""" if sys.version_info[0] >= 3: nxt = file.__next__ else: nxt = file.next for token in tokenize.generate_tokens(nxt): self.process_token(*token) self.make_index() def process_token(self, kind, string, start, end, line): """ Process a single token. """ if self.current_block.is_comment: if kind == tokenize.COMMENT: self.current_block.add(string, start, end, line) else: self.new_noncomment(start[0], end[0]) else: if kind == tokenize.COMMENT: self.new_comment(string, start, end, line) else: self.current_block.add(string, start, end, line) def new_noncomment(self, start_lineno, end_lineno): """ We are transitioning from a noncomment to a comment. """ block = NonComment(start_lineno, end_lineno) self.blocks.append(block) self.current_block = block def new_comment(self, string, start, end, line): """ Possibly add a new comment. Only adds a new comment if this comment is the only thing on the line. Otherwise, it extends the noncomment block. """ prefix = line[:start[1]] if prefix.strip(): # Oops! Trailing comment, not a comment block. self.current_block.add(string, start, end, line) else: # A comment block. block = Comment(start[0], end[0], string) self.blocks.append(block) self.current_block = block def make_index(self): """ Make the index mapping lines of actual code to their associated prefix comments. """ for prev, block in zip(self.blocks[:-1], self.blocks[1:]): if not block.is_comment: self.index[block.start_lineno] = prev def search_for_comment(self, lineno, default=None): """ Find the comment block just before the given line number. Returns None (or the specified default) if there is no such block. """ if not self.index: self.make_index() block = self.index.get(lineno, None) text = getattr(block, 'text', default) return text def strip_comment_marker(text): """ Strip # markers at the front of a block of comment text. """ lines = [] for line in text.splitlines(): lines.append(line.lstrip('#')) text = textwrap.dedent('\n'.join(lines)) return text def get_class_traits(klass): """ Yield all of the documentation for trait definitions on a class object. """ # FIXME: gracefully handle errors here or in the caller? source = inspect.getsource(klass) cb = CommentBlocker() cb.process_file(StringIO(source)) mod_ast = compiler.parse(source) class_ast = mod_ast.node.nodes[0] for node in class_ast.code.nodes: # FIXME: handle other kinds of assignments? if isinstance(node, compiler.ast.Assign): name = node.nodes[0].name rhs = unparse(node.expr).strip() doc = strip_comment_marker(cb.search_for_comment(node.lineno, default='')) yield name, rhs, doc spectral-cube-0.3.1/astropy_helpers/astropy_helpers/sphinx/ext/compiler_unparse.py0000644000077000000240000006024412412505144030771 0ustar adamstaff00000000000000""" Turn compiler.ast structures back into executable python code. The unparse method takes a compiler.ast tree and transforms it back into valid python code. It is incomplete and currently only works for import statements, function calls, function definitions, assignments, and basic expressions. Inspired by python-2.5-svn/Demo/parser/unparse.py fixme: We may want to move to using _ast trees because the compiler for them is about 6 times faster than compiler.compile. 
""" from __future__ import division, absolute_import, print_function import sys from compiler.ast import Const, Name, Tuple, Div, Mul, Sub, Add if sys.version_info[0] >= 3: from io import StringIO else: from StringIO import StringIO def unparse(ast, single_line_functions=False): s = StringIO() UnparseCompilerAst(ast, s, single_line_functions) return s.getvalue().lstrip() op_precedence = { 'compiler.ast.Power':3, 'compiler.ast.Mul':2, 'compiler.ast.Div':2, 'compiler.ast.Add':1, 'compiler.ast.Sub':1 } class UnparseCompilerAst: """ Methods in this class recursively traverse an AST and output source code for the abstract syntax; original formatting is disregarged. """ ######################################################################### # object interface. ######################################################################### def __init__(self, tree, file = sys.stdout, single_line_functions=False): """ Unparser(tree, file=sys.stdout) -> None. Print the source for tree to file. """ self.f = file self._single_func = single_line_functions self._do_indent = True self._indent = 0 self._dispatch(tree) self._write("\n") self.f.flush() ######################################################################### # Unparser private interface. ######################################################################### ### format, output, and dispatch methods ################################ def _fill(self, text = ""): "Indent a piece of text, according to the current indentation level" if self._do_indent: self._write("\n"+" "*self._indent + text) else: self._write(text) def _write(self, text): "Append a piece of text to the current line." self.f.write(text) def _enter(self): "Print ':', and increase the indentation." self._write(": ") self._indent += 1 def _leave(self): "Decrease the indentation level." self._indent -= 1 def _dispatch(self, tree): "_dispatcher function, _dispatching tree type T to method _T." if isinstance(tree, list): for t in tree: self._dispatch(t) return meth = getattr(self, "_"+tree.__class__.__name__) if tree.__class__.__name__ == 'NoneType' and not self._do_indent: return meth(tree) ######################################################################### # compiler.ast unparsing methods. # # There should be one method per concrete grammar type. They are # organized in alphabetical order. ######################################################################### def _Add(self, t): self.__binary_op(t, '+') def _And(self, t): self._write(" (") for i, node in enumerate(t.nodes): self._dispatch(node) if i != len(t.nodes)-1: self._write(") and (") self._write(")") def _AssAttr(self, t): """ Handle assigning an attribute of an object """ self._dispatch(t.expr) self._write('.'+t.attrname) def _Assign(self, t): """ Expression Assignment such as "a = 1". This only handles assignment in expressions. Keyword assignment is handled separately. """ self._fill() for target in t.nodes: self._dispatch(target) self._write(" = ") self._dispatch(t.expr) if not self._do_indent: self._write('; ') def _AssName(self, t): """ Name on left hand side of expression. Treat just like a name on the right side of an expression. """ self._Name(t) def _AssTuple(self, t): """ Tuple on left hand side of an expression. """ # _write each elements, separated by a comma. for element in t.nodes[:-1]: self._dispatch(element) self._write(", ") # Handle the last one without writing comma last_element = t.nodes[-1] self._dispatch(last_element) def _AugAssign(self, t): """ +=,-=,*=,/=,**=, etc. 
operations """ self._fill() self._dispatch(t.node) self._write(' '+t.op+' ') self._dispatch(t.expr) if not self._do_indent: self._write(';') def _Bitand(self, t): """ Bit and operation. """ for i, node in enumerate(t.nodes): self._write("(") self._dispatch(node) self._write(")") if i != len(t.nodes)-1: self._write(" & ") def _Bitor(self, t): """ Bit or operation """ for i, node in enumerate(t.nodes): self._write("(") self._dispatch(node) self._write(")") if i != len(t.nodes)-1: self._write(" | ") def _CallFunc(self, t): """ Function call. """ self._dispatch(t.node) self._write("(") comma = False for e in t.args: if comma: self._write(", ") else: comma = True self._dispatch(e) if t.star_args: if comma: self._write(", ") else: comma = True self._write("*") self._dispatch(t.star_args) if t.dstar_args: if comma: self._write(", ") else: comma = True self._write("**") self._dispatch(t.dstar_args) self._write(")") def _Compare(self, t): self._dispatch(t.expr) for op, expr in t.ops: self._write(" " + op + " ") self._dispatch(expr) def _Const(self, t): """ A constant value such as an integer value, 3, or a string, "hello". """ self._dispatch(t.value) def _Decorators(self, t): """ Handle function decorators (eg. @has_units) """ for node in t.nodes: self._dispatch(node) def _Dict(self, t): self._write("{") for i, (k, v) in enumerate(t.items): self._dispatch(k) self._write(": ") self._dispatch(v) if i < len(t.items)-1: self._write(", ") self._write("}") def _Discard(self, t): """ Node for when return value is ignored such as in "foo(a)". """ self._fill() self._dispatch(t.expr) def _Div(self, t): self.__binary_op(t, '/') def _Ellipsis(self, t): self._write("...") def _From(self, t): """ Handle "from xyz import foo, bar as baz". """ # fixme: Are From and ImportFrom handled differently? self._fill("from ") self._write(t.modname) self._write(" import ") for i, (name,asname) in enumerate(t.names): if i != 0: self._write(", ") self._write(name) if asname is not None: self._write(" as "+asname) def _Function(self, t): """ Handle function definitions """ if t.decorators is not None: self._fill("@") self._dispatch(t.decorators) self._fill("def "+t.name + "(") defaults = [None] * (len(t.argnames) - len(t.defaults)) + list(t.defaults) for i, arg in enumerate(zip(t.argnames, defaults)): self._write(arg[0]) if arg[1] is not None: self._write('=') self._dispatch(arg[1]) if i < len(t.argnames)-1: self._write(', ') self._write(")") if self._single_func: self._do_indent = False self._enter() self._dispatch(t.code) self._leave() self._do_indent = True def _Getattr(self, t): """ Handle getting an attribute of an object """ if isinstance(t.expr, (Div, Mul, Sub, Add)): self._write('(') self._dispatch(t.expr) self._write(')') else: self._dispatch(t.expr) self._write('.'+t.attrname) def _If(self, t): self._fill() for i, (compare,code) in enumerate(t.tests): if i == 0: self._write("if ") else: self._write("elif ") self._dispatch(compare) self._enter() self._fill() self._dispatch(code) self._leave() self._write("\n") if t.else_ is not None: self._write("else") self._enter() self._fill() self._dispatch(t.else_) self._leave() self._write("\n") def _IfExp(self, t): self._dispatch(t.then) self._write(" if ") self._dispatch(t.test) if t.else_ is not None: self._write(" else (") self._dispatch(t.else_) self._write(")") def _Import(self, t): """ Handle "import xyz.foo". 
""" self._fill("import ") for i, (name,asname) in enumerate(t.names): if i != 0: self._write(", ") self._write(name) if asname is not None: self._write(" as "+asname) def _Keyword(self, t): """ Keyword value assignment within function calls and definitions. """ self._write(t.name) self._write("=") self._dispatch(t.expr) def _List(self, t): self._write("[") for i,node in enumerate(t.nodes): self._dispatch(node) if i < len(t.nodes)-1: self._write(", ") self._write("]") def _Module(self, t): if t.doc is not None: self._dispatch(t.doc) self._dispatch(t.node) def _Mul(self, t): self.__binary_op(t, '*') def _Name(self, t): self._write(t.name) def _NoneType(self, t): self._write("None") def _Not(self, t): self._write('not (') self._dispatch(t.expr) self._write(')') def _Or(self, t): self._write(" (") for i, node in enumerate(t.nodes): self._dispatch(node) if i != len(t.nodes)-1: self._write(") or (") self._write(")") def _Pass(self, t): self._write("pass\n") def _Printnl(self, t): self._fill("print ") if t.dest: self._write(">> ") self._dispatch(t.dest) self._write(", ") comma = False for node in t.nodes: if comma: self._write(', ') else: comma = True self._dispatch(node) def _Power(self, t): self.__binary_op(t, '**') def _Return(self, t): self._fill("return ") if t.value: if isinstance(t.value, Tuple): text = ', '.join([ name.name for name in t.value.asList() ]) self._write(text) else: self._dispatch(t.value) if not self._do_indent: self._write('; ') def _Slice(self, t): self._dispatch(t.expr) self._write("[") if t.lower: self._dispatch(t.lower) self._write(":") if t.upper: self._dispatch(t.upper) #if t.step: # self._write(":") # self._dispatch(t.step) self._write("]") def _Sliceobj(self, t): for i, node in enumerate(t.nodes): if i != 0: self._write(":") if not (isinstance(node, Const) and node.value is None): self._dispatch(node) def _Stmt(self, tree): for node in tree.nodes: self._dispatch(node) def _Sub(self, t): self.__binary_op(t, '-') def _Subscript(self, t): self._dispatch(t.expr) self._write("[") for i, value in enumerate(t.subs): if i != 0: self._write(",") self._dispatch(value) self._write("]") def _TryExcept(self, t): self._fill("try") self._enter() self._dispatch(t.body) self._leave() for handler in t.handlers: self._fill('except ') self._dispatch(handler[0]) if handler[1] is not None: self._write(', ') self._dispatch(handler[1]) self._enter() self._dispatch(handler[2]) self._leave() if t.else_: self._fill("else") self._enter() self._dispatch(t.else_) self._leave() def _Tuple(self, t): if not t.nodes: # Empty tuple. self._write("()") else: self._write("(") # _write each elements, separated by a comma. 
for element in t.nodes[:-1]: self._dispatch(element) self._write(", ") # Handle the last one without writing comma last_element = t.nodes[-1] self._dispatch(last_element) self._write(")") def _UnaryAdd(self, t): self._write("+") self._dispatch(t.expr) def _UnarySub(self, t): self._write("-") self._dispatch(t.expr) def _With(self, t): self._fill('with ') self._dispatch(t.expr) if t.vars: self._write(' as ') self._dispatch(t.vars.name) self._enter() self._dispatch(t.body) self._leave() self._write('\n') def _int(self, t): self._write(repr(t)) def __binary_op(self, t, symbol): # Check if parenthesis are needed on left side and then dispatch has_paren = False left_class = str(t.left.__class__) if (left_class in op_precedence.keys() and op_precedence[left_class] < op_precedence[str(t.__class__)]): has_paren = True if has_paren: self._write('(') self._dispatch(t.left) if has_paren: self._write(')') # Write the appropriate symbol for operator self._write(symbol) # Check if parenthesis are needed on the right side and then dispatch has_paren = False right_class = str(t.right.__class__) if (right_class in op_precedence.keys() and op_precedence[right_class] < op_precedence[str(t.__class__)]): has_paren = True if has_paren: self._write('(') self._dispatch(t.right) if has_paren: self._write(')') def _float(self, t): # if t is 0.1, str(t)->'0.1' while repr(t)->'0.1000000000001' # We prefer str here. self._write(str(t)) def _str(self, t): self._write(repr(t)) def _tuple(self, t): self._write(str(t)) ######################################################################### # These are the methods from the _ast modules unparse. # # As our needs to handle more advanced code increase, we may want to # modify some of the methods below so that they work for compiler.ast. ######################################################################### # # stmt # def _Expr(self, tree): # self._fill() # self._dispatch(tree.value) # # def _Import(self, t): # self._fill("import ") # first = True # for a in t.names: # if first: # first = False # else: # self._write(", ") # self._write(a.name) # if a.asname: # self._write(" as "+a.asname) # ## def _ImportFrom(self, t): ## self._fill("from ") ## self._write(t.module) ## self._write(" import ") ## for i, a in enumerate(t.names): ## if i == 0: ## self._write(", ") ## self._write(a.name) ## if a.asname: ## self._write(" as "+a.asname) ## # XXX(jpe) what is level for? 
## # # def _Break(self, t): # self._fill("break") # # def _Continue(self, t): # self._fill("continue") # # def _Delete(self, t): # self._fill("del ") # self._dispatch(t.targets) # # def _Assert(self, t): # self._fill("assert ") # self._dispatch(t.test) # if t.msg: # self._write(", ") # self._dispatch(t.msg) # # def _Exec(self, t): # self._fill("exec ") # self._dispatch(t.body) # if t.globals: # self._write(" in ") # self._dispatch(t.globals) # if t.locals: # self._write(", ") # self._dispatch(t.locals) # # def _Print(self, t): # self._fill("print ") # do_comma = False # if t.dest: # self._write(">>") # self._dispatch(t.dest) # do_comma = True # for e in t.values: # if do_comma:self._write(", ") # else:do_comma=True # self._dispatch(e) # if not t.nl: # self._write(",") # # def _Global(self, t): # self._fill("global") # for i, n in enumerate(t.names): # if i != 0: # self._write(",") # self._write(" " + n) # # def _Yield(self, t): # self._fill("yield") # if t.value: # self._write(" (") # self._dispatch(t.value) # self._write(")") # # def _Raise(self, t): # self._fill('raise ') # if t.type: # self._dispatch(t.type) # if t.inst: # self._write(", ") # self._dispatch(t.inst) # if t.tback: # self._write(", ") # self._dispatch(t.tback) # # # def _TryFinally(self, t): # self._fill("try") # self._enter() # self._dispatch(t.body) # self._leave() # # self._fill("finally") # self._enter() # self._dispatch(t.finalbody) # self._leave() # # def _excepthandler(self, t): # self._fill("except ") # if t.type: # self._dispatch(t.type) # if t.name: # self._write(", ") # self._dispatch(t.name) # self._enter() # self._dispatch(t.body) # self._leave() # # def _ClassDef(self, t): # self._write("\n") # self._fill("class "+t.name) # if t.bases: # self._write("(") # for a in t.bases: # self._dispatch(a) # self._write(", ") # self._write(")") # self._enter() # self._dispatch(t.body) # self._leave() # # def _FunctionDef(self, t): # self._write("\n") # for deco in t.decorators: # self._fill("@") # self._dispatch(deco) # self._fill("def "+t.name + "(") # self._dispatch(t.args) # self._write(")") # self._enter() # self._dispatch(t.body) # self._leave() # # def _For(self, t): # self._fill("for ") # self._dispatch(t.target) # self._write(" in ") # self._dispatch(t.iter) # self._enter() # self._dispatch(t.body) # self._leave() # if t.orelse: # self._fill("else") # self._enter() # self._dispatch(t.orelse) # self._leave # # def _While(self, t): # self._fill("while ") # self._dispatch(t.test) # self._enter() # self._dispatch(t.body) # self._leave() # if t.orelse: # self._fill("else") # self._enter() # self._dispatch(t.orelse) # self._leave # # # expr # def _Str(self, tree): # self._write(repr(tree.s)) ## # def _Repr(self, t): # self._write("`") # self._dispatch(t.value) # self._write("`") # # def _Num(self, t): # self._write(repr(t.n)) # # def _ListComp(self, t): # self._write("[") # self._dispatch(t.elt) # for gen in t.generators: # self._dispatch(gen) # self._write("]") # # def _GeneratorExp(self, t): # self._write("(") # self._dispatch(t.elt) # for gen in t.generators: # self._dispatch(gen) # self._write(")") # # def _comprehension(self, t): # self._write(" for ") # self._dispatch(t.target) # self._write(" in ") # self._dispatch(t.iter) # for if_clause in t.ifs: # self._write(" if ") # self._dispatch(if_clause) # # def _IfExp(self, t): # self._dispatch(t.body) # self._write(" if ") # self._dispatch(t.test) # if t.orelse: # self._write(" else ") # self._dispatch(t.orelse) # # unop = {"Invert":"~", "Not": "not", "UAdd":"+", 
"USub":"-"} # def _UnaryOp(self, t): # self._write(self.unop[t.op.__class__.__name__]) # self._write("(") # self._dispatch(t.operand) # self._write(")") # # binop = { "Add":"+", "Sub":"-", "Mult":"*", "Div":"/", "Mod":"%", # "LShift":">>", "RShift":"<<", "BitOr":"|", "BitXor":"^", "BitAnd":"&", # "FloorDiv":"//", "Pow": "**"} # def _BinOp(self, t): # self._write("(") # self._dispatch(t.left) # self._write(")" + self.binop[t.op.__class__.__name__] + "(") # self._dispatch(t.right) # self._write(")") # # boolops = {_ast.And: 'and', _ast.Or: 'or'} # def _BoolOp(self, t): # self._write("(") # self._dispatch(t.values[0]) # for v in t.values[1:]: # self._write(" %s " % self.boolops[t.op.__class__]) # self._dispatch(v) # self._write(")") # # def _Attribute(self,t): # self._dispatch(t.value) # self._write(".") # self._write(t.attr) # ## def _Call(self, t): ## self._dispatch(t.func) ## self._write("(") ## comma = False ## for e in t.args: ## if comma: self._write(", ") ## else: comma = True ## self._dispatch(e) ## for e in t.keywords: ## if comma: self._write(", ") ## else: comma = True ## self._dispatch(e) ## if t.starargs: ## if comma: self._write(", ") ## else: comma = True ## self._write("*") ## self._dispatch(t.starargs) ## if t.kwargs: ## if comma: self._write(", ") ## else: comma = True ## self._write("**") ## self._dispatch(t.kwargs) ## self._write(")") # # # slice # def _Index(self, t): # self._dispatch(t.value) # # def _ExtSlice(self, t): # for i, d in enumerate(t.dims): # if i != 0: # self._write(': ') # self._dispatch(d) # # # others # def _arguments(self, t): # first = True # nonDef = len(t.args)-len(t.defaults) # for a in t.args[0:nonDef]: # if first:first = False # else: self._write(", ") # self._dispatch(a) # for a,d in zip(t.args[nonDef:], t.defaults): # if first:first = False # else: self._write(", ") # self._dispatch(a), # self._write("=") # self._dispatch(d) # if t.vararg: # if first:first = False # else: self._write(", ") # self._write("*"+t.vararg) # if t.kwarg: # if first:first = False # else: self._write(", ") # self._write("**"+t.kwarg) # ## def _keyword(self, t): ## self._write(t.arg) ## self._write("=") ## self._dispatch(t.value) # # def _Lambda(self, t): # self._write("lambda ") # self._dispatch(t.args) # self._write(": ") # self._dispatch(t.body) spectral-cube-0.3.1/astropy_helpers/astropy_helpers/sphinx/ext/docscrape.py0000644000077000000240000003771312412505144027372 0ustar adamstaff00000000000000"""Extract reference documentation from the NumPy source tree. """ from __future__ import division, absolute_import, print_function import inspect import textwrap import re import pydoc from warnings import warn import collections import sys class Reader(object): """A line-based string reader. """ def __init__(self, data): """ Parameters ---------- data : str String with lines separated by '\n'. 
""" if isinstance(data,list): self._str = data else: self._str = data.split('\n') # store string as list of lines self.reset() def __getitem__(self, n): return self._str[n] def reset(self): self._l = 0 # current line nr def read(self): if not self.eof(): out = self[self._l] self._l += 1 return out else: return '' def seek_next_non_empty_line(self): for l in self[self._l:]: if l.strip(): break else: self._l += 1 def eof(self): return self._l >= len(self._str) def read_to_condition(self, condition_func): start = self._l for line in self[start:]: if condition_func(line): return self[start:self._l] self._l += 1 if self.eof(): return self[start:self._l+1] return [] def read_to_next_empty_line(self): self.seek_next_non_empty_line() def is_empty(line): return not line.strip() return self.read_to_condition(is_empty) def read_to_next_unindented_line(self): def is_unindented(line): return (line.strip() and (len(line.lstrip()) == len(line))) return self.read_to_condition(is_unindented) def peek(self,n=0): if self._l + n < len(self._str): return self[self._l + n] else: return '' def is_empty(self): return not ''.join(self._str).strip() class NumpyDocString(object): def __init__(self, docstring, config={}): docstring = textwrap.dedent(docstring).split('\n') self._doc = Reader(docstring) self._parsed_data = { 'Signature': '', 'Summary': [''], 'Extended Summary': [], 'Parameters': [], 'Returns': [], 'Raises': [], 'Warns': [], 'Other Parameters': [], 'Attributes': [], 'Methods': [], 'See Also': [], 'Notes': [], 'Warnings': [], 'References': '', 'Examples': '', 'index': {} } self._parse() def __getitem__(self,key): return self._parsed_data[key] def __setitem__(self,key,val): if key not in self._parsed_data: warn("Unknown section %s" % key) else: self._parsed_data[key] = val def _is_at_section(self): self._doc.seek_next_non_empty_line() if self._doc.eof(): return False l1 = self._doc.peek().strip() # e.g. Parameters if l1.startswith('.. 
index::'): return True l2 = self._doc.peek(1).strip() # ---------- or ========== return l2.startswith('-'*len(l1)) or l2.startswith('='*len(l1)) def _strip(self,doc): i = 0 j = 0 for i,line in enumerate(doc): if line.strip(): break for j,line in enumerate(doc[::-1]): if line.strip(): break return doc[i:len(doc)-j] def _read_to_next_section(self): section = self._doc.read_to_next_empty_line() while not self._is_at_section() and not self._doc.eof(): if not self._doc.peek(-1).strip(): # previous line was empty section += [''] section += self._doc.read_to_next_empty_line() return section def _read_sections(self): while not self._doc.eof(): data = self._read_to_next_section() name = data[0].strip() if name.startswith('..'): # index section yield name, data[1:] elif len(data) < 2: yield StopIteration else: yield name, self._strip(data[2:]) def _parse_param_list(self,content): r = Reader(content) params = [] while not r.eof(): header = r.read().strip() if ' : ' in header: arg_name, arg_type = header.split(' : ')[:2] else: arg_name, arg_type = header, '' desc = r.read_to_next_unindented_line() desc = dedent_lines(desc) params.append((arg_name,arg_type,desc)) return params _name_rgx = re.compile(r"^\s*(:(?P\w+):`(?P[a-zA-Z0-9_.-]+)`|" r" (?P[a-zA-Z0-9_.-]+))\s*", re.X) def _parse_see_also(self, content): """ func_name : Descriptive text continued text another_func_name : Descriptive text func_name1, func_name2, :meth:`func_name`, func_name3 """ items = [] def parse_item_name(text): """Match ':role:`name`' or 'name'""" m = self._name_rgx.match(text) if m: g = m.groups() if g[1] is None: return g[3], None else: return g[2], g[1] raise ValueError("%s is not a item name" % text) def push_item(name, rest): if not name: return name, role = parse_item_name(name) items.append((name, list(rest), role)) del rest[:] current_func = None rest = [] for line in content: if not line.strip(): continue m = self._name_rgx.match(line) if m and line[m.end():].strip().startswith(':'): push_item(current_func, rest) current_func, line = line[:m.end()], line[m.end():] rest = [line.split(':', 1)[1].strip()] if not rest[0]: rest = [] elif not line.startswith(' '): push_item(current_func, rest) current_func = None if ',' in line: for func in line.split(','): if func.strip(): push_item(func, []) elif line.strip(): current_func = line elif current_func is not None: rest.append(line.strip()) push_item(current_func, rest) return items def _parse_index(self, section, content): """ .. 
index: default :refguide: something, else, and more """ def strip_each_in(lst): return [s.strip() for s in lst] out = {} section = section.split('::') if len(section) > 1: out['default'] = strip_each_in(section[1].split(','))[0] for line in content: line = line.split(':') if len(line) > 2: out[line[1]] = strip_each_in(line[2].split(',')) return out def _parse_summary(self): """Grab signature (if given) and summary""" if self._is_at_section(): return # If several signatures present, take the last one while True: summary = self._doc.read_to_next_empty_line() summary_str = " ".join([s.strip() for s in summary]).strip() if re.compile('^([\w., ]+=)?\s*[\w\.]+\(.*\)$').match(summary_str): self['Signature'] = summary_str if not self._is_at_section(): continue break if summary is not None: self['Summary'] = summary if not self._is_at_section(): self['Extended Summary'] = self._read_to_next_section() def _parse(self): self._doc.reset() self._parse_summary() for (section,content) in self._read_sections(): if not section.startswith('..'): section = ' '.join([s.capitalize() for s in section.split(' ')]) if section in ('Parameters', 'Returns', 'Raises', 'Warns', 'Other Parameters', 'Attributes', 'Methods'): self[section] = self._parse_param_list(content) elif section.startswith('.. index::'): self['index'] = self._parse_index(section, content) elif section == 'See Also': self['See Also'] = self._parse_see_also(content) else: self[section] = content # string conversion routines def _str_header(self, name, symbol='-'): return [name, len(name)*symbol] def _str_indent(self, doc, indent=4): out = [] for line in doc: out += [' '*indent + line] return out def _str_signature(self): if self['Signature']: return [self['Signature'].replace('*','\*')] + [''] else: return [''] def _str_summary(self): if self['Summary']: return self['Summary'] + [''] else: return [] def _str_extended_summary(self): if self['Extended Summary']: return self['Extended Summary'] + [''] else: return [] def _str_param_list(self, name): out = [] if self[name]: out += self._str_header(name) for param,param_type,desc in self[name]: if param_type: out += ['%s : %s' % (param, param_type)] else: out += [param] out += self._str_indent(desc) out += [''] return out def _str_section(self, name): out = [] if self[name]: out += self._str_header(name) out += self[name] out += [''] return out def _str_see_also(self, func_role): if not self['See Also']: return [] out = [] out += self._str_header("See Also") last_had_desc = True for func, desc, role in self['See Also']: if role: link = ':%s:`%s`' % (role, func) elif func_role: link = ':%s:`%s`' % (func_role, func) else: link = "`%s`_" % func if desc or last_had_desc: out += [''] out += [link] else: out[-1] += ", %s" % link if desc: out += self._str_indent([' '.join(desc)]) last_had_desc = True else: last_had_desc = False out += [''] return out def _str_index(self): idx = self['index'] out = [] out += ['.. 
index:: %s' % idx.get('default','')] for section, references in idx.items(): if section == 'default': continue out += [' :%s: %s' % (section, ', '.join(references))] return out def __str__(self, func_role=''): out = [] out += self._str_signature() out += self._str_summary() out += self._str_extended_summary() for param_list in ('Parameters', 'Returns', 'Other Parameters', 'Raises', 'Warns'): out += self._str_param_list(param_list) out += self._str_section('Warnings') out += self._str_see_also(func_role) for s in ('Notes','References','Examples'): out += self._str_section(s) for param_list in ('Attributes', 'Methods'): out += self._str_param_list(param_list) out += self._str_index() return '\n'.join(out) def indent(str,indent=4): indent_str = ' '*indent if str is None: return indent_str lines = str.split('\n') return '\n'.join(indent_str + l for l in lines) def dedent_lines(lines): """Deindent a list of lines maximally""" return textwrap.dedent("\n".join(lines)).split("\n") def header(text, style='-'): return text + '\n' + style*len(text) + '\n' class FunctionDoc(NumpyDocString): def __init__(self, func, role='func', doc=None, config={}): self._f = func self._role = role # e.g. "func" or "meth" if doc is None: if func is None: raise ValueError("No function or docstring given") doc = inspect.getdoc(func) or '' NumpyDocString.__init__(self, doc) if not self['Signature'] and func is not None: func, func_name = self.get_func() try: # try to read signature if sys.version_info[0] >= 3: argspec = inspect.getfullargspec(func) else: argspec = inspect.getargspec(func) argspec = inspect.formatargspec(*argspec) argspec = argspec.replace('*','\*') signature = '%s%s' % (func_name, argspec) except TypeError as e: signature = '%s()' % func_name self['Signature'] = signature def get_func(self): func_name = getattr(self._f, '__name__', self.__class__.__name__) if inspect.isclass(self._f): func = getattr(self._f, '__call__', self._f.__init__) else: func = self._f return func, func_name def __str__(self): out = '' func, func_name = self.get_func() signature = self['Signature'].replace('*', '\*') roles = {'func': 'function', 'meth': 'method'} if self._role: if self._role not in roles: print("Warning: invalid role %s" % self._role) out += '.. %s:: %s\n \n\n' % (roles.get(self._role,''), func_name) out += super(FunctionDoc, self).__str__(func_role=self._role) return out class ClassDoc(NumpyDocString): extra_public_methods = ['__call__'] def __init__(self, cls, doc=None, modulename='', func_doc=FunctionDoc, config={}): if not inspect.isclass(cls) and cls is not None: raise ValueError("Expected a class or None, but got %r" % cls) self._cls = cls if modulename and not modulename.endswith('.'): modulename += '.' 
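        # The trailing dot lets the stored prefix be concatenated directly
        # with member names (e.g. 'astropy.wcs' -> 'astropy.wcs.').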
self._mod = modulename if doc is None: if cls is None: raise ValueError("No class or documentation string given") doc = pydoc.getdoc(cls) NumpyDocString.__init__(self, doc) if config.get('show_class_members', True): def splitlines_x(s): if not s: return [] else: return s.splitlines() for field, items in [('Methods', self.methods), ('Attributes', self.properties)]: if not self[field]: doc_list = [] for name in sorted(items): try: doc_item = pydoc.getdoc(getattr(self._cls, name)) doc_list.append((name, '', splitlines_x(doc_item))) except AttributeError: pass # method doesn't exist self[field] = doc_list @property def methods(self): if self._cls is None: return [] return [name for name,func in inspect.getmembers(self._cls) if ((not name.startswith('_') or name in self.extra_public_methods) and isinstance(func, collections.Callable))] @property def properties(self): if self._cls is None: return [] return [name for name,func in inspect.getmembers(self._cls) if not name.startswith('_') and (func is None or isinstance(func, property) or inspect.isgetsetdescriptor(func))] spectral-cube-0.3.1/astropy_helpers/astropy_helpers/sphinx/ext/docscrape_sphinx.py0000644000077000000240000002233512412505144030755 0ustar adamstaff00000000000000from __future__ import division, absolute_import, print_function import sys, re, inspect, textwrap, pydoc import sphinx import collections from .docscrape import NumpyDocString, FunctionDoc, ClassDoc if sys.version_info[0] >= 3: sixu = lambda s: s else: sixu = lambda s: unicode(s, 'unicode_escape') class SphinxDocString(NumpyDocString): def __init__(self, docstring, config={}): NumpyDocString.__init__(self, docstring, config=config) self.load_config(config) def load_config(self, config): self.use_plots = config.get('use_plots', False) self.class_members_toctree = config.get('class_members_toctree', True) # string conversion routines def _str_header(self, name, symbol='`'): return ['.. rubric:: ' + name, ''] def _str_field_list(self, name): return [':' + name + ':'] def _str_indent(self, doc, indent=4): out = [] for line in doc: out += [' '*indent + line] return out def _str_signature(self): return [''] if self['Signature']: return ['``%s``' % self['Signature']] + [''] else: return [''] def _str_summary(self): return self['Summary'] + [''] def _str_extended_summary(self): return self['Extended Summary'] + [''] def _str_returns(self): out = [] if self['Returns']: out += self._str_field_list('Returns') out += [''] for param, param_type, desc in self['Returns']: if param_type: out += self._str_indent(['**%s** : %s' % (param.strip(), param_type)]) else: out += self._str_indent([param.strip()]) if desc: out += [''] out += self._str_indent(desc, 8) out += [''] return out def _str_param_list(self, name): out = [] if self[name]: out += self._str_field_list(name) out += [''] for param, param_type, desc in self[name]: if param_type: out += self._str_indent(['**%s** : %s' % (param.strip(), param_type)]) else: out += self._str_indent(['**%s**' % param.strip()]) if desc: out += [''] out += self._str_indent(desc, 8) out += [''] return out @property def _obj(self): if hasattr(self, '_cls'): return self._cls elif hasattr(self, '_f'): return self._f return None def _str_member_list(self, name): """ Generate a member listing, autosummary:: table where possible, and a table where not. """ out = [] if self[name]: out += ['.. rubric:: %s' % name, ''] prefix = getattr(self, '_name', '') if prefix: prefix = '~%s.' 
% prefix autosum = [] others = [] for param, param_type, desc in self[name]: param = param.strip() # Check if the referenced member can have a docstring or not param_obj = getattr(self._obj, param, None) if not (callable(param_obj) or isinstance(param_obj, property) or inspect.isgetsetdescriptor(param_obj)): param_obj = None if param_obj and (pydoc.getdoc(param_obj) or not desc): # Referenced object has a docstring autosum += [" %s%s" % (prefix, param)] else: others.append((param, param_type, desc)) if autosum: out += ['.. autosummary::'] if self.class_members_toctree: out += [' :toctree:'] out += [''] + autosum if others: maxlen_0 = max(3, max([len(x[0]) for x in others])) hdr = sixu("=")*maxlen_0 + sixu(" ") + sixu("=")*10 fmt = sixu('%%%ds %%s ') % (maxlen_0,) out += ['', hdr] for param, param_type, desc in others: desc = sixu(" ").join(x.strip() for x in desc).strip() if param_type: desc = "(%s) %s" % (param_type, desc) out += [fmt % (param.strip(), desc)] out += [hdr] out += [''] return out def _str_section(self, name): out = [] if self[name]: out += self._str_header(name) out += [''] content = textwrap.dedent("\n".join(self[name])).split("\n") out += content out += [''] return out def _str_see_also(self, func_role): out = [] if self['See Also']: see_also = super(SphinxDocString, self)._str_see_also(func_role) out = ['.. seealso::', ''] out += self._str_indent(see_also[2:]) return out def _str_warnings(self): out = [] if self['Warnings']: out = ['.. warning::', ''] out += self._str_indent(self['Warnings']) return out def _str_index(self): idx = self['index'] out = [] if len(idx) == 0: return out out += ['.. index:: %s' % idx.get('default','')] for section, references in idx.items(): if section == 'default': continue elif section == 'refguide': out += [' single: %s' % (', '.join(references))] else: out += [' %s: %s' % (section, ','.join(references))] return out def _str_references(self): out = [] if self['References']: out += self._str_header('References') if isinstance(self['References'], str): self['References'] = [self['References']] out.extend(self['References']) out += [''] # Latex collects all references to a separate bibliography, # so we need to insert links to it if sphinx.__version__ >= "0.6": out += ['.. only:: latex',''] else: out += ['.. latexonly::',''] items = [] for line in self['References']: m = re.match(r'.. \[([a-z0-9._-]+)\]', line, re.I) if m: items.append(m.group(1)) out += [' ' + ", ".join(["[%s]_" % item for item in items]), ''] return out def _str_examples(self): examples_str = "\n".join(self['Examples']) if (self.use_plots and 'import matplotlib' in examples_str and 'plot::' not in examples_str): out = [] out += self._str_header('Examples') out += ['.. 
plot::', ''] out += self._str_indent(self['Examples']) out += [''] return out else: return self._str_section('Examples') def __str__(self, indent=0, func_role="obj"): out = [] out += self._str_signature() out += self._str_index() + [''] out += self._str_summary() out += self._str_extended_summary() out += self._str_param_list('Parameters') out += self._str_returns() for param_list in ('Other Parameters', 'Raises', 'Warns'): out += self._str_param_list(param_list) out += self._str_warnings() out += self._str_see_also(func_role) out += self._str_section('Notes') out += self._str_references() out += self._str_examples() for param_list in ('Attributes', 'Methods'): out += self._str_member_list(param_list) out = self._str_indent(out,indent) return '\n'.join(out) class SphinxFunctionDoc(SphinxDocString, FunctionDoc): def __init__(self, obj, doc=None, config={}): self.load_config(config) FunctionDoc.__init__(self, obj, doc=doc, config=config) class SphinxClassDoc(SphinxDocString, ClassDoc): def __init__(self, obj, doc=None, func_doc=None, config={}): self.load_config(config) ClassDoc.__init__(self, obj, doc=doc, func_doc=None, config=config) class SphinxObjDoc(SphinxDocString): def __init__(self, obj, doc=None, config={}): self._f = obj self.load_config(config) SphinxDocString.__init__(self, doc, config=config) def get_doc_object(obj, what=None, doc=None, config={}): if what is None: if inspect.isclass(obj): what = 'class' elif inspect.ismodule(obj): what = 'module' elif isinstance(obj, collections.Callable): what = 'function' else: what = 'object' if what == 'class': return SphinxClassDoc(obj, func_doc=SphinxFunctionDoc, doc=doc, config=config) elif what in ('function', 'method'): return SphinxFunctionDoc(obj, doc=doc, config=config) else: if doc is None: doc = pydoc.getdoc(obj) return SphinxObjDoc(obj, doc, config=config) spectral-cube-0.3.1/astropy_helpers/astropy_helpers/sphinx/ext/doctest.py0000644000077000000240000000243612431342362027070 0ustar adamstaff00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst """ This is a set of three directives that allow us to insert metadata about doctests into the .rst files so the testing framework knows which tests to skip. This is quite different from the doctest extension in Sphinx itself, which actually does something. For astropy, all of the testing is centrally managed from py.test and Sphinx is not used for running tests. """ import re from docutils.nodes import literal_block from sphinx.util.compat import Directive class DoctestSkipDirective(Directive): has_content = True def run(self): # Check if there is any valid argument, and skip it. Currently only # 'win32' is supported in astropy.tests.pytest_plugins. if re.match('win32', self.content[0]): self.content = self.content[2:] code = '\n'.join(self.content) return [literal_block(code, code)] class DoctestRequiresDirective(DoctestSkipDirective): # This is silly, but we really support an unbounded number of # optional arguments optional_arguments = 64 def setup(app): app.add_directive('doctest-requires', DoctestRequiresDirective) app.add_directive('doctest-skip', DoctestSkipDirective) app.add_directive('doctest-skip-all', DoctestSkipDirective) spectral-cube-0.3.1/astropy_helpers/astropy_helpers/sphinx/ext/edit_on_github.py0000644000077000000240000001340712412505144030404 0ustar adamstaff00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst """ This extension makes it easy to edit documentation on github. 
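
A typical project-level setup in ``conf.py`` might look like the following
(hypothetical values)::

    extensions += ['astropy_helpers.sphinx.ext.edit_on_github']

    edit_on_github_project = 'username/projectname'
    edit_on_github_branch = 'v0.3.1'
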
It adds links associated with each docstring that go to the corresponding view source page on Github. From there, the user can push the "Edit" button, edit the docstring, and submit a pull request. It has the following configuration options (to be set in the project's ``conf.py``): * ``edit_on_github_project`` The name of the github project, in the form "username/projectname". * ``edit_on_github_branch`` The name of the branch to edit. If this is a released version, this should be a git tag referring to that version. For a dev version, it often makes sense for it to be "master". It may also be a git hash. * ``edit_on_github_source_root`` The location within the source tree of the root of the Python package. Defaults to "lib". * ``edit_on_github_doc_root`` The location within the source tree of the root of the documentation source. Defaults to "doc", but it may make sense to set it to "doc/source" if the project uses a separate source directory. * ``edit_on_github_docstring_message`` The phrase displayed in the links to edit a docstring. Defaults to "[edit on github]". * ``edit_on_github_page_message`` The phrase displayed in the links to edit a RST page. Defaults to "[edit this page on github]". * ``edit_on_github_help_message`` The phrase displayed as a tooltip on the edit links. Defaults to "Push the Edit button on the next page" * ``edit_on_github_skip_regex`` When the path to the .rst file matches this regular expression, no "edit this page on github" link will be added. Defaults to ``"_.*"``. """ import inspect import os import re import sys from docutils import nodes from sphinx import addnodes def import_object(modname, name): """ Import the object given by *modname* and *name* and return it. If not found, or the import fails, returns None. """ try: __import__(modname) mod = sys.modules[modname] obj = mod for part in name.split('.'): obj = getattr(obj, part) return obj except: return None def get_url_base(app): return 'http://github.com/%s/tree/%s/' % ( app.config.edit_on_github_project, app.config.edit_on_github_branch) def doctree_read(app, doctree): # Get the configuration parameters if app.config.edit_on_github_project == 'REQUIRED': raise ValueError( "The edit_on_github_project configuration variable must be " "provided in the conf.py") source_root = app.config.edit_on_github_source_root url = get_url_base(app) docstring_message = app.config.edit_on_github_docstring_message # Handle the docstring-editing links for objnode in doctree.traverse(addnodes.desc): if objnode.get('domain') != 'py': continue names = set() for signode in objnode: if not isinstance(signode, addnodes.desc_signature): continue modname = signode.get('module') if not modname: continue fullname = signode.get('fullname') if fullname in names: # only one link per name, please continue names.add(fullname) obj = import_object(modname, fullname) anchor = None if obj is not None: try: lines, lineno = inspect.getsourcelines(obj) except: pass else: anchor = '#L%d' % lineno if anchor: real_modname = inspect.getmodule(obj).__name__ path = '%s%s%s.py%s' % ( url, source_root, real_modname.replace('.', '/'), anchor) onlynode = addnodes.only(expr='html') onlynode += nodes.reference( reftitle=app.config.edit_on_github_help_message, refuri=path) onlynode[0] += nodes.inline( '', '', nodes.raw('', ' ', format='html'), nodes.Text(docstring_message), classes=['edit-on-github', 'viewcode-link']) signode += onlynode def html_page_context(app, pagename, templatename, context, doctree): if (templatename == 'page.html' and not 
re.match(app.config.edit_on_github_skip_regex, pagename)): doc_root = app.config.edit_on_github_doc_root if doc_root != '' and not doc_root.endswith('/'): doc_root += '/' doc_path = os.path.relpath(doctree.get('source'), app.builder.srcdir) url = get_url_base(app) page_message = app.config.edit_on_github_page_message context['edit_on_github'] = url + doc_root + doc_path context['edit_on_github_page_message'] = ( app.config.edit_on_github_page_message) def setup(app): app.add_config_value('edit_on_github_project', 'REQUIRED', True) app.add_config_value('edit_on_github_branch', 'master', True) app.add_config_value('edit_on_github_source_root', 'lib', True) app.add_config_value('edit_on_github_doc_root', 'doc', True) app.add_config_value('edit_on_github_docstring_message', '[edit on github]', True) app.add_config_value('edit_on_github_page_message', 'Edit This Page on Github', True) app.add_config_value('edit_on_github_help_message', 'Push the Edit button on the next page', True) app.add_config_value('edit_on_github_skip_regex', '_.*', True) app.connect('doctree-read', doctree_read) app.connect('html-page-context', html_page_context) spectral-cube-0.3.1/astropy_helpers/astropy_helpers/sphinx/ext/numpydoc.py0000644000077000000240000001441412412505144027256 0ustar adamstaff00000000000000""" ======== numpydoc ======== Sphinx extension that handles docstrings in the Numpy standard format. [1] It will: - Convert Parameters etc. sections to field lists. - Convert See Also section to a See also entry. - Renumber references. - Extract the signature from the docstring, if it can't be determined otherwise. .. [1] https://github.com/numpy/numpy/blob/master/doc/HOWTO_DOCUMENT.rst.txt """ from __future__ import division, absolute_import, print_function import os, sys, re, pydoc import sphinx import inspect import collections if sphinx.__version__ < '1.0.1': raise RuntimeError("Sphinx 1.0.1 or newer is required") from .docscrape_sphinx import get_doc_object, SphinxDocString from sphinx.util.compat import Directive if sys.version_info[0] >= 3: sixu = lambda s: s else: sixu = lambda s: unicode(s, 'unicode_escape') def mangle_docstrings(app, what, name, obj, options, lines, reference_offset=[0]): cfg = dict(use_plots=app.config.numpydoc_use_plots, show_class_members=app.config.numpydoc_show_class_members, class_members_toctree=app.config.numpydoc_class_members_toctree, ) if what == 'module': # Strip top title title_re = re.compile(sixu('^\\s*[#*=]{4,}\\n[a-z0-9 -]+\\n[#*=]{4,}\\s*'), re.I|re.S) lines[:] = title_re.sub(sixu(''), sixu("\n").join(lines)).split(sixu("\n")) else: doc = get_doc_object(obj, what, sixu("\n").join(lines), config=cfg) if sys.version_info[0] >= 3: doc = str(doc) else: doc = unicode(doc) lines[:] = doc.split(sixu("\n")) if app.config.numpydoc_edit_link and hasattr(obj, '__name__') and \ obj.__name__: if hasattr(obj, '__module__'): v = dict(full_name=sixu("%s.%s") % (obj.__module__, obj.__name__)) else: v = dict(full_name=obj.__name__) lines += [sixu(''), sixu('.. htmlonly::'), sixu('')] lines += [sixu(' %s') % x for x in (app.config.numpydoc_edit_link % v).split("\n")] # replace reference numbers so that there are no duplicates references = [] for line in lines: line = line.strip() m = re.match(sixu('^.. 
\\[([a-z0-9_.-])\\]'), line, re.I) if m: references.append(m.group(1)) # start renaming from the longest string, to avoid overwriting parts references.sort(key=lambda x: -len(x)) if references: for i, line in enumerate(lines): for r in references: if re.match(sixu('^\\d+$'), r): new_r = sixu("R%d") % (reference_offset[0] + int(r)) else: new_r = sixu("%s%d") % (r, reference_offset[0]) lines[i] = lines[i].replace(sixu('[%s]_') % r, sixu('[%s]_') % new_r) lines[i] = lines[i].replace(sixu('.. [%s]') % r, sixu('.. [%s]') % new_r) reference_offset[0] += len(references) def mangle_signature(app, what, name, obj, options, sig, retann): # Do not try to inspect classes that don't define `__init__` if (inspect.isclass(obj) and (not hasattr(obj, '__init__') or 'initializes x; see ' in pydoc.getdoc(obj.__init__))): return '', '' if not (isinstance(obj, collections.Callable) or hasattr(obj, '__argspec_is_invalid_')): return if not hasattr(obj, '__doc__'): return doc = SphinxDocString(pydoc.getdoc(obj)) if doc['Signature']: sig = re.sub(sixu("^[^(]*"), sixu(""), doc['Signature']) return sig, sixu('') def setup(app, get_doc_object_=get_doc_object): if not hasattr(app, 'add_config_value'): return # probably called by nose, better bail out global get_doc_object get_doc_object = get_doc_object_ app.connect('autodoc-process-docstring', mangle_docstrings) app.connect('autodoc-process-signature', mangle_signature) app.add_config_value('numpydoc_edit_link', None, False) app.add_config_value('numpydoc_use_plots', None, False) app.add_config_value('numpydoc_show_class_members', True, True) app.add_config_value('numpydoc_class_members_toctree', True, True) # Extra mangling domains app.add_domain(NumpyPythonDomain) app.add_domain(NumpyCDomain) #------------------------------------------------------------------------------ # Docstring-mangling domains #------------------------------------------------------------------------------ from docutils.statemachine import ViewList from sphinx.domains.c import CDomain from sphinx.domains.python import PythonDomain class ManglingDomainBase(object): directive_mangling_map = {} def __init__(self, *a, **kw): super(ManglingDomainBase, self).__init__(*a, **kw) self.wrap_mangling_directives() def wrap_mangling_directives(self): for name, objtype in list(self.directive_mangling_map.items()): self.directives[name] = wrap_mangling_directive( self.directives[name], objtype) class NumpyPythonDomain(ManglingDomainBase, PythonDomain): name = 'np' directive_mangling_map = { 'function': 'function', 'class': 'class', 'exception': 'class', 'method': 'function', 'classmethod': 'function', 'staticmethod': 'function', 'attribute': 'attribute', } indices = [] class NumpyCDomain(ManglingDomainBase, CDomain): name = 'np-c' directive_mangling_map = { 'function': 'function', 'member': 'attribute', 'macro': 'function', 'type': 'class', 'var': 'object', } def wrap_mangling_directive(base_directive, objtype): class directive(base_directive): def run(self): env = self.state.document.settings.env name = None if self.arguments: m = re.match(r'^(.*\s+)?(.*?)(\(.*)?', self.arguments[0]) name = m.group(2).strip() if not name: name = self.arguments[0] lines = list(self.content) mangle_docstrings(env.app, objtype, name, None, None, lines) self.content = ViewList(lines, self.content.parent) return base_directive.run(self) return directive spectral-cube-0.3.1/astropy_helpers/astropy_helpers/sphinx/ext/phantom_import.py0000644000077000000240000001333612412505144030462 0ustar adamstaff00000000000000""" ============== 
phantom_import ============== Sphinx extension to make directives from ``sphinx.ext.autodoc`` and similar extensions to use docstrings loaded from an XML file. This extension loads an XML file in the Pydocweb format [1] and creates a dummy module that contains the specified docstrings. This can be used to get the current docstrings from a Pydocweb instance without needing to rebuild the documented module. .. [1] http://code.google.com/p/pydocweb """ from __future__ import division, absolute_import, print_function import imp, sys, compiler, types, os, inspect, re def setup(app): app.connect('builder-inited', initialize) app.add_config_value('phantom_import_file', None, True) def initialize(app): fn = app.config.phantom_import_file if (fn and os.path.isfile(fn)): print("[numpydoc] Phantom importing modules from", fn, "...") import_phantom_module(fn) #------------------------------------------------------------------------------ # Creating 'phantom' modules from an XML description #------------------------------------------------------------------------------ def import_phantom_module(xml_file): """ Insert a fake Python module to sys.modules, based on a XML file. The XML file is expected to conform to Pydocweb DTD. The fake module will contain dummy objects, which guarantee the following: - Docstrings are correct. - Class inheritance relationships are correct (if present in XML). - Function argspec is *NOT* correct (even if present in XML). Instead, the function signature is prepended to the function docstring. - Class attributes are *NOT* correct; instead, they are dummy objects. Parameters ---------- xml_file : str Name of an XML file to read """ import lxml.etree as etree object_cache = {} tree = etree.parse(xml_file) root = tree.getroot() # Sort items so that # - Base classes come before classes inherited from them # - Modules come before their contents all_nodes = dict([(n.attrib['id'], n) for n in root]) def _get_bases(node, recurse=False): bases = [x.attrib['ref'] for x in node.findall('base')] if recurse: j = 0 while True: try: b = bases[j] except IndexError: break if b in all_nodes: bases.extend(_get_bases(all_nodes[b])) j += 1 return bases type_index = ['module', 'class', 'callable', 'object'] def base_cmp(a, b): x = cmp(type_index.index(a.tag), type_index.index(b.tag)) if x != 0: return x if a.tag == 'class' and b.tag == 'class': a_bases = _get_bases(a, recurse=True) b_bases = _get_bases(b, recurse=True) x = cmp(len(a_bases), len(b_bases)) if x != 0: return x if a.attrib['id'] in b_bases: return -1 if b.attrib['id'] in a_bases: return 1 return cmp(a.attrib['id'].count('.'), b.attrib['id'].count('.')) nodes = root.getchildren() nodes.sort(base_cmp) # Create phantom items for node in nodes: name = node.attrib['id'] doc = (node.text or '').decode('string-escape') + "\n" if doc == "\n": doc = "" # create parent, if missing parent = name while True: parent = '.'.join(parent.split('.')[:-1]) if not parent: break if parent in object_cache: break obj = imp.new_module(parent) object_cache[parent] = obj sys.modules[parent] = obj # create object if node.tag == 'module': obj = imp.new_module(name) obj.__doc__ = doc sys.modules[name] = obj elif node.tag == 'class': bases = [object_cache[b] for b in _get_bases(node) if b in object_cache] bases.append(object) init = lambda self: None init.__doc__ = doc obj = type(name, tuple(bases), {'__doc__': doc, '__init__': init}) obj.__name__ = name.split('.')[-1] elif node.tag == 'callable': funcname = node.attrib['id'].split('.')[-1] argspec = 
node.attrib.get('argspec') if argspec: argspec = re.sub('^[^(]*', '', argspec) doc = "%s%s\n\n%s" % (funcname, argspec, doc) obj = lambda: 0 obj.__argspec_is_invalid_ = True if sys.version_info[0] >= 3: obj.__name__ = funcname else: obj.func_name = funcname obj.__name__ = name obj.__doc__ = doc if inspect.isclass(object_cache[parent]): obj.__objclass__ = object_cache[parent] else: class Dummy(object): pass obj = Dummy() obj.__name__ = name obj.__doc__ = doc if inspect.isclass(object_cache[parent]): obj.__get__ = lambda: None object_cache[name] = obj if parent: if inspect.ismodule(object_cache[parent]): obj.__module__ = parent setattr(object_cache[parent], name.split('.')[-1], obj) # Populate items for node in root: obj = object_cache.get(node.attrib['id']) if obj is None: continue for ref in node.findall('ref'): if node.tag == 'class': if ref.attrib['ref'].startswith(node.attrib['id'] + '.'): setattr(obj, ref.attrib['name'], object_cache.get(ref.attrib['ref'])) else: setattr(obj, ref.attrib['name'], object_cache.get(ref.attrib['ref'])) spectral-cube-0.3.1/astropy_helpers/astropy_helpers/sphinx/ext/smart_resolver.py0000644000077000000240000000717612431342362030500 0ustar adamstaff00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst """ The classes in the astropy docs are documented by their API location, which is not necessarily where they are defined in the source. This causes a problem when certain automated features of the doc build, such as the inheritance diagrams or the `Bases` list of a class reference a class by its canonical location rather than its "user" location. In the `autodoc-process-docstring` event, a mapping from the actual name to the API name is maintained. Later, in the `missing-reference` event, unresolved references are looked up in this dictionary and corrected if possible. """ from docutils.nodes import literal, reference def process_docstring(app, what, name, obj, options, lines): if isinstance(obj, type): env = app.env if not hasattr(env, 'class_name_mapping'): env.class_name_mapping = {} mapping = env.class_name_mapping mapping[obj.__module__ + '.' + obj.__name__] = name def missing_reference_handler(app, env, node, contnode): if not hasattr(env, 'class_name_mapping'): env.class_name_mapping = {} mapping = env.class_name_mapping reftype = node['reftype'] reftarget = node['reftarget'] if reftype in ('obj', 'class', 'exc', 'meth'): reftarget = node['reftarget'] suffix = '' if reftarget not in mapping: if '.' in reftarget: front, suffix = reftarget.rsplit('.', 1) else: suffix = reftarget if suffix.startswith('_') and not suffix.startswith('__'): # If this is a reference to a hidden class or method, # we can't link to it, but we don't want to have a # nitpick warning. return node[0].deepcopy() if reftype in ('obj', 'meth') and '.' in reftarget: if front in mapping: reftarget = front suffix = '.' + suffix if (reftype in ('class', ) and '.' in reftarget and reftarget not in mapping): if '.' in front: reftarget, _ = front.rsplit('.', 1) suffix = '.' 
+ suffix reftarget = reftarget + suffix prefix = reftarget.rsplit('.')[0] if (reftarget not in mapping and prefix in env.intersphinx_named_inventory): if reftarget in env.intersphinx_named_inventory[prefix]['py:class']: newtarget = env.intersphinx_named_inventory[prefix]['py:class'][reftarget][2] if not node['refexplicit'] and \ '~' not in node.rawsource: contnode = literal(text=reftarget) newnode = reference('', '', internal=True) newnode['reftitle'] = reftarget newnode['refuri'] = newtarget newnode.append(contnode) return newnode if reftarget in mapping: newtarget = mapping[reftarget] + suffix if not node['refexplicit'] and not '~' in node.rawsource: contnode = literal(text=newtarget) newnode = env.domains['py'].resolve_xref( env, node['refdoc'], app.builder, 'class', newtarget, node, contnode) if newnode is not None: newnode['reftitle'] = reftarget return newnode def setup(app): app.connect('autodoc-process-docstring', process_docstring) app.connect('missing-reference', missing_reference_handler) spectral-cube-0.3.1/astropy_helpers/astropy_helpers/sphinx/ext/templates/0000755000077000000240000000000012654610601027043 5ustar adamstaff00000000000000spectral-cube-0.3.1/astropy_helpers/astropy_helpers/sphinx/ext/templates/autosummary_core/0000755000077000000240000000000012654610601032441 5ustar adamstaff00000000000000spectral-cube-0.3.1/astropy_helpers/astropy_helpers/sphinx/ext/templates/autosummary_core/base.rst0000644000077000000240000000025212340434262034103 0ustar adamstaff00000000000000{% if referencefile %} .. include:: {{ referencefile }} {% endif %} {{ objname }} {{ underline }} .. currentmodule:: {{ module }} .. auto{{ objtype }}:: {{ objname }} spectral-cube-0.3.1/astropy_helpers/astropy_helpers/sphinx/ext/templates/autosummary_core/class.rst0000644000077000000240000000221112340434262034273 0ustar adamstaff00000000000000{% if referencefile %} .. include:: {{ referencefile }} {% endif %} {{ objname }} {{ underline }} .. currentmodule:: {{ module }} .. autoclass:: {{ objname }} :show-inheritance: {% if '__init__' in methods %} {% set caught_result = methods.remove('__init__') %} {% endif %} {% block attributes_summary %} {% if attributes %} .. rubric:: Attributes Summary .. autosummary:: {% for item in attributes %} ~{{ name }}.{{ item }} {%- endfor %} {% endif %} {% endblock %} {% block methods_summary %} {% if methods %} .. rubric:: Methods Summary .. autosummary:: {% for item in methods %} ~{{ name }}.{{ item }} {%- endfor %} {% endif %} {% endblock %} {% block attributes_documentation %} {% if attributes %} .. rubric:: Attributes Documentation {% for item in attributes %} .. autoattribute:: {{ item }} {%- endfor %} {% endif %} {% endblock %} {% block methods_documentation %} {% if methods %} .. rubric:: Methods Documentation {% for item in methods %} .. automethod:: {{ item }} {%- endfor %} {% endif %} {% endblock %} spectral-cube-0.3.1/astropy_helpers/astropy_helpers/sphinx/ext/templates/autosummary_core/module.rst0000644000077000000240000000127712340434262034466 0ustar adamstaff00000000000000{% if referencefile %} .. include:: {{ referencefile }} {% endif %} {{ objname }} {{ underline }} .. automodule:: {{ fullname }} {% block functions %} {% if functions %} .. rubric:: Functions .. autosummary:: {% for item in functions %} {{ item }} {%- endfor %} {% endif %} {% endblock %} {% block classes %} {% if classes %} .. rubric:: Classes .. autosummary:: {% for item in classes %} {{ item }} {%- endfor %} {% endif %} {% endblock %} {% block exceptions %} {% if exceptions %} .. 
rubric:: Exceptions .. autosummary:: {% for item in exceptions %} {{ item }} {%- endfor %} {% endif %} {% endblock %} spectral-cube-0.3.1/astropy_helpers/astropy_helpers/sphinx/ext/tests/0000755000077000000240000000000012654610601026207 5ustar adamstaff00000000000000spectral-cube-0.3.1/astropy_helpers/astropy_helpers/sphinx/ext/tests/__init__.py0000644000077000000240000000334112412505144030316 0ustar adamstaff00000000000000import os import subprocess as sp import sys from textwrap import dedent import pytest @pytest.fixture def cython_testpackage(tmpdir, request): """ Creates a trivial Cython package for use with tests. """ test_pkg = tmpdir.mkdir('test_pkg') test_pkg.mkdir('_eva_').ensure('__init__.py') test_pkg.join('_eva_').join('unit02.pyx').write(dedent("""\ def pilot(): \"\"\"Returns the pilot of Eva Unit-02.\"\"\" return True """)) import astropy_helpers test_pkg.join('setup.py').write(dedent("""\ import sys sys.path.insert(0, {0!r}) from os.path import join from setuptools import setup, Extension from astropy_helpers.setup_helpers import register_commands NAME = '_eva_' VERSION = 0.1 RELEASE = True cmdclassd = register_commands(NAME, VERSION, RELEASE) setup( name=NAME, version=VERSION, cmdclass=cmdclassd, ext_modules=[Extension('_eva_.unit02', [join('_eva_', 'unit02.pyx')])] ) """.format(os.path.dirname(astropy_helpers.__path__[0])))) test_pkg.chdir() # Build the Cython module in a subprocess; otherwise strange things can # happen with Cython's global module state sp.call([sys.executable, 'setup.py', 'build_ext', '--inplace']) sys.path.insert(0, str(test_pkg)) import _eva_.unit02 def cleanup(test_pkg=test_pkg): for modname in ['_eva_', '_eva_.unit02']: try: del sys.modules[modname] except KeyError: pass sys.path.remove(str(test_pkg)) request.addfinalizer(cleanup) return test_pkg spectral-cube-0.3.1/astropy_helpers/astropy_helpers/sphinx/ext/tests/test_autodoc_enhancements.py0000644000077000000240000000324312533471373034017 0ustar adamstaff00000000000000import sys from textwrap import dedent import pytest from ..autodoc_enhancements import type_object_attrgetter # Define test classes outside the class; otherwise there is flakiness with the # details of how exec works on different Python versions class Meta(type): @property def foo(cls): return 'foo' if sys.version_info[0] < 3: exec(dedent(""" class MyClass(object): __metaclass__ = Meta @property def foo(self): \"\"\"Docstring for MyClass.foo property.\"\"\" return 'myfoo' """)) else: exec(dedent(""" class MyClass(metaclass=Meta): @property def foo(self): \"\"\"Docstring for MyClass.foo property.\"\"\" return 'myfoo' """)) def test_type_attrgetter(): """ This test essentially reproduces the docstring for `type_object_attrgetter`. Sphinx itself tests the custom attrgetter feature; see: https://bitbucket.org/birkenfeld/sphinx/src/40bd03003ac6fe274ccf3c80d7727509e00a69ea/tests/test_autodoc.py?at=default#cl-502 so rather than a full end-to-end functional test it's simple enough to just test that this function does what it needs to do. """ assert getattr(MyClass, 'foo') == 'foo' obj = type_object_attrgetter(MyClass, 'foo') assert isinstance(obj, property) assert obj.__doc__ == 'Docstring for MyClass.foo property.' 
with pytest.raises(AttributeError): type_object_attrgetter(MyClass, 'susy') assert type_object_attrgetter(MyClass, 'susy', 'default') == 'default' assert type_object_attrgetter(MyClass, '__dict__') == MyClass.__dict__ spectral-cube-0.3.1/astropy_helpers/astropy_helpers/sphinx/ext/tests/test_automodapi.py0000644000077000000240000001613612533471373032000 0ustar adamstaff00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst import os import sys import pytest from . import * from ....utils import iteritems pytest.importorskip('sphinx') # skips these tests if sphinx not present class FakeConfig(object): """ Mocks up a sphinx configuration setting construct for automodapi tests """ def __init__(self, **kwargs): for k, v in iteritems(kwargs): setattr(self, k, v) class FakeApp(object): """ Mocks up a `sphinx.application.Application` object for automodapi tests """ # Some default config values _defaults = { 'automodapi_toctreedirnm': 'api', 'automodapi_writereprocessed': False } def __init__(self, **configs): config = self._defaults.copy() config.update(configs) self.config = FakeConfig(**config) self.info = [] self.warnings = [] def info(self, msg, loc): self.info.append((msg, loc)) def warn(self, msg, loc): self.warnings.append((msg, loc)) am_replacer_str = """ This comes before .. automodapi:: astropy_helpers.sphinx.ext.tests.test_automodapi {options} This comes after """ am_replacer_basic_expected = """ This comes before astropy_helpers.sphinx.ext.tests.test_automodapi Module ------------------------------------------------------- .. automodule:: astropy_helpers.sphinx.ext.tests.test_automodapi Functions ^^^^^^^^^ .. automodsumm:: astropy_helpers.sphinx.ext.tests.test_automodapi :functions-only: :toctree: api/ Classes ^^^^^^^ .. automodsumm:: astropy_helpers.sphinx.ext.tests.test_automodapi :classes-only: :toctree: api/ Class Inheritance Diagram ^^^^^^^^^^^^^^^^^^^^^^^^^ .. automod-diagram:: astropy_helpers.sphinx.ext.tests.test_automodapi :private-bases: :parts: 1 {empty} This comes after """.format(empty='') # the .format is necessary for editors that remove empty-line whitespace def test_am_replacer_basic(): """ Tests replacing an ".. automodapi::" with the automodapi no-option template """ from ..automodapi import automodapi_replace fakeapp = FakeApp() result = automodapi_replace(am_replacer_str.format(options=''), fakeapp) assert result == am_replacer_basic_expected am_replacer_noinh_expected = """ This comes before astropy_helpers.sphinx.ext.tests.test_automodapi Module ------------------------------------------------------- .. automodule:: astropy_helpers.sphinx.ext.tests.test_automodapi Functions ^^^^^^^^^ .. automodsumm:: astropy_helpers.sphinx.ext.tests.test_automodapi :functions-only: :toctree: api/ Classes ^^^^^^^ .. automodsumm:: astropy_helpers.sphinx.ext.tests.test_automodapi :classes-only: :toctree: api/ This comes after """.format(empty='') def test_am_replacer_noinh(): """ Tests replacing an ".. automodapi::" with no-inheritance-diagram option """ from ..automodapi import automodapi_replace fakeapp = FakeApp() ops = ['', ':no-inheritance-diagram:'] ostr = '\n '.join(ops) result = automodapi_replace(am_replacer_str.format(options=ostr), fakeapp) assert result == am_replacer_noinh_expected am_replacer_titleandhdrs_expected = """ This comes before astropy_helpers.sphinx.ext.tests.test_automodapi Module &&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&& .. automodule:: astropy_helpers.sphinx.ext.tests.test_automodapi Functions ********* .. 
automodsumm:: astropy_helpers.sphinx.ext.tests.test_automodapi :functions-only: :toctree: api/ Classes ******* .. automodsumm:: astropy_helpers.sphinx.ext.tests.test_automodapi :classes-only: :toctree: api/ Class Inheritance Diagram ************************* .. automod-diagram:: astropy_helpers.sphinx.ext.tests.test_automodapi :private-bases: :parts: 1 {empty} This comes after """.format(empty='') def test_am_replacer_titleandhdrs(): """ Tests replacing an ".. automodapi::" entry with title-setting and header character options. """ from ..automodapi import automodapi_replace fakeapp = FakeApp() ops = ['', ':title: A new title', ':headings: &*'] ostr = '\n '.join(ops) result = automodapi_replace(am_replacer_str.format(options=ostr), fakeapp) assert result == am_replacer_titleandhdrs_expected am_replacer_nomain_str = """ This comes before .. automodapi:: astropy_helpers.sphinx.ext.automodapi :no-main-docstr: This comes after """ am_replacer_nomain_expected = """ This comes before astropy_helpers.sphinx.ext.automodapi Module -------------------------------------------- Functions ^^^^^^^^^ .. automodsumm:: astropy_helpers.sphinx.ext.automodapi :functions-only: :toctree: api/ This comes after """.format(empty='') def test_am_replacer_nomain(): """ Tests replacing an ".. automodapi::" with "no-main-docstring" . """ from ..automodapi import automodapi_replace fakeapp = FakeApp() result = automodapi_replace(am_replacer_nomain_str, fakeapp) assert result == am_replacer_nomain_expected am_replacer_skip_str = """ This comes before .. automodapi:: astropy_helpers.sphinx.ext.automodapi :skip: something1 :skip: something2 This comes after """ am_replacer_skip_expected = """ This comes before astropy_helpers.sphinx.ext.automodapi Module -------------------------------------------- .. automodule:: astropy_helpers.sphinx.ext.automodapi Functions ^^^^^^^^^ .. automodsumm:: astropy_helpers.sphinx.ext.automodapi :functions-only: :toctree: api/ :skip: something1,something2 This comes after """.format(empty='') def test_am_replacer_skip(): """ Tests using the ":skip: option in an ".. automodapi::" . """ from ..automodapi import automodapi_replace fakeapp = FakeApp() result = automodapi_replace(am_replacer_skip_str, fakeapp) assert result == am_replacer_skip_expected am_replacer_invalidop_str = """ This comes before .. automodapi:: astropy_helpers.sphinx.ext.automodapi :invalid-option: This comes after """ def test_am_replacer_invalidop(): """ Tests that a sphinx warning is produced with an invalid option. """ from ..automodapi import automodapi_replace fakeapp = FakeApp() automodapi_replace(am_replacer_invalidop_str, fakeapp) expected_warnings = [('Found additional options invalid-option in ' 'automodapi.', None)] assert fakeapp.warnings == expected_warnings am_replacer_cython_str = """ This comes before .. automodapi:: _eva_.unit02 {options} This comes after """ am_replacer_cython_expected = """ This comes before _eva_.unit02 Module ------------------- .. automodule:: _eva_.unit02 Functions ^^^^^^^^^ .. automodsumm:: _eva_.unit02 :functions-only: :toctree: api/ This comes after """.format(empty='') def test_am_replacer_cython(cython_testpackage): """ Tests replacing an ".. automodapi::" for a Cython module. 
""" from ..automodapi import automodapi_replace fakeapp = FakeApp() result = automodapi_replace(am_replacer_cython_str.format(options=''), fakeapp) assert result == am_replacer_cython_expected spectral-cube-0.3.1/astropy_helpers/astropy_helpers/sphinx/ext/tests/test_automodsumm.py0000644000077000000240000000456412533471373032212 0ustar adamstaff00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst import sys import pytest from . import * from ....utils import iteritems pytest.importorskip('sphinx') # skips these tests if sphinx not present class FakeEnv(object): """ Mocks up a sphinx env setting construct for automodapi tests """ def __init__(self, **kwargs): for k, v in iteritems(kwargs): setattr(self, k, v) class FakeBuilder(object): """ Mocks up a sphinx builder setting construct for automodapi tests """ def __init__(self, **kwargs): self.env = FakeEnv(**kwargs) class FakeApp(object): """ Mocks up a `sphinx.application.Application` object for automodapi tests """ def __init__(self, srcdir, automodapipresent=True): self.builder = FakeBuilder(srcdir=srcdir) self.info = [] self.warnings = [] self._extensions = [] if automodapipresent: self._extensions.append('astropy_helpers.sphinx.ext.automodapi') def info(self, msg, loc): self.info.append((msg, loc)) def warn(self, msg, loc): self.warnings.append((msg, loc)) ams_to_asmry_str = """ Before .. automodsumm:: astropy_helpers.sphinx.ext.automodsumm :p: And After """ ams_to_asmry_expected = """\ .. currentmodule:: astropy_helpers.sphinx.ext.automodsumm .. autosummary:: :p: Automoddiagram Automodsumm automodsumm_to_autosummary_lines generate_automodsumm_docs process_automodsumm_generation setup """ def test_ams_to_asmry(tmpdir): from ..automodsumm import automodsumm_to_autosummary_lines fi = tmpdir.join('automodsumm.rst') fi.write(ams_to_asmry_str) fakeapp = FakeApp(srcdir='') resultlines = automodsumm_to_autosummary_lines(str(fi), fakeapp) assert '\n'.join(resultlines) == ams_to_asmry_expected ams_cython_str = """ Before .. automodsumm:: _eva_.unit02 :functions-only: :p: And After """ ams_cython_expected = """\ .. currentmodule:: _eva_.unit02 .. autosummary:: :p: pilot """ def test_ams_cython(tmpdir, cython_testpackage): from ..automodsumm import automodsumm_to_autosummary_lines fi = tmpdir.join('automodsumm.rst') fi.write(ams_cython_str) fakeapp = FakeApp(srcdir='') resultlines = automodsumm_to_autosummary_lines(str(fi), fakeapp) assert '\n'.join(resultlines) == ams_cython_expected spectral-cube-0.3.1/astropy_helpers/astropy_helpers/sphinx/ext/tests/test_docscrape.py0000644000077000000240000004327112412505144031567 0ustar adamstaff00000000000000# -*- encoding:utf-8 -*- from __future__ import division, absolute_import, print_function import sys, textwrap from ..docscrape import NumpyDocString, FunctionDoc, ClassDoc from ..docscrape_sphinx import SphinxDocString, SphinxClassDoc if sys.version_info[0] >= 3: sixu = lambda s: s else: sixu = lambda s: unicode(s, 'unicode_escape') doc_txt = '''\ numpy.multivariate_normal(mean, cov, shape=None, spam=None) Draw values from a multivariate normal distribution with specified mean and covariance. The multivariate normal or Gaussian distribution is a generalisation of the one-dimensional normal distribution to higher dimensions. Parameters ---------- mean : (N,) ndarray Mean of the N-dimensional distribution. .. math:: (1+2+3)/3 cov : (N, N) ndarray Covariance matrix of the distribution. 
shape : tuple of ints Given a shape of, for example, (m,n,k), m*n*k samples are generated, and packed in an m-by-n-by-k arrangement. Because each sample is N-dimensional, the output shape is (m,n,k,N). Returns ------- out : ndarray The drawn samples, arranged according to `shape`. If the shape given is (m,n,...), then the shape of `out` is is (m,n,...,N). In other words, each entry ``out[i,j,...,:]`` is an N-dimensional value drawn from the distribution. list of str This is not a real return value. It exists to test anonymous return values. Other Parameters ---------------- spam : parrot A parrot off its mortal coil. Raises ------ RuntimeError Some error Warns ----- RuntimeWarning Some warning Warnings -------- Certain warnings apply. Notes ----- Instead of specifying the full covariance matrix, popular approximations include: - Spherical covariance (`cov` is a multiple of the identity matrix) - Diagonal covariance (`cov` has non-negative elements only on the diagonal) This geometrical property can be seen in two dimensions by plotting generated data-points: >>> mean = [0,0] >>> cov = [[1,0],[0,100]] # diagonal covariance, points lie on x or y-axis >>> x,y = multivariate_normal(mean,cov,5000).T >>> plt.plot(x,y,'x'); plt.axis('equal'); plt.show() Note that the covariance matrix must be symmetric and non-negative definite. References ---------- .. [1] A. Papoulis, "Probability, Random Variables, and Stochastic Processes," 3rd ed., McGraw-Hill Companies, 1991 .. [2] R.O. Duda, P.E. Hart, and D.G. Stork, "Pattern Classification," 2nd ed., Wiley, 2001. See Also -------- some, other, funcs otherfunc : relationship Examples -------- >>> mean = (1,2) >>> cov = [[1,0],[1,0]] >>> x = multivariate_normal(mean,cov,(3,3)) >>> print x.shape (3, 3, 2) The following is probably true, given that 0.6 is roughly twice the standard deviation: >>> print list( (x[0,0,:] - mean) < 0.6 ) [True, True] .. 
index:: random :refguide: random;distributions, random;gauss ''' doc = NumpyDocString(doc_txt) def test_signature(): assert doc['Signature'].startswith('numpy.multivariate_normal(') assert doc['Signature'].endswith('spam=None)') def test_summary(): assert doc['Summary'][0].startswith('Draw values') assert doc['Summary'][-1].endswith('covariance.') def test_extended_summary(): assert doc['Extended Summary'][0].startswith('The multivariate normal') def test_parameters(): assert len(doc['Parameters']) == 3 assert [n for n,_,_ in doc['Parameters']] == ['mean','cov','shape'] arg, arg_type, desc = doc['Parameters'][1] assert arg_type == '(N, N) ndarray' assert desc[0].startswith('Covariance matrix') assert doc['Parameters'][0][-1][-2] == ' (1+2+3)/3' def test_other_parameters(): assert len(doc['Other Parameters']) == 1 assert [n for n,_,_ in doc['Other Parameters']] == ['spam'] arg, arg_type, desc = doc['Other Parameters'][0] assert arg_type == 'parrot' assert desc[0].startswith('A parrot off its mortal coil') def test_returns(): assert len(doc['Returns']) == 2 arg, arg_type, desc = doc['Returns'][0] assert arg == 'out' assert arg_type == 'ndarray' assert desc[0].startswith('The drawn samples') assert desc[-1].endswith('distribution.') arg, arg_type, desc = doc['Returns'][1] assert arg == 'list of str' assert arg_type == '' assert desc[0].startswith('This is not a real') assert desc[-1].endswith('anonymous return values.') def test_notes(): assert doc['Notes'][0].startswith('Instead') assert doc['Notes'][-1].endswith('definite.') assert len(doc['Notes']) == 17 def test_references(): assert doc['References'][0].startswith('..') assert doc['References'][-1].endswith('2001.') def test_examples(): assert doc['Examples'][0].startswith('>>>') assert doc['Examples'][-1].endswith('True]') def test_index(): assert doc['index']['default'] == 'random' assert len(doc['index']) == 2 assert len(doc['index']['refguide']) == 2 def non_blank_line_by_line_compare(a,b): a = textwrap.dedent(a) b = textwrap.dedent(b) a = [l.rstrip() for l in a.split('\n') if l.strip()] b = [l.rstrip() for l in b.split('\n') if l.strip()] for n,line in enumerate(a): if not line == b[n]: raise AssertionError("Lines %s of a and b differ: " "\n>>> %s\n<<< %s\n" % (n,line,b[n])) def test_str(): non_blank_line_by_line_compare(str(doc), """numpy.multivariate_normal(mean, cov, shape=None, spam=None) Draw values from a multivariate normal distribution with specified mean and covariance. The multivariate normal or Gaussian distribution is a generalisation of the one-dimensional normal distribution to higher dimensions. Parameters ---------- mean : (N,) ndarray Mean of the N-dimensional distribution. .. math:: (1+2+3)/3 cov : (N, N) ndarray Covariance matrix of the distribution. shape : tuple of ints Given a shape of, for example, (m,n,k), m*n*k samples are generated, and packed in an m-by-n-by-k arrangement. Because each sample is N-dimensional, the output shape is (m,n,k,N). Returns ------- out : ndarray The drawn samples, arranged according to `shape`. If the shape given is (m,n,...), then the shape of `out` is is (m,n,...,N). In other words, each entry ``out[i,j,...,:]`` is an N-dimensional value drawn from the distribution. list of str This is not a real return value. It exists to test anonymous return values. Other Parameters ---------------- spam : parrot A parrot off its mortal coil. Raises ------ RuntimeError Some error Warns ----- RuntimeWarning Some warning Warnings -------- Certain warnings apply. 
See Also -------- `some`_, `other`_, `funcs`_ `otherfunc`_ relationship Notes ----- Instead of specifying the full covariance matrix, popular approximations include: - Spherical covariance (`cov` is a multiple of the identity matrix) - Diagonal covariance (`cov` has non-negative elements only on the diagonal) This geometrical property can be seen in two dimensions by plotting generated data-points: >>> mean = [0,0] >>> cov = [[1,0],[0,100]] # diagonal covariance, points lie on x or y-axis >>> x,y = multivariate_normal(mean,cov,5000).T >>> plt.plot(x,y,'x'); plt.axis('equal'); plt.show() Note that the covariance matrix must be symmetric and non-negative definite. References ---------- .. [1] A. Papoulis, "Probability, Random Variables, and Stochastic Processes," 3rd ed., McGraw-Hill Companies, 1991 .. [2] R.O. Duda, P.E. Hart, and D.G. Stork, "Pattern Classification," 2nd ed., Wiley, 2001. Examples -------- >>> mean = (1,2) >>> cov = [[1,0],[1,0]] >>> x = multivariate_normal(mean,cov,(3,3)) >>> print x.shape (3, 3, 2) The following is probably true, given that 0.6 is roughly twice the standard deviation: >>> print list( (x[0,0,:] - mean) < 0.6 ) [True, True] .. index:: random :refguide: random;distributions, random;gauss""") def test_sphinx_str(): sphinx_doc = SphinxDocString(doc_txt) non_blank_line_by_line_compare(str(sphinx_doc), """ .. index:: random single: random;distributions, random;gauss Draw values from a multivariate normal distribution with specified mean and covariance. The multivariate normal or Gaussian distribution is a generalisation of the one-dimensional normal distribution to higher dimensions. :Parameters: **mean** : (N,) ndarray Mean of the N-dimensional distribution. .. math:: (1+2+3)/3 **cov** : (N, N) ndarray Covariance matrix of the distribution. **shape** : tuple of ints Given a shape of, for example, (m,n,k), m*n*k samples are generated, and packed in an m-by-n-by-k arrangement. Because each sample is N-dimensional, the output shape is (m,n,k,N). :Returns: **out** : ndarray The drawn samples, arranged according to `shape`. If the shape given is (m,n,...), then the shape of `out` is is (m,n,...,N). In other words, each entry ``out[i,j,...,:]`` is an N-dimensional value drawn from the distribution. list of str This is not a real return value. It exists to test anonymous return values. :Other Parameters: **spam** : parrot A parrot off its mortal coil. :Raises: **RuntimeError** Some error :Warns: **RuntimeWarning** Some warning .. warning:: Certain warnings apply. .. seealso:: :obj:`some`, :obj:`other`, :obj:`funcs` :obj:`otherfunc` relationship .. rubric:: Notes Instead of specifying the full covariance matrix, popular approximations include: - Spherical covariance (`cov` is a multiple of the identity matrix) - Diagonal covariance (`cov` has non-negative elements only on the diagonal) This geometrical property can be seen in two dimensions by plotting generated data-points: >>> mean = [0,0] >>> cov = [[1,0],[0,100]] # diagonal covariance, points lie on x or y-axis >>> x,y = multivariate_normal(mean,cov,5000).T >>> plt.plot(x,y,'x'); plt.axis('equal'); plt.show() Note that the covariance matrix must be symmetric and non-negative definite. .. rubric:: References .. [1] A. Papoulis, "Probability, Random Variables, and Stochastic Processes," 3rd ed., McGraw-Hill Companies, 1991 .. [2] R.O. Duda, P.E. Hart, and D.G. Stork, "Pattern Classification," 2nd ed., Wiley, 2001. .. only:: latex [1]_, [2]_ .. 
rubric:: Examples >>> mean = (1,2) >>> cov = [[1,0],[1,0]] >>> x = multivariate_normal(mean,cov,(3,3)) >>> print x.shape (3, 3, 2) The following is probably true, given that 0.6 is roughly twice the standard deviation: >>> print list( (x[0,0,:] - mean) < 0.6 ) [True, True] """) doc2 = NumpyDocString(""" Returns array of indices of the maximum values of along the given axis. Parameters ---------- a : {array_like} Array to look in. axis : {None, integer} If None, the index is into the flattened array, otherwise along the specified axis""") def test_parameters_without_extended_description(): assert len(doc2['Parameters']) == 2 doc3 = NumpyDocString(""" my_signature(*params, **kwds) Return this and that. """) def test_escape_stars(): signature = str(doc3).split('\n')[0] signature == 'my_signature(\*params, \*\*kwds)' doc4 = NumpyDocString( """a.conj() Return an array with all complex-valued elements conjugated.""") def test_empty_extended_summary(): assert doc4['Extended Summary'] == [] doc5 = NumpyDocString( """ a.something() Raises ------ LinAlgException If array is singular. Warns ----- SomeWarning If needed """) def test_raises(): assert len(doc5['Raises']) == 1 name,_,desc = doc5['Raises'][0] assert name == 'LinAlgException' assert desc == ['If array is singular.'] def test_warns(): assert len(doc5['Warns']) == 1 name,_,desc = doc5['Warns'][0] assert name == 'SomeWarning' assert desc == ['If needed'] def test_see_also(): doc6 = NumpyDocString( """ z(x,theta) See Also -------- func_a, func_b, func_c func_d : some equivalent func foo.func_e : some other func over multiple lines func_f, func_g, :meth:`func_h`, func_j, func_k :obj:`baz.obj_q` :class:`class_j`: fubar foobar """) assert len(doc6['See Also']) == 12 for func, desc, role in doc6['See Also']: if func in ('func_a', 'func_b', 'func_c', 'func_f', 'func_g', 'func_h', 'func_j', 'func_k', 'baz.obj_q'): assert(not desc) else: assert(desc) if func == 'func_h': assert role == 'meth' elif func == 'baz.obj_q': assert role == 'obj' elif func == 'class_j': assert role == 'class' else: assert role is None if func == 'func_d': assert desc == ['some equivalent func'] elif func == 'foo.func_e': assert desc == ['some other func over', 'multiple lines'] elif func == 'class_j': assert desc == ['fubar', 'foobar'] def test_see_also_print(): class Dummy(object): """ See Also -------- func_a, func_b func_c : some relationship goes here func_d """ pass obj = Dummy() s = str(FunctionDoc(obj, role='func')) assert(':func:`func_a`, :func:`func_b`' in s) assert(' some relationship' in s) assert(':func:`func_d`' in s) doc7 = NumpyDocString(""" Doc starts on second line. """) def test_empty_first_line(): assert doc7['Summary'][0].startswith('Doc starts') def test_no_summary(): str(SphinxDocString(""" Parameters ----------""")) def test_unicode(): doc = SphinxDocString(""" öäöäöäöäöåååå öäöäöäööäååå Parameters ---------- ååå : äää ööö Returns ------- ååå : ööö äää """) assert isinstance(doc['Summary'][0], str) assert doc['Summary'][0] == 'öäöäöäöäöåååå' def test_plot_examples(): cfg = dict(use_plots=True) doc = SphinxDocString(""" Examples -------- >>> import matplotlib.pyplot as plt >>> plt.plot([1,2,3],[4,5,6]) >>> plt.show() """, config=cfg) assert 'plot::' in str(doc), str(doc) doc = SphinxDocString(""" Examples -------- .. plot:: import matplotlib.pyplot as plt plt.plot([1,2,3],[4,5,6]) plt.show() """, config=cfg) assert str(doc).count('plot::') == 1, str(doc) def test_class_members(): class Dummy(object): """ Dummy class. 
""" def spam(self, a, b): """Spam\n\nSpam spam.""" pass def ham(self, c, d): """Cheese\n\nNo cheese.""" pass @property def spammity(self): """Spammity index""" return 0.95 class Ignorable(object): """local class, to be ignored""" pass for cls in (ClassDoc, SphinxClassDoc): doc = cls(Dummy, config=dict(show_class_members=False)) assert 'Methods' not in str(doc), (cls, str(doc)) assert 'spam' not in str(doc), (cls, str(doc)) assert 'ham' not in str(doc), (cls, str(doc)) assert 'spammity' not in str(doc), (cls, str(doc)) assert 'Spammity index' not in str(doc), (cls, str(doc)) doc = cls(Dummy, config=dict(show_class_members=True)) assert 'Methods' in str(doc), (cls, str(doc)) assert 'spam' in str(doc), (cls, str(doc)) assert 'ham' in str(doc), (cls, str(doc)) assert 'spammity' in str(doc), (cls, str(doc)) if cls is SphinxClassDoc: assert '.. autosummary::' in str(doc), str(doc) else: assert 'Spammity index' in str(doc), str(doc) def test_duplicate_signature(): # Duplicate function signatures occur e.g. in ufuncs, when the # automatic mechanism adds one, and a more detailed comes from the # docstring itself. doc = NumpyDocString( """ z(x1, x2) z(a, theta) """) assert doc['Signature'].strip() == 'z(a, theta)' class_doc_txt = """ Foo Parameters ---------- f : callable ``f(t, y, *f_args)`` Aaa. jac : callable ``jac(t, y, *jac_args)`` Bbb. Attributes ---------- t : float Current time. y : ndarray Current variable values. Methods ------- a b c Examples -------- For usage examples, see `ode`. """ def test_class_members_doc(): doc = ClassDoc(None, class_doc_txt) non_blank_line_by_line_compare(str(doc), """ Foo Parameters ---------- f : callable ``f(t, y, *f_args)`` Aaa. jac : callable ``jac(t, y, *jac_args)`` Bbb. Examples -------- For usage examples, see `ode`. Attributes ---------- t : float Current time. y : ndarray Current variable values. Methods ------- a b c .. index:: """) def test_class_members_doc_sphinx(): doc = SphinxClassDoc(None, class_doc_txt) non_blank_line_by_line_compare(str(doc), """ Foo :Parameters: **f** : callable ``f(t, y, *f_args)`` Aaa. **jac** : callable ``jac(t, y, *jac_args)`` Bbb. .. rubric:: Examples For usage examples, see `ode`. .. rubric:: Attributes === ========== t (float) Current time. y (ndarray) Current variable values. === ========== .. rubric:: Methods === ========== a b c === ========== """) spectral-cube-0.3.1/astropy_helpers/astropy_helpers/sphinx/ext/tests/test_utils.py0000644000077000000240000000174312412505144030762 0ustar adamstaff00000000000000#namedtuple is needed for find_mod_objs so it can have a non-local module import sys from collections import namedtuple import pytest from ..utils import find_mod_objs PY3 = sys.version_info[0] >= 3 pytestmark = pytest.mark.skipif("PY3") def test_find_mod_objs(): lnms, fqns, objs = find_mod_objs('astropy_helpers') # this import is after the above call intentionally to make sure # find_mod_objs properly imports astropy on its own import astropy_helpers # just check for astropy.test ... 
other things might be added, so we # shouldn't check that it's the only thing assert lnms == [] lnms, fqns, objs = find_mod_objs( 'astropy_helpers.sphinx.ext.tests.test_utils', onlylocals=False) assert namedtuple in objs lnms, fqns, objs = find_mod_objs( 'astropy_helpers.sphinx.ext.tests.test_utils', onlylocals=True) assert 'namedtuple' not in lnms assert 'collections.namedtuple' not in fqns assert namedtuple not in objs spectral-cube-0.3.1/astropy_helpers/astropy_helpers/sphinx/ext/tocdepthfix.py0000644000077000000240000000124512340434262027741 0ustar adamstaff00000000000000from sphinx import addnodes def fix_toc_entries(app, doctree): # Get the docname; I don't know why this isn't just passed in to the # callback # This seems a bit unreliable as it's undocumented, but it's not "private" # either: docname = app.builder.env.temp_data['docname'] if app.builder.env.metadata[docname].get('tocdepth', 0) != 0: # We need to reprocess any TOC nodes in the doctree and make sure all # the files listed in any TOCs are noted for treenode in doctree.traverse(addnodes.toctree): app.builder.env.note_toctree(docname, treenode) def setup(app): app.connect('doctree-read', fix_toc_entries) spectral-cube-0.3.1/astropy_helpers/astropy_helpers/sphinx/ext/traitsdoc.py0000644000077000000240000001026112412505144027410 0ustar adamstaff00000000000000""" ========= traitsdoc ========= Sphinx extension that handles docstrings in the Numpy standard format, [1] and support Traits [2]. This extension can be used as a replacement for ``numpydoc`` when support for Traits is required. .. [1] http://projects.scipy.org/numpy/wiki/CodingStyleGuidelines#docstring-standard .. [2] http://code.enthought.com/projects/traits/ """ from __future__ import division, absolute_import, print_function import inspect import os import pydoc import collections from . import docscrape from . import docscrape_sphinx from .docscrape_sphinx import SphinxClassDoc, SphinxFunctionDoc, SphinxDocString from . import numpydoc from . import comment_eater class SphinxTraitsDoc(SphinxClassDoc): def __init__(self, cls, modulename='', func_doc=SphinxFunctionDoc): if not inspect.isclass(cls): raise ValueError("Initialise using a class. Got %r" % cls) self._cls = cls if modulename and not modulename.endswith('.'): modulename += '.' 
self._mod = modulename self._name = cls.__name__ self._func_doc = func_doc docstring = pydoc.getdoc(cls) docstring = docstring.split('\n') # De-indent paragraph try: indent = min(len(s) - len(s.lstrip()) for s in docstring if s.strip()) except ValueError: indent = 0 for n,line in enumerate(docstring): docstring[n] = docstring[n][indent:] self._doc = docscrape.Reader(docstring) self._parsed_data = { 'Signature': '', 'Summary': '', 'Description': [], 'Extended Summary': [], 'Parameters': [], 'Returns': [], 'Raises': [], 'Warns': [], 'Other Parameters': [], 'Traits': [], 'Methods': [], 'See Also': [], 'Notes': [], 'References': '', 'Example': '', 'Examples': '', 'index': {} } self._parse() def _str_summary(self): return self['Summary'] + [''] def _str_extended_summary(self): return self['Description'] + self['Extended Summary'] + [''] def __str__(self, indent=0, func_role="func"): out = [] out += self._str_signature() out += self._str_index() + [''] out += self._str_summary() out += self._str_extended_summary() for param_list in ('Parameters', 'Traits', 'Methods', 'Returns','Raises'): out += self._str_param_list(param_list) out += self._str_see_also("obj") out += self._str_section('Notes') out += self._str_references() out += self._str_section('Example') out += self._str_section('Examples') out = self._str_indent(out,indent) return '\n'.join(out) def looks_like_issubclass(obj, classname): """ Return True if the object has a class or superclass with the given class name. Ignores old-style classes. """ t = obj if t.__name__ == classname: return True for klass in t.__mro__: if klass.__name__ == classname: return True return False def get_doc_object(obj, what=None, config=None): if what is None: if inspect.isclass(obj): what = 'class' elif inspect.ismodule(obj): what = 'module' elif isinstance(obj, collections.Callable): what = 'function' else: what = 'object' if what == 'class': doc = SphinxTraitsDoc(obj, '', func_doc=SphinxFunctionDoc, config=config) if looks_like_issubclass(obj, 'HasTraits'): for name, trait, comment in comment_eater.get_class_traits(obj): # Exclude private traits. if not name.startswith('_'): doc['Traits'].append((name, trait, comment.splitlines())) return doc elif what in ('function', 'method'): return SphinxFunctionDoc(obj, '', config=config) else: return SphinxDocString(pydoc.getdoc(obj), config=config) def setup(app): # init numpydoc numpydoc.setup(app, get_doc_object) spectral-cube-0.3.1/astropy_helpers/astropy_helpers/sphinx/ext/utils.py0000644000077000000240000000443312340434262026562 0ustar adamstaff00000000000000import inspect import sys def find_mod_objs(modname, onlylocals=False): """ Returns all the public attributes of a module referenced by name. .. note:: The returned list *not* include subpackages or modules of `modname`,nor does it include private attributes (those that beginwith '_' or are not in `__all__`). Parameters ---------- modname : str The name of the module to search. onlylocals : bool If True, only attributes that are either members of `modname` OR one of its modules or subpackages will be included. Returns ------- localnames : list of str A list of the names of the attributes as they are named in the module `modname` . fqnames : list of str A list of the full qualified names of the attributes (e.g., ``astropy.utils.misc.find_mod_objs``). For attributes that are simple variables, this is based on the local name, but for functions or classes it can be different if they are actually defined elsewhere and just referenced in `modname`. 
objs : list of objects A list of the actual attributes themselves (in the same order as the other arguments) """ __import__(modname) mod = sys.modules[modname] if hasattr(mod, '__all__'): pkgitems = [(k, mod.__dict__[k]) for k in mod.__all__] else: pkgitems = [(k, mod.__dict__[k]) for k in dir(mod) if k[0] != '_'] # filter out modules and pull the names and objs out ismodule = inspect.ismodule localnames = [k for k, v in pkgitems if not ismodule(v)] objs = [v for k, v in pkgitems if not ismodule(v)] # fully qualified names can be determined from the object's module fqnames = [] for obj, lnm in zip(objs, localnames): if hasattr(obj, '__module__') and hasattr(obj, '__name__'): fqnames.append(obj.__module__ + '.' + obj.__name__) else: fqnames.append(modname + '.' + lnm) if onlylocals: valids = [fqn.startswith(modname) for fqn in fqnames] localnames = [e for i, e in enumerate(localnames) if valids[i]] fqnames = [e for i, e in enumerate(fqnames) if valids[i]] objs = [e for i, e in enumerate(objs) if valids[i]] return localnames, fqnames, objs spectral-cube-0.3.1/astropy_helpers/astropy_helpers/sphinx/ext/viewcode.py0000644000077000000240000001752512533471373027245 0ustar adamstaff00000000000000# -*- coding: utf-8 -*- """ sphinx.ext.viewcode ~~~~~~~~~~~~~~~~~~~ Add links to module code in Python object descriptions. :copyright: Copyright 2007-2011 by the Sphinx team, see AUTHORS. :license: BSD, see LICENSE for details. Patched using patch in https://bitbucket.org/birkenfeld/sphinx/issue/623/extension-viewcode-fails-with-function on 21 Aug 2013 by Kyle H Barbary """ from docutils import nodes from sphinx import addnodes from sphinx.locale import _ from sphinx.pycode import ModuleAnalyzer from sphinx.util.inspect import safe_getattr from sphinx.util.nodes import make_refnode import sys import traceback if sys.version < '3': text_type = unicode else: text_type = str from ...utils import iteritems def doctree_read(app, doctree): env = app.builder.env if not hasattr(env, '_viewcode_modules'): env._viewcode_modules = {} def get_full_modname(modname, attribute): try: __import__(modname) except Exception as error: if not app.quiet: app.info(traceback.format_exc().rstrip()) app.warn('viewcode can\'t import %s, failed with error "%s"' % (modname, error)) return None module = sys.modules[modname] try: # Allow an attribute to have multiple parts and incidentally allow # repeated .s in the attribute. 
attr = attribute.split('.') value = module for attr in attribute.split('.'): if attr: value = safe_getattr(value, attr) except AttributeError: app.warn('Didn\'t find %s in %s' % (attribute, module.__name__)) return None else: return safe_getattr(value, '__module__', None) def has_tag(modname, fullname, docname, refname): entry = env._viewcode_modules.get(modname, None) if entry is None: try: analyzer = ModuleAnalyzer.for_module(modname) except Exception: env._viewcode_modules[modname] = False return analyzer.find_tags() if not isinstance(analyzer.code, text_type): code = analyzer.code.decode(analyzer.encoding) else: code = analyzer.code entry = code, analyzer.tags, {}, refname env._viewcode_modules[modname] = entry elif entry is False: return _, tags, used, _ = entry if fullname in tags: used[fullname] = docname return True for objnode in doctree.traverse(addnodes.desc): if objnode.get('domain') != 'py': continue names = set() for signode in objnode: if not isinstance(signode, addnodes.desc_signature): continue modname = signode.get('module') fullname = signode.get('fullname') refname = modname if env.config.viewcode_import: modname = get_full_modname(modname, fullname) if not modname: continue if not has_tag(modname, fullname, env.docname, refname): continue if fullname in names: # only one link per name, please continue names.add(fullname) pagename = '_modules/' + modname.replace('.', '/') onlynode = addnodes.only(expr='html') onlynode += addnodes.pending_xref( '', reftype='viewcode', refdomain='std', refexplicit=False, reftarget=pagename, refid=fullname, refdoc=env.docname) onlynode[0] += nodes.inline('', _('[source]'), classes=['viewcode-link']) signode += onlynode def missing_reference(app, env, node, contnode): # resolve our "viewcode" reference nodes -- they need special treatment if node['reftype'] == 'viewcode': return make_refnode(app.builder, node['refdoc'], node['reftarget'], node['refid'], contnode) def collect_pages(app): env = app.builder.env if not hasattr(env, '_viewcode_modules'): return highlighter = app.builder.highlighter urito = app.builder.get_relative_uri modnames = set(env._viewcode_modules) app.builder.info(' (%d module code pages)' % len(env._viewcode_modules), nonl=1) for modname, entry in iteritems(env._viewcode_modules): if not entry: continue code, tags, used, refname = entry # construct a page name for the highlighted source pagename = '_modules/' + modname.replace('.', '/') # highlight the source using the builder's highlighter highlighted = highlighter.highlight_block(code, 'python', linenos=False) # split the code into lines lines = highlighted.splitlines() # split off wrap markup from the first line of the actual code before, after = lines[0].split('
<pre>')
        lines[0:1] = [before + '<pre>', after]
        # nothing to do for the last line; it always starts with </pre>
anyway # now that we have code lines (starting at index 1), insert anchors for # the collected tags (HACK: this only works if the tag boundaries are # properly nested!) maxindex = len(lines) - 1 for name, docname in iteritems(used): type, start, end = tags[name] backlink = urito(pagename, docname) + '#' + refname + '.' + name lines[start] = ( '
<div class="viewcode-block" id="%s"><a class="viewcode-back" href="%s">%s</a>' % (name, backlink, _('[docs]')) + lines[start]) lines[min(end - 1, maxindex)] += '</div>' # try to find parents (for submodules) parents = [] parent = modname while '.' in parent: parent = parent.rsplit('.', 1)[0] if parent in modnames: parents.append({ 'link': urito(pagename, '_modules/' + parent.replace('.', '/')), 'title': parent}) parents.append({'link': urito(pagename, '_modules/index'), 'title': _('Module code')}) parents.reverse() # putting it all together context = { 'parents': parents, 'title': modname, 'body': _('<h1>Source code for %s</h1>
') % modname + \ '\n'.join(lines) } yield (pagename, context, 'page.html') if not modnames: return app.builder.info(' _modules/index') html = ['\n'] # the stack logic is needed for using nested lists for submodules stack = [''] for modname in sorted(modnames): if modname.startswith(stack[-1]): stack.append(modname + '.') html.append('
<ul>') else: stack.pop() while not modname.startswith(stack[-1]): stack.pop() html.append('</ul>') stack.append(modname + '.') html.append('<li><a href="%s">%s</a></li>\n' % ( urito('_modules/index', '_modules/' + modname.replace('.', '/')), modname)) html.append('</ul>' * (len(stack) - 1)) context = { 'title': _('Overview: module code'), 'body': _('<h1>All modules for which code is available</h1>
    ') + \ ''.join(html), } yield ('_modules/index', context, 'page.html') def setup(app): app.add_config_value('viewcode_import', True, False) app.connect('doctree-read', doctree_read) app.connect('html-collect-pages', collect_pages) app.connect('missing-reference', missing_reference) #app.add_config_value('viewcode_include_modules', [], 'env') #app.add_config_value('viewcode_exclude_modules', [], 'env') spectral-cube-0.3.1/astropy_helpers/astropy_helpers/sphinx/local/0000755000077000000240000000000012654610601025337 5ustar adamstaff00000000000000spectral-cube-0.3.1/astropy_helpers/astropy_helpers/sphinx/local/python3links.inv0000644000077000000240000000074312533471373030535 0ustar adamstaff00000000000000# Sphinx inventory version 2 # Project: Python # Version: 3.4 # The remainder of this file is compressed using zlib. [zlib-compressed inventory data omitted] spectral-cube-0.3.1/astropy_helpers/astropy_helpers/sphinx/local/python3links.txt0000644000077000000240000000233612533471373030550 0ustar adamstaff00000000000000# Sphinx inventory version 2 # Project: Python # Version: 3.4 # The remainder of this file should be compressed using zlib. bytes py:function -1 library/functions.html#bytes - TimeoutError py:exception -1 library/exceptions.html#TimeoutError - builtins.object py:class -1 library/functions.html#object - builtins.list py:class -1 library/functions.html#list - builtins.type py:class -1 library/functions.html#type - builtins.classmethod py:class -1 library/functions.html#classmethod - builtins.SyntaxWarning py:exception -1 library/exceptions.html#SyntaxWarning - builtins.RuntimeWarning py:exception -1 library/exceptions.html#RuntimeWarning - builtins.ValueError py:exception -1 library/exceptions.html#ValueError - object py:function -1 library/functions.html#object - object py:class -1 library/functions.html#object - urllib.request.urlopen py:function -1 library/urllib.request.html#urllib.request.urlopen - concurrent.futures.Future py:class -1 library/concurrent.futures.html#concurrent.futures.Future - concurrent.futures.ThreadPoolExecutor py:class -1 library/concurrent.futures.html#concurrent.futures.ThreadPoolExecutor - queue.Queue py:class -1 library/queue.html#queue.Queue - print() py:function -1 library/functions.html#print - spectral-cube-0.3.1/astropy_helpers/astropy_helpers/sphinx/setup_package.py0000644000077000000240000000050412412505144027426 0ustar adamstaff00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst def get_package_data(): # Install the theme files return { 'astropy_helpers.sphinx': [ 'ext/templates/*/*', 'local/*.inv', 'themes/bootstrap-astropy/*.*', 'themes/bootstrap-astropy/static/*.*']} spectral-cube-0.3.1/astropy_helpers/astropy_helpers/sphinx/themes/0000755000077000000240000000000012654610601025532 5ustar adamstaff00000000000000spectral-cube-0.3.1/astropy_helpers/astropy_helpers/sphinx/themes/bootstrap-astropy/0000755000077000000240000000000012654610601031246 5ustar adamstaff00000000000000spectral-cube-0.3.1/astropy_helpers/astropy_helpers/sphinx/themes/bootstrap-astropy/globaltoc.html0000644000077000000240000000011112340434262034072 0ustar adamstaff00000000000000<h3>Table of Contents</h3>

    {{ toctree(maxdepth=-1, titles_only=true) }} spectral-cube-0.3.1/astropy_helpers/astropy_helpers/sphinx/themes/bootstrap-astropy/layout.html0000644000077000000240000000655012412505144033454 0ustar adamstaff00000000000000{% extends "basic/layout.html" %} {# Collapsible sidebar script from default/layout.html in Sphinx #} {% set script_files = script_files + ['_static/sidebar.js'] %} {# Add the google webfonts needed for the logo #} {% block extrahead %} {% if not embedded %}{% endif %} {% endblock %} {% block header %}
    {{ theme_logotext1 }}{{ theme_logotext2 }}{{ theme_logotext3 }}
    • Index
    • Modules
    • {% block sidebarsearch %} {% include "searchbox.html" %} {% endblock %}
    {% endblock %} {% block relbar1 %} {% endblock %} {# Silence the bottom relbar. #} {% block relbar2 %}{% endblock %} {%- block footer %}

    {%- if edit_on_github %} {{ edit_on_github_page_message }}   {%- endif %} {%- if show_source and has_source and sourcename %} {{ _('Page Source') }} {%- endif %}   Back to Top

    {%- if show_copyright %} {%- if hasdoc('copyright') %} {% trans path=pathto('copyright'), copyright=copyright|e %}© Copyright {{ copyright }}.{% endtrans %}
    {%- else %} {% trans copyright=copyright|e %}© Copyright {{ copyright }}.{% endtrans %}
    {%- endif %} {%- endif %} {%- if show_sphinx %} {% trans sphinx_version=sphinx_version|e %}Created using Sphinx {{ sphinx_version }}.{% endtrans %}   {%- endif %} {%- if last_updated %} {% trans last_updated=last_updated|e %}Last built {{ last_updated }}.{% endtrans %}
    {%- endif %}

    {%- endblock %} spectral-cube-0.3.1/astropy_helpers/astropy_helpers/sphinx/themes/bootstrap-astropy/localtoc.html0000644000077000000240000000004212340434262033727 0ustar adamstaff00000000000000

<h3>Page Contents</h3>

    {{ toc }} spectral-cube-0.3.1/astropy_helpers/astropy_helpers/sphinx/themes/bootstrap-astropy/searchbox.html0000644000077000000240000000042012340434262034105 0ustar adamstaff00000000000000{%- if pagename != "search" %}
    {%- endif %} spectral-cube-0.3.1/astropy_helpers/astropy_helpers/sphinx/themes/bootstrap-astropy/static/0000755000077000000240000000000012654610601032535 5ustar adamstaff00000000000000spectral-cube-0.3.1/astropy_helpers/astropy_helpers/sphinx/themes/bootstrap-astropy/static/astropy_linkout.svg [SVG image markup omitted] spectral-cube-0.3.1/astropy_helpers/astropy_helpers/sphinx/themes/bootstrap-astropy/static/astropy_linkout_20.png [binary PNG image data omitted] spectral-cube-0.3.1/astropy_helpers/astropy_helpers/sphinx/themes/bootstrap-astropy/static/astropy_logo.ico [binary ICO image data omitted]
ÿ+ÿþ»!ýÿ!ýÿ!ýÿŠ™þÿÿÿÿÿÿÿÿÿôöÿÿÈÏÿÿ0Jýÿ ýÿ ýÿ ýÿ ýÿ ýÿ ýÿ ýÿ ýÿ ýÿ ýÿ ýÿüÛÿ+%ý‹%ýÿ%ýÿ%ýÿp…þÿÿÿÿÿÿÿÿÿúûÿÿs‡þÿ(ýÿ%ýÿ%ýÿ%ýÿ%ýÿ%ýÿ%ýÿ%ýÿ%ýÿ%ýÿ%ýÿ%ýÿ$ýÿ$ýÿ$úí"ÿ5+ÿ0*úý*ýÿ*ýÿ>^ýÿüüÿÿÿÿÿÿõ÷ÿÿ@_ýÿ)ýÿ)ýÿ)ýÿ)ýÿ$Gýÿyþÿ¥³þÿ»Æþÿ¯¼þÿ¢þÿPlþÿ 1ýÿ)ýÿ)ýÿ)ýÿ)ýÿ)úð'ÿ..þ¶.þÿ.þÿ0þÿÌÕÿÿÿÿÿÿþþÿÿXvþÿ.þÿ.þÿ.þÿCþÿ²Àÿÿþþÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿêîÿÿ{’þÿ3þÿ-þÿ-þÿ-þÿ*üÞ$ÿ3ÿ#3ûþ3þÿ3þÿXyþÿÿÿÿÿÿÿÿÿ¤¶ÿÿ2þÿ2þÿ2þÿ8_þÿìðÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÍ×ÿÿ#Nþÿ2þÿ2þÿ2þÿ2ýž7ÿt7þÿ7þÿ7þÿ·ÇÿÿÿÿÿÿúûÿÿOþÿ7þÿ7þÿOþÿèíÿÿÿÿÿÿÿÿÿÿÓÜÿÿ`‚þÿKþÿ6þÿ =þÿ;eþÿ§ÿÿóöÿÿÿÿÿÿÿÿÿÿàçÿÿJþÿ6þÿ6þÿ6þÿ7ÿO:þ³<þÿ;þÿIþÿúûÿÿÿÿÿÿ¸Èÿÿ;þÿ;þÿ;þÿ¯Âÿÿÿÿÿÿÿÿÿÿ¡·ÿÿ@þÿ;þÿ;þÿ;þÿ;þÿ;þÿ;þÿPþÿÀÏÿÿÿÿÿÿÿÿÿÿÎÙÿÿ@þÿ;þÿ:þÿ:üÛUÿ@üÞ@þÿ@þÿCrþÿÿÿÿÿÿÿÿÿjþÿ@þÿ@þÿ(^þÿýýÿÿÿÿÿÿÇÕÿÿCþÿ?þÿ?þÿ?þÿ?þÿ?þÿ?þÿ?þÿ?þÿ FþÿÅÓÿÿÿÿÿÿÿÿÿÿs–þÿ?þÿ?þÿ?þÿAÿSAûõDþÿDþÿgþÿÿÿÿÿÿÿÿÿAtþÿDþÿDþÿj’þÿÿÿÿÿÿÿÿÿM|þÿDþÿDþÿ7lþÿ­ÂþÿÑÝÿÿÂÒÿÿHyþÿDþÿDþÿDþÿTþÿðôÿÿÿÿÿÿîòÿÿNþÿCþÿCþÿDüÈIûøIþÿIþÿ|¡þÿÿÿÿÿÿÿÿÿ)fþÿIþÿIþÿ°ÿÿÿÿÿÿÿÿÿÿWþÿ Qþÿ¥¿ÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿˆªÿÿHþÿHþÿHþÿxžþÿÿÿÿÿÿÿÿÿ\ŠþÿHþÿHþÿHþÿFÿ!MûçMþÿMþÿošþÿÿÿÿÿÿÿÿÿ6sþÿMþÿMþÿ‰­ÿÿÿÿÿÿÿÿÿÿ#eþÿÁÔÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿýþÿÿ8sþÿLþÿLþÿ^þÿýþÿÿÿÿÿÿªÃÿÿLþÿLþÿLþÿMÿcRýÓRþÿRþÿ\þÿÿÿÿÿÿÿÿÿYŽþÿQþÿQþÿh˜þÿÿÿÿÿÿÿÿÿËÜÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿþþÿÿþþÿÿÿÿÿÿœ»ÿÿQþÿQþÿQþÿÏÞÿÿÿÿÿÿÛæÿÿQþÿQþÿQþÿOý§Vþ®VþÿVþÿ"mþÿÿÿÿÿÿÿÿÿž¿ÿÿVþÿVþÿcþÿöùÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿ³ÿÿãìÿÿÿÿÿÿÂÖÿÿUþÿUþÿUþÿ­ÈÿÿÿÿÿÿöùÿÿUþÿUþÿUþÿSýÏ\ÿl[þÿ[þÿZþÿÜèÿÿÿÿÿÿó÷ÿÿjþÿZþÿZþÿz©þÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿ’¹ÿÿ[þÿçïÿÿÿÿÿÿ»ÓÿÿZþÿZþÿZþÿŸÁÿÿÿÿÿÿþþÿÿ]þÿYþÿYþÿWúàaÿ*_þÿ_þÿ_þÿr§þÿÿÿÿÿÿÿÿÿžÂÿÿ_þÿ_þÿ_þÿ€¯þÿýþÿÿÿÿÿÿÿÿÿÿßëÿÿG‹þÿ^þÿ*yþÿÿÿÿÿÿÿÿÿ—½ÿÿ^þÿ^þÿ^þÿ²Îÿÿÿÿÿÿðöÿÿ^þÿ^þÿ^þÿ[ûòÿaýÔcþÿcþÿhþÿáíÿÿÿÿÿÿÿÿÿÿn¦þÿcþÿcþÿcþÿeþÿ({þÿlþÿcþÿcþÿdþÿ±ÏÿÿÿÿÿÿÿÿÿÿA‹þÿcþÿcþÿcþÿÛéÿÿÿÿÿÿÖæÿÿbþÿbþÿbþÿ_ûóiÿ_hþÿhþÿhþÿH“þÿüýÿÿÿÿÿÿÿÿÿÿ•Àÿÿqþÿhþÿgþÿgþÿgþÿgþÿ mþÿŸÆÿÿÿÿÿÿÿÿÿÿÌáÿÿhþÿgþÿgþÿ zþÿÿÿÿÿÿÿÿÿšÃÿÿgþÿgþÿgþÿgüÜfÿkúálþÿlþÿlþÿj©þÿþþÿÿÿÿÿÿÿÿÿÿíõÿÿ“ÁÿÿQ›þÿ8ŒþÿG•þÿ€¶þÿâîÿÿÿÿÿÿÿÿÿÿîõÿÿ#€þÿlþÿlþÿkþÿ‡¹ÿÿÿÿÿÿÿÿÿÿVþÿkþÿkþÿkþÿiþ³pÿ]qþÿqþÿqþÿqþÿNœþÿð÷ÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿàîÿÿ7þÿpþÿpþÿpþÿ~þÿñ÷ÿÿÿÿÿÿåðÿÿtþÿpþÿpþÿpþÿoÿwtþ­uþÿuþÿuþÿuþÿ€þÿŽÂÿÿñ÷ÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿñ÷ÿÿ“Åÿÿ~þÿuþÿtþÿ yþÿÇàÿÿÉáÿÿÿÿÿÿÿÿÿÿs³þÿtþÿtþÿtþÿtûþvÿ'yÿ{ûçyþÿyþÿyþÿyþÿyþÿ|þÿ@›þÿl²þÿ€¼þÿq´þÿJ þÿ þÿyþÿyþÿyþÿyþÿŽÃþÿÿÿÿÿêôÿÿÿÿÿÿÌäÿÿ|þÿyþÿyþÿyþÿwþ¼}ÿ5~ûô~þÿ~þÿ~þÿ~þÿ~þÿ~þÿ~þÿ~þÿ~þÿ~þÿ}þÿ}þÿ}þÿ}þÿ"ŽþÿäñÿÿÿÿÿÿÿÿÿÿèóÿÿP¥þÿ}þÿ}þÿ}þÿ}þÿ{ÿ<ƒÿD‚üó‚ÿÿ‚ÿÿ‚ÿÿ‚ÿÿ‚ÿÿ‚ÿÿ‚ÿÿ‚ÿÿ‚ÿÿ‚ÿÿ‚ÿÿ‚ÿÿ‚ÿÿŸÐÿÿÿÿÿÿÿÿÿÿýþÿÿÑéÿÿsºÿÿ‚ÿÿ‚ÿÿÿÿý–‰ÿ6…üå‡ÿÿ‡ÿÿ‡ÿÿ‡ÿÿ‡ÿÿ†ÿÿ†ÿÿ†ÿÿ†ÿÿ†ÿÿ†ÿÿŠÿÿœÐÿÿÓêÿÿ¼ßÿÿi¸ÿÿˆÿÿ†ÿÿ†ÿÿ†ÿÿ‡üÇŽÿ ÿ‹þ»‹ÿÿ‹ÿÿ‹ÿÿ‹ÿÿ‹ÿÿ‹ÿÿ‹ÿÿ‹ÿÿ%œÿÿ‘ÍÿÿÊæÿÿ3¢ÿÿŽÿÿ‹ÿÿ‹ÿÿ‹ÿÿŠÿÿŠÿÿŠüÇŽÿÿÿ‘ÿaŽüÞÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿ‹ÌÿÿÿÿÿÿÿÿÿÿÿÿŒüþý˜Žÿ €ÿ”ÿ_’ýÔ”ÿÿ”ÿÿ”ÿÿ”ÿÿ”ÿÿ”ÿÿ$£ÿÿ”ÿÿ“ÿÿ“ÿÿ“ÿÿ“þ½“ÿ;•ÿ$—ÿl˜þ°™ýÑ•üæ˜üû˜üé™ýÑ–þ²˜ÿwšÿ&(0 ÿù\ýš&2üÅùùýîÜÞûì$1üÉý¢ÿeÿ1ÿýiýÐÿÿ9Eÿÿ²¸ÿÿÿÿÿÿ¸¿ÿÿ ÿÿÿÿÿÿüðý ûBÿýÿÿÿÿl{ÿÿìïÿÿÿÿÿÿÚÞÿÿ;Oýÿýÿýÿþÿÿÿÿÿüîûÿÿ "ýŸÿÿ ÿÿƒ“þÿÿÿÿÿÿÿÿÿ¡®þÿ)Cýÿýÿýÿýÿýÿýÿýÿ!ÿÿ"ÿÿ þ¾ø&&ýk'üý ÿÿg~ýÿüüÿÿñóÿÿgþÿ#ýÿýÿ#ýÿ-MýÿTnýÿ[týÿGcýÿ<ýÿýÿ!ÿÿ'ÿÿ'ýÐ&ÿ(1ÿ+ûÙ'ÿÿ&Nþÿèìÿÿÿÿÿÿeþÿþÿ!þÿ5Xþÿ­»þÿéíÿÿÿÿÿÿÿÿÿÿþþÿÿÙàÿÿŽ¡þÿEþÿ$ÿÿ.ÿÿ*üÄÿ3ÿd4ÿÿ1ÿÿˆ þÿÿÿÿÿ£µÿÿ1þÿ&þÿMpþÿçìÿÿÿÿÿÿïóÿÿÇÒÿÿÀÌÿÿÒÜÿÿÿÿÿÿÿÿÿÿÓÜÿÿ@eþÿ*ÿÿ5ÿÿ1ýŒÿ8ý¨7ÿÿFþÿÝåÿÿøúÿÿ;gþÿ&þÿ(Xþÿâêÿÿÿÿÿÿ¤¹ÿÿ4\þÿ2þÿ)þÿ @þÿMsþÿ»Ëþÿÿÿÿÿáèÿÿ1^þÿ1ÿÿ8ýó8ÿD>ûÕ;ÿÿ8iþÿÿÿÿÿÈÖÿÿKþÿ2þÿŒ¨þÿÿÿÿÿ¯Ãÿÿ;þÿ,þÿCþÿ Gþÿ9þÿ,þÿFþÿ¬Àÿÿÿÿÿÿ·ÉÿÿEþÿ=ÿÿ>ú¨UÕBúê@ÿÿ[‡þÿÿÿÿÿ¢»ÿÿ>þÿOþÿÅÕÿÿþþÿÿ3jþÿ@þÿ^‰þÿ»ÍþÿÓßÿÿ…¦þÿSþÿ7þÿWþÿÙãÿÿÿÿÿÿOþÿ=ÿÿDüòCú5KùæFÿÿbþÿÿÿÿÿ›¸ÿÿ@þÿ]þÿ×âÿÿõ÷ÿÿ+iþÿš¸ÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿ¢¾ÿÿJþÿ@þÿo™þÿÿÿÿÿžºÿÿKÿÿLÿÿIýpQýÔNÿÿP‡þÿÿÿÿÿ±ÊÿÿPþÿ WþÿÀÓþÿüýÿÿÁÕÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿþþÿÿó÷ÿÿ?|ÿÿ?þÿ3sþÿùûÿÿ×äÿÿ 
YþÿOÿÿPþ­Vþ±Uÿÿ#nþÿøúÿÿäíÿÿ"nþÿFþÿv¦þÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿ¦Äÿÿ¾Ôÿÿÿÿÿÿc—ÿÿDþÿ"mþÿâìÿÿôøÿÿbþÿTÿÿQýÑ]ýv_ÿÿ_ÿÿ¶Ñþÿÿÿÿÿ~­ÿÿQþÿdþÿ©Éþÿÿÿÿÿÿÿÿÿûüÿÿ”»þÿeþÿµÐÿÿÿÿÿÿU’ÿÿLþÿ"rþÿàëÿÿôøÿÿhþÿZÿÿYúâaû:büö^ÿÿGþÿÿÿÿÿôøÿÿO”þÿUþÿdþÿFŽþÿR•þÿ1þÿVþÿBŠþÿøûÿÿåïÿÿ!vþÿUþÿ0€þÿúüÿÿÛéÿÿ jþÿaÿÿ_úèqÿ fü¬mÿÿgþÿ½þÿÿÿÿÿñ÷ÿÿw®ÿÿvþÿbþÿ`þÿhþÿQ—þÿßìÿÿÿÿÿÿ‚µÿÿaþÿaþÿb¢þÿÿÿÿÿ¦ÊÿÿjþÿhÿÿgýÔnûHoýômÿÿtþÿ’Âþÿÿÿÿÿÿÿÿÿàîÿÿ«Ïÿÿ¡ÉÿÿÆßÿÿÿÿÿÿÿÿÿÿ¥Ìþÿ rþÿdþÿrþÿÇßÿÿÿÿÿÿ]£þÿhþÿqÿÿmý©ªÿrý“zÿÿqÿÿtþÿX¤þÿÇàÿÿüþÿÿÿÿÿÿÿÿÿÿÿÿÿÿàîÿÿt´ÿÿ xþÿjþÿQžþÿ±ÓÿÿÿÿÿÿÏåÿÿþÿnÿÿyÿÿtýgózüÈ‚ÿÿwÿÿuþÿ ~þÿ3•þÿ^¬þÿe¯þÿFŸþÿ…þÿvþÿqþÿ‰þÿÙëþÿÿÿÿÿóùÿÿV§ÿÿrþÿÿÿwüÞxÿ ‚ù+ûÕˆÿÿ€ÿÿ}þÿ|þÿ{þÿ{þÿ|þÿ|þÿ|þÿ|ÿÿ„ÂÿÿÿÿÿÿÿÿÿÿÍæÿÿ'’þÿ}ÿÿ„ÿÿ~ýs…ÿ,‡üÄÿÿŠÿÿ†ÿÿ†ÿÿ…ÿÿ…ÿÿ…ÿÿ ‰ÿÿ“ÿÿ§ÖÿÿÑéÿÿ‘Ëÿÿ6ŸÿÿŽÿÿŒÿÿ„ý¨™ÿ ÿŠý™ýô•ÿÿŽÿÿŒÿÿ‹ÿÿŒÿÿ>¨ÿÿžÓÿÿ4£ÿÿ ÿÿ‹ÿÿŒÿÿŽÿÿŠý¦ŒÿüL’ý¨’üøšÿÿ—ÿÿ–ÿÿ”ÿÿ;°ÿÿ™ÿÿ“ÿÿ—ÿÿ”ýÙŽýsŽÿ ™ÿ”ÿ9™ÿq–þ²—ýÓ–üç•üä—ýΕý©™ÿf•ÿ$(  ÿÿÿ* ýmîïý“cmýƒüU#ÿÿÿÿ1ÿ üª üû‘šþÿþþþþ4Eüþüÿüîý• ÿÿ1ÿúá6ýÿßãýýô÷ýüG\üûûûûý ûþüÿ ûí'ûHUÿ'þ¶+ýÿÝâýøÆÒýþ#ýÿ!üþl„üþ§³ýþœªýýKiüú!üþ$üý0üPUÿAÿC$ýÿvýýöùýþ 3üþ DþÿåëþÿúûÿÿÁÎþÿÏÙýþÿÿÿÿÅÐýü;þÿ-üñ>ÿ%>ýˆ4ýÿ×àýü”ªýþ$ýÿÏÚþÿÉ×þÿ4ýÿ"ýÿ"ýÿ RýþáêþÿÄÒýú6ýÿ;ý¨Aý¨Jþÿ÷ùýüRýþTþÿÿÿÿÿ0aþÿIxþÿ¼Îþÿœ¸þÿ Býÿ UýþþþþþW‚þÿ7üöUÿ-Mý RþÿôøýüXˆýþ_þÿöøþÿ´Ëþÿÿÿÿÿûüþÿÿÿÿÿœ»þÿ5ýÿ¾Ïýþ°ÉýüAýÿ Tÿt\ÿtOýÿÈÚýü­ÈýþCýÿ»Òþÿÿÿÿÿÿÿÿÿ¶Ïþÿ¡ÂþÿÐàþÿBýÿ—·ýþÏßýüNýÿYý lÿ-XüöNþÿþþþþKŒýþWýÿWšþÿGþÿ\ýÿÔãþÿ¢ÆþÿIýÿ­Êýþ¿ÖýüVýÿdý¨mý¨dýÿ‰ºýúÿÿÿÿ¡Æýþ@ŽþÿN–þÿÔåþÿíõþÿuþÿdýÿïöýþy°ýü_ýÿoýˆuÿ%uýñmýÿQ¡ýüÕéþÿúýýþùýþÿ´Øþÿþÿ{þÿºÙýþêóýþ}üýqýÿvÿCUÿ|ÿP€ýývýþvýú ‚ýýýþtýþoýþ¬Ôþÿþþþþ¼ýøtþÿ€þ¶Uÿ‡ÿHˆþí‰ÿÿ…þþ…þý ŒþûN©þû–ÎýüZ²ýý‹ÿÿ…þá„ÿªÿŠÿý”ýî‘þÿýþ:ªýþ þÿ†þúþª“ÿÿÿ€ÿÿÿ—ÿ–ÿU—ÿ€ÿˆ™ÿl¤ÿ*ÿÿÿ././@LongLink0000000000000000000000000000015400000000000011215 Lustar 00000000000000spectral-cube-0.3.1/astropy_helpers/astropy_helpers/sphinx/themes/bootstrap-astropy/static/astropy_logo.svgspectral-cube-0.3.1/astropy_helpers/astropy_helpers/sphinx/themes/bootstrap-astropy/static/astropy_l0000644000077000000240000001103212533471373034500 0ustar adamstaff00000000000000 ././@LongLink0000000000000000000000000000015700000000000011220 Lustar 00000000000000spectral-cube-0.3.1/astropy_helpers/astropy_helpers/sphinx/themes/bootstrap-astropy/static/astropy_logo_32.pngspectral-cube-0.3.1/astropy_helpers/astropy_helpers/sphinx/themes/bootstrap-astropy/static/astropy_l0000644000077000000240000000353412340434262034500 0ustar adamstaff00000000000000‰PNG  IHDR szzôsBIT|dˆ pHYsÓÇòŽtEXtSoftwarewww.inkscape.org›î<ÙIDATX…Å—PT×Ç?÷ñc‘P° ˆ „FƒUBìȘ&Ådi”I# ±ÒÄJhC2æÇŒ5ÑÔtw:íÛª5ÑÒSÒÆª©é„Ú¡%¨ZÉF2Vk­JFŒ`ÁŠ(JdÙ÷öÝþqaß., É?½3gæ½sÏïýžûΞº®óÿ\Ñ_ÄÉ•ìÎ#šÅHŠl ø2pèZ€Ý8ØãéÔ¯KL”Wš;†AC°È‹h4¥z>ÕÀ$?ñôé—#¹hJ~‹»œ›´`;&y˜#D²ËÂß b0¨Â¤Åu‹»2RìqKàJr'âã7˜<6.´;`Îã2Ò‹@‹†Ž&°Ìa‹$`›+Æâ1ôWB]Ç, w.rÆM¶|»r€Þh?G6B—m"ù‘GêÕïKàƒ…“œ0º#Ñ&¢: WBÅaˆË°mL6¸pÏ€+àΔƒx¥Áti@D1Çä;«áz§ v³ú7zCýrׇóE9ÎÐäš ‹,“é_Gÿ±hbÞˆy•ˆ;¾Ñ Ðñ!,e÷ÙUÄ—¦AÚlˆO†„©ˆ€-^;V€¬…~ï;MçÅðKxUZùK%:Lü剜"¸ë9äžáT½rÝë†3WCúWaá8úè9ô³`p4XW·;KšxBjó«ËwÙÉ¥„Ö÷á“ýÐÚׇ.WêLDå_e5Êw`ÎDîzFíG;ßz9ì¾?@ÈghI^Ž ÄâUˆ¥›Ô³áƒÆMÈl…+çíãÇÄs%bñZˆK„»Ÿ‚Ão@ûÅ`ó!8¹ò—À¬o‚)Ô!ÔÊpu¹4W›;Uü0ˆ0×i'÷Ý@V— ë\Ð}>üÖßôÁž Èu Àôƒˆï¾ ¦übdëÇ‘‰Yáþ>rµ¡z—c0iØI,\1D‹‰ÜX §)‡Ìùׇˆ×üˆ__…Šm cáB3ì߬|f̃¹ÙI.œ²KŸ;ò“NÖ¤AqÐ!~*Üùr8Þg)ã¬BÄß…¬;!*â'#î©DÔôÁürdÓN;Ql’ à|(€Ùá Xôj®€[Ã`aPy÷ã* ÷ר—¦Ô¥h¹bâO½¶Î 9el¢­ïë 0HÆi¦a29HáReÜÝ 5*Ã@ä)}豄 ¢cU5ö»aÙIr mý0›Jú€nARÂPÊør‡j­&5â“+Þðçõ£AL:éµKðAƒÍ\îÿ´ž eà'_Œ໩âlg'ò›Èm/!7|ü¾p7z‘¯T@ß5å—0 KÕÞ¹Àg†öƒ ú@/fHN|ׯ@b bÁÃÈú8X‹lü,yf} ºÚ ®ú•ˆU; )U1·o»bSµ j€~Ú¦‚aS2!&A”8¼/‡‚û ¿Ž7ªhu¯Ž.@ùó0¿D=¿_oo nIøý/© Ió”è70è¦FÞ§¬&%ÀýÁ¶,Ô*}t â—ƒ{Ë#ÿ$'Ï@ütbÅËʾç?ÈuO„Ú j&Á¡DèºÎK î-T㎉E4| )épá,ò;·Ûí³ôˆµ¿…¨!ÊÎ7ÿ¼Èö3ˆiÙ0ý6X°“Ô¾¹ò8önðôB°ÚSjOEÑšÅNi 0ýÈÚ-ˆg<0c&”T@Ãe]· ùßKˆ» .²ó ;©Þzäæç¡³-Tû³™R[åt:iºÝy±è„·‹,, å4âÑçÝEBÛY8{Z5˜öðîFô÷A¬¦¤ƒÐK]àä?‘úÓð»upíjèLñ©,ñ<«÷…" 
././@LongLink0000000000000000000000000000016100000000000011213 Lustar 00000000000000spectral-cube-0.3.1/astropy_helpers/astropy_helpers/sphinx/themes/bootstrap-astropy/static/bootstrap-astropy.cssspectral-cube-0.3.1/astropy_helpers/astropy_helpers/sphinx/themes/bootstrap-astropy/static/bootstrap0000644000077000000240000002673512533471373034511 0ustar adamstaff00000000000000/*! * Bootstrap v1.4.0 * * Copyright 2011 Twitter, Inc * Licensed under the Apache License v2.0 * http://www.apache.org/licenses/LICENSE-2.0 * * Heavily modified by Kyle Barbary for the AstroPy Project for use with Sphinx. */ @import url("basic.css"); body { background-color: #ffffff; margin: 0; font-family: "Helvetica Neue", Helvetica, Arial, sans-serif; font-size: 13px; font-weight: normal; line-height: 18px; color: #404040; } /* Hyperlinks ----------------------------------------------------------------*/ a { color: #0069d6; text-decoration: none; line-height: inherit; font-weight: inherit; } a:hover { color: #00438a; text-decoration: underline; } /* Typography ----------------------------------------------------------------*/ h1,h2,h3,h4,h5,h6 { color: #404040; margin: 0.7em 0 0 0; line-height: 1.5em; } h1 { font-size: 24px; margin: 0; } h2 { font-size: 21px; line-height: 1.2em; margin: 1em 0 0.5em 0; border-bottom: 1px solid #404040; } h3 { font-size: 18px; } h4 { font-size: 16px; } h5 { font-size: 14px; } h6 { font-size: 13px; text-transform: uppercase; } p { font-size: 13px; font-weight: normal; line-height: 18px; margin-top: 0px; margin-bottom: 9px; } ul, ol { margin-left: 0; padding: 0 0 0 25px; } ul ul, ul ol, ol ol, ol ul { margin-bottom: 0; } ul { list-style: disc; } ol { list-style: decimal; } li { line-height: 18px; color: #404040; } ul.unstyled { list-style: none; margin-left: 0; } dl { margin-bottom: 18px; } dl dt, dl dd { line-height: 18px; } dl dd { margin-left: 9px; } hr { margin: 20px 0 19px; border: 0; border-bottom: 1px solid #eee; } strong { font-style: inherit; font-weight: bold; } em { font-style: italic; font-weight: inherit; line-height: inherit; } .muted { color: #bfbfbf; } address { display: block; line-height: 18px; margin-bottom: 18px; } code, pre { padding: 0 3px 2px; font-family: monospace; -webkit-border-radius: 3px; -moz-border-radius: 3px; border-radius: 3px; } tt { font-family: monospace; } code { color: rgba(0, 0, 0, 0.75); padding: 1px 3px; } pre { display: block; padding: 8.5px; margin: 0 0 18px; line-height: 18px; border: 1px solid #ddd; border: 1px solid rgba(0, 0, 0, 0.12); -webkit-border-radius: 3px; -moz-border-radius: 3px; border-radius: 3px; white-space: pre; white-space: pre-wrap; word-wrap: break-word; } img { margin: 9px 0; } /* format inline code with a rounded box */ tt { margin: 0 2px; padding: 0 5px; border: 1px solid #ddd; border: 1px solid rgba(0, 0, 0, 0.12); border-radius: 3px; } /* all code has same box background color, even in headers */ h1 tt, h2 tt, h3 tt, h4 tt, h5 tt, h6 tt, pre, code, tt { background-color: #f8f8f8; } /* override box for links & other sphinx-specific stuff */ tt.xref, a tt, tt.descname, tt.descclassname { padding: 0 1px 0 1px; border: none; } /* override box for related bar at the top of the page */ .related tt { border: none; padding: 0 1px 0 1px;
background-color: transparent; font-weight: bold; } th { background-color: #dddddd; } .viewcode-back { font-family: sans-serif; } div.viewcode-block:target { background-color: #f4debf; border-top: 1px solid #ac9; border-bottom: 1px solid #ac9; } table.docutils { border-spacing: 5px; border-collapse: separate; } /* Topbar --------------------------------------------------------------------*/ div.topbar { height: 40px; position: absolute; top: 0; left: 0; right: 0; z-index: 10000; padding: 0px 10px; background-color: #222; background-color: #222222; background-repeat: repeat-x; background-image: -khtml-gradient(linear, left top, left bottom, from(#333333), to(#222222)); background-image: -moz-linear-gradient(top, #333333, #222222); background-image: -ms-linear-gradient(top, #333333, #222222); background-image: -webkit-gradient(linear, left top, left bottom, color-stop(0%, #333333), color-stop(100%, #222222)); background-image: -webkit-linear-gradient(top, #333333, #222222); background-image: -o-linear-gradient(top, #333333, #222222); background-image: linear-gradient(top, #333333, #222222); filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#333333', endColorstr='#222222', GradientType=0); } div.topbar a.brand { font-family: 'Source Sans Pro', sans-serif; font-size: 26px; color: #ffffff; font-weight: 600; text-decoration: none; float: left; display: block; height: 32px; padding: 8px 12px 0px 45px; margin-left: -10px; background: transparent url("astropy_logo_32.png") no-repeat 10px 4px; background-image: url("astropy_logo.svg"), none; background-size: 32px 32px; } #logotext1 { } #logotext2 { font-weight:200; color: #ff5000; } #logotext3 { font-weight:200; } div.topbar .brand:hover, div.topbar ul li a.homelink:hover { background-color: #333; background-color: rgba(255, 255, 255, 0.05); } div.topbar ul { font-size: 110%; list-style: none; margin: 0; padding: 0 0 0 10px; float: right; color: #bfbfbf; text-align: center; text-decoration: none; height: 100%; } div.topbar ul li { float: left; display: inline; height: 30px; margin: 5px; padding: 0px; } div.topbar ul li a { color: #bfbfbf; text-decoration: none; padding: 5px; display: block; height: auto; text-align: center; vertical-align: middle; border-radius: 4px; } div.topbar ul li a:hover { color: #ffffff; text-decoration: none; } div.topbar ul li a.homelink { width: 112px; display: block; height: 20px; padding: 5px 0px; background: transparent url("astropy_linkout_20.png") no-repeat 10px 5px; background-image: url("astropy_linkout.svg"), none; background-size: 91px 20px; } div.topbar form { text-align: left; margin: 0 0 0 5px; position: relative; filter: alpha(opacity=100); -khtml-opacity: 1; -moz-opacity: 1; opacity: 1; } div.topbar input { background-color: #444; background-color: rgba(255, 255, 255, 0.3); font-family: "Helvetica Neue", Helvetica, Arial, sans-serif; font-size: 13px; font-weight: normal; line-height: 1; padding: 4px 9px; color: #ffffff; color: rgba(255, 255, 255, 0.75); border: 1px solid #111; -webkit-border-radius: 4px; -moz-border-radius: 4px; border-radius: 4px; -webkit-box-shadow: inset 0 1px 2px rgba(0, 0, 0, 0.1), 0 1px 0px rgba(255, 255, 255, 0.25); -moz-box-shadow: inset 0 1px 2px rgba(0, 0, 0, 0.1), 0 1px 0px rgba(255, 255, 255, 0.25); box-shadow: inset 0 1px 2px rgba(0, 0, 0, 0.1), 0 1px 0px rgba(255, 255, 255, 0.25); -webkit-transition: none; -moz-transition: none; -ms-transition: none; -o-transition: none; transition: none; } div.topbar input:-moz-placeholder { color: #e6e6e6; } div.topbar 
input::-webkit-input-placeholder { color: #e6e6e6; } div.topbar input:hover { background-color: #bfbfbf; background-color: rgba(255, 255, 255, 0.5); color: #ffffff; } div.topbar input:focus, div.topbar input.focused { outline: 0; background-color: #ffffff; color: #404040; text-shadow: 0 1px 0 #ffffff; border: 0; padding: 5px 10px; -webkit-box-shadow: 0 0 3px rgba(0, 0, 0, 0.15); -moz-box-shadow: 0 0 3px rgba(0, 0, 0, 0.15); box-shadow: 0 0 3px rgba(0, 0, 0, 0.15); } /* Relation bar (breadcrumbs, prev, next) ------------------------------------*/ div.related { height: 21px; width: auto; margin: 0 10px; position: absolute; top: 42px; clear: both; left: 0; right: 0; z-index: 10000; font-size: 100%; vertical-align: middle; background-color: #fff; border-bottom: 1px solid #bbb; } div.related ul { padding: 0; margin: 0; } /* Footer --------------------------------------------------------------------*/ footer { display: block; margin: 10px 10px 0px; padding: 10px 0 0 0; border-top: 1px solid #bbb; } .pull-right { float: right; width: 30em; text-align: right; } /* Sphinx sidebar ------------------------------------------------------------*/ div.sphinxsidebar { font-size: inherit; border-radius: 3px; background-color: #eee; border: 1px solid #bbb; } div.sphinxsidebarwrapper { padding: 0px 0px 0px 5px; } div.sphinxsidebar h3 { font-family: 'Trebuchet MS', sans-serif; font-size: 1.4em; font-weight: normal; margin: 5px 0px 0px 5px; padding: 0; line-height: 1.6em; } div.sphinxsidebar h4 { font-family: 'Trebuchet MS', sans-serif; font-size: 1.3em; font-weight: normal; margin: 5px 0 0 0; padding: 0; } div.sphinxsidebar p { } div.sphinxsidebar p.topless { margin: 5px 10px 10px 10px; } div.sphinxsidebar ul { margin: 0px 0px 0px 5px; padding: 0; } div.sphinxsidebar ul ul { margin-left: 15px; list-style-type: disc; } /* If showing the global TOC (toctree), color the current page differently */ div.sphinxsidebar a.current { color: #404040; } div.sphinxsidebar a.current:hover { color: #404040; } /* document, documentwrapper, body, bodywrapper ----------------------------- */ div.document { margin-top: 72px; margin-left: 10px; margin-right: 10px; } div.documentwrapper { float: left; width: 100%; } div.body { background-color: #ffffff; padding: 0 0 0px 20px; } div.bodywrapper { margin: 0 0 0 230px; max-width: 55em; } /* Header links ------------------------------------------------------------- */ a.headerlink { font-size: 0.8em; padding: 0 4px 0 4px; text-decoration: none; } a.headerlink:hover { background-color: #0069d6; color: white; text-decoration: none; } /* Admonitions and warnings ------------------------------------------------- */ /* Shared by admonitions and warnings */ div.admonition, div.warning { padding: 0px; border-radius: 3px; -moz-border-radius: 3px; -webkit-border-radius: 3px; } div.admonition p, div.warning p { margin: 0.5em 1em 0.5em 1em; padding: 0; } div.admonition pre, div.warning pre { margin: 0.4em 1em 0.4em 1em; } div.admonition p.admonition-title, div.warning p.admonition-title { margin: 0; padding: 0.1em 0 0.1em 0.5em; color: white; font-weight: bold; font-size: 1.1em; } div.admonition ul, div.admonition ol, div.warning ul, div.warning ol { margin: 0.1em 0.5em 0.5em 3em; padding: 0; } /* Admonitions only */ div.admonition { border: 1px solid #609060; background-color: #e9ffe9; } div.admonition p.admonition-title { background-color: #70A070; } /* Warnings only */ div.warning { border: 1px solid #900000; background-color: #ffe9e9; } div.warning p.admonition-title { background-color: 
#b04040; } /* Figures ------------------------------------------------------------------ */ .figure.align-center { clear: none; } /* This is a div for containing multiple figures side-by-side, for use with * .. container:: figures */ div.figures { border: 1px solid #CCCCCC; background-color: #F8F8F8; margin: 1em; text-align: center; } div.figures .figure { clear: none; float: none; display: inline-block; border: none; margin-left: 0.5em; margin-right: 0.5em; } .field-list th { white-space: nowrap; } table.field-list { border-spacing: 0px; margin-left: 1px; border-left: 5px solid rgb(238, 238, 238) !important; } table.field-list th.field-name { display: inline-block; padding: 1px 8px 1px 5px; white-space: nowrap; background-color: rgb(238, 238, 238); border-radius: 0 3px 3px 0; -webkit-border-radius: 0 3px 3px 0; } ././@LongLink0000000000000000000000000000015100000000000011212 Lustar 00000000000000spectral-cube-0.3.1/astropy_helpers/astropy_helpers/sphinx/themes/bootstrap-astropy/static/copybutton.jsspectral-cube-0.3.1/astropy_helpers/astropy_helpers/sphinx/themes/bootstrap-astropy/static/copybutto0000644000077000000240000000467712412505144034513 0ustar adamstaff00000000000000$(document).ready(function() { /* Add a [>>>] button on the top-right corner of code samples to hide * the >>> and ... prompts and the output and thus make the code * copyable. */ var div = $('.highlight-python .highlight,' + '.highlight-python3 .highlight') var pre = div.find('pre'); // get the styles from the current theme pre.parent().parent().css('position', 'relative'); var hide_text = 'Hide the prompts and output'; var show_text = 'Show the prompts and output'; var border_width = pre.css('border-top-width'); var border_style = pre.css('border-top-style'); var border_color = pre.css('border-top-color'); var button_styles = { 'cursor':'pointer', 'position': 'absolute', 'top': '0', 'right': '0', 'border-color': border_color, 'border-style': border_style, 'border-width': border_width, 'color': border_color, 'text-size': '75%', 'font-family': 'monospace', 'padding-left': '0.2em', 'padding-right': '0.2em', 'border-radius': '0 3px 0 0' } // create and add the button to all the code blocks that contain >>> div.each(function(index) { var jthis = $(this); if (jthis.find('.gp').length > 0) { var button = $('<span class="copybutton">&gt;&gt;&gt;</span>'); button.css(button_styles) button.attr('title', hide_text); jthis.prepend(button); } // tracebacks (.gt) contain bare text elements that need to be // wrapped in a span to work with .nextUntil() (see later) jthis.find('pre:has(.gt)').contents().filter(function() { return ((this.nodeType == 3) && (this.data.trim().length > 0)); }).wrap('<span>'); }); // define the behavior of the button when it's clicked $('.copybutton').toggle( function() { var button = $(this); button.parent().find('.go, .gp, .gt').hide(); button.next('pre').find('.gt').nextUntil('.gp, .go').css('visibility', 'hidden'); button.css('text-decoration', 'line-through'); button.attr('title', show_text); }, function() { var button = $(this); button.parent().find('.go, .gp, .gt').show(); button.next('pre').find('.gt').nextUntil('.gp, .go').css('visibility', 'visible'); button.css('text-decoration', 'none'); button.attr('title', hide_text); }); }); ././@LongLink0000000000000000000000000000014600000000000011216 Lustar 
00000000000000spectral-cube-0.3.1/astropy_helpers/astropy_helpers/sphinx/themes/bootstrap-astropy/static/sidebar.jsspectral-cube-0.3.1/astropy_helpers/astropy_helpers/sphinx/themes/bootstrap-astropy/static/sidebar.j0000644000077000000240000001155312340434262034325 0ustar adamstaff00000000000000/* * sidebar.js * ~~~~~~~~~~ * * This script makes the Sphinx sidebar collapsible. * * .sphinxsidebar contains .sphinxsidebarwrapper. This script adds * in .sphinxsidebar, after .sphinxsidebarwrapper, the #sidebarbutton * used to collapse and expand the sidebar. * * When the sidebar is collapsed the .sphinxsidebarwrapper is hidden * and the width of the sidebar and the margin-left of the document * are decreased. When the sidebar is expanded the opposite happens. * This script saves a per-browser/per-session cookie used to * remember the position of the sidebar among the pages. * Once the browser is closed the cookie is deleted and the position * reset to the default (expanded). * * :copyright: Copyright 2007-2011 by the Sphinx team, see AUTHORS. * :license: BSD, see LICENSE for details. * */ $(function() { // global elements used by the functions. // the 'sidebarbutton' element is defined as global after its // creation, in the add_sidebar_button function var bodywrapper = $('.bodywrapper'); var sidebar = $('.sphinxsidebar'); var sidebarwrapper = $('.sphinxsidebarwrapper'); // for some reason, the document has no sidebar; do not run into errors if (!sidebar.length) return; // original margin-left of the bodywrapper and width of the sidebar // with the sidebar expanded var bw_margin_expanded = bodywrapper.css('margin-left'); var ssb_width_expanded = sidebar.width(); // margin-left of the bodywrapper and width of the sidebar // with the sidebar collapsed var bw_margin_collapsed = 12; var ssb_width_collapsed = 12; // custom colors var dark_color = '#404040'; var light_color = '#505050'; function sidebar_is_collapsed() { return sidebarwrapper.is(':not(:visible)'); } function toggle_sidebar() { if (sidebar_is_collapsed()) expand_sidebar(); else collapse_sidebar(); } function collapse_sidebar() { sidebarwrapper.hide(); sidebar.css('width', ssb_width_collapsed); bodywrapper.css('margin-left', bw_margin_collapsed); sidebarbutton.css({ 'margin-left': '-1px', 'height': bodywrapper.height(), 'border-radius': '3px' }); sidebarbutton.find('span').text('»'); sidebarbutton.attr('title', _('Expand sidebar')); document.cookie = 'sidebar=collapsed'; } function expand_sidebar() { bodywrapper.css('margin-left', bw_margin_expanded); sidebar.css('width', ssb_width_expanded); sidebarwrapper.show(); sidebarbutton.css({ 'margin-left': ssb_width_expanded - 12, 'height': bodywrapper.height(), 'border-radius': '0px 3px 3px 0px' }); sidebarbutton.find('span').text('«'); sidebarbutton.attr('title', _('Collapse sidebar')); document.cookie = 'sidebar=expanded'; } function add_sidebar_button() { sidebarwrapper.css({ 'float': 'left', 'margin-right': '0', 'width': ssb_width_expanded - 18 }); // create the button sidebar.append('<div id="sidebarbutton"><span>«</span></div>'); var sidebarbutton = $('#sidebarbutton'); // find the height of the viewport to center the '<<' in the page var viewport_height; if (window.innerHeight) viewport_height = window.innerHeight; else viewport_height = $(window).height(); var sidebar_offset = sidebar.offset().top; var sidebar_height = Math.max(bodywrapper.height(), sidebar.height()); sidebarbutton.find('span').css({ 'font-family': '"Lucida Grande",Arial,sans-serif', 'display': 'block', 'top': Math.min(viewport_height/2, sidebar_height/2 + sidebar_offset) - 10, 'width': 12, 'position': 'fixed', 'text-align': 'center' }); sidebarbutton.click(toggle_sidebar); sidebarbutton.attr('title', _('Collapse sidebar')); sidebarbutton.css({ 'color': '#FFFFFF', 'background-color': light_color, 'border': '1px solid ' + light_color, 'border-radius': '0px 3px 3px 0px', 'font-size': '1.2em', 'cursor': 'pointer', 'height': sidebar_height, 'padding-top': '1px', 'margin': '-1px', 'margin-left': ssb_width_expanded - 12 }); sidebarbutton.hover( function () { $(this).css('background-color', dark_color); }, function () { $(this).css('background-color', light_color); } ); } function set_position_from_cookie() { if (!document.cookie) return; var items = document.cookie.split(';'); for(var k=0; k<items.length; k++) { var key_val = items[k].split('='); var key = key_val[0].replace(/ /, ''); if (key == 'sidebar') { var value = key_val[1]; if ((value == 'collapsed') && (!sidebar_is_collapsed())) collapse_sidebar(); else if ((value == 'expanded') && (sidebar_is_collapsed())) expand_sidebar(); } } } add_sidebar_button(); set_position_from_cookie(); });
[The cookie loop and file ending above are reconstructed from the standard Sphinx sidebar.js; this dump lost a span here covering the original end of sidebar.js plus the archive header and opening (imports and the run_cmd/run_setup/sandbox helpers) of spectral-cube-0.3.1/astropy_helpers/astropy_helpers/tests/__init__.py, which resumes mid-file below.]
if sys.version_info[:2] >= (3, 3): import importlib importlib.invalidate_caches() @pytest.fixture(scope='function', autouse=True) def reset_setup_helpers(request): """ Saves and restores the global state of the astropy_helpers.setup_helpers module between tests. """ mod = __import__('astropy_helpers.setup_helpers', fromlist=['']) old_state = mod._module_state.copy() def finalizer(old_state=old_state): mod = sys.modules.get('astropy_helpers.setup_helpers') if mod is not None: mod._module_state.update(old_state) request.addfinalizer(finalizer) @pytest.fixture(scope='function', autouse=True) def reset_distutils_log(): """ This is a setup/teardown fixture that ensures the log-level of the distutils log is always set to a default of WARN, since different settings could affect tests that check the contents of stdout. """ from distutils import log log.set_threshold(log.WARN) @pytest.fixture(scope='module', autouse=True) def fix_hide_setuptools(): """ Workaround for https://github.com/astropy/astropy-helpers/issues/124 In setuptools 10.0 run_setup was changed in such a way that it sweeps away the existing setuptools import before running the setup script. In principle this is nice, but in the practice of testing astropy_helpers this is problematic since we're trying to test code that has already been imported during the testing process, and which relies on the setuptools module that was already in use. """ if hasattr(sandbox, 'hide_setuptools'): sandbox.hide_setuptools = lambda: None TEST_PACKAGE_SETUP_PY = """\ #!/usr/bin/env python from setuptools import setup NAME = 'astropy-helpers-test' VERSION = {version!r} setup(name=NAME, version=VERSION, packages=['_astropy_helpers_test_'], zip_safe=False) """ @pytest.fixture def testpackage(tmpdir, version='0.1'): """ This fixture creates a simplified package called _astropy_helpers_test_ used primarily for testing ah_bootstrap, but without using the astropy_helpers package directly and getting it confused with the astropy_helpers package already under test.
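
    A sketch of typical usage (the fixture is requested by name, as the
    tests below do; the test name and the '--version' argument here are
    hypothetical)::

        def test_example(tmpdir, testpackage, capsys):
            # 'testpackage' is the py.path.local directory of the freshly
            # generated (and git-committed) test package
            run_setup(str(testpackage.join('setup.py')), ['--version'])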
""" source = tmpdir.mkdir('testpkg') with source.as_cwd(): source.mkdir('_astropy_helpers_test_') init = source.join('_astropy_helpers_test_', '__init__.py') init.write('__version__ = {0!r}'.format(version)) setup_py = TEST_PACKAGE_SETUP_PY.format(version=version) source.join('setup.py').write(setup_py) # Make the new test package into a git repo run_cmd('git', ['init']) run_cmd('git', ['add', '--all']) run_cmd('git', ['commit', '-m', 'test package']) return source def cleanup_import(package_name): """Remove all references to package_name from sys.modules""" for k in list(sys.modules): if not isinstance(k, str): # Some things will actually do this =_= continue elif k.startswith('astropy_helpers.tests'): # Don't delete imported test modules or else the tests will break, # badly continue if k == package_name or k.startswith(package_name + '.'): del sys.modules[k] spectral-cube-0.3.1/astropy_helpers/astropy_helpers/tests/test_ah_bootstrap.py0000644000077000000240000003434012533471373030207 0ustar adamstaff00000000000000# -*- coding: utf-8 -*- import glob import os import textwrap import sys from distutils.version import StrictVersion import setuptools from setuptools.package_index import PackageIndex import pytest from . import * from ..utils import silence TEST_SETUP_PY = """\ #!/usr/bin/env python from __future__ import print_function import os import sys import ah_bootstrap # reset the name of the package installed by ah_boostrap to # _astropy_helpers_test_--this will prevent any confusion by pkg_resources with # any already installed packages named astropy_helpers # We also disable auto-upgrade by default ah_bootstrap.DIST_NAME = 'astropy-helpers-test' ah_bootstrap.PACKAGE_NAME = '_astropy_helpers_test_' ah_bootstrap.AUTO_UPGRADE = False ah_bootstrap.DOWNLOAD_IF_NEEDED = False try: ah_bootstrap.BOOTSTRAPPER = ah_bootstrap._Bootstrapper.main() ah_bootstrap.use_astropy_helpers({args}) finally: ah_bootstrap.DIST_NAME = 'astropy-helpers' ah_bootstrap.PACKAGE_NAME = 'astropy_helpers' ah_bootstrap.AUTO_UPGRADE = True ah_bootstrap.DOWNLOAD_IF_NEEDED = True # Kind of a hacky way to do this, but this assertion is specifically # for test_check_submodule_no_git # TODO: Rework the tests in this module so that it's easier to test specific # behaviors of ah_bootstrap for each test assert '--no-git' not in sys.argv import _astropy_helpers_test_ filename = os.path.abspath(_astropy_helpers_test_.__file__) filename = filename.replace('.pyc', '.py') # More consistent this way print(filename) """ # The behavior checked in some of the tests depends on the version of # setuptools try: SETUPTOOLS_VERSION = StrictVersion(setuptools.__version__).version except: # Broken setuptools? ¯\_(ツ)_/¯ SETUPTOOLS_VERSION = (0, 0, 0) def test_bootstrap_from_submodule(tmpdir, testpackage, capsys): """ Tests importing _astropy_helpers_test_ from a submodule in a git repository. This tests actually performing a fresh clone of the repository without the submodule initialized, and that importing astropy_helpers in that context works transparently after calling `ah_boostrap.use_astropy_helpers`. 
""" orig_repo = tmpdir.mkdir('orig') # Ensure ah_bootstrap is imported from the local directory import ah_bootstrap with orig_repo.as_cwd(): run_cmd('git', ['init']) # Write a test setup.py that uses ah_bootstrap; it also ensures that # any previous reference to astropy_helpers is first wiped from # sys.modules orig_repo.join('setup.py').write(TEST_SETUP_PY.format(args='')) run_cmd('git', ['add', 'setup.py']) # Add our own clone of the astropy_helpers repo as a submodule named # astropy_helpers run_cmd('git', ['submodule', 'add', str(testpackage), '_astropy_helpers_test_']) run_cmd('git', ['commit', '-m', 'test repository']) os.chdir(str(tmpdir)) # Creates a clone of our test repo in the directory 'clone' run_cmd('git', ['clone', 'orig', 'clone']) os.chdir('clone') run_setup('setup.py', []) stdout, stderr = capsys.readouterr() path = stdout.strip() # Ensure that the astropy_helpers used by the setup.py is the one that # was imported from git submodule a = os.path.normcase(path) b = os.path.normcase(str(tmpdir.join('clone', '_astropy_helpers_test_', '_astropy_helpers_test_', '__init__.py'))) assert a == b def test_bootstrap_from_submodule_no_locale(tmpdir, testpackage, capsys, monkeypatch): """ Regression test for https://github.com/astropy/astropy/issues/2749 Runs test_bootstrap_from_submodule but with missing locale/language settings. """ for varname in ('LC_ALL', 'LC_CTYPE', 'LANG', 'LANGUAGE'): monkeypatch.delenv(varname, raising=False) test_bootstrap_from_submodule(tmpdir, testpackage, capsys) def test_bootstrap_from_submodule_bad_locale(tmpdir, testpackage, capsys, monkeypatch): """ Additional regression test for https://github.com/astropy/astropy/issues/2749 """ for varname in ('LC_ALL', 'LC_CTYPE', 'LANG', 'LANGUAGE'): monkeypatch.delenv(varname, raising=False) # Test also with bad LC_CTYPE a la http://bugs.python.org/issue18378 monkeypatch.setenv('LC_CTYPE', 'UTF-8') test_bootstrap_from_submodule(tmpdir, testpackage, capsys) def test_check_submodule_no_git(tmpdir, testpackage): """ Tests that when importing astropy_helpers from a submodule, it is still recognized as a submodule even when using the --no-git option. In particular this ensures that the auto-upgrade feature is not activated. 
""" orig_repo = tmpdir.mkdir('orig') # Ensure ah_bootstrap is imported from the local directory import ah_bootstrap with orig_repo.as_cwd(): run_cmd('git', ['init']) # Write a test setup.py that uses ah_bootstrap; it also ensures that # any previous reference to astropy_helpers is first wiped from # sys.modules args = 'auto_upgrade=True' orig_repo.join('setup.py').write(TEST_SETUP_PY.format(args=args)) run_cmd('git', ['add', 'setup.py']) # Add our own clone of the astropy_helpers repo as a submodule named # astropy_helpers run_cmd('git', ['submodule', 'add', str(testpackage), '_astropy_helpers_test_']) run_cmd('git', ['commit', '-m', 'test repository']) # Temporarily patch _do_upgrade to fail if called class UpgradeError(Exception): pass def _do_upgrade(*args, **kwargs): raise UpgradeError() orig_do_upgrade = ah_bootstrap._Bootstrapper._do_upgrade ah_bootstrap._Bootstrapper._do_upgrade = _do_upgrade try: run_setup('setup.py', ['--no-git']) except UpgradeError: pytest.fail('Attempted to run auto-upgrade despite importing ' '_astropy_helpers_test_ from a git submodule') finally: ah_bootstrap._Bootstrapper._do_upgrade = orig_do_upgrade # Ensure that the no-git option was in fact set assert not ah_bootstrap.BOOTSTRAPPER.use_git def test_bootstrap_from_directory(tmpdir, testpackage, capsys): """ Tests simply bundling a copy of the astropy_helpers source code in its entirety bundled directly in the source package and not in an archive. """ import ah_bootstrap source = tmpdir.mkdir('source') testpackage.copy(source.join('_astropy_helpers_test_')) with source.as_cwd(): source.join('setup.py').write(TEST_SETUP_PY.format(args='')) run_setup('setup.py', []) stdout, stderr = capsys.readouterr() stdout = stdout.splitlines() if stdout: path = stdout[-1].strip() else: path = '' # Ensure that the astropy_helpers used by the setup.py is the one that # was imported from git submodule a = os.path.normcase(path) b = os.path.normcase(str(source.join('_astropy_helpers_test_', '_astropy_helpers_test_', '__init__.py'))) assert a == b def test_bootstrap_from_archive(tmpdir, testpackage, capsys): """ Tests importing _astropy_helpers_test_ from a .tar.gz source archive shipped alongside the package that uses it. """ orig_repo = tmpdir.mkdir('orig') # Ensure ah_bootstrap is imported from the local directory import ah_bootstrap # Make a source distribution of the test package with silence(): run_setup(str(testpackage.join('setup.py')), ['sdist', '--dist-dir=dist', '--formats=gztar']) dist_dir = testpackage.join('dist') for dist_file in dist_dir.visit('*.tar.gz'): dist_file.copy(orig_repo) with orig_repo.as_cwd(): # Write a test setup.py that uses ah_bootstrap; it also ensures that # any previous reference to astropy_helpers is first wiped from # sys.modules args = 'path={0!r}'.format(os.path.basename(str(dist_file))) orig_repo.join('setup.py').write(TEST_SETUP_PY.format(args=args)) run_setup('setup.py', []) stdout, stderr = capsys.readouterr() path = stdout.splitlines()[-1].strip() # Installation from the .tar.gz should have resulted in a .egg # directory that the _astropy_helpers_test_ package was imported from eggs = _get_local_eggs() assert eggs egg = orig_repo.join(eggs[0]) assert os.path.isdir(str(egg)) a = os.path.normcase(path) b = os.path.normcase(str(egg.join('_astropy_helpers_test_', '__init__.py'))) assert a == b def test_download_if_needed(tmpdir, testpackage, capsys): """ Tests the case where astropy_helpers was not actually included in a package, or is otherwise missing, and we need to "download" it. 
This does not test actually downloading from the internet--this is normally done through setuptools' easy_install command which can also install from a source archive. From the point of view of ah_bootstrap the two actions are equivalent, so we can just as easily simulate this by providing a setup.cfg giving the path to a source archive to "download" (as though it were a URL). """ source = tmpdir.mkdir('source') # Ensure ah_bootstrap is imported from the local directory import ah_bootstrap # Make a source distribution of the test package with silence(): run_setup(str(testpackage.join('setup.py')), ['sdist', '--dist-dir=dist', '--formats=gztar']) dist_dir = testpackage.join('dist') with source.as_cwd(): source.join('setup.py').write(TEST_SETUP_PY.format( args='download_if_needed=True')) source.join('setup.cfg').write(textwrap.dedent("""\ [easy_install] find_links = {find_links} """.format(find_links=str(dist_dir)))) run_setup('setup.py', []) stdout, stderr = capsys.readouterr() # Just take the last line--on Python 2.6 distutils logs warning # messages to stdout instead of stderr, causing them to be mixed up # with our expected output path = stdout.splitlines()[-1].strip() # easy_install should have worked by 'installing' astropy_helpers as a # .egg in the current directory eggs = _get_local_eggs() assert eggs egg = source.join(eggs[0]) assert os.path.isdir(str(egg)) a = os.path.normcase(path) b = os.path.normcase(str(egg.join('_astropy_helpers_test_', '__init__.py'))) assert a == b def test_upgrade(tmpdir, capsys): # Run the testpackage fixture manually, since we use it multiple times in # this test to make different versions of _astropy_helpers_test_ orig_dir = testpackage(tmpdir.mkdir('orig')) # Make a test package that uses _astropy_helpers_test_ source = tmpdir.mkdir('source') dist_dir = source.mkdir('dists') orig_dir.copy(source.join('_astropy_helpers_test_')) with source.as_cwd(): setup_py = TEST_SETUP_PY.format(args='auto_upgrade=True') source.join('setup.py').write(setup_py) # This will be used later to fake downloading the upgrade package source.join('setup.cfg').write(textwrap.dedent("""\ [easy_install] find_links = {find_links} """.format(find_links=str(dist_dir)))) # Make additional "upgrade" versions of the _astropy_helpers_test_ # package--one of them is version 0.2 and the other is version 0.1.1. The # auto-upgrade should ignore version 0.2 but use version 0.1.1.
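    # Why 0.1.1 and not 0.2: the auto-upgrade is only meant to pick up micro
    # (bug-fix) releases within the current 0.1.x series -- conceptually a
    # requirement like 'astropy-helpers-test>=0.1,<0.2', though that exact
    # string is an assumption here; see ah_bootstrap._Bootstrapper._do_upgrade
    # for the authoritative logic.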
upgrade_dir_1 = testpackage(tmpdir.mkdir('upgrade_1'), version='0.2') upgrade_dir_2 = testpackage(tmpdir.mkdir('upgrade_2'), version='0.1.1') dists = [] # For each upgrade package go ahead and build a source distribution of it # and copy that source distribution to a dist directory we'll use later to # simulate a 'download' for upgrade_dir in [upgrade_dir_1, upgrade_dir_2]: with silence(): run_setup(str(upgrade_dir.join('setup.py')), ['sdist', '--dist-dir=dist', '--formats=gztar']) dists.append(str(upgrade_dir.join('dist'))) for dist_file in upgrade_dir.visit('*.tar.gz'): dist_file.copy(source.join('dists')) # Monkey with the PackageIndex in ah_bootstrap so that it is initialized # with the test upgrade packages, and so that it does not actually go out # to the internet to look for anything import ah_bootstrap class FakePackageIndex(PackageIndex): def __init__(self, *args, **kwargs): PackageIndex.__init__(self, *args, **kwargs) self.to_scan = dists def find_packages(self, requirement): # no-op pass ah_bootstrap.PackageIndex = FakePackageIndex try: with source.as_cwd(): # Now run the source setup.py; this test is similar to # test_download_if_needed, but we explicitly check that the correct # *version* of _astropy_helpers_test_ was used run_setup('setup.py', []) stdout, stderr = capsys.readouterr() path = stdout.splitlines()[-1].strip() eggs = _get_local_eggs() assert eggs egg = source.join(eggs[0]) assert os.path.isdir(str(egg)) a = os.path.normcase(path) b = os.path.normcase(str(egg.join('_astropy_helpers_test_', '__init__.py'))) assert a == b assert 'astropy_helpers_test-0.1.1-' in str(egg) finally: ah_bootstrap.PackageIndex = PackageIndex def _get_local_eggs(path='.'): """ Helper utility used by some tests to get the list of egg archive files in a local directory. """ if SETUPTOOLS_VERSION[0] >= 7: eggs = glob.glob(os.path.join(path, '.eggs', '*.egg')) else: eggs = glob.glob('*.egg') return eggs spectral-cube-0.3.1/astropy_helpers/astropy_helpers/tests/test_git_helpers.py0000644000077000000240000001627412533471373030035 0ustar adamstaff00000000000000import glob import imp import os import pkgutil import re import sys import tarfile from . 
import * PY3 = sys.version_info[0] == 3 if PY3: _text_type = str else: _text_type = unicode _DEV_VERSION_RE = re.compile(r'\d+\.\d+(?:\.\d+)?\.dev(\d+)') TEST_VERSION_SETUP_PY = """\ #!/usr/bin/env python from setuptools import setup NAME = '_eva_' VERSION = {version!r} RELEASE = 'dev' not in VERSION from astropy_helpers.git_helpers import get_git_devstr from astropy_helpers.version_helpers import generate_version_py if not RELEASE: VERSION += get_git_devstr(False) generate_version_py(NAME, VERSION, RELEASE, False, uses_git=not RELEASE) setup(name=NAME, version=VERSION, packages=['_eva_']) """ TEST_VERSION_INIT = """\ try: from .version import version as __version__ from .version import githash as __githash__ except ImportError: __version__ = __githash__ = '' """ @pytest.fixture def version_test_package(tmpdir, request): def make_test_package(version='42.42.dev'): test_package = tmpdir.mkdir('test_package') test_package.join('setup.py').write( TEST_VERSION_SETUP_PY.format(version=version)) test_package.mkdir('_eva_').join('__init__.py').write(TEST_VERSION_INIT) with test_package.as_cwd(): run_cmd('git', ['init']) run_cmd('git', ['add', '--all']) run_cmd('git', ['commit', '-m', 'test package']) if '' in sys.path: sys.path.remove('') sys.path.insert(0, '') def finalize(): cleanup_import('_eva_') request.addfinalizer(finalize) return test_package return make_test_package def test_update_git_devstr(version_test_package, capsys): """Tests that the commit number in the package's version string updates after git commits even without re-running setup.py. """ # We have to call version_test_package to actually create the package test_pkg = version_test_package() with test_pkg.as_cwd(): run_setup('setup.py', ['--version']) stdout, stderr = capsys.readouterr() version = stdout.strip() m = _DEV_VERSION_RE.match(version) assert m, ( "Stdout did not match the version string pattern:" "\n\n{0}\n\nStderr:\n\n{1}".format(stdout, stderr)) revcount = int(m.group(1)) import _eva_ assert _eva_.__version__ == version # Make a silly git commit with open('.test', 'w'): pass run_cmd('git', ['add', '.test']) run_cmd('git', ['commit', '-m', 'test']) import _eva_.version imp.reload(_eva_.version) # Previously this checked packagename.__version__, but in order for that to # be updated we also have to re-import _astropy_init which could be tricky. # Checking directly that the packagename.version module was updated is # sufficient: m = _DEV_VERSION_RE.match(_eva_.version.version) assert m assert int(m.group(1)) == revcount + 1 # This doesn't test astropy_helpers.git_helpers.update_git_devstr directly # since a copy of that function is made in packagename.version (so that it # can work without astropy_helpers installed).
In order to get test # coverage on the actual astropy_helpers copy of that function just call it # directly and compare to the value in packagename from astropy_helpers.git_helpers import update_git_devstr newversion = update_git_devstr(version, path=str(test_pkg)) assert newversion == _eva_.version.version def test_version_update_in_other_repos(version_test_package, tmpdir): """ Regression test for https://github.com/astropy/astropy-helpers/issues/114 and for https://github.com/astropy/astropy-helpers/issues/107 """ test_pkg = version_test_package() with test_pkg.as_cwd(): run_setup('setup.py', ['build']) # Add the path to the test package to sys.path for now sys.path.insert(0, str(test_pkg)) try: import _eva_ m = _DEV_VERSION_RE.match(_eva_.__version__) assert m correct_revcount = int(m.group(1)) with tmpdir.as_cwd(): testrepo = tmpdir.mkdir('testrepo') testrepo.chdir() # Create an empty git repo run_cmd('git', ['init']) import _eva_.version imp.reload(_eva_.version) m = _DEV_VERSION_RE.match(_eva_.version.version) assert m assert int(m.group(1)) == correct_revcount correct_revcount = int(m.group(1)) # Add several commits--more than the revcount for the _eva_ package for idx in range(correct_revcount + 5): test_filename = '.test' + str(idx) testrepo.ensure(test_filename) run_cmd('git', ['add', test_filename]) run_cmd('git', ['commit', '-m', 'A message']) import _eva_.version imp.reload(_eva_.version) m = _DEV_VERSION_RE.match(_eva_.version.version) assert m assert int(m.group(1)) == correct_revcount correct_revcount = int(m.group(1)) finally: sys.path.remove(str(test_pkg)) @pytest.mark.parametrize('version', ['1.0.dev', '1.0']) def test_installed_git_version(version_test_package, version, tmpdir, capsys): """ Test for https://github.com/astropy/astropy-helpers/issues/87 Ensures that packages installed with astropy_helpers have a correct copy of the git hash of the installed commit. """ # To test this, it should suffice to build a source dist, unpack it # somewhere outside the git repository, and then do a build and import # from the build directory--no need to "install" as such test_pkg = version_test_package(version) with test_pkg.as_cwd(): run_setup('setup.py', ['build']) try: import _eva_ githash = _eva_.__githash__ assert githash and isinstance(githash, _text_type) # Ensure that it does in fact look like a git hash and not some # other arbitrary string assert re.match(r'[0-9a-f]{40}', githash) finally: cleanup_import('_eva_') run_setup('setup.py', ['sdist', '--dist-dir=dist', '--formats=gztar']) tgzs = glob.glob(os.path.join('dist', '*.tar.gz')) assert len(tgzs) == 1 tgz = test_pkg.join(tgzs[0]) build_dir = tmpdir.mkdir('build_dir') tf = tarfile.open(str(tgz), mode='r:gz') tf.extractall(str(build_dir)) with build_dir.as_cwd(): pkg_dir = glob.glob('_eva_-*')[0] os.chdir(pkg_dir) run_setup('setup.py', ['build']) try: import _eva_ loader = pkgutil.get_loader('_eva_') # Ensure we are importing the 'packagename' that was just unpacked # into the build_dir if sys.version_info[:2] != (3, 3): # Skip this test on Python 3.3 wherein the SourceFileLoader # has a bug where get_filename() does not return an absolute # path assert loader.get_filename().startswith(str(build_dir)) assert _eva_.__githash__ == githash finally: cleanup_import('_eva_') spectral-cube-0.3.1/astropy_helpers/astropy_helpers/tests/test_setup_helpers.py0000644000077000000240000002201412533471373030377 0ustar adamstaff00000000000000import shutil import sys from textwrap import dedent from .. 
import setup_helpers from ..setup_helpers import get_package_info, register_commands from . import * @pytest.fixture def extension_test_package(tmpdir, request): """Creates a simple test package with an extension module.""" test_pkg = tmpdir.mkdir('test_pkg') test_pkg.mkdir('_eva_').ensure('__init__.py') # TODO: It might later be worth making this particular test package into a # reusable fixture for other build_ext tests # A minimal C extension for testing test_pkg.join('_eva_', 'unit01.c').write(dedent("""\ #include <Python.h> #ifndef PY3K #if PY_MAJOR_VERSION >= 3 #define PY3K 1 #else #define PY3K 0 #endif #endif #if PY3K static struct PyModuleDef moduledef = { PyModuleDef_HEAD_INIT, "unit01", NULL, -1, NULL }; PyMODINIT_FUNC PyInit_unit01(void) { return PyModule_Create(&moduledef); } #else PyMODINIT_FUNC initunit01(void) { Py_InitModule3("unit01", NULL, NULL); } #endif """)) test_pkg.join('_eva_', 'setup_package.py').write(dedent("""\ from setuptools import Extension from os.path import join def get_extensions(): return [Extension('_eva_.unit01', [join('_eva_', 'unit01.c')])] """)) test_pkg.join('setup.py').write(dedent("""\ from os.path import join from setuptools import setup from astropy_helpers.setup_helpers import register_commands from astropy_helpers.setup_helpers import get_package_info from astropy_helpers.version_helpers import generate_version_py NAME = '_eva_' VERSION = '0.1' RELEASE = True cmdclassd = register_commands(NAME, VERSION, RELEASE) generate_version_py(NAME, VERSION, RELEASE, False, False) package_info = get_package_info() setup( name=NAME, version=VERSION, cmdclass=cmdclassd, **package_info ) """)) if '' in sys.path: sys.path.remove('') sys.path.insert(0, '') def finalize(): cleanup_import('_eva_') request.addfinalizer(finalize) return test_pkg def test_cython_autoextensions(tmpdir): """ Regression test for https://github.com/astropy/astropy-helpers/pull/19 Ensures that Cython extensions in sub-packages are discovered and built only once. """ # Make a simple test package test_pkg = tmpdir.mkdir('test_pkg') test_pkg.mkdir('yoda').mkdir('luke') test_pkg.ensure('yoda', '__init__.py') test_pkg.ensure('yoda', 'luke', '__init__.py') test_pkg.join('yoda', 'luke', 'dagobah.pyx').write( """def testfunc(): pass""") # Required, currently, for get_package_info to work register_commands('yoda', '0.0', False, srcdir=str(test_pkg)) package_info = get_package_info(str(test_pkg)) assert len(package_info['ext_modules']) == 1 assert package_info['ext_modules'][0].name == 'yoda.luke.dagobah' def test_compiler_module(extension_test_package): """ Test ensuring that the compiler module is built and installed for packages that have extension modules.
""" test_pkg = extension_test_package install_temp = test_pkg.mkdir('install_temp') with test_pkg.as_cwd(): # This is one of the simplest ways to install just a package into a # test directory run_setup('setup.py', ['install', '--single-version-externally-managed', '--install-lib={0}'.format(install_temp), '--record={0}'.format(install_temp.join('record.txt'))]) with install_temp.as_cwd(): import _eva_ # Make sure we imported the _eva_ package from the correct place dirname = os.path.abspath(os.path.dirname(_eva_.__file__)) assert dirname == str(install_temp.join('_eva_')) import _eva_._compiler import _eva_.version assert _eva_.version.compiler == _eva_._compiler.compiler assert _eva_.version.compiler != 'unknown' def test_no_cython_buildext(extension_test_package): """ Regression test for https://github.com/astropy/astropy-helpers/pull/35 This tests the custom build_ext command installed by astropy_helpers when used with a project that has no Cython extensions (but does have one or more normal C extensions). """ test_pkg = extension_test_package # In order for this test to test the correct code path we need to fool # setup_helpers into thinking we don't have Cython installed setup_helpers._module_state['have_cython'] = False with test_pkg.as_cwd(): run_setup('setup.py', ['build_ext', '--inplace']) sys.path.insert(0, str(test_pkg)) try: import _eva_.unit01 dirname = os.path.abspath(os.path.dirname(_eva_.unit01.__file__)) assert dirname == str(test_pkg.join('_eva_')) finally: sys.path.remove(str(test_pkg)) @pytest.mark.parametrize('mode', ['cli', 'cli-w', 'direct']) def test_build_sphinx(tmpdir, mode): """ Test for build_sphinx """ import astropy_helpers ah_path = os.path.dirname(astropy_helpers.__file__) test_pkg = tmpdir.mkdir('test_pkg') test_pkg.mkdir('mypackage') test_pkg.join('mypackage').join('__init__.py').write(dedent("""\ def test_function(): pass class A(): pass class B(A): pass """)) docs = test_pkg.mkdir('docs') autosummary = docs.mkdir('_templates').mkdir('autosummary') autosummary.join('base.rst').write('{% extends "autosummary_core/base.rst" %}') autosummary.join('class.rst').write('{% extends "autosummary_core/class.rst" %}') autosummary.join('module.rst').write('{% extends "autosummary_core/module.rst" %}') docs_dir = test_pkg.join('docs') docs_dir.join('conf.py').write(dedent("""\ import sys sys.path.append("../") import warnings with warnings.catch_warnings(): # ignore matplotlib warning warnings.simplefilter("ignore") from astropy_helpers.sphinx.conf import * exclude_patterns.append('_templates') """)) docs_dir.join('index.rst').write(dedent("""\ .. 
automodapi:: mypackage """)) test_pkg.join('setup.py').write(dedent("""\ from os.path import join from setuptools import setup, Extension from astropy_helpers.setup_helpers import register_commands, get_package_info NAME = 'mypackage' VERSION = 0.1 RELEASE = True cmdclassd = register_commands(NAME, VERSION, RELEASE) setup( name=NAME, version=VERSION, cmdclass=cmdclassd, **get_package_info() ) """)) with test_pkg.as_cwd(): shutil.copytree(ah_path, 'astropy_helpers') if mode == 'cli': run_setup('setup.py', ['build_sphinx']) elif mode == 'cli-w': run_setup('setup.py', ['build_sphinx', '-w']) elif mode == 'direct': # to check coverage with docs_dir.as_cwd(): from sphinx import main try: main(['-b html', '-d _build/doctrees', '.', '_build/html']) except SystemExit as exc: assert exc.code == 0 def test_command_hooks(tmpdir, capsys): """A basic test for pre- and post-command hooks.""" test_pkg = tmpdir.mkdir('test_pkg') test_pkg.mkdir('_welltall_') test_pkg.join('_welltall_', '__init__.py').ensure() # Create a setup_package module with a couple of command hooks in it test_pkg.join('_welltall_', 'setup_package.py').write(dedent("""\ def pre_build_hook(cmd_obj): print('Hello build!') def post_build_hook(cmd_obj): print('Goodbye build!') """)) # A simple setup.py for the test package--running register_commands should # discover and enable the command hooks test_pkg.join('setup.py').write(dedent("""\ from os.path import join from setuptools import setup, Extension from astropy_helpers.setup_helpers import register_commands, get_package_info NAME = '_welltall_' VERSION = 0.1 RELEASE = True cmdclassd = register_commands(NAME, VERSION, RELEASE) setup( name=NAME, version=VERSION, cmdclass=cmdclassd ) """)) with test_pkg.as_cwd(): try: run_setup('setup.py', ['build']) finally: cleanup_import('_welltall_') stdout, stderr = capsys.readouterr() want = dedent("""\ running build running pre_hook from _welltall_.setup_package for build command Hello build! running post_hook from _welltall_.setup_package for build command Goodbye build! """).strip() assert want in stdout spectral-cube-0.3.1/astropy_helpers/astropy_helpers/utils.py0000644000077000000240000005051712533471373024465 0ustar adamstaff00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst from __future__ import absolute_import, unicode_literals import contextlib import functools import imp import inspect import os import sys import textwrap import types import warnings try: from importlib import machinery as import_machinery # Python 3.2 does not have SourceLoader if not hasattr(import_machinery, 'SourceLoader'): import_machinery = None except ImportError: import_machinery = None # Python 3.3's importlib caches filesystem reads for faster imports in the # general case. But sometimes it's necessary to manually invalidate those # caches so that the import system can pick up new generated files. See # https://github.com/astropy/astropy/issues/820 if sys.version_info[:2] >= (3, 3): from importlib import invalidate_caches else: invalidate_caches = lambda: None # Note: The following Warning subclasses are simply copies of the Warnings in # Astropy of the same names. class AstropyWarning(Warning): """ The base warning class from which all Astropy warnings should inherit. Any warning inheriting from this class is handled by the Astropy logger. """ class AstropyDeprecationWarning(AstropyWarning): """ A warning class to indicate a deprecated feature. 
""" class AstropyPendingDeprecationWarning(PendingDeprecationWarning, AstropyWarning): """ A warning class to indicate a soon-to-be deprecated feature. """ def _get_platlib_dir(cmd): """ Given a build command, return the name of the appropriate platform-specific build subdirectory directory (e.g. build/lib.linux-x86_64-2.7) """ plat_specifier = '.{0}-{1}'.format(cmd.plat_name, sys.version[0:3]) return os.path.join(cmd.build_base, 'lib' + plat_specifier) def get_numpy_include_path(): """ Gets the path to the numpy headers. """ # We need to go through this nonsense in case setuptools # downloaded and installed Numpy for us as part of the build or # install, since Numpy may still think it's in "setup mode", when # in fact we're ready to use it to build astropy now. if sys.version_info[0] >= 3: import builtins if hasattr(builtins, '__NUMPY_SETUP__'): del builtins.__NUMPY_SETUP__ import imp import numpy imp.reload(numpy) else: import __builtin__ if hasattr(__builtin__, '__NUMPY_SETUP__'): del __builtin__.__NUMPY_SETUP__ import numpy reload(numpy) try: numpy_include = numpy.get_include() except AttributeError: numpy_include = numpy.get_numpy_include() return numpy_include class _DummyFile(object): """A noop writeable object.""" errors = '' # Required for Python 3.x def write(self, s): pass def flush(self): pass @contextlib.contextmanager def silence(): """A context manager that silences sys.stdout and sys.stderr.""" old_stdout = sys.stdout old_stderr = sys.stderr sys.stdout = _DummyFile() sys.stderr = _DummyFile() exception_occurred = False try: yield except: exception_occurred = True # Go ahead and clean up so that exception handling can work normally sys.stdout = old_stdout sys.stderr = old_stderr raise if not exception_occurred: sys.stdout = old_stdout sys.stderr = old_stderr if sys.platform == 'win32': import ctypes def _has_hidden_attribute(filepath): """ Returns True if the given filepath has the hidden attribute on MS-Windows. Based on a post here: http://stackoverflow.com/questions/284115/cross-platform-hidden-file-detection """ if isinstance(filepath, bytes): filepath = filepath.decode(sys.getfilesystemencoding()) try: attrs = ctypes.windll.kernel32.GetFileAttributesW(filepath) assert attrs != -1 result = bool(attrs & 2) except (AttributeError, AssertionError): result = False return result else: def _has_hidden_attribute(filepath): return False def is_path_hidden(filepath): """ Determines if a given file or directory is hidden. Parameters ---------- filepath : str The path to a file or directory Returns ------- hidden : bool Returns `True` if the file is hidden """ name = os.path.basename(os.path.abspath(filepath)) if isinstance(name, bytes): is_dotted = name.startswith(b'.') else: is_dotted = name.startswith('.') return is_dotted or _has_hidden_attribute(filepath) def walk_skip_hidden(top, onerror=None, followlinks=False): """ A wrapper for `os.walk` that skips hidden files and directories. This function does not have the parameter `topdown` from `os.walk`: the directories must always be recursed top-down when using this function. 
See also -------- os.walk : For a description of the parameters """ for root, dirs, files in os.walk( top, topdown=True, onerror=onerror, followlinks=followlinks): # These lists must be updated in-place so os.walk will skip # hidden directories dirs[:] = [d for d in dirs if not is_path_hidden(d)] files[:] = [f for f in files if not is_path_hidden(f)] yield root, dirs, files def write_if_different(filename, data): """Write `data` to `filename`, if the content of the file is different. Parameters ---------- filename : str The file name to be written to. data : bytes The data to be written to `filename`. """ assert isinstance(data, bytes) if os.path.exists(filename): with open(filename, 'rb') as fd: original_data = fd.read() else: original_data = None if original_data != data: with open(filename, 'wb') as fd: fd.write(data) def import_file(filename, name=None): """ Imports a module from a single file as if it doesn't belong to a particular package. The returned module will have the optional ``name`` if given, or else a name generated from the filename. """ # Specifying a traditional dot-separated fully qualified name here # results in a number of "Parent module 'astropy' not found while # handling absolute import" warnings. Using the same name, the # namespaces of the modules get merged together. So, this # generates an underscore-separated name which is more likely to # be unique, and it doesn't really matter because the name isn't # used directly here anyway. mode = 'U' if sys.version_info[0] < 3 else 'r' if name is None: basename = os.path.splitext(filename)[0] name = '_'.join(os.path.relpath(basename).split(os.sep)[1:]) if import_machinery: loader = import_machinery.SourceFileLoader(name, filename) mod = loader.load_module() else: with open(filename, mode) as fd: mod = imp.load_module(name, fd, filename, ('.py', mode, 1)) return mod def resolve_name(name): """Resolve a name like ``module.object`` to an object and return it. Raise `ImportError` if the module or name is not found. """ parts = name.split('.') cursor = len(parts) - 1 module_name = parts[:cursor] attr_name = parts[-1] while cursor > 0: try: ret = __import__('.'.join(module_name), fromlist=[attr_name]) break except ImportError: if cursor == 0: raise cursor -= 1 module_name = parts[:cursor] attr_name = parts[cursor] ret = '' for part in parts[cursor:]: try: ret = getattr(ret, part) except AttributeError: raise ImportError(name) return ret if sys.version_info[0] >= 3: def iteritems(dictionary): return dictionary.items() else: def iteritems(dictionary): return dictionary.iteritems() def extends_doc(extended_func): """ A function decorator for use when wrapping an existing function but adding additional functionality. This copies the docstring from the original function, and appends to it (along with a newline) the docstring of the wrapper function. Example ------- >>> def foo(): ... '''Hello.''' ... >>> @extends_doc(foo) ... def bar(): ... '''Goodbye.''' ... >>> print(bar.__doc__) Hello. Goodbye. """ def decorator(func): func.__doc__ = '\n\n'.join([extended_func.__doc__.rstrip('\n'), func.__doc__.lstrip('\n')]) return func return decorator # Duplicated from astropy.utils.decorators.deprecated # When fixing issues in this function fix them in astropy first, then # port the fixes over to astropy-helpers def deprecated(since, message='', name='', alternative='', pending=False, obj_type=None): """ Used to mark a function or class as deprecated. To mark an attribute as deprecated, use `deprecated_attribute`. 
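
    A typical use is as a decorator (an illustrative sketch; ``new_function``
    is a hypothetical replacement)::

        @deprecated('1.0', alternative='new_function')
        def old_function():
            ...
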
    Parameters
    ----------
    since : str
        The release at which this API became deprecated.  This is required.

    message : str, optional
        Override the default deprecation message.  The format specifier
        ``func`` may be used for the name of the function, and ``alternative``
        may be used in the deprecation message to insert the name of an
        alternative to the deprecated function.  ``obj_type`` may be used to
        insert a friendly name for the type of object being deprecated.

    name : str, optional
        The name of the deprecated function or class; if not provided the name
        is automatically determined from the passed in function or class,
        though this is useful in the case of renamed functions, where the new
        function is just assigned to the name of the deprecated function.  For
        example::

            def new_function():
                ...
            oldFunction = new_function

    alternative : str, optional
        An alternative function or class name that the user may use in place
        of the deprecated object.  The deprecation warning will tell the user
        about this alternative if provided.

    pending : bool, optional
        If True, uses an AstropyPendingDeprecationWarning instead of an
        AstropyDeprecationWarning.

    obj_type : str, optional
        The type of this object, if the automatically determined one needs
        to be overridden.
    """

    method_types = (classmethod, staticmethod, types.MethodType)

    def deprecate_doc(old_doc, message):
        """
        Returns a given docstring with a deprecation message prepended to it.
        """
        if not old_doc:
            old_doc = ''

        old_doc = textwrap.dedent(old_doc).strip('\n')

        new_doc = (('\n.. deprecated:: %(since)s'
                    '\n    %(message)s\n\n' %
                    {'since': since, 'message': message.strip()}) + old_doc)

        if not old_doc:
            # This is to prevent a spurious 'unexpected unindent' warning from
            # docutils when the original docstring was blank.
            new_doc += r'\ '

        return new_doc

    def get_function(func):
        """
        Given a function or classmethod (or other function wrapper type), get
        the function object.
        """
        if isinstance(func, method_types):
            try:
                func = func.__func__
            except AttributeError:
                # classmethods in Python2.6 and below lack the __func__
                # attribute so we need to hack around to get it
                method = func.__get__(None, object)
                if isinstance(method, types.FunctionType):
                    # For staticmethods anyways the wrapped object is just a
                    # plain function (not a bound method or anything like
                    # that)
                    func = method
                elif hasattr(method, '__func__'):
                    func = method.__func__
                elif hasattr(method, 'im_func'):
                    func = method.im_func
                else:
                    # Nothing we can do really...  just return the original
                    # classmethod, etc.
                    return func
        return func

    def deprecate_function(func, message):
        """
        Returns a wrapped function that displays an
        ``AstropyDeprecationWarning`` when it is called.
        """
        if isinstance(func, method_types):
            func_wrapper = type(func)
        else:
            func_wrapper = lambda f: f

        func = get_function(func)

        def deprecated_func(*args, **kwargs):
            if pending:
                category = AstropyPendingDeprecationWarning
            else:
                category = AstropyDeprecationWarning

            warnings.warn(message, category, stacklevel=2)

            return func(*args, **kwargs)

        # If this is an extension function, we can't call
        # functools.wraps on it, but we normally don't care.
        # This crazy way to get the type of a wrapper descriptor is
        # straight out of the Python 3.3 inspect module docs.
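        # (Illustrative aside: str.__dict__['__add__'] is a slot wrapper,
        # i.e.
        #
        #     >>> type(str.__dict__['__add__']).__name__
        #     'wrapper_descriptor'
        #
        # so the comparison below simply asks whether func is itself a
        # wrapper descriptor -- the C-level type of slot methods on built-in
        # types -- without having to name that type directly.)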
        if type(func) != type(str.__dict__['__add__']):
            deprecated_func = functools.wraps(func)(deprecated_func)

        deprecated_func.__doc__ = deprecate_doc(
            deprecated_func.__doc__, message)

        return func_wrapper(deprecated_func)

    def deprecate_class(cls, message):
        """
        Returns a wrapper class with the docstrings updated and an __init__
        function that will raise an ``AstropyDeprecationWarning`` when called.
        """
        # Creates a new class with the same name and bases as the original
        # class, but updates the dictionary with a new docstring and a
        # wrapped __init__ method.  __module__ needs to be manually copied
        # over, since otherwise it will be set to *this* module
        # (astropy.utils.misc).

        # This approach seems to make Sphinx happy (the new class looks
        # enough like the original class), and works with extension classes
        # (which functools.wraps does not, since it tries to modify the
        # original class).

        # We need to add a custom pickler or you'll get
        #     Can't pickle <class ...>: it's not found as ...
        # errors.  Picklability is required for any class that is documented
        # by Sphinx.

        members = cls.__dict__.copy()
        members.update({
            '__doc__': deprecate_doc(cls.__doc__, message),
            '__init__': deprecate_function(get_function(cls.__init__),
                                           message),
        })

        return type(cls.__name__, cls.__bases__, members)

    def deprecate(obj, message=message, name=name, alternative=alternative,
                  pending=pending):
        if obj_type is None:
            if isinstance(obj, type):
                obj_type_name = 'class'
            elif inspect.isfunction(obj):
                obj_type_name = 'function'
            elif inspect.ismethod(obj) or isinstance(obj, method_types):
                obj_type_name = 'method'
            else:
                obj_type_name = 'object'
        else:
            obj_type_name = obj_type

        if not name:
            name = get_function(obj).__name__

        altmessage = ''
        if not message or type(message) == type(deprecate):
            if pending:
                message = ('The %(func)s %(obj_type)s will be deprecated in '
                           'a future version.')
            else:
                message = ('The %(func)s %(obj_type)s is deprecated and may '
                           'be removed in a future version.')
            if alternative:
                altmessage = '\n        Use %s instead.' % alternative

        message = ((message % {
            'func': name,
            'name': name,
            'alternative': alternative,
            'obj_type': obj_type_name}) +
            altmessage)

        if isinstance(obj, type):
            return deprecate_class(obj, message)
        else:
            return deprecate_function(obj, message)

    if type(message) == type(deprecate):
        return deprecate(message)

    return deprecate


def deprecated_attribute(name, since, message=None, alternative=None,
                         pending=False):
    """
    Used to mark a public attribute as deprecated.  This creates a property
    that will warn when the given attribute name is accessed.  To prevent the
    warning (i.e. for internal code), use the private name for the attribute
    by prepending an underscore (i.e. ``self._name``).

    Parameters
    ----------
    name : str
        The name of the deprecated attribute.

    since : str
        The release at which this API became deprecated.  This is required.

    message : str, optional
        Override the default deprecation message.  The format specifier
        ``name`` may be used for the name of the attribute, and
        ``alternative`` may be used in the deprecation message to insert the
        name of an alternative to the deprecated function.

    alternative : str, optional
        An alternative attribute that the user may use in place of the
        deprecated attribute.  The deprecation warning will tell the user
        about this alternative if provided.

    pending : bool, optional
        If True, uses an AstropyPendingDeprecationWarning instead of an
        AstropyDeprecationWarning.
Examples -------- :: class MyClass: # Mark the old_name as deprecated old_name = misc.deprecated_attribute('old_name', '0.1') def method(self): self._old_name = 42 """ private_name = '_' + name @deprecated(since, name=name, obj_type='attribute') def get(self): return getattr(self, private_name) @deprecated(since, name=name, obj_type='attribute') def set(self, val): setattr(self, private_name, val) @deprecated(since, name=name, obj_type='attribute') def delete(self): delattr(self, private_name) return property(get, set, delete) def minversion(module, version, inclusive=True, version_path='__version__'): """ Returns `True` if the specified Python module satisfies a minimum version requirement, and `False` if not. By default this uses `pkg_resources.parse_version` to do the version comparison if available. Otherwise it falls back on `distutils.version.LooseVersion`. Parameters ---------- module : module or `str` An imported module of which to check the version, or the name of that module (in which case an import of that module is attempted-- if this fails `False` is returned). version : `str` The version as a string that this module must have at a minimum (e.g. ``'0.12'``). inclusive : `bool` The specified version meets the requirement inclusively (i.e. ``>=``) as opposed to strictly greater than (default: `True`). version_path : `str` A dotted attribute path to follow in the module for the version. Defaults to just ``'__version__'``, which should work for most Python modules. Examples -------- >>> import astropy >>> minversion(astropy, '0.4.4') True """ if isinstance(module, types.ModuleType): module_name = module.__name__ elif isinstance(module, six.string_types): module_name = module try: module = resolve_name(module_name) except ImportError: return False else: raise ValueError('module argument must be an actual imported ' 'module, or the import name of the module; ' 'got {0!r}'.format(module)) if '.' not in version_path: have_version = getattr(module, version_path) else: have_version = resolve_name('.'.join([module.__name__, version_path])) try: from pkg_resources import parse_version except ImportError: from distutils.version import LooseVersion as parse_version if inclusive: return parse_version(have_version) >= parse_version(version) else: return parse_version(have_version) > parse_version(version) spectral-cube-0.3.1/astropy_helpers/astropy_helpers/version.py0000644000077000000240000000703312340435031024770 0ustar adamstaff00000000000000# Autogenerated by Astropy-affiliated package astropy_helpers's setup.py on 2014-05-25 20:44:41.896718 import os import subprocess import warnings def update_git_devstr(version, path=None): """ Updates the git revision string if and only if the path is being imported directly from a git working copy. This ensures that the revision number in the version string is accurate. """ try: # Quick way to determine if we're in git or not - returns '' if not devstr = get_git_devstr(sha=True, show_warning=False, path=path) except OSError: return version if not devstr: # Probably not in git so just pass silently return version if 'dev' in version: # update to the current git revision version_base = version.split('.dev', 1)[0] devstr = get_git_devstr(sha=False, show_warning=False, path=path) return version_base + '.dev' + devstr else: #otherwise it's already the true/release version return version def get_git_devstr(sha=False, show_warning=True, path=None): """ Determines the number of revisions in this repository. 
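
    For example (an illustrative sketch; the values naturally depend on the
    repository this runs in)::

        get_git_devstr()          # e.g. '104' -- the commit count, as a str
        get_git_devstr(sha=True)  # e.g. 'b5f1f6...' -- full SHA1 of HEAD
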
Parameters ---------- sha : bool If True, the full SHA1 hash will be returned. Otherwise, the total count of commits in the repository will be used as a "revision number". show_warning : bool If True, issue a warning if git returns an error code, otherwise errors pass silently. path : str or None If a string, specifies the directory to look in to find the git repository. If `None`, the current working directory is used. If given a filename it uses the directory containing that file. Returns ------- devversion : str Either a string with the revsion number (if `sha` is False), the SHA1 hash of the current commit (if `sha` is True), or an empty string if git version info could not be identified. """ if path is None: path = os.getcwd() if not os.path.isdir(path): path = os.path.abspath(os.path.dirname(path)) if not os.path.exists(os.path.join(path, '.git')): return '' if sha: cmd = ['rev-parse'] # Faster for getting just the hash of HEAD else: cmd = ['rev-list', '--count'] try: p = subprocess.Popen(['git'] + cmd + ['HEAD'], cwd=path, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE) stdout, stderr = p.communicate() except OSError as e: if show_warning: warnings.warn('Error running git: ' + str(e)) return '' if p.returncode == 128: if show_warning: warnings.warn('No git repository present at {0!r}! Using default ' 'dev version.'.format(path)) return '' elif p.returncode != 0: if show_warning: warnings.warn('Git failed while determining revision ' 'count: ' + stderr) return '' if sha: return stdout.decode('utf-8')[:40] else: return stdout.decode('utf-8').strip() _last_generated_version = '0.4.dev' version = update_git_devstr(_last_generated_version) githash = get_git_devstr(sha=True, show_warning=False) major = 0 minor = 4 bugfix = 0 release = False debug = False try: from ._compiler import compiler except ImportError: compiler = "unknown" try: from .cython_version import cython_version except ImportError: cython_version = "unknown" spectral-cube-0.3.1/astropy_helpers/astropy_helpers/version_helpers.py0000644000077000000240000002264412533471373026534 0ustar adamstaff00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst """ Utilities for generating the version string for Astropy (or an affiliated package) and the version.py module, which contains version info for the package. Within the generated astropy.version module, the `major`, `minor`, and `bugfix` variables hold the respective parts of the version number (bugfix is '0' if absent). The `release` variable is True if this is a release, and False if this is a development version of astropy. For the actual version string, use:: from astropy.version import version or:: from astropy import __version__ """ from __future__ import division import datetime import imp import os import pkgutil import sys from distutils import log import pkg_resources from . import git_helpers from .distutils_helpers import is_distutils_display_option from .utils import invalidate_caches PY3 = sys.version_info[0] == 3 def _version_split(version): """ Split a version string into major, minor, and bugfix numbers. If any of those numbers are missing the default is zero. Any pre/post release modifiers are ignored. 
Examples ======== >>> _version_split('1.2.3') (1, 2, 3) >>> _version_split('1.2') (1, 2, 0) >>> _version_split('1.2rc1') (1, 2, 0) >>> _version_split('1') (1, 0, 0) >>> _version_split('') (0, 0, 0) """ parsed_version = pkg_resources.parse_version(version) if hasattr(parsed_version, 'base_version'): # New version parsing for setuptools >= 8.0 if parsed_version.base_version: parts = [int(part) for part in parsed_version.base_version.split('.')] else: parts = [] else: parts = [] for part in parsed_version: if part.startswith('*'): # Ignore any .dev, a, b, rc, etc. break parts.append(int(part)) if len(parts) < 3: parts += [0] * (3 - len(parts)) # In principle a version could have more parts (like 1.2.3.4) but we only # support .. return tuple(parts[:3]) # This is used by setup.py to create a new version.py - see that file for # details. Note that the imports have to be absolute, since this is also used # by affiliated packages. _FROZEN_VERSION_PY_TEMPLATE = """ # Autogenerated by {packagetitle}'s setup.py on {timestamp!s} from __future__ import unicode_literals import datetime {header} major = {major} minor = {minor} bugfix = {bugfix} release = {rel} timestamp = {timestamp!r} debug = {debug} try: from ._compiler import compiler except ImportError: compiler = "unknown" try: from .cython_version import cython_version except ImportError: cython_version = "unknown" """[1:] _FROZEN_VERSION_PY_WITH_GIT_HEADER = """ {git_helpers} _packagename = "{packagename}" _last_generated_version = "{verstr}" _last_githash = "{githash}" # Determine where the source code for this module # lives. If __file__ is not a filesystem path then # it is assumed not to live in a git repo at all. if _get_repo_path(__file__, levels=len(_packagename.split('.'))): version = update_git_devstr(_last_generated_version, path=__file__) githash = get_git_devstr(sha=True, show_warning=False, path=__file__) or _last_githash else: # The file does not appear to live in a git repo so don't bother # invoking git version = _last_generated_version githash = _last_githash """[1:] _FROZEN_VERSION_PY_STATIC_HEADER = """ version = "{verstr}" githash = "{githash}" """[1:] def _get_version_py_str(packagename, version, githash, release, debug, uses_git=True): timestamp = datetime.datetime.now() major, minor, bugfix = _version_split(version) if packagename.lower() == 'astropy': packagetitle = 'Astropy' else: packagetitle = 'Astropy-affiliated package ' + packagename header = '' if uses_git: header = _generate_git_header(packagename, version, githash) elif not githash: # _generate_git_header will already generate a new git has for us, but # for creating a new version.py for a release (even if uses_git=False) # we still need to get the githash to include in the version.py # See https://github.com/astropy/astropy-helpers/issues/141 githash = git_helpers.get_git_devstr(sha=True, show_warning=True) if not header: # If _generate_git_header fails it returns an empty string header = _FROZEN_VERSION_PY_STATIC_HEADER.format(verstr=version, githash=githash) return _FROZEN_VERSION_PY_TEMPLATE.format(packagetitle=packagetitle, timestamp=timestamp, header=header, major=major, minor=minor, bugfix=bugfix, rel=release, debug=debug) def _generate_git_header(packagename, version, githash): """ Generates a header to the version.py module that includes utilities for probing the git repository for updates (to the current git hash, etc.) These utilities should only be available in development versions, and not in release builds. 
If this fails for any reason an empty string is returned. """ loader = pkgutil.get_loader(git_helpers) source = loader.get_source(git_helpers.__name__) or '' source_lines = source.splitlines() if not source_lines: log.warn('Cannot get source code for astropy_helpers.git_helpers; ' 'git support disabled.') return '' idx = 0 for idx, line in enumerate(source_lines): if line.startswith('# BEGIN'): break git_helpers_py = '\n'.join(source_lines[idx + 1:]) if PY3: verstr = version else: # In Python 2 don't pass in a unicode string; otherwise verstr will # be represented with u'' syntax which breaks on Python 3.x with x # < 3. This is only an issue when developing on multiple Python # versions at once verstr = version.encode('utf8') new_githash = git_helpers.get_git_devstr(sha=True, show_warning=False) if new_githash: githash = new_githash return _FROZEN_VERSION_PY_WITH_GIT_HEADER.format( git_helpers=git_helpers_py, packagename=packagename, verstr=verstr, githash=githash) def generate_version_py(packagename, version, release=None, debug=None, uses_git=True): """Regenerate the version.py module if necessary.""" try: version_module = get_pkg_version_module(packagename) try: last_generated_version = version_module._last_generated_version except AttributeError: last_generated_version = version_module.version try: last_githash = version_module._last_githash except AttributeError: last_githash = version_module.githash current_release = version_module.release current_debug = version_module.debug except ImportError: version_module = None last_generated_version = None last_githash = None current_release = None current_debug = None if release is None: # Keep whatever the current value is, if it exists release = bool(current_release) if debug is None: # Likewise, keep whatever the current value is, if it exists debug = bool(current_debug) version_py = os.path.join(packagename, 'version.py') if (last_generated_version != version or current_release != release or current_debug != debug): if '-q' not in sys.argv and '--quiet' not in sys.argv: log.set_threshold(log.INFO) if is_distutils_display_option(): # Always silence unnecessary log messages when display options are # being used log.set_threshold(log.WARN) log.info('Freezing version number to {0}'.format(version_py)) with open(version_py, 'w') as f: # This overwrites the actual version.py f.write(_get_version_py_str(packagename, version, last_githash, release, debug, uses_git=uses_git)) invalidate_caches() if version_module: imp.reload(version_module) def get_pkg_version_module(packagename, fromlist=None): """Returns the package's .version module generated by `astropy_helpers.version_helpers.generate_version_py`. Raises an ImportError if the version module is not found. If ``fromlist`` is an iterable, return a tuple of the members of the version module corresponding to the member names given in ``fromlist``. Raises an `AttributeError` if any of these module members are not found. """ if not fromlist: # Due to a historical quirk of Python's import implementation, # __import__ will not return submodules of a package if 'fromlist' is # empty. 
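        # (Illustrative sketch of that quirk, for a hypothetical package
        # 'mypkg':
        #
        #     __import__('mypkg.version')                 # -> module 'mypkg'
        #     __import__('mypkg.version', fromlist=[''])  # -> 'mypkg.version'
        #
        # i.e. a non-empty fromlist makes __import__ return the submodule
        # itself rather than the top-level package.)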
# TODO: For Python 3.1 and up it may be preferable to use importlib # instead of the __import__ builtin return __import__(packagename + '.version', fromlist=['']) else: mod = __import__(packagename + '.version', fromlist=fromlist) return tuple(getattr(mod, member) for member in fromlist) spectral-cube-0.3.1/astropy_helpers/astropy_helpers.egg-info/0000755000077000000240000000000012654610601024426 5ustar adamstaff00000000000000spectral-cube-0.3.1/astropy_helpers/astropy_helpers.egg-info/dependency_links.txt0000644000077000000240000000000112340435031030466 0ustar adamstaff00000000000000 spectral-cube-0.3.1/astropy_helpers/astropy_helpers.egg-info/not-zip-safe0000644000077000000240000000000112340435031026646 0ustar adamstaff00000000000000 spectral-cube-0.3.1/astropy_helpers/astropy_helpers.egg-info/PKG-INFO0000644000077000000240000000051012340435031025511 0ustar adamstaff00000000000000Metadata-Version: 1.1 Name: astropy-helpers Version: 0.4.dev102 Summary: UNKNOWN Home-page: http://astropy.org Author: The Astropy Developers Author-email: astropy.team@gmail.com License: BSD Download-URL: http://pypi.python.org/packages/source/a/astropy-helpers/astropy-0.4.dev102.tar.gz Description: UNKNOWN Platform: UNKNOWN spectral-cube-0.3.1/astropy_helpers/astropy_helpers.egg-info/SOURCES.txt0000644000077000000240000000512312340435031026305 0ustar adamstaff00000000000000LICENSE.rst MANIFEST.in README.rst ah_bootstrap.py ez_setup.py setup.cfg setup.py astropy_helpers/__init__.py astropy_helpers/git_helpers.py astropy_helpers/setup_helpers.py astropy_helpers/test_helpers.py astropy_helpers/utils.py astropy_helpers/version.py astropy_helpers/version_helpers.py astropy_helpers.egg-info/PKG-INFO astropy_helpers.egg-info/SOURCES.txt astropy_helpers.egg-info/dependency_links.txt astropy_helpers.egg-info/not-zip-safe astropy_helpers.egg-info/top_level.txt astropy_helpers/compat/__init__.py astropy_helpers/compat/subprocess.py astropy_helpers/compat/_subprocess_py2/__init__.py astropy_helpers/sphinx/__init__.py astropy_helpers/sphinx/conf.py astropy_helpers/sphinx/setup_package.py astropy_helpers/sphinx/ext/__init__.py astropy_helpers/sphinx/ext/astropyautosummary.py astropy_helpers/sphinx/ext/automodapi.py astropy_helpers/sphinx/ext/automodsumm.py astropy_helpers/sphinx/ext/changelog_links.py astropy_helpers/sphinx/ext/comment_eater.py astropy_helpers/sphinx/ext/compiler_unparse.py astropy_helpers/sphinx/ext/docscrape.py astropy_helpers/sphinx/ext/docscrape_sphinx.py astropy_helpers/sphinx/ext/doctest.py astropy_helpers/sphinx/ext/edit_on_github.py astropy_helpers/sphinx/ext/numpydoc.py astropy_helpers/sphinx/ext/phantom_import.py astropy_helpers/sphinx/ext/smart_resolver.py astropy_helpers/sphinx/ext/tocdepthfix.py astropy_helpers/sphinx/ext/traitsdoc.py astropy_helpers/sphinx/ext/utils.py astropy_helpers/sphinx/ext/viewcode.py astropy_helpers/sphinx/ext/templates/autosummary_core/base.rst astropy_helpers/sphinx/ext/templates/autosummary_core/class.rst astropy_helpers/sphinx/ext/templates/autosummary_core/module.rst astropy_helpers/sphinx/ext/tests/__init__.py astropy_helpers/sphinx/ext/tests/test_automodapi.py astropy_helpers/sphinx/ext/tests/test_automodsumm.py astropy_helpers/sphinx/ext/tests/test_utils.py astropy_helpers/sphinx/themes/bootstrap-astropy/globaltoc.html astropy_helpers/sphinx/themes/bootstrap-astropy/layout.html astropy_helpers/sphinx/themes/bootstrap-astropy/localtoc.html astropy_helpers/sphinx/themes/bootstrap-astropy/searchbox.html astropy_helpers/sphinx/themes/bootstrap-astropy/theme.conf 
astropy_helpers/sphinx/themes/bootstrap-astropy/static/astropy_linkout_20.png astropy_helpers/sphinx/themes/bootstrap-astropy/static/astropy_logo.ico astropy_helpers/sphinx/themes/bootstrap-astropy/static/astropy_logo_32.png astropy_helpers/sphinx/themes/bootstrap-astropy/static/bootstrap-astropy.css astropy_helpers/sphinx/themes/bootstrap-astropy/static/sidebar.js astropy_helpers/src/__init__.py astropy_helpers/src/compiler.c astropy_helpers/src/setup_package.pyspectral-cube-0.3.1/astropy_helpers/astropy_helpers.egg-info/top_level.txt0000644000077000000240000000002012340435031027142 0ustar adamstaff00000000000000astropy_helpers spectral-cube-0.3.1/astropy_helpers/CHANGES.rst0000644000077000000240000002061612533471373021327 0ustar adamstaff00000000000000astropy-helpers Changelog ========================= 1.0.2 (2015-04-02) ------------------ - Various fixes enabling the astropy-helpers Sphinx build command and Sphinx extensions to work with Sphinx 1.3. [#148] - More improvement to the ability to handle multiple versions of astropy-helpers being imported in the same Python interpreter session in the (somewhat rare) case of nested installs. [#147] - To better support high resolution displays, use SVG for the astropy logo and linkout image, falling back to PNGs for browsers that support it. [#150, #151] - Improve ``setup_helpers.get_compiler_version`` to work with more compilers, and to return more info. This will help fix builds of Astropy on less common compilers, like Sun C. [#153] 1.0.1 (2015-03-04) ------------------ - Released in concert with v0.4.8 to address the same issues. - Improved the ``ah_bootstrap`` script's ability to override existing installations of astropy-helpers with new versions in the context of installing multiple packages simultaneously within the same Python interpreter (e.g. when one package has in its ``setup_requires`` another package that uses a different version of astropy-helpers. [#144] - Added a workaround to an issue in matplotlib that can, in rare cases, lead to a crash when installing packages that import matplotlib at build time. [#144] 0.4.8 (2015-03-04) ------------------ - Improved the ``ah_bootstrap`` script's ability to override existing installations of astropy-helpers with new versions in the context of installing multiple packages simultaneously within the same Python interpreter (e.g. when one package has in its ``setup_requires`` another package that uses a different version of astropy-helpers. [#144] - Added a workaround to an issue in matplotlib that can, in rare cases, lead to a crash when installing packages that import matplotlib at build time. [#144] 1.0 (2015-02-17) ---------------- - Added new pre-/post-command hook points for ``setup.py`` commands. Now any package can define code to run before and/or after any ``setup.py`` command without having to manually subclass that command by adding ``pre__hook`` and ``post__hook`` callables to the package's ``setup_package.py`` module. See the PR for more details. 
[#112] - The following objects in the ``astropy_helpers.setup_helpers`` module have been relocated: - ``get_dummy_distribution``, ``get_distutils_*``, ``get_compiler_option``, ``add_command_option``, ``is_distutils_display_option`` -> ``astropy_helpers.distutils_helpers`` - ``should_build_with_cython``, ``generate_build_ext_command`` -> ``astropy_helpers.commands.build_ext`` - ``AstropyBuildPy`` -> ``astropy_helpers.commands.build_py`` - ``AstropyBuildSphinx`` -> ``astropy_helpers.commands.build_sphinx`` - ``AstropyInstall`` -> ``astropy_helpers.commands.install`` - ``AstropyInstallLib`` -> ``astropy_helpers.commands.install_lib`` - ``AstropyRegister`` -> ``astropy_helpers.commands.register`` - ``get_pkg_version_module`` -> ``astropy_helpers.version_helpers`` - ``write_if_different``, ``import_file``, ``get_numpy_include_path`` -> ``astropy_helpers.utils`` All of these are "soft" deprecations in the sense that they are still importable from ``astropy_helpers.setup_helpers`` for now, and there is no (easy) way to produce deprecation warnings when importing these objects from ``setup_helpers`` rather than directly from the modules they are defined in. But please consider updating any imports to these objects. [#110] - Use of the ``astropy.sphinx.ext.astropyautosummary`` extension is deprecated for use with Sphinx < 1.2. Instead it should suffice to remove this extension for the ``extensions`` list in your ``conf.py`` and add the stock ``sphinx.ext.autosummary`` instead. [#131] 0.4.7 (2015-02-17) ------------------ - Fixed incorrect/missing git hash being added to the generated ``version.py`` when creating a release. [#141] 0.4.6 (2015-02-16) ------------------ - Fixed problems related to the automatically generated _compiler module not being created properly. [#139] 0.4.5 (2015-02-11) ------------------ - Fixed an issue where ah_bootstrap.py could blow up when astropy_helper's version number is 1.0. - Added a workaround for documentation of properties in the rare case where the class's metaclass has a property of the same name. [#130] - Fixed an issue on Python 3 where importing a package using astropy-helper's generated version.py module would crash when the current working directory is an empty git repository. [#114] - Fixed an issue where the "revision count" appended to .dev versions by the generated version.py did not accurately reflect the revision count for the package it belongs to, and could be invalid if the current working directory is an unrelated git repository. [#107] - Likewise, fixed a confusing warning message that could occur in the same circumstances as the above issue. [#121] 0.4.4 (2014-12-31) ------------------ - More improvements for building the documentation using Python 3.x. [#100] - Additional minor fixes to Python 3 support. [#115] - Updates to support new test features in Astropy [#92, #106] 0.4.3 (2014-10-22) ------------------ - The generated ``version.py`` file now preserves the git hash of installed copies of the package as well as when building a source distribution. That is, the git hash of the changeset that was installed/released is preserved. [#87] - In smart resolver add resolution for class links when they exist in the intersphinx inventory, but not the mapping of the current package (e.g. 
  when an affiliated package uses an astropy core class whose "actual" and
  "documented" locations differ) [#88]

- Fixed a bug that could occur when running ``setup.py`` for the first time
  in a repository that uses astropy-helpers as a submodule:
  ``AttributeError: 'NoneType' object has no attribute 'mkdtemp'`` [#89]

- Fixed a bug where optional arguments to the ``doctest-skip`` Sphinx
  directive were sometimes being left in the generated documentation
  output. [#90]

- Improved support for building the documentation using Python 3.x. [#96]

- Avoid error message if .git directory is not present. [#91]

0.4.2 (2014-08-09)
------------------

- Fixed some CSS issues in generated API docs. [#69]

- Fixed the warning message that could be displayed when generating a
  version number with some older versions of git. [#77]

- Fixed automodsumm to work with new versions of Sphinx (>= 1.2.2). [#80]

0.4.1 (2014-08-08)
------------------

- Fixed git revision count on systems with git versions older than
  v1.7.2. [#70]

- Fixed display of warning text when running a git command fails (previously
  the output of stderr was not being decoded properly). [#70]

- The ``--offline`` flag to ``setup.py`` understood by ``ah_bootstrap.py``
  now also prevents git from going online to fetch submodule updates. [#67]

- The Sphinx extension for converting issue numbers to links in the changelog
  now supports working on arbitrary pages via a new ``conf.py`` setting:
  ``changelog_links_docpattern``.  By default it affects the ``changelog``
  and ``whatsnew`` pages in one's Sphinx docs. [#61]

- Fixed crash that could result from users with missing/misconfigured locale
  settings. [#58]

- The font used for code examples in the docs is now the system-defined
  ``monospace`` font, rather than ``Minaco``, which is not available on all
  platforms. [#50]

0.4 (2014-07-15)
----------------

- Initial release of astropy-helpers.  See `APE4
  <https://github.com/astropy/astropy-APEs/blob/master/APE4.rst>`_ for
  details of the motivation and design of this package.

- The ``astropy_helpers`` package replaces the following modules in the
  ``astropy`` package:

  - ``astropy.setup_helpers`` -> ``astropy_helpers.setup_helpers``

  - ``astropy.version_helpers`` -> ``astropy_helpers.version_helpers``

  - ``astropy.sphinx`` -> ``astropy_helpers.sphinx``

  These modules should be considered deprecated in ``astropy``, and any new,
  non-critical changes to those modules will be made in ``astropy_helpers``
  instead.  Affiliated packages wishing to make use of those modules (as in
  the Astropy package-template) should use the versions from
  ``astropy_helpers`` instead, and include the ``ah_bootstrap.py`` script in
  their project, for bootstrapping the ``astropy_helpers`` package in their
  setup.py script.
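.. An illustrative sketch of the pre-/post-command hooks described in the
   1.0 entry above (the hook names mirror those exercised by this package's
   own tests); a package opts in by defining such callables in its
   ``setup_package.py``::

       def pre_build_hook(cmd_obj):
           print('Hello build!')

       def post_build_hook(cmd_obj):
           print('Goodbye build!')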
spectral-cube-0.3.1/astropy_helpers/continuous-integration/0000755000077000000240000000000012654610601024240 5ustar adamstaff00000000000000spectral-cube-0.3.1/astropy_helpers/continuous-integration/appveyor/0000755000077000000240000000000012654610601026105 5ustar adamstaff00000000000000spectral-cube-0.3.1/astropy_helpers/continuous-integration/appveyor/install-miniconda.ps10000644000077000000240000000446012533471373032152 0ustar adamstaff00000000000000# Sample script to install anaconda under windows # Authors: Stuart Mumford # Borrwed from: Olivier Grisel and Kyle Kastner # License: BSD 3 clause $MINICONDA_URL = "http://repo.continuum.io/miniconda/" function DownloadMiniconda ($version, $platform_suffix) { $webclient = New-Object System.Net.WebClient $filename = "Miniconda-" + $version + "-Windows-" + $platform_suffix + ".exe" $url = $MINICONDA_URL + $filename $basedir = $pwd.Path + "\" $filepath = $basedir + $filename if (Test-Path $filename) { Write-Host "Reusing" $filepath return $filepath } # Download and retry up to 3 times in case of network transient errors. Write-Host "Downloading" $filename "from" $url $retry_attempts = 2 for($i=0; $i -lt $retry_attempts; $i++){ try { $webclient.DownloadFile($url, $filepath) break } Catch [Exception]{ Start-Sleep 1 } } if (Test-Path $filepath) { Write-Host "File saved at" $filepath } else { # Retry once to get the error message if any at the last try $webclient.DownloadFile($url, $filepath) } return $filepath } function InstallMiniconda ($python_version, $architecture, $python_home) { Write-Host "Installing miniconda" $python_version "for" $architecture "bit architecture to" $python_home if (Test-Path $python_home) { Write-Host $python_home "already exists, skipping." return $false } if ($architecture -eq "x86") { $platform_suffix = "x86" } else { $platform_suffix = "x86_64" } $filepath = DownloadMiniconda $python_version $platform_suffix Write-Host "Installing" $filepath "to" $python_home $args = "/InstallationType=AllUsers /S /AddToPath=1 /RegisterPython=1 /D=" + $python_home Write-Host $filepath $args Start-Process -FilePath $filepath -ArgumentList $args -Wait -Passthru #Start-Sleep -s 15 if (Test-Path C:\conda) { Write-Host "Miniconda $python_version ($architecture) installation complete" } else { Write-Host "Failed to install Python in $python_home" Exit 1 } } function main () { InstallMiniconda $env:MINICONDA_VERSION $env:PLATFORM $env:PYTHON } main spectral-cube-0.3.1/astropy_helpers/continuous-integration/appveyor/windows_sdk.cmd0000644000077000000240000000346212533471373031141 0ustar adamstaff00000000000000:: To build extensions for 64 bit Python 3, we need to configure environment :: variables to use the MSVC 2010 C++ compilers from GRMSDKX_EN_DVD.iso of: :: MS Windows SDK for Windows 7 and .NET Framework 4 (SDK v7.1) :: :: To build extensions for 64 bit Python 2, we need to configure environment :: variables to use the MSVC 2008 C++ compilers from GRMSDKX_EN_DVD.iso of: :: MS Windows SDK for Windows 7 and .NET Framework 3.5 (SDK v7.0) :: :: 32 bit builds do not require specific environment configurations. 
:: :: Note: this script needs to be run with the /E:ON and /V:ON flags for the :: cmd interpreter, at least for (SDK v7.0) :: :: More details at: :: https://github.com/cython/cython/wiki/64BitCythonExtensionsOnWindows :: http://stackoverflow.com/a/13751649/163740 :: :: Author: Olivier Grisel :: License: CC0 1.0 Universal: http://creativecommons.org/publicdomain/zero/1.0/ @ECHO OFF SET COMMAND_TO_RUN=%* SET WIN_SDK_ROOT=C:\Program Files\Microsoft SDKs\Windows SET MAJOR_PYTHON_VERSION="%PYTHON_VERSION:~0,1%" IF %MAJOR_PYTHON_VERSION% == "2" ( SET WINDOWS_SDK_VERSION="v7.0" ) ELSE IF %MAJOR_PYTHON_VERSION% == "3" ( SET WINDOWS_SDK_VERSION="v7.1" ) ELSE ( ECHO Unsupported Python version: "%MAJOR_PYTHON_VERSION%" EXIT 1 ) IF "%PYTHON_ARCH%"=="64" ( ECHO Configuring Windows SDK %WINDOWS_SDK_VERSION% for Python %MAJOR_PYTHON_VERSION% on a 64 bit architecture SET DISTUTILS_USE_SDK=1 SET MSSdk=1 "%WIN_SDK_ROOT%\%WINDOWS_SDK_VERSION%\Setup\WindowsSdkVer.exe" -q -version:%WINDOWS_SDK_VERSION% "%WIN_SDK_ROOT%\%WINDOWS_SDK_VERSION%\Bin\SetEnv.cmd" /x64 /release ECHO Executing: %COMMAND_TO_RUN% call %COMMAND_TO_RUN% || EXIT 1 ) ELSE ( ECHO Using default MSVC build environment for 32 bit architecture ECHO Executing: %COMMAND_TO_RUN% call %COMMAND_TO_RUN% || EXIT 1 ) spectral-cube-0.3.1/astropy_helpers/continuous-integration/travis/0000755000077000000240000000000012654610601025550 5ustar adamstaff00000000000000spectral-cube-0.3.1/astropy_helpers/continuous-integration/travis/install_conda_linux.sh0000755000077000000240000000032412533471373032146 0ustar adamstaff00000000000000#!/bin/bash wget http://repo.continuum.io/miniconda/Miniconda-latest-Linux-x86_64.sh -O miniconda.sh chmod +x miniconda.sh ./miniconda.sh -b export PATH=/home/travis/miniconda/bin:$PATH conda update --yes conda spectral-cube-0.3.1/astropy_helpers/continuous-integration/travis/install_conda_osx.sh0000755000077000000240000000032512533471373031621 0ustar adamstaff00000000000000#!/bin/bash wget http://repo.continuum.io/miniconda/Miniconda-3.7.3-MacOSX-x86_64.sh -O miniconda.sh chmod +x miniconda.sh ./miniconda.sh -b export PATH=/Users/travis/miniconda/bin:$PATH conda update --yes conda spectral-cube-0.3.1/astropy_helpers/continuous-integration/travis/install_graphviz_linux.sh0000755000077000000240000000007712533471373032721 0ustar adamstaff00000000000000#!/bin/bash sudo apt-get update sudo apt-get install graphviz spectral-cube-0.3.1/astropy_helpers/continuous-integration/travis/install_graphviz_osx.sh0000755000077000000240000000005712533471373032371 0ustar adamstaff00000000000000#!/bin/bash brew update brew install graphviz spectral-cube-0.3.1/astropy_helpers/CONTRIBUTING.md0000644000077000000240000000216512412505144021743 0ustar adamstaff00000000000000Contributing to astropy-helpers =============================== The guidelines for contributing to ``astropy-helpers`` are generally the same as the [contributing guidelines for the astropy core package](http://github.com/astropy/astropy/blob/master/CONTRIBUTING.md). Basically, report relevant issues in the ``astropy-helpers`` issue tracker, and we welcome pull requests that broadly follow the [Astropy coding guidelines](http://docs.astropy.org/en/latest/development/codeguide.html). The key subtlety lies in understanding the relationship between ``astropy`` and ``astropy-helpers``. This package contains the build, installation, and documentation tools used by astropy. 
It also includes support for the ``setup.py test`` command, though Astropy is still required for this to function (it does not currently include the full Astropy test runner). So issues or improvements to that functionality should be addressed in this package. Any other aspect of the [astropy core package](http://github.com/astropy/astropy) (or any other package that uses ``astropy-helpers``) should be addressed in the github repository for that package. spectral-cube-0.3.1/astropy_helpers/ez_setup.py0000644000077000000240000002757312340434262021736 0ustar adamstaff00000000000000#!python """Bootstrap setuptools installation If you want to use setuptools in your package's setup.py, just include this file in the same directory with it, and add this to the top of your setup.py:: from ez_setup import use_setuptools use_setuptools() If you want to require a specific version of setuptools, set a download mirror, or use an alternate download directory, you can do so by supplying the appropriate options to ``use_setuptools()``. This file can also be run as a script to install or upgrade setuptools. """ import os import shutil import sys import tempfile import tarfile import optparse import subprocess import platform from distutils import log try: from site import USER_SITE except ImportError: USER_SITE = None DEFAULT_VERSION = "1.4.2" DEFAULT_URL = "https://pypi.python.org/packages/source/s/setuptools/" def _python_cmd(*args): args = (sys.executable,) + args return subprocess.call(args) == 0 def _check_call_py24(cmd, *args, **kwargs): res = subprocess.call(cmd, *args, **kwargs) class CalledProcessError(Exception): pass if not res == 0: msg = "Command '%s' return non-zero exit status %d" % (cmd, res) raise CalledProcessError(msg) vars(subprocess).setdefault('check_call', _check_call_py24) def _install(tarball, install_args=()): # extracting the tarball tmpdir = tempfile.mkdtemp() log.warn('Extracting in %s', tmpdir) old_wd = os.getcwd() try: os.chdir(tmpdir) tar = tarfile.open(tarball) _extractall(tar) tar.close() # going in the directory subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0]) os.chdir(subdir) log.warn('Now working in %s', subdir) # installing log.warn('Installing Setuptools') if not _python_cmd('setup.py', 'install', *install_args): log.warn('Something went wrong during the installation.') log.warn('See the error message above.') # exitcode will be 2 return 2 finally: os.chdir(old_wd) shutil.rmtree(tmpdir) def _build_egg(egg, tarball, to_dir): # extracting the tarball tmpdir = tempfile.mkdtemp() log.warn('Extracting in %s', tmpdir) old_wd = os.getcwd() try: os.chdir(tmpdir) tar = tarfile.open(tarball) _extractall(tar) tar.close() # going in the directory subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0]) os.chdir(subdir) log.warn('Now working in %s', subdir) # building an egg log.warn('Building a Setuptools egg in %s', to_dir) _python_cmd('setup.py', '-q', 'bdist_egg', '--dist-dir', to_dir) finally: os.chdir(old_wd) shutil.rmtree(tmpdir) # returning the result log.warn(egg) if not os.path.exists(egg): raise IOError('Could not build the egg.') def _do_download(version, download_base, to_dir, download_delay): egg = os.path.join(to_dir, 'setuptools-%s-py%d.%d.egg' % (version, sys.version_info[0], sys.version_info[1])) if not os.path.exists(egg): tarball = download_setuptools(version, download_base, to_dir, download_delay) _build_egg(egg, tarball, to_dir) sys.path.insert(0, egg) # Remove previously-imported pkg_resources if present (see # 
https://bitbucket.org/pypa/setuptools/pull-request/7/ for details). if 'pkg_resources' in sys.modules: del sys.modules['pkg_resources'] import setuptools setuptools.bootstrap_install_from = egg def use_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL, to_dir=os.curdir, download_delay=15): # making sure we use the absolute path to_dir = os.path.abspath(to_dir) was_imported = 'pkg_resources' in sys.modules or \ 'setuptools' in sys.modules try: import pkg_resources except ImportError: return _do_download(version, download_base, to_dir, download_delay) try: pkg_resources.require("setuptools>=" + version) return except pkg_resources.VersionConflict: e = sys.exc_info()[1] if was_imported: sys.stderr.write( "The required version of setuptools (>=%s) is not available,\n" "and can't be installed while this script is running. Please\n" "install a more recent version first, using\n" "'easy_install -U setuptools'." "\n\n(Currently using %r)\n" % (version, e.args[0])) sys.exit(2) else: del pkg_resources, sys.modules['pkg_resources'] # reload ok return _do_download(version, download_base, to_dir, download_delay) except pkg_resources.DistributionNotFound: return _do_download(version, download_base, to_dir, download_delay) def _clean_check(cmd, target): """ Run the command to download target. If the command fails, clean up before re-raising the error. """ try: subprocess.check_call(cmd) except subprocess.CalledProcessError: if os.access(target, os.F_OK): os.unlink(target) raise def download_file_powershell(url, target): """ Download the file at url to target using Powershell (which will validate trust). Raise an exception if the command cannot complete. """ target = os.path.abspath(target) cmd = [ 'powershell', '-Command', "(new-object System.Net.WebClient).DownloadFile(%(url)r, %(target)r)" % vars(), ] _clean_check(cmd, target) def has_powershell(): if platform.system() != 'Windows': return False cmd = ['powershell', '-Command', 'echo test'] devnull = open(os.path.devnull, 'wb') try: try: subprocess.check_call(cmd, stdout=devnull, stderr=devnull) except: return False finally: devnull.close() return True download_file_powershell.viable = has_powershell def download_file_curl(url, target): cmd = ['curl', url, '--silent', '--output', target] _clean_check(cmd, target) def has_curl(): cmd = ['curl', '--version'] devnull = open(os.path.devnull, 'wb') try: try: subprocess.check_call(cmd, stdout=devnull, stderr=devnull) except: return False finally: devnull.close() return True download_file_curl.viable = has_curl def download_file_wget(url, target): cmd = ['wget', url, '--quiet', '--output-document', target] _clean_check(cmd, target) def has_wget(): cmd = ['wget', '--version'] devnull = open(os.path.devnull, 'wb') try: try: subprocess.check_call(cmd, stdout=devnull, stderr=devnull) except: return False finally: devnull.close() return True download_file_wget.viable = has_wget def download_file_insecure(url, target): """ Use Python to download the file, even though it cannot authenticate the connection. """ try: from urllib.request import urlopen except ImportError: from urllib2 import urlopen src = dst = None try: src = urlopen(url) # Read/write all in one block, so we don't create a corrupt file # if the download is interrupted. 
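        # (Note: unlike the external downloaders above, there is no
        # _clean_check() unlink-on-failure wrapper here, so buffering the
        # whole payload before opening the target file is what prevents a
        # truncated download from leaving a corrupt file behind.)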
data = src.read() dst = open(target, "wb") dst.write(data) finally: if src: src.close() if dst: dst.close() download_file_insecure.viable = lambda: True def get_best_downloader(): downloaders = [ download_file_powershell, download_file_curl, download_file_wget, download_file_insecure, ] for dl in downloaders: if dl.viable(): return dl def download_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL, to_dir=os.curdir, delay=15, downloader_factory=get_best_downloader): """Download setuptools from a specified location and return its filename `version` should be a valid setuptools version number that is available as an egg for download under the `download_base` URL (which should end with a '/'). `to_dir` is the directory where the egg will be downloaded. `delay` is the number of seconds to pause before an actual download attempt. ``downloader_factory`` should be a function taking no arguments and returning a function for downloading a URL to a target. """ # making sure we use the absolute path to_dir = os.path.abspath(to_dir) tgz_name = "setuptools-%s.tar.gz" % version url = download_base + tgz_name saveto = os.path.join(to_dir, tgz_name) if not os.path.exists(saveto): # Avoid repeated downloads log.warn("Downloading %s", url) downloader = downloader_factory() downloader(url, saveto) return os.path.realpath(saveto) def _extractall(self, path=".", members=None): """Extract all members from the archive to the current working directory and set owner, modification time and permissions on directories afterwards. `path' specifies a different directory to extract to. `members' is optional and must be a subset of the list returned by getmembers(). """ import copy import operator from tarfile import ExtractError directories = [] if members is None: members = self for tarinfo in members: if tarinfo.isdir(): # Extract directories with a safe mode. directories.append(tarinfo) tarinfo = copy.copy(tarinfo) tarinfo.mode = 448 # decimal for oct 0700 self.extract(tarinfo, path) # Reverse sort directories. if sys.version_info < (2, 4): def sorter(dir1, dir2): return cmp(dir1.name, dir2.name) directories.sort(sorter) directories.reverse() else: directories.sort(key=operator.attrgetter('name'), reverse=True) # Set correct owner, mtime and filemode on directories. 
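    # (After the reverse sort above, deeper paths come first; restoring a
    # child's metadata touches its parent's mtime, so parents must be fixed
    # up last for their timestamps to survive.)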
for tarinfo in directories: dirpath = os.path.join(path, tarinfo.name) try: self.chown(tarinfo, dirpath) self.utime(tarinfo, dirpath) self.chmod(tarinfo, dirpath) except ExtractError: e = sys.exc_info()[1] if self.errorlevel > 1: raise else: self._dbg(1, "tarfile: %s" % e) def _build_install_args(options): """ Build the arguments to 'python setup.py install' on the setuptools package """ install_args = [] if options.user_install: if sys.version_info < (2, 6): log.warn("--user requires Python 2.6 or later") raise SystemExit(1) install_args.append('--user') return install_args def _parse_args(): """ Parse the command line for options """ parser = optparse.OptionParser() parser.add_option( '--user', dest='user_install', action='store_true', default=False, help='install in user site package (requires Python 2.6 or later)') parser.add_option( '--download-base', dest='download_base', metavar="URL", default=DEFAULT_URL, help='alternative URL from where to download the setuptools package') parser.add_option( '--insecure', dest='downloader_factory', action='store_const', const=lambda: download_file_insecure, default=get_best_downloader, help='Use internal, non-validating downloader' ) options, args = parser.parse_args() # positional arguments are ignored return options def main(version=DEFAULT_VERSION): """Install or upgrade setuptools and EasyInstall""" options = _parse_args() tarball = download_setuptools(download_base=options.download_base, downloader_factory=options.downloader_factory) return _install(tarball, _build_install_args(options)) if __name__ == '__main__': sys.exit(main()) spectral-cube-0.3.1/astropy_helpers/LICENSE.rst0000644000077000000240000000272312412505144021326 0ustar adamstaff00000000000000Copyright (c) 2014, Astropy Developers All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the Astropy Team nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
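For reference, the helpers in ``ez_setup.py`` above compose in a straightforward way. The following is a minimal sketch only, assuming the bundled module is importable as ``ez_setup``; the module name and the private ``_install`` helper are taken from the code above, and nothing here ships with the distribution:

    import ez_setup

    # Pick the first viable downloader (powershell, then curl, then wget,
    # then the non-validating urllib fallback), as get_best_downloader() does.
    downloader_factory = ez_setup.get_best_downloader

    # Fetch the setuptools tarball for DEFAULT_VERSION into the current
    # directory; the download is skipped if the file already exists.
    tarball = ez_setup.download_setuptools(downloader_factory=downloader_factory)

    # Unpack the tarball and run ``setup.py install --user`` on it, mirroring
    # what main() does with the arguments built by _build_install_args().
    ez_setup._install(tarball, ['--user'])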
spectral-cube-0.3.1/astropy_helpers/licenses/0000755000077000000240000000000012654610601021316 5ustar adamstaff00000000000000spectral-cube-0.3.1/astropy_helpers/licenses/LICENSE_COPYBUTTON.rst0000644000077000000240000000471112412505144024720 0ustar adamstaff00000000000000Copyright 2014 Python Software Foundation License: PSF PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2 -------------------------------------------- . 1. This LICENSE AGREEMENT is between the Python Software Foundation ("PSF"), and the Individual or Organization ("Licensee") accessing and otherwise using this software ("Python") in source or binary form and its associated documentation. . 2. Subject to the terms and conditions of this License Agreement, PSF hereby grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce, analyze, test, perform and/or display publicly, prepare derivative works, distribute, and otherwise use Python alone or in any derivative version, provided, however, that PSF's License Agreement and PSF's notice of copyright, i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006 Python Software Foundation; All Rights Reserved" are retained in Python alone or in any derivative version prepared by Licensee. . 3. In the event Licensee prepares a derivative work that is based on or incorporates Python or any part thereof, and wants to make the derivative work available to others as provided herein, then Licensee hereby agrees to include in any such work a brief summary of the changes made to Python. . 4. PSF is making Python available to Licensee on an "AS IS" basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT INFRINGE ANY THIRD PARTY RIGHTS. . 5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON, OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF. . 6. This License Agreement will automatically terminate upon a material breach of its terms and conditions. . 7. Nothing in this License Agreement shall be deemed to create any relationship of agency, partnership, or joint venture between PSF and Licensee. This License Agreement does not grant permission to use PSF trademarks or trade name in a trademark sense to endorse or promote products or services of Licensee, or any third party. . 8. By copying, installing or otherwise using Python, Licensee agrees to be bound by the terms and conditions of this License Agreement. spectral-cube-0.3.1/astropy_helpers/licenses/LICENSE_NUMPYDOC.rst0000644000077000000240000001350712412505144024453 0ustar adamstaff00000000000000------------------------------------------------------------------------------- The files - numpydoc.py - docscrape.py - docscrape_sphinx.py - phantom_import.py have the following license: Copyright (C) 2008 Stefan van der Walt , Pauli Virtanen Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. 
Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ------------------------------------------------------------------------------- The files - compiler_unparse.py - comment_eater.py - traitsdoc.py have the following license: This software is OSI Certified Open Source Software. OSI Certified is a certification mark of the Open Source Initiative. Copyright (c) 2006, Enthought, Inc. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Enthought, Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ------------------------------------------------------------------------------- The file - plot_directive.py originates from Matplotlib (http://matplotlib.sf.net/) which has the following license: Copyright (c) 2002-2008 John D. Hunter; All Rights Reserved. 1. This LICENSE AGREEMENT is between John D. Hunter ("JDH"), and the Individual or Organization ("Licensee") accessing and otherwise using matplotlib software in source or binary form and its associated documentation. 2.
Subject to the terms and conditions of this License Agreement, JDH hereby grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce, analyze, test, perform and/or display publicly, prepare derivative works, distribute, and otherwise use matplotlib 0.98.3 alone or in any derivative version, provided, however, that JDH's License Agreement and JDH's notice of copyright, i.e., "Copyright (c) 2002-2008 John D. Hunter; All Rights Reserved" are retained in matplotlib 0.98.3 alone or in any derivative version prepared by Licensee. 3. In the event Licensee prepares a derivative work that is based on or incorporates matplotlib 0.98.3 or any part thereof, and wants to make the derivative work available to others as provided herein, then Licensee hereby agrees to include in any such work a brief summary of the changes made to matplotlib 0.98.3. 4. JDH is making matplotlib 0.98.3 available to Licensee on an "AS IS" basis. JDH MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, JDH MAKES NO AND DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF MATPLOTLIB 0.98.3 WILL NOT INFRINGE ANY THIRD PARTY RIGHTS. 5. JDH SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF MATPLOTLIB 0.98.3 FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING MATPLOTLIB 0.98.3, OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF. 6. This License Agreement will automatically terminate upon a material breach of its terms and conditions. 7. Nothing in this License Agreement shall be deemed to create any relationship of agency, partnership, or joint venture between JDH and Licensee. This License Agreement does not grant permission to use JDH trademarks or trade name in a trademark sense to endorse or promote products or services of Licensee, or any third party. 8. By copying, installing or otherwise using matplotlib 0.98.3, Licensee agrees to be bound by the terms and conditions of this License Agreement. spectral-cube-0.3.1/astropy_helpers/MANIFEST.in0000644000077000000240000000024312340434262021245 0ustar adamstaff00000000000000include README.rst include CHANGES.rst include LICENSE.rst include ez_setup.py include ah_bootstrap.py exclude *.pyc *.o prune build prune astropy_helpers/tests spectral-cube-0.3.1/astropy_helpers/README.rst0000644000077000000240000000323112533471373021206 0ustar adamstaff00000000000000astropy-helpers =============== This project provides a Python package, ``astropy_helpers``, which includes many build, installation, and documentation-related tools used by the Astropy project, but packaged separately for use by other projects that wish to leverage this work. The motivation behind this package and details of its implementation are in the accepted `Astropy Proposal for Enhancement (APE) 4 `_. ``astropy_helpers`` includes a special "bootstrap" module called ``ah_bootstrap.py`` which is intended to be used by a project's setup.py in order to ensure that the ``astropy_helpers`` package is available for build/installation. This is similar to the ``ez_setup.py`` module that is shipped with some projects to bootstrap `setuptools `_. As described in APE4, the version numbers for ``astropy_helpers`` follow the corresponding major/minor version of the `astropy core package `_, but with an independent sequence of micro (bugfix) version numbers.
Hence, the initial release is 0.4, in parallel with Astropy v0.4, which will be the first version of Astropy to use ``astropy-helpers``. For examples of how to implement ``astropy-helpers`` in a project, see the ``setup.py`` and ``setup.cfg`` files of the `Affiliated package template `_. .. image:: https://travis-ci.org/astropy/astropy-helpers.png :target: https://travis-ci.org/astropy/astropy-helpers .. image:: https://coveralls.io/repos/astropy/astropy-helpers/badge.png :target: https://coveralls.io/r/astropy/astropy-helpers spectral-cube-0.3.1/astropy_helpers/setup.cfg0000644000077000000240000000014612340434262021332 0ustar adamstaff00000000000000[pytest] norecursedirs = .tox astropy_helpers/tests/package_template python_functions = test_ spectral-cube-0.3.1/astropy_helpers/setup.py0000755000077000000240000000402312533471373021234 0ustar adamstaff00000000000000#!/usr/bin/env python # Licensed under a 3-clause BSD style license - see LICENSE.rst import ah_bootstrap import pkg_resources from setuptools import setup from astropy_helpers.setup_helpers import register_commands, get_package_info from astropy_helpers.version_helpers import generate_version_py NAME = 'astropy_helpers' VERSION = '1.0.2' RELEASE = 'dev' not in VERSION DOWNLOAD_BASE_URL = 'http://pypi.python.org/packages/source/a/astropy-helpers' generate_version_py(NAME, VERSION, RELEASE, False, uses_git=not RELEASE) # Use the updated version including the git rev count from astropy_helpers.version import version as VERSION cmdclass = register_commands(NAME, VERSION, RELEASE) # This package actually doesn't use the Astropy test command del cmdclass['test'] setup( name=pkg_resources.safe_name(NAME), # astropy_helpers -> astropy-helpers version=VERSION, description='Utilities for building and installing Astropy, Astropy ' 'affiliated packages, and their respective documentation.', author='The Astropy Developers', author_email='astropy.team@gmail.com', license='BSD', url='http://astropy.org', long_description=open('README.rst').read(), download_url='{0}/astropy-helpers-{1}.tar.gz'.format(DOWNLOAD_BASE_URL, VERSION), classifiers=[ 'Development Status :: 5 - Production/Stable', 'Intended Audience :: Developers', 'Framework :: Setuptools Plugin', 'Framework :: Sphinx :: Extension', 'Framework :: Sphinx :: Theme', 'License :: OSI Approved :: BSD License', 'Operating System :: OS Independent', 'Programming Language :: Python', 'Programming Language :: Python :: 3', 'Topic :: Software Development :: Build Tools', 'Topic :: Software Development :: Libraries :: Python Modules', 'Topic :: System :: Archiving :: Packaging' ], cmdclass=cmdclass, zip_safe=False, **get_package_info(exclude=['astropy_helpers.tests']) ) spectral-cube-0.3.1/astropy_helpers/tox.ini0000644000077000000240000000045012533471373021032 0ustar adamstaff00000000000000[tox] envlist = py26,py27,py32,py33,py34 [testenv] deps = pytest numpy Cython Sphinx==1.2.3 # Note: Sphinx is required to run the sphinx.ext tests commands = py.test {posargs} sitepackages = False [testenv:py32] deps = pygments<=1.9 Jinja2<2.7 {[testenv]deps} spectral-cube-0.3.1/CHANGES.rst0000644000077000000240000001277212654610125016102 0ustar adamstaff000000000000000.3.1 (2016-02-04) ------------------ - Preserve metadata when making projections (https://github.com/radio-astro-tools/spectral-cube/pull/250) - bugfix: cube._data cannot be a quantity (https://github.com/radio-astro-tools/spectral-cube/pull/251) - partial fix for ds9 import bug (https://github.com/radio-astro-tools/spectral-cube/pull/253) - 
preserve WCS information in projections (https://github.com/radio-astro-tools/spectral-cube/pull/256) - whitespace stripped from BUNIT (https://github.com/radio-astro-tools/spectral-cube/pull/257) - bugfix: sometimes cube would be read into memory when it should not be (https://github.com/radio-astro-tools/spectral-cube/pull/259) - more projection preservation fixes (https://github.com/radio-astro-tools/spectral-cube/pull/265) - correct jy/beam capitalization (https://github.com/radio-astro-tools/spectral-cube/pull/267) - convenience attribute for beam access (https://github.com/radio-astro-tools/spectral-cube/pull/268) - fix beam reading, which would claim failure even during success (https://github.com/radio-astro-tools/spectral-cube/pull/271) 0.3.0 (2015-08-16) ------------------ - Add experimental line-finding tool using astroquery.splatalogue (https://github.com/radio-astro-tools/spectral-cube/pull/210) - Bugfixes (211,212,217) - Add arithmetic operations (add, subtract, divide, multiply, power) (https://github.com/radio-astro-tools/spectral-cube/pull/220). These operations will not be permitted on large cubes by default, but will require the user to specify that they are allowed using the attribute ``allow_huge_operations`` - Implemented slicewise stddev and mean (https://github.com/radio-astro-tools/spectral-cube/pull/225) - Bugfix: prevent a memory leak when creating a large number of Cubes (https://github.com/radio-astro-tools/spectral-cube/pull/233) - Provide a ``base`` attribute so that tools like joblib can operate on ``SpectralCube`` s as memory maps (https://github.com/radio-astro-tools/spectral-cube/pull/230) - Masks have a quicklook method (https://github.com/radio-astro-tools/spectral-cube/pull/228) - Memory mapping can be disabled (https://github.com/radio-astro-tools/spectral-cube/pull/226) - Add xor operations for Masks (https://github.com/radio-astro-tools/spectral-cube/pull/241) - Added a new StokesSpectralCube class to deal with 4-d cubes (https://github.com/radio-astro-tools/spectral-cube/pull/249) 0.2.2 (2015-03-12) ------------------ - Output mask as a CASA image https://github.com/radio-astro-tools/spectral-cube/pull/171 - ytcube exports to .obj and .ply too https://github.com/radio-astro-tools/spectral-cube/pull/173 - Fix air wavelengths, which were mistreated (https://github.com/radio-astro-tools/spectral-cube/pull/186) - Add support for sum/mean/std over both spatial axes to return a OneDSpectrum object. This PR also removes numpy 1.5-1.7 tests, since many `spectral_cube` functions are not compatible with these versions of numpy (https://github.com/radio-astro-tools/spectral-cube/pull/188) 0.2.1 (2014-12-03) ------------------ - CASA cube readers now compatible with ALMA .image files (tested on Cycle 2 data) https://github.com/radio-astro-tools/spectral-cube/pull/165 - Spectral quicklooks available https://github.com/radio-astro-tools/spectral-cube/pull/164 now that 1D slices are possible https://github.com/radio-astro-tools/spectral-cube/pull/157 - `to_pvextractor` tool allows easy export to `pvextractor `_ https://github.com/radio-astro-tools/spectral-cube/pull/160 - `to_glue` sends the cube to `glue `_ https://github.com/radio-astro-tools/spectral-cube/pull/153 0.2 (2014-09-11) ---------------- - `moments` preserve spectral units now https://github.com/radio-astro-tools/spectral-cube/pull/118 - Initial support added for Air Wavelength. This is only 1-way support, round-tripping (vacuum->air) is not supported yet. 
https://github.com/radio-astro-tools/spectral-cube/pull/117 - Integer slices (single frames) are supported https://github.com/radio-astro-tools/spectral-cube/pull/113 - Bugfix: BUNIT capitalized https://github.com/radio-astro-tools/spectral-cube/pull/112 - Masks can be any array that is broadcastable to the cube shape https://github.com/radio-astro-tools/spectral-cube/pull/115 - Added `.header` and `.hdu` convenience methods https://github.com/radio-astro-tools/spectral-cube/pull/120 - Added public functions `apply_function` and `apply_numpy_function` that allow functions to be run on cubes while preserving important metadata (e.g., WCS) - Added a quicklook tool using aplpy to view slices (https://github.com/radio-astro-tools/spectral-cube/pull/131) - Added subcube and ds9 region extraction tools (https://github.com/radio-astro-tools/spectral-cube/pull/128) - Added a `to_yt` function for easily converting between SpectralCube and yt datacube/dataset objects (https://github.com/radio-astro-tools/spectral-cube/pull/90, https://github.com/radio-astro-tools/spectral-cube/pull/129) - Masks' `.include()` method works without ``data`` arguments. (https://github.com/radio-astro-tools/spectral-cube/pull/147) - Allow movie name to be specified in yt movie creation (https://github.com/radio-astro-tools/spectral-cube/pull/145) - add `flattened_world` method to get the world coordinates corresponding to each pixel in the flattened array (https://github.com/radio-astro-tools/spectral-cube/pull/146) 0.1 (2014-06-01) ---------------- - Initial Release. spectral-cube-0.3.1/docs/0000755000077000000240000000000012654610601015216 5ustar adamstaff00000000000000spectral-cube-0.3.1/docs/_templates/0000755000077000000240000000000012654610601017353 5ustar adamstaff00000000000000spectral-cube-0.3.1/docs/_templates/autosummary/0000755000077000000240000000000012654610601021741 5ustar adamstaff00000000000000spectral-cube-0.3.1/docs/_templates/autosummary/base.rst0000644000077000000240000000005212337446544023415 0ustar adamstaff00000000000000{% extends "autosummary_core/base.rst" %} spectral-cube-0.3.1/docs/_templates/autosummary/class.rst0000644000077000000240000000005312337446544023611 0ustar adamstaff00000000000000{% extends "autosummary_core/class.rst" %} spectral-cube-0.3.1/docs/_templates/autosummary/module.rst0000644000077000000240000000005412337446544023772 0ustar adamstaff00000000000000{% extends "autosummary_core/module.rst" %} spectral-cube-0.3.1/docs/accessing.rst0000644000077000000240000000560012551776560017725 0ustar adamstaff00000000000000Accessing data ============== Once you have initialized a :meth:`~spectral_cube.SpectralCube` instance, either directly or by reading in a file, you can easily access the data values and the world coordinate information. Data values ----------- You can access the underlying data using the ``unmasked_data`` array which is a Numpy-like array:: >>> slice_unmasked = cube.unmasked_data[0,:,:] # doctest: +SKIP The order of the dimensions of the ``unmasked_data`` array is deterministic - it is always ``(n_spectral, n_y, n_x)`` irrespective of how the cube was stored on disk. .. note:: The term ``unmasked`` indicates that the data is the raw original data from the file. :meth:`~spectral_cube.SpectralCube` also allows masking of values, which is discussed in :doc:`masking`. If a slice is not specified, the object returned is not strictly a Numpy array, and will not work with all functions outside of the ``spectral_cube`` package that expect Numpy arrays. 
In order to extract a normal Numpy array, instead specify a slice of ``[:]`` which will force the object to be converted to a Numpy array (the compulsory slicing is necessary in order to avoid memory-related issues with large data cubes). World coordinates ----------------- Given a cube object, it is straightforward to find the coordinates along the spectral axis:: >>> cube.spectral_axis # doctest: +SKIP [ -2.97198762e+03 -2.63992044e+03 -2.30785327e+03 -1.97578610e+03 -1.64371893e+03 -1.31165176e+03 -9.79584583e+02 -6.47517411e+02 ... 3.15629983e+04 3.18950655e+04 3.22271326e+04 3.25591998e+04 3.28912670e+04 3.32233342e+04] m / s The default units of a spectral axis are determined from the FITS header or WCS object used to initialize the cube, but it is also possible to change the spectral axis (see :doc:`manipulating`). More generally, it is possible to extract the world coordinates of all the pixels using the :attr:`~spectral_cube.SpectralCube.world` property, which returns the spectral axis then the two positional coordinates in reverse order (in the same order as the data indices). >>> velo, dec, ra = cube.world[:] # doctest: +SKIP In order to extract coordinates, a slice (such as ``[:]`` above) is required. Using ``[:]`` will return three 3-d arrays with the coordinates for all pixels. Using e.g. ``[0,:,:]`` will return three 2-d arrays of coordinates for the first spectral slice. If you forget to specify a slice, you will get the following error: >>> velo, dec, ra = cube.world # doctest: +SKIP ... Exception: You need to specify a slice (e.g. ``[:]`` or ``[0,:,:]``) in order to access this property. In the case of large data cubes, requesting the coordinates of all pixels would likely be too slow, so the slicing allows you to compute only a subset of the pixel coordinates (see :doc:`big_data` for more information on dealing with large data cubes). spectral-cube-0.3.1/docs/api.rst0000644000077000000240000000013712377070030016520 0ustar adamstaff00000000000000API Documentation ================= .. automodapi:: spectral_cube :no-inheritance-diagram: spectral-cube-0.3.1/docs/arithmetic.rst0000644000077000000240000000137112557414172020112 0ustar adamstaff00000000000000Spectral Cube Arithmetic ======================== Simple arithmetic operations between cubes and scalars, broadcastable numpy arrays, and other cubes are possible. However, such operations should be performed with caution because they require loading the whole cube into memory and will generally create a new cube in memory. Examples:: >>> import astropy.units as u >>> from spectral_cube import SpectralCube >>> cube = SpectralCube.read('adv.fits') # doctest: +SKIP >>> cube2 = cube * 2 # doctest: +SKIP >>> cube3 = cube + 1.5*u.K # doctest: +SKIP >>> cube4 = cube2 + cube3 # doctest: +SKIP Each of these cubes is a new cube in memory. Note that for addition and subtraction, the units must be equivalent to those of the cube. spectral-cube-0.3.1/docs/big_data.rst0000644000077000000240000000755412625614674017515 0ustar adamstaff00000000000000Handling large datasets ======================= .. currentmodule:: spectral_cube .. TODO: we can move things specific to large data and copying/referencing here. The :class:`SpectralCube` class is designed to allow working with files larger than can be stored in memory. To take advantage of this and work effectively with large spectral cubes, you should keep the following three ideas in mind: - Work with small subsets of data at a time. - Minimize data copying.
- Minimize the number of passes over the data. Work with small subsets of data at a time ----------------------------------------- Numpy supports a *memory-mapping* mode which means that the data is stored on disk and the array elements are only loaded into memory when needed. ``spectral_cube`` takes advantage of this if possible, to avoid loading large files into memory. Typically, working with NumPy involves writing code that operates on an entire array at once. For example:: x = <a large, memory-mapped array> y = np.sum(np.abs(x * 3 + 10), axis=0) Unfortunately, this code creates several temporary arrays whose size is equal to ``x``. This is infeasible if ``x`` is a large memory-mapped array, because an operation like ``(x * 3)`` will require more RAM than exists on your system. A better way to compute ``y`` is by working with a single slice of ``x`` at a time:: y = x[0] * 0 for plane in x: y += np.abs(plane * 3 + 10) Many methods in :class:`SpectralCube` allow you to extract subsets of relevant data, to make writing code like this easier: - :meth:`SpectralCube.filled_data`, :meth:`SpectralCube.unmasked_data`, :meth:`SpectralCube.world` all accept Numpy style slice syntax. For example, ``cube.filled_data[0:3, :, :]`` returns only the first 3 spectral channels of the cube, with masked elements replaced with ``cube.fill_value``. - :class:`SpectralCube` itself can be sliced to extract subcubes - :meth:`SpectralCube.spectral_slab` extracts a subset of spectral channels. Many methods in :class:`SpectralCube` iterate over smaller chunks of data, to avoid large memory allocations when working with big cubes. Some of these have a ``how`` keyword parameter, for fine-grained control over how much memory is accessed at once. ``how='cube'`` works with the entire array in memory, ``how='slice'`` works with one slice at a time, and ``how='ray'`` works with one ray at a time. As a user, your best strategy for working with large datasets is to rely on builtin methods to :class:`SpectralCube`, and to access data from :meth:`~SpectralCube.filled_data` and :meth:`~SpectralCube.unmasked_data` in smaller chunks if possible. .. warning :: At the moment, :meth:`~SpectralCube.argmax` and :meth:`~SpectralCube.argmin` are **not** optimized for handling large datasets. Minimize Data Copying --------------------- Methods in :class:`SpectralCube` avoid copying as much as possible. For example, all of the following operations create new cubes or masks without copying any data:: >>> mask = cube > 3 # doctest: +SKIP >>> slab = cube.spectral_slab(...) # doctest: +SKIP >>> subcube = cube[0::2, 10:, 0:30] # doctest: +SKIP >>> cube2 = cube.with_fill(np.nan) # doctest: +SKIP >>> cube2 = cube.apply_mask(mask) # doctest: +SKIP Minimize the number of passes over the data ------------------------------------------- Accessing memory-mapped arrays is much slower than accessing normal in-memory arrays, due to the overhead of reading from disk. Because of this, it is more efficient to perform computations that iterate over the data as few times as possible. An even subtler issue pertains to how the 3D or 4D spectral cube is arranged as a 1D sequence of bytes in a file. Data access is much faster when it corresponds to a single contiguous scan of bytes on disk. For more information on this topic, see `this tutorial on Numpy strides `_. spectral-cube-0.3.1/docs/conf.py0000644000077000000240000001407512643464660016526 0ustar adamstaff00000000000000# -*- coding: utf-8 -*- # Licensed under a 3-clause BSD style license - see LICENSE.rst # # Astropy documentation build configuration file.
# # This file is execfile()d with the current directory set to its containing dir. # # Note that not all possible configuration values are present in this file. # # All configuration values have a default. Some values are defined in # the global Astropy configuration which is loaded here before anything else. # See astropy.sphinx.conf for which values are set there. # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # sys.path.insert(0, os.path.abspath('..')) # IMPORTANT: the above commented section was generated by sphinx-quickstart, but # is *NOT* appropriate for astropy or Astropy affiliated packages. It is left # commented out with this explanation to make it clear why this should not be # done. If the sys.path entry above is added, when the astropy.sphinx.conf # import occurs, it will import the *source* version of astropy instead of the # version installed (if invoked as "make html" or directly with sphinx), or the # version in the build directory (if "python setup.py build_sphinx" is used). # Thus, any C-extensions that are needed to build the documentation will *not* # be accessible, and the documentation will not build correctly. import datetime import os import sys try: import astropy_helpers except ImportError: # Building from inside the docs/ directory? if os.path.basename(os.getcwd()) == 'docs': a_h_path = os.path.abspath(os.path.join('..', 'astropy_helpers')) if os.path.isdir(a_h_path): sys.path.insert(1, a_h_path) # Load all of the global Astropy configuration from astropy_helpers.sphinx.conf import * # Get configuration information from setup.cfg from distutils import config conf = config.ConfigParser() conf.read([os.path.join(os.path.dirname(__file__), '..', 'setup.cfg')]) setup_cfg = dict(conf.items('metadata')) # -- General configuration ---------------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.2' # To perform a Sphinx version check that needs to be more specific than # major.minor, call `check_sphinx_version("x.y.z")` here. # check_sphinx_version("1.2.1") # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns.append('_templates') # This is added to the end of RST files - a good place to put substitutions to # be used globally. rst_epilog += """ """ # -- Project information ------------------------------------------------------ # This does not *have* to match the package name, but typically does project = setup_cfg['package_name'] author = setup_cfg['author'] copyright = '{0}, {1}'.format( datetime.datetime.now().year, setup_cfg['author']) # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. __import__(setup_cfg['package_name']) package = sys.modules[setup_cfg['package_name']] # The short X.Y version. version = package.__version__.split('-', 1)[0] # The full version, including alpha/beta/rc tags. release = package.__version__ # -- Options for HTML output --------------------------------------------------- # A NOTE ON HTML THEMES # The global astropy configuration uses a custom theme, 'bootstrap-astropy', # which is installed along with astropy. 
A different theme can be used or # the options for this theme can be modified by overriding some of the # variables set in the global configuration. The variables set in the # global configuration are listed below, commented out. # Add any paths that contain custom themes here, relative to this directory. # To use a different custom theme, add the directory containing the theme. #html_theme_path = [] # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. To override the custom theme, set this to the # name of a builtin theme or the name of a custom theme in html_theme_path. # html_theme = 'default' # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = '' # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '' # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". html_title = '{0} v{1}'.format(project, release) # Output file base name for HTML help builder. htmlhelp_basename = project + 'doc' # -- Options for LaTeX output -------------------------------------------------- # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). latex_documents = [('index', project + '.tex', project + u' Documentation', author, 'manual')] # -- Options for manual page output -------------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [('index', project.lower(), project + u' Documentation', [author], 1)] ## -- Options for the edit_on_github extension ---------------------------------------- if eval(setup_cfg.get('edit_on_github')): extensions += ['astropy_helpers.sphinx.ext.edit_on_github'] versionmod = __import__(setup_cfg['package_name'] + '.version') edit_on_github_project = setup_cfg['github_project'] if versionmod.version.release: edit_on_github_branch = "v" + versionmod.version.version else: edit_on_github_branch = "master" edit_on_github_source_root = "" edit_on_github_doc_root = "docs" spectral-cube-0.3.1/docs/creating_reading.rst0000644000077000000240000000406612551776560021260 0ustar adamstaff00000000000000Creating/reading spectral cubes =============================== Importing --------- The :class:`~spectral_cube.SpectralCube` class is used to represent 3-dimensional datasets (two positional dimensions and one spectral dimension) with a World Coordinate System (WCS) projection that describes the mapping from pixel to world coordinates and vice-versa. The class is imported with:: >>> from spectral_cube import SpectralCube Reading from a file ------------------- In most cases, you are likely to read in an existing spectral cube from a file. The reader is designed to be able to deal with any arbitrary axis order and always return a consistently oriented spectral cube (see :doc:`accessing`). To read in a file, use the :meth:`~spectral_cube.SpectralCube.read` method as follows:: >>> cube = SpectralCube.read('L1448_13CO.fits') # doctest: +SKIP This will always read the Stokes I parameter in the file. For information on accessing other Stokes parameters, see :doc:`stokes`. .. 
note:: In most cases, the FITS reader should be able to open the file in *memory-mapped* mode, which means that the data is not immediately read, but is instead read as needed when data is accessed. This allows large files (including larger than memory) to be accessed. However, note that certain FITS files cannot be opened in memory-mapped mode, in particular compressed (e.g. ``.fits.gz``) files. See the :doc:`big_data` page for more details about dealing with large data sets. Direct Initialization --------------------- If you are interested in directly creating a :class:`~spectral_cube.SpectralCube` instance, you can do so using a 3-d Numpy-like array with a 3-d :class:`~astropy.wcs.WCS` object:: >>> cube = SpectralCube(data=data, wcs=wcs) # doctest: +SKIP Here ``data`` can be any Numpy-like array, including *memory-mapped* Numpy arrays (as mentioned in `Reading from a file`_, memory-mapping is a technique that avoids reading the whole file into memory and instead accesses it from the disk as needed). spectral-cube-0.3.1/docs/index.rst0000644000077000000240000000460112643500146017060 0ustar adamstaff00000000000000Spectral Cube documentation =========================== The ``spectral-cube`` package provides an easy way to read, manipulate, analyze, and write data cubes with two positional dimensions and one spectral dimension, optionally with Stokes parameters. ``spectral-cube`` aims to be a versatile data container for building custom analysis routines. It provides the following main features: - A uniform interface to spectral cubes, robust to the wide range of conventions of axis order, spatial projections, and spectral units that exist in the wild. - Easy extraction of cube sub-regions using physical coordinates. - Ability to easily create, combine, and apply masks to datasets. - Basic summary statistic methods like moments and array aggregates. - Designed to work with datasets too large to load into memory. Quick start ----------- Here's a simple script demonstrating ``spectral-cube``:: >>> import astropy.units as u >>> from spectral_cube import SpectralCube >>> cube = SpectralCube.read('adv.fits') # doctest: +SKIP >>> print(cube) # doctest: +SKIP SpectralCube with shape=(4, 3, 2) and unit=K: n_x: 2 type_x: RA---SIN unit_x: deg range: 24.062698 deg: 24.063349 deg n_y: 3 type_y: DEC--SIN unit_y: deg range: 29.934094 deg: 29.935209 deg n_s: 4 type_s: VOPT unit_s: m / s range: -321214.699 m / s: -317350.054 m / s # extract the subcube between 98 and 100 GHz >>> slab = cube.spectral_slab(98 * u.GHz, 100 * u.GHz) # doctest: +SKIP # Ignore elements fainter than 1K >>> masked_slab = slab.with_mask(slab > 1) # doctest: +SKIP # Compute the first moment and write to file >>> m1 = masked_slab.moment(order=1) # doctest: +SKIP >>> m1.write('moment_1.fits') # doctest: +SKIP Using ``spectral-cube`` ----------------------- The package centers around the :class:`~spectral_cube.SpectralCube` class. In the following sections, we look at how to read data into this class, manipulate spectral cubes, extract moment maps or subsets of spectral cubes, and write spectral cubes to files. Getting started ^^^^^^^^^^^^^^^ .. toctree:: :maxdepth: 1 installing.rst creating_reading.rst accessing.rst masking.rst arithmetic.rst manipulating.rst writing.rst moments.rst quick_looks.rst Advanced ^^^^^^^^ ..
toctree:: :maxdepth: 1 yt_example.rst big_data.rst api.rst spectral-cube-0.3.1/docs/installing.rst0000644000077000000240000000245412643464660020133 0ustar adamstaff00000000000000Installing ``spectral-cube`` ============================ Requirements ------------ This package has the following dependencies: * `Python `_ 2.7 or later (Python 3.x is supported) * `Numpy `_ 1.8 or later * `Astropy `__ 1.0 or later * `Bottleneck `_, optional (speeds up median and percentile operations on cubes with missing data) Installation ------------ To install the latest stable release, you can type:: pip install spectral-cube or you can download the latest tar file from `PyPI `_ and install it using:: python setup.py install Developer version ----------------- If you want to install the latest developer version of the spectral cube code, you can do so from the git repository:: git clone https://github.com/radio-astro-tools/spectral-cube.git cd spectral-cube python setup.py install You may need to add the ``--user`` option to the last line `if you do not have root access `_. You can also install the latest developer version in a single line with pip:: pip install https://github.com/radio-astro-tools/spectral-cube/archive/master.zip spectral-cube-0.3.1/docs/Makefile0000644000077000000240000001520212337446544016671 0ustar adamstaff00000000000000# Makefile for Sphinx documentation # # You can set these variables from the command line. SPHINXOPTS = SPHINXBUILD = sphinx-build PAPER = BUILDDIR = _build # User-friendly check for sphinx-build ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1) $(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/) endif # Internal variables. PAPEROPT_a4 = -D latex_paper_size=a4 PAPEROPT_letter = -D latex_paper_size=letter ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . # the i18n builder cannot share the environment and doctrees with the others I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . 
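# Every documentation target below is a thin wrapper around sphinx-build:
# ALLSPHINXOPTS (and I18NSPHINXOPTS for the gettext target) carry the
# doctree cache location, the optional paper-size flags and the "." source
# directory assembled above.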
.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext help: @echo "Please use \`make ' where is one of" @echo " html to make standalone HTML files" @echo " dirhtml to make HTML files named index.html in directories" @echo " singlehtml to make a single large HTML file" @echo " pickle to make pickle files" @echo " json to make JSON files" @echo " htmlhelp to make HTML files and a HTML help project" @echo " qthelp to make HTML files and a qthelp project" @echo " devhelp to make HTML files and a Devhelp project" @echo " epub to make an epub" @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" @echo " latexpdf to make LaTeX files and run them through pdflatex" @echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx" @echo " text to make text files" @echo " man to make manual pages" @echo " texinfo to make Texinfo files" @echo " info to make Texinfo files and run them through makeinfo" @echo " gettext to make PO message catalogs" @echo " changes to make an overview of all changed/added/deprecated items" @echo " xml to make Docutils-native XML files" @echo " pseudoxml to make pseudoxml-XML files for display purposes" @echo " linkcheck to check all external links for integrity" @echo " doctest to run all doctests embedded in the documentation (if enabled)" clean: rm -rf $(BUILDDIR)/* html: $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." dirhtml: $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." singlehtml: $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml @echo @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." pickle: $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle @echo @echo "Build finished; now you can process the pickle files." json: $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json @echo @echo "Build finished; now you can process the JSON files." htmlhelp: $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp @echo @echo "Build finished; now you can run HTML Help Workshop with the" \ ".hhp project file in $(BUILDDIR)/htmlhelp." qthelp: $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp @echo @echo "Build finished; now you can run "qcollectiongenerator" with the" \ ".qhcp project file in $(BUILDDIR)/qthelp, like this:" @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/SpectralCube.qhcp" @echo "To view the help file:" @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/SpectralCube.qhc" devhelp: $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp @echo @echo "Build finished." @echo "To view the help file:" @echo "# mkdir -p $$HOME/.local/share/devhelp/SpectralCube" @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/SpectralCube" @echo "# devhelp" epub: $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub @echo @echo "Build finished. The epub file is in $(BUILDDIR)/epub." latex: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." @echo "Run \`make' in that directory to run these through (pdf)latex" \ "(use \`make latexpdf' here to do that automatically)." latexpdf: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo "Running LaTeX files through pdflatex..." 
$(MAKE) -C $(BUILDDIR)/latex all-pdf @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." latexpdfja: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo "Running LaTeX files through platex and dvipdfmx..." $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." text: $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text @echo @echo "Build finished. The text files are in $(BUILDDIR)/text." man: $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man @echo @echo "Build finished. The manual pages are in $(BUILDDIR)/man." texinfo: $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo @echo @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." @echo "Run \`make' in that directory to run these through makeinfo" \ "(use \`make info' here to do that automatically)." info: $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo @echo "Running Texinfo files through makeinfo..." make -C $(BUILDDIR)/texinfo info @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." gettext: $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale @echo @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." changes: $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes @echo @echo "The overview file is in $(BUILDDIR)/changes." linkcheck: $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck @echo @echo "Link check complete; look for any errors in the above output " \ "or in $(BUILDDIR)/linkcheck/output.txt." doctest: $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest @echo "Testing of doctests in the sources finished, look at the " \ "results in $(BUILDDIR)/doctest/output.txt." xml: $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml @echo @echo "Build finished. The XML files are in $(BUILDDIR)/xml." pseudoxml: $(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml @echo @echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml." spectral-cube-0.3.1/docs/manipulating.rst0000644000077000000240000001152212551776560020456 0ustar adamstaff00000000000000Manipulating cubes ================== Modifying the spectral axis --------------------------- As mentioned in :doc:`accessing`, it is straightforward to find the coordinates along the spectral axis using the :attr:`~spectral_cube.SpectralCube.spectral_axis` attribute:: >>> cube.spectral_axis # doctest: +SKIP [ -2.97198762e+03 -2.63992044e+03 -2.30785327e+03 -1.97578610e+03 -1.64371893e+03 -1.31165176e+03 -9.79584583e+02 -6.47517411e+02 ... 3.15629983e+04 3.18950655e+04 3.22271326e+04 3.25591998e+04 3.28912670e+04 3.32233342e+04] m / s The default units of a spectral axis are determined from the FITS header or WCS object used to initialize the cube, but it is also possible to change the spectral axis unit using :meth:`~spectral_cube.SpectralCube.with_spectral_unit`:: >>> from astropy import units as u >>> cube2 = cube.with_spectral_unit(u.km / u.s) # doctest: +SKIP >>> cube2.spectral_axis # doctest: +SKIP [ -2.97198762e+00 -2.63992044e+00 -2.30785327e+00 -1.97578610e+00 -1.64371893e+00 -1.31165176e+00 -9.79584583e-01 -6.47517411e-01 ... 
3.02347296e+01 3.05667968e+01 3.08988639e+01 3.12309311e+01 3.15629983e+01 3.18950655e+01 3.22271326e+01 3.25591998e+01 3.28912670e+01 3.32233342e+01] km / s It is also possible to change from velocity to frequency, for example, but this requires specifying the rest frequency or wavelength as well as a convention for the Doppler shift calculation:: >>> cube3 = cube.with_spectral_unit(u.GHz, velocity_convention='radio', ... rest_value=200 * u.GHz) # doctest: +SKIP >>> cube3.spectral_axis # doctest: +SKIP [ 220.40086492 220.40062079 220.40037667 220.40013254 220.39988841 220.39964429 220.39940016 220.39915604 220.39891191 220.39866778 ... 220.37645231 220.37620818 220.37596406 220.37571993 220.3754758 220.37523168 220.37498755 220.37474342 220.3744993 220.37425517] GHz The new cubes will then preserve the new spectral units when computing moments, for example (see :doc:`moments`). Extracting a spectral slab -------------------------- Given a spectral cube, it is easy to extract a sub-cube covering only a subset of the original range in the spectral axis. To do this, you can use the :meth:`~spectral_cube.SpectralCube.spectral_slab` method. This method takes lower and upper bounds for the spectral axis, as well as an optional rest frequency, and returns a new :class:`~spectral_cube.SpectralCube` instance. The bounds can be specified as a frequency, wavelength, or velocity, but the units have to match the type of the spectral units in the cube (if they do not match, first use :meth:`~spectral_cube.SpectralCube.with_spectral_unit` to ensure that they are in the same units). The bounds should be given as Astropy :class:`Quantities <astropy.units.Quantity>` as follows:: >>> from astropy import units as u >>> subcube = cube.spectral_slab(-50 * u.km / u.s, +50 * u.km / u.s) # doctest: +SKIP The resulting cube ``subcube`` (which is also a :class:`~spectral_cube.SpectralCube` instance) then contains all channels that overlap with the range -50 to 50 km/s relative to the rest frequency assumed by the world coordinates, or the rest frequency specified by a prior call to :meth:`~spectral_cube.SpectralCube.with_spectral_unit`. Extracting a sub-cube by indexing --------------------------------- It is also easy to extract a sub-cube from pixel coordinates using standard Numpy slicing notation:: >>> sub_cube = cube[:100, 10:50, 10:50] # doctest: +SKIP This returns a new :class:`~spectral_cube.SpectralCube` object with updated WCS information. Extracting a subcube from a ds9 region -------------------------------------- Starting with spectral_cube v0.2, you can use ds9 regions to extract subcubes. The minimal enclosing subcube will be extracted with a two-dimensional mask corresponding to the ds9 region. `pyregion `_ is required for region parsing:: >>> region_list = pyregion.open('file.reg') # doctest: +SKIP >>> sub_cube = cube.subcube_from_ds9region(region_list) # doctest: +SKIP You can also create a region on the fly using ds9 region syntax.
This extracts a 0.1 degree circle around the Galactic Center:: >>> region_list = pyregion.parse("galactic; circle(0,0,0.1)") # doctest: +SKIP >>> sub_cube = cube.subcube_from_ds9region(region_list) # doctest: +SKIP Extract the minimal valid subcube --------------------------------- If you have a mask that masks out some of the cube edges, such that the resulting sub-cube might be smaller in memory, it can be useful to extract the minimal enclosing sub-cube:: >>> sub_cube = cube.minimal_subcube() # doctest: +SKIP You can also shrink any cube by this mechanism:: >>> sub_cube = cube.with_mask(smaller_region).minimal_subcube() # doctest: +SKIP spectral-cube-0.3.1/docs/masking.rst0000644000077000000240000001446312557414172017410 0ustar adamstaff00000000000000Masking ======= Getting started --------------- In addition to supporting the representation of data and associated WCS, it is also possible to attach a boolean mask to the :class:`~spectral_cube.SpectralCube` class. Masks can take various forms, but one of the more common ones is a cube with the same dimensions as the data, and that contains e.g. the boolean value ``True`` where data should be used, and the value ``False`` when the data should be ignored (though it is also possible to flip the convention around). To create a boolean mask from a boolean array ``mask_array``, you can for example use:: >>> from astropy import units as u >>> from spectral_cube import BooleanArrayMask >>> mask = BooleanArrayMask(mask=mask_array, wcs=cube.wcs) # doctest: +SKIP Using a pure boolean array may not always be the most efficient solution, because it may require a large amount of memory. You can also create a mask using simple conditions directly on the cube values themselves, for example:: >>> mask = cube > 1.3*u.K # doctest: +SKIP This is more efficient, because the condition is actually evaluated on-the-fly as needed. Note that units equivalent to the cube's units must be used. Masks can be combined using standard boolean comparison operators:: >>> new_mask = (cube > 1.3*u.K) & (cube < 100.*u.K) # doctest: +SKIP The available operators are ``&`` (and), ``|`` (or), and ``~`` (not). To apply a new mask to a :class:`~spectral_cube.SpectralCube` class, use the :meth:`~spectral_cube.SpectralCube.with_mask` method, which can take a mask and combine it with any pre-existing mask:: >>> cube2 = cube.with_mask(new_mask) # doctest: +SKIP In the above example, ``cube2`` contains a mask that is the ``&`` combination of ``new_mask`` with the existing mask on ``cube``. The ``cube2`` object contains a view to the same data as ``cube``, so no data is copied during this operation. Boolean arrays can also be used as input to :meth:`~spectral_cube.SpectralCube.with_mask`, assuming the shape of the mask and the data match:: >>> cube2 = cube.with_mask(boolean_array) # doctest: +SKIP Any boolean array that can be `broadcast `_ to the cube shape can be used as a boolean array mask. Accessing masked data --------------------- As mentioned in :doc:`accessing`, the raw and unmasked data can be accessed with the :attr:`~spectral_cube.SpectralCube.unmasked_data` attribute. You can access the masked data using ``filled_data``. This array is a copy of the original data with any masked value replaced by a fill value (which is ``np.nan`` by default but can be changed using the ``fill_value`` option in the :class:`~spectral_cube.SpectralCube` initializer). The 'filled' data is accessed with e.g.::
The 'filled' data is accessed with e.g.:: >>> slice_filled = cube.filled_data[0,:,:] # doctest: +SKIP Note that accessing the filled data should still be efficient because the data are loaded and filled only once you access the actual data values, so this should still be efficient for large datasets. If you are only interested in getting a flat (i.e. 1-d) array of all the non-masked values, you can also make use of the :meth:`~spectral_cube.SpectralCube.flattened` method:: >>> flat_array = cube.flattened() # doctest: +SKIP Fill values ----------- When accessing the data (see :doc:`accessing`), the mask may be applied to the data and the masked values replaced by a *fill* value. This fill value can be set using the ``fill_value`` initializer in :class:`~spectral_cube.SpectralCube`, and is set to ``np.nan`` by default. To change the fill value on a cube, you can make use of the :meth:`~spectral_cube.SpectralCube.with_fill_value` method:: >>> cube2 = cube.with_fill_value(0.) # doctest: +SKIP This returns a new :class:`~spectral_cube.SpectralCube` instance that contains a view to the same data in ``cube`` (so no data are copied). Inclusion and Exclusion ----------------------- The term "mask" is often used to refer both to the act of exluding and including pixels from analysis. To be explicit about how they behave, all mask objects have an :meth:`~spectral_cube.masks.MaskBase.include` method that returns a boolean array. True values in this array indicate that the pixel is included/valid, and not filtered/replaced in any way. Conversely, True values in the output from :meth:`~spectral_cube.masks.MaskBase.exclude` indicate the pixel is excluded/invalid, and will be filled/filtered. The inclusion/exclusion behavior of any mask can be inverted via ``mask_inverse = ~mask``. Advanced masking ---------------- Masks based on simple functions that operate on the initial data can be defined using the :class:`~spectral_cube.LazyMask` class. The motivation behind the :class:`~spectral_cube.LazyMask` class is that it is essentially equivalent to a boolean array, but the boolean values are computed on-the-fly as needed, meaning that the whole boolean array does not ever necessarily need to be computed or stored in memory, making it ideal for very large datasets. The function passed to :class:`~spectral_cube.LazyMask` should be a simple function taking one argument - the dataset itself:: >>> from spectral_cube import LazyMask >>> cube = read(...) # doctest: +SKIP >>> LazyMask(np.isfinite, cube=cube) # doctest: +SKIP or for example:: >>> def threshold(data): ... return data > 3. >>> LazyMask(threshold, cube=cube) # doctest: +SKIP As shown in `Getting Started`_, :class:`~spectral_cube.LazyMask` instances can also be defined directly by specifying conditions on :class:`~spectral_cube.SpectralCube` objects: >>> cube > 5*u.K # doctest: +SKIP LazyComparisonMask(...) .. TODO: add example for FunctionalMask Outputting masks ---------------- The attached mask to the given :class:`~spectral_cube.SpectralCube` class can be converted into a CASA image using :func:`~spectral_cube.io.make_casa_mask`: >>> from spectral_cube.io.casa_masks import make_casa_mask >>> make_casa_mask(cube, 'casa_mask.image', add_stokes=False) # doctest: +SKIP Optionally, a redundant Stokes axis can be added to match the original CASA image. .. Masks may also be appended to an existing CASA image: .. >>> make_casa_mask(cube, 'casa_mask.image', append_to_img=True, img='casa.image') .. 
.. note:: Outputting to CASA masks requires that `spectral_cube` be run
          from a CASA python session.

spectral-cube-0.3.1/docs/moments.rst0000644000077000000240000000235712551776560017446 0ustar adamstaff00000000000000Moment maps and statistics
==========================

Producing moment maps from a :class:`~spectral_cube.SpectralCube` instance is
straightforward::

    >>> moment_0 = cube.moment(order=0) # doctest: +SKIP
    >>> moment_1 = cube.moment(order=1) # doctest: +SKIP
    >>> moment_2 = cube.moment(order=2) # doctest: +SKIP

By default, moments are computed along the spectral dimension, but it is also
possible to pass the ``axis`` argument to compute them along a different
axis::

    >>> moment_0_along_x = cube.moment(order=0, axis=2) # doctest: +SKIP

The moment maps returned are :class:`~spectral_cube.Projection` instances,
which act like :class:`~astropy.units.Quantity` objects, and also have
convenience methods for writing to a file::

    >>> moment_0.write('moment0.fits') # doctest: +SKIP

and converting the data and WCS to a FITS HDU::

    >>> moment_0.hdu # doctest: +SKIP

The conversion to HDU objects makes it very easy to use the moment map with
plotting packages such as APLpy::

    >>> import aplpy # doctest: +SKIP
    >>> f = aplpy.FITSFigure(moment_0.hdu) # doctest: +SKIP
    >>> f.show_colorscale() # doctest: +SKIP
    >>> f.save('moment_0.png') # doctest: +SKIP

spectral-cube-0.3.1/docs/quick_looks.rst0000644000077000000240000000116512551776560020313 0ustar adamstaff00000000000000Quick Looks
===========

Once you've loaded a cube, you will inevitably want to look at it in various
ways. Slices in any direction have `quicklook` methods:

    >>> cube[50,:,:].quicklook() # show an image # doctest: +SKIP
    >>> cube[:, 50, 50].quicklook() # plot a spectrum # doctest: +SKIP

The same can be done with moments:

    >>> cube.moment0(axis=0).quicklook() # doctest: +SKIP

PVSlicer
--------

The `pvextractor <http://pvextractor.readthedocs.org>`_ package comes with a
GUI that has a simple matplotlib image viewer. To activate it for your cube:

    >>> cube.to_pvextractor() # doctest: +SKIP

spectral-cube-0.3.1/docs/rtd-pip-requirements0000644000077000000240000000021712377070030021237 0ustar adamstaff00000000000000-e git+http://github.com/astropy/astropy-helpers.git#egg=astropy_helpers
numpy
Cython
-e git+http://github.com/astropy/astropy.git#egg=astropy
spectral-cube-0.3.1/docs/stokes.rst0000644000077000000240000000040212377070030017252 0ustar adamstaff00000000000000:orphan:

Stokes components
=================

We plan to implement the `~spectral_cube.StokesSpectralCube` class and will
update the documentation once this class is ready to use.

.. TODO: first we need to make sure the StokesSpectralCube class is working.spectral-cube-0.3.1/docs/writing.rst0000644000077000000240000000040012551776560017442 0ustar adamstaff00000000000000Writing spectral cubes
======================

You can write out a :class:`~spectral_cube.SpectralCube` instance by making
use of the :meth:`~spectral_cube.SpectralCube.write` method::

    >>> cube.write('new_cube.fits', format='fits') # doctest: +SKIP

spectral-cube-0.3.1/docs/yt_example.rst0000644000077000000240000001523212557414172020131 0ustar adamstaff00000000000000Visualizing spectral cubes with yt
==================================

Extracting yt objects
---------------------

The :class:`~spectral_cube.SpectralCube` class includes a
:meth:`~spectral_cube.SpectralCube.to_yt` method that makes it easy to return
an object that can be used by `yt <http://yt-project.org>`_ to make volume
renderings or other visualizations of the data.
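In its simplest form (a sketch; ``cube`` here is assumed to be a previously
loaded :class:`~spectral_cube.SpectralCube` instance)::

    >>> ytcube = cube.to_yt() # doctest: +SKIP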
One common issue with volume rendering of spectral cubes is that you may not
want pixels along the spectral axis to be given the same '3-d' size as
positional pixels, so the :meth:`~spectral_cube.SpectralCube.to_yt` method
includes a ``spectral_factor`` argument that can be used to compress or
expand the spectral axis. The :meth:`~spectral_cube.SpectralCube.to_yt`
method is used as follows::

    >>> ytcube = cube.to_yt(spectral_factor=0.5) # doctest: +SKIP
    >>> ds = ytcube.dataset # doctest: +SKIP

.. WARNING:: The API change in
             https://github.com/radio-astro-tools/spectral-cube/pull/129
             affects the interpretation of the 0-pixel. There may be a
             1-pixel offset between the yt cube and the ``SpectralCube``.

The ``ds`` object is then a yt object that can be used for rendering! By
default the dataset is defined in pixel coordinates, going from ``0.5`` to
``n+0.5``, as would be the case in ds9, for example. Along the spectral axis,
this range will be modified if ``spectral_factor`` does not equal unity.

When working with datasets in yt, it may be useful to convert world
coordinates to pixel coordinates, so that whenever you have to input a
position in yt (e.g., for slicing or volume rendering) you can get the pixel
coordinate that corresponds to the desired world coordinate. For this
purpose, the method :meth:`~spectral_cube.ytCube.world2yt` is provided::

    >>> import astropy.units as u
    >>> pix_coord = ytcube.world2yt([51.424522,
    ...                              30.723611,
    ...                              5205.18071], # units of deg, deg, m/s
    ...                             ) # doctest: +SKIP

There is also a reverse method provided,
:meth:`~spectral_cube.ytCube.yt2world`::

    >>> world_coord = ytcube.yt2world([ds.domain_center]) # doctest: +SKIP

which in this case would return the world coordinates of the center of the
dataset in yt.

.. TODO: add a way to center it on a specific coordinate and return in world
.. coordinate offset.

.. note:: The :meth:`~spectral_cube.SpectralCube.to_yt` method and its
          associated coordinate methods are compatible with both yt v2.x and
          v3.0 and later, but use of version 3.0 or later is recommended due
          to substantial improvements in support for FITS data. For more
          information on how yt handles FITS datasets, see the yt
          documentation.

Visualization example
---------------------

This section shows an example of a rendering script that can be used to
produce a 3-d isocontour visualization using an object returned by
:meth:`~spectral_cube.SpectralCube.to_yt`::

    import numpy as np
    from spectral_cube import SpectralCube
    from yt.mods import ColorTransferFunction, write_bitmap
    import astropy.units as u

    # Read in spectral cube
    cube = SpectralCube.read('L1448_13CO.fits', format='fits')

    # Extract the yt object from the SpectralCube instance
    ytcube = cube.to_yt(spectral_factor=0.75)
    ds = ytcube.dataset

    # Set the number of levels, the minimum and maximum level and the width
    # of the isocontours
    n_v = 10
    vmin = 0.05
    vmax = 4.0
    dv = 0.02

    # Set up color transfer function
    transfer = ColorTransferFunction((vmin, vmax))
    transfer.add_layers(n_v, dv, colormap='RdBu_r')

    # Set up the camera parameters

    # Derive the pixel coordinate of the desired center
    # from the corresponding world coordinate
    center = ytcube.world2yt([51.424522,
                              30.723611,
                              5205.18071])
    direction = np.array([1.0, 0.0, 0.0])
    width = 100.  # pixels
    size = 1024

    camera = ds.camera(center, direction, width, size, transfer,
                       fields=['flux'])

    # Take a snapshot and save to a file
    snapshot = camera.snapshot()
    write_bitmap(snapshot, 'cube_rendering.png', transpose=True)

You can move the camera around; see the yt camera documentation.

Movie Making
------------

There is a simple utility for quick movie making. The default movie is a
rotation of the cube around one of the spatial axes, going from PP -> PV
space and back::

    >>> cube = read('cube.fits', format='fits') # doctest: +SKIP
    >>> ytcube = cube.to_yt() # doctest: +SKIP
    >>> images = ytcube.quick_render_movie('outdir') # doctest: +SKIP

The movie only does rotation, but it is a useful stepping-stone if you wish
to learn how to use yt's rendering system.

Example:

.. (embedded example movie omitted: HTML)

SketchFab Isosurface Contours
-----------------------------

For data exploration, making movies can be tedious: it is difficult to
control the camera and expensive to generate new renderings. Instead,
creating a 'model' from the data and exporting that to SketchFab can be very
useful. Only grayscale figures will be created with the quicklook code. You
need an account on sketchfab.com for this to work::

    >>> ytcube.quick_isocontour(title='GRS l=49 13CO 1 K contours', level=1.0) # doctest: +SKIP

Here's an example:

.. (embedded Sketchfab viewer omitted: "GRS l=49 13CO 1 K contours" by keflavich on Sketchfab)

    You can also export locally to .ply and .obj files, which can be read by many programs (sketchfab, meshlab, blender). See the `yt page `_ for details.:: >>> ytcube.quick_isocontour(export_to='ply', filename='meshes.ply', level=1.0) # doctest: +SKIP >>> ytcube.quick_isocontour(export_to='obj', filename='meshes', level=1.0) # doctest: +SKIP spectral-cube-0.3.1/ez_setup.py0000644000077000000240000002757312377070030016512 0ustar adamstaff00000000000000#!python """Bootstrap setuptools installation If you want to use setuptools in your package's setup.py, just include this file in the same directory with it, and add this to the top of your setup.py:: from ez_setup import use_setuptools use_setuptools() If you want to require a specific version of setuptools, set a download mirror, or use an alternate download directory, you can do so by supplying the appropriate options to ``use_setuptools()``. This file can also be run as a script to install or upgrade setuptools. """ import os import shutil import sys import tempfile import tarfile import optparse import subprocess import platform from distutils import log try: from site import USER_SITE except ImportError: USER_SITE = None DEFAULT_VERSION = "1.4.2" DEFAULT_URL = "https://pypi.python.org/packages/source/s/setuptools/" def _python_cmd(*args): args = (sys.executable,) + args return subprocess.call(args) == 0 def _check_call_py24(cmd, *args, **kwargs): res = subprocess.call(cmd, *args, **kwargs) class CalledProcessError(Exception): pass if not res == 0: msg = "Command '%s' return non-zero exit status %d" % (cmd, res) raise CalledProcessError(msg) vars(subprocess).setdefault('check_call', _check_call_py24) def _install(tarball, install_args=()): # extracting the tarball tmpdir = tempfile.mkdtemp() log.warn('Extracting in %s', tmpdir) old_wd = os.getcwd() try: os.chdir(tmpdir) tar = tarfile.open(tarball) _extractall(tar) tar.close() # going in the directory subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0]) os.chdir(subdir) log.warn('Now working in %s', subdir) # installing log.warn('Installing Setuptools') if not _python_cmd('setup.py', 'install', *install_args): log.warn('Something went wrong during the installation.') log.warn('See the error message above.') # exitcode will be 2 return 2 finally: os.chdir(old_wd) shutil.rmtree(tmpdir) def _build_egg(egg, tarball, to_dir): # extracting the tarball tmpdir = tempfile.mkdtemp() log.warn('Extracting in %s', tmpdir) old_wd = os.getcwd() try: os.chdir(tmpdir) tar = tarfile.open(tarball) _extractall(tar) tar.close() # going in the directory subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0]) os.chdir(subdir) log.warn('Now working in %s', subdir) # building an egg log.warn('Building a Setuptools egg in %s', to_dir) _python_cmd('setup.py', '-q', 'bdist_egg', '--dist-dir', to_dir) finally: os.chdir(old_wd) shutil.rmtree(tmpdir) # returning the result log.warn(egg) if not os.path.exists(egg): raise IOError('Could not build the egg.') def _do_download(version, download_base, to_dir, download_delay): egg = os.path.join(to_dir, 'setuptools-%s-py%d.%d.egg' % (version, sys.version_info[0], sys.version_info[1])) if not os.path.exists(egg): tarball = download_setuptools(version, download_base, to_dir, download_delay) _build_egg(egg, tarball, to_dir) sys.path.insert(0, egg) # Remove previously-imported pkg_resources if present (see # https://bitbucket.org/pypa/setuptools/pull-request/7/ for details). 
if 'pkg_resources' in sys.modules: del sys.modules['pkg_resources'] import setuptools setuptools.bootstrap_install_from = egg def use_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL, to_dir=os.curdir, download_delay=15): # making sure we use the absolute path to_dir = os.path.abspath(to_dir) was_imported = 'pkg_resources' in sys.modules or \ 'setuptools' in sys.modules try: import pkg_resources except ImportError: return _do_download(version, download_base, to_dir, download_delay) try: pkg_resources.require("setuptools>=" + version) return except pkg_resources.VersionConflict: e = sys.exc_info()[1] if was_imported: sys.stderr.write( "The required version of setuptools (>=%s) is not available,\n" "and can't be installed while this script is running. Please\n" "install a more recent version first, using\n" "'easy_install -U setuptools'." "\n\n(Currently using %r)\n" % (version, e.args[0])) sys.exit(2) else: del pkg_resources, sys.modules['pkg_resources'] # reload ok return _do_download(version, download_base, to_dir, download_delay) except pkg_resources.DistributionNotFound: return _do_download(version, download_base, to_dir, download_delay) def _clean_check(cmd, target): """ Run the command to download target. If the command fails, clean up before re-raising the error. """ try: subprocess.check_call(cmd) except subprocess.CalledProcessError: if os.access(target, os.F_OK): os.unlink(target) raise def download_file_powershell(url, target): """ Download the file at url to target using Powershell (which will validate trust). Raise an exception if the command cannot complete. """ target = os.path.abspath(target) cmd = [ 'powershell', '-Command', "(new-object System.Net.WebClient).DownloadFile(%(url)r, %(target)r)" % vars(), ] _clean_check(cmd, target) def has_powershell(): if platform.system() != 'Windows': return False cmd = ['powershell', '-Command', 'echo test'] devnull = open(os.path.devnull, 'wb') try: try: subprocess.check_call(cmd, stdout=devnull, stderr=devnull) except: return False finally: devnull.close() return True download_file_powershell.viable = has_powershell def download_file_curl(url, target): cmd = ['curl', url, '--silent', '--output', target] _clean_check(cmd, target) def has_curl(): cmd = ['curl', '--version'] devnull = open(os.path.devnull, 'wb') try: try: subprocess.check_call(cmd, stdout=devnull, stderr=devnull) except: return False finally: devnull.close() return True download_file_curl.viable = has_curl def download_file_wget(url, target): cmd = ['wget', url, '--quiet', '--output-document', target] _clean_check(cmd, target) def has_wget(): cmd = ['wget', '--version'] devnull = open(os.path.devnull, 'wb') try: try: subprocess.check_call(cmd, stdout=devnull, stderr=devnull) except: return False finally: devnull.close() return True download_file_wget.viable = has_wget def download_file_insecure(url, target): """ Use Python to download the file, even though it cannot authenticate the connection. """ try: from urllib.request import urlopen except ImportError: from urllib2 import urlopen src = dst = None try: src = urlopen(url) # Read/write all in one block, so we don't create a corrupt file # if the download is interrupted. 
data = src.read() dst = open(target, "wb") dst.write(data) finally: if src: src.close() if dst: dst.close() download_file_insecure.viable = lambda: True def get_best_downloader(): downloaders = [ download_file_powershell, download_file_curl, download_file_wget, download_file_insecure, ] for dl in downloaders: if dl.viable(): return dl def download_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL, to_dir=os.curdir, delay=15, downloader_factory=get_best_downloader): """Download setuptools from a specified location and return its filename `version` should be a valid setuptools version number that is available as an egg for download under the `download_base` URL (which should end with a '/'). `to_dir` is the directory where the egg will be downloaded. `delay` is the number of seconds to pause before an actual download attempt. ``downloader_factory`` should be a function taking no arguments and returning a function for downloading a URL to a target. """ # making sure we use the absolute path to_dir = os.path.abspath(to_dir) tgz_name = "setuptools-%s.tar.gz" % version url = download_base + tgz_name saveto = os.path.join(to_dir, tgz_name) if not os.path.exists(saveto): # Avoid repeated downloads log.warn("Downloading %s", url) downloader = downloader_factory() downloader(url, saveto) return os.path.realpath(saveto) def _extractall(self, path=".", members=None): """Extract all members from the archive to the current working directory and set owner, modification time and permissions on directories afterwards. `path' specifies a different directory to extract to. `members' is optional and must be a subset of the list returned by getmembers(). """ import copy import operator from tarfile import ExtractError directories = [] if members is None: members = self for tarinfo in members: if tarinfo.isdir(): # Extract directories with a safe mode. directories.append(tarinfo) tarinfo = copy.copy(tarinfo) tarinfo.mode = 448 # decimal for oct 0700 self.extract(tarinfo, path) # Reverse sort directories. if sys.version_info < (2, 4): def sorter(dir1, dir2): return cmp(dir1.name, dir2.name) directories.sort(sorter) directories.reverse() else: directories.sort(key=operator.attrgetter('name'), reverse=True) # Set correct owner, mtime and filemode on directories. 
for tarinfo in directories: dirpath = os.path.join(path, tarinfo.name) try: self.chown(tarinfo, dirpath) self.utime(tarinfo, dirpath) self.chmod(tarinfo, dirpath) except ExtractError: e = sys.exc_info()[1] if self.errorlevel > 1: raise else: self._dbg(1, "tarfile: %s" % e) def _build_install_args(options): """ Build the arguments to 'python setup.py install' on the setuptools package """ install_args = [] if options.user_install: if sys.version_info < (2, 6): log.warn("--user requires Python 2.6 or later") raise SystemExit(1) install_args.append('--user') return install_args def _parse_args(): """ Parse the command line for options """ parser = optparse.OptionParser() parser.add_option( '--user', dest='user_install', action='store_true', default=False, help='install in user site package (requires Python 2.6 or later)') parser.add_option( '--download-base', dest='download_base', metavar="URL", default=DEFAULT_URL, help='alternative URL from where to download the setuptools package') parser.add_option( '--insecure', dest='downloader_factory', action='store_const', const=lambda: download_file_insecure, default=get_best_downloader, help='Use internal, non-validating downloader' ) options, args = parser.parse_args() # positional arguments are ignored return options def main(version=DEFAULT_VERSION): """Install or upgrade setuptools and EasyInstall""" options = _parse_args() tarball = download_setuptools(download_base=options.download_base, downloader_factory=options.downloader_factory) return _install(tarball, _build_install_args(options)) if __name__ == '__main__': sys.exit(main()) spectral-cube-0.3.1/LICENSE.rst0000644000077000000240000000272612377070030016107 0ustar adamstaff00000000000000Copyright (c) 2014, spectral-cube developers All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.spectral-cube-0.3.1/PKG-INFO0000644000077000000240000000063012654610601015362 0ustar adamstaff00000000000000Metadata-Version: 1.0
Name: spectral-cube
Version: 0.3.1
Summary: A package for interaction with spectral cubes
Home-page: http://spectral-cube.readthedocs.org
Author: Tom Robitaille, Adam Ginsburg, Chris Beaumont, Adam Leroy, and Erik Rosolowsky
Author-email: robitaille@mpia.de, adam.g.ginsburg@gmail.com
License: BSD
Description: This is an Astropy affiliated package.
Platform: UNKNOWN
spectral-cube-0.3.1/README.md0000644000077000000240000000260712625614674015564 0ustar adamstaff00000000000000About
=====

[![Join the chat at https://gitter.im/radio-astro-tools/spectral-cube](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/radio-astro-tools/spectral-cube?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)

This package aims to facilitate the reading, writing, manipulation, and
analysis of spectral data cubes. More information is available in the
documentation, available [online at readthedocs.org](http://spectral-cube.rtfd.org).

![Powered by Astropy Badge](http://img.shields.io/badge/powered%20by-AstroPy-orange.svg?style=flat)

Credits
=======

This package is developed by:

* Chris Beaumont ([@ChrisBeaumont](http://github.com/ChrisBeaumont))
* Adam Ginsburg ([@keflavich](http://github.com/keflavich))
* Adam Leroy ([@akleroy](http://github.com/akleroy))
* Thomas Robitaille ([@astrofrog](http://github.com/astrofrog))
* Erik Rosolowsky ([@low-sky](http://github.com/low-sky))

Build and coverage status
=========================

[![Build Status](https://travis-ci.org/radio-astro-tools/spectral-cube.png?branch=master)](https://travis-ci.org/radio-astro-tools/spectral-cube)
[![Coverage Status](https://coveralls.io/repos/radio-astro-tools/spectral-cube/badge.svg?branch=master)](https://coveralls.io/r/radio-astro-tools/spectral-cube?branch=master)
[![DOI](https://zenodo.org/badge/doi/10.5281/zenodo.11485.png)](http://dx.doi.org/10.5281/zenodo.11485)
spectral-cube-0.3.1/setup.cfg0000644000077000000240000000133512551776560016126 0ustar adamstaff00000000000000[build_sphinx]
source-dir = docs
build-dir = docs/_build
all_files = 1

[upload_docs]
upload-dir = docs/_build/html
show-response = 1

[pytest]
minversion = 2.2
norecursedirs = build docs/_build
doctest_plus = enabled

[ah_bootstrap]
auto_use = True

[metadata]
package_name = spectral_cube
description = A package for interaction with spectral cubes
long_description = There are lots of things you wanna do with spectral cubes and this does some of them
author = Tom Robitaille, Adam Ginsburg, Chris Beaumont, Adam Leroy, and Erik Rosolowsky
author_email = robitaille@mpia.de, adam.g.ginsburg@gmail.com
license = BSD
url = http://spectral-cube.readthedocs.org
edit_on_github = False
github_project = radio-astro-tools/spectral-cube

spectral-cube-0.3.1/setup.py0000755000077000000240000000662312654610211016007 0ustar adamstaff00000000000000#!/usr/bin/env python
# Licensed under a 3-clause BSD style license - see
LICENSE.rst import glob import os import sys import ah_bootstrap from setuptools import setup #A dirty hack to get around some early import/configurations ambiguities if sys.version_info[0] >= 3: import builtins else: import __builtin__ as builtins builtins._ASTROPY_SETUP_ = True from astropy_helpers.setup_helpers import ( register_commands, adjust_compiler, get_debug_option, get_package_info) from astropy_helpers.git_helpers import get_git_devstr from astropy_helpers.version_helpers import generate_version_py # Get some values from the setup.cfg from distutils import config conf = config.ConfigParser() conf.read(['setup.cfg']) metadata = dict(conf.items('metadata')) PACKAGENAME = metadata.get('package_name', 'packagename') DESCRIPTION = metadata.get('description', 'Astropy affiliated package') AUTHOR = metadata.get('author', '') AUTHOR_EMAIL = metadata.get('author_email', '') LICENSE = metadata.get('license', 'unknown') URL = metadata.get('url', 'http://astropy.org') # Get the long description from the package's docstring __import__(PACKAGENAME) package = sys.modules[PACKAGENAME] LONG_DESCRIPTION = package.__doc__ # Store the package name in a built-in variable so it's easy # to get from other parts of the setup infrastructure builtins._ASTROPY_PACKAGE_NAME_ = PACKAGENAME # VERSION should be PEP386 compatible (http://www.python.org/dev/peps/pep-0386) VERSION = '0.3.1' # Indicates if this version is a release version RELEASE = 'dev' not in VERSION if not RELEASE: VERSION += get_git_devstr(False) # Populate the dict of setup command overrides; this should be done before # invoking any other functionality from distutils since it can potentially # modify distutils' behavior. cmdclassd = register_commands(PACKAGENAME, VERSION, RELEASE) # Adjust the compiler in case the default on this platform is to use a # broken one. adjust_compiler(PACKAGENAME) # Freeze build information in version.py generate_version_py(PACKAGENAME, VERSION, RELEASE, get_debug_option(PACKAGENAME)) # Treat everything in scripts except README.rst as a script to be installed scripts = [fname for fname in glob.glob(os.path.join('scripts', '*')) if os.path.basename(fname) != 'README.rst'] # Get configuration information from all of the various subpackages. # See the docstring for setup_helpers.update_package_files for more # details. package_info = get_package_info() # Add the project-global data package_info['package_data'].setdefault(PACKAGENAME, []) package_info['package_data'][PACKAGENAME].append('data/*') # Include all .c files, recursively, including those generated by # Cython, since we can not do this in MANIFEST.in with a "dynamic" # directory name. c_files = [] for root, dirs, files in os.walk(PACKAGENAME): for filename in files: if filename.endswith('.c'): c_files.append( os.path.join( os.path.relpath(root, PACKAGENAME), filename)) package_info['package_data'][PACKAGENAME].extend(c_files) setup(name='spectral-cube', version=VERSION, description=DESCRIPTION, scripts=scripts, install_requires=['astropy','numpy>=1.8.0'], author=AUTHOR, author_email=AUTHOR_EMAIL, license=LICENSE, url=URL, long_description=LONG_DESCRIPTION, cmdclass=cmdclassd, zip_safe=False, **package_info ) spectral-cube-0.3.1/spectral_cube/0000755000077000000240000000000012654610601017101 5ustar adamstaff00000000000000spectral-cube-0.3.1/spectral_cube/__init__.py0000644000077000000240000000105412647754466021236 0ustar adamstaff00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst """ This is an Astropy affiliated package. 
""" # Affiliated packages may add whatever they like to this file, but # should keep this content at the top. # ---------------------------------------------------------------------------- from ._astropy_init import * # ---------------------------------------------------------------------------- if not _ASTROPY_SETUP_: from .spectral_cube import SpectralCube from .stokes_spectral_cube import StokesSpectralCube from .masks import * spectral-cube-0.3.1/spectral_cube/_astropy_init.py0000644000077000000240000001223112551776560022352 0ustar adamstaff00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst __all__ = ['__version__', '__githash__', 'test'] # this indicates whether or not we are in the package's setup.py try: _ASTROPY_SETUP_ except NameError: from sys import version_info if version_info[0] >= 3: import builtins else: import __builtin__ as builtins builtins._ASTROPY_SETUP_ = False try: from .version import version as __version__ except ImportError: __version__ = '' try: from .version import githash as __githash__ except ImportError: __githash__ = '' # set up the test command def _get_test_runner(): import os from astropy.tests.helper import TestRunner return TestRunner(os.path.dirname(__file__)) def test(package=None, test_path=None, args=None, plugins=None, verbose=False, pastebin=None, remote_data=False, pep8=False, pdb=False, coverage=False, open_files=False, **kwargs): """ Run the tests using `py.test `__. A proper set of arguments is constructed and passed to `pytest.main`_. .. _py.test: http://pytest.org/latest/ .. _pytest.main: http://pytest.org/latest/builtin.html#pytest.main Parameters ---------- package : str, optional The name of a specific package to test, e.g. 'io.fits' or 'utils'. If nothing is specified all default tests are run. test_path : str, optional Specify location to test by path. May be a single file or directory. Must be specified absolutely or relative to the calling directory. args : str, optional Additional arguments to be passed to pytest.main_ in the ``args`` keyword argument. plugins : list, optional Plugins to be passed to pytest.main_ in the ``plugins`` keyword argument. verbose : bool, optional Convenience option to turn on verbose output from py.test_. Passing True is the same as specifying ``'-v'`` in ``args``. pastebin : {'failed','all',None}, optional Convenience option for turning on py.test_ pastebin output. Set to ``'failed'`` to upload info for failed tests, or ``'all'`` to upload info for all tests. remote_data : bool, optional Controls whether to run tests marked with @remote_data. These tests use online data and are not run by default. Set to True to run these tests. pep8 : bool, optional Turn on PEP8 checking via the `pytest-pep8 plugin `_ and disable normal tests. Same as specifying ``'--pep8 -k pep8'`` in ``args``. pdb : bool, optional Turn on PDB post-mortem analysis for failing tests. Same as specifying ``'--pdb'`` in ``args``. coverage : bool, optional Generate a test coverage report. The result will be placed in the directory htmlcov. open_files : bool, optional Fail when any tests leave files open. Off by default, because this adds extra run time to the test suite. Requires the `psutil `_ package. parallel : int, optional When provided, run the tests in parallel on the specified number of CPUs. If parallel is negative, it will use the all the cores on the machine. Requires the `pytest-xdist `_ plugin installed. Only available when using Astropy 0.3 or later. 
kwargs Any additional keywords passed into this function will be passed on to the astropy test runner. This allows use of test-related functionality implemented in later versions of astropy without explicitly updating the package template. """ test_runner = _get_test_runner() return test_runner.run_tests( package=package, test_path=test_path, args=args, plugins=plugins, verbose=verbose, pastebin=pastebin, remote_data=remote_data, pep8=pep8, pdb=pdb, coverage=coverage, open_files=open_files, **kwargs) if not _ASTROPY_SETUP_: import os from warnings import warn from astropy import config # add these here so we only need to cleanup the namespace at the end config_dir = None if not os.environ.get('ASTROPY_SKIP_CONFIG_UPDATE', False): config_dir = os.path.dirname(__file__) config_template = os.path.join(config_dir, __package__ + ".cfg") if os.path.isfile(config_template): try: config.configuration.update_default_config( __package__, config_dir, version=__version__) except TypeError as orig_error: try: config.configuration.update_default_config( __package__, config_dir) except config.configuration.ConfigurationDefaultMissingError as e: wmsg = (e.args[0] + " Cannot install default profile. If you are " "importing from source, this is expected.") warn(config.configuration.ConfigurationDefaultMissingWarning(wmsg)) del e except: raise orig_error spectral-cube-0.3.1/spectral_cube/_moments.py0000644000077000000240000001115712643464660021313 0ustar adamstaff00000000000000from __future__ import print_function, absolute_import, division import numpy as np from .cube_utils import iterator_strategy from .np_compat import allbadtonan """ Functions to compute moment maps in a variety of ways """ def _moment_shp(cube, axis): """ Return the shape of the moment map Parameters ----------- cube : SpectralCube The cube to collapse axis : int The axis to collapse along (numpy convention) Returns ------- ny, nx """ return cube.shape[:axis] + cube.shape[axis + 1:] def _slice0(cube, axis): """ 0th moment along an axis, calculated slicewise Parameters ---------- cube : SpectralCube axis : int Returns ------- moment0 : array """ shp = _moment_shp(cube, axis) result = np.zeros(shp) view = [slice(None)] * 3 valid = np.zeros(shp, dtype=np.bool) for i in range(cube.shape[axis]): view[axis] = i plane = cube._get_filled_data(fill=np.nan, view=view) valid |= np.isfinite(plane) result += np.nan_to_num(plane) * cube._pix_size_slice(axis) result[~valid] = np.nan return result def _slice1(cube, axis): """ 1st moment along an axis, calculated slicewise Parameters ---------- cube : SpectralCube axis : int Returns ------- moment1 : array """ shp = _moment_shp(cube, axis) result = np.zeros(shp) view = [slice(None)] * 3 pix_size = cube._pix_size_slice(axis) pix_cen = cube._pix_cen()[axis] weights = np.zeros(shp) for i in range(cube.shape[axis]): view[axis] = i plane = cube._get_filled_data(fill=0, view=view) result += (plane * pix_cen[view] * pix_size) weights += plane * pix_size return result / weights def moment_slicewise(cube, order, axis): """ Compute moments by accumulating the result 1 slice at a time """ if order == 0: return _slice0(cube, axis) if order == 1: return _slice1(cube, axis) shp = _moment_shp(cube, axis) result = np.zeros(shp) view = [slice(None)] * 3 pix_size = cube._pix_size_slice(axis) pix_cen = cube._pix_cen()[axis] weights = np.zeros(shp) # would be nice to get mom1 and momn in single pass over data # possible for mom2, not sure about general case mom1 = _slice1(cube, axis) for i in range(cube.shape[axis]): 
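        # accumulate, one slice at a time, the intensity-weighted power of
        # each channel's offset from the first moment; 'weights' collects
        # the plain intensity for the final normalization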
view[axis] = i plane = cube._get_filled_data(fill=0, view=view) result += (plane * (pix_cen[view] - mom1) ** order * pix_size) weights += plane * pix_size return (result / weights) def moment_raywise(cube, order, axis): """ Compute moments by accumulating the answer one ray at a time """ shp = _moment_shp(cube, axis) out = np.zeros(shp) * np.nan pix_cen = cube._pix_cen()[axis] pix_size = cube._pix_size_slice(axis) for x, y, slc in cube._iter_rays(axis): # the intensity, i.e. the weights include = cube._mask.include(data=cube._data, wcs=cube._wcs, view=slc) if not include.any(): continue data = cube.flattened(slc).value * pix_size if order == 0: out[x, y] = data.sum() continue order1 = (data * pix_cen[slc][include]).sum() / data.sum() if order == 1: out[x, y] = order1 continue ordern = (data * (pix_cen[slc][include] - order1) ** order).sum() ordern /= data.sum() out[x, y] = ordern return out def moment_cubewise(cube, order, axis): """ Compute the moments by working with the entire data at once """ pix_cen = cube._pix_cen()[axis] data = cube._get_filled_data() * cube._pix_size_slice(axis) if order == 0: return allbadtonan(np.nansum)(data, axis=axis) if order == 1: return (np.nansum(data * pix_cen, axis=axis) / np.nansum(data, axis=axis)) else: mom1 = moment_cubewise(cube, 1, axis) # insert an axis so it broadcasts properly shp = list(_moment_shp(cube, axis)) shp.insert(axis, 1) mom1 = mom1.reshape(shp) return (np.nansum(data * (pix_cen - mom1) ** order, axis=axis) / np.nansum(data, axis=axis)) def moment_auto(cube, order, axis): """ Build a moment map, choosing a strategy to balance speed and memory. """ strategy = dict(cube=moment_cubewise, ray=moment_raywise, slice=moment_slicewise) return strategy[iterator_strategy(cube, axis)](cube, order, axis) spectral-cube-0.3.1/spectral_cube/conftest.py0000644000077000000240000000243212643464660021313 0ustar adamstaff00000000000000# this contains imports plugins that configure py.test for astropy tests. # by importing them here in conftest.py they are discoverable by py.test # no matter how it is invoked within the source tree. from __future__ import print_function, absolute_import, division from astropy.tests.pytest_plugins import * ## Uncomment the following line to treat all DeprecationWarnings as ## exceptions # enable_deprecations_as_exceptions() ## Uncomment and customize the following lines to add/remove entries ## from the list of packages for which version numbers are displayed ## when running the tests # try: # PYTEST_HEADER_MODULES['Astropy'] = 'astropy' # PYTEST_HEADER_MODULES['scikit-image'] = 'skimage' # del PYTEST_HEADER_MODULES['h5py'] # except NameError: # needed to support Astropy < 1.0 # pass ## Uncomment the following lines to display the version number of the ## package rather than the version number of Astropy in the top line when ## running the tests. # import os # ## This is to figure out the affiliated package version, rather than ## using Astropy's # from . import version # # try: # packagename = os.path.basename(os.path.dirname(__file__)) # TESTED_VERSIONS[packagename] = version.version # except NameError: # Needed to support Astropy <= 1.0.0 # pass spectral-cube-0.3.1/spectral_cube/cube_utils.py0000644000077000000240000001553012643464660021627 0ustar adamstaff00000000000000from __future__ import print_function, absolute_import, division import numpy as np from astropy.wcs import (WCSSUB_SPECTRAL, WCSSUB_LONGITUDE, WCSSUB_LATITUDE) from . 
import wcs_utils from astropy import log def _fix_spectral(wcs): """ Attempt to fix a cube with an invalid spectral axis definition. Only uses well-known exceptions, e.g. CTYPE = 'VELOCITY'. For the rest, it will try to raise a helpful error. """ axtypes = wcs.get_axis_types() types = [a['coordinate_type'] for a in axtypes] if wcs.naxis not in (3,4): raise TypeError("The WCS has {0} axes of types {1}".format(len(types), types)) # sanitize noncompliant headers if 'spectral' not in types: log.warn("No spectral axis found; header may be non-compliant.") for ind,tp in enumerate(types): if tp not in ('celestial','stokes'): if wcs.wcs.ctype[ind] in wcs_utils.bad_spectypes_mapping: wcs.wcs.ctype[ind] = wcs_utils.bad_spectypes_mapping[wcs.wcs.ctype[ind]] return wcs def _split_stokes(array, wcs): """ Given a 4-d data cube with 4-d WCS (spectral cube + stokes) return a dictionary of data and WCS objects for each Stokes component Parameters ---------- array : `~numpy.ndarray` The input 3-d array with two position dimensions, one spectral dimension, and a Stokes dimension. wcs : `~astropy.wcs.WCS` The input 3-d WCS with two position dimensions, one spectral dimension, and a Stokes dimension. """ if array.ndim not in (3,4): raise ValueError("Input array must be 3- or 4-dimensional for a" " STOKES cube") if wcs.wcs.naxis != 4: raise ValueError("Input WCS must be 4-dimensional for a STOKES cube") wcs = _fix_spectral(wcs) # reverse from wcs -> numpy convention axtypes = wcs.get_axis_types()[::-1] types = [a['coordinate_type'] for a in axtypes] try: # Find stokes dimension stokes_index = types.index('stokes') except ValueError: # stokes not in list, but we are 4d if types.count('celestial') == 2 and types.count('spectral') == 1: if None in types: stokes_index = types.index(None) log.warn("FITS file has no STOKES axis, but it has a blank" " axis type at index {0} that is assumed to be " "stokes.".format(4-stokes_index)) else: for ii,tp in enumerate(types): if tp not in ('celestial', 'spectral'): stokes_index = ii stokes_type = tp log.warn("FITS file has no STOKES axis, but it has an axis" " of type {1} at index {0} that is assumed to be " "stokes.".format(4-stokes_index, stokes_type)) else: raise IOError("There are 4 axes in the data cube but no STOKES " "axis could be identified") # TODO: make the stokes names more general stokes_names = ["I", "Q", "U", "V"] stokes_arrays = {} wcs_slice = wcs_utils.drop_axis(wcs, wcs.naxis - 1 - stokes_index) if array.ndim == 4: for i_stokes in range(array.shape[stokes_index]): array_slice = [i_stokes if idim == stokes_index else slice(None) for idim in range(array.ndim)] stokes_arrays[stokes_names[i_stokes]] = array[array_slice] else: # 3D array with STOKES as a 4th header parameter stokes_arrays['I'] = array return stokes_arrays, wcs_slice def _orient(array, wcs): """ Given a 3-d spectral cube and WCS, swap around the axes so that the spectral axis cube is the first in Numpy notation, and the last in WCS notation. Parameters ---------- array : `~numpy.ndarray` The input 3-d array with two position dimensions and one spectral dimension. wcs : `~astropy.wcs.WCS` The input 3-d WCS with two position dimensions and one spectral dimension. 
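    Returns
    -------
    array : `~numpy.ndarray`
        The input array transposed so that the spectral axis comes first
        (Numpy order).
    wcs : `~astropy.wcs.WCS`
        The matching WCS, with the spectral axis last (WCS order).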
""" if array.ndim != 3: raise ValueError("Input array must be 3-dimensional") if wcs.wcs.naxis != 3: raise ValueError("Input WCS must be 3-dimensional") wcs = _fix_spectral(wcs) # reverse from wcs -> numpy convention axtypes = wcs.get_axis_types()[::-1] types = [a['coordinate_type'] for a in axtypes] nums = [None if a['coordinate_type'] != 'celestial' else a['number'] for a in axtypes] if 'stokes' in types: raise ValueError("Input WCS should not contain stokes") t = [types.index('spectral'), nums.index(1), nums.index(0)] result_array = array.transpose(t) result_wcs = wcs.sub([WCSSUB_LONGITUDE, WCSSUB_LATITUDE, WCSSUB_SPECTRAL]) return result_array, result_wcs def slice_syntax(f): """ This decorator wraps a function that accepts a tuple of slices. After wrapping, the function acts like a property that accepts bracket syntax (e.g., p[1:3, :, :]) Parameters ---------- f : function """ def wrapper(self): result = SliceIndexer(f, self) result.__doc__ = f.__doc__ return result wrapper.__doc__ = slice_doc.format(f.__doc__ or '', f.__name__) result = property(wrapper) return result slice_doc = """ {0} Notes ----- Supports efficient Numpy slice notation, like ``{1}[0:3, :, 2:4]`` """ class SliceIndexer(object): def __init__(self, func, _other): self._func = func self._other = _other def __getitem__(self, view): return self._func(self._other, view) def __iter__(self): raise Exception("You need to specify a slice (e.g. ``[:]`` or " "``[0,:,:]`` in order to access this property.") # TODO: make this into a proper configuration item # TODO: make threshold depend on memory? MEMORY_THRESHOLD=1e8 def is_huge(cube): if cube.size < MEMORY_THRESHOLD: # smallish return False else: return True def iterator_strategy(cube, axis=None): """ Guess the most efficient iteration strategy for iterating over a cube, given its size and layout Parameters ---------- cube : SpectralCube instance The cube to iterate over axis : [0, 1, 2] For reduction methods, the axis that is being collapsed Returns ------- strategy : ['cube' | 'ray' | 'slice'] The recommended iteration strategy. *cube* recommends working with the entire array in memory *slice* recommends working with one slice at a time *ray* recommends working with one ray at a time """ # pretty simple for now if cube.size < 1e8: # smallish return 'cube' return 'slice' spectral-cube-0.3.1/spectral_cube/io/0000755000077000000240000000000012654610601017510 5ustar adamstaff00000000000000spectral-cube-0.3.1/spectral_cube/io/__init__.py0000644000077000000240000000010112643464660021623 0ustar adamstaff00000000000000from __future__ import print_function, absolute_import, division spectral-cube-0.3.1/spectral_cube/io/casa_image.py0000644000077000000240000001263712643464660022156 0ustar adamstaff00000000000000from __future__ import print_function, absolute_import, division import warnings from astropy.io import fits from astropy.extern import six from astropy.wcs import WCS import numpy as np from .. import SpectralCube, StokesSpectralCube, BooleanArrayMask, LazyMask from .. import cube_utils # Read and write from a CASA image. This has a few # complications. First, by default CASA does not return the # "python order" and so we either have to transpose the cube on # read or have dueling conventions. Second, CASA often has # degenerate stokes axes present in unpredictable places (3rd or # 4th without a clear expectation). We need to replicate these # when writing but don't want them in memory. By default, try to # yield the same array in memory that we would get from astropy. 
def is_casa_image(input, **kwargs): if isinstance(input, six.string_types): if input.endswith('.image'): return True return False def wcs_casa2astropy(casa_wcs): """ Convert a casac.coordsys object into an astropy.wcs.WCS object """ from astropy.wcs import WCS wcs = WCS(naxis=int(casa_wcs.naxes())) crpix = casa_wcs.referencepixel() if crpix['ar_type'] != 'absolute': raise ValueError("Unexpected ar_type: %s" % crpix['ar_type']) elif crpix['pw_type'] != 'pixel': raise ValueError("Unexpected pw_type: %s" % crpix['pw_type']) else: wcs.wcs.crpix = crpix['numeric'] cdelt = casa_wcs.increment() if cdelt['ar_type'] != 'absolute': raise ValueError("Unexpected ar_type: %s" % cdelt['ar_type']) elif cdelt['pw_type'] != 'world': raise ValueError("Unexpected pw_type: %s" % cdelt['pw_type']) else: wcs.wcs.cdelt = cdelt['numeric'] crval = casa_wcs.referencevalue() if crval['ar_type'] != 'absolute': raise ValueError("Unexpected ar_type: %s" % crval['ar_type']) elif crval['pw_type'] != 'world': raise ValueError("Unexpected pw_type: %s" % crval['pw_type']) else: wcs.wcs.crval = crval['numeric'] wcs.wcs.cunit = casa_wcs.units() # mapping betweeen CASA and FITS COORD_TYPE = {} COORD_TYPE['Right Ascension'] = "RA--" COORD_TYPE['Declination'] = "DEC-" COORD_TYPE['Longitude'] = "GLON" COORD_TYPE['Latitude'] = "GLAT" COORD_TYPE['Frequency'] = "FREQ" COORD_TYPE['Stokes'] = "STOKES" # There is no easy way at the moment to extract the orginal projection # codes from a coordsys object, so we need to figure out how to do this in # the most general way. The code below is still experimental. ctype = [] for i, name in enumerate(casa_wcs.names()): if name in COORD_TYPE: ctype.append(COORD_TYPE[name]) if casa_wcs.axiscoordinatetypes()[i] == 'Direction': ctype[-1] += ("%4s" % casa_wcs.projection()['type']).replace(' ', '-') else: raise KeyError("Don't know how to convert: %s" % name) wcs.wcs.ctype = ctype return wcs def load_casa_image(filename, skipdata=False, skipvalid=False, skipcs=False, **kwargs): """ Load a cube (into memory?) from a CASA image. By default it will transpose the cube into a 'python' order and drop degenerate axes. These options can be suppressed. The object holds the coordsys object from the image in memory. """ try: from taskinit import ia except ImportError: raise ImportError("Could not import CASA (casac) and therefore cannot read CASA .image files") # use the ia tool to get the file contents ia.open(filename) # read in the data if not skipdata: data = ia.getchunk() # CASA stores validity of data as a mask if not skipvalid: valid = ia.getchunk(getmask=True) # transpose is dealt with within the cube object # read in coordinate system object casa_cs = ia.coordsys() wcs = wcs_casa2astropy(casa_cs) unit = ia.brightnessunit() # don't need this yet # stokes = get_casa_axis(temp_cs, wanttype="Stokes", skipdeg=False,) # if stokes == None: # order = np.arange(self.data.ndim) # else: # order = [] # for ax in np.arange(self.data.ndim+1): # if ax == stokes: # continue # order.append(ax) # self.casa_cs = ia.coordsys(order) # This should work, but coordsys.reorder() has a bug # on the error checking. JIRA filed. Until then the # axes will be reversed from the original. 
    # if transpose == True:
    #     new_order = np.arange(self.data.ndim)
    #     new_order = new_order[-1*np.arange(self.data.ndim)-1]
    #     print new_order
    #     self.casa_cs.reorder(new_order)

    # close the ia tool
    ia.close()

    meta = {'filename': filename,
            'BUNIT': unit}

    if wcs.naxis == 3:
        mask = BooleanArrayMask(np.logical_not(valid), wcs)
        cube = SpectralCube(data, wcs, mask, meta=meta)

    elif wcs.naxis == 4:
        data, wcs = cube_utils._split_stokes(data.T, wcs)
        mask = {}
        for component in data:
            data[component], wcs_slice = cube_utils._orient(data[component],
                                                            wcs)
            mask[component] = LazyMask(np.isfinite, data=data[component],
                                       wcs=wcs_slice)

        cube = StokesSpectralCube(data, wcs_slice, mask, meta=meta)

    return cube
spectral-cube-0.3.1/spectral_cube/io/casa_masks.py0000644000077000000240000000605212643464660022204 0ustar adamstaff00000000000000from __future__ import print_function, absolute_import, division

import numpy as np
from astropy.io import fits
import tempfile
import warnings

from ..wcs_utils import add_stokes_axis_to_wcs


def make_casa_mask(SpecCube, outname, append_to_image=True, img=None,
                   add_stokes=True, stokes_posn=None):
    '''
    Outputs the mask attached to the SpectralCube object as a CASA image, or
    optionally appends the mask to a preexisting CASA image.

    Parameters
    ----------
    SpecCube : SpectralCube
        SpectralCube object containing the mask.
    outname : str
        Name of the output mask file.
    append_to_image : bool, optional
        Appends the mask to a given image.
    img : str, optional
        Image to be appended to. Must be specified if append_to_image is
        enabled.
    add_stokes : bool, optional
        Adds a Stokes axis onto the wcs from SpecCube.
    stokes_posn : int, optional
        Sets the position of the new Stokes axis. Defaults to the last axis.
    '''

    try:
        from taskinit import ia
    except ImportError:
        # Raise, rather than just print, since nothing below can work
        # without the CASA ia tool.
        raise ImportError("Cannot import casac. Must be run in a CASA "
                          "environment.")

    # Get the header info from the image.
    # There's no wcs_astropy2casa (yet), so create a temporary file for
    # CASA to open.

    temp = tempfile.NamedTemporaryFile()
    # CASA is closing this file at some point so set it to manual delete.
    temp2 = tempfile.NamedTemporaryFile(delete=False)

    # Grab the wcs
    # Optionally re-add the Stokes axis
    if add_stokes:
        my_wcs = SpecCube.wcs
        if stokes_posn is None:
            stokes_posn = my_wcs.wcs.naxis

        new_wcs = add_stokes_axis_to_wcs(my_wcs,
                                         stokes_posn)
        header = new_wcs.to_header()
        # Transpose the shape so we're adding the axis at the place CASA will
        # recognize. Then transpose back.
shape = SpecCube.shape[::-1] shape = shape[:stokes_posn] + (1,) + shape[stokes_posn:] shape = shape[::-1] else: # Just grab the header from SpecCube header = SpecCube.header shape = SpecCube.shape hdu = fits.PrimaryHDU(header=header, data=np.empty(shape, dtype='int16')) hdu.writeto(temp.name) ia.fromfits(infile=temp.name, outfile=temp2.name, overwrite=True) temp.close() cs = ia.coordsys() ia.close() temp2.close() mask_arr = SpecCube.mask.include() # Reshape mask with possible Stokes axis mask_arr = mask_arr.reshape(shape) # Transpose to match CASA axes mask_arr = mask_arr.T ia.newimagefromarray(outfile=outname, pixels=mask_arr.astype('int16')) ia.open(outname) ia.setcoordsys(cs.torecord()) ia.close() if append_to_image: if img is None: raise TypeError("img argument must be specified to append the mask.") ia.open(outname) ia.calcmask(outname+">0.5") ia.close() ia.open(img) ia.maskhandler('copy', [outname+":mask0", outname]) ia.maskhandler('set', outname) ia.close() spectral-cube-0.3.1/spectral_cube/io/class_lmv.py0000644000077000000240000007144112643464660022066 0ustar adamstaff00000000000000from __future__ import print_function, absolute_import, division import numpy as np import struct import warnings import string from astropy.extern import six from astropy import log from .fits import load_fits_cube """ .. TODO:: When any section length is zero, that means the following values are to be ignored. No warning is needed. """ # Constant: r2deg = 180/np.pi # see sicfits.f90 _ctype_dict={'LII':'GLON', 'BII':'GLAT', 'VELOCITY':'VELO', 'RA':'RA', 'DEC':'DEC', 'FREQUENCY': 'FREQ', } _cunit_dict = {'LII':'deg', 'BII':'deg', 'VELOCITY':'km s-1', 'RA':'deg', 'DEC':'deg', 'FREQUENCY': 'MHz', } cel_types = ('RA','DEC','GLON','GLAT') # CLASS apparently defaults to an ARC (zenithal equidistant) projection; this # is what is output in case the projection # is zero when exporting from CLASS _proj_dict = {0:'ARC', 1:'TAN', 2:'SIN', 3:'AZP', 4:'STG', 5:'ZEA', 6:'AIT', 7:'GLS', 8:'SFL', } _bunit_dict = {'k (tmb)': 'K'} def is_lmv(input, **kwargs): """ Determine whether input is in GILDAS CLASS lmv format """ if isinstance(input, six.string_types): if input.lower().endswith(('.lmv')): return True else: return False def read_lmv(filename): """ Read an LMV cube file Specification is primarily in GILDAS image_def.f90 """ log.warn("CLASS LMV cube reading is tentatively supported. " "Please post bug reports at the first sign of danger!") with open(filename,'rb') as lf: # lf for "LMV File" filetype = _read_string(lf, 12) #!--------------------------------------------------------------------- #! @ private #! SYCODE system code #! '-' IEEE #! '.' EEEI (IBM like) #! '_' VAX #! IMCODE file code #! '<' IEEE 64 bits (Little Endian, 99.9 % of recent computers) #! '>' EEEI 64 bits (Big Endian, HPUX, IBM-RISC, and SPARC ...) #!--------------------------------------------------------------------- imcode = filetype[6] if filetype[:6] != 'GILDAS' or filetype[7:] != 'IMAGE': raise TypeError("File is not a GILDAS Image file") if imcode in ('<','>'): if imcode =='>': log.warn("Swap the endianness first...") return read_lmv_type2(lf) else: return read_lmv_type1(lf) def read_lmv_type1(lf): header = {} # fmt probably matters! Default is "r4", i.e. 
float32 data, but could be float64 fmt = np.fromfile(lf, dtype='int32', count=1) # 4 # number of data blocks ndb = np.fromfile(lf, dtype='int32', count=1) # 5 gdf_type = np.fromfile(lf, dtype='int32', count=1) # 6 # Reserved Space reserved_fill = np.fromfile(lf, dtype='int32', count=4) # 7 general_section_length = np.fromfile(lf, dtype='int32', count=1) # 11 #print "Format: ",fmt," ndb: ",ndb, " fill: ",fill," other: ",unknown # pos 12 naxis,naxis1,naxis2,naxis3,naxis4 = np.fromfile(lf,count=5,dtype='int32') header['NAXIS'] = naxis header['NAXIS1'] = naxis1 header['NAXIS2'] = naxis2 header['NAXIS3'] = naxis3 header['NAXIS4'] = naxis4 # We are indexing bytes from here; CLASS indices are higher by 12 # pos 17 header['CRPIX1'] = np.fromfile(lf,count=1,dtype='float64')[0] header['CRVAL1'] = np.fromfile(lf,count=1,dtype='float64')[0] header['CDELT1'] = np.fromfile(lf,count=1,dtype='float64')[0] * r2deg header['CRPIX2'] = np.fromfile(lf,count=1,dtype='float64')[0] header['CRVAL2'] = np.fromfile(lf,count=1,dtype='float64')[0] header['CDELT2'] = np.fromfile(lf,count=1,dtype='float64')[0] * r2deg header['CRPIX3'] = np.fromfile(lf,count=1,dtype='float64')[0] header['CRVAL3'] = np.fromfile(lf,count=1,dtype='float64')[0] header['CDELT3'] = np.fromfile(lf,count=1,dtype='float64')[0] header['CRPIX4'] = np.fromfile(lf,count=1,dtype='float64')[0] header['CRVAL4'] = np.fromfile(lf,count=1,dtype='float64')[0] header['CDELT4'] = np.fromfile(lf,count=1,dtype='float64')[0] # pos 41 #print "Post-crval",lf.tell() blank_section_length = np.fromfile(lf,count=1,dtype='int32') if blank_section_length != 8: warnings.warn("Invalid section length found for blanking section") bval = np.fromfile(lf,count=1,dtype='float32')[0] # 42 header['TOLERANC'] = np.fromfile(lf,count=1,dtype='int32')[0] # 43 eval = tolerance extrema_section_length = np.fromfile(lf,count=1,dtype='int32')[0] # 44 if extrema_section_length != 40: warnings.warn("Invalid section length found for extrema section") vmin,vmax = np.fromfile(lf,count=2,dtype='float32') # 45 xmin,xmax,ymin,ymax,zmin,zmax = np.fromfile(lf,count=6,dtype='int32') # 47 wmin,wmax = np.fromfile(lf,count=2,dtype='int32') # 53 description_section_length = np.fromfile(lf,count=1,dtype='int32')[0] # 55 if description_section_length != 72: warnings.warn("Invalid section length found for description section") #strings = lf.read(description_section_length) # 56 header['BUNIT'] = _read_string(lf, 12) # 56 header['CTYPE1'] = _read_string(lf, 12) # 59 header['CTYPE2'] = _read_string(lf, 12) # 62 header['CTYPE3'] = _read_string(lf, 12) # 65 header['CTYPE4'] = _read_string(lf, 12) # 68 header['CUNIT1'] = _cunit_dict[header['CTYPE1'].strip()] header['CUNIT2'] = _cunit_dict[header['CTYPE2'].strip()] header['CUNIT3'] = _cunit_dict[header['CTYPE3'].strip()] header['COOSYS'] = _read_string(lf, 12) # 71 position_section_length = np.fromfile(lf,count=1,dtype='int32') # 74 if position_section_length != 48: warnings.warn("Invalid section length found for position section") header['OBJNAME'] = _read_string(lf, 4*3) # 75 header['RA'] = np.fromfile(lf, count=1, dtype='float64')[0] * r2deg # 78 header['DEC'] = np.fromfile(lf, count=1, dtype='float64')[0] * r2deg # 80 header['GLON'] = np.fromfile(lf, count=1, dtype='float64')[0] * r2deg # 82 header['GLAT'] = np.fromfile(lf, count=1, dtype='float64')[0] * r2deg # 84 header['EQUINOX'] = np.fromfile(lf,count=1,dtype='float32')[0] # 86 header['PROJWORD'] = _read_string(lf, 4) # 87 header['PTYP'] = np.fromfile(lf,count=1,dtype='int32')[0] # 88 header['A0'] = 
np.fromfile(lf,count=1,dtype='float64')[0] # 89 header['D0'] = np.fromfile(lf,count=1,dtype='float64')[0] # 91 header['PANG'] = np.fromfile(lf,count=1,dtype='float64')[0] # 93 header['XAXI'] = np.fromfile(lf,count=1,dtype='float32')[0] # 95 header['YAXI'] = np.fromfile(lf,count=1,dtype='float32')[0] # 96 spectroscopy_section_length = np.fromfile(lf,count=1,dtype='int32') # 97 if spectroscopy_section_length != 48: warnings.warn("Invalid section length found for spectroscopy section") header['RECVR'] = _read_string(lf, 12) # 98 header['FRES'] = np.fromfile(lf,count=1,dtype='float64')[0] # 101 header['IMAGFREQ'] = np.fromfile(lf,count=1,dtype='float64')[0] # 103 "FIMA" header['REFFREQ'] = np.fromfile(lf,count=1,dtype='float64')[0] # 105 header['VRES'] = np.fromfile(lf,count=1,dtype='float32')[0] # 107 header['VOFF'] = np.fromfile(lf,count=1,dtype='float32')[0] # 108 header['FAXI'] = np.fromfile(lf,count=1,dtype='int32')[0] # 109 resolution_section_length = np.fromfile(lf,count=1,dtype='int32')[0] # 110 if resolution_section_length != 12: warnings.warn("Invalid section length found for resolution section") #header['DOPP'] = np.fromfile(lf,count=1,dtype='float16')[0] # 110a ??? #header['VTYP'] = np.fromfile(lf,count=1,dtype='int16')[0] # 110b # integer, parameter :: vel_unk = 0 ! Unsupported referential :: planetary...) # integer, parameter :: vel_lsr = 1 ! LSR referential # integer, parameter :: vel_hel = 2 ! Heliocentric referential # integer, parameter :: vel_obs = 3 ! Observatory referential # integer, parameter :: vel_ear = 4 ! Earth-Moon barycenter referential # integer, parameter :: vel_aut = -1 ! Take referential from data header['BMAJ'] = np.fromfile(lf,count=1,dtype='float32')[0] # 111 header['BMIN'] = np.fromfile(lf,count=1,dtype='float32')[0] # 112 header['BPA'] = np.fromfile(lf,count=1,dtype='float32')[0] # 113 noise_section_length = np.fromfile(lf,count=1,dtype='int32') if noise_section_length != 0: warnings.warn("Invalid section length found for noise section") header['NOISE'] = np.fromfile(lf,count=1,dtype='float32')[0] # 115 header['RMS'] = np.fromfile(lf,count=1,dtype='float32')[0] # 116 astrometry_section_length = np.fromfile(lf,count=1,dtype='int32') if astrometry_section_length != 0: warnings.warn("Invalid section length found for astrometry section") header['MURA'] = np.fromfile(lf,count=1,dtype='float32')[0] # 118 header['MUDEC'] = np.fromfile(lf,count=1,dtype='float32')[0] # 119 header['PARALLAX'] = np.fromfile(lf,count=1,dtype='float32')[0] # 120 # Apparently CLASS headers aren't required to fill the 'value at # reference pixel' column if (header['CTYPE1'].strip() == 'RA' and header['CRVAL1'] == 0 and header['RA'] != 0): header['CRVAL1'] = header['RA'] header['CRVAL2'] = header['DEC'] # Copied from the type 2 reader: # Use the appropriate projection type ptyp = header['PTYP'] for kw in header: if 'CTYPE' in kw: if header[kw].strip() in cel_types: n_dashes = 5-len(header[kw].strip()) header[kw] = header[kw].strip()+ '-'*n_dashes + _proj_dict[ptyp] other_info = np.fromfile(lf, count=7, dtype='float32') # 121-end if not np.all(other_info == 0): warnings.warn("Found additional information in the last 7 bytes") endpoint = 508 if lf.tell() != endpoint: raise ValueError("Header was not parsed correctly") data = np.fromfile(lf, count=naxis1*naxis2*naxis3, dtype='float32') data[data == bval] = np.nan # for no apparent reason, y and z are 1-indexed and x is zero-indexed if (wmin-1,zmin-1,ymin-1,xmin) != np.unravel_index(np.nanargmin(data), [naxis4,naxis3,naxis2,naxis1]): 
warnings.warn("Data min location does not match that on file. " "Possible error reading data.") if (wmax-1,zmax-1,ymax-1,xmax) != np.unravel_index(np.nanargmax(data), [naxis4,naxis3,naxis2,naxis1]): warnings.warn("Data max location does not match that on file. " "Possible error reading data.") if np.nanmax(data) != vmax: warnings.warn("Data max does not match that on file. " "Possible error reading data.") if np.nanmin(data) != vmin: warnings.warn("Data min does not match that on file. " "Possible error reading data.") return data.reshape([naxis4,naxis3,naxis2,naxis1]),header # debug #return data.reshape([naxis3,naxis2,naxis1]), header, hdr_f, hdr_s, hdr_i, hdr_d, hdr_d_2 def read_lmv_tofits(filename): from astropy.io import fits data,header = read_lmv(filename) # LMV may contain extra dimensions that are improperly labeled data = data.squeeze() bad_kws = ['NAXIS4','CRVAL4','CRPIX4','CDELT4','CROTA4','CUNIT4','CTYPE4'] cards = [fits.header.Card(keyword=k, value=v[0], comment=v[1]) if isinstance(v, tuple) else fits.header.Card(''.join(s for s in k if s in string.printable), ''.join(s for s in v if s in string.printable) if isinstance(v, six.string_types) else v) for k,v in six.iteritems(header) if k not in bad_kws] Header = fits.Header(cards) hdu = fits.PrimaryHDU(data=data, header=Header) return hdu def load_lmv_cube(filename): hdu = read_lmv_tofits(filename) meta = {'filename':filename} return load_fits_cube(hdu, meta=meta) def _read_byte(f): '''Read a single byte (from idlsave)''' return np.uint8(struct.unpack('=B', f.read(4)[:1])[0]) def _read_int16(f): '''Read a signed 16-bit integer (from idlsave)''' return np.int16(struct.unpack('=h', f.read(4)[2:4])[0]) def _read_int32(f): '''Read a signed 32-bit integer (from idlsave)''' return np.int32(struct.unpack('=i', f.read(4))[0]) def _read_int64(f): '''Read a signed 64-bit integer ''' return np.int64(struct.unpack('=q', f.read(8))[0]) def _read_float32(f): '''Read a 32-bit float (from idlsave)''' return np.float32(struct.unpack('=f', f.read(4))[0]) def _read_string(f, size): '''Read a string of known maximum length''' return f.read(size).decode('utf-8').strip() def _read_float64(f): '''Read a 64-bit float (from idlsave)''' return np.float64(struct.unpack('=d', f.read(8))[0]) def _check_val(name, got,expected): if got != expected: log.warn("{2} = {0} instead of {1}".format(got, expected, name)) def read_lmv_type2(lf): """ See image_def.f90 """ header = {} lf.seek(12) # DONE before integer(kind=4) :: ijtyp(3) = 0 ! 1 Image Type # fmt probably matters! Default is "r4", i.e. 
float32 data, but could be float64 fmt = _read_int32(lf) # 4 # number of data blocks ndb = _read_int64(lf) # 5 nhb = _read_int32(lf) # 7 ntb = _read_int32(lf) # 8 version_gdf = _read_int32(lf) # 9 if version_gdf != 20: raise TypeError("Trying to read a version-2 file, but the version" " number is {0} (should be 20)".format(version_gdf)) type_gdf = _read_int32(lf) # 10 dim_start = _read_int32(lf) # 11 pad_trail = _read_int32(lf) # 12 if dim_start % 2 == 0: log.warn("Got even dim_start in lmv cube: this is not expected.") if dim_start > 17: log.warn("dim_start > 17 in lmv cube: this is not expected.") lf.seek(16*4) gdf_maxdims=7 dim_words = _read_int32(lf) # 17 if dim_words != 2*gdf_maxdims+2: log.warn("dim_words = {0} instead of {1}".format(dim_words, gdf_maxdims*2+2)) blan_start = _read_int32(lf) # 18 if blan_start != dim_start+dim_words+2: log.warn("blan_star = {0} instead of {1}".format(blan_start, dim_start+dim_words+2)) mdim = _read_int32(lf) # 19 ndim = _read_int32(lf) # 20 dims = np.fromfile(lf, count=gdf_maxdims, dtype='int64') if np.count_nonzero(dims) != ndim: raise ValueError("Disagreement between ndims and number of nonzero dims.") header['NAXIS'] = ndim valid_dims = [] for ii,dim in enumerate(dims): if dim != 0: header['NAXIS{0}'.format(ii+1)] = dim valid_dims.append(ii) blan_words = _read_int32(lf) if blan_words != 2: log.warn("blan_words = {0} instead of 2".format(blan_words)) extr_start = _read_int32(lf) bval = _read_float32(lf) # blanking value bval_tol = _read_float32(lf) # eval = tolerance # FITS requires integer BLANKs #header['BLANK'] = bval extr_words = _read_int32(lf) if extr_words != 6: log.warn("extr_words = {0} instead of 6".format(extr_words)) coor_start = _read_int32(lf) if coor_start != extr_start+extr_words+2: log.warn("coor_start = {0} instead of {1}".format(coor_start, extr_start+extr_words+2)) rmin = _read_float32(lf) rmax = _read_float32(lf) # position 168 minloc = _read_int64(lf) maxloc = _read_int64(lf) # lf.seek(184) coor_words = _read_int32(lf) if coor_words != gdf_maxdims*6: log.warn("coor_words = {0} instead of {1}".format(coor_words, gdf_maxdims*6)) desc_start = _read_int32(lf) if desc_start != coor_start+coor_words+2: log.warn("desc_start = {0} instead of {1}".format(desc_start, coor_start+coor_words+2)) convert = np.fromfile(lf, count=3*gdf_maxdims, dtype='float64').reshape([gdf_maxdims,3]) # conversion of "convert" to CRPIX/CRVAL/CDELT below desc_words = _read_int32(lf) if desc_words != 3*(gdf_maxdims+1): log.warn("desc_words = {0} instead of {1}".format(desc_words, 3*(gdf_maxdims+1))) null_start = _read_int32(lf) if null_start != desc_start+desc_words+2: log.warn("null_start = {0} instead of {1}".format(null_start, desc_start+desc_words+2)) ijuni = _read_string(lf, 12) # data unit ijcode = [_read_string(lf, 12) for ii in range(gdf_maxdims)] pad_desc = _read_int32(lf) if ijuni.lower() in _bunit_dict: header['BUNIT'] = (_bunit_dict[ijuni.lower()], ijuni) else: header['BUNIT'] = ijuni #! The first block length is thus #! s_dim-1 + (2*mdim+4) + (4) + (8) + (6*mdim+2) + (3*mdim+5) #! = s_dim-1 + mdim*(2+6+3) + (4+4+2+5+8) #! = s_dim-1 + 11*mdim + 23 #! With mdim = 7, s_dim=11, this is 110 spaces #! With mdim = 8, s_dim=11, this is 121 spaces #! MDIM > 8 would NOT fit in one block... #! #! Block 2: Ancillary information #! #! The same logic of Length + Pointer is used there too, although the #! length are fixed. Note rounding to even number for the pointer offsets #! in order to preserve alignement... #! 
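    # Added note (interpretation of the Fortran comments above): block 1 (the
    # "general" header parsed so far) is followed by block 2, the ancillary
    # sections (position, projection, spectroscopy, resolution, noise,
    # astrometry, uv data); the seek below jumps to its start at byte 512.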
lf.seek(512) posi_words = _read_int32(lf) _check_val('posi_words', posi_words, 15) proj_start = _read_int32(lf) source_name = _read_string(lf, 12) header['OBJECT'] = source_name coordinate_system = _read_string(lf, 12) header['RA'] = _read_float64(lf) header['DEC'] = _read_float64(lf) header['LII'] = _read_float64(lf) header['BII'] = _read_float64(lf) header['EPOCH'] = _read_float32(lf) #pad_posi = _read_float32(lf) #print pad_posi #raise ValueError("pad_posi should probably be 0?") #! PROJECTION #integer(kind=4) :: proj_words = 9 ! Projection length: 9 used + 1 padding #integer(kind=4) :: spec_start !! = proj_start + 12 #real(kind=8) :: a0 = 0.d0 ! 89 X of projection center #real(kind=8) :: d0 = 0.d0 ! 91 Y of projection center #real(kind=8) :: pang = 0.d0 ! 93 Projection angle #integer(kind=4) :: ptyp = p_none ! 88 Projection type (see p_... codes) #integer(kind=4) :: xaxi = 0 ! 95 X axis #integer(kind=4) :: yaxi = 0 ! 96 Y axis #integer(kind=4) :: pad_proj #! proj_words = _read_int32(lf) spec_start = _read_int32(lf) _check_val('spec_start', spec_start, proj_start+proj_words+2) if proj_words == 9: header['PROJ_A0'] = _read_float64(lf) header['PROJ_D0'] = _read_float64(lf) header['PROJPANG'] = _read_float64(lf) ptyp = _read_int32(lf) header['PROJXAXI'] = _read_int32(lf) header['PROJYAXI'] = _read_int32(lf) elif proj_words != 0: raise ValueError("Invalid # of projection keywords") for kw in header: if 'CTYPE' in kw: if header[kw].strip() in cel_types: n_dashes = 5-len(header[kw].strip()) header[kw] = header[kw].strip()+ '-'*n_dashes + _proj_dict[ptyp] for ii,((ref,val,inc),code) in enumerate(zip(convert,ijcode)): if ii in valid_dims: # jul14a gio/to_imfits.f90 line 284-313 if ptyp != 0 and (ii+1) in (header['PROJXAXI'], header['PROJYAXI']): #! Compute reference pixel so that VAL(REF) = 0 ref = ref - val/inc if (ii+1) == header['PROJXAXI']: val = header['PROJ_A0'] elif (ii+1) == header['PROJYAXI']: val = header['PROJ_D0'] else: raise ValueError("Impossible state - code bug.") val = val*r2deg inc = inc*r2deg rota = r2deg*header['PROJPANG'] elif code in ('RA', 'L', 'B', 'DEC', 'LII', 'BII', 'GLAT', 'GLON', 'LAT', 'LON'): val = val*r2deg inc = inc*r2deg rota = 0.0 # These are not implemented: prefer to maintain original units (we're # reading in to spectral_cube after all, no need to change units until the # output step) #elseif (code.eq.'FREQUENCY') then #val = val*1.0d6 ! MHz to Hz #inc = inc*1.0d6 #elseif (code.eq.'VELOCITY') then #code = 'VRAD' ! force VRAD instead of VELOCITY for CASA #val = val*1.0d3 ! km/s to m/s #inc = inc*1.0d3 header['CRPIX{0}'.format(ii+1)] = ref header['CRVAL{0}'.format(ii+1)] = val header['CDELT{0}'.format(ii+1)] = inc for ii,ctype in enumerate(ijcode): if ii in valid_dims: header['CTYPE{0}'.format(ii+1)] = _ctype_dict[ctype] header['CUNIT{0}'.format(ii+1)] = _cunit_dict[ctype] spec_words = _read_int32(lf) reso_start = _read_int32(lf) _check_val('reso_start', reso_start, proj_start+proj_words+2+spec_words+2) if spec_words == 14: header['FRES'] = _read_float64(lf) header['FIMA'] = _read_float64(lf) header['FREQ'] = _read_float64(lf) header['VRES'] = _read_float32(lf) header['VOFF'] = _read_float32(lf) header['DOPP'] = _read_float32(lf) header['FAXI'] = _read_int32(lf) header['LINENAME'] = _read_string(lf, 12) header['VTYPE'] = _read_int32(lf) elif spec_words != 0: raise ValueError("Invalid # of spectroscopic keywords") #! SPECTROSCOPY #integer(kind=4) :: spec_words = 14 ! Spectroscopy length: 14 used #integer(kind=4) :: reso_start !! 
= spec_words + 16 #real(kind=8) :: fres = 0.d0 !101 Frequency resolution #real(kind=8) :: fima = 0.d0 !103 Image frequency #real(kind=8) :: freq = 0.d0 !105 Rest Frequency #real(kind=4) :: vres = 0.0 !107 Velocity resolution #real(kind=4) :: voff = 0.0 !108 Velocity offset #real(kind=4) :: dopp = 0.0 ! Doppler factor #integer(kind=4) :: faxi = 0 !109 Frequency axis #integer(kind=4) :: ijlin(3) = 0 ! 98 Line name #integer(kind=4) :: vtyp = vel_unk ! Velocity type (see vel_... codes) reso_words = _read_int32(lf) nois_start = _read_int32(lf) _check_val('nois_start', nois_start, proj_start+proj_words+2+spec_words+2+reso_words+2) if reso_words == 3: header['BMAJ'] = _read_float32(lf) header['BMIN'] = _read_float32(lf) header['BPA'] = _read_float32(lf) #pad_reso = _read_float32(lf) elif reso_words != 0: raise ValueError("Invalid # of resolution keywords") #! RESOLUTION #integer(kind=4) :: reso_words = 3 ! Resolution length: 3 used + 1 padding #integer(kind=4) :: nois_start !! = reso_words + 6 #real(kind=4) :: majo = 0.0 !111 Major axis #real(kind=4) :: mino = 0.0 !112 Minor axis #real(kind=4) :: posa = 0.0 !113 Position angle #real(kind=4) :: pad_reso nois_words = _read_int32(lf) astr_start = _read_int32(lf) _check_val('astr_start', astr_start, proj_start+proj_words+2+spec_words+2+reso_words+2+nois_words+2) if nois_words == 2: header['NOISE_T'] = (_read_float32(lf), "Theoretical Noise") header['NOISERMS'] = (_read_float32(lf), "Measured (RMS) noise") elif nois_words != 0: raise ValueError("Invalid # of noise keywords") #! NOISE #integer(kind=4) :: nois_words = 2 ! Noise section length: 2 used #integer(kind=4) :: astr_start !! = s_nois + 4 #real(kind=4) :: noise = 0.0 ! 115 Theoretical noise #real(kind=4) :: rms = 0.0 ! 116 Actual noise astr_words = _read_int32(lf) uvda_start = _read_int32(lf) _check_val('uvda_start', uvda_start, proj_start+proj_words+2+spec_words+2+reso_words+2+nois_words+2+astr_words+2) if astr_words == 3: header['MURA'] = _read_float32(lf) header['MUDEC'] = _read_float32(lf) header['PARALLAX'] = _read_float32(lf) elif astr_words != 0: raise ValueError("Invalid # of astrometry keywords") #! ASTROMETRY #integer(kind=4) :: astr_words = 3 ! Proper motion section length: 3 used + 1 padding #integer(kind=4) :: uvda_start !! = s_astr + 4 #real(kind=4) :: mura = 0.0 ! 118 along RA, in mas/yr #real(kind=4) :: mudec = 0.0 ! 119 along Dec, in mas/yr #real(kind=4) :: parallax = 0.0 ! 120 in mas #real(kind=4) :: pad_astr #! real(kind=4) :: pepoch = 2000.0 ! 121 in yrs ? code_uvt_last=25 uvda_words = _read_int32(lf) void_start = _read_int32(lf) _check_val('void_start', void_start, proj_start + proj_words + 2 + spec_words + 2 + reso_words + 2 + nois_words + 2 + astr_words + 2 + uvda_words + 2) if uvda_words == 18+2*code_uvt_last: version_uv = _read_int32(lf) nchan = _read_int32(lf) nvisi = _read_int64(lf) nstokes = _read_int32(lf) natom = _read_int32(lf) basemin = _read_float32(lf) basemax = _read_float32(lf) fcol = _read_int32(lf) lcol = _read_int32(lf) nlead = _read_int32(lf) ntrail = _read_int32(lf) column_pointer = np.fromfile(lf, count=code_uvt_last, dtype='int32') column_size = np.fromfile(lf, count=code_uvt_last, dtype='int32') column_codes = np.fromfile(lf, count=nlead+ntrail, dtype='int32') column_types = np.fromfile(lf, count=nlead+ntrail, dtype='int32') order = _read_int32(lf) nfreq = _read_int32(lf) atoms = np.fromfile(lf, count=4, dtype='int32') elif uvda_words != 0: raise ValueError("Invalid # of UV data keywords") #! 
UV_DATA information #integer(kind=4) :: uvda_words = 18+2*code_uvt_last ! Length of section: 14 used #integer(kind=4) :: void_start !! = s_uvda + l_uvda + 2 #integer(kind=4) :: version_uv = code_version_uvt_current ! 1 version number. Will allow us to change the data format #integer(kind=4) :: nchan = 0 ! 2 Number of channels #integer(kind=8) :: nvisi = 0 ! 3-4 Independent of the transposition status #integer(kind=4) :: nstokes = 0 ! 5 Number of polarizations #integer(kind=4) :: natom = 0 ! 6. 3 for real, imaginary, weight. 1 for real. #real(kind=4) :: basemin = 0. ! 7 Minimum Baseline #real(kind=4) :: basemax = 0. ! 8 Maximum Baseline #integer(kind=4) :: fcol ! 9 Column of first channel #integer(kind=4) :: lcol ! 10 Column of last channel #! The number of information per channel can be obtained by #! (lcol-fcol+1)/(nchan*natom) #! so this could allow to derive the number of Stokes parameters #! Leading data at start of each visibility contains specific information #integer(kind=4) :: nlead = 7 ! 11 Number of leading informations (at lest 7) #! Trailing data at end of each visibility may hold additional information #integer(kind=4) :: ntrail = 0 ! 12 Number of trailing informations #! #! Leading / Trailing information codes have been specified before #integer(kind=4) :: column_pointer(code_uvt_last) = code_null ! Back pointer to the columns... #integer(kind=4) :: column_size(code_uvt_last) = 0 ! Number of columns for each #! In the data, we instead have the codes for each column #! integer(kind=4) :: column_codes(nlead+ntrail) ! Start column for each ... #! integer(kind=4) :: column_types(nlead+ntrail) /0,1,2/ ! Number of columns for each: 1 real*4, 2 real*8 #! Leading / Trailing information codes #! #integer(kind=4) :: order = 0 ! 13 Stoke/Channel ordering #integer(kind=4) :: nfreq = 0 ! 14 ! 0 or = nchan*nstokes #integer(kind=4) :: atoms(4) ! 15-18 Atom description #! #real(kind=8), pointer :: freqs(:) => null() ! (nchan*nstokes) = 0d0 #integer(kind=4), pointer :: stokes(:) => null() ! (nchan*nstokes) or (nstokes) = code_stoke #! #real(kind=8), pointer :: ref(:) => null() #real(kind=8), pointer :: val(:) => null() #real(kind=8), pointer :: inc(:) => null() lf.seek(1024) real_dims = dims[:ndim] data = np.fromfile(lf, count=np.product(real_dims), dtype='float32').reshape(real_dims[::-1]) data[data==bval] = np.nan return data,header spectral-cube-0.3.1/spectral_cube/io/core.py0000644000077000000240000000472112643464660021030 0ustar adamstaff00000000000000from __future__ import print_function, absolute_import, division def read(filename, format=None, hdu=None, **kwargs): """ Read a file into a :class:`SpectralCube` or :class:`StokesSpectralCube` instance. Parameters ---------- filename : str or HDU File to read format : str, optional File format. hdu : int or str For FITS files, the HDU to read in (can be the ID or name of an HDU). kwargs : dict If the format is 'fits', the kwargs are passed to :func:`~astropy.io.fits.open`. Returns ------- cube : :class:`SpectralCube` or :class:`StokesSpectralCube` The spectral cube read in """ if format is None: format = determine_format(filename) if format == 'fits': from .fits import load_fits_cube return load_fits_cube(filename, hdu=hdu, **kwargs) elif format == 'casa_image': from .casa_image import load_casa_image return load_casa_image(filename) elif format in ('class_lmv','lmv'): from .class_lmv import load_lmv_cube return load_lmv_cube(filename) else: raise ValueError("Format {0} not implemented. 
Supported formats are "
                         "'fits', 'casa_image', and 'lmv'.".format(format))


def write(filename, cube, overwrite=False, format=None):
    """
    Write :class:`SpectralCube` or :class:`StokesSpectralCube` to a file.

    Parameters
    ----------
    filename : str
        Name of the output file
    cube : :class:`SpectralCube` or :class:`StokesSpectralCube`
        The spectral cube to write out
    overwrite : bool, optional
        Whether to overwrite the output file
    format : str, optional
        File format.
    """
    if format is None:
        format = determine_format(filename)
    if format == 'fits':
        from .fits import write_fits_cube
        write_fits_cube(filename, cube, overwrite=overwrite)
    else:
        raise ValueError("Format {0} not implemented. The only supported "
                         "format is 'fits'".format(format))


def determine_format(input):
    from .fits import is_fits
    from .casa_image import is_casa_image
    from .class_lmv import is_lmv
    if is_fits(input):
        return 'fits'
    elif is_casa_image(input):
        return 'casa_image'
    elif is_lmv(input):
        return 'lmv'
    else:
        raise ValueError("Could not determine format - use the `format=` "
                         "parameter to explicitly set the format")
spectral-cube-0.3.1/spectral_cube/io/fits.py0000644000077000000240000001237712647754466021055 0ustar adamstaff00000000000000from __future__ import print_function, absolute_import, division

import warnings

from astropy.io import fits
from astropy.wcs import WCS
from astropy.extern import six
from astropy.utils import OrderedDict
from astropy.io.fits.hdu.hdulist import fitsopen as fits_open
import numpy as np

import datetime

try:
    from .. import version
    SPECTRAL_CUBE_VERSION = version.version
except ImportError:
    # We might be running py.test on a clean checkout
    SPECTRAL_CUBE_VERSION = 'dev'

from .. import SpectralCube, StokesSpectralCube, LazyMask
from .. import cube_utils


def first(iterable):
    return next(iter(iterable))


# FITS registry code - once Astropy includes a proper extensible I/O base
# class, we can use that instead.  The following code takes care of
# interpreting string input (filename), HDU, and HDUList.

def is_fits(input, **kwargs):
    """
    Determine whether input is in FITS format
    """
    if isinstance(input, six.string_types):
        # The suffixes are compared against the lower-cased input, so they
        # must themselves be lower-case ('.fits.Z' could never have matched)
        if input.lower().endswith(('.fits', '.fits.gz', '.fit',
                                   '.fit.gz', '.fits.z', '.fit.z')):
            return True
    elif isinstance(input, (fits.HDUList, fits.PrimaryHDU, fits.ImageHDU)):
        return True
    return False


def read_data_fits(input, hdu=None, **kwargs):
    """
    Read an array and header from a FITS file.

    Parameters
    ----------
    input : str or compatible `astropy.io.fits` HDU object
        If a string, the filename to read the data from. The following
        `astropy.io.fits` HDU objects can be used as input:
        - :class:`~astropy.io.fits.hdu.table.PrimaryHDU`
        - :class:`~astropy.io.fits.hdu.table.ImageHDU`
        - :class:`~astropy.io.fits.hdu.hdulist.HDUList`
    hdu : int or str, optional
        The HDU to read the data from.
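
    Examples
    --------
    Illustrative only (``mycube.fits`` and the extension name ``'SCI'`` are
    hypothetical)::

        >>> data, header = read_data_fits('mycube.fits', hdu='SCI')  # doctest: +SKIP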
""" if isinstance(input, fits.HDUList): # Parse all array objects arrays = OrderedDict() for ihdu, hdu_item in enumerate(input): if isinstance(hdu_item, (fits.PrimaryHDU, fits.ImageHDU)): arrays[ihdu] = hdu_item if len(arrays) > 1: if hdu is None: hdu = first(arrays) warnings.warn("hdu= was not specified but multiple arrays" " are present, reading in first available" " array (hdu={0})".format(hdu)) # hdu might not be an integer, so we first need to convert it # to the correct HDU index hdu = input.index_of(hdu) if hdu in arrays: array_hdu = arrays[hdu] else: raise ValueError("No array found in hdu={0}".format(hdu)) elif len(arrays) == 1: array_hdu = arrays[first(arrays)] else: raise ValueError("No arrays found") elif isinstance(input, (fits.PrimaryHDU, fits.ImageHDU)): array_hdu = input else: hdulist = fits_open(input, **kwargs) try: return read_data_fits(hdulist, hdu=hdu) finally: hdulist.close() return array_hdu.data, array_hdu.header def load_fits_cube(input, hdu=0, meta=None, **kwargs): """ Read in a cube from a FITS file using astropy. Parameters ---------- input: str or HDU The FITS cube file name or HDU hdu: int The extension number containing the data to be read meta: dict Metadata (can be inherited from other readers, for example) """ data, header = read_data_fits(input, hdu=hdu, **kwargs) if meta is None: meta = {} if 'BUNIT' in header: meta['BUNIT'] = header['BUNIT'] wcs = WCS(header) if wcs.wcs.naxis == 3: data, wcs = cube_utils._orient(data, wcs) mask = LazyMask(np.isfinite, data=data, wcs=wcs) assert data.shape == mask._data.shape cube = SpectralCube(data, wcs, mask, meta=meta, header=header) assert cube._data.shape == cube._mask._data.shape elif wcs.wcs.naxis == 4: data, wcs = cube_utils._split_stokes(data, wcs) stokes_data = {} for component in data: comp_data, comp_wcs = cube_utils._orient(data[component], wcs) comp_mask = LazyMask(np.isfinite, data=comp_data, wcs=comp_wcs) stokes_data[component] = SpectralCube(comp_data, wcs=comp_wcs, mask=comp_mask, meta=meta, header=header) cube = StokesSpectralCube(stokes_data) else: raise Exception("Data should be 3- or 4-dimensional") return cube def write_fits_cube(filename, cube, overwrite=False, include_origin_notes=True): """ Write a FITS cube with a WCS to a filename """ if isinstance(cube, SpectralCube): hdu = cube.hdu now = datetime.datetime.strftime(datetime.datetime.now(), "%Y/%m/%d-%H:%M:%S") hdu.header.add_history("Written by spectral_cube v{version} on " "{date}".format(version=SPECTRAL_CUBE_VERSION, date=now)) hdu.writeto(filename, clobber=overwrite) else: raise NotImplementedError() spectral-cube-0.3.1/spectral_cube/lower_dimensional_structures.py0000644000077000000240000001745612643657174025523 0ustar adamstaff00000000000000from __future__ import print_function, absolute_import, division from astropy import units as u from astropy import wcs from astropy.io.fits import PrimaryHDU, ImageHDU, Header, Card, HDUList from astropy import wcs from .io.core import determine_format import numpy as np class LowerDimensionalObject(u.Quantity): """ Generic class for 1D and 2D objects """ @property def wcs(self): return self._wcs @property def meta(self): return self._meta @property def mask(self): return self._mask @property def header(self): header = self._header # This inplace update is OK; it's not bad to overwrite WCS in this # header if self.wcs is not None: header.update(self.wcs.to_header()) header['BUNIT'] = self.unit.to_string(format='fits') header.insert(2, Card(keyword='NAXIS', value=self.ndim)) for ind,sh in 
enumerate(self.shape[::-1]):
            header.insert(3+ind, Card(keyword='NAXIS{0:1d}'.format(ind+1),
                                      value=sh))
        return header

    @property
    def hdu(self):
        from astropy.io import fits
        if self.wcs is None:
            hdu = fits.PrimaryHDU(self.value)
        else:
            hdu = fits.PrimaryHDU(self.value, header=self.wcs.to_header())
        hdu.header['BUNIT'] = self.unit.to_string(format='fits')
        if 'beam' in self.meta:
            hdu.header.update(self.meta['beam'].to_header_keywords())
        return hdu

    def write(self, filename, format=None, overwrite=False):
        """
        Write the lower dimensional object to a file.

        Parameters
        ----------
        filename : str
            The path to write the file to
        format : str
            The kind of file to write. (Currently limited to 'fits')
        overwrite : bool
            If True, overwrite `filename` if it exists
        """
        if format is None:
            format = determine_format(filename)
        if format == 'fits':
            self.hdu.writeto(filename, clobber=overwrite)
        else:
            raise ValueError("Unknown format '{0}' - the only available "
                             "format at this time is 'fits'".format(format))

    def to(self, unit, equivalencies=[]):
        """
        Return a new ``LowerDimensionalObject`` of the same class with the
        specified unit.  See `astropy.units.Quantity.to` for further details.
        """
        converted_array = u.Quantity.to(self, unit,
                                        equivalencies=equivalencies).value

        # use private versions of variables, not the generated property
        # versions
        # Not entirely sure the use of __class__ here is kosher, but we do want
        # self.__class__, not super()
        new = self.__class__(value=converted_array, unit=unit, copy=True,
                             wcs=self._wcs, meta=self._meta, mask=self._mask,
                             header=self._header)
        return new

    def __getitem__(self, key):
        """
        Return a new ``LowerDimensionalObject`` of the same class while
        keeping other properties fixed.
        """
        new_qty = super(LowerDimensionalObject, self).__getitem__(key)

        if new_qty.ndim < 2:
            # do not return a projection
            return u.Quantity(new_qty)

        if self._wcs is not None:
            newwcs = self._wcs[key]
        else:
            newwcs = None
        new = self.__class__(value=new_qty.value, unit=new_qty.unit,
                             copy=False, wcs=newwcs, meta=self._meta,
                             mask=self._mask, header=self._header)
        return new

    def __array_finalize__(self, obj):
        self._unit = getattr(obj, '_unit', None)
        self._wcs = getattr(obj, '_wcs', None)
        self._meta = getattr(obj, '_meta', None)
        self._mask = getattr(obj, '_mask', None)
        self._header = getattr(obj, '_header', None)

    @property
    def __array_priority__(self):
        return super(LowerDimensionalObject, self).__array_priority__*2


class Projection(LowerDimensionalObject):

    def __new__(cls, value, unit=None, dtype=None, copy=True, wcs=None,
                meta=None, mask=None, header=None):
        if np.asarray(value).ndim != 2:
            raise ValueError("value should be a 2-d array")
        if wcs is not None and wcs.wcs.naxis != 2:
            raise ValueError("wcs should have two dimensions")
        self = u.Quantity.__new__(cls, value, unit=unit, dtype=dtype,
                                  copy=copy).view(cls)
        self._wcs = wcs
        self._meta = {} if meta is None else meta
        self._mask = mask
        if header is not None:
            self._header = header
        else:
            self._header = Header()
        return self

    def quicklook(self, filename=None, use_aplpy=True):
        """
        Use aplpy to make a quick-look image of the projection.  This will
        make the `FITSFigure` attribute available.

        If there are unmatched celestial axes, this will instead show an
        image without axis labels.

        Parameters
        ----------
        filename : str, optional
            The filename to save the quicklook to.
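
        Examples
        --------
        Illustrative only (``proj`` stands for any existing `Projection`)::

            >>> proj.quicklook(filename='quicklook.png')  # doctest: +SKIP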
""" if use_aplpy: try: if not hasattr(self, 'FITSFigure'): import aplpy self.FITSFigure = aplpy.FITSFigure(self.hdu) self.FITSFigure.show_grayscale() self.FITSFigure.add_colorbar() if filename is not None: self.FITSFigure.save(filename) except (wcs.InconsistentAxisTypesError, ImportError): self._quicklook_mpl(filename=filename) else: self._quicklook_mpl(filename=filename) def _quicklook_mpl(self, filename=None): from matplotlib import pyplot self.figure = pyplot.imshow(self.value) if filename is not None: self.figure.savefig(filename) # A slice is just like a projection in every way class Slice(Projection): pass class OneDSpectrum(LowerDimensionalObject): def __new__(cls, value, unit=None, dtype=None, copy=True, wcs=None, meta=None, mask=None, header=None): if np.asarray(value).ndim != 1: raise ValueError("value should be a 1-d array") if wcs is not None and wcs.wcs.naxis != 1: raise ValueError("wcs should have two dimension") self = u.Quantity.__new__(cls, value, unit=unit, dtype=dtype, copy=copy).view(cls) self._wcs = wcs self._meta = {} if meta is None else meta self._mask = mask if header is not None: self._header = header else: self._header = Header() return self @property def spectral_axis(self): """ A `~astropy.units.Quantity` array containing the central values of each channel along the spectral axis. """ return self.wcs.wcs_pix2world(np.arange(self.size), 0)[0] def quicklook(self, filename=None, drawstyle='steps-mid', **kwargs): """ Plot the spectrum with current spectral units in the currently open figure kwargs are passed to `matplotlib.pyplot.plot` Parameters ---------- filename : str or Non Optional - the filename to save the quicklook to. """ from matplotlib import pyplot ax = pyplot.gca() ax.plot(self.spectral_axis, self.value, drawstyle=drawstyle, **kwargs) ax.set_xlabel(self.wcs.wcs.cunit[0]) ax.set_ylabel(self.unit) if filename is not None: pyplot.gcf().savefig(filename) spectral-cube-0.3.1/spectral_cube/masks.py0000644000077000000240000005267012643464660020615 0ustar adamstaff00000000000000from __future__ import print_function, absolute_import, division import abc import numpy as np from numpy.lib.stride_tricks import as_strided from . import wcs_utils from .lower_dimensional_structures import Projection __all__ = ['InvertedMask', 'CompositeMask', 'BooleanArrayMask', 'LazyMask', 'LazyComparisonMask', 'FunctionMask'] # Global version of the with_spectral_unit docs to avoid duplicating them with_spectral_unit_docs = """ Parameters ---------- unit : u.Unit Any valid spectral unit: velocity, (wave)length, or frequency. Only vacuum units are supported. velocity_convention : u.doppler_relativistic, u.doppler_radio, or u.doppler_optical The velocity convention to use for the output velocity axis. Required if the output type is velocity. rest_value : u.Quantity A rest wavelength or frequency with appropriate units. Required if output type is velocity. The cube's WCS should include this already if the *input* type is velocity, but the WCS's rest wavelength/frequency can be overridden with this parameter. """ def is_broadcastable_and_smaller(shp1, shp2): """ Test if shape 1 can be broadcast to shape 2, not allowing the case where shape 2 has a dimension length 1 """ for a, b in zip(shp1[::-1], shp2[::-1]): # b==1 is broadcastable but not desired if a == 1 or a == b: pass else: return False return True def dims_to_skip(shp1, shp2): """ For a shape `shp1` that is broadcastable to shape `shp2`, specify which dimensions are length 1. 
    Parameters
    ----------
    shp1, shp2 : tuple
        The shapes to compare; ``shp1`` must be broadcastable to ``shp2``.
    """
    if not is_broadcastable_and_smaller(shp1, shp2):
        raise ValueError("Cannot broadcast {0} to {1}".format(shp1,shp2))
    dims = []

    for ii,(a, b) in enumerate(zip(shp1[::-1], shp2[::-1])):
        # b==1 is broadcastable but not desired
        if a == 1:
            dims.append(len(shp2) - ii - 1)
        elif a == b:
            pass
        else:
            raise ValueError("This should not be possible")

    if len(shp1) < len(shp2):
        dims += list(range(len(shp2)-len(shp1)))

    return dims

def view_of_subset(shp1, shp2, view):
    """
    Given two shapes and a view, assuming that shape 1 can be broadcast
    to shape 2, return the sub-view that applies to shape 1
    """
    dts = dims_to_skip(shp1, shp2)
    if view:
        cv_view = [x for ii,x in enumerate(view)
                   if ii not in dts]
    else:
        # if no view is specified, still need to slice
        cv_view = [x for ii,x in enumerate([slice(None)]*3)
                   if ii not in dts]

    return cv_view

class MaskBase(object):

    __metaclass__ = abc.ABCMeta

    def include(self, data=None, wcs=None, view=()):
        """
        Return a boolean array indicating which values should be included.

        If ``view`` is passed, only the sliced mask will be returned, which
        avoids having to load the whole mask in memory. Otherwise, the whole
        mask is returned in-memory.
        """
        self._validate_wcs(data, wcs)
        return self._include(data=data, wcs=wcs, view=view)

    def _validate_wcs(self, data, wcs):
        """
        This method can be overridden in cases where the data and WCS have to
        conform to some rules. This gets called automatically when
        ``include`` or ``exclude`` are called.
        """
        pass

    @abc.abstractmethod
    def _include(self, data=None, wcs=None, view=()):
        pass

    def exclude(self, data=None, wcs=None, view=()):
        """
        Return a boolean array indicating which values should be excluded.

        If ``view`` is passed, only the sliced mask will be returned, which
        avoids having to load the whole mask in memory. Otherwise, the whole
        mask is returned in-memory.
        """
        self._validate_wcs(data, wcs)
        return self._exclude(data=data, wcs=wcs, view=view)

    def _exclude(self, data=None, wcs=None, view=()):
        return ~self._include(data=data, wcs=wcs, view=view)

    def _flattened(self, data, wcs=None, view=()):
        """
        Return a flattened array of the included elements of the cube

        Parameters
        ----------
        data : array-like
            The data array to flatten
        view : tuple, optional
            Any slicing to apply to the data before flattening

        Returns
        -------
        flat_array : `~numpy.ndarray`
            A 1-D ndarray containing the flattened output

        Notes
        -----
        This is an internal method used by :class:`SpectralCube`.
        """
        return data[view][self.include(data=data, wcs=wcs, view=view)]

    def _filled(self, data, wcs=None, fill=np.nan, view=()):
        """
        Replace the excluded elements of *data* with *fill*.

        Parameters
        ----------
        data : array-like
            Input array
        fill : number
            Replacement value
        view : tuple, optional
            Any slicing to apply to the data before filling

        Returns
        -------
        filled_array : `~numpy.ndarray`
            An ndarray with the same shape as the (viewed) input, with the
            excluded elements replaced by *fill*

        Notes
        -----
        This is an internal method used by :class:`SpectralCube`.
Users should use the property :meth:`MaskBase.filled_data` """ # Must convert to floating point, but should not change from inherited # type otherwise dt = np.find_common_type([data.dtype], [np.float]) sliced_data = data[view].astype(dt) ex = self.exclude(data=data, wcs=wcs, view=view) sliced_data[ex] = fill return sliced_data def __and__(self, other): return CompositeMask(self, other, operation='and') def __or__(self, other): return CompositeMask(self, other, operation='or') def __xor__(self, other): return CompositeMask(self, other, operation='xor') def __invert__(self): return InvertedMask(self) def __getitem__(self): raise NotImplementedError("Slicing not supported by mask class {0}" .format(self.__class__.__name__)) def quicklook(self, view, wcs=None, filename=None, use_aplpy=True): ''' View a 2D slice of the mask, specified by view. Parameters ---------- view : tuple Slicing to apply to the mask. Must return a 2D slice. wcs : astropy.wcs.WCS, optional WCS object to use in plotting the mask slice. filename : str, optional Filename of the output image. Enables saving of the plot. ''' view_twod = self.include(view=view) proj = Projection(view_twod, wcs=wcs) proj.quicklook(filename=filename, use_aplpy=use_aplpy) def _get_new_wcs(self, unit, velocity_convention=None, rest_value=None): """ Returns a new WCS with a different Spectral Axis unit """ from .spectral_axis import convert_spectral_axis,determine_ctype_from_vconv out_ctype = determine_ctype_from_vconv(self._wcs.wcs.ctype[self._wcs.wcs.spec], unit, velocity_convention=velocity_convention) newwcs = convert_spectral_axis(self._wcs, unit, out_ctype, rest_value=rest_value) newwcs.wcs.set() return newwcs _get_new_wcs.__doc__ += with_spectral_unit_docs class InvertedMask(MaskBase): def __init__(self, mask): self._mask = mask def _include(self, data=None, wcs=None, view=()): return ~self._mask.include(data=data, wcs=wcs, view=view) def __getitem__(self, view): return InvertedMask(self._mask[view]) def with_spectral_unit(self, unit, velocity_convention=None, rest_value=None): """ Get an InvertedMask copy with a WCS in the modified unit """ newmask = self._mask.with_spectral_unit(unit, velocity_convention=velocity_convention, rest_value=rest_value) return InvertedMask(newmask) with_spectral_unit.__doc__ += with_spectral_unit_docs class CompositeMask(MaskBase): """ A combination of several masks. The included masks are treated with the specified operation. 
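
    Composite masks are normally created by combining existing masks with
    the ``&``, ``|``, or ``^`` operators; for example, ``mask1 & mask2``
    constructs ``CompositeMask(mask1, mask2, operation='and')``.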
Parameters ---------- mask1, mask2 : Masks The two masks to composite operation : str Either 'and' or 'or'; the operation used to combine the masks """ def __init__(self, mask1, mask2, operation='and'): self._mask1 = mask1 self._mask2 = mask2 self._operation = operation def _validate_wcs(self, new_data, new_wcs): self._mask1._validate_wcs(new_data, new_wcs) self._mask2._validate_wcs(new_data, new_wcs) def _include(self, data=None, wcs=None, view=()): result_mask_1 = self._mask1._include(data=data, wcs=wcs, view=view) result_mask_2 = self._mask2._include(data=data, wcs=wcs, view=view) if self._operation == 'and': return result_mask_1 & result_mask_2 elif self._operation == 'or': return result_mask_1 | result_mask_2 elif self._operation == 'xor': return result_mask_1 ^ result_mask_2 else: raise ValueError("Operation '{0}' not supported".format(self._operation)) def __getitem__(self, view): return CompositeMask(self._mask1[view], self._mask2[view], operation=self._operation) def with_spectral_unit(self, unit, velocity_convention=None, rest_value=None): """ Get a CompositeMask copy in which each component has a WCS in the modified unit """ newmask1 = self._mask1.with_spectral_unit(unit, velocity_convention=velocity_convention, rest_value=rest_value) newmask2 = self._mask2.with_spectral_unit(unit, velocity_convention=velocity_convention, rest_value=rest_value) return CompositeMask(newmask1, newmask2, self._operation) with_spectral_unit.__doc__ += with_spectral_unit_docs class BooleanArrayMask(MaskBase): """ A mask defined as an array on a spectral cube WCS Parameters ---------- mask: `numpy.ndarray` A boolean numpy ndarray wcs: `astropy.wcs.WCS` The WCS object shape: tuple The shape of the region the array is masking. This is *required* if ``mask.ndim != data.ndim`` to provide rules for how to broadcast the mask """ def __init__(self, mask, wcs, shape=None, include=True): self._mask_type = 'include' if include else 'exclude' self._wcs = wcs self._wcs_whitelist = set() #if mask.ndim != 3 and (shape is None or len(shape) != 3): # raise ValueError("When creating a BooleanArrayMask with <3 dimensions, " # "the shape of the 3D array must be specified.") if shape is not None and not is_broadcastable_and_smaller(mask.shape, shape): raise ValueError("Mask cannot be broadcast to the specified shape.") self._shape = shape or mask.shape n_extra_dims = (len(self._shape)-mask.ndim) if n_extra_dims > 0: strides = (0,)*n_extra_dims + mask.strides self._mask = as_strided(mask, shape=self.shape, strides=strides) else: self._mask = mask def _validate_wcs(self, new_data=None, new_wcs=None): if new_data is not None and not is_broadcastable_and_smaller(self._mask.shape, new_data.shape): raise ValueError("data shape cannot be broadcast to match mask shape") if new_wcs is not None: if new_wcs not in self._wcs_whitelist: if not wcs_utils.check_equality(new_wcs, self._wcs, warn_missing=True): raise ValueError("WCS does not match mask WCS") else: self._wcs_whitelist.add(new_wcs) def _include(self, data=None, wcs=None, view=()): result_mask = self._mask[view] return result_mask if self._mask_type == 'include' else ~result_mask def _exclude(self, data=None, wcs=None, view=()): result_mask = self._mask[view] return result_mask if self._mask_type == 'exclude' else ~result_mask @property def shape(self): return self._shape def __getitem__(self, view): return BooleanArrayMask(self._mask[view], wcs_utils.slice_wcs(self._wcs, view), shape=self._mask[view].shape) def with_spectral_unit(self, unit, velocity_convention=None, 
rest_value=None): """ Get a BooleanArrayMask copy with a WCS in the modified unit """ newwcs = self._get_new_wcs(unit, velocity_convention, rest_value) newmask = BooleanArrayMask(self._mask, newwcs, include=self._mask_type=='include') return newmask with_spectral_unit.__doc__ += with_spectral_unit_docs class LazyMask(MaskBase): """ A boolean mask defined by the evaluation of a function on a fixed dataset. This is conceptually identical to a fixed boolean mask as in :class:`BooleanArrayMask` but defers the evaluation of the mask until it is needed. Parameters ---------- function : callable The function to apply to ``data``. This method should accept a numpy array, which will be a subset of the data array passed to __init__. It should return a boolean array, where True values indicate that which pixels are valid/unaffected by masking. data : array-like The array to evaluate ``function`` on. This should support Numpy-like slicing syntax. wcs : `~astropy.wcs.WCS` The WCS of the input data, which is used to define the coordinates for which the boolean mask is defined. """ def __init__(self, function, cube=None, data=None, wcs=None): self._function = function if cube is not None and (data is not None or wcs is not None): raise ValueError("Pass only cube or (data & wcs)") elif cube is not None: self._data = cube._data self._wcs = cube._wcs elif data is not None and wcs is not None: self._data = data self._wcs = wcs else: raise ValueError("Either a cube or (data & wcs) is required.") self._wcs_whitelist = set() def _validate_wcs(self, new_data=None, new_wcs=None): if new_data is not None: if not is_broadcastable_and_smaller(new_data.shape, self._data.shape): raise ValueError("data shape cannot be broadcast to match mask shape") if new_wcs is not None: if new_wcs not in self._wcs_whitelist: if not wcs_utils.check_equality(new_wcs, self._wcs, warn_missing=True): raise ValueError("WCS does not match mask WCS") else: self._wcs_whitelist.add(new_wcs) def _include(self, data=None, wcs=None, view=()): self._validate_wcs(data, wcs) return self._function(self._data[view]) def __getitem__(self, view): return LazyMask(self._function, data=self._data[view], wcs=wcs_utils.slice_wcs(self._wcs, view)) def with_spectral_unit(self, unit, velocity_convention=None, rest_value=None): """ Get a LazyMask copy with a WCS in the modified unit """ newwcs = self._get_new_wcs(unit, velocity_convention, rest_value) newmask = LazyMask(self._function, data=self._data, wcs=newwcs) return newmask with_spectral_unit.__doc__ += with_spectral_unit_docs class LazyComparisonMask(LazyMask): """ A boolean mask defined by the evaluation of a comparison function between a fixed dataset and some other value. This is conceptually similar to the :class:`LazyMask` but it will ensure that the comparison value can be compared to the data Parameters ---------- function : callable The function to apply to ``data``. This method should accept a numpy array, which will be the data array passed to __init__, and a second argument also passed to __init__. It should return a boolean array, where True values indicate that which pixels are valid/unaffected by masking. comparison_value : float or array The comparison value for the array data : array-like The array to evaluate ``function`` on. This should support Numpy-like slicing syntax. wcs : `~astropy.wcs.WCS` The WCS of the input data, which is used to define the coordinates for which the boolean mask is defined. 
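
    Examples
    --------
    A minimal illustrative sketch (``data`` and ``mywcs`` are assumed to be
    an existing array and a matching WCS)::

        >>> import operator
        >>> mask = LazyComparisonMask(operator.gt, 0.5,
        ...                           data=data, wcs=mywcs)  # doctest: +SKIP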
""" def __init__(self, function, comparison_value, cube=None, data=None, wcs=None): self._function = function if cube is not None and (data is not None or wcs is not None): raise ValueError("Pass only cube or (data & wcs)") elif cube is not None: self._data = cube._data self._wcs = cube._wcs elif data is not None and wcs is not None: self._data = data self._wcs = wcs else: raise ValueError("Either a cube or (data & wcs) is required.") if (hasattr(comparison_value, 'shape') and not is_broadcastable_and_smaller(self._data.shape, comparison_value.shape)): raise ValueError("The data and the comparison value cannot " "be broadcast to match shape") self._comparison_value = comparison_value self._wcs_whitelist = set() def _include(self, data=None, wcs=None, view=()): self._validate_wcs(data, wcs) if hasattr(self._comparison_value, 'shape'): cv_view = view_of_subset(self._comparison_value.shape, self._data.shape, view) return self._function(self._data[view], self._comparison_value[cv_view]) else: return self._function(self._data[view], self._comparison_value) def __getitem__(self, view): if hasattr(self._comparison_value, 'shape'): cv_view = view_of_subset(self._comparison_value.shape, self._data.shape, view) return LazyComparisonMask(self._function, data=self._data[view], comparison_value=self._comparison_value[cv_view], wcs=wcs_utils.slice_wcs(self._wcs, view)) else: return LazyComparisonMask(self._function, data=self._data[view], comparison_value=self._comparison_value, wcs=wcs_utils.slice_wcs(self._wcs, view)) def with_spectral_unit(self, unit, velocity_convention=None, rest_value=None): """ Get a LazyComparisonMask copy with a WCS in the modified unit """ newwcs = self._get_new_wcs(unit, velocity_convention, rest_value) newmask = LazyComparisonMask(self._function, data=self._data, comparison_value=self._comparison_value, wcs=newwcs) return newmask class FunctionMask(MaskBase): """ A mask defined by a function that is evaluated at run-time using the data passed to the mask. This function differs from :class:`LazyMask` in the arguments which are passed to the function. FunctionMasks receive an array, wcs object, and view, whereas LazyMasks receive pre-sliced views into an array specified at mask-creation time. Parameters ---------- function : callable The function to evaluate the mask. The call signature should be ``function(data, wcs, slice)`` where ``data`` and ``wcs`` are the arguments that get passed to e.g. ``include``, ``exclude``, ``_filled``, and ``_flattened``. The function should return a boolean array, where `True` values indicate that which pixels are valid / unaffected by masking. 
""" def __init__(self, function): self._function = function def _validate_wcs(self, data, wcs): pass def _include(self, data=None, wcs=None, view=()): result = self._function(data, wcs, view) if result.shape != data[view].shape: raise ValueError("Function did not return mask with correct shape - expected {0}, got {1}".format(data[view].shape, result.shape)) return result def __getitem__(self, slice): return self def with_spectral_unit(self, unit, velocity_convention=None, rest_value=None): """ Functional masks do not have WCS defined, so this simply returns a copy of the current mask in order to be consistent with ``with_spectral_unit`` from other Masks """ return FunctionMask(self._function) spectral-cube-0.3.1/spectral_cube/np_compat.py0000644000077000000240000000141412643464660021445 0ustar adamstaff00000000000000from __future__ import print_function, absolute_import, division import numpy as np from distutils.version import StrictVersion def allbadtonan(function): """ Wrapper of numpy's nansum etc.: for <=1.8, just return the function's results. For >=1.9, any axes with all-nan values will have all-nan outputs in the collapsed version """ def f(data, axis=None): result = function(data, axis=axis) if StrictVersion(np.__version__) >= StrictVersion('1.9.0'): if axis is None: if np.all(np.isnan(data)): return np.nan else: return result nans = np.all(np.isnan(data), axis=axis) result[nans] = np.nan return result return f spectral-cube-0.3.1/spectral_cube/spectral_axis.py0000644000077000000240000003650612643464660022340 0ustar adamstaff00000000000000from __future__ import print_function, absolute_import, division from astropy import wcs from astropy import units as u from astropy import constants import warnings def _parse_velocity_convention(vc): if vc in (u.doppler_radio, 'radio', 'RADIO', 'VRAD', 'F', 'FREQ'): return u.doppler_radio elif vc in (u.doppler_optical, 'optical', 'OPTICAL', 'VOPT', 'W', 'WAVE'): return u.doppler_optical elif vc in (u.doppler_relativistic, 'relativistic', 'RELATIVE', 'VREL', 'speed', 'V', 'VELO'): return u.doppler_relativistic # These are the only linear transformations allowed LINEAR_CTYPES = {u.doppler_optical: 'VOPT', u.doppler_radio: 'VRAD', u.doppler_relativistic: 'VELO'} LINEAR_CTYPE_CHARS = {u.doppler_optical: 'W', u.doppler_radio: 'F', u.doppler_relativistic: 'V'} ALL_CTYPES = {'speed': LINEAR_CTYPES, 'frequency': 'FREQ', 'length': 'WAVE'} CTYPE_TO_PHYSICALTYPE = {'WAVE': 'length', 'AIR': 'air wavelength', 'AWAV': 'air wavelength', 'FREQ': 'frequency', 'VELO': 'speed', 'VRAD': 'speed', 'VOPT': 'speed', } CTYPE_CHAR_TO_PHYSICALTYPE = {'W': 'length', 'A': 'air wavelength', 'F': 'frequency', 'V': 'speed'} CTYPE_TO_PHYSICALTYPE.update(CTYPE_CHAR_TO_PHYSICALTYPE) PHYSICAL_TYPE_TO_CTYPE = dict([(v,k) for k,v in CTYPE_CHAR_TO_PHYSICALTYPE.items()]) PHYSICAL_TYPE_TO_CHAR = {'speed': 'V', 'frequency': 'F', 'length': 'W'} # Used to indicate the intial / final sampling system WCS_UNIT_DICT = {'F': u.Hz, 'W': u.m, 'V': u.m/u.s} PHYS_UNIT_DICT = {'length': u.m, 'frequency': u.Hz, 'speed': u.m/u.s} LINEAR_CUNIT_DICT = {'VRAD': u.Hz, 'VOPT': u.m, 'FREQ': u.Hz, 'WAVE': u.m, 'VELO': u.m/u.s, 'AWAV': u.m} LINEAR_CUNIT_DICT.update(WCS_UNIT_DICT) def unit_from_header(header): """ Retrieve the spectral unit from a header """ if 'CUNIT3' in header: return u.Unit(header['CUNIT3']) def wcs_unit_scale(unit): """ Determine the appropriate scaling factor to get to the equivalent WCS unit """ for wu in WCS_UNIT_DICT.values(): if wu.is_equivalent(unit): return wu.to(unit) def 
determine_vconv_from_ctype(ctype): """ Given a CTYPE, say what velocity convention it is associated with, i.e. what unit the velocity is linearly proportional to Parameters ---------- ctype : str The spectral CTYPE """ if len(ctype) < 5: return _parse_velocity_convention(ctype) elif len(ctype) == 8: return _parse_velocity_convention(ctype[7]) else: raise ValueError("A valid ctype must either have 4 or 8 characters.") def determine_ctype_from_vconv(ctype, unit, velocity_convention=None): """ Given a CTYPE describing the current WCS and an output unit and velocity convention, determine the appropriate output CTYPE Examples -------- >>> determine_ctype_from_vconv('VELO-F2V', u.Hz) 'FREQ' >>> determine_ctype_from_vconv('VELO-F2V', u.m) 'WAVE-F2W' >>> determine_ctype_from_vconv('FREQ', u.m/u.s) # doctest: +SKIP ... ValueError: A velocity convention must be specified >>> determine_ctype_from_vconv('FREQ', u.m/u.s, velocity_convention=u.doppler_radio) 'VRAD' >>> determine_ctype_from_vconv('FREQ', u.m/u.s, velocity_convention=u.doppler_optical) 'VOPT-F2W' >>> determine_ctype_from_vconv('FREQ', u.m/u.s, velocity_convention=u.doppler_relativistic) 'VELO-F2V' """ unit = u.Unit(unit) if len(ctype) > 4: in_physchar = ctype[5] else: lin_cunit = LINEAR_CUNIT_DICT[ctype] in_physchar = PHYSICAL_TYPE_TO_CHAR[lin_cunit.physical_type] if unit.physical_type == 'speed': if velocity_convention is None and ctype[0] == 'V': # Special case: velocity <-> velocity doesn't care about convention return ctype elif velocity_convention is None: raise ValueError('A velocity convention must be specified') vcin = _parse_velocity_convention(ctype[:4]) vcout = _parse_velocity_convention(velocity_convention) if vcin == vcout: return LINEAR_CTYPES[vcout] else: return "{type}-{s1}2{s2}".format(type=LINEAR_CTYPES[vcout], s1=in_physchar, s2=LINEAR_CTYPE_CHARS[vcout]) else: in_phystype = CTYPE_TO_PHYSICALTYPE[in_physchar] if in_phystype == unit.physical_type: # Linear case return ALL_CTYPES[in_phystype] else: # Nonlinear case out_physchar = PHYSICAL_TYPE_TO_CTYPE[unit.physical_type] return "{type}-{s1}2{s2}".format(type=ALL_CTYPES[unit.physical_type], s1=in_physchar, s2=out_physchar) def get_rest_value_from_wcs(mywcs): if mywcs.wcs.restfrq: ref_value = mywcs.wcs.restfrq*u.Hz return ref_value elif mywcs.wcs.restwav: ref_value = mywcs.wcs.restwav*u.m return ref_value def convert_spectral_axis(mywcs, outunit, out_ctype, rest_value=None): """ Convert a spectral axis from its unit to a specified out unit with a given output ctype Only VACUUM units are supported (not air) Process: 1. Convert the input unit to its equivalent linear unit 2. Convert the input linear unit to the output linear unit 3. Convert the output linear unit to the output unit """ # If the WCS includes a rest frequency/wavelength, convert it to frequency # or wavelength first. 
This allows the possibility of changing the rest # frequency wcs_rv = get_rest_value_from_wcs(mywcs) inunit = u.Unit(mywcs.wcs.cunit[mywcs.wcs.spec]) outunit = u.Unit(outunit) # If wcs_rv is set and speed -> speed, then we're changing the reference # location and we need to convert to meters or Hz first if (inunit.physical_type == 'speed' and outunit.physical_type == 'speed' and wcs_rv is not None): mywcs = convert_spectral_axis(mywcs, wcs_rv.unit, ALL_CTYPES[wcs_rv.unit.physical_type], rest_value=wcs_rv) inunit = u.Unit(mywcs.wcs.cunit[mywcs.wcs.spec]) elif (inunit.physical_type == 'speed' and outunit.physical_type == 'speed' and wcs_rv is None): # If there is no reference change, we want an identical WCS, since # WCS doesn't know about units *at all* newwcs = mywcs.deepcopy() return newwcs #crval_out = (mywcs.wcs.crval[mywcs.wcs.spec] * inunit).to(outunit) #cdelt_out = (mywcs.wcs.cdelt[mywcs.wcs.spec] * inunit).to(outunit) #newwcs.wcs.cdelt[newwcs.wcs.spec] = cdelt_out.value #newwcs.wcs.cunit[newwcs.wcs.spec] = cdelt_out.unit.to_string(format='fits') #newwcs.wcs.crval[newwcs.wcs.spec] = crval_out.value #newwcs.wcs.ctype[newwcs.wcs.spec] = out_ctype #return newwcs in_spec_ctype = mywcs.wcs.ctype[mywcs.wcs.spec] # Check whether we need to convert the rest value first ref_value = None if outunit.physical_type == 'speed': if rest_value is None: rest_value = wcs_rv if rest_value is None: raise ValueError("If converting from wavelength/frequency to speed, " "a reference wavelength/frequency is required.") ref_value = rest_value.to(u.Hz, u.spectral()) elif inunit.physical_type == 'speed': # The rest frequency and wavelength should be equivalent if rest_value is not None: ref_value = rest_value elif wcs_rv is not None: ref_value = wcs_rv else: raise ValueError("If converting from speed to wavelength/frequency, " "a reference wavelength/frequency is required.") # If the input unit is not linearly sampled, its linear equivalent will be # the 8th character in the ctype, and the linearly-sampled ctype will be # the 6th character # e.g.: VOPT-F2V lin_ctype = (in_spec_ctype[7] if len(in_spec_ctype) > 4 else in_spec_ctype[:4]) lin_cunit = (LINEAR_CUNIT_DICT[lin_ctype] if lin_ctype in LINEAR_CUNIT_DICT else mywcs.wcs.cunit[mywcs.wcs.spec]) in_vcequiv = _parse_velocity_convention(in_spec_ctype[:4]) out_ctype_conv = out_ctype[7] if len(out_ctype) > 4 else out_ctype[:4] if CTYPE_TO_PHYSICALTYPE[out_ctype_conv] == 'air wavelength': raise NotImplementedError("Conversion to air wavelength is not supported.") out_lin_cunit = (LINEAR_CUNIT_DICT[out_ctype_conv] if out_ctype_conv in LINEAR_CUNIT_DICT else outunit) out_vcequiv = _parse_velocity_convention(out_ctype_conv) # Load the input values crval_in = (mywcs.wcs.crval[mywcs.wcs.spec] * inunit) cdelt_in = (mywcs.wcs.cdelt[mywcs.wcs.spec] * inunit) if in_spec_ctype == 'AWAV': warnings.warn("Support for air wavelengths is experimental and only " "works in the forward direction (air->vac, not vac->air).") cdelt_in = air_to_vac_deriv(crval_in) * cdelt_in crval_in = air_to_vac(crval_in) in_spec_ctype = 'WAVE' # 1. Convert input to input, linear if in_vcequiv is not None and ref_value is not None: crval_lin1 = crval_in.to(lin_cunit, u.spectral() + in_vcequiv(ref_value)) else: crval_lin1 = crval_in.to(lin_cunit, u.spectral()) cdelt_lin1 = cdelt_derivative(crval_in, cdelt_in, # equivalent: inunit.physical_type intype=CTYPE_TO_PHYSICALTYPE[in_spec_ctype[:4]], outtype=lin_cunit.physical_type, rest=ref_value, linear=True ) # 2. 
Convert input, linear to output, linear if ref_value is None: if in_vcequiv is not None: pass # consider raising a ValueError here; not clear if this is valid crval_lin2 = crval_lin1.to(out_lin_cunit, u.spectral()) else: # at this stage, the transition can ONLY be relativistic, because the V # frame (as a linear frame) is only defined as "apparent velocity" crval_lin2 = crval_lin1.to(out_lin_cunit, u.spectral() + u.doppler_relativistic(ref_value)) # For cases like VRAD <-> FREQ and VOPT <-> WAVE, this will be linear too: linear_middle = in_vcequiv == out_vcequiv cdelt_lin2 = cdelt_derivative(crval_lin1, cdelt_lin1, intype=lin_cunit.physical_type, outtype=CTYPE_TO_PHYSICALTYPE[out_ctype_conv], rest=ref_value, linear=linear_middle) # 3. Convert output, linear to output if out_vcequiv is not None and ref_value is not None: crval_out = crval_lin2.to(outunit, out_vcequiv(ref_value) + u.spectral()) #cdelt_out = cdelt_lin2.to(outunit, out_vcequiv(ref_value) + u.spectral()) cdelt_out = cdelt_derivative(crval_lin2, cdelt_lin2, intype=CTYPE_TO_PHYSICALTYPE[out_ctype_conv], outtype=outunit.physical_type, rest=ref_value, linear=True ).to(outunit) else: crval_out = crval_lin2.to(outunit, u.spectral()) cdelt_out = cdelt_lin2.to(outunit, u.spectral()) if crval_out.unit != cdelt_out.unit: # this should not be possible, but it's a sanity check raise ValueError("Conversion failed: the units of cdelt and crval don't match.") # A cdelt of 0 would be meaningless if cdelt_out.value == 0: raise ValueError("Conversion failed: the output CDELT would be 0.") newwcs = mywcs.deepcopy() newwcs.wcs.cdelt[newwcs.wcs.spec] = cdelt_out.value newwcs.wcs.cunit[newwcs.wcs.spec] = cdelt_out.unit.to_string(format='fits') newwcs.wcs.crval[newwcs.wcs.spec] = crval_out.value newwcs.wcs.ctype[newwcs.wcs.spec] = out_ctype if rest_value is not None: if rest_value.unit.physical_type == 'frequency': newwcs.wcs.restfrq = rest_value.to(u.Hz).value elif rest_value.unit.physical_type == 'length': newwcs.wcs.restwav = rest_value.to(u.m).value else: raise ValueError("Rest Value was specified, but not in frequency or length units") return newwcs def cdelt_derivative(crval, cdelt, intype, outtype, linear=False, rest=None): if intype == outtype: return cdelt elif set((outtype,intype)) == set(('length','frequency')): # Symmetric equations! 
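        # nu = c / lambda implies d(nu)/d(lambda) = -c / lambda**2, and
        # symmetrically d(lambda)/d(nu) = -c / nu**2, so the same expression
        # converts the channel width in either direction.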
        return (-constants.c / crval**2 * cdelt).to(PHYS_UNIT_DICT[outtype])
    elif outtype in ('frequency','length') and intype == 'speed':
        if linear:
            numer = cdelt * rest.to(PHYS_UNIT_DICT[outtype], u.spectral())
            denom = constants.c
        else:
            numer = cdelt * constants.c * rest.to(PHYS_UNIT_DICT[outtype],
                                                  u.spectral())
            denom = (constants.c + crval)*(constants.c**2 - crval**2)**0.5
        if outtype == 'frequency':
            return (-numer/denom).to(PHYS_UNIT_DICT[outtype], u.spectral())
        else:
            return (numer/denom).to(PHYS_UNIT_DICT[outtype], u.spectral())
    elif outtype == 'speed' and intype in ('frequency','length'):
        if linear:
            numer = cdelt * constants.c
            denom = rest.to(PHYS_UNIT_DICT[intype], u.spectral())
        else:
            numer = 4 * constants.c * crval * rest.to(crval.unit,
                                                      u.spectral())**2 * cdelt
            denom = (crval**2 + rest.to(crval.unit, u.spectral())**2)**2
        if intype == 'frequency':
            return (-numer/denom).to(PHYS_UNIT_DICT[outtype], u.spectral())
        else:
            return (numer/denom).to(PHYS_UNIT_DICT[outtype], u.spectral())
    elif intype == 'air wavelength':
        raise TypeError("Air wavelength should be converted to vacuum earlier.")
    elif outtype == 'air wavelength':
        raise TypeError("Conversion to air wavelength not supported.")
    else:
        raise ValueError("Invalid in/out frames")


def air_to_vac(wavelength):
    """
    Implements the air to vacuum wavelength conversion described in Eqn 65 of
    Greisen 2006
    """
    wlum = wavelength.to(u.um).value
    return (1+1e-6*(287.6155+1.62887/wlum**2+0.01360/wlum**4)) * wavelength

def vac_to_air(wavelength):
    """
    Greisen 2006 reports that the error in naively inverting Eqn 65 is less
    than 10^-9 and therefore acceptable. This is therefore Eqn 67.
    """
    wlum = wavelength.to(u.um).value
    nl = (1+1e-6*(287.6155+1.62887/wlum**2+0.01360/wlum**4))
    return wavelength/nl

def air_to_vac_deriv(wavelength):
    """
    Eqn 66 of Greisen 2006: the derivative of the air-to-vacuum conversion
    with respect to (air) wavelength
    """
    wlum = wavelength.to(u.um).value
    return (1+1e-6*(287.6155 - 1.62887/wlum**2 - 0.04080/wlum**4))
spectral-cube-0.3.1/spectral_cube/spectral_cube.py0000644000077000000240000024741312654106326022305 0ustar adamstaff00000000000000"""
A class to represent a 3-d position-position-velocity spectral cube.
"""

from __future__ import print_function, absolute_import, division

import warnings
from functools import wraps
import operator
import sys
import re

from astropy import units as u
from astropy.extern import six
from astropy.io.fits import PrimaryHDU, ImageHDU, Header, Card, HDUList
from astropy.utils.console import ProgressBar
from astropy import log
from astropy import wcs
import numpy as np

from . import cube_utils
from . import wcs_utils
from . import spectral_axis
from .masks import (LazyMask, LazyComparisonMask, BooleanArrayMask,
                    MaskBase, is_broadcastable_and_smaller)
from .io.core import determine_format
from .ytcube import ytCube
from .lower_dimensional_structures import Projection, Slice, OneDSpectrum

from distutils.version import StrictVersion

__all__ = ['SpectralCube']

# apply_everywhere, world: do not have a valid cube to test on
__doctest_skip__ = ['SpectralCube.world', 'SpectralCube._apply_everywhere']

try:  # TODO replace with six.py
    xrange
except NameError:
    xrange = range

try:
    from scipy import ndimage
    scipyOK = True
except ImportError:
    scipyOK = False


DOPPLER_CONVENTIONS = {}
DOPPLER_CONVENTIONS['radio'] = u.doppler_radio
DOPPLER_CONVENTIONS['optical'] = u.doppler_optical
DOPPLER_CONVENTIONS['relativistic'] = u.doppler_relativistic


def cached(func):
    """
    Decorator to cache function calls
    """

    @wraps(func)
    def wrapper(self, *args):
        # The cache lives in the instance so that it gets garbage collected.
        # The lookup key must be (func, args): checking for ``func`` alone
        # would never match the stored key, so the cache would never hit.
        if (func, args) not in self._cache:
            self._cache[func, args] = func(self, *args)
        return self._cache[func, args]

    return wrapper


def warn_slow(function):

    @wraps(function)
    def wrapper(self, *args, **kwargs):
        if self._is_huge and not self.allow_huge_operations:
            raise ValueError("This function ({0}) requires loading the entire "
                             "cube into memory, and the cube is large ({1} "
                             "pixels), so by default we disable this operation. "
                             "To enable the operation, set "
                             "`cube.allow_huge_operations=True` and try again."
                             .format(str(function), self.size))
        elif not self._is_huge:
            # TODO: add check for whether cube has been loaded into memory
            warnings.warn("This function ({0}) requires loading the entire cube into "
                          "memory and may therefore be slow.".format(str(function)))
        return function(self, *args, **kwargs)

    return wrapper


_NP_DOC = """
Ignores excluded mask elements.

Parameters
----------
axis : int (optional)
   The axis to collapse, or None to perform a global aggregation
how : cube | slice | ray | auto
   How to compute the aggregation. All strategies give the same
   result, but certain strategies are more efficient depending
   on data size and layout. Cube/slice/ray iterate over
   decreasing subsets of the data, to conserve memory.
   Default='auto'
""".replace('\n', '\n        ')


def aggregation_docstring(func):
    @wraps(func)
    def wrapper(*args, **kwargs):
        return func(*args, **kwargs)
    wrapper.__doc__ += _NP_DOC
    return wrapper

# convenience structures to keep track of the reversed index
# conventions between WCS and numpy
np2wcs = {2: 0, 1: 1, 0: 2}


class SpectralCube(object):

    def __init__(self, data, wcs, mask=None, meta=None, fill_value=np.nan,
                 header=None, allow_huge_operations=False, read_beam=True):

        # Deal with metadata first because it can affect data reading
        self._meta = meta or {}

        # data must not be a quantity when stored in self._data
        if hasattr(data, 'unit'):
            # strip the unit so that it can be treated as cube metadata
            data = data.value

        # TODO: mask should be oriented? Or should we assume correctly
        # oriented here?
        self._data, self._wcs = cube_utils._orient(data, wcs)

        self._spectral_axis = None
        self._mask = mask  # specifies which elements to Nan/blank/ignore
                           # object or array-like object, given that WCS needs
                           # to be consistent with data?
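        # A sketch of direct construction (most users should instead use
        # ``SpectralCube.read``); assumes a 3-d array and a 3-axis WCS whose
        # third axis is spectral:
        #
        #     >>> import numpy as np
        #     >>> from astropy.wcs import WCS
        #     >>> w = WCS(naxis=3)
        #     >>> w.wcs.ctype = ['RA---TAN', 'DEC--TAN', 'FREQ']
        #     >>> w.wcs.cunit = ['deg', 'deg', 'Hz']
        #     >>> cube = SpectralCube(data=np.zeros((5, 4, 3)), wcs=w)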
#assert mask._wcs == self._wcs self._fill_value = fill_value self._header = Header() if header is None else header if not isinstance(self._header, Header): raise TypeError("If a header is given, it must be a fits.Header") # Beam loading must happen *after* WCS is read if read_beam: self._try_load_beam(header) if 'BUNIT' in self._meta: # special case: CASA (sometimes) makes non-FITS-compliant jy/beam headers bunit = re.sub("\s", "", self._meta['BUNIT'].lower()) if bunit == 'jy/beam': self._unit = u.Jy if not read_beam: warnings.warn("Units are in Jy/beam. Attempting to parse " "header for beam information.") self._try_load_beam(header) if hasattr(self, 'beam'): warnings.warn("Units were Jy/beam. The 'beam' is now " "stored in the .beam attribute, and the " "units are set to Jy") else: warnings.warn("Could not parse Jy/beam unit. Either " "you should install the radio_beam " "package or manually replace the units." " For now, the units are being interpreted " "as Jy.") else: try: self._unit = u.Unit(self._meta['BUNIT']) except ValueError: warnings.warn("Could not parse unit {0}".format(self._meta['BUNIT'])) self._unit = None elif hasattr(data, 'unit'): self._unit = data.unit else: self._unit = None # We don't pass the spectral unit via the initializer since the user # should be using ``with_spectral_unit`` if they want to set it. # However, we do want to keep track of what units the spectral axis # should be returned in, otherwise astropy's WCS can change the units, # e.g. km/s -> m/s. # This can be overridden with Header below self._spectral_unit = u.Unit(self._wcs.wcs.cunit[2]) if spectral_axis.unit_from_header(self._header) is not None: self._spectral_unit = spectral_axis.unit_from_header(self._header) self._spectral_scale = spectral_axis.wcs_unit_scale(self._spectral_unit) self.allow_huge_operations = allow_huge_operations self._cache = {} @property def _is_huge(self): return cube_utils.is_huge(self) def _new_cube_with(self, data=None, wcs=None, mask=None, meta=None, fill_value=None, spectral_unit=None, unit=None): data = self._data if data is None else data if unit is None and hasattr(data, 'unit'): if data.unit != self.unit: raise u.UnitsError("New data unit '{0}' does not" " match cube unit '{1}'. You can" " override this by specifying the" " `unit` keyword." .format(data.unit, self.unit)) unit = data.unit elif unit is not None: # convert string units to Units if not isinstance(unit, u.Unit): unit = u.Unit(unit) if hasattr(data, 'unit'): if u.Unit(unit) != data.unit: raise u.UnitsError("The specified new cube unit '{0}' " "does not match the input unit '{1}'." .format(unit, data.unit)) else: data = u.Quantity(data, unit=unit, copy=False) wcs = self._wcs if wcs is None else wcs mask = self._mask if mask is None else mask if meta is None: meta = {} meta.update(self._meta) if unit is not None: meta['BUNIT'] = unit.to_string(format='FITS') fill_value = self._fill_value if fill_value is None else fill_value spectral_unit = self._spectral_unit if spectral_unit is None else spectral_unit cube = SpectralCube(data=data, wcs=wcs, mask=mask, meta=meta, fill_value=fill_value, header=self._header, allow_huge_operations=self.allow_huge_operations) cube._spectral_unit = spectral_unit cube._spectral_scale = spectral_axis.wcs_unit_scale(spectral_unit) return cube def _try_load_beam(self, header): try: from radio_beam import Beam except ImportError: warnings.warn("radio_beam is not installed. 
No beam " "can be created.") try: self.beam = Beam.from_fits_header(header) self._meta['beam'] = self.beam self.pixels_per_beam = (self.beam.sr / (wcs.utils.proj_plane_pixel_area(self.wcs) * u.deg**2)).to(u.dimensionless_unscaled).value except Exception as ex: warnings.warn("Could not parse beam information from header." " Exception was: {0}".format(ex.__repr__())) @property def unit(self): """ The flux unit """ if self._unit: return self._unit else: return u.dimensionless_unscaled @property def shape(self): """ Length of cube along each axis """ return self._data.shape @property def size(self): """ Number of elements in the cube """ return self._data.size @property def base(self): """ The data type 'base' of the cube - useful for, e.g., joblib """ return self._data.base def __len__(self): return self.shape[0] @property def ndim(self): """ Dimensionality of the data """ return self._data.ndim def __repr__(self): s = "SpectralCube with shape={0}".format(self.shape) if self.unit is u.dimensionless_unscaled: s += ":\n" else: s += " and unit={0}:\n".format(self.unit) s += (" n_x: {0:6d} type_x: {1:8s} unit_x: {2:5s}" " range: {3:12.6f}:{4:12.6f}\n".format(self.shape[2], self.wcs.wcs.ctype[0], self.wcs.wcs.cunit[0], self.longitude_extrema[0], self.longitude_extrema[1], )) s += (" n_y: {0:6d} type_y: {1:8s} unit_y: {2:5s}" " range: {3:12.6f}:{4:12.6f}\n".format(self.shape[1], self.wcs.wcs.ctype[1], self.wcs.wcs.cunit[1], self.latitude_extrema[0], self.latitude_extrema[1], )) s += (" n_s: {0:6d} type_s: {1:8s} unit_s: {2:5s}" " range: {3:12.3f}:{4:12.3f}".format(self.shape[0], self.wcs.wcs.ctype[2], self.wcs.wcs.cunit[2], self.spectral_extrema[0], self.spectral_extrema[1], )) return s @property @cached def spectral_extrema(self): _spectral_min = self.spectral_axis.min() _spectral_max = self.spectral_axis.max() return _spectral_min, _spectral_max @property @cached def world_extrema(self): lat,lon = self.spatial_coordinate_map _lon_min = lon.min() _lon_max = lon.max() _lat_min = lat.min() _lat_max = lat.max() return ((_lon_min, _lon_max), (_lat_min, _lat_max)) @property @cached def longitude_extrema(self): return self.world_extrema[0] @property @cached def latitude_extrema(self): return self.world_extrema[1] def apply_numpy_function(self, function, fill=np.nan, reduce=True, how='auto', projection=False, unit=None, check_endian=False, progressbar=False, **kwargs): """ Apply a numpy function to the cube Parameters ---------- function : `numpy.ufunc` A numpy ufunc to apply to the cube fill : float The fill value to use on the data reduce : bool reduce indicates whether this is a reduce-like operation, that can be accumulated one slice at a time. sum/max/min are like this. argmax/argmin/stddev are not how : cube | slice | ray | auto How to compute the moment. All strategies give the same result, but certain strategies are more efficient depending on data size and layout. Cube/slice/ray iterate over decreasing subsets of the data, to conserve memory. Default='auto' projection : bool Return a `Projection` if the resulting array is 2D or a OneDProjection if the resulting array is 1D and the sum is over both spatial axes? unit : None or `astropy.units.Unit` The unit to include for the output array. For example, `SpectralCube.max` calls `SpectralCube.apply_numpy_function(np.max, unit=self.unit)`, inheriting the unit from the original cube. However, for other numpy functions, e.g. `numpy.argmax`, the return is an index and therefore unitless. 
check_endian : bool A flag to check the endianness of the data before applying the function. This is only needed for optimized functions, e.g. those in the `bottleneck` package. progressbar : bool Show a progressbar while iterating over the slices through the cube? kwargs : dict Passed to the numpy function. Returns ------- result : `Projection` or `~astropy.units.Quantity` or float The result depends on the value of ``axis``, ``projection``, and ``unit``. If ``axis`` is None, the return will be a scalar with or without units. If axis is an integer, the return will be a `Projection` if ``projection`` is set """ # leave axis in kwargs to avoid overriding numpy defaults, e.g. if the # default is axis=-1, we don't want to force it to be axis=None by # specifying that in the function definition axis = kwargs.get('axis', None) if how == 'auto': strategy = cube_utils.iterator_strategy(self, axis) else: strategy = how out = None if strategy == 'slice' and reduce: try: out = self._reduce_slicewise(function, fill, check_endian, progressbar=progressbar, **kwargs) except NotImplementedError: pass elif how not in ['auto', 'cube']: warnings.warn("Cannot use how=%s. Using how=cube" % how) if out is None: out = function(self._get_filled_data(fill=fill, check_endian=check_endian), **kwargs) if axis is None: # return is scalar if unit is not None: return u.Quantity(out, unit=unit) else: return out elif projection and reduce: meta = {'collapse_axis': axis} meta.update(self._meta) if hasattr(axis, '__len__') and len(axis) == 2: # if operation is over two spatial dims if set(axis) == set((1,2)): new_wcs = self._wcs.sub([wcs.WCSSUB_SPECTRAL]) header = self._nowcs_header return OneDSpectrum(value=out, wcs=new_wcs, copy=False, unit=unit, header=header, meta=meta) else: return out else: new_wcs = wcs_utils.drop_axis(self._wcs, np2wcs[axis]) header = self._nowcs_header return Projection(out, copy=False, wcs=new_wcs, meta=meta, unit=unit, header=header) else: return out def _reduce_slicewise(self, function, fill, check_endian, includemask=False, progressbar=False, **kwargs): """ Compute a numpy aggregation by grabbing one slice at a time """ ax = kwargs.pop('axis', None) full_reduce = ax is None ax = ax or 0 if isinstance(ax, tuple): raise NotImplementedError("Multi-axis reductions are not " "supported with how='slice'") if includemask: planes = self._iter_mask_slices(ax) else: planes = self._iter_slices(ax, fill=fill, check_endian=check_endian) result = next(planes) if progressbar: progressbar = ProgressBar(self.shape[ax]) pbu = progressbar.update else: pbu = lambda: True for plane in planes: result = function(np.dstack((result, plane)), axis=2, **kwargs) pbu() if full_reduce: result = function(result) return result def get_mask_array(self): """ Convert the mask to a boolean numpy array """ return self._mask.include(data=self._data, wcs=self._wcs) @property def mask(self): """ The underlying mask """ return self._mask def _naxes_dropped(self, view): """ Determine how many axes are being selected given a view. (1,2) -> 2 None -> 3 1 -> 1 2 -> 1 """ if hasattr(view,'__len__'): return len(view) elif view is None: return 3 else: return 1 @aggregation_docstring def sum(self, axis=None, how='auto'): """ Return the sum of the cube, optionally over an axis. 
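
        Example (a sketch, assuming ``cube`` is an existing SpectralCube):

        >>> total = cube.sum()            # scalar Quantity  # doctest: +SKIP
        >>> mom0_like = cube.sum(axis=0)  # 2D Projection    # doctest: +SKIP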
""" from .np_compat import allbadtonan projection = self._naxes_dropped(axis) in (1,2) return self.apply_numpy_function(allbadtonan(np.nansum), fill=np.nan, how=how, axis=axis, unit=self.unit, projection=projection) @aggregation_docstring def mean(self, axis=None, how='cube'): """ Return the mean of the cube, optionally over an axis. """ projection = self._naxes_dropped(axis) in (1,2) if how == 'slice': counts = self._count_nonzero_slicewise(axis=axis) ttl = self.apply_numpy_function(np.nansum, fill=np.nan, how=how, axis=axis, unit=None, projection=False) out = ttl / counts if projection: new_wcs = wcs_utils.drop_axis(self._wcs, np2wcs[axis]) meta = {'collapse_axis': axis} meta.update(self._meta) return Projection(out, copy=False, wcs=new_wcs, meta=meta, unit=self.unit, header=self._nowcs_header) else: return out return self.apply_numpy_function(np.nanmean, fill=np.nan, how=how, axis=axis, unit=self.unit, projection=projection) def _count_nonzero_slicewise(self, axis=None): """ Count the number of finite pixels along an axis slicewise. This is a helper function for the mean and std deviation slicewise iterators. """ counts = self.apply_numpy_function(np.sum, fill=np.nan, how='slice', axis=axis, unit=None, projection=False, includemask=True) return counts @aggregation_docstring def std(self, axis=None, how='cube', ddof=0): """ Return the standard deviation of the cube, optionally over an axis. """ projection = self._naxes_dropped(axis) in (1,2) if how == 'slice': if axis is None: raise NotImplementedError("The overall standard deviation " "cannot be computed in a slicewise " "manner. Please use a " "different strategy.") counts = self._count_nonzero_slicewise(axis=axis) ttl = self.apply_numpy_function(np.nansum, fill=np.nan, how='slice', axis=axis, unit=None, projection=False) # Equivalent, but with more overhead: # ttl = self.sum(axis=axis, how='slice').value mean = ttl/counts planes = self._iter_slices(axis, fill=np.nan, check_endian=False) result = (next(planes)-mean)**2 for plane in planes: result = np.nansum(np.dstack((result, (plane-mean)**2)), axis=2) out = (result/(counts-ddof))**0.5 if projection: new_wcs = wcs_utils.drop_axis(self._wcs, np2wcs[axis]) meta = {'collapse_axis': axis} meta.update(self._meta) return Projection(out, copy=False, wcs=new_wcs, meta=meta, unit=self.unit, header=self._nowcs_header) else: return out # standard deviation cannot be computed as a trivial step-by-step # process. There IS a one-pass algorithm for std dev, but it is not # implemented, so we must force cube here. We could and should also # implement raywise reduction return self.apply_numpy_function(np.nanstd, fill=np.nan, how=how, axis=axis, unit=self.unit, projection=projection) @aggregation_docstring def max(self, axis=None, how='auto'): """ Return the maximum data value of the cube, optionally over an axis. """ projection = self._naxes_dropped(axis) in (1,2) return self.apply_numpy_function(np.nanmax, fill=np.nan, how=how, axis=axis, unit=self.unit, projection=projection) @aggregation_docstring def min(self, axis=None, how='auto'): """ Return the minimum data value of the cube, optionally over an axis. """ projection = self._naxes_dropped(axis) in (1,2) return self.apply_numpy_function(np.nanmin, fill=np.nan, how=how, axis=axis, unit=self.unit, projection=projection) @aggregation_docstring def argmax(self, axis=None, how='auto'): """ Return the index of the maximum data value. The return value is arbitrary if all pixels along ``axis`` are excluded from the mask. 
""" return self.apply_numpy_function(np.nanargmax, fill=-np.inf, reduce=False, projection=False, how=how, axis=axis) @aggregation_docstring def argmin(self, axis=None, how='auto'): """ Return the index of the minimum data value. The return value is arbitrary if all pixels along ``axis`` are excluded from the mask """ return self.apply_numpy_function(np.nanargmin, fill=np.inf, reduce=False, projection=False, how=how, axis=axis) def chunked(self, chunksize=1000): """ Not Implemented. Iterate over chunks of valid data """ raise NotImplementedError() def _get_flat_shape(self, axis): """ Get the shape of the array after flattening along an axis """ iteraxes = [0, 1, 2] iteraxes.remove(axis) # x,y are defined as first,second dim to iterate over # (not x,y in pixel space...) nx = self.shape[iteraxes[0]] ny = self.shape[iteraxes[1]] return nx, ny @warn_slow def _apply_everywhere(self, function, *args): """ Return a new cube with ``function`` applied to all pixels Private because this doesn't have an obvious and easy-to-use API Examples -------- >>> cube = SpectralCube.read('xyv.fits') >>> newcube = cube.apply_everywhere(np.add, 0.5*u.Jy) """ try: test_result = function(np.ones([1,1,1])*self.unit, *args) # First, check that function returns same # of dims? assert test_result.ndim == 3,"Output is not 3-dimensional" except Exception as ex: raise AssertionError("Function could not be applied to a simple " "cube. The error was: {0}".format(ex)) data = function(u.Quantity(self._get_filled_data(fill=self._fill_value), self.unit, copy=False), *args) return self._new_cube_with(data=data, unit=data.unit) @warn_slow def _cube_on_cube_operation(self, function, cube, equivalencies=[]): """ Apply an operation between two cubes. Inherits the metadata of the left cube. """ assert cube.shape == self.shape if not self.unit.is_equivalent(cube.unit, equivalencies=equivalencies): raise u.UnitsError("{0} is not equivalent to {1}" .format(self.unit, cube.unit)) if not wcs_utils.check_equality(self.wcs, cube.wcs, warn_missing=True): warnings.warn("Cube WCSs do not match, but their shapes do") try: test_result = function(np.ones([1,1,1])*self.unit, np.ones([1,1,1])*self.unit) # First, check that function returns same # of dims? assert test_result.shape == (1,1,1) except Exception as ex: raise AssertionError("Function {1} could not be applied to a " "pair of simple " "cube. The error was: {0}".format(ex, function)) cube = cube.to(self.unit) data = function(self._data, cube._data) try: # multiplication, division, etc. are valid inter-unit operations unit = function(self.unit, cube.unit) except TypeError: # addition, subtraction are not unit = self.unit return self._new_cube_with(data=data, unit=unit) def apply_function(self, function, axis=None, weights=None, unit=None, projection=False, progressbar=False, **kwargs): """ Apply a function to valid data along the specified axis or to the whole cube, optionally using a weight array that is the same shape (or at least can be sliced in the same way) Parameters ---------- function : function A function that can be applied to a numpy array. Does not need to be nan-aware axis : 1, 2, 3, or None The axis to operate along. If None, the return is scalar. weights : (optional) np.ndarray An array with the same shape (or slicing abilities/results) as the data cube unit : (optional) `~astropy.units.Unit` The unit of the output projection or value. Not all functions should return quantities with units. projection : bool Return a projection if the resulting array is 2D? 
progressbar : bool Show a progressbar while iterating over the slices/rays through the cube? Returns ------- result : `Projection` or `~astropy.units.Quantity` or float The result depends on the value of ``axis``, ``projection``, and ``unit``. If ``axis`` is None, the return will be a scalar with or without units. If axis is an integer, the return will be a `Projection` if ``projection`` is set """ if axis is None: out = function(self.flattened(), **kwargs) if unit is not None: return u.Quantity(out, unit=unit) else: return out # determine the output array shape nx, ny = self._get_flat_shape(axis) # allocate memory for output array out = np.empty([nx, ny]) * np.nan if progressbar: progressbar = ProgressBar(nx*ny) pbu = progressbar.update else: pbu = lambda: True # iterate over "lines of sight" through the cube for y, x, slc in self._iter_rays(axis): # acquire the flattened, valid data for the slice data = self.flattened(slc, weights=weights) if len(data) != 0: result = function(data, **kwargs) if hasattr(result, 'value'): # store result in array out[y, x] = result.value else: out[y, x] = result pbu() if projection and axis in (0,1,2): new_wcs = wcs_utils.drop_axis(self._wcs, np2wcs[axis]) meta = {'collapse_axis': axis} meta.update(self._meta) return Projection(out, copy=False, wcs=new_wcs, meta=meta, unit=unit, header=self._nowcs_header) else: return out def _iter_rays(self, axis=None): """ Iterate over view corresponding to lines-of-sight through a cube along the specified axis """ ny, nx = self._get_flat_shape(axis) for y in xrange(ny): for x in xrange(nx): # create length-1 view for each position slc = [slice(y, y + 1), slice(x, x + 1), ] # create a length-N slice (all-inclusive) along the selected axis slc.insert(axis, slice(None)) yield y, x, slc def _iter_slices(self, axis, fill=np.nan, check_endian=False): """ Iterate over the cube one slice at a time, replacing masked elements with fill """ view = [slice(None)] * 3 for x in range(self.shape[axis]): view[axis] = x yield self._get_filled_data(view=view, fill=fill, check_endian=check_endian) def _iter_mask_slices(self, axis): """ Iterate over the cube one slice at a time, replacing masked elements with fill """ view = [slice(None)] * 3 for x in range(self.shape[axis]): view[axis] = x yield self._mask._include(data=self._data, view=view, wcs=self._wcs) def flattened(self, slice=(), weights=None): """ Return a slice of the cube giving only the valid data (i.e., removing bad values) Parameters ---------- slice: 3-tuple A length-3 tuple of view (or any equivalent valid slice of a cube) weights: (optional) np.ndarray An array with the same shape (or slicing abilities/results) as the data cube """ data = self._mask._flattened(data=self._data, wcs=self._wcs, view=slice) if weights is not None: weights = self._mask._flattened(data=weights, wcs=self._wcs, view=slice) return u.Quantity(data * weights, self.unit, copy=False) else: return u.Quantity(data, self.unit, copy=False) def flattened_world(self, view=()): """ Retrieve the world coordinates corresponding to the extracted flattened version of the cube """ lon,lat,spec = self.world[view] spec = self._mask._flattened(data=spec, wcs=self._wcs, view=slice) lon = self._mask._flattened(data=lon, wcs=self._wcs, view=slice) lat = self._mask._flattened(data=lat, wcs=self._wcs, view=slice) return lat,lon,spec def median(self, axis=None, iterate_rays=False, **kwargs): """ Compute the median of an array, optionally along an axis. Ignores excluded mask elements. 
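
        Example (a sketch, assuming ``cube`` is an existing SpectralCube):

        >>> median_map = cube.median(axis=0)  # doctest: +SKIP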
Parameters ---------- axis : int (optional) The axis to collapse iterate_rays : bool Iterate over individual rays? This mode is slower but can save RAM costs, which may be extreme for large cubes Returns ------- med : ndarray The median """ try: from bottleneck import nanmedian bnok = True except ImportError: bnok = False # slicewise median is nonsense, must force how = 'cube' if bnok and not iterate_rays: log.debug("Using bottleneck nanmedian") result = self.apply_numpy_function(nanmedian, axis=axis, projection=True, unit=self.unit, how='cube', check_endian=True, **kwargs) elif hasattr(np, 'nanmedian') and not iterate_rays: log.debug("Using numpy nanmedian") result = self.apply_numpy_function(np.nanmedian, axis=axis, projection=True, unit=self.unit, how='cube',**kwargs) else: log.debug("Using numpy median iterating over rays") result = self.apply_function(np.median, projection=True, axis=axis, unit=self.unit, **kwargs) return result def percentile(self, q, axis=None, iterate_rays=False, **kwargs): """ Return percentiles of the data. Parameters ---------- q : float The percentile to compute axis : int, or None Which axis to compute percentiles over iterate_rays : bool Iterate over individual rays? This mode is slower but can save RAM costs, which may be extreme for large cubes """ if hasattr(np, 'nanpercentile') and not iterate_rays: result = self.apply_numpy_function(np.nanpercentile, q=q, axis=axis, projection=True, unit=self.unit, how='cube', **kwargs) else: result = self.apply_function(np.percentile, q=q, axis=axis, projection=True, unit=self.unit, **kwargs) return result def with_mask(self, mask, inherit_mask=True): """ Return a new SpectralCube instance that contains a composite mask of the current SpectralCube and the new ``mask``. Parameters ---------- mask : :class:`MaskBase` instance, or boolean numpy array The mask to apply. If a boolean array is supplied, it will be converted into a mask, assuming that True values indicate included elements. inherit_mask : bool (optional, default=True) If True, combines the provided mask with the mask currently attached to the cube Returns ------- new_cube : :class:`SpectralCube` A cube with the new mask applied. Notes ----- This operation returns a view into the data, and not a copy. 
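
        Example (a sketch, assuming ``cube`` is an existing SpectralCube in
        Jy): keep only the voxels brighter than 0.5 Jy:

        >>> from astropy import units as u  # doctest: +SKIP
        >>> bright = cube.with_mask(cube > 0.5 * u.Jy)  # doctest: +SKIP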
""" if isinstance(mask, np.ndarray): if not is_broadcastable_and_smaller(mask.shape, self._data.shape): raise ValueError("Mask shape is not broadcastable to data shape: " "%s vs %s" % (mask.shape, self._data.shape)) mask = BooleanArrayMask(mask, self._wcs) if self._mask is not None: return self._new_cube_with(mask=self._mask & mask if inherit_mask else mask) else: return self._new_cube_with(mask=mask) def __getitem__(self, view): # Need to allow self[:], self[:,:] if isinstance(view, (slice,int)): view = (view, slice(None), slice(None)) elif len(view) == 2: view = view + (slice(None),) elif len(view) > 3: raise IndexError("Too many indices") meta = {} meta.update(self._meta) meta['slice'] = [(s.start, s.stop, s.step) if hasattr(s,'start') else s for s in view] intslices = [2-ii for ii,s in enumerate(view) if not hasattr(s,'start')] if intslices: if len(intslices) > 1: if 2 in intslices: raise NotImplementedError("1D slices along non-spectral " "axes are not yet implemented.") newwcs = self._wcs.sub([a for a in (1,2,3) if a not in [x+1 for x in intslices]]) return OneDSpectrum(value=self._data[view], wcs=newwcs, copy=False, unit=self.unit, meta=meta) # only one element, so drop an axis newwcs = wcs_utils.drop_axis(self._wcs, intslices[0]) header = self._nowcs_header return Slice(value=self.filled_data[view], wcs=newwcs, copy=False, unit=self.unit, header=header, meta=meta) newmask = self._mask[view] if self._mask is not None else None return self._new_cube_with(data=self._data[view], wcs=wcs_utils.slice_wcs(self._wcs, view), mask=newmask, meta=meta) @property def unitless(self): """Return a copy of self with unit set to None""" newcube = self._new_cube_with() newcube._unit = None return newcube @property def fill_value(self): """ The replacement value used by :meth:`filled_data`. fill_value is immutable; use :meth:`with_fill_value` to create a new cube with a different fill value. """ return self._fill_value @cube_utils.slice_syntax def filled_data(self, view): """ Return a portion of the data array, with excluded mask values replaced by `fill_value`. Returns ------- data : Quantity The masked data. """ return u.Quantity(self._get_filled_data(view, fill=self._fill_value), self.unit, copy=False) def with_fill_value(self, fill_value): """ Create a new :class:`SpectralCube` with a different `fill_value`. Notes ----- This method is fast (it does not copy any data) """ return self._new_cube_with(fill_value=fill_value) def with_spectral_unit(self, unit, velocity_convention=None, rest_value=None): """ Returns a new Cube with a different Spectral Axis unit Parameters ---------- unit : :class:`~astropy.units.Unit` Any valid spectral unit: velocity, (wave)length, or frequency. Only vacuum units are supported. velocity_convention : 'relativistic', 'radio', or 'optical' The velocity convention to use for the output velocity axis. Required if the output type is velocity. This can be either one of the above strings, or an `astropy.units` equivalency. rest_value : :class:`~astropy.units.Quantity` A rest wavelength or frequency with appropriate units. Required if output type is velocity. The cube's WCS should include this already if the *input* type is velocity, but the WCS's rest wavelength/frequency can be overridden with this parameter. .. 
note: This must be the rest frequency/wavelength *in vacuum*, even if your cube has air wavelength units """ from .spectral_axis import (convert_spectral_axis, determine_ctype_from_vconv) # Allow string specification of units, for example if not isinstance(unit, u.Unit): unit = u.Unit(unit) # Velocity conventions: required for frq <-> velo # convert_spectral_axis will handle the case of no velocity # convention specified & one is required if velocity_convention in DOPPLER_CONVENTIONS: velocity_convention = DOPPLER_CONVENTIONS[velocity_convention] elif (velocity_convention is not None and velocity_convention not in DOPPLER_CONVENTIONS.values()): raise ValueError("Velocity convention must be radio, optical, " "or relativistic.") # If rest value is specified, it must be a quantity if (rest_value is not None and (not hasattr(rest_value, 'unit') or not rest_value.unit.is_equivalent(u.m, u.spectral()))): raise ValueError("Rest value must be specified as an astropy " "quantity with spectral equivalence.") # Shorter versions to keep lines under 80 ctype_from_vconv = determine_ctype_from_vconv vc = velocity_convention meta = self._meta.copy() if 'Original Unit' not in self._meta: meta['Original Unit'] = self._wcs.wcs.cunit[self._wcs.wcs.spec] meta['Original Type'] = self._wcs.wcs.ctype[self._wcs.wcs.spec] out_ctype = ctype_from_vconv(self._wcs.wcs.ctype[self._wcs.wcs.spec], unit, velocity_convention=velocity_convention) newwcs = convert_spectral_axis(self._wcs, unit, out_ctype, rest_value=rest_value) if self._mask is not None: newmask = self._mask.with_spectral_unit(unit, velocity_convention=vc, rest_value=rest_value) newmask._wcs = newwcs else: newmask = None newwcs.wcs.set() cube = self._new_cube_with(wcs=newwcs, mask=newmask, meta=meta, spectral_unit=unit) return cube def _get_filled_data(self, view=(), fill=np.nan, check_endian=False): """ Return the underlying data as a numpy array. Always returns the spectral axis as the 0th axis Sets masked values to *fill* """ if check_endian: if not self._data.dtype.isnative: kind = str(self._data.dtype.kind) sz = str(self._data.dtype.itemsize) dt = '=' + kind + sz data = self._data.astype(dt) else: data = self._data else: data = self._data if self._mask is None: return data[view] return self._mask._filled(data=data, wcs=self._wcs, fill=fill, view=view) @cube_utils.slice_syntax def unmasked_data(self, view): """ Return a view of the subset of the underlying data, ignoring the mask. 
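
        Example (a sketch, assuming ``cube`` is an existing SpectralCube):

        >>> first_channel = cube.unmasked_data[0, :, :]  # doctest: +SKIP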
Returns ------- data : Quantity instance The unmasked data """ return u.Quantity(self._data[view], self.unit, copy=False) @property def wcs(self): """ The WCS describing the cube """ return self._wcs @cached def _pix_cen(self): """ Offset of every pixel from the origin, along each direction Returns ------- tuple of spectral_offset, y_offset, x_offset, each 3D arrays describing the distance from the origin Notes ----- These arrays are broadcast, and are not memory intensive Each array is in the units of the corresponding wcs.cunit, but this is implicit (e.g., they are not astropy Quantity arrays) """ # Start off by extracting the world coordinates of the pixels _, lat, lon = self.world[0, :, :] spectral, _, _ = self.world[:, 0, 0] spectral -= spectral[0] # offset from first pixel # Convert to radians lon = np.radians(lon) lat = np.radians(lat) # Find the dx and dy arrays from astropy.coordinates.angle_utilities import angular_separation dx = angular_separation(lon[:, :-1], lat[:, :-1], lon[:, 1:], lat[:, :-1]) dy = angular_separation(lon[:-1, :], lat[:-1, :], lon[1:, :], lat[1:, :]) # Find the cumulative offset - need to add a zero at the start x = np.zeros(self._data.shape[1:]) y = np.zeros(self._data.shape[1:]) x[:, 1:] = np.cumsum(np.degrees(dx), axis=1) y[1:, :] = np.cumsum(np.degrees(dy), axis=0) x, y, spectral = np.broadcast_arrays(x[None,:,:], y[None,:,:], spectral[:,None,None]) return spectral, y, x @cached def _pix_size_slice(self, axis): """ Return the size of each pixel along any given direction. Assumes pixels have equal size. Also assumes that the spectral and spatial directions are separable, which is enforced throughout this code. Parameters ---------- axis : 0, 1, or 2 The axis along which to compute the pixel size Returns ------- Pixel size in units of either degrees or the appropriate spectral unit """ if axis == 0: # note that self._spectral_scale is required here because wcs # forces into units of m, m/s, or Hz return np.abs(self.wcs.pixel_scale_matrix[2,2]) * self._spectral_scale elif axis in (1,2): # the pixel size is a projection. 
            # I think the pixel_scale_matrix
            # must be symmetric, such that psm[axis,:]**2 == psm[:,axis]**2
            return np.sum(self.wcs.pixel_scale_matrix[2-axis,:]**2)**0.5
        else:
            raise ValueError("Cubes have 3 axes.")

    @cached
    def _pix_size(self):
        """
        Return the size of each pixel along each direction, in world units

        Returns
        -------
        dv, dy, dx : tuple of 3D arrays
            The extent of each pixel along each direction

        Notes
        -----
        These arrays are broadcast, and are not memory intensive

        Each array is in the units of the corresponding wcs.cunit, but
        this is implicit (e.g., they are not astropy Quantity arrays)
        """

        # First, scale along x direction

        xpix = np.linspace(-0.5, self._data.shape[2] - 0.5,
                           self._data.shape[2] + 1)
        ypix = np.linspace(0., self._data.shape[1] - 1, self._data.shape[1])
        xpix, ypix = np.meshgrid(xpix, ypix)
        zpix = np.zeros(xpix.shape)

        lon, lat, _ = self._wcs.all_pix2world(xpix, ypix, zpix, 0)

        # Convert to radians
        lon = np.radians(lon)
        lat = np.radians(lat)

        # Find the dx and dy arrays
        from astropy.coordinates.angle_utilities import angular_separation
        dx = angular_separation(lon[:, :-1], lat[:, :-1],
                                lon[:, 1:], lat[:, :-1])

        # Next, scale along y direction

        xpix = np.linspace(0., self._data.shape[2] - 1, self._data.shape[2])
        ypix = np.linspace(-0.5,
                           self._data.shape[1] - 0.5,
                           self._data.shape[1] + 1)
        xpix, ypix = np.meshgrid(xpix, ypix)
        zpix = np.zeros(xpix.shape)

        lon, lat, _ = self._wcs.all_pix2world(xpix, ypix, zpix, 0)

        # Convert to radians
        lon = np.radians(lon)
        lat = np.radians(lat)

        # Find the dx and dy arrays
        from astropy.coordinates.angle_utilities import angular_separation
        dy = angular_separation(lon[:-1, :], lat[:-1, :],
                                lon[1:, :], lat[1:, :])

        # Next, spectral coordinates
        zpix = np.linspace(-0.5, self._data.shape[0] - 0.5,
                           self._data.shape[0] + 1)
        xpix = np.zeros(zpix.shape)
        ypix = np.zeros(zpix.shape)

        _, _, spectral = self._wcs.all_pix2world(xpix, ypix, zpix, 0)

        # Take spectral units into account
        # order of operations here is crucial! If this is done after
        # broadcasting, the full array size is allocated, which is bad!
        dspectral = np.diff(spectral) * self._spectral_scale

        dx = np.abs(np.degrees(dx.reshape(1, dx.shape[0], dx.shape[1])))
        dy = np.abs(np.degrees(dy.reshape(1, dy.shape[0], dy.shape[1])))
        dspectral = np.abs(dspectral.reshape(-1, 1, 1))
        dx, dy, dspectral = np.broadcast_arrays(dx, dy, dspectral)

        return dspectral, dy, dx

    def moment(self, order=0, axis=0, how='auto'):
        """
        Compute moments along the spectral axis.

        Moments are defined as follows:

        Moment 0:

        .. math:: M_0 = \\int I dl

        Moment 1:

        .. math:: M_1 = \\frac{\\int I l dl}{M_0}

        Moment N:

        .. math:: M_N = \\frac{\\int I (l - M_1)^N dl}{M_0}

        Parameters
        ----------
        order : int
           The order of the moment to take. Default=0

        axis : int
           The axis along which to compute the moment. Default=0

        how : cube | slice | ray | auto
           How to compute the moment. All strategies give the same
           result, but certain strategies are more efficient depending
           on data size and layout. Cube/slice/ray iterate over
           decreasing subsets of the data, to conserve memory.
           Default='auto'

        Returns
        -------
        map : `Projection`
           The moment map, returned as a `Projection` with the WCS of the
           remaining axes attached

        Notes
        -----
        Generally, how='cube' is fastest for small cubes that easily
        fit into memory. how='slice' is best for most larger datasets.
        how='ray' is probably only a good idea for very large cubes
        whose data are contiguous over the axis of the moment map.

        For the first moment, the result for axis=1, 2 is the angular
        offset *relative to the cube face*.
        For axis=0, it is the *absolute* velocity/frequency of the first
        moment.
        """
        from ._moments import (moment_slicewise, moment_cubewise,
                               moment_raywise, moment_auto)

        dispatch = dict(slice=moment_slicewise,
                        cube=moment_cubewise,
                        ray=moment_raywise,
                        auto=moment_auto)

        if how not in dispatch:
            raise ValueError("Invalid how. Must be in %s" %
                             sorted(list(dispatch.keys())))

        out = dispatch[how](self, order, axis)

        # apply units
        if order == 0:
            if axis == 0 and self._spectral_unit is not None:
                axunit = unit = self._spectral_unit
            else:
                axunit = unit = u.Unit(self._wcs.wcs.cunit[np2wcs[axis]])
            out = u.Quantity(out, self.unit * axunit, copy=False)
        else:
            if axis == 0 and self._spectral_unit is not None:
                unit = self._spectral_unit ** max(order, 1)
            else:
                unit = u.Unit(self._wcs.wcs.cunit[np2wcs[axis]]) ** max(order, 1)
            out = u.Quantity(out, unit, copy=False)

        # special case: for order=1, axis=0, you usually want
        # the absolute velocity and not the offset
        if order == 1 and axis == 0:
            out += self.world[0, :, :][0]

        new_wcs = wcs_utils.drop_axis(self._wcs, np2wcs[axis])

        meta = {'moment_order': order,
                'moment_axis': axis,
                'moment_method': how}
        meta.update(self._meta)

        return Projection(out, copy=False, wcs=new_wcs, meta=meta,
                          header=self._nowcs_header)

    def moment0(self, axis=0, how='auto'):
        """Compute the zeroth moment along an axis.
        See :meth:`moment`.
        """
        return self.moment(axis=axis, order=0, how=how)

    def moment1(self, axis=0, how='auto'):
        """
        Compute the 1st moment along an axis.
        See :meth:`moment`
        """
        return self.moment(axis=axis, order=1, how=how)

    def moment2(self, axis=0, how='auto'):
        """
        Compute the 2nd moment along an axis.
        See :meth:`moment`
        """
        return self.moment(axis=axis, order=2, how=how)

    @property
    def spectral_axis(self):
        """
        A `~astropy.units.Quantity` array containing the central values of
        each channel along the spectral axis.
        """
        return self.world[:, 0, 0][0].ravel()

    @property
    def velocity_convention(self):
        """
        The `~astropy.units.equivalencies` that describes the spectral axis
        """
        return spectral_axis.determine_vconv_from_ctype(self.wcs.wcs.ctype[self.wcs.wcs.spec])

    @property
    def spatial_coordinate_map(self):
        return self.world[0, :, :][1:]

    def closest_spectral_channel(self, value):
        """
        Find the index of the closest spectral channel to the specified
        spectral coordinate.

        Parameters
        ----------
        value : :class:`~astropy.units.Quantity`
            The value of the spectral coordinate to search for.
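
        Example (a sketch, assuming ``cube`` has a velocity spectral axis):

        >>> from astropy import units as u  # doctest: +SKIP
        >>> ichan = cube.closest_spectral_channel(15 * u.km / u.s)  # doctest: +SKIP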
""" # TODO: we have to not compute this every time spectral_axis = self.spectral_axis try: value = value.to(spectral_axis.unit, equivalencies=u.spectral()) except u.UnitsError: if value.unit.is_equivalent(u.Hz, equivalencies=u.spectral()): if spectral_axis.unit.is_equivalent(u.m / u.s): raise u.UnitsError("Spectral axis is in velocity units and " "'value' is in frequency-equivalent units " "- use SpectralCube.with_spectral_unit " "first to convert the cube to frequency-" "equivalent units, or search for a " "velocity instead") else: raise u.UnitsError("Unexpected spectral axis units: {0}".format(spectal_axis.unit)) elif value.unit.is_equivalent(u.m / u.s): if spectral_axis.unit.is_equivalent(u.Hz, equivalencies=u.spectral()): raise u.UnitsError("Spectral axis is in frequency-equivalent " "units and 'value' is in velocity units " "- use SpectralCube.with_spectral_unit " "first to convert the cube to frequency-" "equivalent units, or search for a " "velocity instead") else: raise u.UnitsError("Unexpected spectral axis units: {0}".format(spectal_axis.unit)) else: raise u.UnitsError("'value' should be in frequency equivalent or velocity units (got {0})".format(value.unit)) # TODO: optimize the next line - just brute force for now return np.argmin(np.abs(spectral_axis - value)) def spectral_slab(self, lo, hi): """ Extract a new cube between two spectral coordinates Parameters ---------- lo, hi : :class:`~astropy.units.Quantity` The lower and upper spectral coordinate for the slab range. The units should be compatible with the units of the spectral axis. If the spectral axis is in frequency-equivalent units and you want to select a range in velocity, or vice-versa, you should first use :meth:`~spectral_cube.SpectralCube.with_spectral_unit` to convert the units of the spectral axis. """ # Find range of values for spectral axis ilo = self.closest_spectral_channel(lo) ihi = self.closest_spectral_channel(hi) if ilo > ihi: ilo, ihi = ihi, ilo ihi += 1 # Create WCS slab wcs_slab = self._wcs.deepcopy() wcs_slab.wcs.crpix[2] -= ilo # Create mask slab if self._mask is None: mask_slab = None else: try: mask_slab = self._mask[ilo:ihi, :, :] except NotImplementedError: warnings.warn("Mask slicing not implemented for " "{0} - dropping mask". 
                mask_slab = None

        # Create new spectral cube
        slab = self._new_cube_with(data=self._data[ilo:ihi], wcs=wcs_slab,
                                   mask=mask_slab)

        # TODO: we could change the WCS to give a spectral axis in the
        # correct units as requested - so if the initial cube is in Hz and we
        # request a range in km/s, we could adjust the WCS to be in km/s
        # instead
        return slab

    def minimal_subcube(self):
        """
        Return the minimum enclosing subcube where the mask is valid
        """
        return self[self.subcube_slices_from_mask(self._mask)]

    def subcube_from_mask(self, region_mask):
        """
        Given a mask, return the minimal subcube that encloses the mask

        Parameters
        ----------
        region_mask: `masks.MaskBase` or boolean `numpy.ndarray`
            The mask with appropriate WCS or an ndarray with matched
            coordinates
        """
        return self[self.subcube_slices_from_mask(region_mask)]

    def subcube_slices_from_mask(self, region_mask):
        """
        Given a mask, return the slices corresponding to the minimum subcube
        that encloses the mask

        Parameters
        ----------
        region_mask: `masks.MaskBase` or boolean `numpy.ndarray`
            The mask with appropriate WCS or an ndarray with matched
            coordinates
        """
        if not scipyOK:
            raise ImportError("Scipy could not be imported: this function won't work.")

        if isinstance(region_mask, np.ndarray):
            if is_broadcastable_and_smaller(region_mask.shape, self.shape):
                region_mask = BooleanArrayMask(region_mask, self._wcs)
            else:
                raise ValueError("Mask shape does not match cube shape.")

        include = region_mask.include(self._data, self._wcs)

        slices = ndimage.find_objects(np.broadcast_arrays(include,
                                                          self._data)[0])[0]

        return slices

    def subcube(self, xlo='min', xhi='max', ylo='min', yhi='max', zlo='min',
                zhi='max', rest_value=None):
        """
        Extract a sub-cube spatially and spectrally.

        Parameters
        ----------
        [xyz]lo/[xyz]hi : int or `Quantity` or `min`/`max`
            The endpoints to extract. If given as a quantity, will be
            interpreted as World coordinates. If given as a string or
            int, will be interpreted as pixel coordinates.
        """

        limit_dict = {'xlo':0 if xlo == 'min' else xlo,
                      'ylo':0 if ylo == 'min' else ylo,
                      'zlo':0 if zlo == 'min' else zlo,
                      'xhi':self.shape[2] if xhi=='max' else xhi,
                      'yhi':self.shape[1] if yhi=='max' else yhi,
                      'zhi':self.shape[0] if zhi=='max' else zhi}
        dims = {'x': 2,
                'y': 1,
                'z': 0}

        # Specific warning for slicing a frequency axis with a velocity or
        # vice/versa
        if ((hasattr(zlo, 'unit') and not
             zlo.unit.is_equivalent(self.spectral_axis.unit)) or
            (hasattr(zhi, 'unit') and not
             zhi.unit.is_equivalent(self.spectral_axis.unit))):
            raise u.UnitsError("Spectral units are not equivalent to the "
                               "spectral slice. Use `.with_spectral_unit` "
                               "to convert to equivalent units first")

        for val in (xlo,ylo,xhi,yhi):
            if hasattr(val, 'unit') and not val.unit.is_equivalent(u.degree):
                raise u.UnitsError("The X and Y slices must be specified in "
                                   "degree-equivalent units.")

        for lim in limit_dict:
            limval = limit_dict[lim]
            if hasattr(limval, 'unit'):
                dim = dims[lim[0]]
                sl = [slice(0,1)]*2
                sl.insert(dim, slice(None))
                spine = self.world[sl][dim]
                val = np.argmin(np.abs(limval-spine))
                if limval > spine.max() or limval < spine.min():
                    log.warn("The limit {0} is out of bounds."
                             " Using min/max instead.".format(lim))
" Using min/max instead.".format(lim)) if lim[1:] == 'hi': # End-inclusive indexing: need to add one for the high # slice limit_dict[lim] = val + 1 else: limit_dict[lim] = val slices = [slice(limit_dict[xx+'lo'], limit_dict[xx+'hi']) for xx in 'zyx'] return self[slices] def subcube_from_ds9region(self, ds9region): """ Extract a masked subcube from a ds9 region or a pyregion Region object (only functions on celestial dimensions) Parameters ---------- ds9region: str or `pyregion.Shape` The region to extract """ import pyregion if isinstance(ds9region, six.string_types): shapelist = pyregion.parse(ds9region) else: shapelist = ds9region if shapelist[0].coord_format not in ('physical','image'): # Requires astropy >0.4... # pixel_regions = shapelist.as_imagecoord(self.wcs.celestial.to_header()) # convert the regions to image (pixel) coordinates celhdr = self.wcs.sub([wcs.WCSSUB_CELESTIAL]).to_header() pixel_regions = shapelist.as_imagecoord(celhdr) else: # For this to work, we'd need to change the reference pixel after cropping. # Alternatively, we can just make the full-sized mask... todo.... raise NotImplementedError("Can't use non-celestial coordinates with regions.") pixel_regions = shapelist # This is a hack to use mpl to determine the outer bounds of the regions # (but it's a legit hack - pyregion needs a major internal refactor # before we can approach this any other way, I think -AG) mpl_objs = pixel_regions.get_mpl_patches_texts()[0] # Find the minimal enclosing box containing all of the regions # (this will speed up the mask creation below) extent = mpl_objs[0].get_extents() xlo, ylo = extent.min xhi, yhi = extent.max all_extents = [obj.get_extents() for obj in mpl_objs] for ext in all_extents: xlo = xlo if xlo < ext.min[0] else ext.min[0] ylo = ylo if ylo < ext.min[1] else ext.min[1] xhi = xhi if xhi > ext.max[0] else ext.max[0] yhi = yhi if yhi > ext.max[1] else ext.max[1] # Negative indices will do bad things, like wrap around the cube # If xhi/yhi are negative, there is not overlap if (xhi < 0) or (yhi < 0): raise ValueError("Region is outside of cube.") # if xlo/ylo are negative, we need to crop if xlo < 0: xlo = 0 if ylo < 0: ylo = 0 log.debug("Region boundaries: ") log.debug("xlo={xlo}, ylo={ylo}, xhi={xhi}, yhi={yhi}".format(xlo=xlo, ylo=ylo, xhi=xhi, yhi=yhi)) subcube = self.subcube(xlo=xlo, ylo=ylo, xhi=xhi, yhi=yhi) if any(dim == 0 for dim in subcube.shape): raise ValueError("The derived subset is empty: the region does not" " overlap with the cube.") subhdr = subcube.wcs.sub([wcs.WCSSUB_CELESTIAL]).to_header() mask = shapelist.get_mask(header=subhdr, shape=subcube.shape[1:]) return subcube.with_mask(BooleanArrayMask(mask, subcube.wcs, shape=subcube.shape)) def world_spines(self): """ Returns a list of 1D arrays, for the world coordinates along each pixel axis. Raises error if this operation is ill-posed (e.g. rotated world coordinates, strong distortions) This method is not currently implemented. Use :meth:`world` instead. """ raise NotImplementedError() @cube_utils.slice_syntax def world(self, view): """ Return a list of the world coordinates in a cube (or a view of it). Cube.world is called with *bracket notation*, like a NumPy array:: c.world[0:3, :, :] Returns ------- [v, y, x] : list of NumPy arryas The 3 world coordinates at each pixel in the view. 
Examples -------- >>> c = SpectralCube.read('xyv.fits') Extract the first 3 velocity channels of the cube: >>> v, y, x = c.world[0:3] Extract all the world coordinates >>> v, y, x = c.world[:, :, :] Extract every other pixel along all axes >>> v, y, x = c.world[::2, ::2, ::2] """ # note: view is a tuple of view # the next 3 lines are equivalent to (but more efficient than) # inds = np.indices(self._data.shape) # inds = [i[view] for i in inds] inds = np.ogrid[[slice(0, s) for s in self._data.shape]] inds = np.broadcast_arrays(*inds) inds = [i[view] for i in inds[::-1]] # numpy -> wcs order shp = inds[0].shape inds = np.column_stack([i.ravel() for i in inds]) world = self._wcs.all_pix2world(inds, 0).T world = [w.reshape(shp) for w in world] # 1D->3D # apply units world = [w * u.Unit(self._wcs.wcs.cunit[i]) for i, w in enumerate(world)] # convert spectral unit if needed if self._spectral_unit is not None: world[2] = world[2].to(self._spectral_unit) return world[::-1] # reverse WCS -> numpy order def _val_to_own_unit(self, value, operation='compare', tofrom='to', keepunit=False): """ Given a value, check if it has a unit. If it does, convert to the cube's unit. If it doesn't, raise an exception. """ if isinstance(value, SpectralCube): if self.unit.is_equivalent(value.unit): return value else: return value.to(self.unit) elif hasattr(value, 'unit'): if keepunit: return value.to(self.unit) else: return value.to(self.unit).value else: raise ValueError("Can only {operation} cube objects {tofrom}" " SpectralCubes or Quantities with " "a unit attribute." .format(operation=operation, tofrom=tofrom)) def __gt__(self, value): """ Return a LazyMask representing the inequality Parameters ---------- value : number The threshold """ value = self._val_to_own_unit(value) return LazyComparisonMask(operator.gt, value, data=self._data, wcs=self._wcs) def __ge__(self, value): value = self._val_to_own_unit(value) return LazyComparisonMask(operator.ge, value, data=self._data, wcs=self._wcs) def __le__(self, value): value = self._val_to_own_unit(value) return LazyComparisonMask(operator.le, value, data=self._data, wcs=self._wcs) def __lt__(self, value): value = self._val_to_own_unit(value) return LazyComparisonMask(operator.lt, value, data=self._data, wcs=self._wcs) def __eq__(self, value): value = self._val_to_own_unit(value) return LazyComparisonMask(operator.eq, value, data=self._data, wcs=self._wcs) def __hash__(self): return id(self) def __ne__(self, value): value = self._val_to_own_unit(value) return LazyComparisonMask(operator.ne, value, data=self._data, wcs=self._wcs) def __add__(self, value): if isinstance(value, SpectralCube): return self._cube_on_cube_operation(operator.add, value) else: value = self._val_to_own_unit(value, operation='add', tofrom='from', keepunit=True) return self._apply_everywhere(operator.add, value) def __sub__(self, value): if isinstance(value, SpectralCube): return self._cube_on_cube_operation(operator.sub, value) else: value = self._val_to_own_unit(value, operation='subtract', tofrom='from', keepunit=True) return self._apply_everywhere(operator.sub, value) def __mul__(self, value): if isinstance(value, SpectralCube): return self._cube_on_cube_operation(operator.mul, value) else: return self._apply_everywhere(operator.mul, value) def __truediv__(self, value): return self.__div__(value) def __div__(self, value): if isinstance(value, SpectralCube): return self._cube_on_cube_operation(operator.truediv, value) else: return self._apply_everywhere(operator.truediv, value) def 
    __pow__(self, value):
        if isinstance(value, SpectralCube):
            return self._cube_on_cube_operation(operator.pow, value)
        else:
            return self._apply_everywhere(operator.pow, value)

    @classmethod
    def read(cls, filename, format=None, hdu=None, **kwargs):
        """
        Read a spectral cube from a file.

        If the file contains Stokes axes, they will automatically be dropped.
        If you want to read in all Stokes information, use
        :meth:`~spectral_cube.StokesSpectralCube.read` instead.

        Parameters
        ----------
        filename : str
            The file to read the cube from
        format : str
            The format of the file to read. (Currently limited to 'fits' and
            'casa_image')
        hdu : int or str
            For FITS files, the HDU to read in (can be the ID or name of an
            HDU).
        kwargs : dict
            If the format is 'fits', the kwargs are passed to
            :func:`~astropy.io.fits.open`.
        """
        from .io.core import read
        from .stokes_spectral_cube import StokesSpectralCube
        cube = read(filename, format=format, hdu=hdu, **kwargs)
        if isinstance(cube, StokesSpectralCube):
            if hasattr(cube, 'I'):
                warnings.warn("Cube is a Stokes cube, "
                              "returning spectral cube for I component")
                return cube.I
            else:
                raise ValueError("Spectral cube is a Stokes cube that "
                                 "does not have an I component")
        else:
            return cube

    def write(self, filename, overwrite=False, format=None):
        """
        Write the spectral cube to a file.

        Parameters
        ----------
        filename : str
            The path to write the file to
        format : str
            The format of the file to write. (Currently limited to 'fits')
        overwrite : bool
            If True, overwrite `filename` if it exists
        """
        from .io.core import write
        write(filename, self, overwrite=overwrite, format=format)

    def to_yt(self, spectral_factor=1.0, nprocs=None, **kwargs):
        """
        Convert a spectral cube to a yt object that can be further analyzed in
        yt.

        Parameters
        ----------
        spectral_factor : float, optional
            Factor by which to stretch the spectral axis. If set to 1, one
            pixel in spectral coordinates is equivalent to one pixel in
            spatial coordinates.
        nprocs : int, optional
            Number of chunks to decompose the data into when loading it into
            yt (passed through to yt's loaders).

        If using yt 3.0 or later, additional keyword arguments will be passed
        onto yt's ``FITSDataset`` constructor. See the yt documentation
        (http://yt-project.org/docs/3.0/examining/loading_data.html?#fits-data)
        for details on options for reading FITS data.
        """

        import yt

        if ('dev' in yt.__version__ or
            StrictVersion(yt.__version__) >= StrictVersion('3.0')):

            from yt.frontends.fits.api import FITSDataset
            from yt.units.unit_object import UnitParseError

            hdu = PrimaryHDU(self._get_filled_data(fill=0.),
                             header=self.wcs.to_header())

            units = str(self.unit.to_string())

            hdu.header["BUNIT"] = units
            hdu.header["BTYPE"] = "flux"

            ds = FITSDataset(hdu, nprocs=nprocs,
                             spectral_factor=spectral_factor, **kwargs)

            # Check to make sure the units are legit
            try:
                ds.quan(1.0,units)
            except UnitParseError:
                raise RuntimeError("The unit %s was not parsed by yt. " % units+
                                   "Check to make sure it is correct.")

        else:

            from yt.mods import load_uniform_grid

            data = {'flux': self._get_filled_data(fill=0.).transpose()}

            nz, ny, nx = self.shape

            if nprocs is None:
                nprocs = 1

            bbox = np.array([[0.5,float(nx)+0.5],
                             [0.5,float(ny)+0.5],
                             [0.5,spectral_factor*float(nz)+0.5]])

            ds = load_uniform_grid(data, [nx,ny,nz], 1., bbox=bbox,
                                   nprocs=nprocs, periodicity=(False, False,
                                                               False))

        return ytCube(self, ds, spectral_factor=spectral_factor)

    def to_glue(self, name=None, glue_app=None, dataset=None, start_gui=True):
        """
        Send data to a new or existing Glue application

        Parameters
        ----------
        name : str or None
            The name of the dataset within Glue. If None, defaults to
            'SpectralCube'.
            If a dataset with the given name already exists, a new dataset
            with "_" appended will be added instead.
        glue_app : GlueApplication or None
            A glue application to send the data to.  If this is not
            specified, a new glue application will be started if one does not
            already exist for this cube.  Otherwise, the data will be sent to
            the existing glue application, `self._glue_app`.
        dataset : glue.core.Data or None
            An existing Data object to add the cube to.  This is a good way
            to compare cubes with the same dimensions.  Supersedes
            ``glue_app``
        start_gui : bool
            Start the GUI when this is run.  Set to False for testing.
        """
        if name is None:
            name = 'SpectralCube'

        from glue.qt.glue_application import GlueApplication
        from glue.core import DataCollection, Data, Component
        from glue.core.coordinates import coordinates_from_header
        from glue.qt.widgets import ImageWidget

        if dataset is not None:
            if name in [d.label for d in dataset.components]:
                name = name + "_"
            dataset[name] = self
        else:
            result = Data(label=name)
            result.coords = coordinates_from_header(self.header)
            result.add_component(self, name)

            if glue_app is None:
                if hasattr(self, '_glue_app'):
                    glue_app = self._glue_app
                else:
                    # Start a new glue session.  This will quit when done.
                    # I don't think the return statement is ever reached,
                    # based on past attempts
                    # [@ChrisBeaumont - chime in here if you'd like]
                    dc = DataCollection([result])

                    # start Glue
                    ga = self._glue_app = GlueApplication(dc)
                    self._glue_viewer = ga.new_data_viewer(ImageWidget,
                                                           data=result)

                    if start_gui:
                        self._glue_app.start()

                    return self._glue_app

            # if a glue_app was handed in, use its own data collection
            glue_app.add_datasets(glue_app.data_collection, result)

    def to_pvextractor(self):
        """
        Open the cube in a quick viewer written in matplotlib that allows you
        to create PV extractions within the GUI
        """
        from pvextractor.gui import PVSlicer
        return PVSlicer(self)

    def to_ds9(self, ds9id=None, newframe=False):
        """
        Send the data to ds9 (this will create a copy in memory)

        Parameters
        ----------
        ds9id: None or string
            The DS9 session ID.  If None, a new one will be created.
            To find your ds9 session ID, open the ds9 menu option
            File:XPA:Information and look for the XPA_METHOD string, e.g.
            ``XPA_METHOD:  86ab2314:60063``.  You would then call this
            function as ``cube.to_ds9('86ab2314:60063')``
        newframe: bool
            Send the cube to a new frame or to the current frame?
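        Examples
        --------
        A minimal sketch (assuming a running ds9 session and that the
        ``ds9`` or ``pyds9`` package is importable; the XPA id is the
        example value quoted above, not a live session):

        >>> dd = cube.to_ds9()                    # doctest: +SKIP
        >>> dd = cube.to_ds9('86ab2314:60063')    # doctest: +SKIP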
""" try: import ds9 except ImportError: import pyds9 as ds9 if ds9id is None: dd = ds9.ds9(start=True) else: dd = ds9.ds9(target=ds9id, start=False) if newframe: dd.set('frame new') dd.set_pyfits(HDUList(self.hdu)) return dd @property def _nowcs_header(self): """ Return a copy of the header with no WCS information attached """ return wcs_utils.strip_wcs_from_header(self._header) @property def header(self): # Preserve non-WCS information from previous header iteration header = self._nowcs_header header.update(self.wcs.to_header()) if self.unit == u.dimensionless_unscaled and 'BUNIT' in self._meta: # preserve the BUNIT even though it's not technically valid # (Jy/Beam) header['BUNIT'] = self._meta['BUNIT'] else: header['BUNIT'] = self.unit.to_string(format='FITS') header.insert(2, Card(keyword='NAXIS', value=self._data.ndim)) header.insert(3, Card(keyword='NAXIS1', value=self.shape[2])) header.insert(4, Card(keyword='NAXIS2', value=self.shape[1])) header.insert(5, Card(keyword='NAXIS3', value=self.shape[0])) # Preserve the cube's spectral units if self._spectral_unit != u.Unit(header['CUNIT3']): header['CDELT3'] *= self._spectral_scale header['CRVAL3'] *= self._spectral_scale header['CUNIT3'] = self._spectral_unit.to_string(format='FITS') if 'beam' in self._meta: header = self._meta['beam'].attach_to_header(header) # TODO: incorporate other relevant metadata here return header @property def hdu(self): """ HDU version of self """ hdu = PrimaryHDU(self.filled_data[:].value, header=self.header) return hdu def to(self, unit, equivalencies=()): """ Return the cube converted to the given unit (assuming it is equivalent). If conversion was required, this will be a copy, otherwise it will """ if not isinstance(unit, u.Unit): unit = u.Unit(unit) if unit == self.unit: # No copying return self # scaling factor factor = self.unit.to(unit, equivalencies=equivalencies) return self._new_cube_with(data=self._data*factor, unit=unit) def find_lines(self, velocity_offset=None, velocity_convention=None, rest_value=None, **kwargs): """ Using astroquery's splatalogue interface, search for lines within the spectral band. See `astroquery.splatalogue.Splatalogue` for information on keyword arguments Parameters ---------- velocity_offset : u.km/u.s equivalent An offset by which the spectral axis should be shifted before searching splatalogue. This value will be *added* to the velocity, so if you want to redshift a spectrum, make this value positive, and if you want to un-redshift it, make this value negative. velocity_convention : 'radio', 'optical', 'relativistic' The doppler convention to pass to `with_spectral_unit` rest_value : u.GHz equivalent The rest frequency (or wavelength or energy) to be passed to `with_spectral_unit` """ warnings.warn("The line-finding routine is experimental. 
Please " "report bugs on the Issues page: " "https://github.com/radio-astro-tools/spectral-cube/issues") from astroquery.splatalogue import Splatalogue if velocity_convention in DOPPLER_CONVENTIONS: velocity_convention = DOPPLER_CONVENTIONS[velocity_convention] if velocity_offset is not None: spectral_axis = (self.with_spectral_unit(u.km/u.s, velocity_convention=velocity_convention, rest_value=rest_value).spectral_axis + velocity_offset).to(u.GHz, velocity_convention(rest_value)) else: spectral_axis = self.spectral_axis.to(u.GHz) numin,numax = spectral_axis.min(), spectral_axis.max() log.log(19, "Min/max frequency: {0},{1}".format(numin, numax)) result = Splatalogue.query_lines(numin, numax, **kwargs) return result def determine_format_from_filename(filename): if filename[-4:] == 'fits': return 'fits' elif filename[-5:] == 'image': return 'casa_image' elif filename[-3:] == 'lmv': return 'class_lmv' spectral-cube-0.3.1/spectral_cube/stokes_spectral_cube.py0000644000077000000240000001502212647754466023702 0ustar adamstaff00000000000000from __future__ import print_function, absolute_import, division import numpy as np from astropy.extern import six from .spectral_cube import SpectralCube from . import wcs_utils from .masks import BooleanArrayMask, is_broadcastable_and_smaller __all__ = ['StokesSpectalCube'] VALID_STOKES = ['I', 'Q', 'U', 'V', 'RR', 'LL', 'RL', 'LR'] class StokesSpectralCube(object): """ A class to store a spectral cube with multiple Stokes parameters. The individual Stokes cubes can share a common mask in addition to having component-specific masks. """ def __init__(self, stokes_data, mask=None, meta=None, fill_value=None): self._stokes_data = stokes_data self._meta = meta or {} self._fill_value = fill_value reference = tuple(stokes_data.keys())[0] for component in stokes_data: if not isinstance(stokes_data[component], SpectralCube): raise TypeError("stokes_data should be a dictionary of " "SpectralCube objects") if not wcs_utils.check_equality(stokes_data[component].wcs, stokes_data[reference].wcs): raise ValueError("All spectral cubes in stokes_data " "should have the same WCS") if component not in VALID_STOKES: raise ValueError("Invalid Stokes component: {0} - should be " "one of I, Q, U, V, RR, LL, RL, LR".format(component)) if stokes_data[component].shape != stokes_data[reference].shape: raise ValueError("All spectral cubes should have the same shape") self._wcs = stokes_data[reference].wcs self._shape = stokes_data[reference].shape if isinstance(mask, BooleanArrayMask): if not is_broadcastable_and_smaller(mask.shape, self._shape): raise ValueError("Mask shape is not broadcastable to data shape:" " {0} vs {1}".format(mask.shape, self._shape)) self._mask = mask @property def shape(self): return self._shape @property def mask(self): """ The underlying mask """ return self._mask @property def wcs(self): return self._wcs def __dir__(self): if six.PY2: return self.components + dir(type(self)) + list(self.__dict__) else: return self.components + super(StokesSpectralCube, self).__dir__() @property def components(self): return list(self._stokes_data.keys()) def __getattr__(self, attribute): """ Descriptor to return the Stokes cubes """ if attribute in self._stokes_data: if self.mask is not None: return self._stokes_data[attribute].with_mask(self.mask) else: return self._stokes_data[attribute] else: raise AttributeError("StokesSpectralCube has no attribute {0}".format(attribute)) def with_mask(self, mask, inherit_mask=True): """ Return a new StokesSpectralCube instance that 
contains a composite mask of the current StokesSpectralCube and the new ``mask``. Parameters ---------- mask : :class:`MaskBase` instance, or boolean numpy array The mask to apply. If a boolean array is supplied, it will be converted into a mask, assuming that True values indicate included elements. inherit_mask : bool (optional, default=True) If True, combines the provided mask with the mask currently attached to the cube Returns ------- new_cube : :class:`StokesSpectralCube` A cube with the new mask applied. Notes ----- This operation returns a view into the data, and not a copy. """ if isinstance(mask, np.ndarray): if not is_broadcastable_and_smaller(mask.shape, self.shape): raise ValueError("Mask shape is not broadcastable to data shape: " "%s vs %s" % (mask.shape, self.shape)) mask = BooleanArrayMask(mask, self.wcs) if self._mask is not None: return self._new_cube_with(mask=self.mask & mask if inherit_mask else mask) else: return self._new_cube_with(mask=mask) def _new_cube_with(self, stokes_data=None, mask=None, meta=None, fill_value=None): data = self._stokes_data if stokes_data is None else stokes_data mask = self._mask if mask is None else mask if meta is None: meta = {} meta.update(self._meta) fill_value = self._fill_value if fill_value is None else fill_value cube = StokesSpectralCube(stokes_data=data, mask=mask, meta=meta, fill_value=fill_value) return cube def with_spectral_unit(self, unit, **kwargs): stokes_data = {k: self._stokes_data[k].with_spectral_unit(unit, **kwargs) for k in self._stokes_data} return self._new_cube_with(stokes_data=stokes_data) @classmethod def read(cls, filename, format=None, hdu=None, **kwargs): """ Read a spectral cube from a file. If the file contains Stokes axes, they will be read in. If you are only interested in the unpolarized emission (I), you can use :meth:`~spectral_cube.SpectralCube.read` instead. Parameters ---------- filename : str The file to read the cube from format : str The format of the file to read. (Currently limited to 'fits' and 'casa_image') hdu : int or str For FITS files, the HDU to read in (can be the ID or name of an HDU). Returns ------- cube : :class:`SpectralCube` """ from .io.core import read cube = read(filename, format=format, hdu=hdu) if isinstance(cube, SpectralCube): cube = StokesSpectralCube({'I': cube}) return cube def write(self, filename, overwrite=False, format=None): """ Write the spectral cube to a file. Parameters ---------- filename : str The path to write the file to format : str The format of the file to write. 
(Currently limited to 'fits') overwrite : bool If True, overwrite `filename` if it exists """ raise NotImplementedError("") spectral-cube-0.3.1/spectral_cube/tests/0000755000077000000240000000000012654610601020243 5ustar adamstaff00000000000000spectral-cube-0.3.1/spectral_cube/tests/__init__.py0000644000077000000240000000024712643464660022371 0ustar adamstaff00000000000000from __future__ import print_function, absolute_import, division import os def path(filename): return os.path.join(os.path.dirname(__file__), 'data', filename) spectral-cube-0.3.1/spectral_cube/tests/coveragerc0000644000077000000240000000140012551776560022316 0ustar adamstaff00000000000000[run] source = {packagename} omit = {packagename}/_astropy_init* {packagename}/conftest* {packagename}/cython_version* {packagename}/setup_package* {packagename}/*/setup_package* {packagename}/*/*/setup_package* {packagename}/tests/* {packagename}/*/tests/* {packagename}/*/*/tests/* {packagename}/version* [report] exclude_lines = # Have to re-enable the standard pragma pragma: no cover # Don't complain about packages we have installed except ImportError # Don't complain if tests don't hit assertions raise AssertionError raise NotImplementedError # Don't complain about script hooks def main\(.*\): # Ignore branches that don't pertain to this version of Python pragma: py{ignore_python_version}spectral-cube-0.3.1/spectral_cube/tests/data/0000755000077000000240000000000012654610601021154 5ustar adamstaff00000000000000spectral-cube-0.3.1/spectral_cube/tests/data/adv.fits0000644000077000000240000002070012654610330022613 0ustar adamstaff00000000000000SIMPLE = T / conforms to FITS standard BITPIX = -64 / array data type NAXIS = 3 / number of array dimensions NAXIS1 = 2 NAXIS2 = 3 NAXIS3 = 4 BLANK = -1 / TELESCOP= 'VLA ' / CDELT1 = -5.55555561268E-04 / CRPIX1 = 1.37300000000E+03 / CRVAL1 = 2.31837500515E+01 / CTYPE1 = 'RA---SIN' / CDELT2 = 5.55555561268E-04 / CRPIX2 = 1.15200000000E+03 / CRVAL2 = 3.05765277962E+01 / CTYPE2 = 'DEC--SIN' / CDELT3 = 1.28821496879E+03 / CRPIX3 = 1.00000000000E+00 / CRVAL3 = -3.21214698632E+05 / CTYPE3 = 'VELO-HEL' / DATE-OBS= '1998-06-18T16:30:25.4' / RESTFREQ= 1.42040571841E+09 / CELLSCAL= 'CONSTANT' / BUNIT = 'K ' EPOCH = 2.00000000000E+03 / OBJECT = 'M33 ' / OBSERVER= 'AT206 ' / VOBS = -2.57256763070E+01 / LTYPE = 'channel ' / LSTART = 2.15000000000E+02 / LWIDTH = 1.00000000000E+00 / LSTEP = 1.00000000000E+00 / BTYPE = 'intensity' / DATAMIN = -6.57081836835E-03 / DATAMAX = 1.52362231165E-02 / BMAJ = 0.0002777777777777778 BMIN = 0.0002777777777777778 BPA = 0.0 END ?Ý0@(¹Ü?é )Vy:.?ÉŽét?àt›ÇîÔ?âõeÝ÷?§ÈY7 Cp?ãq渢Š?ÅÓ¼5>´?°§8 lª ?î]E3ÄЉ?îæu&@Π?éÞdŠªŸ?Ó~ÊÀ ²ê?¹ 'c{p?åå<¨öÏa?Ü+u]wRÜ?¿=åÓ…°?ß°ú~ªb?¡›_Kh?í'r1?ÐÙéu80?å3aï}v?Óóf¸?à¤e°Ôî¨spectral-cube-0.3.1/spectral_cube/tests/data/advs.fits0000644000077000000240000002070012654610330022776 0ustar adamstaff00000000000000SIMPLE = T / conforms to FITS standard BITPIX = -64 / array data type NAXIS = 4 / number of array dimensions NAXIS1 = 4 NAXIS2 = 3 NAXIS3 = 2 NAXIS4 = 1 BLANK = -1 / TELESCOP= 'VLA ' / CDELT1 = -5.55555561268E-04 / CRPIX1 = 1.37300000000E+03 / CRVAL1 = 2.31837500515E+01 / CTYPE1 = 'RA---SIN' / CDELT2 = 5.55555561268E-04 / CRPIX2 = 1.15200000000E+03 / CRVAL2 = 3.05765277962E+01 / CTYPE2 = 'DEC--SIN' / CDELT3 = 1.28821496879E+03 / CRPIX3 = 1.00000000000E+00 / CRVAL3 = -3.21214698632E+05 / CTYPE3 = 'VELO-HEL' / CDELT4 = 1.00000000000E+00 / CRPIX4 = 1.00000000000E+00 / CRVAL4 = 1.00000000000E+00 / CTYPE4 = 'STOKES ' / DATE-OBS= '1998-06-18T16:30:25.4' / RESTFREQ= 
1.42040571841E+09 / CELLSCAL= 'CONSTANT' / BUNIT = 'K ' EPOCH = 2.00000000000E+03 / OBJECT = 'M33 ' / OBSERVER= 'AT206 ' / VOBS = -2.57256763070E+01 / LTYPE = 'channel ' / LSTART = 2.15000000000E+02 / LWIDTH = 1.00000000000E+00 / LSTEP = 1.00000000000E+00 / BTYPE = 'intensity' / DATAMIN = -6.57081836835E-03 / DATAMAX = 1.52362231165E-02 / BMAJ = 0.0002777777777777778 BMIN = 0.0002777777777777778 BPA = 0.0 END ?×øw_Qì?îl@h»ÖT?çl~gQ?ã(5Öc,°?Ãøk7"!„?Ã÷ á™L?­½"dUp?ë·· U·µ?ã3hS@@ ÝŠº¿ð?ð?ð?Ø+x( Ñf¼_¨ž<HJy/beam RA DEC VELOCITY EQUATORIAL 0IRAS2A ‰i·í+í?lÛ%"sá?Cî¶²@ÃxT׿úD$ëöÕ=ï+í?lÛ%"sá?0HDO ´?*\ÂE“ AéVÔ½à@ eË6¡Ì 6®Å>™§<X3ETd0ETd¯'4Py‡»' •»õ†½»KЇ»Íj˜»€Ñ»vÕ¡»vˆ¼»Wí¼4³»ëÚ»wü¼_¨ž<Žé›<Ìæ<›•ž<*çš< øŠ<€£‹<»†<˜šj<¤cj´?¹ 'c{p?í'r1?ÉŽét?°§8 lª ?åå<¨öÏa?ÐÙéu80?àt›ÇîÔ?î]E3ÄЉ?Ü+u]wRÜ?å3aï}v?âõeÝ÷?îæu&@Π?¿=åÓ…°?Óóf¸?§ÈY7 Cp?éÞdŠªŸ?ß°ú~ªb?à¤e°Ôî¨spectral-cube-0.3.1/spectral_cube/tests/data/vda.fits0000644000077000000240000002070012654610330022613 0ustar adamstaff00000000000000SIMPLE = T / conforms to FITS standard BITPIX = -64 / array data type NAXIS = 3 / number of array dimensions NAXIS1 = 3 NAXIS2 = 2 NAXIS3 = 4 BLANK = -1 / TELESCOP= 'VLA ' / CDELT1 = 0.000555555561268 CRPIX1 = 1152.0 CRVAL1 = 30.5765277962 CTYPE1 = 'DEC--SIN' CDELT2 = -0.000555555561268 CRPIX2 = 1373.0 CRVAL2 = 23.1837500515 CTYPE2 = 'RA---SIN' CDELT3 = 1288.21496879 CRPIX3 = 1.0 CRVAL3 = -321214.698632 CTYPE3 = 'VELO-HEL' DATE-OBS= '1998-06-18T16:30:25.4' / RESTFREQ= 1.42040571841E+09 / CELLSCAL= 'CONSTANT' / BUNIT = 'K ' EPOCH = 2.00000000000E+03 / OBJECT = 'M33 ' / OBSERVER= 'AT206 ' / VOBS = -2.57256763070E+01 / LTYPE = 'channel ' / LSTART = 2.15000000000E+02 / LWIDTH = 1.00000000000E+00 / LSTEP = 1.00000000000E+00 / BTYPE = 'intensity' / DATAMIN = -6.57081836835E-03 / DATAMAX = 1.52362231165E-02 / BMAJ = 0.0002777777777777778 BMIN = 0.0002777777777777778 BPA = 0.0 END ?Ý0@(¹Ü?ÉŽét?âõeÝ÷?é )Vy:.?àt›ÇîÔ?§ÈY7 Cp?ãq渢Š?°§8 lª ?îæu&@Π?ÅÓ¼5>´?î]E3ÄЉ?éÞdŠªŸ?Ó~ÊÀ ²ê?åå<¨öÏa?¿=åÓ…°?¹ 'c{p?Ü+u]wRÜ?ß°ú~ªb?¡›_Kh?ÐÙéu80?Óóf¸?í'r1?å3aï}v?à¤e°Ôî¨spectral-cube-0.3.1/spectral_cube/tests/data/vda_Jybeam_lower.fits0000644000077000000240000002070012654610330025312 0ustar adamstaff00000000000000SIMPLE = T / conforms to FITS standard BITPIX = -64 / array data type NAXIS = 3 / number of array dimensions NAXIS1 = 3 NAXIS2 = 2 NAXIS3 = 4 BLANK = -1 / TELESCOP= 'VLA ' / CDELT1 = 0.000555555561268 CRPIX1 = 1152.0 CRVAL1 = 30.5765277962 CTYPE1 = 'DEC--SIN' CDELT2 = -0.000555555561268 CRPIX2 = 1373.0 CRVAL2 = 23.1837500515 CTYPE2 = 'RA---SIN' CDELT3 = 1288.21496879 CRPIX3 = 1.0 CRVAL3 = -321214.698632 CTYPE3 = 'VELO-HEL' DATE-OBS= '1998-06-18T16:30:25.4' / RESTFREQ= 1.42040571841E+09 / CELLSCAL= 'CONSTANT' / BUNIT = 'Jy/beam ' EPOCH = 2.00000000000E+03 / OBJECT = 'M33 ' / OBSERVER= 'AT206 ' / VOBS = -2.57256763070E+01 / LTYPE = 'channel ' / LSTART = 2.15000000000E+02 / LWIDTH = 1.00000000000E+00 / LSTEP = 1.00000000000E+00 / BTYPE = 'intensity' / DATAMIN = -6.57081836835E-03 / DATAMAX = 1.52362231165E-02 / BMAJ = 0.0002777777777777778 BMIN = 0.0002777777777777778 BPA = 0.0 END ?Ý0@(¹Ü?ÉŽét?âõeÝ÷?é )Vy:.?àt›ÇîÔ?§ÈY7 Cp?ãq渢Š?°§8 lª ?îæu&@Π?ÅÓ¼5>´?î]E3ÄЉ?éÞdŠªŸ?Ó~ÊÀ ²ê?åå<¨öÏa?¿=åÓ…°?¹ 'c{p?Ü+u]wRÜ?ß°ú~ªb?¡›_Kh?ÐÙéu80?Óóf¸?í'r1?å3aï}v?à¤e°Ôî¨spectral-cube-0.3.1/spectral_cube/tests/data/vda_JYBEAM_upper.fits0000644000077000000240000002070012654610330025055 0ustar adamstaff00000000000000SIMPLE = T / conforms to FITS standard BITPIX = -64 / array data type NAXIS = 3 / number of array dimensions NAXIS1 = 3 NAXIS2 = 2 NAXIS3 = 4 BLANK = -1 / TELESCOP= 'VLA ' / CDELT1 = 
0.000555555561268 CRPIX1 = 1152.0 CRVAL1 = 30.5765277962 CTYPE1 = 'DEC--SIN' CDELT2 = -0.000555555561268 CRPIX2 = 1373.0 CRVAL2 = 23.1837500515 CTYPE2 = 'RA---SIN' CDELT3 = 1288.21496879 CRPIX3 = 1.0 CRVAL3 = -321214.698632 CTYPE3 = 'VELO-HEL' DATE-OBS= '1998-06-18T16:30:25.4' / RESTFREQ= 1.42040571841E+09 / CELLSCAL= 'CONSTANT' / BUNIT = 'JY/BEAM ' EPOCH = 2.00000000000E+03 / OBJECT = 'M33 ' / OBSERVER= 'AT206 ' / VOBS = -2.57256763070E+01 / LTYPE = 'channel ' / LSTART = 2.15000000000E+02 / LWIDTH = 1.00000000000E+00 / LSTEP = 1.00000000000E+00 / BTYPE = 'intensity' / DATAMIN = -6.57081836835E-03 / DATAMAX = 1.52362231165E-02 / BMAJ = 0.0002777777777777778 BMIN = 0.0002777777777777778 BPA = 0.0 END ?Ý0@(¹Ü?ÉŽét?âõeÝ÷?é )Vy:.?àt›ÇîÔ?§ÈY7 Cp?ãq渢Š?°§8 lª ?îæu&@Π?ÅÓ¼5>´?î]E3ÄЉ?éÞdŠªŸ?Ó~ÊÀ ²ê?åå<¨öÏa?¿=åÓ…°?¹ 'c{p?Ü+u]wRÜ?ß°ú~ªb?¡›_Kh?ÐÙéu80?Óóf¸?í'r1?å3aï}v?à¤e°Ôî¨spectral-cube-0.3.1/spectral_cube/tests/data/vda_Jybeam_whitespace.fits0000644000077000000240000002070012654610330026316 0ustar adamstaff00000000000000SIMPLE = T / conforms to FITS standard BITPIX = -64 / array data type NAXIS = 3 / number of array dimensions NAXIS1 = 3 NAXIS2 = 2 NAXIS3 = 4 BLANK = -1 / TELESCOP= 'VLA ' / CDELT1 = 0.000555555561268 CRPIX1 = 1152.0 CRVAL1 = 30.5765277962 CTYPE1 = 'DEC--SIN' CDELT2 = -0.000555555561268 CRPIX2 = 1373.0 CRVAL2 = 23.1837500515 CTYPE2 = 'RA---SIN' CDELT3 = 1288.21496879 CRPIX3 = 1.0 CRVAL3 = -321214.698632 CTYPE3 = 'VELO-HEL' DATE-OBS= '1998-06-18T16:30:25.4' / RESTFREQ= 1.42040571841E+09 / CELLSCAL= 'CONSTANT' / BUNIT = ' Jy / beam ' EPOCH = 2.00000000000E+03 / OBJECT = 'M33 ' / OBSERVER= 'AT206 ' / VOBS = -2.57256763070E+01 / LTYPE = 'channel ' / LSTART = 2.15000000000E+02 / LWIDTH = 1.00000000000E+00 / LSTEP = 1.00000000000E+00 / BTYPE = 'intensity' / DATAMIN = -6.57081836835E-03 / DATAMAX = 1.52362231165E-02 / BMAJ = 0.0002777777777777778 BMIN = 0.0002777777777777778 BPA = 0.0 END ?Ý0@(¹Ü?ÉŽét?âõeÝ÷?é )Vy:.?àt›ÇîÔ?§ÈY7 Cp?ãq渢Š?°§8 lª ?îæu&@Π?ÅÓ¼5>´?î]E3ÄЉ?éÞdŠªŸ?Ó~ÊÀ ²ê?åå<¨öÏa?¿=åÓ…°?¹ 'c{p?Ü+u]wRÜ?ß°ú~ªb?¡›_Kh?ÐÙéu80?Óóf¸?í'r1?å3aï}v?à¤e°Ôî¨spectral-cube-0.3.1/spectral_cube/tests/data/vsad.fits0000644000077000000240000002070012654610330022776 0ustar adamstaff00000000000000SIMPLE = T / conforms to FITS standard BITPIX = -64 / array data type NAXIS = 4 / number of array dimensions NAXIS1 = 2 NAXIS2 = 1 NAXIS3 = 4 NAXIS4 = 3 BLANK = -1 / TELESCOP= 'VLA ' / CDELT1 = 1288.21496879 CRPIX1 = 1.0 CRVAL1 = -321214.698632 CTYPE1 = 'VELO-HEL' CDELT2 = 1.0 CRPIX2 = 1.0 CRVAL2 = 1.0 CTYPE2 = 'STOKES ' CDELT3 = -0.000555555561268 CRPIX3 = 1373.0 CRVAL3 = 23.1837500515 CTYPE3 = 'RA---SIN' CDELT4 = 0.000555555561268 CRPIX4 = 1152.0 CRVAL4 = 30.5765277962 CTYPE4 = 'DEC--SIN' DATE-OBS= '1998-06-18T16:30:25.4' / RESTFREQ= 1.42040571841E+09 / CELLSCAL= 'CONSTANT' / BUNIT = 'K ' EPOCH = 2.00000000000E+03 / OBJECT = 'M33 ' / OBSERVER= 'AT206 ' / VOBS = -2.57256763070E+01 / LTYPE = 'channel ' / LSTART = 2.15000000000E+02 / LWIDTH = 1.00000000000E+00 / LSTEP = 1.00000000000E+00 / BTYPE = 'intensity' / DATAMIN = -6.57081836835E-03 / DATAMAX = 1.52362231165E-02 / BMAJ = 0.0002777777777777778 BMIN = 0.0002777777777777778 BPA = 0.0 END ?×øw_Qì?ê£^¿Áö?îl@h»ÖT?Ë-í0˜?çl~gQ?ÇF _à8?ã(5Öc,°?Çỷá4H?Ãøk7"!„?Óx´täªh?Ã÷ á™L?àÊÎ 2, data=data, wcs=wcs) assert_allclose(m.include(data, wcs), [[[0, 0, 0, 1, 1]]]) assert_allclose(m.exclude(data, wcs), [[[1, 1, 1, 0, 0]]]) assert_allclose(m._filled(data, wcs), [[[np.nan, np.nan, np.nan, 3, 4]]]) assert_allclose(m._flattened(data, wcs), [3, 4]) assert_allclose(m.include(data, 
wcs, view=(0, 0, slice(1, 4))), [0, 0, 1]) assert_allclose(m.exclude(data, wcs, view=(0, 0, slice(1, 4))), [1, 1, 0]) assert_allclose(m._filled(data, wcs, view=(0, 0, slice(1, 4))), [np.nan, np.nan, 3]) assert_allclose(m._flattened(data, wcs, view=(0, 0, slice(1, 4))), [3]) # Now if we call with different data, the results for include and exclude # should *not* change. data = (3 - np.arange(5)).reshape((1, 1, 5)) assert_allclose(m.include(data, wcs), [[[0, 0, 0, 1, 1]]]) assert_allclose(m.exclude(data, wcs), [[[1, 1, 1, 0, 0]]]) assert_allclose(m._filled(data, wcs), [[[np.nan, np.nan, np.nan, 0, -1]]]) assert_allclose(m._flattened(data, wcs), [0, -1]) assert_allclose(m.include(data, wcs, view=(0, 0, slice(1, 4))), [0, 0, 1]) assert_allclose(m.exclude(data, wcs, view=(0, 0, slice(1, 4))), [1, 1, 0]) assert_allclose(m._filled(data, wcs, view=(0, 0, slice(1, 4))), [np.nan, np.nan, 0]) assert_allclose(m._flattened(data, wcs, view=(0, 0, slice(1, 4))), [0]) def test_lazy_comparison_mask(): data = np.arange(5).reshape((1, 1, 5)) wcs = WCS() m = LazyComparisonMask(operator.gt, 2, data=data, wcs=wcs) assert_allclose(m.include(data, wcs), [[[0, 0, 0, 1, 1]]]) assert_allclose(m.exclude(data, wcs), [[[1, 1, 1, 0, 0]]]) assert_allclose(m._filled(data, wcs), [[[np.nan, np.nan, np.nan, 3, 4]]]) assert_allclose(m._flattened(data, wcs), [3, 4]) assert_allclose(m.include(data, wcs, view=(0, 0, slice(1, 4))), [0, 0, 1]) assert_allclose(m.exclude(data, wcs, view=(0, 0, slice(1, 4))), [1, 1, 0]) assert_allclose(m._filled(data, wcs, view=(0, 0, slice(1, 4))), [np.nan, np.nan, 3]) assert_allclose(m._flattened(data, wcs, view=(0, 0, slice(1, 4))), [3]) # Now if we call with different data, the results for include and exclude # should *not* change. data = (3 - np.arange(5)).reshape((1, 1, 5)) assert_allclose(m.include(data, wcs), [[[0, 0, 0, 1, 1]]]) assert_allclose(m.exclude(data, wcs), [[[1, 1, 1, 0, 0]]]) assert_allclose(m._filled(data, wcs), [[[np.nan, np.nan, np.nan, 0, -1]]]) assert_allclose(m._flattened(data, wcs), [0, -1]) assert_allclose(m.include(data, wcs, view=(0, 0, slice(1, 4))), [0, 0, 1]) assert_allclose(m.exclude(data, wcs, view=(0, 0, slice(1, 4))), [1, 1, 0]) assert_allclose(m._filled(data, wcs, view=(0, 0, slice(1, 4))), [np.nan, np.nan, 0]) assert_allclose(m._flattened(data, wcs, view=(0, 0, slice(1, 4))), [0]) def test_function_mask_incorrect_shape(): # The following function will return the incorrect shape because it does # not apply the view def threshold(data, wcs, view=()): return data > 2 m = FunctionMask(threshold) data = np.arange(5).reshape((1, 1, 5)) wcs = WCS() with pytest.raises(ValueError) as exc: m.include(data, wcs, view=(0, 0, slice(1, 4))) assert exc.value.args[0] == "Function did not return mask with correct shape - expected (3,), got (1, 1, 5)" def test_function_mask(): def threshold(data, wcs, view=()): return data[view] > 2 m = FunctionMask(threshold) data = np.arange(5).reshape((1, 1, 5)) wcs = WCS() assert_allclose(m.include(data, wcs), [[[0, 0, 0, 1, 1]]]) assert_allclose(m.exclude(data, wcs), [[[1, 1, 1, 0, 0]]]) assert_allclose(m._filled(data, wcs), [[[np.nan, np.nan, np.nan, 3, 4]]]) assert_allclose(m._flattened(data, wcs), [3, 4]) assert_allclose(m.include(data, wcs, view=(0, 0, slice(1, 4))), [0, 0, 1]) assert_allclose(m.exclude(data, wcs, view=(0, 0, slice(1, 4))), [1, 1, 0]) assert_allclose(m._filled(data, wcs, view=(0, 0, slice(1, 4))), [np.nan, np.nan, 3]) assert_allclose(m._flattened(data, wcs, view=(0, 0, slice(1, 4))), [3]) # Now if we call with different 
data, the results for include and exclude # *should* change. data = (3 - np.arange(5)).reshape((1, 1, 5)) assert_allclose(m.include(data, wcs), [[[1, 0, 0, 0, 0]]]) assert_allclose(m.exclude(data, wcs), [[[0, 1, 1, 1, 1]]]) assert_allclose(m._filled(data, wcs), [[[3, np.nan, np.nan, np.nan, np.nan]]]) assert_allclose(m._flattened(data, wcs), [3]) assert_allclose(m.include(data, wcs, view=(0, 0, slice(0, 3))), [1, 0, 0]) assert_allclose(m.exclude(data, wcs, view=(0, 0, slice(0, 3))), [0, 1, 1]) assert_allclose(m._filled(data, wcs, view=(0, 0, slice(0, 3))), [3, np.nan, np.nan]) assert_allclose(m._flattened(data, wcs, view=(0, 0, slice(0, 3))), [3]) def test_composite_mask(): def lower_threshold(data, wcs, view=()): return data[view] > 0 def upper_threshold(data, wcs, view=()): return data[view] < 3 m1 = FunctionMask(lower_threshold) m2 = FunctionMask(upper_threshold) m = m1 & m2 data = np.arange(5).reshape((1, 1, 5)) wcs = WCS() assert_allclose(m.include(data, wcs), [[[0, 1, 1, 0, 0]]]) assert_allclose(m.exclude(data, wcs), [[[1, 0, 0, 1, 1]]]) assert_allclose(m._filled(data, wcs), [[[np.nan, 1, 2, np.nan, np.nan]]]) assert_allclose(m._flattened(data, wcs), [1, 2]) assert_allclose(m.include(data, wcs, view=(0, 0, slice(1, 4))), [1, 1, 0]) assert_allclose(m.exclude(data, wcs, view=(0, 0, slice(1, 4))), [0, 0, 1]) assert_allclose(m._filled(data, wcs, view=(0, 0, slice(1, 4))), [1, 2, np.nan]) assert_allclose(m._flattened(data, wcs, view=(0, 0, slice(1, 4))), [1, 2]) def test_mask_logic(): data = np.arange(5).reshape((1, 1, 5)) wcs = WCS() def threshold_1(data, wcs, view=()): return data[view] > 0 def threshold_2(data, wcs, view=()): return data[view] < 4 def threshold_3(data, wcs, view=()): return data[view] != 2 m1 = FunctionMask(threshold_1) m2 = FunctionMask(threshold_2) m3 = FunctionMask(threshold_3) m = m1 & m2 assert_allclose(m.include(data, wcs), [[[0, 1, 1, 1, 0]]]) m = m1 | m2 assert_allclose(m.include(data, wcs), [[[1, 1, 1, 1, 1]]]) m = m1 | ~m2 assert_allclose(m.include(data, wcs), [[[0, 1, 1, 1, 1]]]) m = m1 & m2 & m3 assert_allclose(m.include(data, wcs), [[[0, 1, 0, 1, 0]]]) m = (m1 | m3) & m2 assert_allclose(m.include(data, wcs), [[[1, 1, 1, 1, 0]]]) m = m1 ^ m2 assert_allclose(m.include(data, wcs), [[[1, 0, 0, 0, 1]]]) m = m1 ^ m3 assert_allclose(m.include(data, wcs), [[[1, 0, 1, 0, 0]]]) @pytest.mark.parametrize(('name'), (('advs'), ('dvsa'), ('sdav'), ('sadv'), ('vsad'), ('vad'), ('adv'), )) def test_mask_spectral_unit(name): cube, data = cube_and_raw(name + '.fits') mask = BooleanArrayMask(data, cube._wcs) mask_freq = mask.with_spectral_unit(u.Hz) assert mask_freq._wcs.wcs.ctype[mask_freq._wcs.wcs.spec] == 'FREQ-W2F' # values taken from header rest = 1.42040571841E+09*u.Hz crval = -3.21214698632E+05*u.m/u.s outcv = crval.to(u.m, u.doppler_optical(rest)).to(u.Hz, u.spectral()) assert_allclose(mask_freq._wcs.wcs.crval[mask_freq._wcs.wcs.spec], outcv.to(u.Hz).value) def test_wcs_validity_check(): cube, data = cube_and_raw('adv.fits') mask = BooleanArrayMask(data>0, cube._wcs) cube = cube.with_mask(mask) s2 = cube.spectral_slab(-2 * u.km / u.s, 2 * u.km / u.s) s3 = s2.with_spectral_unit(u.km / u.s, velocity_convention=u.doppler_radio) # just checking that this works, not that it does anything in particular moment_map = s3.moment(order=1) def test_mask_spectral_unit_functions(): cube, data = cube_and_raw('adv.fits') # function mask should do nothing mask1 = FunctionMask(lambda x: x>0) mask_freq1 = mask1.with_spectral_unit(u.Hz) # lazy mask behaves like booleanarraymask mask2 = 
LazyMask(lambda x: x>0, cube=cube) mask_freq2 = mask2.with_spectral_unit(u.Hz) assert mask_freq2._wcs.wcs.ctype[mask_freq2._wcs.wcs.spec] == 'FREQ-W2F' # values taken from header rest = 1.42040571841E+09*u.Hz crval = -3.21214698632E+05*u.m/u.s outcv = crval.to(u.m, u.doppler_optical(rest)).to(u.Hz, u.spectral()) assert_allclose(mask_freq2._wcs.wcs.crval[mask_freq2._wcs.wcs.spec], outcv.to(u.Hz).value) # again, test that it works mask3 = CompositeMask(mask1,mask2) mask_freq3 = mask3.with_spectral_unit(u.Hz) mask_freq3 = CompositeMask(mask_freq1,mask_freq2) mask_freq_freq3 = mask_freq3.with_spectral_unit(u.Hz) # this one should fail #failedmask = CompositeMask(mask_freq1,mask2) def is_broadcastable_try(shp1, shp2): """ Test whether an array shape can be broadcast to another (this is the try/fail approach, which is guaranteed right.... right?) http://stackoverflow.com/questions/24743753/test-if-an-array-is-broadcastable-to-a-shape/24745359#24745359 """ #This variant does not work as of np 1.10: the strided arrays aren't #writable and therefore apparently cannot be broadcast # x = np.array([1]) # a = as_strided(x, shape=shp1, strides=[0] * len(shp1)) # b = as_strided(x, shape=shp2, strides=[0] * len(shp2)) a = np.ones(shp1) b = np.ones(shp2) try: c = np.broadcast_arrays(a, b) # reverse order: compare last dim first (as broadcasting does) if any(bi= StrictVersion('1.0'): # The relative error is slightly larger on astropy-dev # There is no obvious reason for this. rtol = 2e-7 atol = 1e-30 else: rtol = 1e-7 atol = 0.0 @axis_order def test_strategies_consistent(axis, order): mc_hdu = moment_cube() sc = SpectralCube.read(mc_hdu) cwise = sc.moment(axis=axis, order=order, how='cube') swise = sc.moment(axis=axis, order=order, how='slice') rwise = sc.moment(axis=axis, order=order, how='ray') assert_allclose(cwise, swise, rtol=rtol, atol=atol) assert_allclose(cwise, rwise, rtol=rtol, atol=atol) @pytest.mark.parametrize(('order', 'axis', 'how'), [(o, a, h) for o in [0, 1, 2] for a in [0, 1, 2] for h in ['cube', 'slice', 'auto', 'ray']]) def test_reference(order, axis, how): mc_hdu = moment_cube() sc = SpectralCube.read(mc_hdu) mom_sc = sc.moment(order=order, axis=axis, how=how) assert_allclose(mom_sc, MOMENTS[order][axis]) @axis_order def test_consistent_mask_handling(axis, order): mc_hdu = moment_cube() sc = SpectralCube.read(mc_hdu) sc._mask = sc > 4*u.K cwise = sc.moment(axis=axis, order=order, how='cube') swise = sc.moment(axis=axis, order=order, how='slice') rwise = sc.moment(axis=axis, order=order, how='ray') assert_allclose(cwise, swise, rtol=rtol, atol=atol) assert_allclose(cwise, rwise, rtol=rtol, atol=atol) def test_convenience_methods(): mc_hdu = moment_cube() sc = SpectralCube.read(mc_hdu) assert_allclose(sc.moment0(axis=0), MOMENTS[0][0]) assert_allclose(sc.moment1(axis=2), MOMENTS[1][2]) assert_allclose(sc.moment2(axis=1), MOMENTS[2][1]) def test_preserve_unit(): mc_hdu = moment_cube() sc = SpectralCube.read(mc_hdu) sc_kms = sc.with_spectral_unit(u.km/u.s) m0 = sc_kms.moment0(axis=0) m1 = sc_kms.moment1(axis=0) assert_allclose(m0, MOMENTS[0][0].to(u.K*u.km/u.s)) assert_allclose(m1, MOMENTS[1][0].to(u.km/u.s)) def test_with_flux_unit(): """ As of Issue 184, redundant with test_reference """ mc_hdu = moment_cube() sc = SpectralCube.read(mc_hdu) sc._unit = u.K sc_kms = sc.with_spectral_unit(u.km/u.s) m0 = sc_kms.moment0(axis=0) m1 = sc_kms.moment1(axis=0) assert sc.unit == u.K assert sc.filled_data[:].unit == u.K assert_allclose(m0, MOMENTS[0][0].to(u.K*u.km/u.s)) assert_allclose(m1, 
MOMENTS[1][0].to(u.km/u.s)) @pytest.mark.parametrize(('order', 'axis', 'how'), [(o, a, h) for o in [0, 1, 2] for a in [0, 1, 2] for h in ['cube', 'slice', 'auto', 'ray']]) def test_how_withfluxunit(order, axis, how): """ Regression test for issue 180 As of issue 184, this is mostly redundant with test_reference except that it (kind of) checks that units are set """ mc_hdu = moment_cube() sc = SpectralCube.read(mc_hdu) sc._unit = u.K mom_sc = sc.moment(order=order, axis=axis, how=how) assert sc.unit == u.K assert sc.filled_data[:].unit == u.K assert_allclose(mom_sc, MOMENTSu[order][axis]) spectral-cube-0.3.1/spectral_cube/tests/test_performance.py0000644000077000000240000000273412643464660024175 0ustar adamstaff00000000000000""" Performance-related tests to make sure we don't use more memory than we should """ from __future__ import print_function, absolute_import, division from .test_moments import moment_cube from .helpers import assert_allclose from ..spectral_cube import SpectralCube def find_base_nbytes(obj): # from http://stackoverflow.com/questions/34637875/size-of-numpy-strided-array-broadcast-array-in-memory if obj.base is not None: return find_base_nbytes(obj.base) return obj.nbytes def test_pix_size(): mc_hdu = moment_cube() sc = SpectralCube.read(mc_hdu) s,y,x = sc._pix_size() # float64 by default bytes_per_pix = 8 assert find_base_nbytes(s) == sc.shape[0]*bytes_per_pix assert find_base_nbytes(y) == sc.shape[1]*sc.shape[2]*bytes_per_pix assert find_base_nbytes(x) == sc.shape[1]*sc.shape[2]*bytes_per_pix def test_compare_pix_size_approaches(): mc_hdu = moment_cube() sc = SpectralCube.read(mc_hdu) sa,ya,xa = sc._pix_size() s,y,x = (sc._pix_size_slice(ii) for ii in range(3)) assert_allclose(sa, s) assert_allclose(ya, y) assert_allclose(xa, x) def test_pix_cen(): mc_hdu = moment_cube() sc = SpectralCube.read(mc_hdu) s,y,x = sc._pix_cen() # float64 by default bytes_per_pix = 8 assert find_base_nbytes(s) == sc.shape[0]*bytes_per_pix assert find_base_nbytes(y) == sc.shape[1]*sc.shape[2]*bytes_per_pix assert find_base_nbytes(x) == sc.shape[1]*sc.shape[2]*bytes_per_pix spectral-cube-0.3.1/spectral_cube/tests/test_projection.py0000644000077000000240000000630412643657174024051 0ustar adamstaff00000000000000from __future__ import print_function, absolute_import, division import pytest import numpy as np from astropy import units as u from .helpers import assert_allclose from ..lower_dimensional_structures import Projection, Slice, OneDSpectrum # set up for parametrization LDOs = (Projection, Slice, OneDSpectrum) LDOs_2d = (Projection, Slice,) two_qty_2d = np.ones((2,2)) * u.Jy twelve_qty_2d = np.ones((12,12)) * u.Jy two_qty_1d = np.ones((2,)) * u.Jy twelve_qty_1d = np.ones((12,)) * u.Jy data_two = (two_qty_2d, two_qty_2d, two_qty_1d) data_twelve = (twelve_qty_2d, twelve_qty_2d, twelve_qty_1d) data_two_2d = (two_qty_2d, two_qty_2d,) data_twelve_2d = (twelve_qty_2d, twelve_qty_2d,) @pytest.mark.parametrize(('LDO', 'data'), zip(LDOs_2d, data_two_2d)) def test_slices_of_projections_not_projections(LDO, data): # slices of projections that have <2 dimensions should not be projections p = LDO(data, copy=False) assert not isinstance(p[0,0], LDO) assert not isinstance(p[0], LDO) @pytest.mark.parametrize(('LDO', 'data'), zip(LDOs_2d, data_twelve_2d)) def test_copy_false(LDO, data): # copy the data so we can manipulate inplace without affecting other tests image = data.copy() p = LDO(image, copy=False) image[3,4] = 2 * u.Jy assert_allclose(p[3,4], 2 * u.Jy) @pytest.mark.parametrize(('LDO', 'data'), 
zip(LDOs, data_twelve)) def test_write(LDO, data, tmpdir): p = LDO(data) p.write(tmpdir.join('test.fits').strpath) @pytest.mark.parametrize(('LDO', 'data'), zip(LDOs_2d, data_twelve_2d)) def test_preserve_wcs_to(LDO, data): # regression for #256 image = data.copy() p = LDO(image, copy=False) image[3,4] = 2 * u.Jy p2 = p.to(u.mJy) assert_allclose(p[3,4], 2 * u.Jy) assert_allclose(p[3,4], 2000 * u.mJy) assert p2.wcs == p.wcs @pytest.mark.parametrize(('LDO', 'data'), zip(LDOs, data_twelve)) def test_multiplication(LDO, data): # regression: 265 p = LDO(data, copy=False) p2 = p * 5 assert p2.unit == u.Jy assert hasattr(p2, '_wcs') assert p2.wcs == p.wcs assert np.all(p2.value == 5) @pytest.mark.parametrize(('LDO', 'data'), zip(LDOs, data_twelve)) def test_unit_division(LDO, data): # regression: 265 image = data p = LDO(image, copy=False) p2 = p / u.beam assert p2.unit == u.Jy/u.beam assert hasattr(p2, '_wcs') assert p2.wcs == p.wcs @pytest.mark.parametrize(('LDO', 'data'), zip(LDOs_2d, data_twelve_2d)) def test_isnan(LDO, data): # Check that np.isnan strips units image = data.copy() image[5,6] = np.nan p = LDO(image, copy=False) mask = np.isnan(p) assert mask.sum() == 1 assert not hasattr(mask, 'unit') @pytest.mark.parametrize(('LDO', 'data'), zip(LDOs, data_twelve)) def test_self_arith(LDO, data): image = data p = LDO(image, copy=False) p2 = p + p assert hasattr(p2, '_wcs') assert p2.wcs == p.wcs assert np.all(p2.value==2) p2 = p - p assert hasattr(p2, '_wcs') assert p2.wcs == p.wcs assert np.all(p2.value==0) spectral-cube-0.3.1/spectral_cube/tests/test_spectral_axis.py0000644000077000000240000005323712643464660024541 0ustar adamstaff00000000000000from __future__ import print_function, absolute_import, division from astropy import wcs from astropy.io import fits from astropy import units as u from astropy import constants from astropy.tests.helper import pytest import warnings import os import numpy as np from .helpers import assert_allclose from . import path as data_path from ..spectral_axis import (convert_spectral_axis, determine_ctype_from_vconv, cdelt_derivative, determine_vconv_from_ctype, get_rest_value_from_wcs, air_to_vac, air_to_vac_deriv, vac_to_air) def test_cube_wcs_freqtovel(): header = fits.Header.fromtextfile(data_path('cubewcs1.hdr')) w1 = wcs.WCS(header) # CTYPE3 = 'FREQ' newwcs = convert_spectral_axis(w1, 'km/s', 'VRAD', rest_value=w1.wcs.restfrq*u.Hz) assert newwcs.wcs.ctype[2] == 'VRAD' assert newwcs.wcs.crval[2] == 305.2461585938794 assert newwcs.wcs.cunit[2] == u.Unit('km/s') newwcs = convert_spectral_axis(w1, 'km/s', 'VRAD') assert newwcs.wcs.ctype[2] == 'VRAD' assert newwcs.wcs.crval[2] == 305.2461585938794 assert newwcs.wcs.cunit[2] == u.Unit('km/s') def test_cube_wcs_freqtovopt(): header = fits.Header.fromtextfile(data_path('cubewcs1.hdr')) w1 = wcs.WCS(header) w2 = convert_spectral_axis(w1, 'km/s', 'VOPT') # TODO: what should w2's values be? test them # these need to be set to zero to test the failure w1.wcs.restfrq = 0.0 w1.wcs.restwav = 0.0 with pytest.raises(ValueError) as exc: convert_spectral_axis(w1, 'km/s', 'VOPT') assert exc.value.args[0] == 'If converting from wavelength/frequency to speed, a reference wavelength/frequency is required.' @pytest.mark.parametrize('wcstype',('Z','W','R','V')) def test_greisen2006(wcstype): # This is the header extracted from Greisen 2006, including many examples # of valid transforms. 
It should be the gold standard (in principle) hdr = fits.Header.fromtextfile(data_path('greisen2006.hdr')) # We have not implemented frame conversions, so we can only convert bary # <-> bary in this case wcs0 = wcs.WCS(hdr, key='F') wcs1 = wcs.WCS(hdr, key=wcstype) if wcstype in ('R','V','Z'): if wcs1.wcs.restfrq: rest = wcs1.wcs.restfrq*u.Hz elif wcs1.wcs.restwav: rest = wcs1.wcs.restwav*u.m else: rest = None outunit = u.Unit(wcs1.wcs.cunit[wcs1.wcs.spec]) out_ctype = wcs1.wcs.ctype[wcs1.wcs.spec] wcs2 = convert_spectral_axis(wcs0, outunit, out_ctype, rest_value=rest) assert_allclose(wcs2.wcs.cdelt[wcs2.wcs.spec], wcs1.wcs.cdelt[wcs1.wcs.spec], rtol=1.e-3) assert_allclose(wcs2.wcs.crval[wcs2.wcs.spec], wcs1.wcs.crval[wcs1.wcs.spec], rtol=1.e-3) assert wcs2.wcs.ctype[wcs2.wcs.spec] == wcs1.wcs.ctype[wcs1.wcs.spec] assert wcs2.wcs.cunit[wcs2.wcs.spec] == wcs1.wcs.cunit[wcs1.wcs.spec] # round trip test: inunit = u.Unit(wcs0.wcs.cunit[wcs0.wcs.spec]) in_ctype = wcs0.wcs.ctype[wcs0.wcs.spec] wcs3 = convert_spectral_axis(wcs2, inunit, in_ctype, rest_value=rest) assert_allclose(wcs3.wcs.crval[wcs3.wcs.spec], wcs0.wcs.crval[wcs0.wcs.spec], rtol=1.e-3) assert_allclose(wcs3.wcs.cdelt[wcs3.wcs.spec], wcs0.wcs.cdelt[wcs0.wcs.spec], rtol=1.e-3) assert wcs3.wcs.ctype[wcs3.wcs.spec] == wcs0.wcs.ctype[wcs0.wcs.spec] assert wcs3.wcs.cunit[wcs3.wcs.spec] == wcs0.wcs.cunit[wcs0.wcs.spec] def test_byhand_f2v(): # VELO-F2V CRVAL3F = 1.37847121643E+09 CDELT3F = 9.764775E+04 RESTFRQV= 1.420405752E+09 CRVAL3V = 8.98134229811E+06 CDELT3V = -2.1217551E+04 CUNIT3V = 'm/s' CUNIT3F = 'Hz' crvalf = CRVAL3F * u.Unit(CUNIT3F) crvalv = CRVAL3V * u.Unit(CUNIT3V) restfreq = RESTFRQV * u.Unit(CUNIT3F) cdeltf = CDELT3F * u.Unit(CUNIT3F) cdeltv = CDELT3V * u.Unit(CUNIT3V) # (Pdb) crval_in,crval_lin1,crval_lin2,crval_out # (, , , ) (Pdb) # cdelt_in, cdelt_lin1, cdelt_lin2, cdelt_out # (, , , ) crvalv_computed = crvalf.to(CUNIT3V, u.doppler_relativistic(restfreq)) cdeltv_computed = -4*constants.c*cdeltf*crvalf*restfreq**2 / (crvalf**2+restfreq**2)**2 cdeltv_computed_byfunction = cdelt_derivative(crvalf, cdeltf, intype='frequency', outtype='speed', rest=restfreq) # this should be EXACT assert cdeltv_computed == cdeltv_computed_byfunction assert_allclose(crvalv_computed, crvalv, rtol=1.e-3) assert_allclose(cdeltv_computed, cdeltv, rtol=1.e-3) # round trip # (Pdb) crval_in,crval_lin1,crval_lin2,crval_out # (, , # , ) # (Pdb) cdelt_in, cdelt_lin1, cdelt_lin2, cdelt_out # (, , , ) crvalf_computed = crvalv_computed.to(CUNIT3F, u.doppler_relativistic(restfreq)) cdeltf_computed = -(cdeltv_computed * constants.c * restfreq / ((constants.c+crvalv_computed)*(constants.c**2 - crvalv_computed**2)**0.5)) assert_allclose(crvalf_computed, crvalf, rtol=1.e-2) assert_allclose(cdeltf_computed, cdeltf, rtol=1.e-2) cdeltf_computed_byfunction = cdelt_derivative(crvalv_computed, cdeltv_computed, intype='speed', outtype='frequency', rest=restfreq) # this should be EXACT assert cdeltf_computed == cdeltf_computed_byfunction def test_byhand_vrad(): # VRAD CRVAL3F = 1.37847121643E+09 CDELT3F = 9.764775E+04 RESTFRQR= 1.420405752E+09 CRVAL3R = 8.85075090419E+06 CDELT3R = -2.0609645E+04 CUNIT3R = 'm/s' CUNIT3F = 'Hz' crvalf = CRVAL3F * u.Unit(CUNIT3F) crvalv = CRVAL3R * u.Unit(CUNIT3R) restfreq = RESTFRQR * u.Unit(CUNIT3F) cdeltf = CDELT3F * u.Unit(CUNIT3F) cdeltv = CDELT3R * u.Unit(CUNIT3R) # (Pdb) crval_in,crval_lin1,crval_lin2,crval_out # (, , , ) # (Pdb) cdelt_in, cdelt_lin1, cdelt_lin2, cdelt_out # (, , , ) crvalv_computed = crvalf.to(CUNIT3R, 
u.doppler_radio(restfreq)) cdeltv_computed = -(cdeltf / restfreq)*constants.c assert_allclose(crvalv_computed, crvalv, rtol=1.e-3) assert_allclose(cdeltv_computed, cdeltv, rtol=1.e-3) crvalf_computed = crvalv_computed.to(CUNIT3F, u.doppler_radio(restfreq)) cdeltf_computed = -(cdeltv_computed/constants.c) * restfreq assert_allclose(crvalf_computed, crvalf, rtol=1.e-3) assert_allclose(cdeltf_computed, cdeltf, rtol=1.e-3) # round trip: # (Pdb) crval_in,crval_lin1,crval_lin2,crval_out # (, , , ) # (Pdb) cdelt_in, cdelt_lin1, cdelt_lin2, cdelt_out # (, , , ) # (Pdb) myunit,lin_cunit,out_lin_cunit,outunit # WRONG (Unit("m / s"), Unit("m / s"), Unit("Hz"), Unit("Hz")) def test_byhand_vopt(): # VOPT: case "Z" CRVAL3F = 1.37847121643E+09 CDELT3F = 9.764775E+04 CUNIT3F = 'Hz' RESTWAVZ= 0.211061139 #CTYPE3Z = 'VOPT-F2W' # This comes from Greisen 2006, but appears to be wrong: CRVAL3Z = 9.120000E+06 CRVAL3Z = 9.120002206E+06 CDELT3Z = -2.1882651E+04 CUNIT3Z = 'm/s' crvalf = CRVAL3F * u.Unit(CUNIT3F) crvalv = CRVAL3Z * u.Unit(CUNIT3Z) restwav = RESTWAVZ * u.m cdeltf = CDELT3F * u.Unit(CUNIT3F) cdeltv = CDELT3Z * u.Unit(CUNIT3Z) # Forward: freq -> vopt # crval: (, , , ) # cdelt: (, , , ) #crvalv_computed = crvalf.to(CUNIT3R, u.doppler_radio(restwav)) crvalw_computed = crvalf.to(u.m, u.spectral()) crvalw_computed32 = crvalf.astype('float32').to(u.m, u.spectral()) cdeltw_computed = -(cdeltf / crvalf**2)*constants.c cdeltw_computed_byfunction = cdelt_derivative(crvalf, cdeltf, intype='frequency', outtype='length', rest=None) # this should be EXACT assert cdeltw_computed == cdeltw_computed_byfunction crvalv_computed = crvalw_computed.to(CUNIT3Z, u.doppler_optical(restwav)) crvalv_computed32 = crvalw_computed32.astype('float32').to(CUNIT3Z, u.doppler_optical(restwav)) #cdeltv_computed = (cdeltw_computed * # 4*constants.c*crvalw_computed*restwav**2 / # (restwav**2+crvalw_computed**2)**2) cdeltv_computed = (cdeltw_computed / restwav)*constants.c cdeltv_computed_byfunction = cdelt_derivative(crvalw_computed, cdeltw_computed, intype='length', outtype='speed', rest=restwav, linear=True) # Disagreement is 2.5e-7: good, but not really great... 
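    # For reference, the optical convention exercised here is
    #     v_opt = c * (lam - lam_rest) / lam_rest,
    # so a wavelength step d(lam) maps to d(v) = c * d(lam) / lam_rest,
    # which is exactly the ``cdeltv_computed = (cdeltw_computed / restwav) *
    # constants.c`` step above (explanatory note; the reference numbers
    # themselves come from Greisen 2006).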
#assert np.abs((crvalv_computed-crvalv)/crvalv) < 1e-6 assert_allclose(crvalv_computed, crvalv, rtol=1.e-2) assert_allclose(cdeltv_computed, cdeltv, rtol=1.e-2) # Round=trip test: # from velo_opt -> freq # (, , , ) # (, , , ) crvalw_computed = crvalv_computed.to(u.m, u.doppler_optical(restwav)) cdeltw_computed = (cdeltv_computed/constants.c) * restwav cdeltw_computed_byfunction = cdelt_derivative(crvalv_computed, cdeltv_computed, intype='speed', outtype='length', rest=restwav, linear=True) assert cdeltw_computed == cdeltw_computed_byfunction crvalf_computed = crvalw_computed.to(CUNIT3F, u.spectral()) cdeltf_computed = -cdeltw_computed * constants.c / crvalw_computed**2 assert_allclose(crvalf_computed, crvalf, rtol=1.e-3) assert_allclose(cdeltf_computed, cdeltf, rtol=1.e-3) cdeltf_computed_byfunction = cdelt_derivative(crvalw_computed, cdeltw_computed, intype='length', outtype='frequency', rest=None) assert cdeltf_computed == cdeltf_computed_byfunction # Fails intentionally (but not really worth testing) #crvalf_computed = crvalv_computed.to(CUNIT3F, u.spectral()+u.doppler_optical(restwav)) #cdeltf_computed = -(cdeltv_computed / constants.c) * restwav.to(u.Hz, u.spectral()) #assert_allclose(crvalf_computed, crvalf, rtol=1.e-3) #assert_allclose(cdeltf_computed, cdeltf, rtol=1.e-3) def test_byhand_f2w(): CRVAL3F = 1.37847121643E+09 CDELT3F = 9.764775E+04 CUNIT3F = 'Hz' #CTYPE3W = 'WAVE-F2W' CRVAL3W = 0.217481841062 CDELT3W = -1.5405916E-05 CUNIT3W = 'm' crvalf = CRVAL3F * u.Unit(CUNIT3F) crvalw = CRVAL3W * u.Unit(CUNIT3W) cdeltf = CDELT3F * u.Unit(CUNIT3F) cdeltw = CDELT3W * u.Unit(CUNIT3W) crvalf_computed = crvalw.to(CUNIT3F, u.spectral()) cdeltf_computed = -constants.c * cdeltw / crvalw**2 assert_allclose(crvalf_computed, crvalf, rtol=0.1) assert_allclose(cdeltf_computed, cdeltf, rtol=0.1) @pytest.mark.parametrize(('ctype','unit','velocity_convention','result'), (('VELO-F2V', "Hz", None, 'FREQ'), ('VELO-F2V', "m", None, 'WAVE-F2W'), ('VOPT', "m", None, 'WAVE'), ('VOPT', "Hz", None, 'FREQ-W2F'), ('VELO', "Hz", None, 'FREQ-V2F'), ('WAVE', "Hz", None, 'FREQ-W2F'), ('FREQ', 'm/s', None, ValueError('A velocity convention must be specified')), ('FREQ', 'm/s', u.doppler_radio, 'VRAD'), ('FREQ', 'm/s', u.doppler_optical, 'VOPT-F2W'), ('FREQ', 'm/s', u.doppler_relativistic, 'VELO-F2V'), ('WAVE', 'm/s', u.doppler_radio, 'VRAD-W2F'))) def test_ctype_determinator(ctype,unit,velocity_convention,result): if isinstance(result, Exception): with pytest.raises(Exception) as exc: determine_ctype_from_vconv(ctype, unit, velocity_convention=velocity_convention) assert exc.value.args[0] == result.args[0] assert type(exc.value) == type(result) else: outctype = determine_ctype_from_vconv(ctype, unit, velocity_convention=velocity_convention) assert outctype == result @pytest.mark.parametrize(('ctype','vconv'), (('VELO-F2W', u.doppler_optical), ('VELO-F2V', u.doppler_relativistic), ('VRAD', u.doppler_radio), ('VOPT', u.doppler_optical), ('VELO', u.doppler_relativistic), ('WAVE', u.doppler_optical), ('WAVE-F2W', u.doppler_optical), ('WAVE-V2W', u.doppler_optical), ('FREQ', u.doppler_radio), ('FREQ-V2F', u.doppler_radio), ('FREQ-W2F', u.doppler_radio),)) def test_vconv_determinator(ctype, vconv): assert determine_vconv_from_ctype(ctype) == vconv @pytest.mark.parametrize(('name'), (('advs'), ('dvsa'), ('sdav'), ('sadv'), ('vsad'), ('vad'), ('adv'), )) def test_vopt_to_freq(name): h = fits.getheader(data_path(name+".fits")) wcs0 = wcs.WCS(h) # check to make sure astropy.wcs's "fix" changes VELO-HEL to VOPT assert 
wcs0.wcs.ctype[wcs0.wcs.spec] == 'VOPT' out_ctype = determine_ctype_from_vconv('VOPT', u.Hz) wcs1 = convert_spectral_axis(wcs0, u.Hz, out_ctype) assert wcs1.wcs.ctype[wcs1.wcs.spec] == 'FREQ-W2F' @pytest.mark.parametrize('wcstype',('Z','W','R','V','F')) def test_change_rest_frequency(wcstype): # This is the header extracted from Greisen 2006, including many examples # of valid transforms. It should be the gold standard (in principle) hdr = fits.Header.fromtextfile(data_path('greisen2006.hdr')) wcs0 = wcs.WCS(hdr, key=wcstype) old_rest = get_rest_value_from_wcs(wcs0) if old_rest is None: # This test doesn't matter if there was no rest frequency in the first # place but I prefer to keep the option open in case we want to try # forcing a rest frequency on some of the non-velocity frames at some # point return vconv1 = determine_vconv_from_ctype(hdr['CTYPE3'+wcstype]) new_rest = (100*u.km/u.s).to(u.Hz, vconv1(old_rest)) wcs1 = wcs.WCS(hdr, key='V') vconv2 = determine_vconv_from_ctype(hdr['CTYPE3V']) inunit = u.Unit(wcs0.wcs.cunit[wcs0.wcs.spec]) outunit = u.Unit(wcs1.wcs.cunit[wcs1.wcs.spec]) # VELO-F2V out_ctype = wcs1.wcs.ctype[wcs1.wcs.spec] wcs2 = convert_spectral_axis(wcs0, outunit, out_ctype, rest_value=new_rest) sp1 = wcs1.sub([wcs.WCSSUB_SPECTRAL]) sp2 = wcs2.sub([wcs.WCSSUB_SPECTRAL]) p_old = sp1.wcs_world2pix([old_rest.to(inunit, vconv1(old_rest)).value, new_rest.to(inunit, vconv1(old_rest)).value],0) p_new = sp2.wcs_world2pix([old_rest.to(outunit, vconv2(new_rest)).value, new_rest.to(outunit, vconv2(new_rest)).value],0) assert_allclose(p_old, p_new, rtol=1e-3) assert_allclose(p_old, p_new, rtol=1e-3) # from http://classic.sdss.org/dr5/products/spectra/vacwavelength.html # these aren't accurate enough for my liking, but I can't find a better one readily air_vac = { 'H-beta':(4861.363, 4862.721)*u.AA, '[O III]':(4958.911, 4960.295)*u.AA, '[O III]':(5006.843, 5008.239)*u.AA, '[N II]':(6548.05, 6549.86)*u.AA, 'H-alpha':(6562.801, 6564.614)*u.AA, '[N II]':(6583.45, 6585.27)*u.AA, '[S II]':(6716.44, 6718.29)*u.AA, '[S II]':(6730.82, 6732.68)*u.AA, } @pytest.mark.parametrize(('air','vac'), air_vac.values()) def test_air_to_vac(air, vac): # This is the accuracy provided by the line list we have. # I'm not sure if the formula are incorrect or if the reference wavelengths # are, but this is an accuracy of only 6 km/s, which is *very bad* for # astrophysical applications. 
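    # Back-of-the-envelope check of the "6 km/s" figure quoted above: the
    # 0.15 Angstrom tolerance used below, evaluated at H-alpha
    # (~6563 Angstrom), corresponds to
    #     dv = c * (0.15 / 6563) ~ (3e5 km/s) * 2.3e-5 ~ 7 km/s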
assert np.abs((air_to_vac(air)- vac)) < 0.15*u.AA assert np.abs((vac_to_air(vac)- air)) < 0.15*u.AA assert np.abs((air_to_vac(air)- vac)/vac) < 2e-5 assert np.abs((vac_to_air(vac)- air)/air) < 2e-5 # round tripping assert np.abs((vac_to_air(air_to_vac(air))-air))/air < 1e-8 assert np.abs((air_to_vac(vac_to_air(vac))-vac))/vac < 1e-8 def test_byhand_awav2vel(): # AWAV CRVAL3A = (6560*u.AA).to(u.m).value CDELT3A = (1.0*u.AA).to(u.m).value CUNIT3A = 'm' CRPIX3A = 1.0 # restwav MUST be vacuum restwl = air_to_vac(6562.81*u.AA) RESTWAV = restwl.to(u.m).value CRVAL3V = (CRVAL3A*u.m).to(u.m/u.s, u.doppler_optical(restwl)).value CDELT3V = (CDELT3A*u.m*air_to_vac_deriv(CRVAL3A*u.m)/restwl) * constants.c CUNIT3V = 'm/s' mywcs = wcs.WCS(naxis=1) mywcs.wcs.ctype[0] = 'AWAV' mywcs.wcs.crval[0] = CRVAL3A mywcs.wcs.crpix[0] = CRPIX3A mywcs.wcs.cunit[0] = CUNIT3A mywcs.wcs.cdelt[0] = CDELT3A mywcs.wcs.restwav = RESTWAV mywcs.wcs.set() newwcs = convert_spectral_axis(mywcs, u.km/u.s, determine_ctype_from_vconv(mywcs.wcs.ctype[0], u.km/u.s, 'optical')) newwcs.wcs.set() assert newwcs.wcs.cunit[0] == 'm / s' np.testing.assert_almost_equal(newwcs.wcs.crval, air_to_vac(CRVAL3A*u.m).to(u.m/u.s, u.doppler_optical(restwl)).value) # Check that the cdelts match the expected cdelt, 1 angstrom / rest # wavelength (vac) np.testing.assert_almost_equal(newwcs.wcs.cdelt, CDELT3V.to(u.m/u.s).value) # Check that the reference wavelength is 2.81 angstroms up np.testing.assert_almost_equal(newwcs.wcs_pix2world((2.81,), 0), 0.0, decimal=3) # Go through a full-on sanity check: vline = 100*u.km/u.s wave_line_vac = vline.to(u.AA, u.doppler_optical(restwl)) wave_line_air = vac_to_air(wave_line_vac) pix_line_input = mywcs.wcs_world2pix((wave_line_air.to(u.m).value,), 0) pix_line_output = newwcs.wcs_world2pix((vline.to(u.m/u.s).value,), 0) np.testing.assert_almost_equal(pix_line_output, pix_line_input, decimal=4) def test_byhand_awav2wav(): # AWAV CRVAL3A = (6560*u.AA).to(u.m).value CDELT3A = (1.0*u.AA).to(u.m).value CUNIT3A = 'm' CRPIX3A = 1.0 mywcs = wcs.WCS(naxis=1) mywcs.wcs.ctype[0] = 'AWAV' mywcs.wcs.crval[0] = CRVAL3A mywcs.wcs.crpix[0] = CRPIX3A mywcs.wcs.cunit[0] = CUNIT3A mywcs.wcs.cdelt[0] = CDELT3A mywcs.wcs.set() newwcs = convert_spectral_axis(mywcs, u.AA, 'WAVE') newwcs.wcs.set() np.testing.assert_almost_equal(newwcs.wcs_pix2world((0,),0), air_to_vac(mywcs.wcs_pix2world((0,),0)*u.m).value) np.testing.assert_almost_equal(newwcs.wcs_pix2world((10,),0), air_to_vac(mywcs.wcs_pix2world((10,),0)*u.m).value) # At least one of the components MUST change assert not (mywcs.wcs.crval[0] == newwcs.wcs.crval[0] and mywcs.wcs.crpix[0] == newwcs.wcs.crpix[0]) spectral-cube-0.3.1/spectral_cube/tests/test_spectral_cube.py0000644000077000000240000011421512647754466024517 0ustar adamstaff00000000000000from __future__ import print_function, absolute_import, division import operator import itertools import warnings import mmap from distutils.version import StrictVersion, LooseVersion # needed to test for warnings later warnings.simplefilter('always', UserWarning) import pytest from astropy.io import fits from astropy import units as u from astropy.wcs import WCS from astropy.wcs import _wcs import numpy as np from .. import (SpectralCube, BooleanArrayMask, FunctionMask, LazyMask, CompositeMask) from ..spectral_cube import OneDSpectrum, Projection from ..np_compat import allbadtonan from .. import spectral_axis from . 
import path from .helpers import assert_allclose, assert_array_equal try: import yt YT_INSTALLED = True YT_LT_301 = LooseVersion(yt.__version__) < LooseVersion('3.0.1') except ImportError: YT_INSTALLED = False YT_LT_301 = False try: import bottleneck BOTTLENECK_INSTALLED = True except ImportError: BOTTLENECK_INSTALLED = False try: from radio_beam import Beam RADIO_BEAM_INSTALLED = True except ImportError: RADIO_BEAM_INSTALLED = False NUMPY_LT_19 = LooseVersion(np.__version__) < LooseVersion('1.9.0') def cube_and_raw(filename): p = path(filename) d = fits.getdata(p) c = SpectralCube.read(p, format='fits') return c, d def test_arithmetic_warning(recwarn): cube, data = cube_and_raw('vda_Jybeam_lower.fits') assert not cube._is_huge # make sure the small cube raises a warning about loading into memory cube + 5*cube.unit w = recwarn.list[-1] assert 'requires loading the entire cube into' in str(w.message) def test_huge_disallowed(): cube, data = cube_and_raw('vda_Jybeam_lower.fits') cube = SpectralCube(data=data, wcs=cube.wcs) assert not cube._is_huge # We need to reduce the memory threshold rather than use a large cube to # make sure we don't use too much memory during testing. from .. import cube_utils OLD_MEMORY_THRESHOLD = cube_utils.MEMORY_THRESHOLD try: cube_utils.MEMORY_THRESHOLD = 10 assert cube._is_huge with pytest.raises(ValueError) as exc: cube + 5*cube.unit assert 'entire cube into memory' in exc.value.args[0] cube.allow_huge_operations = True # just make sure it doesn't fail cube + 5*cube.unit finally: cube_utils.MEMORY_THRESHOLD = OLD_MEMORY_THRESHOLD class BaseTest(object): def setup_method(self, method): c, d = cube_and_raw('adv.fits') mask = BooleanArrayMask(d > 0.5, c._wcs) c._mask = mask self.c = c self.mask = mask self.d = d class TestSpectralCube(object): @pytest.mark.parametrize(('name', 'trans'), ( ('advs', [0, 1, 2, 3]), ('dvsa', [2, 3, 0, 1]), ('sdav', [0, 2, 1, 3]), ('sadv', [0, 1, 2, 3]), ('vsad', [3, 0, 1, 2]), ('vad', [2, 0, 1]), ('vda', [0, 2, 1]), ('adv', [0, 1, 2]), )) def test_consistent_transposition(self, name, trans): """data() should return velocity axis first, then world 1, then world 0""" c, d = cube_and_raw(name + '.fits') expected = np.squeeze(d.transpose(trans)) assert_allclose(c._get_filled_data(), expected) @pytest.mark.parametrize(('file', 'view'), ( ('adv.fits', np.s_[:, :,:]), ('adv.fits', np.s_[::2, :, :2]), ('adv.fits', np.s_[0]), )) def test_world(self, file, view): p = path(file) d = fits.getdata(p) wcs = WCS(p) c = SpectralCube(d, wcs) shp = d.shape inds = np.indices(d.shape) pix = np.column_stack([i.ravel() for i in inds[::-1]]) world = wcs.all_pix2world(pix, 0).T world = [w.reshape(shp) for w in world] world = [w[view] * u.Unit(wcs.wcs.cunit[i]) for i, w in enumerate(world)][::-1] w2 = c.world[view] for result, expected in zip(w2, world): assert_allclose(result, expected) @pytest.mark.parametrize('view', (np.s_[:, :,:], np.s_[:2, :3, ::2])) def test_world_transposes_3d(self, view): c1, d1 = cube_and_raw('adv.fits') c2, d2 = cube_and_raw('vad.fits') for w1, w2 in zip(c1.world[view], c2.world[view]): assert_allclose(w1, w2) @pytest.mark.parametrize('view', (np.s_[:, :,:], np.s_[:2, :3, ::2], np.s_[::3, ::2, :1], np.s_[:], )) def test_world_transposes_4d(self, view): c1, d1 = cube_and_raw('advs.fits') c2, d2 = cube_and_raw('sadv.fits') for w1, w2 in zip(c1.world[view], c2.world[view]): assert_allclose(w1, w2) @pytest.mark.parametrize(('name','masktype','unit'), itertools.product(('advs', 'dvsa', 'sdav', 'sadv', 'vsad', 'vad', 'adv',), 
(BooleanArrayMask, LazyMask, FunctionMask, CompositeMask), ('Hz', u.Hz), ) ) def test_with_spectral_unit(self, name, masktype, unit): cube, data = cube_and_raw(name + '.fits') cube_freq = cube.with_spectral_unit(unit) if masktype == BooleanArrayMask: mask = BooleanArrayMask(data>0, wcs=cube._wcs) elif masktype == LazyMask: mask = LazyMask(lambda x: x>0, cube=cube) elif masktype == FunctionMask: mask = FunctionMask(lambda x: x>0) elif masktype == CompositeMask: mask1 = FunctionMask(lambda x: x>0) mask2 = LazyMask(lambda x: x>0, cube) mask = CompositeMask(mask1, mask2) cube2 = cube.with_mask(mask) cube_masked_freq = cube2.with_spectral_unit(unit) assert cube_freq._wcs.wcs.ctype[cube_freq._wcs.wcs.spec] == 'FREQ-W2F' assert cube_masked_freq._wcs.wcs.ctype[cube_masked_freq._wcs.wcs.spec] == 'FREQ-W2F' assert cube_masked_freq._mask._wcs.wcs.ctype[cube_masked_freq._mask._wcs.wcs.spec] == 'FREQ-W2F' # values taken from header rest = 1.42040571841E+09*u.Hz crval = -3.21214698632E+05*u.m/u.s outcv = crval.to(u.m, u.doppler_optical(rest)).to(u.Hz, u.spectral()) assert_allclose(cube_freq._wcs.wcs.crval[cube_freq._wcs.wcs.spec], outcv.to(u.Hz).value) assert_allclose(cube_masked_freq._wcs.wcs.crval[cube_masked_freq._wcs.wcs.spec], outcv.to(u.Hz).value) assert_allclose(cube_masked_freq._mask._wcs.wcs.crval[cube_masked_freq._mask._wcs.wcs.spec], outcv.to(u.Hz).value) @pytest.mark.parametrize(('operation', 'value'), ((operator.add, 0.5*u.K), (operator.sub, 0.5*u.K), (operator.mul, 0.5*u.K), (operator.truediv, 0.5*u.K), (operator.div if hasattr(operator,'div') else operator.floordiv, 0.5*u.K), )) def test_apply_everywhere(self, operation, value): c1, d1 = cube_and_raw('advs.fits') # append 'o' to indicate that it has been operated on c1o = c1._apply_everywhere(operation, value) d1o = operation(u.Quantity(d1, u.K), value) assert np.all(d1o == c1o.filled_data[:]) # allclose fails on identical data? 
#assert_allclose(d1o, c1o.filled_data[:]) @pytest.mark.parametrize(('name', 'trans'), ( ('advs', [0, 1, 2, 3]), ('dvsa', [2, 3, 0, 1]), ('sdav', [0, 2, 1, 3]), ('sadv', [0, 1, 2, 3]), ('vsad', [3, 0, 1, 2]), ('vad', [2, 0, 1]), ('vda', [0, 2, 1]), ('adv', [0, 1, 2]), )) def test_getitem(self, name, trans): c, d = cube_and_raw(name + '.fits') expected = np.squeeze(d.transpose(trans)) assert_allclose(c[0,:,:].value, expected[0,:,:]) assert_allclose(c[:,:,0].value, expected[:,:,0]) assert_allclose(c[:,0,:].value, expected[:,0,:]) # Not implemented: #assert_allclose(c[0,0,:].value, expected[0,0,:]) #assert_allclose(c[0,:,0].value, expected[0,:,0]) assert_allclose(c[:,0,0].value, expected[:,0,0]) assert_allclose(c[1,:,:].value, expected[1,:,:]) assert_allclose(c[:,:,1].value, expected[:,:,1]) assert_allclose(c[:,1,:].value, expected[:,1,:]) # Not implemented: #assert_allclose(c[1,1,:].value, expected[1,1,:]) #assert_allclose(c[1,:,1].value, expected[1,:,1]) assert_allclose(c[:,1,1].value, expected[:,1,1]) c2 = c.with_spectral_unit(u.km/u.s, velocity_convention='radio') assert_allclose(c2[0,:,:].value, expected[0,:,:]) assert_allclose(c2[:,:,0].value, expected[:,:,0]) assert_allclose(c2[:,0,:].value, expected[:,0,:]) # Not implemented: #assert_allclose(c2[0,0,:].value, expected[0,0,:]) #assert_allclose(c2[0,:,0].value, expected[0,:,0]) assert_allclose(c2[:,0,0].value, expected[:,0,0]) assert_allclose(c2[1,:,:].value, expected[1,:,:]) assert_allclose(c2[:,:,1].value, expected[:,:,1]) assert_allclose(c2[:,1,:].value, expected[:,1,:]) # Not implemented: #assert_allclose(c2[1,1,:].value, expected[1,1,:]) #assert_allclose(c2[1,:,1].value, expected[1,:,1]) assert_allclose(c2[:,1,1].value, expected[:,1,1]) class TestArithmetic(object): def setup_method(self, method): self.c1, self.d1 = cube_and_raw('adv.fits') # make nice easy-to-test numbers self.d1.flat[:] = np.arange(self.d1.size) self.c1._data.flat[:] = np.arange(self.d1.size) @pytest.mark.parametrize(('value'),(1,1.0,2,2.0)) def test_add(self,value): d2 = self.d1 + value c2 = self.c1 + value*u.K assert np.all(d2 == c2.filled_data[:].value) assert c2.unit == u.K def test_add_cubes(self): d2 = self.d1 + self.d1 c2 = self.c1 + self.c1 assert np.all(d2 == c2.filled_data[:].value) assert c2.unit == u.K @pytest.mark.parametrize(('value'),(1,1.0,2,2.0)) def test_subtract(self, value): d2 = self.d1 - value c2 = self.c1 - value*u.K assert np.all(d2 == c2.filled_data[:].value) assert c2.unit == u.K # regression test #251: the _data attribute must not be a quantity assert not hasattr(c2._data, 'unit') def test_subtract_cubes(self): d2 = self.d1 - self.d1 c2 = self.c1 - self.c1 assert np.all(d2 == c2.filled_data[:].value) assert np.all(c2.filled_data[:].value == 0) assert c2.unit == u.K # regression test #251: the _data attribute must not be a quantity assert not hasattr(c2._data, 'unit') @pytest.mark.parametrize(('value'),(1,1.0,2,2.0)) def test_mul(self, value): d2 = self.d1 * value c2 = self.c1 * value assert np.all(d2 == c2.filled_data[:].value) assert c2.unit == u.K def test_mul_cubes(self): d2 = self.d1 * self.d1 c2 = self.c1 * self.c1 assert np.all(d2 == c2.filled_data[:].value) assert c2.unit == u.K**2 @pytest.mark.parametrize(('value'),(1,1.0,2,2.0)) def test_div(self, value): d2 = self.d1 / value c2 = self.c1 / value assert np.all(d2 == c2.filled_data[:].value) assert c2.unit == u.K def test_div_cubes(self): d2 = self.d1 / self.d1 c2 = self.c1 / self.c1 assert np.all((d2 == c2.filled_data[:].value) | (np.isnan(c2.filled_data[:]))) assert 
np.all((c2.filled_data[:] == 1) | (np.isnan(c2.filled_data[:]))) assert c2.unit == u.dimensionless_unscaled @pytest.mark.parametrize(('value'), (1,1.0,2,2.0)) def test_pow(self, value): d2 = self.d1 ** value c2 = self.c1 ** value assert np.all(d2 == c2.filled_data[:].value) assert c2.unit == u.K**value def test_cube_add(self): c2 = self.c1 + self.c1 d2 = self.d1 + self.d1 assert np.all(d2 == c2.filled_data[:].value) assert c2.unit == u.K class TestFilters(BaseTest): def test_mask_data(self): c, d = self.c, self.d expected = np.where(d > .5, d, np.nan) assert_allclose(c._get_filled_data(), expected) expected = np.where(d > .5, d, 0) assert_allclose(c._get_filled_data(fill=0), expected) @pytest.mark.parametrize('operation', (operator.lt, operator.gt, operator.le, operator.ge)) def test_mask_comparison(self, operation): c, d = self.c, self.d dmask = operation(d, 0.6) & self.c.mask.include() cmask = operation(c, 0.6*u.K) assert (self.c.mask.include() & cmask.include()).sum() == dmask.sum() np.testing.assert_almost_equal(c.with_mask(cmask).sum().value, d[dmask].sum()) def test_flatten(self): c, d = self.c, self.d expected = d[d > 0.5] assert_allclose(c.flattened(), expected) def test_flatten_weights(self): c, d = self.c, self.d expected = d[d > 0.5] ** 2 assert_allclose(c.flattened(weights=d), expected) def test_slice(self): c, d = self.c, self.d expected = d[:3, :2, ::2] expected = expected[expected > 0.5] assert_allclose(c[0:3, 0:2, 0::2].flattened(), expected) class TestNumpyMethods(BaseTest): def _check_numpy(self, cubemethod, array, func): for axis in [None, 0, 1, 2]: for how in ['auto', 'slice', 'cube', 'ray']: expected = func(array, axis=axis) actual = cubemethod(axis=axis) assert_allclose(actual, expected) def test_sum(self): d = np.where(self.d > 0.5, self.d, np.nan) self._check_numpy(self.c.sum, d, allbadtonan(np.nansum)) # Need a secondary check to make sure it works with no # axis keyword being passed (regression test for issue introduced in # 150) assert np.all(self.c.sum().value == np.nansum(d)) def test_max(self): d = np.where(self.d > 0.5, self.d, np.nan) self._check_numpy(self.c.max, d, np.nanmax) def test_min(self): d = np.where(self.d > 0.5, self.d, np.nan) self._check_numpy(self.c.min, d, np.nanmin) def test_argmax(self): d = np.where(self.d > 0.5, self.d, -10) self._check_numpy(self.c.argmax, d, np.nanargmax) def test_argmin(self): d = np.where(self.d > 0.5, self.d, 10) self._check_numpy(self.c.argmin, d, np.nanargmin) @pytest.mark.parametrize('iterate_rays', (True,False)) def test_median(self, iterate_rays): # Make sure that medians ignore empty/bad/NaN values m = np.empty(self.d.shape[1:]) for y in range(m.shape[0]): for x in range(m.shape[1]): ray = self.d[:, y, x] # the cube mask is for values >0.5 ray = ray[ray > 0.5] m[y, x] = np.median(ray) scmed = self.c.median(axis=0, iterate_rays=iterate_rays) assert_allclose(scmed, m) assert not np.any(np.isnan(scmed.value)) assert scmed.unit == self.c.unit @pytest.mark.skipif('NUMPY_LT_19') def test_bad_median_apply(self): # this is a test for manually-applied numpy medians, which are different # from the cube.median method that does "the right thing" # # for regular median, we expect a failure, which is why we don't use # regular median. 
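# (Descriptive note, ours: np.median has no special NaN handling, so the NaNs that fill masked voxels leak into the result; how many output pixels come out NaN shifted between numpy releases, hence the version-dependent counts below.)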
scmed = self.c.apply_numpy_function(np.median, axis=0) if StrictVersion(np.__version__) <= StrictVersion('1.9.3'): assert np.count_nonzero(np.isnan(scmed)) == 5 else: assert np.count_nonzero(np.isnan(scmed)) == 6 scmed = self.c.apply_numpy_function(np.nanmedian, axis=0) assert np.count_nonzero(np.isnan(scmed)) == 0 # use a more aggressive mask to force there to be some all-nan axes m2 = self.c>0.65*self.c.unit scmed = self.c.with_mask(m2).apply_numpy_function(np.nanmedian, axis=0) assert np.count_nonzero(np.isnan(scmed)) == 1 @pytest.mark.parametrize('iterate_rays', (True,False)) def test_bad_median(self, iterate_rays): # This should have the same result as np.nanmedian, though it might be # faster if bottleneck loads scmed = self.c.median(axis=0, iterate_rays=iterate_rays) assert np.count_nonzero(np.isnan(scmed)) == 0 m2 = self.c>0.65*self.c.unit scmed = self.c.with_mask(m2).median(axis=0, iterate_rays=iterate_rays) assert np.count_nonzero(np.isnan(scmed)) == 1 @pytest.mark.parametrize(('pct', 'iterate_rays'), (zip((3,25,50,75,97)*2,(True,)*5 + (False,)*5))) def test_percentile(self, pct, iterate_rays): m = np.empty(self.d.sum(axis=0).shape) for y in range(m.shape[0]): for x in range(m.shape[1]): ray = self.d[:, y, x] ray = ray[ray > 0.5] m[y, x] = np.percentile(ray, pct) scpct = self.c.percentile(pct, axis=0, iterate_rays=iterate_rays) assert_allclose(scpct, m) assert not np.any(np.isnan(scpct.value)) assert scpct.unit == self.c.unit @pytest.mark.parametrize('method', ('sum', 'min', 'max', 'median', 'argmin', 'argmax')) def test_transpose(self, method): c1, d1 = cube_and_raw('adv.fits') c2, d2 = cube_and_raw('vad.fits') for axis in [None, 0, 1, 2]: assert_allclose(getattr(c1, method)(axis=axis), getattr(c2, method)(axis=axis)) class TestSlab(BaseTest): def test_closest_spectral_channel(self): c = self.c ms = u.m / u.s assert c.closest_spectral_channel(-321214.698632 * ms) == 0 assert c.closest_spectral_channel(-319926.48366321 * ms) == 1 assert c.closest_spectral_channel(-318638.26869442 * ms) == 2 assert c.closest_spectral_channel(-320000 * ms) == 1 assert c.closest_spectral_channel(-340000 * ms) == 0 assert c.closest_spectral_channel(0 * ms) == 3 def test_spectral_channel_bad_units(self): with pytest.raises(u.UnitsError) as exc: self.c.closest_spectral_channel(1 * u.s) assert exc.value.args[0] == "'value' should be in frequency equivalent or velocity units (got s)" with pytest.raises(u.UnitsError) as exc: self.c.closest_spectral_channel(1. 
* u.Hz) assert exc.value.args[0] == "Spectral axis is in velocity units and 'value' is in frequency-equivalent units - use SpectralCube.with_spectral_unit first to convert the cube to frequency-equivalent units, or search for a velocity instead" def test_slab(self): ms = u.m / u.s c2 = self.c.spectral_slab(-320000 * ms, -318600 * ms) assert_allclose(c2._data, self.d[1:3]) assert c2._mask is not None def test_slab_reverse_limits(self): ms = u.m / u.s c2 = self.c.spectral_slab(-318600 * ms, -320000 * ms) assert_allclose(c2._data, self.d[1:3]) assert c2._mask is not None def test_slab_preserves_wcs(self): # regression test ms = u.m / u.s crpix = list(self.c._wcs.wcs.crpix) self.c.spectral_slab(-318600 * ms, -320000 * ms) assert list(self.c._wcs.wcs.crpix) == crpix class TestRepr(BaseTest): def test_repr(self): assert repr(self.c) == """ SpectralCube with shape=(4, 3, 2) and unit=K: n_x: 2 type_x: RA---SIN unit_x: deg range: 24.062698 deg: 24.063349 deg n_y: 3 type_y: DEC--SIN unit_y: deg range: 29.934094 deg: 29.935209 deg n_s: 4 type_s: VOPT unit_s: m / s range: -321214.699 m / s: -317350.054 m / s """.strip() def test_repr_withunit(self): self.c._unit = u.Jy assert repr(self.c) == """ SpectralCube with shape=(4, 3, 2) and unit=Jy: n_x: 2 type_x: RA---SIN unit_x: deg range: 24.062698 deg: 24.063349 deg n_y: 3 type_y: DEC--SIN unit_y: deg range: 29.934094 deg: 29.935209 deg n_s: 4 type_s: VOPT unit_s: m / s range: -321214.699 m / s: -317350.054 m / s """.strip() @pytest.mark.skipif('not YT_INSTALLED') class TestYt(): def setup_method(self, method): self.cube = SpectralCube.read(path('adv.fits')) # Without any special arguments self.ytc1 = self.cube.to_yt() # With spectral factor = 0.5 self.spectral_factor = 0.5 self.ytc2 = self.cube.to_yt(spectral_factor=self.spectral_factor) # With nprocs = 4 self.nprocs = 4 self.ytc3 = self.cube.to_yt(nprocs=self.nprocs) def test_yt(self): # The following assertions just make sure everything is # kosher with the datasets generated in different ways ytc1,ytc2,ytc3 = self.ytc1,self.ytc2,self.ytc3 ds1,ds2,ds3 = ytc1.dataset, ytc2.dataset, ytc3.dataset assert_array_equal(ds1.domain_dimensions, ds2.domain_dimensions) assert_array_equal(ds2.domain_dimensions, ds3.domain_dimensions) assert_allclose(ds1.domain_left_edge.value, ds2.domain_left_edge.value) assert_allclose(ds2.domain_left_edge.value, ds3.domain_left_edge.value) assert_allclose(ds1.domain_width.value, ds2.domain_width.value*np.array([1,1,1.0/self.spectral_factor])) assert_allclose(ds1.domain_width.value, ds3.domain_width.value) assert self.nprocs == len(ds3.index.grids) assert ds1.spec_cube assert ds2.spec_cube assert ds3.spec_cube ds1.index ds2.index ds3.index unit1 = ds1.field_info["fits","flux"].units unit2 = ds2.field_info["fits","flux"].units unit3 = ds3.field_info["fits","flux"].units ds1.quan(1.0,unit1) ds2.quan(1.0,unit2) ds3.quan(1.0,unit3) @pytest.mark.skipif('YT_LT_301', reason='yt 3.0 has a FITS-related bug') def test_yt_fluxcompare(self): # Now check that we can compute quantities of the flux # and that they are equal ytc1,ytc2,ytc3 = self.ytc1,self.ytc2,self.ytc3 ds1,ds2,ds3 = ytc1.dataset, ytc2.dataset, ytc3.dataset dd1 = ds1.all_data() dd2 = ds2.all_data() dd3 = ds3.all_data() flux1_tot = dd1.quantities.total_quantity("flux") flux2_tot = dd2.quantities.total_quantity("flux") flux3_tot = dd3.quantities.total_quantity("flux") flux1_min, flux1_max = dd1.quantities.extrema("flux") flux2_min, flux2_max = dd2.quantities.extrema("flux") flux3_min, flux3_max = dd3.quantities.extrema("flux") 
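# The three datasets differ only in spectral_factor (grid scaling) and nprocs (domain decomposition), neither of which should change the total flux or its extrema, so all three must agree: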
assert flux1_tot == flux2_tot assert flux1_tot == flux3_tot assert flux1_min == flux2_min assert flux1_min == flux3_min assert flux1_max == flux2_max assert flux1_max == flux3_max def test_yt_roundtrip_wcs(self): # Now test round-trip conversions between yt and world coordinates ytc1,ytc2,ytc3 = self.ytc1,self.ytc2,self.ytc3 ds1,ds2,ds3 = ytc1.dataset, ytc2.dataset, ytc3.dataset yt_coord1 = ds1.domain_left_edge + np.random.random(size=3)*ds1.domain_width world_coord1 = ytc1.yt2world(yt_coord1) assert_allclose(ytc1.world2yt(world_coord1), yt_coord1.value) yt_coord2 = ds2.domain_left_edge + np.random.random(size=3)*ds2.domain_width world_coord2 = ytc2.yt2world(yt_coord2) assert_allclose(ytc2.world2yt(world_coord2), yt_coord2.value) yt_coord3 = ds3.domain_left_edge + np.random.random(size=3)*ds3.domain_width world_coord3 = ytc3.yt2world(yt_coord3) assert_allclose(ytc3.world2yt(world_coord3), yt_coord3.value) def test_read_write_roundtrip(tmpdir): cube = SpectralCube.read(path('adv.fits')) tmp_file = str(tmpdir.join('test.fits')) cube.write(tmp_file) cube2 = SpectralCube.read(tmp_file) assert cube.shape == cube2.shape assert_allclose(cube._data, cube2._data) if (((hasattr(_wcs, '__version__') and StrictVersion(_wcs.__version__) < StrictVersion('5.9')) or not hasattr(_wcs, '__version__'))): # see https://github.com/astropy/astropy/pull/3992 for reasons: # we should upgrade this for 5.10 when the absolute accuracy is # maximized assert cube._wcs.to_header_string() == cube2._wcs.to_header_string() # in 5.11 and maybe even 5.12, the round trip fails. Maybe # https://github.com/astropy/astropy/issues/4292 will solve it? @pytest.mark.parametrize(('memmap', 'base'), ((True, mmap.mmap), (False, None))) def test_read_memmap(memmap, base): cube = SpectralCube.read(path('adv.fits'), memmap=memmap) bb = cube.base while hasattr(bb, 'base'): bb = bb.base if base is None: assert bb is None else: assert isinstance(bb, base) def _dummy_cube(): data = np.array([[[0, 1, 2, 3, 4]]]) wcs = WCS(naxis=3) wcs.wcs.ctype = ['RA---TAN', 'DEC--TAN', 'VELO-HEL'] def lower_threshold(data, wcs, view=()): return data[view] > 0 m1 = FunctionMask(lower_threshold) cube = SpectralCube(data, wcs=wcs, mask=m1) return cube def test_with_mask(): def upper_threshold(data, wcs, view=()): return data[view] < 3 m2 = FunctionMask(upper_threshold) cube = _dummy_cube() cube2 = cube.with_mask(m2) assert_allclose(cube._get_filled_data(), [[[np.nan, 1, 2, 3, 4]]]) assert_allclose(cube2._get_filled_data(), [[[np.nan, 1, 2, np.nan, np.nan]]]) def test_with_mask_with_boolean_array(): cube = _dummy_cube() mask = cube._data > 2 cube2 = cube.with_mask(mask, inherit_mask=False) assert isinstance(cube2._mask, BooleanArrayMask) assert cube2._mask._wcs is cube._wcs assert cube2._mask._mask is mask def test_with_mask_with_bad_array_shape(): cube = _dummy_cube() mask = np.zeros((5, 5), dtype=np.bool) with pytest.raises(ValueError) as exc: cube.with_mask(mask) assert exc.value.args[0] == ("Mask shape is not broadcastable to data shape: " "(5, 5) vs (1, 1, 5)") class TestMasks(BaseTest): @pytest.mark.parametrize('op', (operator.gt, operator.lt, operator.le, operator.ge)) def test_operator_threshold(self, op): # choose thresh to exercise proper equality tests thresh = self.d.ravel()[0] m = op(self.c, thresh*u.K) self.c._mask = m expected = self.d[op(self.d, thresh)] actual = self.c.flattened() assert_allclose(actual, expected)
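# An illustrative sketch (not from the original suite): the comparison
# operators exercised by TestMasks above are the usual way to build a
# threshold mask by hand. It reuses the 'adv.fits' fixture from
# cube_and_raw; the _example_* name is ours.
def _example_threshold_mask():
    cube, data = cube_and_raw('adv.fits')
    # comparing a cube against a Quantity returns a lazy mask object
    mask = cube > 0.5 * u.K
    # attach the mask, then extract the included voxels as a 1D Quantity
    return cube.with_mask(mask).flattened()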
def test_preserve_spectral_unit(): # astropy.wcs has a tendency to change spectral units from e.g. km/s to # m/s, so we have a workaround - check that it works. cube, data = cube_and_raw('advs.fits') cube_freq = cube.with_spectral_unit(u.GHz) assert cube_freq.wcs.wcs.cunit[2] == 'Hz' # check internal assert cube_freq.spectral_axis.unit is u.GHz # Check that this preferred unit is propagated new_cube = cube_freq.with_fill_value(fill_value=3.4) assert new_cube.spectral_axis.unit is u.GHz @pytest.mark.skipif('not BOTTLENECK_INSTALLED') def test_endians(): """ Test that the endianness checking returns something in Native form (this is only needed for non-numpy functions that worry about the endianness of their data) WARNING: Because the endianness is machine-dependent, this may fail on different architectures! This is because numpy automatically converts little-endian to native in the dtype parameter; I need a workaround for this. """ big = np.array([[[1],[2]]], dtype='>f4') lil = np.array([[[1],[2]]], dtype='<f4') mywcs = WCS(naxis=3) mywcs.wcs.ctype[0] = 'RA' mywcs.wcs.ctype[1] = 'DEC' mywcs.wcs.ctype[2] = 'VELO' bigcube = SpectralCube(data=big, wcs=mywcs) xbig = bigcube._get_filled_data(check_endian=True) lilcube = SpectralCube(data=lil, wcs=mywcs) xlil = lilcube._get_filled_data(check_endian=True) assert xbig.dtype.byteorder == '=' assert xlil.dtype.byteorder == '=' def test_header_naxis(): cube, data = cube_and_raw('advs.fits') assert cube.header['NAXIS'] == 3 # NOT data.ndim == 4 assert cube.header['NAXIS1'] == data.shape[3] assert cube.header['NAXIS2'] == data.shape[2] assert cube.header['NAXIS3'] == data.shape[1] assert 'NAXIS4' not in cube.header def test_slicing(): cube, data = cube_and_raw('advs.fits') # just to check that we're starting in the right place assert cube.shape == (2,3,4) sl = cube[:,1,:] assert sl.shape == (2,4) v = cube[1:2,:,:] assert v.shape == (1,3,4) assert cube[:,:,:].shape == (2,3,4) assert cube[:,:].shape == (2,3,4) assert cube[:].shape == (2,3,4) assert cube[:1,:1,:1].shape == (1,1,1) @pytest.mark.parametrize(('view','naxis'), [((slice(None), 1, slice(None)), 2), ((1, slice(None), slice(None)), 2), ((slice(None), slice(None), 1), 2), ((slice(None), slice(None), slice(1)), 3), ((slice(1), slice(1), slice(1)), 3), ]) def test_slice_wcs(view, naxis): cube, data = cube_and_raw('advs.fits') sl = cube[view] assert sl.wcs.naxis == naxis def test_header_units_consistent(): cube, data = cube_and_raw('advs.fits') cube_kms = cube.with_spectral_unit(u.km/u.s) cube_Mms = cube.with_spectral_unit(u.Mm/u.s) assert cube.header['CUNIT3'] == 'm s-1' assert cube_kms.header['CUNIT3'] == 'km s-1' assert cube_Mms.header['CUNIT3'] == 'Mm s-1' # Wow, the tolerance here is really terrible...
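# (Note, ours: CDELT3 is stored in the header's own spectral unit, so m/s -> km/s should scale it by 1e-3 and m/s -> Mm/s by 1e-6; the generous rtol/atol below absorb rounding from the WCS unit conversion.)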
assert_allclose(cube_Mms.header['CDELT3'], cube.header['CDELT3']/1e6,rtol=1e-3,atol=1e-5) assert_allclose(cube.header['CDELT3']/1e3, cube_kms.header['CDELT3'],rtol=1e-2,atol=1e-5) cube_freq = cube.with_spectral_unit(u.Hz) assert cube_freq.header['CUNIT3'] == 'Hz' cube_freq_GHz = cube.with_spectral_unit(u.GHz) assert cube_freq_GHz.header['CUNIT3'] == 'GHz' def test_spectral_unit_conventions(): cube, data = cube_and_raw('advs.fits') cube_frq = cube.with_spectral_unit(u.Hz) cube_opt = cube.with_spectral_unit(u.km/u.s, rest_value=cube_frq.spectral_axis[0], velocity_convention='optical') cube_rad = cube.with_spectral_unit(u.km/u.s, rest_value=cube_frq.spectral_axis[0], velocity_convention='radio') cube_rel = cube.with_spectral_unit(u.km/u.s, rest_value=cube_frq.spectral_axis[0], velocity_convention='relativistic') # should all be exactly 0 km/s for x in (cube_rel.spectral_axis[0], cube_rad.spectral_axis[0], cube_opt.spectral_axis[0]): np.testing.assert_almost_equal(0,x.value) assert cube_rel.spectral_axis[1] != cube_rad.spectral_axis[1] assert cube_opt.spectral_axis[1] != cube_rad.spectral_axis[1] assert cube_rel.spectral_axis[1] != cube_opt.spectral_axis[1] assert cube_rel.velocity_convention == u.doppler_relativistic assert cube_rad.velocity_convention == u.doppler_radio assert cube_opt.velocity_convention == u.doppler_optical def test_invalid_spectral_unit_conventions(): cube, data = cube_and_raw('advs.fits') with pytest.raises(ValueError) as exc: cube.with_spectral_unit(u.km/u.s, velocity_convention='invalid velocity convention') assert exc.value.args[0] == ("Velocity convention must be radio, optical, " "or relativistic.") @pytest.mark.parametrize('rest', (50, 50*u.K)) def test_invalid_rest(rest): cube, data = cube_and_raw('advs.fits') with pytest.raises(ValueError) as exc: cube.with_spectral_unit(u.km/u.s, velocity_convention='radio', rest_value=rest) assert exc.value.args[0] == ("Rest value must be specified as an astropy " "quantity with spectral equivalence.") def test_airwave_to_wave(): cube, data = cube_and_raw('advs.fits') cube._wcs.wcs.ctype[2] = 'AWAV' cube._wcs.wcs.cunit[2] = 'm' cube._spectral_unit = u.m cube._wcs.wcs.cdelt[2] = 1e-7 cube._wcs.wcs.crval[2] = 5e-7 ax1 = cube.spectral_axis ax2 = cube.with_spectral_unit(u.m).spectral_axis np.testing.assert_almost_equal(spectral_axis.air_to_vac(ax1).value, ax2.value) @pytest.mark.parametrize(('func','how','axis'), itertools.product(('sum','std','max','min','mean'), ('slice','cube','auto'), (0,1,2) )) def test_twod_numpy(func, how, axis): # Check that a numpy function returns the correct result when applied along # one axis # This is partly a regression test for #211 cube, data = cube_and_raw('advs.fits') cube._meta['BUNIT'] = 'K' cube._unit = u.K proj = getattr(cube,func)(axis=axis, how=how) # data has a redundant 1st axis dproj = getattr(data,func)(axis=(0,axis+1)).squeeze() assert isinstance(proj, Projection) np.testing.assert_equal(proj.value, dproj) assert cube.unit == proj.unit @pytest.mark.parametrize('func',('sum','std','max','min','mean')) def test_oned_numpy(func): # Check that a numpy function returns an appropriate spectrum cube, data = cube_and_raw('advs.fits') cube._meta['BUNIT'] = 'K' cube._unit = u.K spec = getattr(cube,func)(axis=(1,2)) dspec = getattr(data,func)(axis=(2,3)).squeeze() assert isinstance(spec, OneDSpectrum) # data has a redundant 1st axis np.testing.assert_equal(spec.value, dspec) assert cube.unit == spec.unit def test_oned_slice(): # Check that a slice returns an appropriate spectrum cube, data = 
cube_and_raw('advs.fits') cube._meta['BUNIT'] = 'K' cube._unit = u.K spec = cube[:,0,0] assert isinstance(spec, OneDSpectrum) # data has a redundant 1st axis np.testing.assert_equal(spec.value, data[0,:,0,0]) assert cube.unit == spec.unit def test_preserve_bunit(): cube, data = cube_and_raw('advs.fits') assert cube.header['BUNIT'] == 'K' hdu = fits.open(path('advs.fits'))[0] hdu.header['BUNIT'] = 'Jy' cube = SpectralCube.read(hdu) assert cube.unit == u.Jy assert cube.header['BUNIT'] == 'Jy' @pytest.mark.skipif('not RADIO_BEAM_INSTALLED') def test_preserve_beam(): cube, data = cube_and_raw('advs.fits') beam = Beam.from_fits_header(path("advs.fits")) assert cube.beam == beam @pytest.mark.skipif('not RADIO_BEAM_INSTALLED') def test_append_beam_to_hdr(): cube, data = cube_and_raw('advs.fits') orig_hdr = fits.getheader(path('advs.fits')) assert cube.header['BMAJ'] == orig_hdr['BMAJ'] assert cube.header['BMIN'] == orig_hdr['BMIN'] assert cube.header['BPA'] == orig_hdr['BPA'] def test_cube_with_swapped_axes(): """ Regression test for #208 """ cube, data = cube_and_raw('vda.fits') # Check that masking works (this should apply a lazy mask) cube.filled_data[:] def test_jybeam_upper(): cube, data = cube_and_raw('vda_JYBEAM_upper.fits') assert cube.unit == u.Jy if RADIO_BEAM_INSTALLED: assert hasattr(cube, 'beam') np.testing.assert_almost_equal(cube.beam.sr.value, (((1*u.arcsec/np.sqrt(8*np.log(2)))**2).to(u.sr)*2*np.pi).value) def test_jybeam_lower(): cube, data = cube_and_raw('vda_Jybeam_lower.fits') assert cube.unit == u.Jy if RADIO_BEAM_INSTALLED: assert hasattr(cube, 'beam') np.testing.assert_almost_equal(cube.beam.sr.value, (((1*u.arcsec/np.sqrt(8*np.log(2)))**2).to(u.sr)*2*np.pi).value) # Regression test for #257 (https://github.com/radio-astro-tools/spectral-cube/pull/257) def test_jybeam_whitespace(): cube, data = cube_and_raw('vda_Jybeam_whitespace.fits') assert cube.unit == u.Jy if RADIO_BEAM_INSTALLED: assert hasattr(cube, 'beam') np.testing.assert_almost_equal(cube.beam.sr.value, (((1*u.arcsec/np.sqrt(8*np.log(2)))**2).to(u.sr)*2*np.pi).value) @pytest.mark.skipif('not RADIO_BEAM_INSTALLED') def test_beam_proj_meta(): cube, data = cube_and_raw('advs.fits') moment = cube.moment0(axis=0) # regression test for #250 assert 'beam' in moment.meta assert 'BMAJ' in moment.hdu.header slc = cube[0,:,:] assert 'beam' in slc.meta proj = cube.max(axis=0) assert 'beam' in proj.meta def test_proj_meta(): cube, data = cube_and_raw('advs.fits') moment = cube.moment0(axis=0) assert 'BUNIT' in moment.meta assert moment.meta['BUNIT'] == 'K' slc = cube[0,:,:] assert 'BUNIT' in slc.meta assert slc.meta['BUNIT'] == 'K' proj = cube.max(axis=0) assert 'BUNIT' in proj.meta assert proj.meta['BUNIT'] == 'K' def test_pix_sign(): cube, data = cube_and_raw('advs.fits') s,y,x = (cube._pix_size_slice(ii) for ii in range(3)) assert s>0 assert y>0 assert x>0 cube.wcs.wcs.cdelt *= -1 s,y,x = (cube._pix_size_slice(ii) for ii in range(3)) assert s>0 assert y>0 assert x>0 cube.wcs.wcs.pc *= -1 s,y,x = (cube._pix_size_slice(ii) for ii in range(3)) assert s>0 assert y>0 assert x>0 spectral-cube-0.3.1/spectral_cube/tests/test_stokes_spectral_cube.py0000644000077000000240000001600312647754466026103 0ustar adamstaff00000000000000from collections import OrderedDict import numpy as np from numpy.testing import assert_allclose, assert_equal from astropy.wcs import WCS from astropy.tests.helper import pytest from astropy.utils import OrderedDict, NumpyRNGContext from ..spectral_cube import SpectralCube from ..stokes_spectral_cube import 
StokesSpectralCube from ..masks import BooleanArrayMask class TestStokesSpectralCube(): def setup_class(self): self.wcs = WCS(naxis=3) self.wcs.wcs.ctype = ['RA---TAN', 'DEC--TAN', 'FREQ'] self.data = np.arange(4)[:, None, None, None] * np.ones((5, 20, 30)) def test_direct_init(self): stokes_data = dict(I=SpectralCube(self.data[0], wcs=self.wcs), Q=SpectralCube(self.data[1], wcs=self.wcs), U=SpectralCube(self.data[2], wcs=self.wcs), V=SpectralCube(self.data[3], wcs=self.wcs)) cube = StokesSpectralCube(stokes_data) def test_direct_init_invalid_type(self): stokes_data = dict(I=self.data[0], Q=self.data[1], U=self.data[2], V=self.data[3]) with pytest.raises(TypeError) as exc: cube = StokesSpectralCube(stokes_data) assert exc.value.args[0] == "stokes_data should be a dictionary of SpectralCube objects" def test_direct_init_invalid_shape(self): stokes_data = dict(I=SpectralCube(np.ones((6, 2, 30)), wcs=self.wcs), Q=SpectralCube(self.data[1], wcs=self.wcs), U=SpectralCube(self.data[2], wcs=self.wcs), V=SpectralCube(self.data[3], wcs=self.wcs)) with pytest.raises(ValueError) as exc: cube = StokesSpectralCube(stokes_data) assert exc.value.args[0] == "All spectral cubes should have the same shape" @pytest.mark.parametrize('component', ('I', 'Q', 'U', 'V', 'RR', 'RL', 'LR', 'LL')) def test_valid_component_name(self, component): stokes_data = {component: SpectralCube(self.data[0], wcs=self.wcs)} cube = StokesSpectralCube(stokes_data) assert cube.components == [component] @pytest.mark.parametrize('component', ('A', 'B', 'IQUV')) def test_invalid_component_name(self, component): stokes_data = {component: SpectralCube(self.data[0], wcs=self.wcs)} with pytest.raises(ValueError) as exc: cube = StokesSpectralCube(stokes_data) assert exc.value.args[0] == "Invalid Stokes component: {0} - should be one of I, Q, U, V, RR, LL, RL, LR".format(component) def test_invalid_wcs(self): wcs2 = WCS(naxis=3) wcs2.wcs.ctype = ['GLON-CAR', 'GLAT-CAR', 'FREQ'] stokes_data = dict(I=SpectralCube(self.data[0], wcs=self.wcs), Q=SpectralCube(self.data[1], wcs2)) with pytest.raises(ValueError) as exc: cube = StokesSpectralCube(stokes_data) assert exc.value.args[0] == "All spectral cubes in stokes_data should have the same WCS" def test_attributes(self): stokes_data = OrderedDict() stokes_data['I'] = SpectralCube(self.data[0], wcs=self.wcs) stokes_data['Q'] = SpectralCube(self.data[1], wcs=self.wcs) stokes_data['U'] = SpectralCube(self.data[2], wcs=self.wcs) stokes_data['V'] = SpectralCube(self.data[3], wcs=self.wcs) cube = StokesSpectralCube(stokes_data) assert_allclose(cube.I.unmasked_data[...], 0) assert_allclose(cube.Q.unmasked_data[...], 1) assert_allclose(cube.U.unmasked_data[...], 2) assert_allclose(cube.V.unmasked_data[...], 3) assert cube.components == ['I', 'Q', 'U', 'V'] def test_dir(self): stokes_data = dict(I=SpectralCube(self.data[0], wcs=self.wcs), Q=SpectralCube(self.data[1], wcs=self.wcs), U=SpectralCube(self.data[2], wcs=self.wcs)) cube = StokesSpectralCube(stokes_data) attributes = dir(cube) for stokes in 'IQU': assert stokes in attributes assert 'V' not in attributes assert 'mask' in attributes assert 'wcs' in attributes assert 'shape' in attributes def test_mask(self): with NumpyRNGContext(12345): mask1 = BooleanArrayMask(np.random.random((5, 20, 30)) > 0.2, wcs=self.wcs) # Deliberately don't use a BooleanArrayMask to check auto-conversion mask2 = np.random.random((5, 20, 30)) > 0.4 stokes_data = dict(I=SpectralCube(self.data[0], wcs=self.wcs), Q=SpectralCube(self.data[1], wcs=self.wcs), 
U=SpectralCube(self.data[2], wcs=self.wcs), V=SpectralCube(self.data[3], wcs=self.wcs)) cube1 = StokesSpectralCube(stokes_data, mask=mask1) cube2 = cube1.with_mask(mask2) assert_equal(cube2.mask.include(), (mask1).include() & mask2) def test_mask_invalid_component_name(self): stokes_data = {'BANANA': SpectralCube(self.data[0], wcs=self.wcs)} with pytest.raises(ValueError) as exc: cube = StokesSpectralCube(stokes_data) assert exc.value.args[0] == "Invalid Stokes component: BANANA - should be one of I, Q, U, V, RR, LL, RL, LR" def test_mask_invalid_shape(self): stokes_data = dict(I=SpectralCube(self.data[0], wcs=self.wcs), Q=SpectralCube(self.data[1], wcs=self.wcs), U=SpectralCube(self.data[2], wcs=self.wcs), V=SpectralCube(self.data[3], wcs=self.wcs)) mask1 = BooleanArrayMask(np.random.random((5, 20, 15)) > 0.2, wcs=self.wcs) with pytest.raises(ValueError) as exc: cube1 = StokesSpectralCube(stokes_data, mask=mask1) assert exc.value.args[0] == "Mask shape is not broadcastable to data shape: (5, 20, 15) vs (5, 20, 30)" def test_separate_mask(self): with NumpyRNGContext(12345): mask1 = BooleanArrayMask(np.random.random((5, 20, 30)) > 0.2, wcs=self.wcs) mask2 = [BooleanArrayMask(np.random.random((5, 20, 30)) > 0.4, wcs=self.wcs) for i in range(4)] mask3 = BooleanArrayMask(np.random.random((5, 20, 30)) > 0.2, wcs=self.wcs) stokes_data = dict(I=SpectralCube(self.data[0], wcs=self.wcs, mask=mask2[0]), Q=SpectralCube(self.data[1], wcs=self.wcs, mask=mask2[1]), U=SpectralCube(self.data[2], wcs=self.wcs, mask=mask2[2]), V=SpectralCube(self.data[3], wcs=self.wcs, mask=mask2[3])) cube1 = StokesSpectralCube(stokes_data, mask=mask1) assert_equal(cube1.I.mask.include(), (mask1 & mask2[0]).include()) assert_equal(cube1.Q.mask.include(), (mask1 & mask2[1]).include()) assert_equal(cube1.U.mask.include(), (mask1 & mask2[2]).include()) assert_equal(cube1.V.mask.include(), (mask1 & mask2[3]).include()) cube2 = cube1.I.with_mask(mask3) assert_equal(cube2.mask.include(), (mask1 & mask2[0] & mask3).include()) spectral-cube-0.3.1/spectral_cube/tests/test_subcubes.py0000644000077000000240000000456712643464660023515 0ustar adamstaff00000000000000from __future__ import print_function, absolute_import, division import pytest import operator import itertools from astropy.io import fits from astropy import units as u from astropy.wcs import WCS import numpy as np from .. import (SpectralCube, BooleanArrayMask, FunctionMask, LazyMask, CompositeMask) from ..spectral_cube import OneDSpectrum, Projection from ..np_compat import allbadtonan from .. import spectral_axis from . 
import path from .helpers import assert_allclose, assert_array_equal from .test_spectral_cube import cube_and_raw from distutils.version import StrictVersion try: import pyregion pyregionOK = True except ImportError: pyregionOK = False def test_subcube(): cube, data = cube_and_raw('advs.fits') sc1 = cube.subcube(xlo=1, xhi=3) sc2 = cube.subcube(xlo=24.06269*u.deg, xhi=24.06206*u.deg) assert sc1.shape == (2,3,2) assert sc2.shape == (2,3,2) assert sc1.wcs.wcs.compare(sc2.wcs.wcs) sc3 = cube.subcube() assert sc3.shape == cube.shape assert sc3.wcs.wcs.compare(cube.wcs.wcs) assert np.all(sc3._data == cube._data) #@pytest.mark.skipif(not pyregionOK, reason='Could not import pyregion') #@pytest.mark.parametrize(('regfile','result'), # (('fk5.reg', [slice(None),1,slice(None)]), # ('image.reg', NotImplementedError), # ('partial_overlap_image.reg', NotImplementedError), # ('no_overlap_image.reg', NotImplementedError), # ('partial_overlap_fk5.reg', [slice(None),1,1]), # ('no_overlap_fk5.reg', ValueError), # )) #def test_ds9region(regfile, result): # cube, data = cube_and_raw('adv.fits') # # regions = pyregion.open(regfile) # # if issubclass(result, Exception): # with pytest.raises(result) as exc: # sc = cube.subcube_from_ds9region(regions) # else: # sc = cube.subcube_from_ds9region(regions) # scsum = sc.sum() # dsum = data[result].sum() # assert scsum == dsum #region = 'fk5\ncircle(29.9346557, 24.0623827, 0.11111)' #subcube = cube.subcube_from_ds9region(region) # THIS TEST FAILS! # I think the coordinate transformation in ds9 is wrong; # it uses kapteyn? #region = 'circle(2,2,2)' #subcube = cube.subcube_from_ds9region(region) spectral-cube-0.3.1/spectral_cube/tests/test_visualization.py0000644000077000000240000000262612643464660024575 0ustar adamstaff00000000000000from __future__ import print_function, absolute_import, division import pytest try: import pvextractor PVEXTRACTOR_INSTALLED = True except ImportError: PVEXTRACTOR_INSTALLED = False try: import matplotlib.pyplot as plt MATPLOTLIB_INSTALLED = True except ImportError: MATPLOTLIB_INSTALLED = False try: import aplpy APLPY_INSTALLED = True except ImportError: APLPY_INSTALLED = False from .. import (SpectralCube, BooleanArrayMask, FunctionMask, LazyMask, CompositeMask) from ..spectral_cube import OneDSpectrum, Projection from ..np_compat import allbadtonan from .. import spectral_axis from .test_spectral_cube import cube_and_raw @pytest.mark.skipif("not PVEXTRACTOR_INSTALLED") def test_to_pvextractor(): cube, data = cube_and_raw('vda_Jybeam_lower.fits') pv = cube.to_pvextractor() @pytest.mark.skipif("not MATPLOTLIB_INSTALLED") def test_projvis(): cube, data = cube_and_raw('vda_Jybeam_lower.fits') mom0 = cube.moment0() mom0.quicklook(use_aplpy=False) @pytest.mark.skipif("not APLPY_INSTALLED") def test_projvis(): cube, data = cube_and_raw('vda_Jybeam_lower.fits') mom0 = cube.moment0() mom0.quicklook(use_aplpy=True) @pytest.mark.skipif("not APLPY_INSTALLED") def test_mask_quicklook(): cube, data = cube_and_raw('vda_Jybeam_lower.fits') cube.mask.quicklook(view=(0, slice(None), slice(None)), use_aplpy=True) spectral-cube-0.3.1/spectral_cube/tests/test_wcs_utils.py0000644000077000000240000000574712643464660023717 0ustar adamstaff00000000000000from __future__ import print_function, absolute_import, division from ..wcs_utils import * from . 
import path def test_wcs_dropping(): wcs = WCS(naxis=4) wcs.wcs.pc = np.zeros([4, 4]) np.fill_diagonal(wcs.wcs.pc, np.arange(1, 5)) pc = wcs.wcs.pc # for later use below dropped = drop_axis(wcs, 0) assert np.all(dropped.wcs.get_pc().diagonal() == np.array([2, 3, 4])) dropped = drop_axis(wcs, 1) assert np.all(dropped.wcs.get_pc().diagonal() == np.array([1, 3, 4])) dropped = drop_axis(wcs, 2) assert np.all(dropped.wcs.get_pc().diagonal() == np.array([1, 2, 4])) dropped = drop_axis(wcs, 3) assert np.all(dropped.wcs.get_pc().diagonal() == np.array([1, 2, 3])) wcs = WCS(naxis=4) wcs.wcs.cd = pc dropped = drop_axis(wcs, 0) assert np.all(dropped.wcs.get_pc().diagonal() == np.array([2, 3, 4])) dropped = drop_axis(wcs, 1) assert np.all(dropped.wcs.get_pc().diagonal() == np.array([1, 3, 4])) dropped = drop_axis(wcs, 2) assert np.all(dropped.wcs.get_pc().diagonal() == np.array([1, 2, 4])) dropped = drop_axis(wcs, 3) assert np.all(dropped.wcs.get_pc().diagonal() == np.array([1, 2, 3])) def test_wcs_swapping(): wcs = WCS(naxis=4) wcs.wcs.pc = np.zeros([4, 4]) np.fill_diagonal(wcs.wcs.pc, np.arange(1, 5)) pc = wcs.wcs.pc # for later use below swapped = wcs_swapaxes(wcs, 0, 1) assert np.all(swapped.wcs.get_pc().diagonal() == np.array([2, 1, 3, 4])) swapped = wcs_swapaxes(wcs, 0, 3) assert np.all(swapped.wcs.get_pc().diagonal() == np.array([4, 2, 3, 1])) swapped = wcs_swapaxes(wcs, 2, 3) assert np.all(swapped.wcs.get_pc().diagonal() == np.array([1, 2, 4, 3])) wcs = WCS(naxis=4) wcs.wcs.cd = pc swapped = wcs_swapaxes(wcs, 0, 1) assert np.all(swapped.wcs.get_pc().diagonal() == np.array([2, 1, 3, 4])) swapped = wcs_swapaxes(wcs, 0, 3) assert np.all(swapped.wcs.get_pc().diagonal() == np.array([4, 2, 3, 1])) swapped = wcs_swapaxes(wcs, 2, 3) assert np.all(swapped.wcs.get_pc().diagonal() == np.array([1, 2, 4, 3])) def test_add_stokes(): wcs = WCS(naxis=3) for ii in range(4): outwcs = add_stokes_axis_to_wcs(wcs, ii) assert outwcs.wcs.naxis == 4 def test_axis_names(): wcs = WCS(path('adv.fits')) assert axis_names(wcs) == ['RA', 'DEC', 'VOPT'] wcs = WCS(path('vad.fits')) assert axis_names(wcs) == ['VOPT', 'RA', 'DEC'] def test_wcs_slice(): wcs = WCS(naxis=3) wcs.wcs.crpix = [50., 45., 30.] wcs_new = slice_wcs(wcs, (slice(10,20), slice(None), slice(20,30))) np.testing.assert_allclose(wcs_new.wcs.crpix, [30., 45., 20.]) def test_wcs_comparison(): wcs1 = WCS(naxis=3) wcs1.wcs.crpix = np.array([50., 45., 30.], dtype='float32') wcs2 = WCS(naxis=3) wcs2.wcs.crpix = np.array([50., 45., 30.], dtype='float64') wcs3 = WCS(naxis=3) wcs3.wcs.crpix = np.array([50., 45., 31.], dtype='float64') assert check_equality(wcs1,wcs2) assert not check_equality(wcs1,wcs3) spectral-cube-0.3.1/spectral_cube/version.py0000644000077000000240000001546612654610270021156 0ustar adamstaff00000000000000# Autogenerated by Astropy-affiliated package spectral_cube's setup.py on 2016-02-04 09:50:00.911265 from __future__ import unicode_literals import datetime import locale import os import subprocess import warnings def _decode_stdio(stream): try: stdio_encoding = locale.getdefaultlocale()[1] or 'utf-8' except ValueError: stdio_encoding = 'utf-8' try: text = stream.decode(stdio_encoding) except UnicodeDecodeError: # Final fallback text = stream.decode('latin1') return text def update_git_devstr(version, path=None): """ Updates the git revision string if and only if the path is being imported directly from a git working copy. This ensures that the revision number in the version string is accurate. 
""" try: # Quick way to determine if we're in git or not - returns '' if not devstr = get_git_devstr(sha=True, show_warning=False, path=path) except OSError: return version if not devstr: # Probably not in git so just pass silently return version if 'dev' in version: # update to the current git revision version_base = version.split('.dev', 1)[0] devstr = get_git_devstr(sha=False, show_warning=False, path=path) return version_base + '.dev' + devstr else: #otherwise it's already the true/release version return version def get_git_devstr(sha=False, show_warning=True, path=None): """ Determines the number of revisions in this repository. Parameters ---------- sha : bool If True, the full SHA1 hash will be returned. Otherwise, the total count of commits in the repository will be used as a "revision number". show_warning : bool If True, issue a warning if git returns an error code, otherwise errors pass silently. path : str or None If a string, specifies the directory to look in to find the git repository. If `None`, the current working directory is used, and must be the root of the git repository. If given a filename it uses the directory containing that file. Returns ------- devversion : str Either a string with the revision number (if `sha` is False), the SHA1 hash of the current commit (if `sha` is True), or an empty string if git version info could not be identified. """ if path is None: path = os.getcwd() if not _get_repo_path(path, levels=0): return '' if not os.path.isdir(path): path = os.path.abspath(os.path.dirname(path)) if sha: # Faster for getting just the hash of HEAD cmd = ['rev-parse', 'HEAD'] else: cmd = ['rev-list', '--count', 'HEAD'] def run_git(cmd): try: p = subprocess.Popen(['git'] + cmd, cwd=path, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE) stdout, stderr = p.communicate() except OSError as e: if show_warning: warnings.warn('Error running git: ' + str(e)) return (None, b'', b'') if p.returncode == 128: if show_warning: warnings.warn('No git repository present at {0!r}! Using ' 'default dev version.'.format(path)) return (p.returncode, b'', b'') if p.returncode == 129: if show_warning: warnings.warn('Your git looks old (does it support {0}?); ' 'consider upgrading to v1.7.2 or ' 'later.'.format(cmd[0])) return (p.returncode, stdout, stderr) elif p.returncode != 0: if show_warning: warnings.warn('Git failed while determining revision ' 'count: {0}'.format(_decode_stdio(stderr))) return (p.returncode, stdout, stderr) return p.returncode, stdout, stderr returncode, stdout, stderr = run_git(cmd) if not sha and returncode == 129: # git returns 129 if a command option failed to parse; in # particular this could happen in git versions older than 1.7.2 # where the --count option is not supported # Also use --abbrev-commit and --abbrev=0 to display the minimum # number of characters needed per-commit (rather than the full hash) cmd = ['rev-list', '--abbrev-commit', '--abbrev=0', 'HEAD'] returncode, stdout, stderr = run_git(cmd) # Fall back on the old method of getting all revisions and counting # the lines if returncode == 0: return str(stdout.count(b'\n')) else: return '' elif sha: return _decode_stdio(stdout)[:40] else: return _decode_stdio(stdout).strip() def _get_repo_path(pathname, levels=None): """ Given a file or directory name, determine the root of the git repository this path is under. If given, this won't look any higher than ``levels`` (that is, if ``levels=0`` then the given path must be the root of the git repository and is returned if so. 
Returns `None` if the given path could not be determined to belong to a git repo. """ if os.path.isfile(pathname): current_dir = os.path.abspath(os.path.dirname(pathname)) elif os.path.isdir(pathname): current_dir = os.path.abspath(pathname) else: return None current_level = 0 while levels is None or current_level <= levels: if os.path.exists(os.path.join(current_dir, '.git')): return current_dir current_level += 1 if current_dir == os.path.dirname(current_dir): break current_dir = os.path.dirname(current_dir) return None _packagename = "spectral_cube" _last_generated_version = "0.3.1" _last_githash = "f80b85a1dda853d63d1a5346af8cb746cfe1b474" # Determine where the source code for this module # lives. If __file__ is not a filesystem path then # it is assumed not to live in a git repo at all. if _get_repo_path(__file__, levels=len(_packagename.split('.'))): version = update_git_devstr(_last_generated_version, path=__file__) githash = get_git_devstr(sha=True, show_warning=False, path=__file__) or _last_githash else: # The file does not appear to live in a git repo so don't bother # invoking git version = _last_generated_version githash = _last_githash major = 0 minor = 3 bugfix = 1 release = True timestamp = datetime.datetime(2016, 2, 4, 9, 50, 0, 911265) debug = False try: from ._compiler import compiler except ImportError: compiler = "unknown" try: from .cython_version import cython_version except ImportError: cython_version = "unknown" spectral-cube-0.3.1/spectral_cube/wcs_utils.py0000644000077000000240000002612612643464660021510 0ustar adamstaff00000000000000from __future__ import print_function, absolute_import, division import numpy as np from astropy.wcs import WCS import warnings from astropy import units as u from astropy import log wcs_parameters_to_preserve = ['cel_offset', 'dateavg', 'dateobs', 'equinox', 'latpole', 'lonpole', 'mjdavg', 'mjdobs', 'name', 'obsgeo', 'phi0', 'radesys', 'restfrq', 'restwav', 'specsys', 'ssysobs', 'ssyssrc', 'theta0', 'velangl', 'velosys', 'zsource'] # not writable: # 'lat', 'lng', 'lattyp', 'lngtyp', bad_spectypes_mapping = {'VELOCITY':'VELO', 'WAVELENG':'WAVE', } def drop_axis(wcs, dropax): """ Drop the ax on axis dropax Remove an axis from the WCS Parameters ---------- wcs: astropy.wcs.WCS The WCS with naxis to be chopped to naxis-1 dropax: int The index of the WCS to drop, counting from 0 (i.e., python convention, not FITS convention) """ inds = list(range(wcs.wcs.naxis)) inds.pop(dropax) inds = np.array(inds) return reindex_wcs(wcs, inds) def add_stokes_axis_to_wcs(wcs, add_before_ind): """ Add a new Stokes axis that is uncorrelated with any other axes Parameters ---------- wcs: astropy.wcs.WCS The WCS to add to add_before_ind: int Index of the WCS to insert the new Stokes axis in front of. 
To add at the end, do add_before_ind = wcs.wcs.naxis """ naxin = wcs.wcs.naxis naxout = naxin + 1 inds = list(range(naxout)) inds.pop(add_before_ind) inds = np.array(inds) outwcs = WCS(naxis=naxout) for par in wcs_parameters_to_preserve: setattr(outwcs.wcs, par, getattr(wcs.wcs, par)) pc = np.zeros([naxout, naxout]) pc[inds[:, np.newaxis], inds[np.newaxis, :]] = wcs.wcs.get_pc() pc[add_before_ind, add_before_ind] = 1 def append_to_posn(val, posn, lst): """ insert a value at index into a list """ return list(lst)[:posn] + [val] + list(lst)[posn:] outwcs.wcs.crpix = append_to_posn(1, add_before_ind, wcs.wcs.crpix) outwcs.wcs.cdelt = append_to_posn(1, add_before_ind, wcs.wcs.get_cdelt()) outwcs.wcs.crval = append_to_posn(1, add_before_ind, wcs.wcs.crval) outwcs.wcs.cunit = append_to_posn("", add_before_ind, wcs.wcs.cunit) outwcs.wcs.ctype = append_to_posn("STOKES", add_before_ind, wcs.wcs.ctype) outwcs.wcs.cname = append_to_posn("STOKES", add_before_ind, wcs.wcs.cname) outwcs.wcs.pc = pc return outwcs def wcs_swapaxes(wcs, ax0, ax1): """ Swap axes in a WCS Parameters ---------- wcs: astropy.wcs.WCS The WCS to have its axes swapped ax0: int ax1: int The indices of the WCS to be swapped, counting from 0 (i.e., python convention, not FITS convention) """ inds = list(range(wcs.wcs.naxis)) inds[ax0], inds[ax1] = inds[ax1], inds[ax0] inds = np.array(inds) return reindex_wcs(wcs, inds) def reindex_wcs(wcs, inds): """ Re-index a WCS given indices. The number of axes may be reduced. Parameters ---------- wcs: astropy.wcs.WCS The WCS to be manipulated inds: np.array(dtype='int') The indices of the array to keep in the output. e.g. swapaxes: [0,2,1,3] dropaxes: [0,1,3] """ if not isinstance(inds, np.ndarray): raise TypeError("Indices must be an ndarray") if inds.dtype.kind != 'i': raise TypeError('Indices must be integers') outwcs = WCS(naxis=len(inds)) for par in wcs_parameters_to_preserve: setattr(outwcs.wcs, par, getattr(wcs.wcs, par)) cdelt = wcs.wcs.get_cdelt() pc = wcs.wcs.get_pc() outwcs.wcs.crpix = wcs.wcs.crpix[inds] outwcs.wcs.cdelt = cdelt[inds] outwcs.wcs.crval = wcs.wcs.crval[inds] outwcs.wcs.cunit = [wcs.wcs.cunit[i] for i in inds] outwcs.wcs.ctype = [wcs.wcs.ctype[i] for i in inds] outwcs.wcs.cname = [wcs.wcs.cname[i] for i in inds] outwcs.wcs.pc = pc[inds[:, None], inds[None, :]] pv_cards = [] for i, j in enumerate(inds): for k, m, v in wcs.wcs.get_pv(): if k == j: pv_cards.append((i, m, v)) outwcs.wcs.set_pv(pv_cards) ps_cards = [] for i, j in enumerate(inds): for k, m, v in wcs.wcs.get_ps(): if k == j: ps_cards.append((i, m, v)) outwcs.wcs.set_ps(ps_cards) return outwcs def axis_names(wcs): """ Extract world names for each coordinate axis Parameters ---------- wcs : astropy.wcs.WCS The WCS object to extract names from Returns ------- A tuple of names along each axis """ names = list(wcs.wcs.cname) types = wcs.wcs.ctype for i in range(len(names)): if len(names[i]) > 0: continue names[i] = types[i].split('-')[0] return names def slice_wcs(mywcs, view, numpy_order=True): """ Slice a WCS instance using a Numpy slice. The order of the slice should be reversed (as for the data) compared to the natural WCS order. Parameters ---------- view : tuple A tuple containing the same number of slices as the WCS system. The ``step`` method, the third argument to a slice, is not presently supported. numpy_order : bool Use numpy order, i.e. slice the WCS so that an identical slice applied to a numpy array will slice the array and WCS in the same way. 
If set to `False`, the WCS will be sliced in FITS order, meaning the first slice will be applied to the *last* numpy index but the *first* WCS axis. Returns ------- wcs_new : `~astropy.wcs.WCS` A new resampled WCS axis """ if hasattr(view, '__len__') and len(view) > mywcs.wcs.naxis: raise ValueError("Must have # of slices <= # of WCS axes") elif not hasattr(view, '__len__'): # view MUST be an iterable view = [view] if not all([isinstance(x, slice) for x in view]): raise ValueError("Cannot downsample a WCS with indexing. Use " "wcs.sub or wcs.dropaxis if you want to remove " "axes.") wcs_new = mywcs.deepcopy() for i, iview in enumerate(view): if iview.step is not None and iview.start is None: # Slice from "None" is equivalent to slice from 0 (but one # might want to downsample, so allow slices with # None,None,step or None,stop,step) iview = slice(0, iview.stop, iview.step) if iview.start is not None: if numpy_order: wcs_index = mywcs.wcs.naxis - 1 - i else: wcs_index = i if iview.step not in (None, 1): crpix = mywcs.wcs.crpix[wcs_index] cdelt = mywcs.wcs.cdelt[wcs_index] # equivalently (keep this comment so you can compare eqns): # wcs_new.wcs.crpix[wcs_index] = # (crpix - iview.start)*iview.step + 0.5 - iview.step/2. crp = ((crpix - iview.start - 1.)/iview.step + 0.5 + 1./iview.step/2.) wcs_new.wcs.crpix[wcs_index] = crp wcs_new.wcs.cdelt[wcs_index] = cdelt * iview.step else: wcs_new.wcs.crpix[wcs_index] -= iview.start return wcs_new def check_equality(wcs1, wcs2, warn_missing=False, ignore_keywords=['MJD-OBS', 'VELOSYS']): """ Check if two WCSs are equal Parameters ---------- wcs1, wcs2: `astropy.wcs.WCS` The WCSs warn_missing: bool Issue warnings if one header is missing a keyword that the other has? ignore_keywords: list of str Keywords that are stored as part of the WCS but do not define part of the coordinate system and therefore can be safely ignored. 
""" # naive version: # return str(wcs1.to_header()) != str(wcs2.to_header()) h1 = wcs1.to_header() h2 = wcs2.to_header() # Default to headers equal; everything below changes to false if there are # any inequalities OK = True # to keep track of keywords in both matched = [] for c1 in h1.cards: key = c1[0] if key in h2: matched.append(key) c2 = h2.cards[key] # special check for units: "m/s" = "m s-1" if 'UNIT' in key: u1 = u.Unit(c1[1]) u2 = u.Unit(c2[1]) if u1 != u2: if key in ignore_keywords: log.debug("IGNORED Header 1, {0}: {1} != {2}".format(key,u1,u2)) else: OK = False log.debug("Header 1, {0}: {1} != {2}".format(key,u1,u2)) elif isinstance(c1[1], (float, np.float)): try: np.testing.assert_almost_equal(c1[1], c2[1]) except AssertionError: if key in ('RESTFRQ','RESTWAV'): warnings.warn("{0} is not equal in WCS; ignoring ".format(key)+ "under the assumption that you want to" " compare velocity cubes.") continue if key in ignore_keywords: log.debug("IGNORED Header 1, {0}: {1} != {2}".format(key,c1[1],c2[1])) else: log.debug("Header 1, {0}: {1} != {2}".format(key,c1[1],c2[1])) OK = False elif c1[1] != c2[1]: if key in ignore_keywords: log.debug("IGNORED Header 1, {0}: {1} != {2}".format(key,c1[1],c2[1])) else: log.debug("Header 1, {0}: {1} != {2}".format(key,c1[1],c2[1])) OK = False else: if warn_missing: warnings.warn("WCS2 is missing card {0}".format(key)) elif key not in ignore_keywords: OK = False # Check that there aren't any cards in header 2 that were missing from # header 1 for c2 in h2.cards: key = c2[0] if key not in matched: if warn_missing: warnings.warn("WCS1 is missing card {0}".format(key)) else: OK = False return OK def strip_wcs_from_header(header): """ Given a header with WCS information, remove ALL WCS information from that header """ hwcs = WCS(header) wcsh = hwcs.to_header() keys_to_keep = [k for k in header if (k and k not in wcsh and 'NAXIS' not in k)] newheader = header.copy() for kw in newheader.keys(): if kw not in keys_to_keep: del newheader[kw] for kw in ('CRPIX{ii}', 'CRVAL{ii}', 'CDELT{ii}', 'CUNIT{ii}', 'CTYPE{ii}', 'PC0{ii}_0{jj}', 'CD{ii}_{jj}',): for ii in range(5): for jj in range(5): k = kw.format(ii=ii,jj=jj) if k in newheader.keys(): del newheader[k] return newheader spectral-cube-0.3.1/spectral_cube/ytcube.py0000644000077000000240000002567612643464660021000 0ustar adamstaff00000000000000from __future__ import print_function, absolute_import, division import os import subprocess import numpy as np import time from astropy.utils.console import ProgressBar from astropy import log from astropy.extern import six import warnings try: import yt from yt.visualization.volume_rendering.transfer_function_helper import TransferFunctionHelper ytOK = True except ImportError: ytOK = False class ytCube(object): """ Light wrapper of a yt object with ability to translate yt<->wcs coordinates """ def __init__(self, cube, dataset, spectral_factor=1.0): self.cube = cube self.wcs = cube.wcs self.dataset = dataset self.spectral_factor = spectral_factor def world2yt(self, world_coord, first_index=0): """ Convert a position in world coordinates to the coordinates used by a yt dataset that has been generated using the ``to_yt`` method. Parameters ---------- world_coord: `astropy.wcs.WCS.wcs_world2pix`-valid input The world coordinates first_index: 0 or 1 The first index of the data. 
In python and yt, this should be zero, but for the FITS coordinates, use 1 """ yt_coord = self.wcs.wcs_world2pix([world_coord], first_index)[0] yt_coord[2] = (yt_coord[2] - 0.5)*self.spectral_factor+0.5 return yt_coord def yt2world(self, yt_coord, first_index=0): """ Convert a position in yt's coordinates to world coordinates from a yt dataset that has been generated using the ``to_yt`` method. Parameters ---------- world_coord: `astropy.wcs.WCS.wcs_pix2world`-valid input The yt pixel coordinates to convert back to world coordinates first_index: 0 or 1 The first index of the data. In python and yt, this should be zero, but for the FITS coordinates, use 1 """ yt_coord = np.array(yt_coord) # stripping off units yt_coord[2] = (yt_coord[2] - 0.5)/self.spectral_factor+0.5 world_coord = self.wcs.wcs_pix2world([yt_coord], first_index)[0] return world_coord def quick_render_movie(self, outdir, size=256, nframes=30, camera_angle=(0,0,1), north_vector=(0,0,1), rot_vector=(1,0,0), colormap='doom', cmap_range='auto', transfer_function='auto', start_index=0, image_prefix="", output_filename='out.mp4', log_scale=False, rescale=True): """ Create a movie rotating the cube 360 degrees from PP -> PV -> PP -> PV -> PP Parameters ---------- outdir: str The output directory in which the individual image frames and the resulting output mp4 file should be stored size: int The size of the individual output frame in pixels (i.e., size=256 will result in a 256x256 image) nframes: int The number of frames in the resulting movie camera_angle: 3-tuple The initial angle of the camera north_vector: 3-tuple The vector of 'north' in the data cube. Default is coincident with the spectral axis rot_vector: 3-tuple The vector around which the camera will be rotated colormap: str A valid colormap. See `yt.show_colormaps` transfer_function: 'auto' or `yt.visualization.volume_rendering.TransferFunction` Either 'auto' to use the colormap specified, or a valid TransferFunction instance log_scale: bool Should the colormap be log scaled? rescale: bool If True, the images will be rescaled to have a common 95th percentile brightness, which can help reduce flickering from having a single bright pixel in some projections start_index : int The number of the first image to save image_prefix : str A string to prepend to the image name for each image that is output output_filename : str The movie file name to output. The suffix may affect the file type created. Defaults to 'out.mp4'. Will be placed in ``outdir`` Returns ------- """ if not ytOK: raise IOError("yt could not be imported. 
def _rescale_images(images, prefix):
    """
    Save a sequence of images at a common brightness scaling to reduce
    flickering between frames
    """

    # Common color scale: 99.5th percentile of the summed RGB channels;
    # common alpha scale: 95th percentile of the alpha channel
    cmax = max(np.percentile(i[:, :, :3].sum(axis=2), 99.5) for i in images)
    amax = max(np.percentile(i[:, :, 3], 95) for i in images)

    for i, image in enumerate(images):
        image = image.rescale(cmax=cmax, amax=amax).swapaxes(0, 1)
        image.write_png("%s%04i.png" % (prefix, i), rescale=False)
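
# For reference, _make_movie below shells out to ffmpeg with (roughly) the
# equivalent of:
#
#     ffmpeg -y -r 5 -i <moviepath>/<prefix>%04d.png -r 30 -pix_fmt yuv420p <outpath>
#
# i.e., read the PNG series at 5 fps, duplicate frames up to a 30 fps output,
# and use the broadly compatible yuv420p pixel format.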
def _make_movie(moviepath, prefix="", filename='out.mp4', overwrite=True):
    """
    Use ffmpeg to generate a movie from the image series
    """

    outpath = os.path.join(moviepath, filename)

    if os.path.exists(outpath) and overwrite:
        # '-y' tells ffmpeg to overwrite the existing file
        command = ['ffmpeg', '-y', '-r', '5', '-i',
                   os.path.join(moviepath, prefix + '%04d.png'),
                   '-r', '30', '-pix_fmt', 'yuv420p', outpath]
    elif os.path.exists(outpath):
        # the output exists and overwrite was not requested; return early
        # rather than calling Popen with an undefined command
        log.info("File {0} exists - skipping".format(outpath))
        return None
    else:
        command = ['ffmpeg', '-r', '5', '-i',
                   os.path.join(moviepath, prefix + '%04d.png'),
                   '-r', '30', '-pix_fmt', 'yuv420p', outpath]

    pipe = subprocess.Popen(command, stdout=subprocess.PIPE, close_fds=True)

    pipe.wait()

    return pipe
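
# End-to-end sketch (illustrative, not part of the library): render a quick
# rotation movie from a cube.  Assumes the optional yt and ffmpeg dependencies
# are available and that 'mycube.fits' (a hypothetical file) exists.
if __name__ == '__main__':
    from spectral_cube import SpectralCube

    cube = SpectralCube.read('mycube.fits')
    ytc = cube.to_yt()

    # Writes the frame PNGs plus 'out.mp4' into ./render_demo/
    frames = ytc.quick_render_movie('render_demo', size=128, nframes=10)
    print("Rendered {0} frames".format(len(frames)))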