==> PyBDSF-1.11.0/.dockerignore <==
.git/
build/
dist/
doc/build/
*.egg-info/
manylinux*/
venv/

==> PyBDSF-1.11.0/.github/workflows/build_release.yml <==
name: Build and upload to PyPI

# Only build and upload when a new release tag is created
on:
  push:
    tags:
      - "v[0-9]+.[0-9]+.[0-9]+"
      - "v[0-9]+.[0-9]+.[0-9]+[a-z]+[0-9]+"

# Alternatively, build on every branch push, tag push, and pull request change
# on: [push, pull_request]

jobs:
  build_wheels:
    name: Build wheels on ${{ matrix.os }}
    runs-on: ${{ matrix.os }}
    strategy:
      fail-fast: true
      matrix:
        os: [ubuntu-latest, macos-12]
        toolchain:
          - {compiler: gcc, version: 12}
    steps:
      - uses: actions/checkout@v4
        with:
          # Needed for `setuptools-scm`
          fetch-depth: 0
      - name: Setup Fortran
        uses: fortran-lang/setup-fortran@v1
        id: setup-fortran
        with:
          compiler: ${{ matrix.toolchain.compiler }}
          version: ${{ matrix.toolchain.version }}
        if: runner.os == 'macOS'
      - name: Build wheels
        uses: pypa/cibuildwheel@v2.19
        env:
          MACOSX_DEPLOYMENT_TARGET: "12.0"
      - name: Upload wheels
        uses: actions/upload-artifact@v3
        with:
          path: wheelhouse/*.whl

  build_sdist:
    name: Build source distribution
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - name: Build sdist
        run: pipx run build --sdist
      - uses: actions/upload-artifact@v3
        with:
          path: dist/*.tar.gz

  upload_pypi:
    needs: [build_wheels, build_sdist]
    runs-on: ubuntu-latest
    # upload to PyPI on every tag starting with 'v'
    if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v')
    # alternatively, to publish when a GitHub Release is created, use the following rule:
    # if: github.event_name == 'release' && github.event.action == 'published'
    steps:
      - uses: actions/download-artifact@v3
        with:
          # unpacks default artifact into dist/
          # if `name: artifact` is omitted, the action will create extra parent dir
          name: artifact
          path: dist
      - uses: pypa/gh-action-pypi-publish@v1.9.0
        with:
          user: __token__
          password: ${{ secrets.pypi_password }}
          # To test:
          # repository-url: https://test.pypi.org/legacy/
          # password: ${{ secrets.test_pypi_password }}

==> PyBDSF-1.11.0/.github/workflows/ci.yml <==
name: Build and test PyBDSF

on: [push, pull_request]

jobs:
  build:
    runs-on: ubuntu-22.04
    steps:
      - uses: actions/checkout@v4
      - uses: actions/setup-python@v5
        with:
          python-version: "3.10"
      - name: Apt install required packages
        run: |
          sudo apt-get install -y libboost-python-dev libboost-numpy-dev
      - name: Build and install PyBDSF
        run: |
          python -m pip install .
      - name: Test PyBDSF
        run: |
          cd test && python tbdsf_process_image.py

==> PyBDSF-1.11.0/.gitignore <==
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# Distribution / packaging
.Python
build/
env/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
src/*/*.[ao]
*.so
var/
wheels/
*.egg-info/
.installed.cfg
*.egg
*.whl

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*,cover
.hypothesis/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
target/

# Jupyter Notebook
.ipynb_checkpoints

# pyenv
.python-version

# celery beat schedule file
celerybeat-schedule

# SageMath parsed files
*.sage.py

# dotenv
.env

# virtualenv
.venv
venv/
ENV/

# Spyder project settings
.spyderproject

# Rope project settings
.ropeproject

# Visual Studio settings
.vscode

# Setuptools SCM
bdsf/_version.py

==> PyBDSF-1.11.0/.readthedocs.yaml <==
# Read the Docs configuration file for Sphinx projects
# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details

# Required
version: 2

# Set the OS, Python version and other tools you might need
build:
  os: ubuntu-22.04
  tools:
    python: "3.12"
    # You can also specify other tool versions:
    # nodejs: "20"
    # rust: "1.70"
    # golang: "1.20"

# Build documentation in the "docs/" directory with Sphinx
sphinx:
  configuration: doc/source/conf.py
  # You can configure Sphinx to use a different builder, for instance use the dirhtml builder for simpler URLs
  # builder: "dirhtml"
  # Fail on all warnings to avoid broken references
  # fail_on_warning: true

# Optionally build your docs in additional formats such as PDF and ePub
# formats:
#   - pdf
#   - epub

# Optional but recommended, declare the Python requirements required
# to build your documentation
# See https://docs.readthedocs.io/en/stable/guides/reproducible-builds.html
python:
  install:
    - requirements: doc/requirements.txt

==> PyBDSF-1.11.0/CMakeLists.txt <==
cmake_minimum_required(VERSION 3.18)
project(PyBDSF C CXX Fortran)
find_package(PythonExtensions REQUIRED)
find_package(Python COMPONENTS Interpreter Development.Module REQUIRED)
find_package(F2PY REQUIRED)
find_package(Boost COMPONENTS python numpy REQUIRED)
if(SKBUILD)
  message(STATUS "The project is built using scikit-build")
  set(PYTHON_INCLUDE_DIRS
    ${Python_INCLUDE_DIR}
    ${Python_NumPy_INCLUDE_DIRS}
  )
else()
  set(PYTHON_INCLUDE_DIRS ${Python_INCLUDE_DIRS})
endif()
## -- Static library: minpack -- ##
set(MINPACK_SRC
src/minpack/lmder.f
src/minpack/lmpar.f
src/minpack/qrfac.f
src/minpack/qrsolv.f
src/minpack/enorm.f
src/minpack/dpmpar.f
)
add_library(minpack STATIC ${MINPACK_SRC})
set_target_properties(minpack PROPERTIES POSITION_INDEPENDENT_CODE TRUE)
## -- Static library: port3 -- ##
set(PORT3_SRC
src/port3/dnsg.f
src/port3/dn2g.f
src/port3/drnsg.f
src/port3/drn2g.f
src/port3/d1mach.f
src/port3/da7sst.f
src/port3/dc7vfn.f
src/port3/dd7tpr.f
src/port3/dd7upd.f
src/port3/df7hes.f
src/port3/dg7lit.f
src/port3/dg7qts.f
src/port3/ditsum.f
src/port3/divset.f
src/port3/dl7itv.f
src/port3/dl7ivm.f
src/port3/dl7mst.f
src/port3/dl7nvr.f
src/port3/dl7sqr.f
src/port3/dl7srt.f
src/port3/dl7svn.f
src/port3/dl7svx.f
src/port3/dl7tsq.f
src/port3/dl7tvm.f
src/port3/dl7vml.f
src/port3/dn2cvp.f
src/port3/dn2lrd.f
src/port3/dn2rdp.f
src/port3/do7prd.f
src/port3/dparck.f
src/port3/dq7apl.f
src/port3/dq7rad.f
src/port3/dq7rfh.f
src/port3/dr7mdc.f
src/port3/drldst.f
src/port3/ds7cpr.f
src/port3/ds7lup.f
src/port3/ds7lvm.f
src/port3/dv2axy.f
src/port3/dv2nrm.f
src/port3/dv7cpy.f
src/port3/dv7dfl.f
src/port3/dv7prm.f
src/port3/dv7scl.f
src/port3/dv7scp.f
src/port3/dv7swp.f
src/port3/i1mach.f
src/port3/i7mdcn.f
src/port3/stopx.f
)
add_library(port3 STATIC ${PORT3_SRC})
set_target_properties(port3 PROPERTIES POSITION_INDEPENDENT_CODE TRUE)
## -- F2py module: pytesselate -- ##
set(F2PY_MODULE_NAME "_pytesselate")
set(F2PY_MODULE_FILE ${CMAKE_CURRENT_BINARY_DIR}/${F2PY_MODULE_NAME}${PYTHON_EXTENSION_MODULE_SUFFIX})
set(PYTESSELATE_SRC
${CMAKE_SOURCE_DIR}/src/fortran/pytess_roundness.f
${CMAKE_SOURCE_DIR}/src/fortran/pytess_simple.f
${CMAKE_SOURCE_DIR}/src/fortran/constants.h
)
add_custom_target(${F2PY_MODULE_NAME} ALL
DEPENDS ${F2PY_MODULE_FILE}
)
add_custom_command(
OUTPUT ${F2PY_MODULE_FILE}
COMMAND ${F2PY_EXECUTABLE} -m ${F2PY_MODULE_NAME} -c ${PYTESSELATE_SRC}
COMMENT "[F2PY] Building python extension module ${F2PY_MODULE_NAME}"
)
install(FILES ${F2PY_MODULE_FILE} DESTINATION bdsf)
## -- Python module: _cbdsm -- ##
set(CBDSM_SRC
src/c++/Fitter_dn2g.cc
src/c++/Fitter_dnsg.cc
src/c++/Fitter_lmder.cc
src/c++/MGFunction1.cc
src/c++/MGFunction2.cc
src/c++/cbdsm_main.cc
src/c++/stat.cc
src/c++/num_util/num_util.cpp
)
add_library(_cbdsm MODULE ${CBDSM_SRC})
target_include_directories(_cbdsm PRIVATE src/c++)
target_link_libraries(_cbdsm minpack port3 gfortran ${Boost_LIBRARIES})
python_extension_module(_cbdsm)
install(TARGETS _cbdsm LIBRARY DESTINATION bdsf)
## -- Python module: natgrid -- ##
set(NATGRID_SRC
natgrid/Src/natgrid.c
natgrid/Src/natgridd.c
natgrid/Src/natgridmodule.c
natgrid/Src/natgrids.c
natgrid/Src/nncrunch.c
natgrid/Src/nncrunchd.c
natgrid/Src/nncrunchs.c
natgrid/Src/nnerror.c
natgrid/Src/nnuser.c
natgrid/Src/nnuserd.c
natgrid/Src/nnusers.c
)
add_library(natgridmodule MODULE ${NATGRID_SRC})
target_include_directories(natgridmodule PRIVATE natgrid/Include)
python_extension_module(natgridmodule)
install(TARGETS natgridmodule LIBRARY DESTINATION bdsf/nat)

==> PyBDSF-1.11.0/LICENSE <==
GNU GENERAL PUBLIC LICENSE
Version 3, 29 June 2007
Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
Preamble
The GNU General Public License is a free, copyleft license for
software and other kinds of works.
The licenses for most software and other practical works are designed
to take away your freedom to share and change the works. By contrast,
the GNU General Public License is intended to guarantee your freedom to
share and change all versions of a program--to make sure it remains free
software for all its users. We, the Free Software Foundation, use the
GNU General Public License for most of our software; it applies also to
any other work released this way by its authors. You can apply it to
your programs, too.
When we speak of free software, we are referring to freedom, not
price. Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
them if you wish), that you receive source code or can get it if you
want it, that you can change the software or use pieces of it in new
free programs, and that you know you can do these things.
To protect your rights, we need to prevent others from denying you
these rights or asking you to surrender the rights. Therefore, you have
certain responsibilities if you distribute copies of the software, or if
you modify it: responsibilities to respect the freedom of others.
For example, if you distribute copies of such a program, whether
gratis or for a fee, you must pass on to the recipients the same
freedoms that you received. You must make sure that they, too, receive
or can get the source code. And you must show them these terms so they
know their rights.
Developers that use the GNU GPL protect your rights with two steps:
(1) assert copyright on the software, and (2) offer you this License
giving you legal permission to copy, distribute and/or modify it.
For the developers' and authors' protection, the GPL clearly explains
that there is no warranty for this free software. For both users' and
authors' sake, the GPL requires that modified versions be marked as
changed, so that their problems will not be attributed erroneously to
authors of previous versions.
Some devices are designed to deny users access to install or run
modified versions of the software inside them, although the manufacturer
can do so. This is fundamentally incompatible with the aim of
protecting users' freedom to change the software. The systematic
pattern of such abuse occurs in the area of products for individuals to
use, which is precisely where it is most unacceptable. Therefore, we
have designed this version of the GPL to prohibit the practice for those
products. If such problems arise substantially in other domains, we
stand ready to extend this provision to those domains in future versions
of the GPL, as needed to protect the freedom of users.
Finally, every program is threatened constantly by software patents.
States should not allow patents to restrict development and use of
software on general-purpose computers, but in those that do, we wish to
avoid the special danger that patents applied to a free program could
make it effectively proprietary. To prevent this, the GPL assures that
patents cannot be used to render the program non-free.
The precise terms and conditions for copying, distribution and
modification follow.
TERMS AND CONDITIONS
0. Definitions.
"This License" refers to version 3 of the GNU General Public License.
"Copyright" also means copyright-like laws that apply to other kinds of
works, such as semiconductor masks.
"The Program" refers to any copyrightable work licensed under this
License. Each licensee is addressed as "you". "Licensees" and
"recipients" may be individuals or organizations.
To "modify" a work means to copy from or adapt all or part of the work
in a fashion requiring copyright permission, other than the making of an
exact copy. The resulting work is called a "modified version" of the
earlier work or a work "based on" the earlier work.
A "covered work" means either the unmodified Program or a work based
on the Program.
To "propagate" a work means to do anything with it that, without
permission, would make you directly or secondarily liable for
infringement under applicable copyright law, except executing it on a
computer or modifying a private copy. Propagation includes copying,
distribution (with or without modification), making available to the
public, and in some countries other activities as well.
To "convey" a work means any kind of propagation that enables other
parties to make or receive copies. Mere interaction with a user through
a computer network, with no transfer of a copy, is not conveying.
An interactive user interface displays "Appropriate Legal Notices"
to the extent that it includes a convenient and prominently visible
feature that (1) displays an appropriate copyright notice, and (2)
tells the user that there is no warranty for the work (except to the
extent that warranties are provided), that licensees may convey the
work under this License, and how to view a copy of this License. If
the interface presents a list of user commands or options, such as a
menu, a prominent item in the list meets this criterion.
1. Source Code.
The "source code" for a work means the preferred form of the work
for making modifications to it. "Object code" means any non-source
form of a work.
A "Standard Interface" means an interface that either is an official
standard defined by a recognized standards body, or, in the case of
interfaces specified for a particular programming language, one that
is widely used among developers working in that language.
The "System Libraries" of an executable work include anything, other
than the work as a whole, that (a) is included in the normal form of
packaging a Major Component, but which is not part of that Major
Component, and (b) serves only to enable use of the work with that
Major Component, or to implement a Standard Interface for which an
implementation is available to the public in source code form. A
"Major Component", in this context, means a major essential component
(kernel, window system, and so on) of the specific operating system
(if any) on which the executable work runs, or a compiler used to
produce the work, or an object code interpreter used to run it.
The "Corresponding Source" for a work in object code form means all
the source code needed to generate, install, and (for an executable
work) run the object code and to modify the work, including scripts to
control those activities. However, it does not include the work's
System Libraries, or general-purpose tools or generally available free
programs which are used unmodified in performing those activities but
which are not part of the work. For example, Corresponding Source
includes interface definition files associated with source files for
the work, and the source code for shared libraries and dynamically
linked subprograms that the work is specifically designed to require,
such as by intimate data communication or control flow between those
subprograms and other parts of the work.
The Corresponding Source need not include anything that users
can regenerate automatically from other parts of the Corresponding
Source.
The Corresponding Source for a work in source code form is that
same work.
2. Basic Permissions.
All rights granted under this License are granted for the term of
copyright on the Program, and are irrevocable provided the stated
conditions are met. This License explicitly affirms your unlimited
permission to run the unmodified Program. The output from running a
covered work is covered by this License only if the output, given its
content, constitutes a covered work. This License acknowledges your
rights of fair use or other equivalent, as provided by copyright law.
You may make, run and propagate covered works that you do not
convey, without conditions so long as your license otherwise remains
in force. You may convey covered works to others for the sole purpose
of having them make modifications exclusively for you, or provide you
with facilities for running those works, provided that you comply with
the terms of this License in conveying all material for which you do
not control copyright. Those thus making or running the covered works
for you must do so exclusively on your behalf, under your direction
and control, on terms that prohibit them from making any copies of
your copyrighted material outside their relationship with you.
Conveying under any other circumstances is permitted solely under
the conditions stated below. Sublicensing is not allowed; section 10
makes it unnecessary.
3. Protecting Users' Legal Rights From Anti-Circumvention Law.
No covered work shall be deemed part of an effective technological
measure under any applicable law fulfilling obligations under article
11 of the WIPO copyright treaty adopted on 20 December 1996, or
similar laws prohibiting or restricting circumvention of such
measures.
When you convey a covered work, you waive any legal power to forbid
circumvention of technological measures to the extent such circumvention
is effected by exercising rights under this License with respect to
the covered work, and you disclaim any intention to limit operation or
modification of the work as a means of enforcing, against the work's
users, your or third parties' legal rights to forbid circumvention of
technological measures.
4. Conveying Verbatim Copies.
You may convey verbatim copies of the Program's source code as you
receive it, in any medium, provided that you conspicuously and
appropriately publish on each copy an appropriate copyright notice;
keep intact all notices stating that this License and any
non-permissive terms added in accord with section 7 apply to the code;
keep intact all notices of the absence of any warranty; and give all
recipients a copy of this License along with the Program.
You may charge any price or no price for each copy that you convey,
and you may offer support or warranty protection for a fee.
5. Conveying Modified Source Versions.
You may convey a work based on the Program, or the modifications to
produce it from the Program, in the form of source code under the
terms of section 4, provided that you also meet all of these conditions:
a) The work must carry prominent notices stating that you modified
it, and giving a relevant date.
b) The work must carry prominent notices stating that it is
released under this License and any conditions added under section
7. This requirement modifies the requirement in section 4 to
"keep intact all notices".
c) You must license the entire work, as a whole, under this
License to anyone who comes into possession of a copy. This
License will therefore apply, along with any applicable section 7
additional terms, to the whole of the work, and all its parts,
regardless of how they are packaged. This License gives no
permission to license the work in any other way, but it does not
invalidate such permission if you have separately received it.
d) If the work has interactive user interfaces, each must display
Appropriate Legal Notices; however, if the Program has interactive
interfaces that do not display Appropriate Legal Notices, your
work need not make them do so.
A compilation of a covered work with other separate and independent
works, which are not by their nature extensions of the covered work,
and which are not combined with it such as to form a larger program,
in or on a volume of a storage or distribution medium, is called an
"aggregate" if the compilation and its resulting copyright are not
used to limit the access or legal rights of the compilation's users
beyond what the individual works permit. Inclusion of a covered work
in an aggregate does not cause this License to apply to the other
parts of the aggregate.
6. Conveying Non-Source Forms.
You may convey a covered work in object code form under the terms
of sections 4 and 5, provided that you also convey the
machine-readable Corresponding Source under the terms of this License,
in one of these ways:
a) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by the
Corresponding Source fixed on a durable physical medium
customarily used for software interchange.
b) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by a
written offer, valid for at least three years and valid for as
long as you offer spare parts or customer support for that product
model, to give anyone who possesses the object code either (1) a
copy of the Corresponding Source for all the software in the
product that is covered by this License, on a durable physical
medium customarily used for software interchange, for a price no
more than your reasonable cost of physically performing this
conveying of source, or (2) access to copy the
Corresponding Source from a network server at no charge.
c) Convey individual copies of the object code with a copy of the
written offer to provide the Corresponding Source. This
alternative is allowed only occasionally and noncommercially, and
only if you received the object code with such an offer, in accord
with subsection 6b.
d) Convey the object code by offering access from a designated
place (gratis or for a charge), and offer equivalent access to the
Corresponding Source in the same way through the same place at no
further charge. You need not require recipients to copy the
Corresponding Source along with the object code. If the place to
copy the object code is a network server, the Corresponding Source
may be on a different server (operated by you or a third party)
that supports equivalent copying facilities, provided you maintain
clear directions next to the object code saying where to find the
Corresponding Source. Regardless of what server hosts the
Corresponding Source, you remain obligated to ensure that it is
available for as long as needed to satisfy these requirements.
e) Convey the object code using peer-to-peer transmission, provided
you inform other peers where the object code and Corresponding
Source of the work are being offered to the general public at no
charge under subsection 6d.
A separable portion of the object code, whose source code is excluded
from the Corresponding Source as a System Library, need not be
included in conveying the object code work.
A "User Product" is either (1) a "consumer product", which means any
tangible personal property which is normally used for personal, family,
or household purposes, or (2) anything designed or sold for incorporation
into a dwelling. In determining whether a product is a consumer product,
doubtful cases shall be resolved in favor of coverage. For a particular
product received by a particular user, "normally used" refers to a
typical or common use of that class of product, regardless of the status
of the particular user or of the way in which the particular user
actually uses, or expects or is expected to use, the product. A product
is a consumer product regardless of whether the product has substantial
commercial, industrial or non-consumer uses, unless such uses represent
the only significant mode of use of the product.
"Installation Information" for a User Product means any methods,
procedures, authorization keys, or other information required to install
and execute modified versions of a covered work in that User Product from
a modified version of its Corresponding Source. The information must
suffice to ensure that the continued functioning of the modified object
code is in no case prevented or interfered with solely because
modification has been made.
If you convey an object code work under this section in, or with, or
specifically for use in, a User Product, and the conveying occurs as
part of a transaction in which the right of possession and use of the
User Product is transferred to the recipient in perpetuity or for a
fixed term (regardless of how the transaction is characterized), the
Corresponding Source conveyed under this section must be accompanied
by the Installation Information. But this requirement does not apply
if neither you nor any third party retains the ability to install
modified object code on the User Product (for example, the work has
been installed in ROM).
The requirement to provide Installation Information does not include a
requirement to continue to provide support service, warranty, or updates
for a work that has been modified or installed by the recipient, or for
the User Product in which it has been modified or installed. Access to a
network may be denied when the modification itself materially and
adversely affects the operation of the network or violates the rules and
protocols for communication across the network.
Corresponding Source conveyed, and Installation Information provided,
in accord with this section must be in a format that is publicly
documented (and with an implementation available to the public in
source code form), and must require no special password or key for
unpacking, reading or copying.
7. Additional Terms.
"Additional permissions" are terms that supplement the terms of this
License by making exceptions from one or more of its conditions.
Additional permissions that are applicable to the entire Program shall
be treated as though they were included in this License, to the extent
that they are valid under applicable law. If additional permissions
apply only to part of the Program, that part may be used separately
under those permissions, but the entire Program remains governed by
this License without regard to the additional permissions.
When you convey a copy of a covered work, you may at your option
remove any additional permissions from that copy, or from any part of
it. (Additional permissions may be written to require their own
removal in certain cases when you modify the work.) You may place
additional permissions on material, added by you to a covered work,
for which you have or can give appropriate copyright permission.
Notwithstanding any other provision of this License, for material you
add to a covered work, you may (if authorized by the copyright holders of
that material) supplement the terms of this License with terms:
a) Disclaiming warranty or limiting liability differently from the
terms of sections 15 and 16 of this License; or
b) Requiring preservation of specified reasonable legal notices or
author attributions in that material or in the Appropriate Legal
Notices displayed by works containing it; or
c) Prohibiting misrepresentation of the origin of that material, or
requiring that modified versions of such material be marked in
reasonable ways as different from the original version; or
d) Limiting the use for publicity purposes of names of licensors or
authors of the material; or
e) Declining to grant rights under trademark law for use of some
trade names, trademarks, or service marks; or
f) Requiring indemnification of licensors and authors of that
material by anyone who conveys the material (or modified versions of
it) with contractual assumptions of liability to the recipient, for
any liability that these contractual assumptions directly impose on
those licensors and authors.
All other non-permissive additional terms are considered "further
restrictions" within the meaning of section 10. If the Program as you
received it, or any part of it, contains a notice stating that it is
governed by this License along with a term that is a further
restriction, you may remove that term. If a license document contains
a further restriction but permits relicensing or conveying under this
License, you may add to a covered work material governed by the terms
of that license document, provided that the further restriction does
not survive such relicensing or conveying.
If you add terms to a covered work in accord with this section, you
must place, in the relevant source files, a statement of the
additional terms that apply to those files, or a notice indicating
where to find the applicable terms.
Additional terms, permissive or non-permissive, may be stated in the
form of a separately written license, or stated as exceptions;
the above requirements apply either way.
8. Termination.
You may not propagate or modify a covered work except as expressly
provided under this License. Any attempt otherwise to propagate or
modify it is void, and will automatically terminate your rights under
this License (including any patent licenses granted under the third
paragraph of section 11).
However, if you cease all violation of this License, then your
license from a particular copyright holder is reinstated (a)
provisionally, unless and until the copyright holder explicitly and
finally terminates your license, and (b) permanently, if the copyright
holder fails to notify you of the violation by some reasonable means
prior to 60 days after the cessation.
Moreover, your license from a particular copyright holder is
reinstated permanently if the copyright holder notifies you of the
violation by some reasonable means, this is the first time you have
received notice of violation of this License (for any work) from that
copyright holder, and you cure the violation prior to 30 days after
your receipt of the notice.
Termination of your rights under this section does not terminate the
licenses of parties who have received copies or rights from you under
this License. If your rights have been terminated and not permanently
reinstated, you do not qualify to receive new licenses for the same
material under section 10.
9. Acceptance Not Required for Having Copies.
You are not required to accept this License in order to receive or
run a copy of the Program. Ancillary propagation of a covered work
occurring solely as a consequence of using peer-to-peer transmission
to receive a copy likewise does not require acceptance. However,
nothing other than this License grants you permission to propagate or
modify any covered work. These actions infringe copyright if you do
not accept this License. Therefore, by modifying or propagating a
covered work, you indicate your acceptance of this License to do so.
10. Automatic Licensing of Downstream Recipients.
Each time you convey a covered work, the recipient automatically
receives a license from the original licensors, to run, modify and
propagate that work, subject to this License. You are not responsible
for enforcing compliance by third parties with this License.
An "entity transaction" is a transaction transferring control of an
organization, or substantially all assets of one, or subdividing an
organization, or merging organizations. If propagation of a covered
work results from an entity transaction, each party to that
transaction who receives a copy of the work also receives whatever
licenses to the work the party's predecessor in interest had or could
give under the previous paragraph, plus a right to possession of the
Corresponding Source of the work from the predecessor in interest, if
the predecessor has it or can get it with reasonable efforts.
You may not impose any further restrictions on the exercise of the
rights granted or affirmed under this License. For example, you may
not impose a license fee, royalty, or other charge for exercise of
rights granted under this License, and you may not initiate litigation
(including a cross-claim or counterclaim in a lawsuit) alleging that
any patent claim is infringed by making, using, selling, offering for
sale, or importing the Program or any portion of it.
11. Patents.
A "contributor" is a copyright holder who authorizes use under this
License of the Program or a work on which the Program is based. The
work thus licensed is called the contributor's "contributor version".
A contributor's "essential patent claims" are all patent claims
owned or controlled by the contributor, whether already acquired or
hereafter acquired, that would be infringed by some manner, permitted
by this License, of making, using, or selling its contributor version,
but do not include claims that would be infringed only as a
consequence of further modification of the contributor version. For
purposes of this definition, "control" includes the right to grant
patent sublicenses in a manner consistent with the requirements of
this License.
Each contributor grants you a non-exclusive, worldwide, royalty-free
patent license under the contributor's essential patent claims, to
make, use, sell, offer for sale, import and otherwise run, modify and
propagate the contents of its contributor version.
In the following three paragraphs, a "patent license" is any express
agreement or commitment, however denominated, not to enforce a patent
(such as an express permission to practice a patent or covenant not to
sue for patent infringement). To "grant" such a patent license to a
party means to make such an agreement or commitment not to enforce a
patent against the party.
If you convey a covered work, knowingly relying on a patent license,
and the Corresponding Source of the work is not available for anyone
to copy, free of charge and under the terms of this License, through a
publicly available network server or other readily accessible means,
then you must either (1) cause the Corresponding Source to be so
available, or (2) arrange to deprive yourself of the benefit of the
patent license for this particular work, or (3) arrange, in a manner
consistent with the requirements of this License, to extend the patent
license to downstream recipients. "Knowingly relying" means you have
actual knowledge that, but for the patent license, your conveying the
covered work in a country, or your recipient's use of the covered work
in a country, would infringe one or more identifiable patents in that
country that you have reason to believe are valid.
If, pursuant to or in connection with a single transaction or
arrangement, you convey, or propagate by procuring conveyance of, a
covered work, and grant a patent license to some of the parties
receiving the covered work authorizing them to use, propagate, modify
or convey a specific copy of the covered work, then the patent license
you grant is automatically extended to all recipients of the covered
work and works based on it.
A patent license is "discriminatory" if it does not include within
the scope of its coverage, prohibits the exercise of, or is
conditioned on the non-exercise of one or more of the rights that are
specifically granted under this License. You may not convey a covered
work if you are a party to an arrangement with a third party that is
in the business of distributing software, under which you make payment
to the third party based on the extent of your activity of conveying
the work, and under which the third party grants, to any of the
parties who would receive the covered work from you, a discriminatory
patent license (a) in connection with copies of the covered work
conveyed by you (or copies made from those copies), or (b) primarily
for and in connection with specific products or compilations that
contain the covered work, unless you entered into that arrangement,
or that patent license was granted, prior to 28 March 2007.
Nothing in this License shall be construed as excluding or limiting
any implied license or other defenses to infringement that may
otherwise be available to you under applicable patent law.
12. No Surrender of Others' Freedom.
If conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot convey a
covered work so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you may
not convey it at all. For example, if you agree to terms that obligate you
to collect a royalty for further conveying from those to whom you convey
the Program, the only way you could satisfy both those terms and this
License would be to refrain entirely from conveying the Program.
13. Use with the GNU Affero General Public License.
Notwithstanding any other provision of this License, you have
permission to link or combine any covered work with a work licensed
under version 3 of the GNU Affero General Public License into a single
combined work, and to convey the resulting work. The terms of this
License will continue to apply to the part which is the covered work,
but the special requirements of the GNU Affero General Public License,
section 13, concerning interaction through a network will apply to the
combination as such.
14. Revised Versions of this License.
The Free Software Foundation may publish revised and/or new versions of
the GNU General Public License from time to time. Such new versions will
be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.
Each version is given a distinguishing version number. If the
Program specifies that a certain numbered version of the GNU General
Public License "or any later version" applies to it, you have the
option of following the terms and conditions either of that numbered
version or of any later version published by the Free Software
Foundation. If the Program does not specify a version number of the
GNU General Public License, you may choose any version ever published
by the Free Software Foundation.
If the Program specifies that a proxy can decide which future
versions of the GNU General Public License can be used, that proxy's
public statement of acceptance of a version permanently authorizes you
to choose that version for the Program.
Later license versions may give you additional or different
permissions. However, no additional obligations are imposed on any
author or copyright holder as a result of your choosing to follow a
later version.
15. Disclaimer of Warranty.
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
16. Limitation of Liability.
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
SUCH DAMAGES.
17. Interpretation of Sections 15 and 16.
If the disclaimer of warranty and limitation of liability provided
above cannot be given local legal effect according to their terms,
reviewing courts shall apply local law that most closely approximates
an absolute waiver of all civil liability in connection with the
Program, unless a warranty or assumption of liability accompanies a
copy of the Program in return for a fee.
END OF TERMS AND CONDITIONS
How to Apply These Terms to Your New Programs
If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.
To do so, attach the following notices to the program. It is safest
to attach them to the start of each source file to most effectively
state the exclusion of warranty; and each file should have at least
the "copyright" line and a pointer to where the full notice is found.
{one line to give the program's name and a brief idea of what it does.}
Copyright (C) {year} {name of author}
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
Also add information on how to contact you by electronic and paper mail.
If the program does terminal interaction, make it output a short
notice like this when it starts in an interactive mode:
{project} Copyright (C) {year} {fullname}
This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
This is free software, and you are welcome to redistribute it
under certain conditions; type `show c' for details.
The hypothetical commands `show w' and `show c' should show the appropriate
parts of the General Public License. Of course, your program's commands
might be different; for a GUI interface, you would use an "about box".
You should also get your employer (if you work as a programmer) or school,
if any, to sign a "copyright disclaimer" for the program, if necessary.
For more information on this, and how to apply and follow the GNU GPL, see
<https://www.gnu.org/licenses/>.
The GNU General Public License does not permit incorporating your program
into proprietary programs. If your program is a subroutine library, you
may consider it more useful to permit linking proprietary applications with
the library. If this is what you want to do, use the GNU Lesser General
Public License instead of this License. But first, please read
<https://www.gnu.org/philosophy/why-not-lgpl.html>.

==> PyBDSF-1.11.0/MANIFEST.in <==
# Setuptools, when combined with setuptools_scm, puts all the files that are
# under version control in the source distribution. We only want to distribute
# the files needed to build a binary wheel. So, start with an empty slate.
prune *
exclude .*
# Python package files
graft bdsf
# Required scikit-build files
include CMakeLists.txt
# Required minpack sources
include src/minpack/lmder.f
include src/minpack/lmpar.f
include src/minpack/qrfac.f
include src/minpack/qrsolv.f
include src/minpack/enorm.f
include src/minpack/dpmpar.f
# Required port3 sources
include src/port3/dnsg.f
include src/port3/dn2g.f
include src/port3/drnsg.f
include src/port3/drn2g.f
include src/port3/d1mach.f
include src/port3/da7sst.f
include src/port3/dc7vfn.f
include src/port3/dd7tpr.f
include src/port3/dd7upd.f
include src/port3/df7hes.f
include src/port3/dg7lit.f
include src/port3/dg7qts.f
include src/port3/ditsum.f
include src/port3/divset.f
include src/port3/dl7itv.f
include src/port3/dl7ivm.f
include src/port3/dl7mst.f
include src/port3/dl7nvr.f
include src/port3/dl7sqr.f
include src/port3/dl7srt.f
include src/port3/dl7svn.f
include src/port3/dl7svx.f
include src/port3/dl7tsq.f
include src/port3/dl7tvm.f
include src/port3/dl7vml.f
include src/port3/dn2cvp.f
include src/port3/dn2lrd.f
include src/port3/dn2rdp.f
include src/port3/do7prd.f
include src/port3/dparck.f
include src/port3/dq7apl.f
include src/port3/dq7rad.f
include src/port3/dq7rfh.f
include src/port3/dr7mdc.f
include src/port3/drldst.f
include src/port3/ds7cpr.f
include src/port3/ds7lup.f
include src/port3/ds7lvm.f
include src/port3/dv2axy.f
include src/port3/dv2nrm.f
include src/port3/dv7cpy.f
include src/port3/dv7dfl.f
include src/port3/dv7prm.f
include src/port3/dv7scl.f
include src/port3/dv7scp.f
include src/port3/dv7swp.f
include src/port3/i1mach.f
include src/port3/i7mdcn.f
include src/port3/stopx.f
# Required pytesselate sources
include src/fortran/constants.h
include src/fortran/pytess_*.f
# Required natgrid sources
include natgrid/Src/*.c
include natgrid/Include/*.h
# Required cbdsm sources
include src/c++/*
include src/c++/num_util/*

==> PyBDSF-1.11.0/README.rst <==
PyBDSF
======
PyBDSF (the Python **B**\ lob **D**\ etection and **S**\ ource **F**\ inder)
is a tool designed to decompose radio interferometry images into
sources and make available their properties for further use. PyBDSF can
decompose an image into a set of Gaussians, shapelets, or wavelets as
well as calculate spectral indices and polarization properties of
sources and measure the PSF variation across an image. PyBDSF uses an
interactive environment based on CASA that will be familiar to most
radio astronomers. PyBDSF may also be used in Python
scripts.
The documentation is currently hosted at https://pybdsf.readthedocs.io
Installation
------------

Installation can be done in a number of ways. In order of preference (read:
ease of use):

* Install the latest release from PyPI::

    pip install bdsf

  .. note:: The interactive shell ``pybdsf`` is no longer installed by default.
     To install it, you have to specify the extra ``[ishell]``. For example::

        pip install bdsf[ishell]

* Install the ``master`` branch from the PyBDSF git repository::

    pip install git+https://github.com/lofar-astron/PyBDSF.git

  Or install a specific revision or release, for example ``v1.9.3``::

    pip install git+https://github.com/lofar-astron/PyBDSF.git@v1.9.3

* Install from a local source tree, e.g. after you have cloned the git repository::

    pip install .

  or (to install the interactive shell as well)::

    pip install .[ishell]

If you get the error::

    RuntimeError: module compiled against API version 0xf but this version of numpy is 0xd

then please update ``numpy`` with ``pip install -U numpy``.

.. attention:: It is *not* recommended to use ``python setup.py install``. It is
   deprecated, and we do *not* support it.

External requirements include the following Ubuntu packages (or similar packages
in another Linux distribution):

* ``gfortran``
* ``libboost-python-dev``
* ``libboost-numpy-dev`` (only if Boost > 1.63)
* ``python-setuptools``

Also, a working ``numpy`` installation is required. At runtime, you will need
``scipy`` and either ``pyfits`` and ``pywcs``, or ``python-casacore``, or
``astropy``.

If you install as a user not using conda, use ``pip install --user``.
Make sure to use similar versions for gcc, g++, and gfortran
(use update-alternatives if multiple versions of gcc/g++/gfortran are present
on the system). In this case, the script ``pybdsf`` is installed in
``~/.local/bin``, so you might want to add that to your ``$PATH``.

Installation on macOS is more involved; you will need the packages mentioned
above, for example installed with Homebrew. You will also need to tell
``setup.py`` to use the same compiler for Fortran as for C++. In case of
problems, see
https://github.com/lofar-astron/PyBDSF/issues/104#issuecomment-509267088 for
some possible steps to try.
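
Usage
-----

As a quick check that the installation works, PyBDSF can be run on a FITS
image from Python. The following is a minimal sketch; ``image.fits`` is a
placeholder filename, and ``thresh_isl=4.0`` is just an example option (see
the documentation linked above for the full list of input parameters)::

    import bdsf

    # Run the default source-finding chain on the image
    img = bdsf.process_image('image.fits', thresh_isl=4.0)

    # Display the fit results interactively (requires matplotlib)
    img.show_fit()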
.. image:: https://github.com/lofar-astron/PyBDSF/actions/workflows/ci.yml/badge.svg?branch=master
:target: https://github.com/lofar-astron/PyBDSF/actions/workflows/ci.yml

==> PyBDSF-1.11.0/bdsf/__init__.py <==
"""Initialize PyBDSF namespace.
Import all standard operations, define the default chain of
operations, and provide the function 'execute', which can
execute a chain of operations properly. Also define the
'process_image' convenience function that can take
options as arguments rather than as a dictionary (as
required by 'execute').
"""
from __future__ import print_function
from __future__ import absolute_import
try:
    import matplotlib.pyplot as pl
    has_pl = True
except (RuntimeError, ImportError, AssertionError):
    import sys
    print("\033[31;1mWARNING\033[0m: Matplotlib pyplot could not be imported. Plotting is disabled.", file=sys.stderr)
    has_pl = False
from .readimage import Op_readimage
from .collapse import Op_collapse
from .preprocess import Op_preprocess
from .rmsimage import Op_rmsimage
from .threshold import Op_threshold
from .islands import Op_islands
from .gausfit import Op_gausfit
from .make_residimage import Op_make_residimage
from .output import Op_outlist
from .shapefit import Op_shapelets
from .gaul2srl import Op_gaul2srl
from .spectralindex import Op_spectralindex
from .polarisation import Op_polarisation
from .wavelet_atrous import Op_wavelet_atrous
from .psf_vary import Op_psf_vary
from .cleanup import Op_cleanup
from ._version import __version__
from .functions import set_up_output_paths
import gc
import os
default_chain = [Op_readimage(),
                 Op_collapse(),
                 Op_preprocess(),
                 Op_rmsimage(),
                 Op_threshold(),
                 Op_islands(),
                 Op_gausfit(),
                 Op_wavelet_atrous(),
                 Op_shapelets(),
                 Op_gaul2srl(),
                 Op_spectralindex(),
                 Op_polarisation(),
                 Op_make_residimage(),
                 Op_psf_vary(),
                 Op_outlist(),
                 Op_cleanup()
                 ]
fits_chain = default_chain  # for legacy scripts
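
# Example usage of execute() (a minimal sketch; 'image.fits' is a placeholder
# filename, and the options dictionary accepts any PyBDSF input parameter):
#
#     img = execute(default_chain, {'filename': 'image.fits'})
#
# For most purposes, the process_image() convenience function defined below is
# the preferred entry point, as it accepts options as keyword arguments.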
def execute(chain, opts):
    """Execute chain.

    Create new Image with given options and apply chain of
    operations to it. The opts input must be a dictionary.
    """
    from .image import Image
    from . import mylogger

    if 'quiet' in opts:
        quiet = opts['quiet']
    else:
        quiet = False
    if 'debug' in opts:
        debug = opts['debug']
    else:
        debug = False

    _, basedir = set_up_output_paths(opts)
    basename = os.path.basename(opts['filename']) + '.pybdsf.log'
    logfilename = os.path.join(basedir, basename)
    mylogger.init_logger(logfilename, quiet=quiet, debug=debug)
    mylog = mylogger.logging.getLogger("PyBDSF.Init")
    mylog.info("Processing " + opts["filename"])

    try:
        img = Image(opts)
        img.log = logfilename
        _run_op_list(img, chain)
        return img
    except RuntimeError as err:
        # Catch and log, then re-raise if needed (e.g., for AstroWise)
        mylog.error(str(err))
        raise
    except KeyboardInterrupt:
        mylogger.userinfo(mylog, "\n\033[31;1mAborted\033[0m")
        raise
def _run_op_list(img, chain):
    """Runs an Image object through chain of op's.

    This is separate from execute() to allow other modules (such as
    interface.py) to use it as well.
    """
    from time import time
    from .interface import raw_input_no_history
    from .gausfit import Op_gausfit
    from . import mylogger
    import gc

    ops = []
    stopat = img.opts.stop_at

    # Make sure all op's are instances
    for op in chain:
        if isinstance(op, type):
            ops.append(op())
        else:
            ops.append(op)
        if stopat == 'read' and isinstance(op, Op_readimage): break
        if stopat == 'isl' and isinstance(op, Op_islands): break

    # Log all non-default parameters
    mylog = mylogger.logging.getLogger("PyBDSF.Init")
    mylog.info("PyBDSF version %s" % (__version__, ))
    par_msg = "Non-default input parameters:\n"
    user_opts = img.opts.to_list()
    for user_opt in user_opts:
        k, v = user_opt
        val = img.opts.__getattribute__(k)
        if val != v._default and v.group() != 'hidden':
            par_msg += ' %-20s = %s\n' % (k, repr(val))
    mylog.info(par_msg[:-1])  # -1 is to trim final newline

    # Run all op's
    dc = '\033[34;1m'
    nc = '\033[0m'
    for op in ops:
        if isinstance(op, Op_gausfit) and img.opts.interactive:
            print(dc + '--> Displaying islands and rms image...' + nc)
            if max(img.ch0_arr.shape) > 4096:
                print(dc + '--> Image is large. Showing islands only.' + nc)
                img.show_fit(rms_image=False, mean_image=False, ch0_image=False,
                             ch0_islands=True, gresid_image=False, sresid_image=False,
                             gmodel_image=False, smodel_image=False, pyramid_srcs=False)
            else:
                img.show_fit(rms_image=True, mean_image=True,
                             ch0_islands=True, gresid_image=False, sresid_image=False,
                             gmodel_image=False, smodel_image=False, pyramid_srcs=False)
            prompt = dc + "Press enter to continue or 'q' to quit .. : " + nc
            answ = raw_input_no_history(prompt)
            while answ != '':
                if answ == 'q':
                    return False
                answ = raw_input_no_history(prompt)
        op.__start_time = time()
        op(img)
        op.__stop_time = time()
        gc.collect()

    if img.opts.interactive and not img._pi:
        print(dc + 'Fitting complete. Displaying results...' + nc)
        if img.opts.shapelet_do:
            show_smod = True
            show_sres = True
        else:
            show_smod = False
            show_sres = False
        if img.opts.spectralindex_do:
            show_spec = True
        else:
            show_spec = False
        if max(img.ch0_arr.shape) > 4096:
            print(dc + '--> Image is large. Showing Gaussian residual image only.' + nc)
            img.show_fit(rms_image=False, mean_image=False, ch0_image=False,
                         ch0_islands=False, gresid_image=True, sresid_image=False,
                         gmodel_image=False, smodel_image=False, pyramid_srcs=False,
                         source_seds=show_spec)
        else:
            img.show_fit(smodel_image=show_smod, sresid_image=show_sres,
                         source_seds=show_spec)

    if img.opts.print_timing:
        print("="*36)
        print("%18s : %10s" % ("Module", "Time (sec)"))
        print("-"*36)
        for i, op in enumerate(chain):
            if hasattr(op, '__start_time'):
                print("%18s : %f" % (op.__class__.__name__,
                                     (op.__stop_time - op.__start_time)))
                indx_stop = i
        print("="*36)
        print("%18s : %f" % ("Total",
                             (chain[indx_stop].__stop_time - chain[0].__start_time)))

    # Log all internally derived parameters
    mylog = mylogger.logging.getLogger("PyBDSF.Final")
    par_msg = "Internally derived parameters:\n"
    import inspect
    import types
    for attr in inspect.getmembers(img.opts):
        if attr[0][0] != '_':
            if isinstance(attr[1], (int, str, bool, float, type(None), tuple, list)):
                if hasattr(img, attr[0]):
                    used = img.__getattribute__(attr[0])
                    if used != attr[1] and isinstance(used, (int, str, bool, float,
                                                             type(None), tuple,
                                                             list)):
                        par_msg += ' %-20s : %s\n' % (attr[0], repr(used))
    mylog.info(par_msg[:-1])  # -1 is to trim final newline

    return True
def process_image(input, **kwargs):
    """Run a standard analysis and return the associated Image object.

    The input can be a FITS or CASA image, a PyBDSF parameter save
    file, or a dictionary of options. Partial names are allowed for the
    parameters as long as they are unique. Parameters are set to default
    values if par = ''.

    Examples:
        > img = bdsf.process_image('example.fits', thresh_isl=4)
            --> process the FITS image named 'example.fits'
        > img_3C196 = bdsf.process_image('3C196.image', mea='map')
            --> process a CASA image; the 'mean_map' parameter is abbreviated
        > img_VirA = bdsf.process_image('VirA_im.pybdsf.sav')
            --> load a parameter save file and process
    """
    from .interface import load_pars
    from .image import Image
    import os

    # Try to load input assuming it's a parameter save file or a dictionary.
    # load_pars returns None if this doesn't work.
    img, err = load_pars(input)

    # If load_pars fails (returns None), assume that input is an image file. If it's not a
    # valid image file (but is an existing file), an error will be raised
    # by img.process() during reading of the file.
    if img is None:
        if os.path.exists(input):
            img = Image({'filename': input})
        else:
            raise RuntimeError("File '" + input + "' not found.")

    # Set logging options (must be done explicitly, as they are used before the
    # kwargs are parsed in img.process())
    if 'quiet' in kwargs:
        img.opts.quiet = kwargs['quiet']
    if 'debug' in kwargs:
        img.opts.debug = kwargs['debug']

    # Now process it. Any kwargs specified by the user will
    # override those read in from the parameter save file or dictionary.
    img.process(**kwargs)
    return img

==> PyBDSF-1.11.0/bdsf/_changelog.py <==
"""Changelog module.
This module records all the relevant changes made to the software.
The change list must be kept up-to-date manually.
"""
def changelog():
"""
PyBDSF Changelog.
-----------------------------------------------------------------------
2023/05/22 - Version 1.10.3
2023/05/08 - Fix build issue with Python 3.11 (#205)
2023/05/03 - Use cibuildwheel to build binary wheels (#203)
Build binary wheels for Linux and MacOS (Intel).
Drop support for Python 3.6.
2023/05/02 - Fix #198 (#199)
Use the new method call `canvas.manager.set_window_title`
2023/04/28 - Replace Travis CI with GitHub actions (#196)
2023/02/10 - Version 1.10.2
2023/02/10 - Fix issues with numpy versions >= 1.24 (#193)
2022/11/28 - Switch to `manylinux2014` for building binary wheels (#191)
2022/11/23 - Fix ImportError in setuptools (#190)
2022/10/31 - Add binary wheels for Python 3.10 (#186)
2022/10/14 - Fix various documentation issues (#185)
2022/10/11 - Add logfilename option (#181)
2022/10/05 - Use len() instead of numpy.alen() (#180)
2022/02/14 - Version 1.10.1: Fix Numpy API incompatibility issue
2022/02/09 - Version 1.10.0
2022/02/09 - Update some functions as required by scipy versions >= 1.8.0
(PR #172)
2022/02/09 - Fix build issues with Python 3.8, end support for Python < 3.6,
add support for Python 3.8 and 3.9, and make installation of the interactive
pybdsf shell optional (PR #169)
2022/02/09 - Improve handling of the beam in the spectral index module
(PR #165)
2021/05/05 - Improve handling of large, complex islands (PR #160)
2020/04/07 - Allow a file to be supplied for the ch0 image used in the
spectral index module (PR #127)
2019/12/05 - Version 1.9.2
2019/12/04 - Fix exception behaviour if spline order change does not work
2019/09/27 - Add check for frequency info in header
2019/09/25 - Version 1.9.1
2019/09/25 - Fix various minor bugs
2019/06/06 - Fix blank_limit check_low error (#100)
2019/05/09 - Fix various shapelet decomposition issues
2019/05/08 - Fix crash in Gaussian fitting (#96)
2019/03/25 - Version 1.9.0
2018/10/18 - Add support for Python 3
2018/10/18 - Fix various minor bugs
2018/10/12 - Version 1.8.15
2018/10/09 - Fix segfault in Gaussian fitting (#63)
2018/10/04 - Fix math domain error (#76)
2018/06/21 - Fix setup.py for boost versions > 1.63
2018/05/18 - Version 1.8.14
2018/05/18 - Fix an error on total flux density (#50)
2018/05/18 - Add the possibility to provide an external noise and mean maps (#43)
2018/05/18 - Append the image FITS header into catalog FITS header (#53)
2018/05/18 - Make PyBDSF compatible with newer boost libraries, specifically
those used in Ubuntu 18.04 (#55)
2017/11/17 - Version 1.8.13
2017/11/17 - Remove deprecated boolean operators
2017/09/01 - Version 1.8.12
2017/09/01 - Fix crash with tiny regions
2017/09/01 - Fix very low centroid peak fluxes
2017/09/01 - Fix compile error with numpy 1.13
2017/06/01 - Version 1.8.11
2017/06/01 - Fix for interactive shell problem
2017/05/31 - Version 1.8.10
2017/05/31 - Fixes for various installation and runtime issues on modern systems.
2017/03/23 - Version 1.8.9
2017/03/23 - Fix to bug that causes an error when grouping Gaussians
into sources
2017/03/17 - Version 1.8.8
2017/03/17 - Rename to PyBDSF, move to github, add setup.py installer
2017/02/28 - Fix to issues related to numpy >= 1.12 and astropy >= 1.3
2016/06/10 - Version 1.8.7
2016/06/10 - Fix to bug that caused incorrect output images when input
image was not square.
2016/01/20 - Version 1.8.6
2016/01/15 - Fix to bug that caused incorrect island mask when two
islands are very close together.
2015/12/07 - Fix to bug that caused crash when image is masked and
the src_ra_dec option is used.
2015/11/30 - Version 1.8.5
2015/11/25 - Fix to bug in export_image that resulted in incorrect
output image when both trim_box and pad_image were used.
2015/11/20 - Fix to bug in wavelet module related to merging of islands.
2015/11/20 - Fix to bug in polarization module related to numbering of
new islands.
2015/11/20 - Fix to bug in spectral index module related to rms map
calculation.
2015/11/20 - Added option to use much faster (but also much more memory
intensive) SciPy fftconvolve function instead of custom PyBDSM one.
The option (use_scipy_fft) defaults to True.
2015/11/20 - Increased number of digits for values in output text
catalogs
2015/08/06 - Version 1.8.4
2015/08/06 - Improved speed of wavelet module.
2015/08/06 - Added option to use PyFFTW in wavelet module if available.
2015/08/06 - Fix to IPython version check.
2015/08/06 - Fix to bug that caused a failure to write shapelet models
in FITS format.
2014/11/07 - Fix to bug that caused a crash when both atrous_do = True
and output_all = True. Fixed a bug that caused a crash on machines
with only one core.
2014/09/26 - Version 1.8.3
2014/09/26 - Fix to bug that caused a crash when using the wavelet
module and all Gaussians in an island were flagged.
2014/07/03 - Mask will now be expanded to match input image shape. Fix
to bug that caused image read failure when image lacks a Stokes axis.
2014/05/14 - Version 1.8.2
2014/05/15 - Fix to bug in CASA masks generated with export_image() that
caused cleaning to fail in CASA 4.2 and above.
2014/02/05 - Fix to bug that resulted in output file names being
converted to lower case inappropriately.
2014/01/14 - Version 1.8.1
2014/01/13 - Added option (bbs_patches = 'mask') to allow patches in
an output BBS sky model to be defined using a mask image.
2014/01/09 - Fix to bug that caused the incl_empty option to be
ignored when format='fits' in the write_catalog task.
2013/12/05 - Enabled output of images in CASA format in the export_image
task (img_format = 'casa'). Added an option to export_image task to
export an island-mask image, with ones where there is emission and
zeros elsewhere (image_type = 'island_mask'). Features in the island
mask may be optionally dilated by specifying the number of dilation
iterations with the mask_dilation parameter. Added an option to
write a CASA region file to the write_catalog task (format =
'casabox'). Added an option to write a CSV catalog to the
write_catalog task (format = 'csv').
2013/11/04 - Added error message when the rms is zero in some part of the
rms map.
2013/10/16 - Version 1.8.0
2013/10/16 - Improved wavelet fitting. Added option so that wavelet
fitting can be done to the sum of images on the remaining wavelet
scales, improving the signal for fitting (controlled with the
atrous_sum option). Added option so that user can choose whether to
include new islands found only in the wavelet images in the final
fit or not (controlled with the atrous_orig_isl option).
2013/10/10 - Fixed a bug that could lead to incomplete fitting of
some islands. Improved overall convergence of fits.
2013/10/10 - Version 1.7.7
2013/10/10 - Improved fitting of bright sources under certain
circumstances.
2013/09/27 - Version 1.7.6
2013/09/27 - Changed caching behavior to ensure that temporary files
are always deleted after they are no longer needed or on exit.
2013/09/05 - Renamed blank_zeros to blank_limit. The blank_limit
option now specifies a limit below which pixels are blanked.
2013/09/05 - Enabled SAGECAL sky-model output.
2013/09/02 - Version 1.7.5
2013/09/02 - Fix to bug that caused a crash when images with 2 or
3 axes were used. Improved rms and mean calculation (following the
implementation used in PySE, see http://dare.uva.nl/document/174052
for details). The threshold used to determine the clipped rms and
mean values is now determined internally by default (kappa_clip =
None).
2013/08/27 - Version 1.7.4
2013/08/29 - Fix to bug in show_fit() that caused error when
'i' is pressed in the plot window and shapelet decomposition
had not been done. Tweak to 'pybdsm' startup shell script to
avoid problems with the Mac OS X matplotlib backend on non-
framework Python installations (such as Anaconda Python).
2013/08/28 - Fix to bug in process_image() that could result in
wavelet Gaussians being excluded from model image under certain
conditions.
2013/08/27 - Version 1.7.3
2013/08/27 - Fix to bug in image reading that caused images to be
distorted.
2013/08/23 - Version 1.7.2
2013/08/23 - Improved handling of non-standard FITS CUNIT keywords.
Improved loading of FITS images when trim_box is specified.
2013/08/22 - Version 1.7.1
2013/08/21 - Fix to bug that caused cached images to be deleted when
rerunning an analysis. Fix to bug in show_fit() due to undefined
images. Fix to bug in process_image() that would result in unneeded
reprocessing.
2013/08/20 - Version 1.7.0
2013/08/19 - PyBDSM will now use Astropy if installed for FITS and WCS
modules.
2013/08/11 - Fix to avoid excessive memory usage in the wavelet module
(replaced scipy.signal.fftconvolve with a custom function).
2013/08/11 - Added option to use disk caching for internally derived
images (do_cache). Caching can reduce memory usage and is
therefore useful when processing large images.
2013/07/11 - Implemented a variable operation chain for process_image
(and img.process()) that allows unneeded steps to be skipped if
the image is being reprocessed.
2013/07/11 - Fixed a bug that could cause Gaussian fitting to hang
during iterative fitting of large islands.
2013/06/24 - Added option (fix_to_beam) to fix the size and position
angle of Gaussians to the restoring beam during fitting. Fix to
bug that caused the position angle used to initialize fitting to
be incorrect.
2013/03/22 - Version 1.6.1
2013/03/21 - Fix to bug in ds9 and kvis catalog files that resulted in
incorrect position angles. Fix to bug in position-dependent WCS
transformations that caused incorrect source parameters in output
catalogs. Added option to output uncorrected source parameters
to a BBS sky model file (correct_proj).
2013/03/14 - Removed sky transformations for flagged Gaussians, as
these could sometimes give math domain errors. Disabled aperture
flux measurement on wavelet images as it is not used/needed.
2013/02/25 - Version 1.6.0
2013/02/25 - Improved speed and accuracy of aperture flux
calculation.
2013/02/20 - Added option to use the curvature map method of
Hancock et al. (2012) for the initial estimation of Gaussian
parameters (ini_method = 'curvature') and for grouping of
Gaussians into sources (group_method = 'curvature').
2013/02/18 - Fix to bug in spectral index module that caused sources
with multiple Gaussians to be skipped. Minor adjustments to the
wavelet module to improve performance.
2013/02/08 - Implemented position-dependent WCS transformations.
2013/02/08 - Added option to fit to any arbitrary location in the
image within a given radius (src_ra_dec and src_radius_pix).
2013/02/04 - Fix to bug in wavelet module that caused crash when
no Gaussians were fit to the main image.
2013/01/30 - Fix to bug that resulted in incorrect numbering of
wavelet Gaussians. Added 'srl' output in ds9 format when using
output_all = True.
2013/01/28 - Fix to bug in source grouping algorithm. Improved fitting
when background mean is nonzero. Fix to allow images with GLAT and
GLON WCS coordinates. Fix to bug when equinox is taken from the
epoch keyword.
2012/12/19 - Version 1.5.1
2012/12/19 - Fix to bug in wavelet module that occurred when the
center of the wavelet Gaussian lies outside of the image. Fix
to re-enable srl output catalogs in ds9 region format. Fix to
bug that resulted in the output directory not always being
created. Added an option (aperture_posn), used when aperture
fluxes are desired, to specify whether to center the aperture
on the source centroid or the source peak.
2012/12/02 - Changes to reduce memory usage, particularly in the
wavelet module.
2012/11/30 - Fix to bypass bug in matplotlib when display variable
is not set.
2012/11/21 - Fixed bug that caused a crash when a detection image
was used. Fixed a bug with incorrect save directory when
plot_allgaus = True.
2012/10/29 - Version 1.5.0
2012/10/29 - Improved handling of WCS information so that a much
greater variety of WCS systems may be used. Fixed a bug in logging
that occurred when negative values were found in the rms map.
Updated installation instructions.
2012/10/12 - Version 1.4.5
2012/10/12 - Added option (incl_empty) to include empty islands (that
have no un-flagged Gaussians) in output catalogs. Any such empty
islands are given negative source IDs and positions given by the
location of the peak of the island.
2012/10/10 - Fixed a bug in Gaussian fitting that could cause a crash
when fitting fails. Fixed a bug in parallelization that could
cause a crash due to improper concatenation of result lists.
2012/10/09 - Version 1.4.4
2012/10/09 - Improved logging. Added a warning when one or more islands
are not fit properly (i.e., no valid, unflagged Gaussians were
fit). Fixed a bug in parallelization of Gaussian fitting that
could cause a crash due to improper mapping of island lists to
processes.
2012/10/05 - Added code to handle images with no unblanked pixels.
Improved fitting robustness.
2012/10/04 - Version 1.4.3
2012/10/04 - Fixed a bug in the mean map calculation that caused mean
maps with constant values (i.e., non-2D maps) to have values of
0.0 Jy/beam unless mean_map = 'const' was explicitly specified.
Fixed a bug in Gaussian fitting that could cause an island to be
skipped.
2012/10/02 - Fixed a bug in the PSF vary module that resulted in
incorrect PSF generators being used. Added an option to smooth
the resulting PSF images (psf_smooth). Parallelized the PSF
interpolation and smoothing steps. Improved PSF vary documentation.
2012/09/25 - Version 1.4.2
2012/09/25 - Dramatically reduced the time required to identify valid
wavelet islands.
2012/09/21 - Fixed bug that resulted in output FITS gaul tables being
improperly sorted. Fixed cosmetic bug in the statusbar that could
sometimes cause improper formatting. Added example of SAMP usage
to the documentation.
2012/09/20 - Version 1.4.1
2012/09/20 - Fixed a bug in the wavelet module that caused a crash when
no Gaussians were fit to the ch0 image.
2012/09/19 - Added option (broadcast) to show_fit task to send
coordinates and row highlight request to a SAMP hub when a Gaussian
is clicked. Fixed bug in aperture flux masking that sometimes caused
the mask to be the wrong shape.
2012/09/18 - Added option to send images and catalogs to a SAMP hub
(activated by setting outfile = 'SAMP' in the export_image and
write_catalog tasks).
2012/09/13 - Improved speed of plotting when images are large and in
mean/rms map generation. Fixed bug that caused residual image
statistics to fail when NaNs are present.
2012/09/11 - Version 1.4.0
2012/09/11 - Parallelized Gaussian fitting, shapelet decomposition,
validation of wavelet islands, and mean/rms map generation.
The number of cores to be used can be specified with the ncores
option (default is to use up to 8). Fixed bug in SED plotting in
the show_fit task.
2012/08/29 - Fixed incorrect terminal size in parameter listing. Added
logging of non-default input parameters and internally derived
parameters.
2012/08/22 - Version 1.3.2
2012/08/22 - Fixed a bug that caused the user-specified rms_box to be
ignored. Added an option to enable the Monte Carlo error estimation
for 'M'-type sources (the do_mc_errors option), which is now
disabled by default.
2012/07/11 - Version 1.3.1
2012/07/11 - Cleaned up unused options.
2012/07/10 - Fixed a bug that caused a segfault during Gaussian
fitting. Fixed a bug that caused a crash when a detection image
is used.
2012/07/05 - Fixed a bug that caused images written when output_all =
True to be transposed. Added frequency information to all output
images. Improved fitting robustness to prevent rare cases in
which no valid Gaussians could be fit to an island. Modified the
island-finding routine to handle NaNs properly.
2012/07/03 - Version 1.3
2012/07/03 - Fixed a bug in calculation of the positional errors of
Gaussians. If interactive=True and image is large (> 4096 pixels),
display is limited to 'ch0_islands' only; otherwise, show_fit()
is very slow. Tweaked show_fit() to better display a single image.
2012/07/02 - Adjusted rms_box algorithm to check for negative rms
values (due to interpolation with cubic spline). If negative
values are found, either the box size is increased or the
interpolation is done with order=1 (bilinear) instead.
2012/06/28 - Output now includes the residual image produced by
using only wavelet Gaussians (if any) when atrous_do=True and
output_all=True. Improved organization of files when
output_all=True. Added logging of simple statistics (mean,
std. dev, skew, and kurtosis) of the residual images.
2012/06/22 - Included image rotation (if any) in beam definition.
Rotation angle can vary across the image (defined by image WCS).
2012/06/19 - Changed exception handling to raise exceptions when
the interactive shell is not being used. Fixed bug that
caused a crash when using show_fit() when no islands were
found.
2012/06/15 - Added Sagecal output format for Gaussian catalogs.
2012/06/14 - Added check for newer versions of the PyBDSM
software tar.gz file available on ftp.strw.leidenuniv.nl.
2012/06/13 - Added total island flux (from sum of pixels) to
"gaul" and "srl" catalogs.
2012/06/06 - Version 1.2
2012/06/06 - Added option to calculate fluxes within a specified
aperture radius in pixels (set with the "aperture" option).
Aperture fluxes, if measured, are output in the 'srl' catalogs.
Changed code that determines terminal width to be more robust.
2012/05/07 - Removed dependencies on matplotlib -- if matplotlib is
not available, plotting is disabled. Corrected inconsistencies,
spelling mistakes, etc. in help text and documentation. Cleaned
up unneeded modules and files.
2012/05/02 - Added option to output flux densities for every channel
found by the spectral index module. Added option to spectral index
module to allow use of flux densities that do not meet the desired
SNR. Changed flag_maxsnr criterion to also flag if the peak flux
density per beam of the Gaussian exceeds the value at its center.
Removed incl_wavelet option.
2012/04/20 - Promoted the adaptive_rms_box parameter to the main options
listing and added the rms_box_bright option so that the user can
specify either (or both) of the rms_boxes. Fixed bug in wavelet
module so that invalid Gaussians (i.e., those that lie outside of
islands in the ch0 image) are not used when making the residual
images at each scale. Improved speed of Gaussian fitting to wavelet
images. Fixed bug that resulted in pixels found to be outside the
universe (check is enabled with the check_outsideuniv option) not
being masked properly.
2012/04/17 - Fixed bug in psf_vary module that resulted in PSF major and
minor axis maps in terms of sigma instead of FWHM. Added option
(psf_stype_only) to allow PSF fitting to non-S-type sources
(useful if sources are very distorted).
2012/04/12 - Fixed bug in adaptive scaling code that could cause
incorrect small-scale rms_box size. Added a parameter
(adaptive_thresh) that controls the minimum threshold for sources
used to set the small-scale rms_box size.
2012/04/02 - Implemented an adaptive scaling scheme for the rms_box
parameter that shrinks the box size near bright sources and expands
it far from them (enabled with the adaptive_rms_box option when
rms_box=None). This scheme generally results in improved rms and
mean maps when both strong artifacts and extended sources are
present. Fixed bug that prevented plotting of results during wavelet
decomposition when interactive = True.
2012/03/29 - Fixed bug in wavelet module that could cause incorrect
associations of Gaussians. Fixed bug in show_fit that displayed
incorrect model and residual images when wavelets were used.
2012/03/28 - Version 1.1
2012/03/28 - Fixed bug that caused mask to be ignored when determining
whether variations in the rms and mean maps are significant. Fixed bug
that caused internally derived rms_box value to be ignored.
2012/03/27 - Modified calculation of rms_box parameter (when rms_box
option is None) to work better with fields composed mainly of point
sources when strong artifacts are present. Tweaked flagging on FWHM
to prevent over-flagging of Gaussians in small islands. Changed
wavelet module to flag Gaussians whose centers fall outside of
islands found in the original image and removed atrous_orig_isl
option (as redundant).
2012/03/26 - Modified fitting of large islands to adopt an iterative
fitting scheme that limits the number of Gaussians fit
simultaneously per iteration to 10. This change speeds up fitting of
large islands considerably. The options peak_fit and peak_maxsize
control whether iterative fitting is done. Added new Gaussian
flagging condition (flag_maxsize_fwhm) that flags Gaussians whose
sigma contour times factor extends beyond the island boundary. This
flag prevents fitting of Gaussians that extend far beyond the island
boundary.
2012/03/23 - Tweaked settings that affect fitting of Gaussians to
improve fitting in general.
2012/03/19 - Added output of shapelet parameters to FITS tables. Fixed
issue with resizing of sources in spectral index module.
2012/03/16 - Fixed bugs in polarisation module that caused incorrect
polarization fractions.
2012/03/13 - Improved plotting speed (by factor of ~ 4) in show_fit when
there is a large number of islands. Simplified the spectral index
module to make it more user friendly and stable. Added the option to
use a "detection" image for island detection (the detection_image
option); source properties are still measured from the main input
image.
2012/03/01 - Fixed a bug in the polarisation module that could result in
incorrect flux densities. Changed logging module to suppress output
of ANSI color codes to the log file.
2012/02/27 - Implemented fitting of Gaussians in polarisation module,
instead of simple summation of pixel values, to determine polarized
flux densities.
2012/02/17 - In scripts, process_image() will now accept a dictionary of
parameters as input.
2012/02/10 - Sources that appear only in Stokes Q or U (and hence not in
Stokes I) are now identified and included in the polarisation
module. This identification is done using the polarized intensity
(PI) image. show_fit() and export_image() were updated to allow
display and export of the PI image.
2012/02/06 - Fixed bug in island splitting code that could result in
duplicate Gaussians.
2012/02/02 - Improved polarisation module. Polarization quantities are
now calculated for Gaussians as well as sources.
2012/01/27 - Fixed bug in psf_vary module that affected tessellation.
Fixed many small typos in parameter descriptions.
2012/01/18 - Fixed a bug that resulted in incorrect coordinates when the
trim_box option was used with a CASA image. Added option
(blank_zeros) to blank pixels in the input image that are exactly
zero.
2012/01/13 - Fixed minor bugs in the interactive shell and updated
pybdsm.py to support IPython 0.12.
2011/12/21 - Fixed bug in gaul2srl module due to rare cases in which an
island has a negative rms value. Fixed a memory issue in which
memory was not released after using show_fit.
2011/11/28 - Added option to have minpix_isl estimated automatically as
1/3 of the beam area. This estimate should help exclude false
islands that are much smaller than the beam. This estimate is not
allowed to fall below 6 pixels.
2011/11/11 - Fixed bugs in source generation that would lead to masking
of all pixels for certain sources during moment analysis. Adjusted
calculation of jmax in wavelet module to use island sizes (instead
of image size) if opts.atrous_orig_isl is True.
2011/11/04 - Implemented new island fitting routine (enabled with the
peak_fit option) that can speed up fitting of large islands. Changed
plotting of Gaussians in show_fit to use Ellipse artists to improve
plotting speed.
2011/11/03 - Altered reading of images to correctly handle 4D cubes.
Fixed bug in readimage that affected filenames.
2011/10/26 - Extended psf_vary module to include fitting of stacked PSFs
with Gaussians, interpolation of the resulting parameters across the
image, and correction of the deconvolved source sizes using the
interpolated PSFs. Changed plotting of Gaussians in show_fit() to
use the FWHM instead of sigma. Modified error calculation of M
sources to be more robust when sources are small. Fixed spelling of
"gaussian" in bbs_patches option list.
2011/10/24 - Many small bug fixes to the psf_vary module. Fixed use of
input directory so that input files not in the current directory are
handled correctly.
2011/10/14 - Added residual rms and mean values to sources and source
list catalogs. These values can be compared to background rms and
mean values as a quick check of fit quality.
2011/10/13 - Modified deconvolution to allow 1-D Gaussians and sources.
Added FREQ0, EQUINOX, INIMAGE keywords to output fits catalogs.
Fixed bug in source position angles. Adjusted column names of output
catalogs slightly to be more descriptive.
2011/10/12 - Added errors to source properties (using a Monte Carlo
method for M sources). Fixed bug in output column names.
2011/10/11 - Tweaked autocomplete to support IPython shell commands
(e.g., "!more file.txt"). Fixed bug in gaul2srl that resulted in
some very nearby Gaussians being placed into different sources.
Added group_tol option so that user can adjust the tolerance of how
Gaussians are grouped into sources.
2011/10/05 - Added output of source lists. Changed name of write_gaul
method to write_catalog (more general).
2011/10/04 - Added option to force source grouping by island
(group_by_isl). Added saving of parameters to a PyBDSM save file to
Op_output.
2011/09/21 - Fixed issue with shapelet centering failing: it now falls
back to simple moment when this happens. Fixed issue with
plotresults when shapelets are fit.
2011/09/14 - Placed output column names and units in TC properties of
Gaussians. This allows easy standardization of the column names and
units.
2011/09/13 - Fixes to trim_box and resetting of Image objects in
interface.process(). Changed thr1 --> thr2 in fit_iter in
gausfit.py, as bright sources are often "overfit" when using thr1,
leading to large negative residuals. Restricted fitting of Gaussians
to wavelet images to be only in islands found in the original image
if opts.atrous_orig_isl is True.
2011/09/08 - Version 1.0
2011/09/08 - Versioning system changed to use _version.py.
"""
pass
PyBDSF-1.11.0/bdsf/cleanup.py 0000664 0000000 0000000 00000003237 14650706641 0015556 0 ustar 00root root 0000000 0000000 """
Performs miscellaneous jobs at the end; assumes all other tasks have been run.
"""
from __future__ import absolute_import
import numpy as N
import os
from .image import *
from . import mylogger
from . import has_pl
if has_pl:
import matplotlib.pyplot as pl
import matplotlib.cm as cm
from . import functions as func
class Op_cleanup(Op):
""" """
def __call__(self, img):
mylog = mylogger.logging.getLogger("PyBDSM.Cleanup")
### plotresults for all gaussians together
if img.opts.plot_allgaus and has_pl:
pl.figure()
pl.title('All gaussians including wavelet images')
allgaus = list(img.gaussians)  # copy, so extending allgaus below does not modify img.gaussians in place
if hasattr(img, 'atrous_gaussians'):
for gg in img.atrous_gaussians:
allgaus += gg
for g in allgaus:
ellx, elly = func.drawellipse(g)
pl.plot(ellx, elly, 'r')
from math import log10
bdir = img.basedir + '/misc/'
if not os.path.isdir(bdir): os.makedirs(bdir)
im_mean = img.clipped_mean
im_rms = img.clipped_rms
low = 1.1*abs(img.min_value)
low1 = 1.1*abs(N.min(im_mean-im_rms*5.0))
if low1 > low: low = low1
vmin = log10(im_mean-im_rms*5.0 + low)
vmax = log10(im_mean+im_rms*15.0 + low)
im = N.log10(img.ch0_arr + low)
pl.imshow(N.transpose(im), origin='lower', interpolation='nearest',vmin=vmin, vmax=vmax, \
cmap=cm.gray); pl.colorbar()
pl.savefig(bdir+'allgaussians.png')
pl.close()
img.completed_Ops.append('cleanup')
PyBDSF-1.11.0/bdsf/collapse.py 0000664 0000000 0000000 00000031673 14650706641 0015736 0 ustar 00root root 0000000 0000000 """Module collapse
Defines the operation Op_collapse, which collapses a 3D image. The mean and
rms (normal and clipped) are always calculated and stored per channel for
further use, even if the weights are unity.
"""
from __future__ import absolute_import
import numpy as N
from .image import *
from . import _cbdsm
#_cbdsm.init_numpy()
from . import mylogger
from . import functions as func
class Op_collapse(Op):
"""Collapse 3D image"""
def __call__(self, img):
mylog = mylogger.logging.getLogger("PyBDSM."+img.log+"Collapse")
if img.opts.polarisation_do:
pols = ['I', 'Q', 'U', 'V'] # make sure I is done first
else:
pols = ['I'] # assume I is always present
img.ch0_Q_arr = None
img.ch0_U_arr = None
img.ch0_V_arr = None
if img.shape[1] > 1:
c_mode = img.opts.collapse_mode
chan0 = img.opts.collapse_ch0
c_list = img.opts.collapse_av
c_wts = img.opts.collapse_wt
if not c_list:
c_list = N.arange(img.shape[1])
if len(c_list) == 1 and c_mode=='average':
c_mode = 'single'
chan0 = c_list[0]
img.collapse_ch0 = chan0
ch0sh = img.image_arr.shape[2:]
if img.opts.polarisation_do:
ch0images = ['ch0_arr', 'ch0_Q_arr', 'ch0_U_arr', 'ch0_V_arr']
else:
ch0images = ['ch0_arr']
# Check whether the collapse channel index is sensible
if chan0 < 0 or chan0 >= len(c_list):
raise RuntimeError('The channel index (set with the "collapse_ch0" option) '
'must be greater than or equal to zero and less than the number of '
'channels ({}).'.format(len(c_list)))
# assume all Stokes images have the same blank pixels as I:
blank = N.isnan(img.image_arr[0])
hasblanks = blank.any()
if img.opts.kappa_clip is None:
kappa = -img.pixel_beamarea()
else:
kappa = img.opts.kappa_clip
mean, rms, cmean, crms = chan_stats(img, kappa)
img.channel_mean = mean; img.channel_rms = rms
img.channel_clippedmean = cmean; img.channel_clippedrms = crms
for ipol, pol in enumerate(pols):
if c_mode == 'single':
if pol == 'I':
ch0 = img.image_arr[0, chan0]
img.ch0_arr = ch0
# Construct weights so that desired channel has weight of 1 and all
# others have weight of 0. The init_freq_collapse function will then
# select the intended frequency
wtarr = N.zeros(len(c_list))
wtarr[chan0] = 1.0
init_freq_collapse(img, wtarr)
mylogger.userinfo(mylog, 'Source extraction will be ' \
'done on channel', '%i (%.3f MHz)' % \
(chan0, img.frequency/1e6))
else:
ch0[:] = img.image_arr[ipol, chan0][:]
img.__setattr__(ch0images[ipol][:], ch0)
elif c_mode == 'average':
if not hasblanks:
if pol == 'I':
ch0, wtarr = avspc_direct(c_list, img.image_arr[0], img.channel_clippedrms, c_wts)
else:
# use wtarr from the I image, which is always collapsed first
ch0, wtarr = avspc_direct(c_list, img.image_arr[ipol], img.channel_clippedrms, c_wts, wtarr=wtarr)
else:
if pol == 'I':
ch0, wtarr = avspc_blanks(c_list, img.image_arr[0], img.channel_clippedrms, c_wts)
else:
# use wtarr from the I image, which is always collapsed first
ch0, wtarr = avspc_blanks(c_list, img.image_arr[ipol], img.channel_clippedrms, c_wts, wtarr=wtarr)
img.__setattr__(ch0images[ipol][:], ch0)
if pol == 'I':
img.avspc_wtarr = wtarr
init_freq_collapse(img, wtarr)
if c_wts == 'unity':
mylogger.userinfo(mylog, 'Channels averaged with '\
'uniform weights')
else:
mylogger.userinfo(mylog, 'Channels averaged with '\
'weights=(1/rms)^2')
mylogger.userinfo(mylog, 'Source extraction will be '\
'done on averaged ("ch0") image')
mylogger.userinfo(mylog, 'Frequency of averaged '\
'image', '%.3f MHz' % \
(img.frequency/1e6,))
str1 = " ".join(str(n) for n in c_list)
mylog.debug('%s %s' % ('Channels averaged : ', str1))
str1 = " ".join(["%9.4e" % n for n in wtarr])
mylog.debug('%s %s %s' % ('Channel weights : ', str1, '; unity=zero if c_wts="rms"'))
elif c_mode=='file':
mylogger.userinfo(mylog, 'Reading ch0 image from file %s' % (img.opts.collapse_file))
image,hdr=func.read_image_from_file(img.opts.collapse_file, img, None, quiet=False)
if pol == 'I':
ch0 = image[0,0]
img.ch0_arr = ch0
else:
raise NotImplementedError('Polarization cubes not allowed in file mode')
else:
raise NotImplementedError('Mode supplied not implemented') # should never happen!
if img.opts.output_all:
func.write_image_to_file(img.use_io, img.imagename+'.ch0_'+pol+'.fits', ch0,
img, outdir=img.basedir)
mylog.debug('%s %s ' % ('Writing file ', img.imagename+'.ch0_'+pol+'.fits'))
else:
# Only one channel in image
image = img.image_arr
img.ch0_arr = image[0, 0]
mylogger.userinfo(mylog, 'Frequency of image',
'%.3f MHz' % (img.frequency/1e6,))
if img.opts.polarisation_do:
for pol in pols[1:]:
if pol == 'Q':
img.ch0_Q_arr = image[1, 0][:]
if pol == 'U':
img.ch0_U_arr = image[2, 0][:]
if pol == 'V':
img.ch0_V_arr = image[3, 0][:]
# create mask if needed (assume all pols have the same mask as I)
image = img.ch0_arr
mask = N.isnan(image)
img.blankpix = N.sum(mask)
frac_blank = round(
float(img.blankpix) / float(image.shape[0] * image.shape[1]),
3)
mylogger.userinfo(mylog, "Number of blank pixels", str(img.blankpix)
+ ' (' + str(frac_blank * 100.0) + '%)')
if img.opts.blank_limit is not None:
import scipy.signal  # needed for scipy.signal.convolve2d below
import sys
threshold = img.opts.blank_limit
mylogger.userinfo(mylog, "Blanking pixels with values "
"below %.1e Jy/beam" % (threshold,))
bad = (abs(image) < threshold)
original_stdout = sys.stdout # keep a reference to STDOUT
sys.stdout = func.NullDevice() # redirect the real STDOUT
count = scipy.signal.convolve2d(bad, N.ones((3, 3)), mode='same')
sys.stdout = original_stdout # turn STDOUT back on
mask_low = (count >= 5)
image[N.where(mask_low)] = N.nan
mask = N.isnan(image)
img.blankpix = N.sum(mask)
frac_blank = round(
float(img.blankpix) / float(image.shape[0] *
image.shape[1]), 3)
mylogger.userinfo(mylog, "Total number of blanked pixels",
str(img.blankpix) + ' (' + str(frac_blank * 100.0) + '%)')
masked = mask.any()
img.masked = masked
if masked:
img.mask_arr = mask
else:
img.mask_arr = None
if img.blankpix == image.shape[0] * image.shape[1]:
# ALL pixels are blanked!
raise RuntimeError('All pixels in the image are blanked.')
img.completed_Ops.append('collapse')
########################################################################################
def chan_stats(img, kappa):
bstat = func.bstat #_cbdsm.bstat
nchan = img.shape[1]
mean = []; rms = []; cmean = []; crms = []
for ichan in range(nchan):
if isinstance(img, Image): # check if img is an Image or just an ndarray
im = img.image_arr[0, ichan]
else:
im = img[0, ichan]
if N.any(im):
immask = N.isnan(im)
if immask.all():
m, r, cm, cr = 0, 0, 0, 0
else:
if immask.any():
m, r, cm, cr, cnt = bstat(im, immask, kappa)
else:
m, r, cm, cr, cnt = bstat(im, None, kappa)
else:
m, r, cm, cr = 0, 0, 0, 0
mean.append(m); rms.append(r); cmean.append(cm); crms.append(cr)
return N.array(mean), N.array(rms), N.array(cmean), N.array(crms)
########################################################################################
def avspc_direct(c_list, image, rmsarr, c_wts, wtarr=None):
shape2 = image.shape[1:]
ch0 = N.zeros(shape2, dtype=N.float32)
sumwts = 0.0
if wtarr is None:
wtarr = N.zeros(len(c_list))
for i, ch in enumerate(c_list):
im = image[ch]
r = rmsarr[ch]
if c_wts == 'unity': wt = 1.0
if c_wts == 'rms': wt = r
if r != 0:
wt = 1.0/(wt*wt)
else:
wt = 0
sumwts += wt
ch0 += im*wt
wtarr[i] = wt
else:
for i, ch in enumerate(c_list):
im = image[ch]
sumwts += wtarr[i]
ch0 += im*wtarr[i]
ch0 = ch0/sumwts
return ch0, wtarr
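# Worked example (illustrative): with c_wts = 'rms', two channels with rms
# values 1.0 and 2.0 get weights 1.0 and 0.25, so pixel values of 10 and 20
# average to (10*1.0 + 20*0.25) / (1.0 + 0.25) = 12.0.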
########################################################################################
def avspc_blanks(c_list, image, rmsarr, c_wts, wtarr=None):
shape2 = image.shape[1:]
ch0 = N.zeros(shape2, dtype=N.float32)
sumwtim = N.zeros(shape2, dtype=N.float32)
if wtarr is None:
wtarr = N.zeros(len(c_list))
for i, ch in enumerate(c_list):
im = image[ch]
r = rmsarr[ch]
if c_wts == 'unity': wt = 1.0
if c_wts == 'rms': wt = r
if r > 1e-18 and r < 1e18:
# Set reasonable limits to avoid overflow of float32
wt = 1.0/(wt*wt)
else:
wt = 0
wtim = N.ones(shape2, dtype=N.float32)*wt*(~N.isnan(im))
sumwtim += wtim
ch0 += N.nan_to_num(im)*wtim
wtarr[i] = wt
else:
for i, ch in enumerate(c_list):
im = image[ch]
wtim = N.ones(shape2)*wtarr[i]*(~N.isnan(im))
sumwtim += wtim
ch0 += N.nan_to_num(im)*wtim
ch0 = ch0/sumwtim
return ch0, wtarr
########################################################################################
def init_freq_collapse(img, wtarr):
# Place appropriate, post-collapse frequency info in img
# Calculate weighted average frequency
if img.opts.frequency_sp is not None:
c_list = img.opts.collapse_av
if c_list == []: c_list = N.arange(img.image_arr.shape[1])
freqs = img.opts.frequency_sp
if len(freqs) != len(c_list):
raise RuntimeError("Number of channels and number of frequencies specified "\
"by user do not match")
sumwts = 0.0
sumfrq = 0.0
for i, ch in enumerate(c_list):
sumwts += wtarr[i]
sumfrq += freqs[ch]*wtarr[i]
img.frequency = sumfrq / sumwts
img.freq_pars = (img.frequency, 0.0, 0.0)
else:
# Calculate from header info
c_list = img.opts.collapse_av
if c_list == []: c_list = N.arange(img.image_arr.shape[1])
sumwts = 0.0
sumfrq = 0.0
spec_indx = img.wcs_obj.wcs.spec
if spec_indx == -1 and img.opts.frequency_sp is None:
raise RuntimeError("Frequency information not found in header and frequencies "\
"not specified by user")
else:
for i, ch in enumerate(c_list):
sumwts += wtarr[i]
freq = img.wcs_obj.p2f(ch)
sumfrq += freq*wtarr[i]
img.frequency = sumfrq / sumwts
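# Worked example (illustrative): for channels at 120 and 160 MHz with
# collapse weights of 3.0 and 1.0, the frequency assigned to the collapsed
# image is (3.0*120e6 + 1.0*160e6) / (3.0 + 1.0) = 130 MHz.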
PyBDSF-1.11.0/bdsf/const.py 0000664 0000000 0000000 00000000222 14650706641 0015244 0 ustar 00root root 0000000 0000000 """Constants
Some universal constants
"""
import math
pi=math.pi
fwsig=2.35482        # FWHM/sigma ratio of a Gaussian
rad=180.0/pi         # degrees per radian
c=2.99792458e8       # speed of light [m/s]
bolt=1.3806505e-23   # Boltzmann constant [J/K]
sq2=math.sqrt(2)     # square root of 2
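# Sanity check (illustrative sketch): fwsig above is the FWHM/sigma ratio of
# a Gaussian, 2*sqrt(2*ln(2)):
#     >>> import math
#     >>> 2.0*math.sqrt(2.0*math.log(2.0))
#     2.3548200450309493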
PyBDSF-1.11.0/bdsf/functions.py 0000664 0000000 0000000 00000226722 14650706641 0016145 0 ustar 00root root 0000000 0000000 # some functions
from __future__ import print_function
from __future__ import absolute_import
try:
# For Python 2
basestring = basestring
except NameError:
basestring = str
def poly(c,x):
""" y = Sum { c(i)*x^i }, i=0,len(c)"""
import numpy as N
y=N.zeros(len(x))
for i in range(len(c)):
y += c[i]*(x**i)
return y
def sp_in(c, x):
""" Spectral index in freq-flux space """
import numpy as N
order = len(c)-1
if order == 1:
y = c[0]*N.power(x, c[1])
else:
if order == 2:
y = c[0]*N.power(x, c[1])*N.power(x, c[2]*N.log(x))
else:
print('Not yet implemented')
y = None  # avoid a NameError on return for unsupported orders
return y
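# Example (illustrative sketch): a two-parameter power law with unit flux
# density at x = 1 and spectral index -0.8:
#     >>> sp_in([1.0, -0.8], 2.0)   # 2**-0.8, i.e. ~0.574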
def wenss_fit(c,x):
""" sqrt(c0*c0 + c1^2/x^2)"""
import numpy as N
y = N.sqrt(c[0]*c[0]+c[1]*c[1]/(x*x))
return y
def nanmean(x):
""" Mean of array with NaN """
import numpy as N
sum = N.nansum(x)
n = N.sum(~N.isnan(x))
if n > 0:
mean = sum/n
else:
mean = float("NaN")
return mean
def shapeletfit(cf, Bset, cfshape):
""" The function """
import numpy as N
ordermax = Bset.shape[0]
y = (Bset[0,0,::]).flatten()
y = N.zeros(y.shape)
index = [(i,j) for i in range(ordermax) for j in range(ordermax-i)] # i=0->nmax, j=0-nmax-i
for coord in index:
linbasis = (Bset[coord[0], coord[1], ::]).flatten()
y += cf.reshape(cfshape)[coord]*linbasis
return y
def func_poly2d(ord,p,x,y):
""" 2d polynomial.
ord=0 : z=p[0]
ord=1 : z=p[0]+p[1]*x+p[2]*y
ord=2 : z=p[0]+p[1]*x+p[2]*y+p[3]*x*x+p[4]*y*y+p[5]*x*y
ord=3 : z=p[0]+p[1]*x+p[2]*y+p[3]*x*x+p[4]*y*y+p[5]*x*y+
p[6]*x*x*x+p[7]*x*x*y+p[8]*x*y*y+p[9]*y*y*y"""
if ord == 0:
z=p[0]
if ord == 1:
z=p[0]+p[1]*x+p[2]*y
if ord == 2:
z=p[0]+p[1]*x+p[2]*y+p[3]*x*x+p[4]*y*y+p[5]*x*y
if ord == 3:
z=p[0]+p[1]*x+p[2]*y+p[3]*x*x+p[4]*y*y+p[5]*x*y+\
p[6]*x*x*x+p[7]*x*x*y+p[8]*x*y*y+p[9]*y*y*y
if ord > 3:
print(" We do not trust polynomial fits > 3 ")
z = None
return z
def func_poly2d_ini(ord, av):
""" Initial guess -- assume flat plane. """
import numpy as N
if ord == 0:
p0 = N.asarray([av])
if ord == 1:
p0 = N.asarray([av] + [0.0]*2)
if ord == 2:
p0 = N.asarray([av] + [0.0]*5)
if ord == 3:
p0 = N.asarray([av] + [0.0]*9)
if ord > 3:
p0 = None
return p0
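# Example (illustrative sketch): a first-order (planar) surface
# z = 1 + 2*x + 3*y evaluated at (x, y) = (2, 1):
#     >>> func_poly2d(1, [1.0, 2.0, 3.0], 2.0, 1.0)
#     8.0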
def ilist(x):
""" integer part of a list of floats. """
fn = lambda x : [int(round(i)) for i in x]
return fn(x)
def cart2polar(cart, cen):
""" convert cartesian coordinates to polar coordinates around cen. theta is
zero for +ve xaxis and goes counter clockwise. cart is a numpy array [x,y] where
x and y are numpy arrays of all the (>0) values of coordinates."""
import math
import numpy as N
polar = N.zeros(cart.shape)
pi = math.pi
rad = 180.0/pi
cc = N.transpose(cart)
cc = (cc-cen)*(cc-cen)
polar[0] = N.sqrt(N.sum(cc,1))
th = N.arctan2(cart[1]-cen[1],cart[0]-cen[0])*rad
polar[1] = N.where(th > 0, th, 360+th)
return polar
def polar2cart(polar, cen):
""" convert polar coordinates around cen to cartesian coordinates. theta is
zero for +ve xaxis and goes counter clockwise. polar is a numpy array of [r], [theta]
and cart is a numpy array [x,y] where x and y are numpy arrays of all the (>0)
values of coordinates."""
import math
import numpy as N
cart = N.zeros(polar.shape)
pi = math.pi
rad = 180.0/pi
cart[0]=polar[0]*N.cos(polar[1]/rad)+cen[0]
cart[1]=polar[0]*N.sin(polar[1]/rad)+cen[1]
return cart
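# Example (illustrative sketch): the point (3, 4) about the origin has
# r = 5 and theta = atan2(4, 3) ~ 53.13 deg; polar2cart inverts cart2polar:
#     >>> import numpy as N
#     >>> pol = cart2polar(N.array([[3.0], [4.0]]), N.array([0.0, 0.0]))
#     >>> polar2cart(pol, N.array([0.0, 0.0]))   # ~[[3.0], [4.0]]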
def gaus_pixval(g, pix):
""" Calculates the value at a pixel pix due to a gaussian object g. """
from .const import fwsig, pi
from math import sin, cos, exp
cen = g.centre_pix
peak = g.peak_flux
bmaj_p, bmin_p, bpa_p = g.size_pix
a4 = bmaj_p/fwsig; a5 = bmin_p/fwsig
a6 = (bpa_p+90.0)*pi/180.0
spa = sin(a6); cpa = cos(a6)
dr1 = ((pix[0]-cen[0])*cpa + (pix[1]-cen[1])*spa)/a4
dr2 = ((pix[1]-cen[1])*cpa - (pix[0]-cen[0])*spa)/a5
pixval = peak*exp(-0.5*(dr1*dr1+dr2*dr2))
return pixval
def atanproper(dumr, dx, dy):
from math import pi
ysign = (dy >= 0.0)
xsign = (dx >= 0.0)
if ysign and (not xsign): dumr = pi - dumr
if (not ysign) and (not xsign): dumr = pi + dumr
if (not ysign) and xsign: dumr = 2.0*pi - dumr
return dumr
def gdist_pa(pix1, pix2, gsize):
""" Computes FWHM in degrees in the direction towards second source, of an elliptical gaussian. """
from math import atan, pi, sqrt, cos, sin, tan
dx = pix2[0] - pix1[0]
dy = pix2[1] - pix1[1]
if dx == 0.0:
val = pi/2.0
else:
dumr = atan(abs(dy/dx))
val = atanproper(dumr, dx, dy)
psi = val - (gsize[2]+90.0)/180.0*pi
# convert angle to eccentric anomaly
if approx_equal(gsize[1], 0.0):
psi = pi/2.0
else:
psi=atan(gsize[0]/gsize[1]*tan(psi))
dumr2 = gsize[0]*cos(psi)
dumr3 = gsize[1]*sin(psi)
fwhm = sqrt(dumr2*dumr2+dumr3*dumr3)
return fwhm
def gaus_2d(c, x, y):
""" x and y are 2d arrays with the x and y positions. """
import math
import numpy as N
rad = 180.0/math.pi
cs = math.cos(c[5]/rad)
sn = math.sin(c[5]/rad)
f1 = ((x-c[1])*cs+(y-c[2])*sn)/c[3]
f2 = ((y-c[2])*cs-(x-c[1])*sn)/c[4]
val = c[0]*N.exp(-0.5*(f1*f1+f2*f2))
return val
def gaus_2d_itscomplicated(c, x, y, p_tofix, ind):
""" x and y are 2d arrays with the x and y positions. c is a list (of lists) of gaussian parameters to fit, p_tofix
are gaussian parameters to fix. ind is a list with 0, 1; 1 = fit; 0 = fix. """
import math
import numpy as N
val = N.zeros(x.shape)
indx = N.array(ind)
if len(indx) % 6 != 0:
print(" Something wrong with the parameters passed - need multiples of 6 !")
else:
ngaus = int(len(indx)/6)
params = N.zeros(6*ngaus)
params[N.where(indx==1)[0]] = c
params[N.where(indx==0)[0]] = p_tofix
for i in range(ngaus):
gau = params[i*6:i*6+6]
val = val + gaus_2d(gau, x, y)
return val
def g2param(g, adj=False):
"""Convert gaussian object g to param list [amp, cenx, ceny, sigx, sigy, theta] """
from .const import fwsig
from math import pi
A = g.peak_flux
if adj and hasattr(g, 'size_pix_adj'):
sigx, sigy, th = g.size_pix_adj
else:
sigx, sigy, th = g.size_pix
cenx, ceny = g.centre_pix
sigx = sigx/fwsig; sigy = sigy/fwsig; th = th+90.0
params = [A, cenx, ceny, sigx, sigy, th]
return params
def g2param_err(g, adj=False):
"""Convert errors on gaussian object g to param list [Eamp, Ecenx, Eceny, Esigx, Esigy, Etheta] """
from .const import fwsig
from math import pi
A = g.peak_fluxE
if adj and hasattr(g, 'size_pix_adj'):
sigx, sigy, th = g.size_pix_adj
else:
sigx, sigy, th = g.size_pixE
cenx, ceny = g.centre_pixE
sigx = sigx/fwsig; sigy = sigy/fwsig
params = [A, cenx, ceny, sigx, sigy, th]
return params
def corrected_size(size):
""" convert major and minor axis from sigma to fwhm and angle from horizontal to P.A. """
from .const import fwsig
csize = [0,0,0]
csize[0] = size[0]*fwsig
csize[1] = size[1]*fwsig
bpa = size[2]
pa = bpa-90.0
pa = pa % 360
if pa < 0.0: pa = pa + 360.0
if pa > 180.0: pa = pa - 180.0
csize[2] = pa
return csize
def drawellipse(g):
import numpy as N
from .gausfit import Gaussian
rad = 180.0/N.pi
if isinstance(g, Gaussian):
param = g2param(g)
else:
if isinstance(g, list) and len(g)>=6:
param = g
else:
raise RuntimeError("Input to drawellipse neither Gaussian nor list")
size = [param[3], param[4], param[5]]
size_fwhm = corrected_size(size)
th=N.arange(0, 370, 10)
x1=size_fwhm[0]*N.cos(th/rad)
y1=size_fwhm[1]*N.sin(th/rad)
x2=x1*N.cos(param[5]/rad)-y1*N.sin(param[5]/rad)+param[1]
y2=x1*N.sin(param[5]/rad)+y1*N.cos(param[5]/rad)+param[2]
return x2, y2
def drawsrc(src):
import math
import numpy as N
import matplotlib.path as mpath
Path = mpath.Path
paths = []
xmin = []
xmax = []
ymin = []
ymax = []
ellx = []
elly = []
for indx, g in enumerate(src.gaussians):
gellx, gelly = drawellipse(g)
ellx += gellx.tolist()
elly += gelly.tolist()
yarr = N.array(elly)
minyarr = N.min(yarr)
maxyarr = N.max(yarr)
xarr = N.array(ellx)
for i in range(10):
inblock = N.where(yarr > minyarr + float(i)*(maxyarr-minyarr)/10.0)
yarr = yarr[inblock]
xarr = xarr[inblock]
inblock = N.where(yarr < minyarr + float(i+1)*(maxyarr-minyarr)/10.0)
xmin.append(N.min(xarr[inblock])-1.0)
xmax.append(N.max(xarr[inblock])+1.0)
ymin.append(N.mean(yarr[inblock]))
ymax.append(N.mean(yarr[inblock]))
xmax.reverse()
ymax.reverse()
pathdata = [(Path.MOVETO, (xmin[0], ymin[0]))]
for i in range(10):
pathdata.append((Path.LINETO, (xmin[i], ymin[i])))
pathdata.append((Path.CURVE3, (xmin[i], ymin[i])))
pathdata.append((Path.LINETO, ((xmin[9]+xmax[0])/2.0, (ymin[9]+ymax[0])/2.0+1.0)))
for i in range(10):
pathdata.append((Path.LINETO, (xmax[i], ymax[i])))
pathdata.append((Path.CURVE3, (xmax[i], ymax[i])))
pathdata.append((Path.LINETO, ((xmin[0]+xmax[9])/2.0, (ymin[0]+ymax[9])/2.0-1.0)))
pathdata.append((Path.CLOSEPOLY, (xmin[0], ymin[0])))
codes, verts = zip(*pathdata)
path = Path(verts, codes)
return path
def mask_fwhm(g, fac1, fac2, delc, shap):
""" take gaussian object g and make a mask (as True) for pixels which are outside (less flux)
fac1*FWHM and inside (more flux) fac2*FWHM. Also returns the values as well."""
import math
import numpy as N
from .const import fwsig
x, y = N.indices(shap)
params = g2param(g)
params[1] -= delc[0]; params[2] -= delc[1]
gau = gaus_2d(params, x, y)
dumr1 = 0.5*fac1*fwsig
dumr2 = 0.5*fac2*fwsig
flux1= params[0]*math.exp(-0.5*dumr1*dumr1)
flux2 = params[0]*math.exp(-0.5*dumr2*dumr2)
mask = (gau <= flux1) * (gau > flux2)
gau = gau * mask
return mask, gau
def flatten(x):
"""flatten(sequence) -> list
Taken from http://kogs-www.informatik.uni-hamburg.de/~meine/python_tricks
Returns a single, flat list which contains all elements retrieved
from the sequence and all recursively contained sub-sequences
(iterables).
Examples:
>>> flatten([1, 2, [3,4], (5,6)])
[1, 2, 3, 4, 5, 6]
>>> flatten([[[1,2,3], (42,None)], [4,5], [6], 7, MyVector(8,9,10)])
[1, 2, 3, 42, None, 4, 5, 6, 7, 8, 9, 10]"""
result = []
for el in x:
#if isinstance(el, (list, tuple)):
if hasattr(el, "__iter__") and not isinstance(el, basestring):
result.extend(flatten(el))
else:
result.append(el)
return result
def moment(x,mask=None):
"""
Calculates first 3 moments of numpy array x. Only those values of x
for which mask is False are used, if mask is given. Works for any
dimension of x.
"""
import numpy as N
if mask is None:
mask=N.zeros(x.shape, dtype=bool)
m1=N.zeros(1)
m2=N.zeros(x.ndim)
m3=N.zeros(x.ndim)
for i, val in N.ndenumerate(x):
if not mask[i]:
m1 += val
m2 += val*N.array(i)
m3 += val*N.array(i)*N.array(i)
m2 /= m1
if N.all(m3/m1 > m2*m2):
m3 = N.sqrt(m3/m1-m2*m2)
return m1, m2, m3
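# Example (illustrative sketch): for a uniform 2x2 array the total m1 is 4.0,
# the centroid m2 is (0.5, 0.5) and the width m3 is 0.5 along each axis:
#     >>> import numpy as N
#     >>> m1, m2, m3 = moment(N.ones((2, 2)))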
def fit_mask_1d(x, y, sig, mask, funct, do_err, order=0, p0 = None):
"""
Calls scipy.optimise.leastsq for a 1d function with a mask.
Takes values only where mask=False.
"""
from scipy.optimize import leastsq
from math import sqrt, pow
import numpy as N
import sys
ind=N.where(~N.array(mask))[0]
if len(ind) > 1:
n=sum(mask)
if isinstance(x, list): x = N.array(x)
if isinstance(y, list): y = N.array(y)
if isinstance(sig, list): sig = N.array(sig)
xfit=x[ind]; yfit=y[ind]; sigfit=sig[ind]
if p0 is None:
if funct == poly:
p0=N.array([0.]*(order+1))  # use floats so the slope assigned below is not truncated
p0[1]=(yfit[0]-yfit[-1])/(xfit[0]-xfit[-1])
p0[0]=yfit[0]-p0[1]*xfit[0]
if funct == wenss_fit:
p0=N.array([yfit[N.argmax(xfit)]] + [1.])
if funct == sp_in:
ind1 = N.where(yfit > 0.)[0]
if len(ind1) >= 2:
low = ind1[0]; hi = ind1[-1]
sp = N.log(yfit[low]/yfit[hi])/N.log(xfit[low]/xfit[hi])
p0=N.array([yfit[low]/pow(xfit[low], sp), sp] + [0.]*(order-1))
elif len(ind1) == 1:
p0=N.array([ind1[0], -0.8] + [0.]*(order-1))
else:
return [0, 0], [0, 0]
res=lambda p, xfit, yfit, sigfit: (yfit-funct(p, xfit))/sigfit
try:
(p, cov, info, mesg, flag)=leastsq(res, p0, args=(xfit, yfit, sigfit), full_output=True, warning=False)
except TypeError:
# This error means no warning argument is available, so redirect stdout to a null device
# to suppress printing of (unnecessary) warning messages
original_stdout = sys.stdout # keep a reference to STDOUT
sys.stdout = NullDevice() # redirect the real STDOUT
(p, cov, info, mesg, flag)=leastsq(res, p0, args=(xfit, yfit, sigfit), full_output=True)
sys.stdout = original_stdout # turn STDOUT back on
if do_err:
if cov is not None:
if N.sum(sig != 1.) > 0:
err = N.array([sqrt(abs(cov[i,i])) for i in range(len(p))])
else:
chisq=sum(info["fvec"]*info["fvec"])
dof=len(info["fvec"])-len(p)
err = N.array([sqrt(abs(cov[i,i])*chisq/dof) for i in range(len(p))])
else:
p, err = [0, 0], [0, 0]
else: err = [0]
else:
p, err = [0, 0], [0, 0]
return p, err
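# Example (illustrative sketch): fit the line y = 1 + 2*x with unit errors
# and nothing masked; p should converge to approximately [1.0, 2.0]:
#     >>> import numpy as N
#     >>> x = N.arange(5.0)
#     >>> p, err = fit_mask_1d(x, 1.0 + 2.0*x, N.ones(5), N.zeros(5, bool),
#     ...                      poly, True, order=1)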
def dist_2pt(p1, p2):
""" Calculated distance between two points given as tuples p1 and p2. """
from math import sqrt
dx=p1[0]-p2[0]
dy=p1[1]-p2[1]
dist=sqrt(dx*dx + dy*dy)
return dist
def angsep(ra1, dec1, ra2, dec2):
"""Returns angular separation between two coordinates (all in degrees)"""
import math
const = math.pi/180.
ra1 = ra1*const
rb1 = dec1*const
ra2 = ra2*const
rb2 = dec2*const
v1_1 = math.cos(ra1)*math.cos(rb1)
v1_2 = math.sin(ra1)*math.cos(rb1)
v1_3 = math.sin(rb1)
v2_1 = math.cos(ra2)*math.cos(rb2)
v2_2 = math.sin(ra2)*math.cos(rb2)
v2_3 = math.sin(rb2)
w = ( (v1_1-v2_1)**2 + (v1_2-v2_2)**2 + (v1_3-v2_3)**2 )/4.0
x = math.sqrt(w)
y = math.sqrt(max(0.0, 1.0-w))
angle = 2.0*math.atan2(x, y)/const
return angle
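# Example (illustrative sketch): two points on the celestial equator
# separated by 90 degrees of right ascension:
#     >>> angsep(0.0, 0.0, 90.0, 0.0)   # -> 90.0 (degrees)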
def std(y):
""" Returns unbiased standard deviation. """
from math import sqrt
import numpy as N
l=len(y)
s=N.std(y)
if l == 1:
return s
else:
return s*sqrt(float(l)/(l-1))
def imageshift(image, shift):
""" Shifts a 2d-image by the tuple (shift). Positive shift is to the right and upwards.
This is done by fourier shifting. """
import scipy.fft
from scipy import ndimage
shape=image.shape
f1=scipy.fft.fft(image, shape[0], axis=0)
f2=scipy.fft.fft(f1, shape[1], axis=1)
s=ndimage.fourier_shift(f2,shift, axis=0)
y1=scipy.fft.ifft(s, shape[1], axis=1)
y2=scipy.fft.ifft(y1, shape[0], axis=0)
return y2.real
def trans_gaul(q):
" transposes a tuple "
y=[]
if len(q) > 0:
for i in range(len(q[0])):
elem=[]
for j in range(len(q)):
elem.append(q[j][i])
y.append(elem)
return y
def momanalmask_gaus(subim, mask, isrc, bmar_p, allpara=True):
""" Compute 2d gaussian parameters from moment analysis, for an island with
multiple gaussians. Compute only for gaussian with index (mask value) isrc.
Returns normalised peak, centroid, fwhm and P.A. assuming North is top.
"""
from math import sqrt, atan, pi
from .const import fwsig
import numpy as N
N.seterr(all='ignore')
m1 = N.zeros(2); m2 = N.zeros(2); m11 = 0.0; tot = 0.0
mompara = N.zeros(6)
n, m = subim.shape[0], subim.shape[1]
index = [(i, j) for i in range(n) for j in range(m) if mask[i,j]==isrc]
for coord in index:
tot += subim[coord]
m1 += N.array(coord)*subim[coord]
mompara[0] = tot/bmar_p
mompara[1:3] = m1/tot
if allpara:
for coord in index:
co = N.array(coord)
m2 += (co - mompara[1:3])*(co - mompara[1:3])*subim[coord]
m11 += N.prod(co - mompara[1:3])*subim[coord]  # N.product was removed in NumPy 2.0
mompara[3] = sqrt((m2[0]+m2[1]+sqrt((m2[0]-m2[1])*(m2[0]-m2[1])+4.0*m11*m11))/(2.0*tot))*fwsig
mompara[4] = sqrt((m2[0]+m2[1]-sqrt((m2[0]-m2[1])*(m2[0]-m2[1])+4.0*m11*m11))/(2.0*tot))*fwsig
dumr = atan(abs(2.0*m11/(m2[0]-m2[1])))
dumr = atanproper(dumr, m2[0]-m2[1], 2.0*m11)
mompara[5] = 0.5*dumr*180.0/pi - 90.0
if mompara[5] < 0.0: mompara[5] += 180.0
return mompara
def fit_gaus2d(data, p_ini, x, y, mask = None, err = None):
""" Fit 2d gaussian to data with x and y also being 2d numpy arrays with x and y positions.
Takes an optional error array and a mask array (True => pixel is masked). """
from scipy.optimize import leastsq
import numpy as N
import sys
if mask is not None and mask.shape != data.shape:
print("Data and mask arrays don't have the same shape; ignoring mask")
mask = None
if err is not None and err.shape != data.shape:
print("Data and error arrays don't have the same shape; ignoring error")
err = None
if mask is None: mask = N.zeros(data.shape, bool)
g_ind = N.where(~N.ravel(mask))[0]
if err is None:
errorfunction = lambda p: N.ravel(gaus_2d(p, x, y) - data)[g_ind]
else:
errorfunction = lambda p: N.ravel((gaus_2d(p, x, y) - data)/err)[g_ind]
try:
p, success = leastsq(errorfunction, p_ini, warning=False)
except TypeError:
# This error means no warning argument is available, so redirect stdout to a null device
# to suppress printing of warning messages
original_stdout = sys.stdout # keep a reference to STDOUT
sys.stdout = NullDevice() # redirect the real STDOUT
p, success = leastsq(errorfunction, p_ini)
sys.stdout = original_stdout # turn STDOUT back on
return p, success
def deconv(gaus_bm, gaus_c):
""" Deconvolves gaus_bm from gaus_c to give gaus_dc.
Stolen shamelessly from aips DECONV.FOR.
All PA is in degrees."""
from math import pi, cos, sin, atan, sqrt
rad = 180.0/pi
gaus_d = [0.0, 0.0, 0.0]
phi_c = (gaus_c[2]+900.0) % 180    # parenthesised so the PA is normalised to [0, 180)
phi_bm = (gaus_bm[2]+900.0) % 180
maj2_bm = gaus_bm[0]*gaus_bm[0]; min2_bm = gaus_bm[1]*gaus_bm[1]
maj2_c = gaus_c[0]*gaus_c[0]; min2_c = gaus_c[1]*gaus_c[1]
theta=2.0*(phi_c-phi_bm)/rad
cost = cos(theta)
sint = sin(theta)
rhoc = (maj2_c-min2_c)*cost-(maj2_bm-min2_bm)
if rhoc == 0.0:
sigic = 0.0
rhoa = 0.0
else:
sigic = atan((maj2_c-min2_c)*sint/rhoc) # in radians
rhoa = ((maj2_bm-min2_bm)-(maj2_c-min2_c)*cost)/(2.0*cos(sigic))
gaus_d[2] = sigic*rad/2.0+phi_bm
dumr = ((maj2_c+min2_c)-(maj2_bm+min2_bm))/2.0
gaus_d[0] = dumr-rhoa
gaus_d[1] = dumr+rhoa
error = 0
if gaus_d[0] < 0.0: error += 1
if gaus_d[1] < 0.0: error += 1
gaus_d[0] = max(0.0,gaus_d[0])
gaus_d[1] = max(0.0,gaus_d[1])
gaus_d[0] = sqrt(abs(gaus_d[0]))
gaus_d[1] = sqrt(abs(gaus_d[1]))
if gaus_d[0] < gaus_d[1]:
sint = gaus_d[0]
gaus_d[0] = gaus_d[1]
gaus_d[1] = sint
gaus_d[2] = gaus_d[2]+90.0
gaus_d[2] = (gaus_d[2]+900.0) % 180    # parenthesised so the PA is normalised to [0, 180)
if gaus_d[0] == 0.0:
gaus_d[2] = 0.0
else:
if gaus_d[1] == 0.0:
if (abs(gaus_d[2]-phi_c) > 45.0) and (abs(gaus_d[2]-phi_c) < 135.0):
gaus_d[2] = (gaus_d[2]+450.0) % 180    # rotate by 90 deg and normalise to [0, 180)
# errors
#if rhoc == 0.0:
#if gaus_d[0] != 0.0:
# ed_1 = gaus_c[0]/gaus_d[0]*e_1
#else:
# ed_1 = sqrt(2.0*e_1*gaus_c[0])
#if gaus_d[1] != 0.0:
# ed_2 = gaus_c[1]/gaus_d[1]*e_2
#else:
# ed_2 = sqrt(2.0*e_2*gaus_c[1])
#ed_3 =e_3
#else:
# pass
return gaus_d
def deconv2(gaus_bm, gaus_c):
""" Deconvolves gaus_bm from gaus_c to give gaus_dc.
Stolen shamelessly from Miriad gaupar.for.
All PA is in degrees.
Returns deconvolved gaussian parameters and flag:
0 All OK.
1 Result is pretty close to a point source.
2 Illegal result.
"""
from math import pi, cos, sin, atan2, sqrt
rad = 180.0/pi
phi_c = (gaus_c[2]+900.0) % 180.0    # parenthesised so the PA is normalised to [0, 180)
phi_bm = (gaus_bm[2]+900.0) % 180.0
theta1 = phi_c / rad
theta2 = phi_bm / rad
bmaj1 = gaus_c[0]
bmaj2 = gaus_bm[0]
bmin1 = gaus_c[1]
bmin2 = gaus_bm[1]
alpha = ( (bmaj1*cos(theta1))**2 + (bmin1*sin(theta1))**2 -
(bmaj2*cos(theta2))**2 - (bmin2*sin(theta2))**2 )
beta = ( (bmaj1*sin(theta1))**2 + (bmin1*cos(theta1))**2 -
(bmaj2*sin(theta2))**2 - (bmin2*cos(theta2))**2 )
gamma = 2.0 * ( (bmin1**2-bmaj1**2)*sin(theta1)*cos(theta1) -
(bmin2**2-bmaj2**2)*sin(theta2)*cos(theta2) )
s = alpha + beta
t = sqrt((alpha-beta)**2 + gamma**2)
limit = min(bmaj1, bmin1, bmaj2, bmin2)
limit = 0.1*limit*limit
if alpha < 0.0 or beta < 0.0 or s < t:
if alpha < 0.0 or beta < 0.0:
bmaj = 0.0
bpa = 0.0
else:
bmaj = sqrt(0.5*(s+t))
bpa = rad * 0.5 * atan2(-gamma, alpha-beta)
bmin = 0.0
if 0.5*(s-t) < limit and alpha > -limit and beta > -limit:
ifail = 1
else:
ifail = 2
else:
bmaj = sqrt(0.5*(s+t))
bmin = sqrt(0.5*(s-t))
if abs(gamma) + abs(alpha-beta) == 0.0:
bpa = 0.0
else:
bpa = rad * 0.5 * atan2(-gamma, alpha-beta)
ifail = 0
return (bmaj, bmin, bpa), ifail
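# Example (illustrative sketch): deconvolving a circular 3x3 beam from a
# 5x4 fitted Gaussian (all PAs zero) gives a ~4.0 x 2.65 source and a clean
# exit flag:
#     >>> deconv2((3.0, 3.0, 0.0), (5.0, 4.0, 0.0))   # -> ((4.0, ~2.65, 0.0), 0)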
def get_errors(img, p, stdav, bm_pix=None, fixed_to_beam=False):
""" Returns errors on the fitted Gaussian parameters, using the
equations from Condon 1997 (PASP, 109, 166) and Condon et al.
1998 (AJ, 115, 1693)
Parameters:
img: Image object (needed for pixel beam info)
p: list of Gaussian parameters: [peak, x0, y0, maj, min, pa, tot]
stdav: estimate of the image noise at the Gaussian's position
bm_pix: optional pixel beam to be used instead of that in img
fixed_to_beam: True if the fits were done with the
size fixed to that of the beam, False otherwise
Returned list includes errors on:
peak flux [Jy/beam]
x_0 [pix]
y_0 [pix]
e_maj [pix]
e_min [pix]
e_pa [deg]
e_tot [Jy]
"""
from .const import fwsig
from math import sqrt, log, pow, pi
from . import mylogger
import numpy as N
mylog = mylogger.logging.getLogger("PyBDSM.Compute")
if len(p) % 7 > 0:
mylog.error("Gaussian parameters passed have to have 7n numbers")
ngaus = int(len(p)/7)
errors = []
for i in range(ngaus):
pp = p[i*7:i*7+7]
### Now do error analysis as in Condon (and fBDSM)
size = pp[3:6]
size = corrected_size(size) # angle is now degrees CCW from +y-axis
if size[0] == 0.0 or size[1] == 0.0:
errors = errors + [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
else:
sq2 = sqrt(2.0)
if bm_pix is None:
bm_pix = N.array([img.pixel_beam()[0]*fwsig, img.pixel_beam()[1]*fwsig, img.pixel_beam()[2]])
dumr = sqrt(abs(size[0] * size[1] / (4.0 * bm_pix[0] * bm_pix[1]))) # first term of Eq. 26 of Condon+ (1998)
dumrr1 = 1.0 + bm_pix[0] * bm_pix[1] / (size[0] * size[0]) # second term of Eq. 26 of Condon+ (1998)
dumrr2 = 1.0 + bm_pix[0] * bm_pix[1] / (size[1] * size[1]) # third term of Eq. 26 of Condon+ (1998)
dumrr3 = dumr * pp[0] / stdav # product of first and fourth terms of Eq. 26 of Condon+ (1998)
d1 = sqrt(8.0 * log(2.0))
d2 = (size[0] * size[0] - size[1] * size[1]) / (size[0] * size[1]) # last term of Eq. 30 of Condon+ (1998)
try:
# The following three errors are calculated using Eq. 21 of Condon (1997),
# using Eq. 26 of Condon+ (1998) for rho
e_peak = pp[0] * sq2 / (dumrr3 * pow(dumrr1, 0.75) * pow(dumrr2, 0.75))
e_maj = size[0] * sq2 / (dumrr3 * pow(dumrr1, 1.25) * pow(dumrr2, 0.25))
e_min = size[1] * sq2 / (dumrr3 * pow(dumrr1, 0.25) * pow(dumrr2, 1.25))
# The following two errors are calculated using Eq. 27 of Condon+ (1998)
pa_rad = size[2] * pi / 180.0
e_x0 = sqrt( (e_maj * N.sin(pa_rad))**2 + (e_min * N.cos(pa_rad))**2 ) / d1
e_y0 = sqrt( (e_maj * N.cos(pa_rad))**2 + (e_min * N.sin(pa_rad))**2 ) / d1
# The following error is calculated using Eq. 30 of Condon+ (1998)
e_pa = 2.0 / (d2 * dumrr3 * pow(dumrr1, 0.25) * pow(dumrr2, 1.25))
e_pa = e_pa * 180.0/pi
# The following error is calculated using Eq. 36 of Condon+ (1998)
e_tot = pp[6] * sqrt(e_peak * e_peak / (pp[0] * pp[0]) + (0.25 / dumr / dumr) *
(e_maj * e_maj / (size[0] * size[0]) + e_min * e_min / (size[1] * size[1])))
except Exception:
e_peak = 0.0
e_x0 = 0.0
e_y0 = 0.0
e_maj = 0.0
e_min = 0.0
e_pa = 0.0
e_tot = 0.0
if abs(e_pa) > 180.0:
e_pa = 180.0
if fixed_to_beam:
# When the size was fixed to that of the beam during the fit, set
# uncertainties on the size to zero and reduce the error in the fluxes
# by sqrt(2) (see Eq. 25 of Condon 1997)
e_maj = 0.0
e_min = 0.0
e_pa = 0.0
e_peak /= sq2
e_tot /= sq2
errors = errors + [e_peak, e_x0, e_y0, e_maj, e_min, e_pa, e_tot]
return errors
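# Illustrative usage (a sketch; the numbers below are made up for the example):
# p holds [peak, x0, y0, maj, min, pa, tot] per Gaussian. When bm_pix is
# supplied, the pixel beam is not read from img, so img may be None here:
#
#     errs = get_errors(None, [0.01, 50.0, 50.0, 6.0, 4.0, 20.0, 0.02],
#                       0.001, bm_pix=[3.0, 3.0, 0.0])
#     # errs = [e_peak, e_x0, e_y0, e_maj, e_min, e_pa, e_tot]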
def fit_chisq(x, p, ep, mask, funct, order):
import numpy as N
ind = N.where(~N.array(mask))[0]
if order == 0:
fit = [funct(p)]*len(p)
else:
fitpara, efit = fit_mask_1d(x, p, ep, mask, funct, True, order)
fit = funct(fitpara, x)
dev = (p-fit)*(p-fit)/(ep*ep)
num = order+1
csq = N.sum(dev[ind])/(len(fit)-num-1)
return csq
def calc_chisq(x, y, ey, p, mask, funct, order):
import numpy as N
if order == 0:
fit = [funct(y)]*len(y)
else:
fit = funct(p, x)
dev = (y-fit)*(y-fit)/(ey*ey)
ind = N.where(~N.array(mask))
num = order+1
csq = N.sum(dev[ind])/(len(mask)-num-1)
return csq
def get_windowsize_av(S_i, rms_i, chanmask, K, minchan):
import numpy as N
av_window = N.arange(2, int(len(S_i)/minchan)+1)
win_size = 0
for window in av_window:
fluxes, vars, mask = variance_of_wted_windowedmean(S_i, rms_i, chanmask, window)
minsnr = N.min(fluxes[~mask]/vars[~mask])
if minsnr > K*1.1: ### K*1.1 since fitted peak can be less than wted peak
win_size = window # is the size of averaging window
break
return win_size
def variance_of_wted_windowedmean(S_i, rms_i, chanmask, window_size):
from math import sqrt
import numpy as N
nchan = len(S_i)
nwin = nchan//window_size  # integer number of averaging windows
wt = 1/rms_i/rms_i
wt = wt/N.median(wt)
fluxes = N.zeros(nwin); vars = N.zeros(nwin); mask = N.zeros(nwin, bool)
for i in range(nwin):
strt = i*window_size; stp = (i+1)*window_size
if i == nwin-1: stp = nchan
ind = N.arange(strt,stp)
m = chanmask[ind]
index = [arg for ii,arg in enumerate(ind) if not m[ii]]
if len(index) > 0:
s = S_i[index]; r = rms_i[index]; w = wt[index]
fluxes[i] = N.sum(s*w)/N.sum(w)
vars[i] = 1.0/sqrt(N.sum(1.0/r/r))
mask[i] = N.all(m)  # window is masked only if all its channels are masked
else:
fluxes[i] = 0
vars[i] = 0
mask[i] = True
return fluxes, vars, mask
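# Illustrative usage: with unit fluxes, unit rms, and no masked channels,
# averaging 8 channels in windows of 4 gives two windows with mean flux 1.0
# and a weighted-mean error of 1/sqrt(4) = 0.5:
#
#     >>> import numpy as N
#     >>> S = N.ones(8); rms = N.ones(8); cm = N.zeros(8, bool)
#     >>> f, v, m = variance_of_wted_windowedmean(S, rms, cm, 4)
#     >>> f.tolist(), v.tolist()
#     ([1.0, 1.0], [0.5, 0.5])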
def fit_mulgaus2d(image, gaus, x, y, mask = None, fitfix = None, err = None, adj=False):
""" fitcode : 0=fit all; 1=fit amp; 2=fit amp, posn; 3=fit amp, size """
from scipy.optimize import leastsq
import numpy as N
import sys
if mask is not None and mask.shape != image.shape:
    print("Data and mask arrays don't have the same shape; ignoring mask")
    mask = None
if err is not None and err.shape != image.shape:
    print("Data and error arrays don't have the same shape; ignoring error array")
    err = None
if mask is None: mask = N.zeros(image.shape, bool)
g_ind = N.where(~N.ravel(mask))[0]
ngaus = len(gaus)
if ngaus > 0:
p_ini = []
for g in gaus:
p_ini = p_ini + g2param(g, adj)
p_ini = N.array(p_ini)
if fitfix is None: fitfix = [0]*ngaus
ind = N.ones(6*ngaus) # 1 => fit ; 0 => fix
for i in range(ngaus):
if fitfix[i] == 1: ind[i*6+1:i*6+6] = 0
if fitfix[i] == 2: ind[i*6+3:i*6+6] = 0
if fitfix[i] == 3: ind[i*6+1:i*6+3] = 0
ind = N.array(ind)
p_tofit = p_ini[N.where(ind==1)[0]]
p_tofix = p_ini[N.where(ind==0)[0]]
if err is None: err = N.ones(image.shape)
errorfunction = lambda p, x, y, p_tofix, ind, image, err, g_ind: \
N.ravel((gaus_2d_itscomplicated(p, x, y, p_tofix, ind)-image)/err)[g_ind]
try:
p, success = leastsq(errorfunction, p_tofit, args=(x, y, p_tofix, ind, image, err, g_ind))
except TypeError:
# This error means no warning argument is available, so redirect stdout to a null device
# to suppress printing of warning messages
original_stdout = sys.stdout # keep a reference to STDOUT
sys.stdout = NullDevice() # redirect the real STDOUT
p, success = leastsq(errorfunction, p_tofit, args=(x, y, p_tofix, ind, image, err, g_ind))
sys.stdout = original_stdout # turn STDOUT back on
else:
p, success = None, 1
para = N.zeros(6*ngaus)
para[N.where(ind==1)[0]] = p
para[N.where(ind==0)[0]] = p_tofix
for igaus in range(ngaus):
para[igaus*6+3] = abs(para[igaus*6+3])
para[igaus*6+4] = abs(para[igaus*6+4])
return para, success
def gaussian_fcn(g, x1, x2):
"""Evaluate Gaussian on the given grid.
Parameters:
x1, x2: grid (as produced by numpy.mgrid f.e.)
g: Gaussian object or list of Gaussian paramters
"""
from math import radians, sin, cos
from .const import fwsig
import numpy as N
if isinstance(g, list):
A, C1, C2, S1, S2, Th = g
else:
A = g.peak_flux
C1, C2 = g.centre_pix
S1, S2, Th = g.size_pix
S1 = S1/fwsig; S2 = S2/fwsig; Th = Th + 90.0 # Define theta = 0 on x-axis
th = radians(Th)
cs = cos(th)
sn = sin(th)
f1 = ((x1-C1)*cs + (x2-C2)*sn)/S1
f2 = (-(x1-C1)*sn + (x2-C2)*cs)/S2
return A*N.exp(-(f1*f1 + f2*f2)/2)
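# Illustrative usage: a unit-peak Gaussian evaluated at its own centre
# returns the peak amplitude:
#
#     >>> import numpy as N
#     >>> x1, x2 = N.mgrid[0:11, 0:11]
#     >>> z = gaussian_fcn([1.0, 5.0, 5.0, 3.0, 3.0, 0.0], x1, x2)
#     >>> float(z[5, 5])
#     1.0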
def mclean(im1, c, beam):
""" Simple image plane clean of one gaussian at posn c and size=beam """
import numpy as N
amp = im1[c]
b1, b2, b3 = beam
b3 += 90.0
para = [amp, c[0], c[1], b1, b2, b3]
x, y = N.indices(im1.shape)
im = gaus_2d(para, x, y)
im1 = im1-im
return im1
def arrstatmask(im, mask):
""" Basic statistics for a masked array. dont wanna use numpy.ma """
import numpy as N
ind = N.where(~mask)
im1 = im[ind]
av = N.mean(im1)
std = N.std(im1)
maxv = N.max(im1)
x, y = N.where(im == maxv)
xmax = x[0]; ymax = y[0]
minv = N.min(im1)
x, y = N.where(im == minv)
xmin = x[0]; ymin = y[0]
return (av, std, maxv, (xmax, ymax), minv, (xmin, ymin))
def get_maxima(im, mask, thr, shape, beam, im_pos=None):
""" Gets the peaks in an image """
from copy import deepcopy as cp
import numpy as N
if im_pos is None:
im_pos = im
im1 = cp(im)
ind = N.array(N.where(~mask)).transpose()
ind = [tuple(coord) for coord in ind if im_pos[tuple(coord)] > thr]
n, m = shape
iniposn = []
inipeak = []
for c in ind:
goodlist = [im_pos[i,j] for i in range(c[0]-1,c[0]+2) for j in range(c[1]-1,c[1]+2)
            if i >= 0 and i < n and j >= 0 and j < m and (i,j) != c]
peak = N.sum(im_pos[c] > goodlist) == len(goodlist)  # True if c is a strict local maximum
if peak:
iniposn.append(c)
inipeak.append(im[c])
im1 = mclean(im1, c, beam)
return inipeak, iniposn, im1
def watershed(image, mask=None, markers=None, beam=None, thr=None):
import numpy as N
from copy import deepcopy as cp
import scipy.ndimage as nd
#import matplotlib.pyplot as pl
#import pylab as pl
if thr is None: thr = -1e9
if mask is None: mask = N.zeros(image.shape, bool)
if beam is None: beam = (2.0, 2.0, 0.0)
if markers is None:
inipeak, iniposn, im1 = get_maxima(image, mask, thr, image.shape, beam)
ng = len(iniposn); markers = N.zeros(image.shape, int)
for i in range(ng): markers[iniposn[i]] = i+2
markers[N.unravel_index(N.argmin(image), image.shape)] = 1
im1 = cp(image)
if im1.min() < 0.: im1 = im1-im1.min()
im1 = 255 - im1/im1.max()*255
opw = nd.watershed_ift(N.array(im1, N.uint16), markers)
return opw, markers
def get_kwargs(kwargs, key, typ, default):
    obj = kwargs.get(key, default)
    if not isinstance(obj, typ):
        obj = default
    return obj
def read_image_from_file(filename, img, indir, quiet=False):
""" Reads data and header from indir/filename.
We can use either pyfits or python-casacore depending on the value
of img.use_io = 'fits'/'rap'
PyFITS is required, as it is used to standardize the header format. python-casacore
is optional.
"""
from . import mylogger
import os
import numpy as N
from astropy.io import fits as pyfits
from astropy.wcs import WCS
from copy import deepcopy as cp
import warnings
# Check if casacore is available, which is needed for 'rap' type of image I/O.
try:
import casacore.images as pim
has_casacore = True
except ImportError as err:
has_casacore = False
mylog = mylogger.logging.getLogger("PyBDSM."+img.log+"Readfile")
if indir is None or indir == './':
prefix = ''
else:
prefix = indir + '/'
image_file = prefix + filename
# Check that file exists
if not os.path.exists(image_file):
img._reason = f'File {image_file} does not exist'
return None
# If img.use_io is set, then use appropriate io module
if img.use_io:
# Sanity check: only 'fits' and 'rap' are supported image I/O types
if img.use_io not in ('fits', 'rap'):
raise ValueError(f"Invalid image I/O type '{img.use_io}'. "
"Supported types are: 'fits' and 'rap'")
if img.use_io == 'fits':
try:
fits = pyfits.open(image_file, mode="readonly", ignore_missing_end=True)
except IOError as err:
img._reason = f'Problem reading {image_file}.\nOriginal error: {err}'
return None
if img.use_io == 'rap':
if not has_casacore:
img._reason = f'Problem reading {image_file}.\nCasacore is unavailable'
return None
try:
inputimage = pim.image(image_file)
except IOError as err:
img._reason = f'Problem reading {image_file}.\nOriginal error: {err}'
return None
else:
# First assume image is a fits file, and use pyfits to open it.
# If that fails, try to use casacore if available.
failed_read = False
try:
fits = pyfits.open(image_file, mode="readonly", ignore_missing_end=True)
img.use_io = 'fits'
except IOError as err:
e_pyfits = str(err)
if has_casacore:
try:
inputimage = pim.image(image_file)
img.use_io = 'rap'
except IOError as err:
e_casacore = str(err)
failed_read = True
img._reason = 'File is not a valid FITS, CASA, or HDF5 image.'
else:
failed_read = True
e_casacore = "Casacore unavailable"
img._reason = f'Problem reading {image_file}.'
if failed_read:
img._reason += f'\nOriginal error: {e_pyfits}\n {e_casacore}'
return None
# Now that image has been read in successfully, get header (data is loaded
# later to take advantage of sectioning if trim_box is specified).
if not quiet:
mylogger.userinfo(mylog, "Opened '"+image_file+"'")
if img.use_io == 'rap':
tmpdir = os.path.join(img.outdir, img.parentname+'_tmp')
hdr = convert_casacore_header(inputimage, tmpdir)
coords = inputimage.coordinates()
img.coords_dict = coords.dict()
if 'telescope' in img.coords_dict:
img._telescope = img.coords_dict['telescope']
else:
img._telescope = None
if img.use_io == 'fits':
hdr = fits[0].header
img.coords_dict = None
if 'TELESCOP' in hdr:
img._telescope = hdr['TELESCOP']
else:
img._telescope = None
# Make sure data is in proper order. Final order is [pol, chan, x (RA), y (DEC)],
# so we need to rearrange dimensions if they are not in this order. Use the
# ctype FITS keywords to determine order of dimensions. Note that both PyFITS
# and casacore reverse the order of the axes relative to NAXIS, so we must too.
naxis = hdr['NAXIS']
data_shape = []
for i in range(naxis):
data_shape.append(hdr['NAXIS'+str(i+1)])
data_shape.reverse()
data_shape = tuple(data_shape)
mylog.info("Original data shape of " + image_file +': ' +str(data_shape))
ctype_in = []
for i in range(naxis):
key_val_raw = hdr['CTYPE' + str(i+1)]
key_val = key_val_raw.split('-')[0]
ctype_in.append(key_val.strip())
if 'RA' not in ctype_in or 'DEC' not in ctype_in:
if 'GLON' not in ctype_in or 'GLAT' not in ctype_in:
raise RuntimeError("Image data not found")
else:
lat_lon = True
else:
lat_lon = False
# Check for incorrect spectral units. For example, "M/S" is not
# recognized by PyWCS as velocity ("S" is actually Siemens, not
# seconds). Note that we check CUNIT3 and CUNIT4 even if the
# image has only 2 axes, as the header may still have these
# entries.
for i in range(4):
key_val_raw = hdr.get('CUNIT' + str(i+1))
if key_val_raw is not None:
if 'M/S' in key_val_raw or 'm/S' in key_val_raw or 'M/s' in key_val_raw:
hdr['CUNIT' + str(i+1)] = 'm/s'
if 'HZ' in key_val_raw or 'hZ' in key_val_raw or 'hz' in key_val_raw:
hdr['CUNIT' + str(i+1)] = 'Hz'
if 'DEG' in key_val_raw or 'Deg' in key_val_raw:
hdr['CUNIT' + str(i+1)] = 'deg'
# Make sure that the spectral axis has been identified properly
if len(ctype_in) > 2 and 'FREQ' not in ctype_in:
from astropy.wcs import FITSFixedWarning
# TODO: Is it still needed and/or desirable to filter warnings?
with warnings.catch_warnings():
warnings.filterwarnings("ignore",category=DeprecationWarning)
warnings.filterwarnings("ignore",category=FITSFixedWarning)
t = WCS(hdr)
t.wcs.fix()
spec_indx = t.wcs.spec
if spec_indx != -1:
ctype_in[spec_indx] = 'FREQ'
# Now reverse the axes order to match PyFITS/casacore order and define the
# final desired order (ctype_out) and shape (shape_out).
ctype_in.reverse()
if lat_lon:
ctype_out = ['STOKES', 'FREQ', 'GLON', 'GLAT']
else:
ctype_out = ['STOKES', 'FREQ', 'RA', 'DEC']
indx_out = [-1, -1, -1, -1]
indx_in = range(naxis)
for i in indx_in:
for j in range(4):
if ctype_in[i] == ctype_out[j]:
indx_out[j] = i
shape_out = [1, 1, data_shape[indx_out[2]], data_shape[indx_out[3]]]
if indx_out[0] != -1:
shape_out[0] = data_shape[indx_out[0]]
if indx_out[1] != -1:
shape_out[1] = data_shape[indx_out[1]]
indx_out = [a for a in indx_out if a >= 0] # trim unused axes
# Read in data. If only a subsection of the image is desired (as defined
# by the trim_box option), we can try to use PyFITS to read only that section.
img._original_naxis = data_shape
img._original_shape = (shape_out[2], shape_out[3])
img._xy_hdr_shift = (0, 0)
if img.opts.trim_box is not None:
img.trim_box = [int(b) for b in img.opts.trim_box]
xmin, xmax, ymin, ymax = img.trim_box
if xmin < 0: xmin = 0
if ymin < 0: ymin = 0
if xmax > shape_out[2]: xmax = shape_out[2]
if ymax > shape_out[3]: ymax = shape_out[3]
if xmin >= xmax or ymin >= ymax:
raise RuntimeError("The trim_box option does not specify a valid part of the image.")
shape_out_untrimmed = shape_out[:]
shape_out[2] = xmax-xmin
shape_out[3] = ymax-ymin
if img.use_io == 'fits':
sx = slice(int(xmin),int(xmax))
sy = slice(int(ymin),int(ymax))
sn = slice(None)
s_array = [sx, sy]
for i in range(naxis-2):
s_array.append(sn)
s_array.reverse() # to match ordering of data array returned by PyFITS
if naxis == 2:
data = fits[0].section[s_array[0], s_array[1]]
elif naxis == 3:
data = fits[0].section[s_array[0], s_array[1], s_array[2]]
elif naxis == 4:
data = fits[0].section[s_array[0], s_array[1], s_array[2], s_array[3]]
else:
# If more than 4 axes, just read in the whole image and
# do the trimming after reordering.
data = fits[0].data
fits.close()
data = data.transpose(*indx_out) # transpose axes to final order
data.shape = data.shape[0:4] # trim unused dimensions (if any)
if naxis > 4:
data = data.reshape(shape_out_untrimmed) # Add axes if needed
data = data[:, :, xmin:xmax, ymin:ymax] # trim to trim_box
else:
data = data.reshape(shape_out) # Add axes if needed
else:
# With casacore, just read in the whole image and then trim
data = inputimage.getdata()
data = data.transpose(*indx_out) # transpose axes to final order
data.shape = data.shape[0:4] # trim unused dimensions (if any)
data = data.reshape(shape_out_untrimmed) # Add axes if needed
data = data[:, :, xmin:xmax, ymin:ymax] # trim to trim_box
# Adjust WCS keywords for trim_box starting x and y.
hdr['crpix1'] -= xmin
hdr['crpix2'] -= ymin
img._xy_hdr_shift = (xmin, ymin)
else:
if img.use_io == 'fits':
data = fits[0].data
fits.close()
else:
data = inputimage.getdata()
data = data.transpose(*indx_out) # transpose axes to final order
data.shape = data.shape[0:4] # trim unused dimensions (if any)
data = data.reshape(shape_out) # Add axes if needed
mylog.info("Final data shape (npol, nchan, x, y): " + str(data.shape))
return data, hdr
def convert_casacore_header(casacore_image, tmpdir):
"""Converts a casacore header to a PyFITS header."""
import tempfile
import os
import atexit
import shutil
try:
from astropy.io import fits as pyfits
except ImportError as err:
import pyfits
if not os.path.exists(tmpdir):
os.makedirs(tmpdir)
tfile = tempfile.NamedTemporaryFile(delete=False, dir=tmpdir)
casacore_image.tofits(tfile.name)
hdr = pyfits.getheader(tfile.name)
if os.path.isfile(tfile.name):
os.remove(tfile.name)
# Register deletion of temp directory at exit to be sure it is deleted
atexit.register(shutil.rmtree, tmpdir, ignore_errors=True)
return hdr
def write_image_to_file(use, filename, image, img, outdir=None,
pad_image=False, clobber=True, is_mask=False):
""" Writes image array to outdir/filename"""
import numpy as N
import os
from . import mylogger
mylog = mylogger.logging.getLogger("PyBDSM."+img.log+"Writefile")
wcs_obj = img.wcs_obj
if pad_image and img.opts.trim_box is not None:
# Pad image to original size
xsize, ysize = img._original_shape
xmin, ymin = img._xy_hdr_shift
image_pad = N.zeros((xsize, ysize), dtype=N.float32)
image_pad[xmin:xmin+image.shape[0], ymin:ymin+image.shape[1]] = image
image = image_pad
else:
xmin = 0
ymin = 0
if not hasattr(img, '_telescope'):
telescope = None
else:
telescope = img._telescope
if filename == 'SAMP':
import tempfile
if not hasattr(img,'samp_client'):
s, private_key = start_samp_proxy()
img.samp_client = s
img.samp_key = private_key
# Broadcast image to SAMP Hub
temp_im = make_fits_image(N.transpose(image), wcs_obj, img.beam,
img.frequency, img.equinox, telescope, xmin=xmin, ymin=ymin,
is_mask=is_mask)
tfile = tempfile.NamedTemporaryFile(delete=False)
try:
temp_im.writeto(tfile.name, overwrite=clobber)
except TypeError:
# The "overwrite" argument was added in astropy v1.3, so fall back to "clobber"
# if it doesn't work
temp_im.writeto(tfile.name, clobber=clobber)
send_fits_image(img.samp_client, img.samp_key, 'PyBDSM image', tfile.name)
else:
# Write image to FITS file
if outdir is None:
outdir = img.indir
if not os.path.exists(outdir) and outdir != '':
os.makedirs(outdir)
outfilename = os.path.join(outdir, filename)
if os.path.isfile(outfilename):
if clobber:
os.remove(outfilename)
else:
return
if os.path.isdir(outfilename):
    if clobber:
        import shutil
        shutil.rmtree(outfilename)
    else:
        return
temp_im = make_fits_image(N.transpose(image), wcs_obj, img.beam,
img.frequency, img.equinox, telescope, xmin=xmin, ymin=ymin,
is_mask=is_mask, shape=(img.shape[1], img.shape[0], image.shape[1],
image.shape[0]))
if use == 'rap':
outfile = outfilename + '.fits'
else:
outfile = outfilename
try:
temp_im.writeto(outfile, overwrite=clobber)
except TypeError:
# The "overwrite" argument was added in astropy v1.3, so fall back to "clobber"
# if it doesn't work
temp_im.writeto(outfile, clobber=clobber)
temp_im.close()
if use == 'rap':
# For CASA images, read in FITS image and convert
try:
import casacore.images as pim
import casacore.tables as pt
import os
outimage = pim.image(outfile)
outimage.saveas(outfilename, overwrite=clobber)
# For masks, use the coordinates dictionary from the input
# image, as this is needed in order for the
# image to work as a clean mask in CASA.
if is_mask:
if img.coords_dict is None:
mylog.warning('Mask header information may be incomplete.')
else:
outtable = pt.table(outfilename, readonly=False, ack=False)
outtable.putkeywords({'coords': img.coords_dict})
outtable.done()
except ImportError as err:
import os
os.remove(outfile)
raise RuntimeError("Error importing python-casacore. CASA image could not "
"be writen. Use img_format = 'fits' instead.")
def make_fits_image(imagedata, wcsobj, beam, freq, equinox, telescope, xmin=0, ymin=0,
is_mask=False, shape=None):
"""Makes a simple FITS hdulist appropriate for single-channel images"""
import numpy as np
from astropy.io import fits as pyfits
# If mask, expand to all channels and Stokes for compatibility with casa
if is_mask and shape is not None:
shape_out = shape
else:
shape_out = [1, 1, imagedata.shape[0], imagedata.shape[1]]
hdu = pyfits.PrimaryHDU(np.resize(imagedata, shape_out))
hdulist = pyfits.HDUList([hdu])
header = hdulist[0].header
# Add WCS info
header['CRVAL1'] = wcsobj.wcs.crval[0]
header['CDELT1'] = wcsobj.wcs.cdelt[0]
header['CRPIX1'] = wcsobj.wcs.crpix[0] + xmin
header['CUNIT1'] = str(wcsobj.wcs.cunit[0]).strip().lower() # needed due to bug in pywcs/astropy
header['CTYPE1'] = wcsobj.wcs.ctype[0]
header['CRVAL2'] = wcsobj.wcs.crval[1]
header['CDELT2'] = wcsobj.wcs.cdelt[1]
header['CRPIX2'] = wcsobj.wcs.crpix[1] + ymin
header['CUNIT2'] = str(wcsobj.wcs.cunit[1]).strip().lower()
header['CTYPE2'] = wcsobj.wcs.ctype[1]
# Add STOKES info
header['CRVAL3'] = 1.0
header['CDELT3'] = 1.0
header['CRPIX3'] = 1.0
header['CUNIT3'] = ''
header['CTYPE3'] = 'STOKES'
# Add frequency info
header['RESTFRQ'] = freq
header['CRVAL4'] = freq
header['CDELT4'] = 3e8
header['CRPIX4'] = 1.0
header['CUNIT4'] = 'HZ'
header['CTYPE4'] = 'FREQ'
header['SPECSYS'] = 'TOPOCENT'
# Add beam info
if not is_mask:
header['BMAJ'] = beam[0]
header['BMIN'] = beam[1]
header['BPA'] = beam[2]
# Add equinox
header['EQUINOX'] = equinox
# Add telescope
if telescope is not None:
header['TELESCOP'] = telescope
hdulist[0].header = header
return hdulist
def retrieve_map(img, map_name):
"""Returns a map cached on disk."""
import numpy as N
import os
filename = get_name(img, map_name)
if not os.path.isfile(filename):
return None
infile = open(filename, 'rb')
data = N.load(infile)
infile.close()
return data
def store_map(img, map_name, map_data):
"""Caches a map to disk."""
import numpy as N
filename = get_name(img, map_name)
outfile = open(filename, 'wb')
N.save(outfile, map_data)
outfile.close()
def del_map(img, map_name):
"""Deletes a cached map."""
import os
filename = get_name(img, map_name)
if os.path.isfile(filename):
os.remove(filename)
def get_name(img, map_name):
"""Returns name of cache file."""
import os
if img._pi:
pi_text = 'pi'
else:
pi_text = 'I'
suffix = '/w%i_%s/' % (img.j, pi_text)
dir = img.tempdir + suffix
if not os.path.exists(dir):
os.makedirs(dir)
return dir + map_name + '.bin'
def connect(mask):
""" Find if a mask is singly or multiply connected """
import scipy.ndimage as nd
connectivity = nd.generate_binary_structure(2,2)
labels, count = nd.label(mask, connectivity)
if count > 1 :
connected = 'multiple'
else:
connected = 'single'
return connected, count
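# Illustrative usage: two isolated True pixels form a multiply connected mask:
#
#     >>> import numpy as N
#     >>> m = N.zeros((5, 5), bool)
#     >>> m[0, 0] = True; m[4, 4] = True
#     >>> connect(m)
#     ('multiple', 2)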
def area_polygon(points):
""" Given an ANGLE ORDERED array points of [[x], [y]], find the total area by summing each successsive
triangle with the centre """
import numpy as N
x, y = points
n_tri = len(x)-1
cenx, ceny = N.mean(x), N.mean(y)
area = 0.0
for i in range(n_tri):
p1, p2, p3 = N.array([cenx, ceny]), N.array([x[i], y[i]]), N.array([x[i+1], y[i+1]])
t_area= N.linalg.norm(N.cross((p2 - p1), (p3 - p1)))/2.
area += t_area
return area
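# Illustrative usage: a unit square (first vertex repeated to close the
# polygon) has area 1:
#
#     >>> pts = [[0.0, 1.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 1.0, 0.0]]
#     >>> round(float(area_polygon(pts)), 6)
#     1.0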
def convexhull_deficiency(isl):
""" Finds the convex hull for the island and returns the deficiency.
Code taken from http://code.google.com/p/milo-lab/source/browse/trunk/src/toolbox/convexhull.py?spec=svn140&r=140
"""
import random
import time
import numpy as N
import scipy.ndimage as nd
def _angle_to_point(point, centre):
"""calculate angle in 2-D between points and x axis"""
delta = point - centre
if delta[0] == 0.0:
res = N.pi/2.0
else:
res = N.arctan(delta[1] / delta[0])
if delta[0] < 0:
res += N.pi
return res
def area_of_triangle(p1, p2, p3):
"""calculate area of any triangle given co-ordinates of the corners"""
return N.linalg.norm(N.cross((p2 - p1), (p3 - p1)))/2.
def convex_hull(points):
"""Calculate subset of points that make a convex hull around points
Iteratively eliminates points that lie inside two neighbouring points until only the convex hull remains.
points : ndarray (2 x m) array of points for which to find hull
Returns: hull_points : ndarray (2 x n), convex hull surrounding points """
n_pts = points.shape[1]
#assert(n_pts > 5)
centre = points.mean(1)
angles = N.apply_along_axis(_angle_to_point, 0, points, centre)
pts_ord = points[:,angles.argsort()]
pts = [x[0] for x in zip(pts_ord.transpose())]
prev_pts = len(pts) + 1
k = 0
while prev_pts > n_pts:
prev_pts = n_pts
n_pts = len(pts)
i = -2
while i < (n_pts - 2):
Aij = area_of_triangle(centre, pts[i], pts[(i + 1) % n_pts])
Ajk = area_of_triangle(centre, pts[(i + 1) % n_pts], \
pts[(i + 2) % n_pts])
Aik = area_of_triangle(centre, pts[i], pts[(i + 2) % n_pts])
if Aij + Ajk < Aik:
del pts[i+1]
i += 1
n_pts = len(pts)
k += 1
return N.asarray(pts)
mask = ~isl.mask_active
points = N.asarray(N.where(mask ^ nd.binary_erosion(mask)))
hull_pts = list(convex_hull(points)) # these are already in angle-sorted order
hull_pts.append(hull_pts[0])
hull_pts = N.transpose(hull_pts)
isl_area = isl.size_active
hull_area = area_polygon(hull_pts)
ratio1 = hull_area/(isl_area - 0.5*len(hull_pts[0]))
return ratio1
def open_isl(mask, index):
""" Do an opening on a mask, divide left over pixels among opened sub islands. Mask = True => masked pixel """
import scipy.ndimage as nd
import numpy as N
connectivity = nd.generate_binary_structure(2,2)
ft = N.ones((index,index), int)
open = nd.binary_opening(~mask, ft)
open = check_1pixcontacts(open) # check if by removing one pixel from labels, you can split a sub-island
labels, n_subisl = nd.label(open, connectivity) # get label/rank image for open. label = 0 for masked pixels
labels, mask = assign_leftovers(mask, open, n_subisl, labels) # add the leftover pixels to some island
if labels is not None:
isl_pixs = [len(N.where(labels==i)[0]) for i in range(1,n_subisl+1)]
isl_pixs = N.array(isl_pixs)/float(N.sum(isl_pixs))
else:
isl_pixs = None
return n_subisl, labels, isl_pixs
def check_1pixcontacts(open):
import scipy.ndimage as nd
import numpy as N
from copy import deepcopy as cp
connectivity = nd.generate_binary_structure(2,2)
ind = N.transpose(N.where(open[1:-1,1:-1] > 0)) + [1,1] # exclude boundary to make it easier
for pixel in ind:
x, y = pixel
grid = cp(open[x-1:x+2, y-1:y+2]); grid[1,1] = 0
grid = N.where(grid == open[tuple(pixel)], 1, 0)
ll, nn = nd.label(grid, connectivity)
if nn > 1:
open[tuple(pixel)] = 0
return open
def assign_leftovers(mask, open, nisl, labels):
"""
Given isl and the image of the mask after opening (open) and the number of new independent islands n,
connect up the left over pixels to the new islands if they connect to only one island and not more.
Assign the remaining to an island. We need to assign the leftout pixels to either of many sub islands.
Easiest is to assign to the sub island with least size.
"""
import scipy.ndimage as nd
import numpy as N
from copy import deepcopy as cp
n, m = mask.shape
leftout = ~mask ^ open
connectivity = nd.generate_binary_structure(2,2)
mlabels, count = nd.label(leftout, connectivity)
npix = [len(N.where(labels==b)[0]) for b in range(1,nisl+1)]
for i_subisl in range(count):
c_list = [] # is list of all bordering pixels of the sub island
ii = i_subisl+1
coords = N.transpose(N.where(mlabels==ii)) # the coordinates of island i of left-out pixels
for co in coords:
co8 = [[x, y] for x in range(co[0]-1, co[0]+2) for y in range(co[1]-1, co[1]+2)
       if x >= 0 and y >= 0 and x < n and y < m]
c_list.extend([tuple(cc) for cc in co8 if mlabels[tuple(cc)] == 0])
belongs = list(set([labels[c] for c in c_list]))
if len(belongs) == 0:
    # No suitable islands found => mask pixels
for cc in coords:
mask = (mlabels == ii)
# mask[cc] = True
return None, mask
if len(belongs) == 1:
for cc in coords:
labels[tuple(cc)] = belongs[0]
else: # get the border pixels of the islands
nn = [npix[b-1] for b in belongs]
addto = belongs[N.argmin(nn)]
for cc in coords:
labels[tuple(cc)] = addto
return labels, mask
def _float_approx_equal(x, y, tol=1e-18, rel=1e-7):
if tol is rel is None:
    raise TypeError('cannot set both absolute and relative errors to None')
tests = []
if tol is not None: tests.append(tol)
if rel is not None: tests.append(rel*abs(x))
assert tests
return abs(x - y) <= max(tests)
def approx_equal(x, y, *args, **kwargs):
"""approx_equal(float1, float2[, tol=1e-18, rel=1e-7]) -> True|False
approx_equal(obj1, obj2[, *args, **kwargs]) -> True|False
Return True if x and y are approximately equal, otherwise False.
If x and y are floats, return True if y is within either absolute error
tol or relative error rel of x. You can disable either the absolute or
relative check by passing None as tol or rel (but not both).
For any other objects, x and y are checked in that order for a method
__approx_equal__, and the result of that is returned as a bool. Any
optional arguments are passed to the __approx_equal__ method.
__approx_equal__ can return NotImplemented to signal that it doesn't know
how to perform that specific comparison, in which case the other object is
checked instead. If neither object has the method, or both defer by
returning NotImplemented, approx_equal falls back on the same numeric
comparison used for floats.
>>> almost_equal(1.2345678, 1.2345677)
True
>>> almost_equal(1.234, 1.235)
False
"""
if not (type(x) is type(y) is float):
# Skip checking for __approx_equal__ in the common case of two floats.
methodname = '__approx_equal__'
# Allow the objects to specify what they consider "approximately equal",
# giving precedence to x. If either object has the appropriate method, we
# pass on any optional arguments untouched.
for a,b in ((x, y), (y, x)):
try:
method = getattr(a, methodname)
except AttributeError:
continue
else:
result = method(b, *args, **kwargs)
if result is NotImplemented:
continue
return bool(result)
# If we get here without returning, then neither x nor y knows how to do an
# approximate equal comparison (or are both floats). Fall back to a numeric
# comparison.
return _float_approx_equal(x, y, *args, **kwargs)
def isl_tosplit(isl, opts):
""" Splits an island and sends back parameters """
import numpy as N
size_extra5 = opts.splitisl_size_extra5
frac_bigisl3 = opts.splitisl_frac_bigisl3
connected, count = connect(isl.mask_active)
index = 0
n_subisl3, labels3, isl_pixs3 = open_isl(isl.mask_active, 3)
n_subisl5, labels5, isl_pixs5 = open_isl(isl.mask_active, 5)
isl_pixs3, isl_pixs5 = N.array(isl_pixs3), N.array(isl_pixs5)
# take open 3 or 5
open3, open5 = False, False
if n_subisl3 > 0 and isl_pixs3 is not None: # open 3 breaks up island
max_sub3 = N.max(isl_pixs3)
if max_sub3 < frac_bigisl3 : open3 = True # if biggest sub island isnt too big
if n_subisl5 > 0 and isl_pixs5 is not None: # open 5 breaks up island
max_sub5 = N.max(isl_pixs5) # if biggest subisl isnt too big OR smallest extra islands add upto 10 %
if (max_sub5 < 0.75*max_sub3) or (N.sum(N.sort(isl_pixs5)[:len(isl_pixs5)-n_subisl3]) > size_extra5):
open5 = True
# index=0 => dont split
if open5: index = 5; n_subisl = n_subisl5; labels = labels5
else:
if open3: index = 3; n_subisl = n_subisl3; labels = labels3
else: index = 0
convex_def = convexhull_deficiency(isl)
#print 'CONVEX = ',convex_def
if opts.plot_islands:
try:
import matplotlib.pyplot as pl
pl.figure()
pl.suptitle('Island '+str(isl.island_id))
pl.subplot(2,2,1); pl.imshow(N.transpose(isl.image*~isl.mask_active), origin='lower', interpolation='nearest'); pl.title('Image')
pl.subplot(2,2,2); pl.imshow(N.transpose(labels3), origin='lower', interpolation='nearest'); pl.title('labels3')
pl.subplot(2,2,3); pl.imshow(N.transpose(labels5), origin='lower', interpolation='nearest'); pl.title('labels5')
except ImportError:
print("\033[31;1mWARNING\033[0m: Matplotlib not found. Plotting disabled.")
if index == 0: return [index, n_subisl5, labels5]
else: return [index, n_subisl, labels]
class NullDevice():
"""Null device to suppress stdout, etc."""
def write(self, s):
pass
def ch0_aperture_flux(img, posn_pix, aperture_pix):
"""Measure ch0 flux inside radius aperture_pix pixels centered on posn_pix.
Returns [flux, fluxE]
"""
import numpy as N
if aperture_pix is None:
return [0.0, 0.0]
# Make ch0 and rms subimages
ch0 = img.ch0_arr
shape = ch0.shape
xlo = int(posn_pix[0]) - int(aperture_pix) - 1
if xlo < 0:
xlo = 0
xhi = int(posn_pix[0]) + int(aperture_pix) + 1
if xhi > shape[0]:
xhi = shape[0]
ylo = int(posn_pix[1]) - int(aperture_pix) - 1
if ylo < 0:
ylo = 0
yhi = int(posn_pix[1]) + int(aperture_pix) + 1
if yhi > shape[1]:
yhi = shape[1]
mean = img.mean_arr
rms = img.rms_arr
aper_im = ch0[int(xlo):int(xhi), int(ylo):int(yhi)] - mean[int(xlo):int(xhi), int(ylo):int(yhi)]
aper_rms = rms[int(xlo):int(xhi), int(ylo):int(yhi)]
posn_pix_new = [int(posn_pix[0])-xlo, int(posn_pix[1])-ylo]
pixel_beamarea = img.pixel_beamarea()
aper_flux = aperture_flux(aperture_pix, posn_pix_new, aper_im, aper_rms, pixel_beamarea)
return aper_flux
def aperture_flux(aperture_pix, posn_pix, aper_im, aper_rms, beamarea):
"""Returns aperture flux and error"""
import numpy as N
dist_mask = generate_aperture(aper_im.shape[0], aper_im.shape[1], posn_pix[0], posn_pix[1], aperture_pix)
aper_mask = N.where(dist_mask.astype(bool))
if N.size(aper_mask) == 0:
return [0.0, 0.0]
aper_flux = N.nansum(aper_im[aper_mask])/beamarea # Jy
pixels_in_source = N.sum(~N.isnan(aper_im[aper_mask])) # number of unmasked pixels assigned to current source
aper_fluxE = nanmean(aper_rms[aper_mask]) * N.sqrt(pixels_in_source/beamarea) # Jy
return [aper_flux, aper_fluxE]
def generate_aperture(xsize, ysize, xcenter, ycenter, radius):
"""Makes a mask (1 = inside aperture) for a circular aperture"""
import numpy
x, y = numpy.mgrid[0.5:xsize, 0.5:ysize]
mask = ((x - xcenter)**2 + (y - ycenter)**2 <= radius**2) * 1
return mask
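# Illustrative usage: a radius-2 aperture centred in a 5 x 5 grid of unit
# pixels covers 13 pixel centres:
#
#     >>> m = generate_aperture(5, 5, 2.5, 2.5, 2.0)
#     >>> int(m.sum())
#     13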
def make_src_mask(mask_size, posn_pix, aperture_pix):
"""Makes an island mask (1 = inside aperture) for a given source position.
"""
import numpy as N
xsize, ysize = mask_size
if aperture_pix is None:
return N.zeros((xsize, ysize), dtype=int)
# Make subimages
xlo = int(posn_pix[0]-int(aperture_pix)-1)
if xlo < 0:
xlo = 0
xhi = int(posn_pix[0]+int(aperture_pix)+1)
if xhi > xsize:
xhi = xsize
ylo = int(posn_pix[1]-int(aperture_pix)-1)
if ylo < 0:
ylo = 0
yhi = int(posn_pix[1]+int(aperture_pix)+1)
if yhi > ysize:
yhi = ysize
mask = N.zeros((xsize, ysize), dtype=int)
posn_pix_new = [posn_pix[0]-xlo, posn_pix[1]-ylo]
submask_xsize = xhi - xlo
submask_ysize = yhi - ylo
submask = generate_aperture(submask_xsize, submask_ysize, posn_pix_new[0], posn_pix_new[1], aperture_pix)
submask_slice = [slice(int(xlo), int(xhi)), slice(int(ylo), int(yhi))]
mask[tuple(submask_slice)] = submask
return mask
def getTerminalSize():
"""
returns (lines:int, cols:int)
"""
import os, struct
def ioctl_GWINSZ(fd):
import fcntl, termios
return struct.unpack("hh", fcntl.ioctl(fd, termios.TIOCGWINSZ, "1234"))
# try stdin, stdout, stderr
for fd in (0, 1, 2):
try:
return ioctl_GWINSZ(fd)
except Exception:
pass
# try os.ctermid()
try:
fd = os.open(os.ctermid(), os.O_RDONLY)
try:
return ioctl_GWINSZ(fd)
finally:
os.close(fd)
except Exception:
pass
# try `stty size`
try:
return tuple(int(x) for x in os.popen("stty size", "r").read().split())
except Exception:
pass
# try environment variables
try:
return tuple(int(os.getenv(var)) for var in ("LINES", "COLUMNS"))
except Exception:
pass
# Give up. return 0.
return (0, 0)
def eval_func_tuple(f_args):
"""Takes a tuple of a function and args, evaluates and returns result
This function (in addition to itertools) gets around limitation that
multiple-argument sequences are not supported by multiprocessing.
"""
return f_args[0](*f_args[1:])
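# Illustrative usage, e.g. with multiprocessing.Pool.map over an iterable of
# (function, arg1, arg2, ...) tuples:
#
#     >>> eval_func_tuple((pow, 2, 3))
#     8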
def start_samp_proxy():
"""Starts (registers) and returns a SAMP proxy"""
import os
try:
# Python 3
from xmlrpc.client import ServerProxy
except ImportError:
# Python 2
from xmlrpclib import ServerProxy
lockfile = os.path.expanduser('~/.samp')
if not os.path.exists(lockfile):
raise RuntimeError("A running SAMP hub was not found.")
else:
HUB_PARAMS = {}
for line in open(lockfile):
if not line.startswith('#'):
key, value = line.split('=', 1)
HUB_PARAMS[key] = value.strip()
# Set up proxy
s = ServerProxy(HUB_PARAMS['samp.hub.xmlrpc.url'])
# Register with Hub
metadata = {"samp.name": 'PyBDSM', "samp.description.text": 'PyBDSM: the Python Blob Detection and Source Measurement software'}
result = s.samp.hub.register(HUB_PARAMS['samp.secret'])
private_key = result['samp.private-key']
s.samp.hub.declareMetadata(private_key, metadata)
return s, private_key
def stop_samp_proxy(img):
"""Stops (unregisters) a SAMP proxy"""
import os
if hasattr(img, 'samp_client'):
lockfile = os.path.expanduser('~/.samp')
if os.path.exists(lockfile):
img.samp_client.samp.hub.unregister(img.samp_key)
def send_fits_image(s, private_key, name, file_path):
"""Send a SAMP notification to load a fits image."""
import os
message = {}
message['samp.mtype'] = "image.load.fits"
message['samp.params'] = {}
message['samp.params']['url'] = 'file://' + os.path.abspath(file_path)
message['samp.params']['name'] = name
lockfile = os.path.expanduser('~/.samp')
if not os.path.exists(lockfile):
raise RuntimeError("A running SAMP hub was not found.")
else:
s.samp.hub.notifyAll(private_key, message)
def send_fits_table(s, private_key, name, file_path):
"""Send a SAMP notification to load a fits table."""
import os
message = {}
message['samp.mtype'] = "table.load.fits"
message['samp.params'] = {}
message['samp.params']['url'] = 'file://' + os.path.abspath(file_path)
message['samp.params']['name'] = name
lockfile = os.path.expanduser('~/.samp')
if not os.path.exists(lockfile):
raise RuntimeError("A running SAMP hub was not found.")
else:
s.samp.hub.notifyAll(private_key, message)
def send_highlight_row(s, private_key, url, row_id):
"""Send a SAMP notification to highlight a row in a table."""
import os
message = {}
message['samp.mtype'] = "table.highlight.row"
message['samp.params'] = {}
message['samp.params']['row'] = str(row_id)
message['samp.params']['url'] = url
lockfile = os.path.expanduser('~/.samp')
if not os.path.exists(lockfile):
raise RuntimeError("A running SAMP hub was not found.")
else:
s.samp.hub.notifyAll(private_key, message)
def send_coords(s, private_key, coords):
"""Send a SAMP notification to point at given coordinates."""
import os
message = {}
message['samp.mtype'] = "coord.pointAt.sky"
message['samp.params'] = {}
message['samp.params']['ra'] = str(coords[0])
message['samp.params']['dec'] = str(coords[1])
lockfile = os.path.expanduser('~/.samp')
if not os.path.exists(lockfile):
raise RuntimeError("A running SAMP hub was not found.")
else:
s.samp.hub.notifyAll(private_key, message)
def make_curvature_map(subim):
"""Makes a curvature map with the Aegean curvature algorithm
(Hancock et al. 2012)
The Aegean algorithm uses a curvature map to identify regions of negative
curvature. These regions then define distinct sources.
"""
import scipy.signal as sg
import numpy as N
import sys
# Make average curvature map:
curv_kernel = N.array([[1, 1, 1], [1, -8, 1], [1, 1, 1]])
# The next step prints meaningless warnings, so suppress them
original_stdout = sys.stdout # keep a reference to STDOUT
sys.stdout = NullDevice() # redirect the real STDOUT
curv_map = sg.convolve2d(subim, curv_kernel)
sys.stdout = original_stdout # turn STDOUT back on
return curv_map
def bstat(indata, mask, kappa_npixbeam):
"""Numpy version of the c++ bstat routine
Uses the PySE method for calculating the clipped mean and rms of an array.
This method is superior to the c++ bstat routine (see section 2.7.3 of
http://dare.uva.nl/document/174052 for details) and, since the Numpy
functions used here are written in c, there should be no big computational
penalty in using Python code.
"""
import numpy
from scipy.special import erf, erfcinv
# Flatten array
skpix = indata.flatten()
if mask is not None:
msk_flat = mask.flatten()
unmasked = numpy.where(~msk_flat)
skpix = skpix[unmasked]
ct = skpix.size
iter = 0
c1 = 1.0
c2 = 0.0
maxiter = 200
converge_num = 1e-6
m_raw = numpy.mean(skpix)
r_raw = numpy.std(skpix, ddof=1)
while (c1 >= c2) and (iter < maxiter):
npix = skpix.size
if kappa_npixbeam > 0.0:
kappa = kappa_npixbeam
else:
npixbeam = abs(kappa_npixbeam)
kappa = numpy.sqrt(2.0)*erfcinv(1.0 / (2.0*npix/npixbeam))
if kappa < 3.0:
kappa = 3.0
lastct = ct
medval = numpy.median(skpix)
sig = numpy.std(skpix)
wsm = numpy.where(abs(skpix-medval) < kappa*sig)
ct = len(wsm[0])
if ct > 0:
skpix = skpix[wsm]
c1 = abs(ct - lastct)
c2 = converge_num * lastct
iter += 1
mean = numpy.mean(skpix)
median = numpy.median(skpix)
sigma = numpy.std(skpix, ddof=1)
mode = 2.5*median - 1.5*mean
if sigma > 0.0:
skew_par = abs(mean - median)/sigma
else:
raise RuntimeError("A region with an unphysical rms value has been found. "
"Please check the input image.")
if skew_par <= 0.3:
m = mode
else:
m = median
r1 = numpy.sqrt(2.0*numpy.pi)*erf(kappa/numpy.sqrt(2.0))
r = numpy.sqrt(sigma**2 * (r1 / (r1 - 2.0*kappa*numpy.exp(-kappa**2/2.0))))
return m_raw, r_raw, m, r, iter
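# Illustrative usage (a sketch): for roughly Gaussian noise, the clipped mean
# and rms should land close to the true values even with a strong outlier:
#
#     >>> import numpy
#     >>> numpy.random.seed(42)
#     >>> d = numpy.random.normal(0.0, 1.0, 10000)
#     >>> d[0] = 1000.0  # a corrupted pixel
#     >>> m_raw, r_raw, m, r, niter = bstat(d, None, 3.0)
#     >>> abs(m) < 0.05 and abs(r - 1.0) < 0.05
#     True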
def centered(arr, newshape):
"""Return the center newshape portion of the array
This function is a copy of the private _centered() function in
scipy.signal.signaltools
"""
import numpy as np
newshape = np.asarray(newshape)
currshape = np.array(arr.shape)
startind = (currshape - newshape) // 2
endind = startind + newshape
myslice = [slice(startind[k], endind[k]) for k in range(len(endind))]
return arr[tuple(myslice)]
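# Illustrative usage: taking the central 2 x 2 block of a 4 x 4 array:
#
#     >>> import numpy as np
#     >>> centered(np.arange(16).reshape(4, 4), (2, 2)).tolist()
#     [[5, 6], [9, 10]]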
def set_up_output_paths(opts):
"""Returns various paths and filenames related to output
The opts input is either an instance of the Opts class (bdsf.opts.Opts)
or a dict generated by that class.
The outputs are:
- parentname: the name of the image, with the path and extension removed
(if it is a common image extension)
- output_basedir: the output directory, where the log file and
other optional outputs of the process_image task are placed
"""
import os
# Get filename and outdir from opts
if type(opts) is dict:
filename = opts['filename']
outdir = opts['outdir']
else:
    # opts is an instance of the Opts class, so options are stored
    # as attributes
filename = opts.filename
outdir = opts.outdir
# Try to trim common extensions from filename to make the parent filename,
# used for various output purposes
root, ext = os.path.splitext(filename)
if ext in ['.fits', '.FITS', '.image']:
fname = root
elif ext in ['.gz', '.GZ']:
root2, ext2 = os.path.splitext(root)
if ext2 in ['.fits', '.FITS', '.image']:
fname = root2
else:
fname = root
else:
fname = filename
parentname = os.path.basename(fname)
# Determine the base output directory
if outdir is None:
output_basedir = os.path.abspath(os.path.dirname(filename))
else:
output_basedir = os.path.abspath(outdir)
# Make the output directory if needed
if not os.path.exists(output_basedir):
os.makedirs(output_basedir)
# Check that we have write permission to the base directory
if not os.access(output_basedir, os.W_OK):
raise RuntimeError("Cannot write to the output directory '{0}' (permission denied). "
"Please specify an output directory to which you have "
"write permission using the 'outdir' option.".format(output_basedir))
return parentname, output_basedir
def fix_gaussian_axes(major, minor, pa):
"""Check a Gaussian for switched axes and fix if found
Returns corrected (major, minor, pa)
"""
if major < minor:
major, minor = minor, major
pa += 90.0
pa = divmod(pa, 180)[1] # restrict to range [0, 180)
return (major, minor, pa)
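# Illustrative usage: swapped axes are exchanged and the position angle is
# rotated by 90 deg and wrapped into [0, 180):
#
#     >>> fix_gaussian_axes(2.0, 3.0, 10.0)
#     (3.0, 2.0, 100.0)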
PyBDSF-1.11.0/bdsf/gaul2srl.py 0000664 0000000 0000000 00000104434 14650706641 0015663 0 ustar 00root root 0000000 0000000
"""Module gaul2srl
This module groups the Gaussians in an island into sources. It ports callgaul2srl.f,
though it could probably be made more efficient.
img.sources is a list of source objects, which are instances of the class Source
(with attributes the same as in .srl of fbdsm).
img.sources[n] is a source.
source.gaussians is the list of component gaussian objects.
source.island_id is the island id of that source.
source.source_id is the source id of that source, the index of source in img.sources.
Each gaussian object gaus has gaus.source_id, the source id.
Also, each island object of img.islands list has the source object island.source
"""
from __future__ import absolute_import
from .image import *
from .islands import *
from .interface import wrap
from . import mylogger
import numpy as N
N.seterr(divide='raise')
class Op_gaul2srl(Op):
"""
Slightly modified from fortran.
"""
def __call__(self, img):
# for each island, get the gaussians into a list and then send them to process
# src_index is source number, starting from 0
mylog = mylogger.logging.getLogger("PyBDSM."+img.log+"Gaul2Srl")
mylogger.userinfo(mylog, 'Grouping Gaussians into sources')
img.aperture = img.opts.aperture
if img.aperture is not None and img.aperture <= 0.0:
mylog.warning('Specified aperture is <= 0. Skipping aperture fluxes.')
img.aperture = None
src_index = -1
dsrc_index = 0
sources = []
dsources = []
no_gaus_islands = []
no_gaus_islands_flag_values = []
for iisl, isl in enumerate(img.islands):
isl_sources = []
isl_dsources = []
g_list = []
for g in isl.gaul:
if g.flag == 0:
g_list.append(g)
if len(g_list) > 0:
if len(g_list) == 1:
src_index, source = self.process_single_gaussian(img, g_list, src_index, code = 'S')
sources.append(source)
isl_sources.append(source)
else:
src_index, source = self.process_CM(img, g_list, isl, src_index)
sources.extend(source)
isl_sources.extend(source)
else:
if not img.waveletimage:
dg = isl.dgaul[0]
no_gaus_islands.append((isl.island_id, dg.centre_pix[0], dg.centre_pix[1]))
flag_values = []
for fg in isl.fgaul:
flag_values.append(fg.flag)
no_gaus_islands_flag_values.append(flag_values)
# Put in the dummy Source as the source and use negative IDs
g_list = isl.dgaul
dsrc_index, dsource = self.process_single_gaussian(img, g_list, dsrc_index, code = 'S')
dsources.append(dsource)
isl_dsources.append(dsource)
isl.sources = isl_sources
isl.dsources = isl_dsources
img.sources = sources
img.dsources = dsources
img.nsrc = src_index + 1
mylogger.userinfo(mylog, "Number of sources formed from Gaussians",
str(img.nsrc))
if not img.waveletimage and not img._pi and len(no_gaus_islands) > 0 and not img.opts.quiet:
message = 'All Gaussians were flagged for the following island'
if len(no_gaus_islands) == 1:
message += ':\n'
else:
message += 's:\n'
for isl_id, flag_list in zip(no_gaus_islands, no_gaus_islands_flag_values):
message += ' Island #%i (x=%i, y=%i): ' % isl_id
if len(flag_list) > 0:
flags_str = '{}'.format(', '.join([str(f) for f in flag_list]))
if len(flag_list) == 1:
pl_str = ''
else:
pl_str = 's'
message += 'fit with {0} Gaussian{1} with flag{1} = {2}\n'.format(len(flag_list), pl_str, flags_str)
else:
message += '\n'
if len(no_gaus_islands) == 1:
message += 'Please check this island. If it is a valid island and\n'
else:
message += 'Please check these islands. If they are valid islands and\n'
if img.opts.atrous_do:
message += 'should be fit, try adjusting the flagging options (use\n'\
'show_fit with "ch0_flagged=True" to see the flagged Gaussians\n'\
'and "help \'flagging_opts\'" to see the meaning of the flags).'
else:
message += 'should be fit, try adjusting the flagging options (use\n'\
'show_fit with "ch0_flagged=True" to see the flagged Gaussians\n'\
'and "help \'flagging_opts\'" to see the meaning of the flags)\n'\
'or enabling the wavelet module (with "atrous_do=True").'
message += '\nTo include empty islands in output source catalogs, set\n'\
'incl_empty=True in the write_catalog task.'
mylog.warning(message)
img.completed_Ops.append('gaul2srl')
#################################################################################################
def process_single_gaussian(self, img, g_list, src_index, code):
""" Process single gaussian into a source, for both S and C type sources. g is just one
Gaussian object (not a list)."""
g = g_list[0]
total_flux = [g.total_flux, g.total_fluxE]
peak_flux_centroid = peak_flux_max = [g.peak_flux, g.peak_fluxE]
posn_sky_centroid = posn_sky_max = [g.centre_sky, g.centre_skyE]
size_sky = [g.size_sky, g.size_skyE]
size_sky_uncorr = [g.size_sky_uncorr, g.size_skyE]
deconv_size_sky = [g.deconv_size_sky, g.deconv_size_skyE]
deconv_size_sky_uncorr = [g.deconv_size_sky_uncorr, g.deconv_size_skyE]
bbox = img.islands[g.island_id].bbox
ngaus = 1
island_id = g.island_id
aper_flux = func.ch0_aperture_flux(img, g.centre_pix, img.aperture)
if g.gaussian_idx == -1:
src_index -= 1
else:
src_index += 1
g.source_id = src_index
g.code = code
if g.gaus_num < 0:
gaussians = []
else:
gaussians = [g]
source_prop = list([code, total_flux, peak_flux_centroid, peak_flux_max, aper_flux, posn_sky_centroid,
posn_sky_max, size_sky, size_sky_uncorr, deconv_size_sky, deconv_size_sky_uncorr, bbox, ngaus, island_id, gaussians])
source = Source(img, source_prop)
source.source_id = src_index
return src_index, source
##################################################################################################
def process_CM(self, img, g_list, isl, src_index):
"""
Bundle errors with the quantities.
ngau = number of gaussians in island
src_id = the source index array for every gaussian in island
nsrc = final number of distinct sources in the island
"""
ngau = len(g_list) # same as cisl in callgaul2srl.f
nsrc = ngau # same as islct; initially make each gaussian as a source
src_id = N.arange(nsrc) # same as islnum in callgaul2srl.f
boxx, boxy = isl.bbox
subn = boxx.stop-boxx.start; subm = boxy.stop-boxy.start
delc = [boxx.start, boxy.start]
subim = self.make_subim(subn, subm, g_list, delc)
index = [(i,j) for i in range(ngau) for j in range(ngau) if j > i]
for pair in index:
same_island = self.in_same_island(pair, img, g_list, isl, subim, subn, subm, delc)
if same_island:
nsrc -= 1
mmax, mmin = max(src_id[pair[0]],src_id[pair[1]]), min(src_id[pair[0]],src_id[pair[1]])
arr = N.where(src_id == mmax)[0]; src_id[arr] = mmin
# now reorder src_id so that it is contiguous
for i in range(ngau):
ind1 = N.where(src_id==i)[0]
if len(ind1) == 0:
arr = N.where(src_id > i)[0]
if len(arr) > 0:
decr = N.min(src_id[arr])-i
for j in arr: src_id[j] -= decr
nsrc = N.max(src_id)+1
# now do what's in sub_calc_para_source
source_list = []
for isrc in range(nsrc):
posn = N.where(src_id == isrc)[0]
g_sublist=[]
for i in posn:
g_sublist.append(g_list[i])
ngau_insrc = len(posn)
# Do source type C
if ngau_insrc == 1:
src_index, source = self.process_single_gaussian(img, g_sublist, src_index, code = 'C')
else:
# make mask and subim. Invalid mask value is -1 since 0 is valid srcid
mask = self.make_mask(isl, subn, subm, 1, isrc, g_sublist, delc)
src_index, source = self.process_Multiple(img, g_sublist, mask, src_index, isrc, subim, \
isl, delc, subn, subm)
source_list.append(source)
return src_index, source_list
##################################################################################################
def in_same_island(self, pair, img, g_list, isl, subim, subn, subm, delc):
""" Whether two gaussians belong to the same source or not. """
from . import functions as func
def same_island_min(pair, g_list, subim, delc, tol=0.5):
""" If the difference between the lower peak and the minimum of the reconstructed fluxes along the line joining the peak positions
is greater than thresh_isl times the rms_clip, they belong to different islands. """
g1 = g_list[pair[0]]
g2 = g_list[pair[1]]
pix1 = N.array(g1.centre_pix)
pix2 = N.array(g2.centre_pix)
x1, y1 = map(int, N.floor(pix1)-delc); x2, y2 = map(int, N.floor(pix2)-delc)
pix1 = N.array(N.unravel_index(N.argmax(subim[x1:x1+2,y1:y1+2]), (2,2)))+[x1,y1]
pix2 = N.array(N.unravel_index(N.argmax(subim[x2:x2+2,y2:y2+2]), (2,2)))+[x2,y2]
if pix1[1] >= subn: pix1[1] = pix1[1]-1
if pix2[1] >= subm: pix2[1] = pix2[1]-1
pix1 = pix1.astype(float) #N.array(map(float, pix1))
pix2 = pix2.astype(float) #N.array(map(float, pix2))
maxline = int(round(N.max(N.abs(pix1-pix2)+1)))
flux1 = g1.peak_flux
flux2 = g2.peak_flux
# get pix values of the line
pixdif = pix2 - pix1
same_island_min = False
same_island_cont = False
if maxline == 1:
same_island_min = True
same_island_cont = True
else:
if abs(pixdif[0]) > abs(pixdif[1]):
xline = N.round(min(pix1[0],pix2[0])+N.arange(maxline))
yline = N.round((pix1[1]-pix2[1])/(pix1[0]-pix2[0])* \
(min(pix1[0],pix2[0])+N.arange(maxline)-pix1[0])+pix1[1])
else:
yline = N.round(min(pix1[1],pix2[1])+N.arange(maxline))
xline = N.round((pix1[0]-pix2[0])/(pix1[1]-pix2[1])* \
(min(pix1[1],pix2[1])+N.arange(maxline)-pix1[1])+pix1[0])
rpixval = N.zeros(maxline, dtype=N.float32)
xbig = N.where(xline >= N.size(subim,0))
xline[xbig] = N.size(subim,0) - 1
ybig = N.where(yline >= N.size(subim,1))
yline[ybig] = N.size(subim,1) - 1
for i in range(maxline):
pixval = subim[int(xline[i]), int(yline[i])]
rpixval[i] = pixval
min_pixval = N.min(rpixval)
minind_p = N.argmin(rpixval)
maxind_p = N.argmax(rpixval)
if minind_p in (0, maxline-1) and maxind_p in (0, maxline-1):
same_island_cont = True
if min_pixval >= min(flux1, flux2):
same_island_min = True
elif abs(min_pixval-min(flux1,flux2)) <= tol*isl.rms*img.opts.thresh_isl:
same_island_min = True
return same_island_min, same_island_cont
def same_island_dist(pair, g_list, tol=0.5):
""" If the centres are seperated by a distance less than half the sum of their
fwhms along the PA of the line joining them, they belong to the same island. """
from math import sqrt
g1 = g_list[pair[0]]
g2 = g_list[pair[1]]
pix1 = N.array(g1.centre_pix)
pix2 = N.array(g2.centre_pix)
gsize1 = g1.size_pix
gsize2 = g2.size_pix
fwhm1 = func.gdist_pa(pix1, pix2, gsize1)
fwhm2 = func.gdist_pa(pix1, pix2, gsize2)
dx = pix2[0]-pix1[0]; dy = pix2[1]-pix1[1]
dist = sqrt(dy*dy + dx*dx)
if dist <= tol*(fwhm1+fwhm2):
same_island = True
else:
same_island = False
return same_island
if img.opts.group_by_isl:
same_isl1_min = True
same_isl1_cont = True
same_isl2 = True
else:
if img.opts.group_method == 'curvature':
subim = -1.0 * func.make_curvature_map(subim)
tol = img.opts.group_tol
same_isl1_min, same_isl1_cont = same_island_min(pair, g_list, subim, delc, tol)
same_isl2 = same_island_dist(pair, g_list, tol/2.0)
g1 = g_list[pair[0]]
same_island = (same_isl1_min and same_isl2) or same_isl1_cont
return same_island
##################################################################################################
def process_Multiple(self, img, g_sublist, mask, src_index, isrc, subim, isl, delc, subn, subm):
""" Same as gaul_to_source.f. isrc is same as k in the fortran version. """
from math import pi, sqrt
from .const import fwsig
from scipy import ndimage
from . import functions as func
mylog = mylogger.logging.getLogger("PyBDSM."+img.log+"Gaul2Srl ")
dum = img.beam[0]*img.beam[1]
cdeltsq = img.wcs_obj.acdelt[0]*img.wcs_obj.acdelt[1]
bmar_p = 2.0*pi*dum/(cdeltsq*fwsig*fwsig)
# try
subim_src = self.make_subim(subn, subm, g_sublist, delc)
mompara = func.momanalmask_gaus(subim_src, mask, isrc, bmar_p, True)
# initial peak posn and value
maxv = N.max(subim_src)
maxx, maxy = N.unravel_index(N.argmax(subim_src), subim_src.shape)
# fit gaussian around this posn
blc = N.zeros(2,dtype=int); trc = N.zeros(2,dtype=int)
n, m = subim_src.shape[0:2]
bm_pix = N.array([img.pixel_beam()[0]*fwsig, img.pixel_beam()[1]*fwsig, img.pixel_beam()[2]])
ssubimsize = max(int(N.round(N.max(bm_pix[0:2])*2))+1, 5)
blc[0] = max(0, maxx-(ssubimsize-1)/2); blc[1] = max(0, maxy-(ssubimsize-1)/2)
trc[0] = min(n, maxx+(ssubimsize-1)/2); trc[1] = min(m, maxy+(ssubimsize-1)/2)
s_imsize = trc - blc + 1
p_ini = [maxv, (s_imsize[0]-1)/2.0*1.1, (s_imsize[1]-1)/2.0*1.1, bm_pix[0]/fwsig*1.3, \
bm_pix[1]/fwsig*1.1, bm_pix[2]*2]
data = subim_src[blc[0]:blc[0]+s_imsize[0], blc[1]:blc[1]+s_imsize[1]]
smask = mask[blc[0]:blc[0]+s_imsize[0], blc[1]:blc[1]+s_imsize[1]]
rmask = N.where(smask==isrc, False, True)
x_ax, y_ax = N.indices(data.shape)
if N.sum(~rmask) >=6:
para, ierr = func.fit_gaus2d(data, p_ini, x_ax, y_ax, rmask)
    if (0.0 < para[1] < s_imsize[0]) and (0.0 < para[2] < s_imsize[1]) and \
            para[3] < s_imsize[0] and para[4] < s_imsize[1]:
        maxpeak = para[0]
    else:
        maxpeak = maxv
    posn = para[1:3] - (0.5, 0.5) + blc + delc
else:
    maxpeak = maxv
    posn = N.unravel_index(N.argmax(data*~rmask), data.shape) + N.array(delc) + blc
# Calculate the peak value by bilinear interpolation around the centroid.
# First check that the moment analysis gave a valid position; if not, use
# the position from the Gaussian fit instead.
if N.isnan(mompara[1]):
    mompara[1] = posn[0] - delc[0]
x1 = int(N.floor(mompara[1]))
if N.isnan(mompara[2]):
    mompara[2] = posn[1] - delc[1]
y1 = int(N.floor(mompara[2]))
t = mompara[1] - x1  # fractional-pixel offsets for the interpolation
u = mompara[2] - y1
s_peak = ((1.0-t)*(1.0-u)*subim_src[x1, y1] + t*(1.0-u)*subim_src[x1+1, y1] +
          t*u*subim_src[x1+1, y1+1] + (1.0-t)*u*subim_src[x1, y1+1])
# Convert pixel positions to sky coordinates
try:
    sra, sdec = img.pix2sky([mompara[1]+delc[0], mompara[2]+delc[1]])
    mra, mdec = img.pix2sky(posn)
except RuntimeError:
    # Invalid pixel wcs coordinate
    sra, sdec = 0.0, 0.0
    mra, mdec = 0.0, 0.0
# "Deconvolve" the source size from the beam
gaus_c = [mompara[3], mompara[4], mompara[5]]
gaus_bm = [bm_pix[0], bm_pix[1], bm_pix[2]]
gaus_dc, err = func.deconv2(gaus_bm, gaus_c)
deconv_size_sky = img.pix2gaus(gaus_dc, [mompara[1]+delc[0], mompara[2]+delc[1]])
deconv_size_sky_uncorr = img.pix2gaus(gaus_dc, [mompara[1]+delc[0], mompara[2]+delc[1]], use_wcs=False)
# Total flux and its error, and the (convolved) source size
tot = mompara[0]
totE_sq = N.sum([g.total_fluxE**2 for g in g_sublist])
totE = sqrt(totE_sq)
size_pix = [mompara[3], mompara[4], mompara[5]]
size_sky = img.pix2gaus(size_pix, [mompara[1]+delc[0], mompara[2]+delc[1]])
size_sky_uncorr = img.pix2gaus(size_pix, [mompara[1]+delc[0], mompara[2]+delc[1]], use_wcs=False)
# Compute Condon (1997) errors on the source parameters
plist = mompara.tolist() + [tot]
plist[0] = s_peak
plist[3] /= fwsig
plist[4] /= fwsig
errors = func.get_errors(img, plist, isl.rms)
# Estimate uncertainties in the source size and position due to
# errors in the constituent Gaussians using a Monte Carlo technique
if img.opts.do_mc_errors:
    nMC = 20
    mompara1_MC = N.zeros(nMC, dtype=N.float32)
    mompara2_MC = N.zeros(nMC, dtype=N.float32)
    mompara3_MC = N.zeros(nMC, dtype=N.float32)
    mompara4_MC = N.zeros(nMC, dtype=N.float32)
    mompara5_MC = N.zeros(nMC, dtype=N.float32)
    for i in range(nMC):
        # Reconstruct the source from its component Gaussians, drawing the
        # Gaussian parameters from distributions given by their errors
        subim_src_MC = self.make_subim(subn, subm, g_sublist, delc, mc=True)
        try:
            mompara_MC = func.momanalmask_gaus(subim_src_MC, mask, isrc, bmar_p, True)
            mompara1_MC[i] = mompara_MC[1]
            mompara2_MC[i] = mompara_MC[2]
            mompara3_MC[i] = mompara_MC[3]
            mompara4_MC[i] = mompara_MC[4]
            mompara5_MC[i] = mompara_MC[5]
        except Exception:
            mompara1_MC[i] = mompara[1]
            mompara2_MC[i] = mompara[2]
            mompara3_MC[i] = mompara[3]
            mompara4_MC[i] = mompara[4]
            mompara5_MC[i] = mompara[5]
    mompara1E = N.std(mompara1_MC)
    if mompara1E > 2.0*mompara[1]:
mompara1E = 2.0*mompara[1] # Don't let errors get too large
mompara2E = N.std(mompara2_MC)
if mompara2E > 2.0*mompara[2]:
mompara2E = 2.0*mompara[2] # Don't let errors get too large
mompara3E = N.std(mompara3_MC)
if mompara3E > 2.0*mompara[3]:
mompara3E = 2.0*mompara[3] # Don't let errors get too large
mompara4E = N.std(mompara4_MC)
if mompara4E > 2.0*mompara[4]:
mompara4E = 2.0*mompara[4] # Don't let errors get too large
mompara5E = N.std(mompara5_MC)
if mompara5E > 2.0*mompara[5]:
mompara5E = 2.0*mompara[5] # Don't let errors get too large
else:
mompara1E = 0.0
mompara2E = 0.0
mompara3E = 0.0
mompara4E = 0.0
mompara5E = 0.0
# Now add MC errors in quadrature with Condon (1997) errors
size_skyE = [sqrt(mompara3E**2 + errors[3]**2) * sqrt(cdeltsq),
sqrt(mompara4E**2 + errors[4]**2) * sqrt(cdeltsq),
sqrt(mompara5E**2 + errors[5]**2)]
sraE, sdecE = (sqrt(mompara1E**2 + errors[1]**2) * sqrt(cdeltsq),
sqrt(mompara2E**2 + errors[2]**2) * sqrt(cdeltsq))
deconv_size_skyE = size_skyE # set deconvolved errors to non-deconvolved ones
# Find aperture flux
if img.opts.aperture_posn == 'centroid':
aper_pos = [mompara[1]+delc[0], mompara[2]+delc[1]]
else:
aper_pos = posn
aper_flux, aper_fluxE = func.ch0_aperture_flux(img, aper_pos, img.aperture)
isl_id = isl.island_id
source_prop = list(['M', [tot, totE], [s_peak, isl.rms], [maxpeak, isl.rms],
[aper_flux, aper_fluxE], [[sra, sdec],
[sraE, sdecE]], [[mra, mdec], [sraE, sdecE]], [size_sky, size_skyE], [size_sky_uncorr, size_skyE],
[deconv_size_sky, deconv_size_skyE], [deconv_size_sky_uncorr, deconv_size_skyE], isl.bbox, len(g_sublist),
isl_id, g_sublist])
source = Source(img, source_prop)
src_index += 1
for g in g_sublist:
g.source_id = src_index
g.code = 'M'
source.source_id = src_index
return src_index, source
##################################################################################################
def make_subim(self, subn, subm, g_list, delc, mc=False):
from . import functions as func
subim = N.zeros((subn, subm), dtype=N.float32)
x, y = N.indices((subn, subm))
for g in g_list:
params = func.g2param(g)
params[1] -= delc[0]; params[2] -= delc[1]
if mc:
# draw random variables from distributions given by errors
params_err = func.g2param_err(g)
for i in range(len(params)):
mc_param = N.random.normal(loc=params[i], scale=params_err[i])
params[i] = mc_param
gau = func.gaus_2d(params, x, y)
subim = subim + gau
return subim
##################################################################################################
def make_mask(self, isl, subn, subm, nsrc, src_id, g_list, delc):
from . import functions as func
# define stuff for calculating gaussian
boxx, boxy = isl.bbox
subn = boxx.stop-boxx.start; subm = boxy.stop-boxy.start
x, y = N.indices((subn, subm))
# construct image of each source in the island
src_image = N.zeros((subn, subm, nsrc), dtype=N.float32)
nn = 1
for isrc in range(nsrc):
if nsrc == 1:
g_sublist = g_list
else:
posn = N.where(src_id == isrc)[0]
g_sublist=[]
for i in posn:
g_sublist.append(g_list[i])
for g in g_sublist:
params = func.g2param(g)
params[1] -= delc[0]; params[2] -= delc[1]
gau = func.gaus_2d(params, x, y)
src_image[:,:,isrc] = src_image[:,:,isrc] + gau
# mark each pixel as belonging to one source
# just compare value, should compare with sigma later
mask = N.argmax(src_image, axis=2) + src_id
orig_mask = isl.mask_active
mask[N.where(orig_mask)] = -1
return mask
##################################################################################################
# Define class Source
##################################################################################################
from .image import *
class Source(object):
""" Instances of this class store sources made from grouped gaussians. """
def __init__(self, img, sourceprop):
# Add attribute definitions needed for output
self.source_id_def = Int(doc="Source index", colname='Source_id')
self.code_def = String(doc='Source code S, C, or M', colname='S_Code')
self.total_flux_def = Float(doc="Total flux density (Jy)", colname='Total_flux', units='Jy')
self.total_fluxE_def = Float(doc="Error in total flux density (Jy)", colname='E_Total_flux',
units='Jy')
self.peak_flux_centroid_def = Float(doc="Peak flux density per beam at centroid of emission (Jy/beam)",
colname='Peak_flux_cen', units='Jy/beam')
self.peak_flux_centroidE_def = Float(doc="Error in peak flux density per beam at centroid of emission (Jy/beam)",
colname='E_Peak_flux_cen', units='Jy/beam')
self.peak_flux_max_def = Float(doc="Peak flux density per beam at posn of maximum emission (Jy/beam)",
colname='Peak_flux', units='Jy/beam')
self.peak_flux_maxE_def = Float(doc="Error in peak flux density per beam at posn of max emission (Jy/beam)",
colname='E_Peak_flux', units='Jy/beam')
self.aperture_flux_def = Float(doc="Total aperture flux density (Jy)", colname='Aperture_flux',
units='Jy')
self.aperture_fluxE_def = Float(doc="Error in total aperture flux density (Jy)", colname='E_Aperture_flux',
units='Jy')
self.posn_sky_centroid_def = List(Float(), doc="Posn (RA, Dec in deg) of centroid of source",
colname=['RA', 'DEC'], units=['deg', 'deg'])
self.posn_sky_centroidE_def = List(Float(), doc="Error in posn (RA, Dec in deg) of centroid of source",
colname=['E_RA', 'E_DEC'], units=['deg', 'deg'])
self.posn_sky_max_def = List(Float(), doc="Posn (RA, Dec in deg) of maximum emission of source",
colname=['RA_max', 'DEC_max'], units=['deg', 'deg'])
self.posn_sky_maxE_def = List(Float(), doc="Error in posn (deg) of maximum emission of source",
colname=['E_RA_max', 'E_DEC_max'], units=['deg', 'deg'])
self.posn_pix_centroid_def = List(Float(), doc="Position (x, y in pixels) of centroid of source",
colname=['Xposn', 'Yposn'], units=['pix', 'pix'])
self.posn_pix_centroidE_def = List(Float(), doc="Error in position (x, y in pixels) of centroid of source",
colname=['E_Xposn', 'E_Yposn'], units=['pix', 'pix'])
self.posn_pix_max_def = List(Float(), doc="Position (x, y in pixels) of maximum emission of source",
colname=['Xposn_max', 'Yposn_max'], units=['pix', 'pix'])
self.posn_pix_maxE_def = List(Float(), doc="Error in position (pixels) of maximum emission of source",
colname=['E_Xposn_max', 'E_Yposn_max'], units=['pix', 'pix'])
self.size_sky_def = List(Float(), doc="Shape of the source FWHM, BPA, deg",
colname=['Maj', 'Min', 'PA'], units=['deg', 'deg',
'deg'])
self.size_skyE_def = List(Float(), doc="Error on shape of the source FWHM, BPA, deg",
colname=['E_Maj', 'E_Min', 'E_PA'], units=['deg', 'deg',
'deg'])
self.deconv_size_sky_def = List(Float(), doc="Deconvolved shape of the source FWHM, BPA, deg",
colname=['DC_Maj', 'DC_Min', 'DC_PA'], units=['deg', 'deg',
'deg'])
self.deconv_size_skyE_def = List(Float(), doc="Error on deconvolved shape of the source FWHM, BPA, deg",
colname=['E_DC_Maj', 'E_DC_Min', 'E_DC_PA'], units=['deg', 'deg',
'deg'])
self.size_sky_uncorr_def = List(Float(), doc="Shape in image plane of the gaussian FWHM, PA, deg",
colname=['Maj_img_plane', 'Min_img_plane', 'PA_img_plane'], units=['deg', 'deg',
'deg'])
self.size_skyE_uncorr_def = List(Float(), doc="Error on shape in image plane of the gaussian FWHM, PA, deg",
colname=['E_Maj_img_plane', 'E_Min_img_plane', 'E_PA_img_plane'], units=['deg', 'deg',
'deg'])
self.deconv_size_sky_uncorr_def = List(Float(), doc="Deconvolved shape in image plane of the gaussian FWHM, PA, deg",
colname=['DC_Maj_img_plane', 'DC_Min_img_plane', 'DC_PA_img_plane'], units=['deg', 'deg',
'deg'])
self.deconv_size_skyE_uncorr_def = List(Float(), doc="Error on deconvolved shape in image plane of the gaussian FWHM, PA, deg",
colname=['E_DC_Maj_img_plane', 'E_DC_Min_img_plane', 'E_DC_PA_img_plane'], units=['deg', 'deg',
'deg'])
self.rms_isl_def = Float(doc="Island rms Jy/beam", colname='Isl_rms', units='Jy/beam')
self.mean_isl_def = Float(doc="Island mean Jy/beam", colname='Isl_mean', units='Jy/beam')
self.total_flux_isl_def = Float(doc="Island total flux from sum of pixels", colname='Isl_Total_flux', units='Jy')
self.total_flux_islE_def = Float(doc="Error on island total flux from sum of pixels", colname='E_Isl_Total_flux', units='Jy')
self.gresid_rms_def = Float(doc="Island rms in Gaussian residual image Jy/beam",
colname='Resid_Isl_rms', units='Jy/beam')
self.gresid_mean_def = Float(doc="Island mean in Gaussian residual image Jy/beam",
colname='Resid_Isl_mean', units='Jy/beam')
self.sresid_rms_def = Float(doc="Island rms in Shapelet residual image Jy/beam",
colname='Resid_Isl_rms', units='Jy/beam')
self.sresid_mean_def = Float(doc="Island mean in Shapelet residual image Jy/beam",
colname='Resid_Isl_mean', units='Jy/beam')
self.ngaus_def = Int(doc='Number of gaussians in the source', colname='N_gaus')
self.island_id_def = Int(doc="Serial number of the island", colname='Isl_id')
self.bbox_def = List(Instance(slice(0), or_none=False), doc = "")
self.spec_indx_def = Float(doc = "Spectral index", colname='Spec_Indx', units=None)
self.e_spec_indx_def = Float(doc = "Error in spectral index", colname='E_Spec_Indx', units=None)
self.specin_flux_def = List(Float(), doc = "Total flux density, Jy", colname=['Total_flux'], units=['Jy'])
self.specin_fluxE_def = List(Float(), doc = "Error in total flux density per channel, Jy", colname=['E_Total_flux'], units=['Jy'])
self.specin_freq_def = List(Float(), doc = "Frequency per channel, Hz", colname=['Freq'], units=['Hz'])
code, total_flux, peak_flux_centroid, peak_flux_max, aper_flux, posn_sky_centroid, \
posn_sky_max, size_sky, size_sky_uncorr, deconv_size_sky, \
deconv_size_sky_uncorr, bbox, ngaus, island_id, gaussians = sourceprop
self.code = code
self.total_flux, self.total_fluxE = total_flux
self.peak_flux_centroid, self.peak_flux_centroidE = peak_flux_centroid
self.peak_flux_max, self.peak_flux_maxE = peak_flux_max
self.posn_sky_centroid, self.posn_sky_centroidE = posn_sky_centroid
self.posn_sky_max, self.posn_sky_maxE = posn_sky_max
self.size_sky, self.size_skyE = size_sky
self.size_sky_uncorr, self.size_skyE_uncorr = size_sky_uncorr
self.deconv_size_sky, self.deconv_size_skyE = deconv_size_sky
self.deconv_size_sky_uncorr, self.deconv_size_skyE_uncorr = deconv_size_sky_uncorr
self.bbox = bbox
self.ngaus = ngaus
self.island_id = island_id
self.gaussians = gaussians
self.rms_isl = img.islands[island_id].rms
self.mean_isl = img.islands[island_id].mean
self.total_flux_isl = img.islands[island_id].total_flux
self.total_flux_islE = img.islands[island_id].total_fluxE
self.jlevel = img.j
self.aperture_flux, self.aperture_fluxE = aper_flux
PyBDSF-1.11.0/bdsf/gausfit.py 0000664 0000000 0000000 00000136606 14650706641 0015600 0 ustar 00root root 0000000 0000000 """Module gausfit.
This module does multi-gaussian fits for all detected islands.
At the moment the fitting algorithm is quite simple -- we just add
gaussians one-by-one as long as there are pixels with emission
in the image, and do post-fitting flagging of the extracted
gaussians.
The fitting itself is implemented by means of the MGFunction
class and a number of fitter routines in the _cbdsm module.
The MGFunction class implements the multi-gaussian function and
provides all functionality required by the specific fitters.
"""
from __future__ import print_function
from __future__ import absolute_import
from .image import *
from . import mylogger
from . import statusbar
from . import has_pl
if has_pl:
import matplotlib.pyplot as pl
import scipy.ndimage as nd
from . import multi_proc as mp
import itertools
class Op_gausfit(Op):
"""Fit a number of 2D gaussians to each island.
The results of the fitting are stored in the Island
structure itself as a list of Gaussian objects (gaul) and a
list of flagged gaussians (fgaul).
Prerequisites: module islands should be run first.
"""
def __call__(self, img):
from . import functions as func
mylog = mylogger.logging.getLogger("PyBDSM."+img.log+"Gausfit")
if len(img.islands) == 0:
img.gaussians = []
img.ngaus = 0
img.total_flux_gaus = 0.0
img.completed_Ops.append('gausfit')
return img
bar = statusbar.StatusBar('Fitting islands with Gaussians .......... : ',
0, img.nisl)
opts = img.opts
if not opts.quiet and not opts.verbose_fitting:
bar.start()
min_maxsize = 50.0
maxsize = opts.splitisl_maxsize
min_peak_size = 30.0
peak_size = opts.peak_maxsize
if maxsize < min_maxsize:
maxsize = min_maxsize
opts.splitisl_maxsize = min_maxsize
if peak_size < min_peak_size:
peak_size = min_peak_size
opts.peak_maxsize = min_peak_size
        # Set up multiprocessing. First create a simple copy of the Image
# object that contains the minimal data needed.
opts_dict = opts.to_dict()
img_simple = Image(opts_dict)
img_simple.pixel_beamarea = img.pixel_beamarea
img_simple.pixel_beam = img.pixel_beam
img_simple.thresh_pix = img.thresh_pix
img_simple.minpix_isl = img.minpix_isl
img_simple.clipped_mean = img.clipped_mean
img_simple.beam2pix = img.beam2pix
img_simple.beam = img.beam
# Next, define the weights to use when distributing islands among cores.
# The weight should scale with the processing time. At the moment
# we use the island area, but other parameters may be better.
weights = []
for isl in img.islands:
weights.append(isl.size_active)
# Now call the parallel mapping function. Returns a list of
# [gaul, fgaul] for each island. If ncores is 1, use the
# standard Python map function -- this helps with debugging in
# some circumstances
if opts.ncores == 1:
gaus_list = map(func.eval_func_tuple,
zip(itertools.repeat(self.process_island),
img.islands, itertools.repeat(img_simple),
itertools.repeat(opts)))
else:
gaus_list = mp.parallel_map(func.eval_func_tuple,
zip(itertools.repeat(self.process_island),
img.islands, itertools.repeat(img_simple),
itertools.repeat(opts)),
numcores=opts.ncores, bar=bar, weights=weights)
gaus_list = list(gaus_list)
for isl in img.islands:
# Now convert gaussians into Gaussian objects and store
idx = isl.island_id
gaul = gaus_list[idx][0]
fgaul = gaus_list[idx][1]
dgaul = []
if len(gaul) > 0:
gidx = gaul[-1][0] # save last index value for use with fgaul below
else:
gidx = 0
gaul = [Gaussian(img, par, idx, gidx)
for (gidx, par) in enumerate(gaul)]
if len(gaul) == 0:
# No good Gaussians were fit. In this case, make a dummy
# Gaussian located at the island center so
# that the source may still be included in output catalogs.
# These dummy Gaussians all have an ID of -1. They do not
# appear in any of the source or island Gaussian lists except
# the island dgaul list.
if opts.src_ra_dec is not None:
# Center the dummy Gaussian on the user-specified source position
posn_isl = (int(isl.shape[0]/2.0), int(isl.shape[1]/2.0))
posn_img = (int(isl.shape[0]/2.0 + isl.origin[0]), int(isl.shape[1]/2.0 + isl.origin[1]))
par = [isl.image[posn_isl], posn_img[0], posn_img[1], 0.0, 0.0, 0.0]
else:
# Center the dummy Gaussian on the maximum pixel
posn = N.unravel_index(N.argmax(isl.image*~isl.mask_active), isl.shape) + N.array(isl.origin)
par = [isl.max_value, posn[0], posn[1], 0.0, 0.0, 0.0]
dgaul = [Gaussian(img, par, idx, -1)]
# Now make the list of flagged Gaussians, if any
fgaul = [Gaussian(img, par, idx, gidx + gidx2 + 1, flag)
for (gidx2, (flag, par)) in enumerate(fgaul)]
isl.gaul = gaul
isl.fgaul = fgaul
isl.dgaul = dgaul
gaussian_list = [g for isl in img.islands for g in isl.gaul]
img.gaussians = gaussian_list
# Put in the serial number of the gaussians for the whole image
n = 0
nn = 0
tot_flux = 0.0
for isl in img.islands:
m = 0
for g in isl.gaul:
n += 1
m += 1
g.gaus_num = n - 1
tot_flux += g.total_flux
for dg in isl.dgaul:
nn -= 1
dg.gaus_num = nn
isl.ngaus = m
img.ngaus = n
img.total_flux_gaus = tot_flux
mylogger.userinfo(mylog, "Total number of Gaussians fit to image",
str(n))
if not img._pi and not img.waveletimage:
mylogger.userinfo(mylog, "Total flux density in model", '%.3f Jy' %
tot_flux)
# Check if model flux is very different from sum of flux in image
if img.ch0_sum_jy > 0 and not img._pi:
if img.total_flux_gaus/img.ch0_sum_jy < 0.5 or \
img.total_flux_gaus/img.ch0_sum_jy > 2.0:
mylog.warn('Total flux density in model is %0.2f times sum of pixels '
'in input image. Large residuals may remain.' %
(img.total_flux_gaus/img.ch0_sum_jy,))
# Check if there are many Gaussians with deconvolved size of 0 in one
# axis but not in the other. Don't bother to do this for wavelet images.
fraction_1d = self.check_for_1d_gaussians(img)
if fraction_1d > 0.5 and img.beam is not None and not img.waveletimage:
mylog.warn("After deconvolution, more than 50% of Gaussians are "
"1-D. Unless you're fitting an extended source, "
"beam may be incorrect.")
img.completed_Ops.append('gausfit')
return img
def process_island(self, isl, img, opts=None):
"""Processes a single island.
Returns a list of the best-fit Gaussians and flagged Gaussians.
"""
from . import functions as func
if opts is None:
opts = img.opts
iter_ngmax = 10
maxsize = opts.splitisl_maxsize
min_peak_size = 30.0
min_maxsize = 50.0
peak_size = opts.peak_maxsize
if maxsize < min_maxsize:
maxsize = min_maxsize
opts.splitisl_maxsize = min_maxsize
if peak_size < min_peak_size:
peak_size = min_peak_size
opts.peak_maxsize = min_peak_size
size = isl.size_active/img.pixel_beamarea()*2.0 # 2.0 roughly corrects for thresh_isl
if opts.verbose_fitting:
print("Fitting isl #", isl.island_id, '; # pix = ', N.sum(~isl.mask_active), '; size = ', size)
if size > maxsize:
tosplit = func.isl_tosplit(isl, opts)
if opts.split_isl and tosplit[0] > 0:
n_subisl, sub_labels = tosplit[1], tosplit[2]
gaul = []
fgaul = []
if opts.verbose_fitting:
print('SPLITTING ISLAND INTO ', n_subisl, ' PARTS FOR ISLAND ', isl.island_id)
for i_sub in range(n_subisl):
islcp = isl.copy(img.pixel_beamarea())
islcp.mask_active = N.where(sub_labels == i_sub+1, False, True)
islcp.mask_noisy = N.where(sub_labels == i_sub+1, False, True)
size_subisl = (~islcp.mask_active).sum()/img.pixel_beamarea()*2.0
if opts.peak_fit and size_subisl > peak_size:
sgaul, sfgaul = self.fit_island_iteratively(img, islcp, iter_ngmax=iter_ngmax, opts=opts)
else:
sgaul, sfgaul = self.fit_island(islcp, opts, img)
gaul = gaul + sgaul
fgaul = fgaul + sfgaul
else:
isl.islmean = 0.0
if opts.peak_fit and size > peak_size:
gaul, fgaul = self.fit_island_iteratively(img, isl, iter_ngmax=iter_ngmax, opts=opts)
else:
gaul, fgaul = self.fit_island(isl, opts, img)
else:
if opts.peak_fit and size > peak_size:
gaul, fgaul = self.fit_island_iteratively(img, isl, iter_ngmax=iter_ngmax, opts=opts)
else:
gaul, fgaul = self.fit_island(isl, opts, img)
# Return list of Gaussians
return [gaul, fgaul]
def fit_island(self, isl, opts, img, ngmax=None, ffimg=None, ini_gausfit=None):
"""Fit island with a set of 2D gaussians.
Parameters:
isl: island
opts: Opts structure of the image
        img: Image object, whose beam parameters are used as an initial
             guess for the gaussian shape
Returns:
Function returns 2 lists with parameters of good and flagged
gaussians. Gaussian parameters are updated to be image-relative.
Note: "fitok" indicates whether fit converged
and one or more flagged Gaussians indicate
that significant residuals remain (peak > thr).
"""
from ._cbdsm import MGFunction
from . import functions as func
from .const import fwsig
verbose = opts.verbose_fitting
if verbose:
print('Entering fit_island in verbose mode')
if ffimg is None:
fit_image = isl.image-isl.islmean
else:
fit_image = isl.image-isl.islmean-ffimg
fcn = MGFunction(fit_image, isl.mask_active, 1)
# For fitting, use img.beam instead of img.pixel_beam, as we want
# to pick up the wavelet beam (img.pixel_beam is not changed for
# wavelet images, but img.beam is)
beam = N.array(img.beam2pix(img.beam))
beam = (beam[0]/fwsig, beam[1]/fwsig, beam[2]+90.0) # change angle from +y-axis to +x-axis and FWHM to sigma
if abs(beam[0]/beam[1]) < 1.1:
beam = (1.1*beam[0], beam[1], beam[2])
thr1 = isl.mean + opts.thresh_isl*isl.rms
thr0 = thr1
g3_only = opts.fix_to_beam
peak = fcn.find_peak()[0]
dof = isl.size_active
shape = isl.shape
size = isl.size_active/img.pixel_beamarea()*2.0
gaul = []
iter = 0
ng1 = 0
if ini_gausfit is None:
ini_gausfit = opts.ini_gausfit
if ini_gausfit not in ['default', 'simple', 'nobeam']:
ini_gausfit = 'default'
if ini_gausfit == 'simple' and ngmax is None:
ngmax = 25
if ini_gausfit == 'default' or opts.fix_to_beam:
gaul, ng1, ngmax = self.inigaus_fbdsm(isl, thr0, beam, img)
if len(gaul) > 25:
ini_gausfit = 'simple'
gaul = []
ng1 = 0
ngmax = 25
if ini_gausfit == 'nobeam' and not opts.fix_to_beam:
gaul = self.inigaus_nobeam(isl, thr0, beam, img)
ng1 = len(gaul)
ngmax = ng1+2
if verbose:
print('Initializing, ini_gausfit is', ini_gausfit, 'gaul =', gaul, 'ngmax =', ngmax)
while iter < 5:
iter += 1
if verbose:
print('In Gaussian flag loop, iter =', iter)
fitok = self.fit_iter(gaul, ng1, fcn, dof, beam, thr0, iter, ini_gausfit, ngmax, verbose, g3_only)
if verbose:
print('Calling flag_gaussians')
gaul, fgaul = self.flag_gaussians(fcn.parameters, opts,
beam, thr0, peak, shape, isl.mask_active,
isl.image, size)
if verbose:
print('Leaving flag_gaussians')
ng1 = len(gaul)
if fitok and len(fgaul) == 0:
break
if (not fitok or len(gaul) == 0) and ini_gausfit != 'simple':
if verbose:
print('Using simple method instead')
# If fits using default or nobeam methods did not work,
# try using simple instead
gaul = []
iter = 0
ng1 = 0
ngmax = 25
while iter < 5:
iter += 1
fitok = self.fit_iter(gaul, ng1, fcn, dof, beam, thr0, iter, 'simple', ngmax, verbose, g3_only)
gaul, fgaul = self.flag_gaussians(fcn.parameters, opts,
beam, thr0, peak, shape, isl.mask_active,
isl.image, size)
ng1 = len(gaul)
if fitok and len(fgaul) == 0:
break
sm_isl = nd.binary_dilation(isl.mask_active)
if (not fitok or len(gaul) == 0) and N.sum(~sm_isl) >= img.minpix_isl:
if verbose:
print('Fit still not OK, shrinking')
# If fitting still fails, shrink the island a little and try again
fcn = MGFunction(fit_image, nd.binary_dilation(isl.mask_active), 1)
gaul = []
iter = 0
ng1 = 0
ngmax = 25
while iter < 5:
iter += 1
fitok = self.fit_iter(gaul, ng1, fcn, dof, beam, thr0, iter, 'simple', ngmax, verbose, g3_only)
gaul, fgaul = self.flag_gaussians(fcn.parameters, opts,
beam, thr0, peak, shape, isl.mask_active,
isl.image, size)
ng1 = len(gaul)
if fitok and len(fgaul) == 0:
break
lg_isl = nd.binary_erosion(isl.mask_active)
if (not fitok or len(gaul) == 0) and N.sum(~lg_isl) >= img.minpix_isl:
if verbose:
print('Fit still not OK, expanding')
# If fitting still fails, expand the island a little and try again
fcn = MGFunction(fit_image, nd.binary_erosion(isl.mask_active), 1)
gaul = []
iter = 0
ng1 = 0
ngmax = 25
while iter < 5:
iter += 1
fitok = self.fit_iter(gaul, ng1, fcn, dof, beam, thr0, iter, 'simple', ngmax, verbose, g3_only)
gaul, fgaul = self.flag_gaussians(fcn.parameters, opts,
beam, thr0, peak, shape, isl.mask_active,
isl.image, size)
ng1 = len(gaul)
if fitok and len(fgaul) == 0:
break
if not fitok or len(gaul) == 0:
# If all else fails, try to use moment analysis
if verbose:
print('All else has failed, trying moment analysis')
inisl = N.where(~isl.mask_active)
mask_id = N.zeros(isl.image.shape, dtype=N.int32) - 1
mask_id[inisl] = isl.island_id
try:
pixel_beamarea = img.pixel_beamarea()
mompara = func.momanalmask_gaus(fit_image, mask_id, isl.island_id, pixel_beamarea, True)
mompara[5] += 90.0
if not N.isnan(mompara[1]) and not N.isnan(mompara[2]):
x1 = int(N.floor(mompara[1]))
y1 = int(N.floor(mompara[2]))
                    t = mompara[1] - x1  # fractional offsets for bilinear interpolation
                    u = mompara[2] - y1
s_peak = ((1.0-t) * (1.0-u) * fit_image[x1, y1] + t * (1.0-u) * fit_image[x1+1, y1] +
t * u * fit_image[x1+1, y1+1] + (1.0-t) * u * fit_image[x1, y1+1])
mompara[0] = s_peak
par = [mompara.tolist()]
par[3] /= fwsig
par[4] /= fwsig
gaul, fgaul = self.flag_gaussians(par, opts,
beam, thr0, peak, shape, isl.mask_active,
isl.image, size)
            except Exception:
pass
# Return whatever we got
if verbose:
print('Preparing to return')
isl.mg_fcn = fcn
gaul = [self.fixup_gaussian(isl, g) for g in gaul]
fgaul = [(flag, self.fixup_gaussian(isl, g)) for flag, g in fgaul]
if verbose:
print('Number of good Gaussians: %i' % (len(gaul),))
print('Number of flagged Gaussians: %i' % (len(fgaul),))
return gaul, fgaul
def fit_island_iteratively(self, img, isl, iter_ngmax=5, opts=None):
"""Fits an island iteratively.
For large islands, which can require many Gaussians to fit well,
it is much faster to fit a small number of Gaussians simultaneously
and iterate. However, this does usually result in larger residuals.
"""
from . import functions as func
sgaul = []
sfgaul = []
gaul = []
fgaul = []
if opts is None:
opts = img.opts
thresh_isl = opts.thresh_isl
thresh = opts.fittedimage_clip
thr = isl.mean + thresh_isl * isl.rms
if opts.verbose_fitting:
print('Iteratively fitting island ', isl.island_id)
gaul = []
fgaul = []
ffimg_tot = N.zeros(isl.shape, dtype=N.float32)
peak_val = N.max(isl.image - isl.islmean)
count = 0
while peak_val >= thr:
count += 1
if opts.verbose_fitting:
print('Iteration %i' % count)
sgaul, sfgaul = self.fit_island(isl, opts, img, ffimg=ffimg_tot, ngmax=iter_ngmax, ini_gausfit='simple')
gaul = gaul + sgaul
fgaul = fgaul + sfgaul
# Calculate residual image
if len(sgaul) > 0:
for g in sgaul:
gcopy = g[:]
gcopy[1] -= isl.origin[0]
gcopy[2] -= isl.origin[1]
S1, S2, Th = func.corrected_size(gcopy[3:6])
gcopy[3] = S1
gcopy[4] = S2
gcopy[5] = Th
A, C1, C2, S1, S2, Th = gcopy
shape = isl.shape
b = find_bbox(thresh*isl.rms, gcopy)
bbox = N.s_[max(0, int(C1-b)):min(shape[0], int(C1+b+1)),
max(0, int(C2-b)):min(shape[1], int(C2+b+1))]
x_ax, y_ax = N.mgrid[bbox]
ffimg = func.gaussian_fcn(gcopy, x_ax, y_ax)
ffimg_tot[bbox] += ffimg
peak_val_prev = peak_val
peak_val = N.max(isl.image - isl.islmean - ffimg_tot)
if func.approx_equal(peak_val, peak_val_prev):
break
else:
break
if len(gaul) == 0:
if opts.verbose_fitting:
# Fitting iteratively did not work -- try normal fit
print('Iterative fitting failed for', isl.island_id)
gaul, fgaul = self.fit_island(isl, opts, img, ini_gausfit='default')
else:
if opts.verbose_fitting:
print('Iterative fitting succeeded for', isl.island_id)
return gaul, fgaul
def inigaus_fbdsm(self, isl, thr, beam, img):
""" initial guess for gaussians like in fbdsm """
from math import sqrt
from .const import fwsig
from . import functions as func
im = isl.image-isl.islmean
if img.opts.ini_method == 'curvature':
im_pos = -1.0 * func.make_curvature_map(isl.image-isl.islmean)
thr_pos = 0.0
else:
im_pos = im
thr_pos = thr
mask = isl.mask_active
av = img.clipped_mean
inipeak, iniposn, im1 = func.get_maxima(im, mask, thr_pos, isl.shape, beam, im_pos=im_pos)
if len(inipeak) == 0:
av, stdnew, maxv, maxp, minv, minp = func.arrstatmask(im, mask)
inipeak = [maxv]
iniposn = [maxp]
nmulsrc1 = len(iniposn)
domore = True
while domore:
domore = False
av, stdnew, maxv, maxp, minv, minp = func.arrstatmask(im1, mask)
if stdnew > isl.rms and maxv >= thr and maxv >= isl.mean+2.0*isl.rms:
domore = True
x1, y1 = N.array(iniposn).transpose()
dumr = N.sqrt((maxp[0]-x1)*(maxp[0]-x1)+(maxp[1]-y1)*(maxp[1]-y1))
distbm = dumr/sqrt(beam[0]*beam[1]*fwsig*fwsig)
if N.any((distbm < 0.5) + (dumr < 2.2)):
domore = False
if domore:
iniposn.append(N.array(maxp))
inipeak.append(maxv)
im1 = func.mclean(im1, maxp, beam)
inipeak = N.array(inipeak)
iniposn = N.array(iniposn)
ind = list(N.argsort(inipeak))
ind.reverse()
inipeak = inipeak[ind]
iniposn = iniposn[ind]
gaul = []
for i in range(len(inipeak)):
g = (float(inipeak[i]), int(iniposn[i][0]), int(iniposn[i][1])) + beam
gaul.append(g)
return gaul, nmulsrc1, len(inipeak)
def inigaus_nobeam(self, isl, thr, beam, img):
""" To get initial guesses when the source sizes are very different
from the beam, and can also be elongated. Mainly in the context of
a-trous transform images. Need to arrive at a good guess of the sizes
and hence need to partition the image around the maxima first. Tried the
IFT watershed algo but with markers, it segments the island only around
the minima and not the whole island. Cant find a good weighting scheme
for tesselation either. Hence will try this :
Calculate number of maxima. If one, then take moment as initial
guess. If more than one, then moment of whole island is one of the
guesses if mom1 is within n pixels of one of the maxima. Else dont take
whole island moment. Instead, find minima on lines connecting all maxima
and use geometric mean of all minima of a peak as the size of that peak.
"""
from math import sqrt
from .const import fwsig
import scipy.ndimage as nd
from . import functions as func
im = isl.image-isl.islmean
if img.opts.ini_method == 'curvature':
im_pos = -1.0 * func.make_curvature_map(isl.image-isl.islmean)
thr_pos = 0.0
else:
im_pos = im
thr_pos = -1e9
mask = isl.mask_active
av = img.clipped_mean
inipeak, iniposn, im1 = func.get_maxima(im, mask, thr_pos, isl.shape, beam, im_pos=im_pos)
npeak = len(iniposn)
gaul = []
av, stdnew, maxv, maxp, minv, minp = func.arrstatmask(im, mask)
mom = func.momanalmask_gaus(isl.image-isl.islmean, isl.mask_active, 0, 1.0, True)
if npeak <= 1:
g = (float(maxv), int(round(mom[1])), int(round(mom[2])), mom[3]/fwsig,
mom[4]/fwsig, mom[5])
gaul.append(g)
if npeak > 1: # markers start from 1=background, watershed starts from 1=background
watershed, markers = func.watershed(im, mask=isl.mask_active)
nshed = N.max(markers)-1 # excluding background
xm, ym = N.transpose([N.where(markers == i) for i in range(1, nshed+2)])[0]
coords = [c for c in N.transpose([xm, ym])[1:]]
            alldists = [func.dist_2pt(c1, c2) for c1 in coords for c2 in coords if N.any(c1 != c2)]  # counts each pair twice
meandist = N.mean(alldists) # mean dist between all pairs of markers
# Find at least some 'compact' sources
cscale = 3.0
while True:
compact = []
invmask = []
for ished in range(nshed):
shedmask = N.where(watershed == ished+2, False, True) + isl.mask_active # good unmasked pixels = 0
imm = nd.binary_dilation(~shedmask, N.ones((3, 3), int))
xbad, ybad = N.where((imm == 1)*(im > im[xm[ished+1], ym[ished+1]]))
imm[xbad, ybad] = 0
invmask.append(imm)
x, y = N.where(imm)
xcen, ycen = N.mean(x), N.mean(y) # good pixels are now = 1
dist = func.dist_2pt([xcen, ycen], [xm[ished+1], ym[ished+1]])
if dist < max(cscale, meandist/4.0):
compact.append(True) # if not compact, break source + diffuse
else:
compact.append(False)
if N.any(compact):
break
else:
# Rescale to search for more compact sources
cscale *= 1.5
if not N.all(compact):
o_avsize = []
ind = N.where(compact)[0]
for i in ind:
o_avsize.append(N.sum(invmask[i]))
avsize = sqrt(N.mean(N.array(o_avsize)))
for i in range(len(compact)):
if not compact[i]: # make them all compact
newmask = N.zeros(imm.shape, bool)
newmask[max(0, int(xm[i+1]-avsize/2)):min(im.shape[0], int(xm[i+1]+avsize/2)),
max(0, int(ym[i+1]-avsize/2)):min(im.shape[1], int(ym[i+1]+avsize/2))] = True
invmask[i] = invmask[i]*newmask
resid = N.zeros(im.shape, dtype=N.float32) # approx fit all compact ones
for i in range(nshed):
                size = sqrt(N.sum(invmask[i]))/fwsig  # initial size from this source's mask
xf, yf = coords[i][0], coords[i][1]
p_ini = [im[xf, yf], xf, yf, size, size, 0.0]
x, y = N.indices(im.shape)
p, success = func.fit_gaus2d(im*invmask[i], p_ini, x, y)
resid = resid + func.gaus_2d(p, x, y)
gaul.append(p)
resid = im - resid
if not N.all(compact): # just add one gaussian to fit whole unmasked island
maxv = N.max(resid) # assuming resid has only diffuse emission. can be false
x, y = N.where(~isl.mask_active)
xcen = N.mean(x)
ycen = N.mean(y)
invm = ~isl.mask_active
mom = func.momanalmask_gaus(invm, N.zeros(invm.shape, dtype=N.int16), 0, 1.0, True)
g = (maxv, xcen, ycen, mom[3]/fwsig, mom[4]/fwsig, mom[5]-90.)
gaul.append(g)
coords.append([xcen, ycen])
return gaul
def fit_iter(self, gaul, ng1, fcn, dof, beam, thr, iter, inifit, ngmax, verbose=1, g3_only=False):
"""One round of fitting
Parameters:
        gaul : list of initial gaussians
        ng1 : number of gaussians from gaul to add to the fit before iterating
        fcn : MGFunction object
        dof : maximal number of fitted parameters
        beam : initial shape for newly added gaussians
               [bmaj, bmin, bpa] in pixels
        thr : peak threshold for adding more gaussians
        iter : current iteration number of the calling loop
        inifit : initial-guess method in use ('default', 'simple', or 'nobeam')
        ngmax : maximum number of gaussians to fit
        verbose: whether to print fitting progress information
        g3_only : if True, only fit amplitude and position (g3)
"""
from ._cbdsm import lmder_fit
if verbose:
print('Greetings from fit_iter')
fit = lmder_fit
beam = list(beam)
        # First, drop in the initial gaussians.
# No error-checking here, they MUST fit
fcn.reset()
for ig in range(ng1):
g = gaul[ig]
self.add_gaussian(fcn, g, dof, g3_only)
# Do a round of fitting if any initials were provided
if verbose:
print('About to call C++ wrapper')
fitok = True
if len(gaul) != 0:
fitok = fit(fcn, final=0, verbose=verbose)
if verbose:
print('Returned from the fit')
# Iteratively add gaussians while there are high peaks
# in the image and fitting converges
while fitok:
peak, coords = fcn.find_peak()
if peak < thr: # no good peaks left
break
if len(fcn.parameters) < ngmax and iter == 1 and inifit == 'default' and len(gaul) >= ng1+1:
ng1 = ng1 + 1
g = gaul[ng1-1]
else:
if len(fcn.parameters) < ngmax:
g = [peak, coords[0], coords[1]] + beam
else:
break
fitok &= self.add_gaussian(fcn, g, dof, g3_only)
fitok &= fit(fcn, final=0, verbose=verbose)
# And one last fit with higher precision
# make sure we return False when fitok==False due to lack
# of free parameters
fitok &= fit(fcn, final=1, verbose=verbose)
return fitok
def add_gaussian(self, fcn, parameters, dof, g3_only=False):
"""Try adding one more gaussian to fcn object.
        It tries to reduce the number of fitted parameters if
        there are not enough degrees of freedom (DoF) left.
        Note: g1 fits amplitude only
        g3 fits amplitude and position
        g6 fits all parameters
        Parameters:
        fcn: MGFunction object
        parameters: initial values for gaussian parameters
        dof: total possible number of fitted parameters
        g3_only: if True, only a g3 gaussian (amplitude and position) may be added
"""
from ._cbdsm import Gtype
if g3_only:
gtype = (Gtype.g3 if fcn.fitted_parameters() + 3 <= dof else None)
else:
gtype = (Gtype.g3 if fcn.fitted_parameters() + 3 <= dof else None)
gtype = (Gtype.g6 if fcn.fitted_parameters() + 6 <= dof else gtype)
if gtype:
fcn.add_gaussian(gtype, parameters)
return True
else:
return False
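    # Example of the DoF logic above (illustrative numbers): with dof = 10 and
    # six parameters already fitted, a g6 would need 6 + 6 = 12 > 10, but a g3
    # (amplitude + position) needs only 6 + 3 = 9 <= 10, so a g3 is added.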
def flag_gaussians(self, gaul, opts, beam, thr, peak, shape, isl_mask, isl_image, size):
"""Flag gaussians according to some rules.
Splits list of gaussian parameters in 2, where the first
one is a list of parameters for accepted gaussians, and
the second one is a list of pairs (flag, parameters) for
flagged gaussians.
Parameters:
gaul: input list of gaussians
opts: Opts object to extract flagging parameters from
beam: beam shape
thr: threshold for pixels with signal
peak: peak data value in the current island
shape: shape of the current island
        isl_mask: island mask
        isl_image: island image data
        size: island size in units of the beam area
"""
good = []
bad = []
for g in gaul:
flag = self._flag_gaussian(g, beam, thr, peak, shape, opts, isl_mask, isl_image, size)
if flag:
bad.append((flag, g))
else:
good.append(g)
return good, bad
def _flag_gaussian(self, g, beam, thr, peak, shape, opts, mask, image, size_bms):
"""The actual flagging routine. See above for description.
"""
from math import sqrt, sin, cos, pi
from . import functions as func
import scipy.ndimage as nd
A, x1, x2, s1, s2, th = g
s1, s2 = map(abs, [s1, s2])
flag = 0
if N.any(N.isnan(g)) or s1 == 0.0 or s2 == 0.0:
return -1
if s1 < s2: # s1 etc are sigma
ss1 = s2
ss2 = s1
th1 = divmod(th+90.0, 180)[1]
else:
ss1 = s1
ss2 = s2
th1 = divmod(th, 180)[1]
th1 = th1/180.0*pi
if ss1 > 1e4 and ss2 > 1e4:
xbox = 1e9
ybox = 1e9
else:
xbox = 2.0 * (abs(ss1 * cos(th1) * cos(th1)) + abs(ss2 * ss2 / ss1 * sin(th1) * sin(th1))) / \
sqrt(cos(th1) * cos(th1) + ss2 * ss2 / ss1 / ss1 * sin(th1) * sin(th1))
ybox = 2.0 * (abs(ss1 * sin(th1) * sin(th1)) + abs(ss2 * ss2 / ss1 * cos(th1) * cos(th1))) / \
sqrt(sin(th1) * sin(th1) + ss2 * ss2 / ss1 / ss1 * cos(th1) * cos(th1))
# Now check all conditions
border = opts.flag_bordersize
x1ok = True
x2ok = True
flagmax = False
if A < opts.flag_minsnr*thr:
flag += 1
if A > opts.flag_maxsnr*peak:
flag += 2
flagmax = True
if x1 - border < 0 or x1 + border + 1 > shape[0]:
flag += 4
x1ok = False
if x2 - border < 0 or x2 + border + 1 > shape[1]:
flag += 8
x2ok = False
if x1ok and x2ok:
if not flagmax:
# Check image value at Gaussian center
im_val_at_cen = nd.map_coordinates(image, [N.array([x1]), N.array([x2])])
if A > opts.flag_maxsnr*im_val_at_cen:
flag += 2
borx1_1 = x1 - border
if borx1_1 < 0:
borx1_1 = 0
borx1_2 = x1 + border + 1
if borx1_2 > shape[0]:
borx1_2 = shape[0]
if N.any(mask[int(borx1_1):int(borx1_2), int(x2)]):
flag += 4
borx2_1 = x2 - border
if borx2_1 < 0:
borx2_1 = 0
borx2_2 = x2 + border + 1
if borx2_2 > shape[1]:
borx2_2 = shape[1]
if N.any(mask[int(x1), int(borx2_1):int(borx2_2)]):
flag += 8
if xbox > opts.flag_maxsize_isl*shape[0]:
flag += 16
if ybox > opts.flag_maxsize_isl*shape[1]:
flag += 32
if s1*s2 > opts.flag_maxsize_bm*beam[0]*beam[1]:
flag += 64
if opts.flag_smallsrc:
if s1*s2 < opts.flag_minsize_bm*beam[0]*beam[1]:
flag += 128
if not opts.flag_smallsrc:
if s1*s2 == 0.:
flag += 128
if ss1/ss2 > 2.0:
# Only check for fairly elliptical Gaussians, as this condition
# is unreliable for more circular ones.
ellx, elly = func.drawellipse([A, x1, x2, s1*opts.flag_maxsize_fwhm,
s2*opts.flag_maxsize_fwhm, th])
pt1 = [N.min(ellx), elly[N.argmin(ellx)]]
pt2 = [ellx[N.argmax(elly)], N.max(elly)]
pt3 = [N.max(ellx), elly[N.argmax(ellx)]]
pt4 = [ellx[N.argmin(elly)], N.min(elly)]
extremes = [pt1, pt2, pt3, pt4]
for pt in extremes:
if N.any(N.isnan(pt)):
flag += 256
break
elif pt[0] < 0 or pt[0] >= shape[0] or pt[1] < 0 or pt[1] >= shape[1]:
flag += 256
break
elif mask[int(pt[0]), int(pt[1])]:
flag += 256
break
return flag
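    # The flag returned above is a bitmask of the failed checks: 1 (peak below
    # flag_minsnr), 2 (peak above flag_maxsnr), 4/8 (centre too close to the
    # island border in x1/x2), 16/32 (fit too large relative to the island),
    # 64 (fit larger than flag_maxsize_bm beam areas), 128 (fit too small),
    # 256 (elongated fit extends off-island). E.g. flag == 12 means both
    # border checks (4 + 8) failed.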
def fixup_gaussian(self, isl, gaussian):
"""Normalize parameters by adjusting them to the
proper image coordinates and ensuring that all of
the implicit conventions (such as bmaj >= bmin) are met.
"""
np = list(gaussian)
# Update to the image coordinates
np[1] += isl.origin[0]
np[2] += isl.origin[1]
# Shape values should be positive
np[3] = abs(np[3])
np[4] = abs(np[4])
# First extent is major
if np[3] < np[4]:
np[3:5] = np[4:2:-1]
np[5] += 90
# Clip position angle
np[5] = divmod(np[5], 180)[1]
return np
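    # Worked example for fixup_gaussian (illustrative numbers): fitted sizes
    # (2, 5) with angle 30 deg are swapped to (5, 2) with angle 120 deg, and
    # divmod(120, 180)[1] leaves the position angle at 120 deg.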
def check_for_1d_gaussians(self, img):
"""Check for Gaussians with deconvolved sizes of 0 for one axis only."""
n1d = 0
ng = 0
for g in img.gaussians:
ng += 1
dsize = g.deconv_size_sky
if (dsize[0] == 0 and dsize[1] > 0) or (dsize[0] > 0 and dsize[1] == 0):
n1d += 1
if ng > 0:
return float(n1d)/float(ng)
else:
return 0.0
def find_bbox(thresh, g):
"""Calculate bounding box for gaussian.
    This function calculates the size of the box for evaluating
    the gaussian, so that the value of the gaussian is smaller than
    the threshold outside of the box.
    Parameters:
    thresh: threshold
    g: Gaussian object or list of parameters
"""
from math import ceil, sqrt, log
if isinstance(g, list):
A = g[0]
S = g[3]
else:
A = g.peak_flux
S = g.size_pix[0]
if A == 0.0:
return ceil(S*1.5)
if thresh/A >= 1.0 or thresh/A <= 0.0:
return ceil(S*1.5)
return ceil(S*sqrt(-2*log(thresh/A)))
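# Worked example for find_bbox (illustrative numbers): for A = 1.0, S = 3
# pixels and thresh = 0.01, the half-size is ceil(3*sqrt(-2*log(0.01)))
# = ceil(9.1) = 10, beyond which the gaussian falls below the threshold.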
class Gaussian(object):
"""Instances of this class are used to store information about
extracted gaussians in a structured way.
"""
def __init__(self, img, gaussian, isl_idx, g_idx, flg=0):
"""Initialize Gaussian object from fitting data
Parameters:
img: PyBDSM image object
gaussian: 6-tuple of fitted numbers
isl_idx: island serial number
g_idx: gaussian serial number
flg: flagging (if any)
"""
from . import functions as func
import numpy as N
# Add attribute definitions needed for output
self.source_id_def = Int(doc="Source index", colname='Source_id')
self.code_def = String(doc='Source code S, C, or M', colname='S_Code')
self.gaus_num_def = Int(doc="Serial number of the gaussian for the image", colname='Gaus_id')
self.island_id_def = Int(doc="Serial number of the island", colname='Isl_id')
self.flag_def = Int(doc="Flag associated with gaussian", colname='Flag')
self.total_flux_def = Float(doc="Total flux density, Jy", colname='Total_flux', units='Jy')
self.total_fluxE_def = Float(doc="Total flux density error, Jy", colname='E_Total_flux',
units='Jy')
self.peak_flux_def = Float(doc="Peak flux density/beam, Jy/beam", colname='Peak_flux',
units='Jy/beam')
self.peak_fluxE_def = Float(doc="Peak flux density/beam error, Jy/beam",
colname='E_Peak_flux', units='Jy/beam')
self.centre_sky_def = List(Float(), doc="Sky coordinates of gaussian centre",
colname=['RA', 'DEC'], units=['deg', 'deg'])
self.centre_skyE_def = List(Float(), doc="Error on sky coordinates of gaussian centre",
colname=['E_RA', 'E_DEC'], units=['deg', 'deg'])
self.centre_pix_def = List(Float(), doc="Pixel coordinates of gaussian centre",
colname=['Xposn', 'Yposn'], units=['pix', 'pix'])
self.centre_pixE_def = List(Float(), doc="Error on pixel coordinates of gaussian centre",
colname=['E_Xposn', 'E_Yposn'], units=['pix', 'pix'])
self.size_sky_def = List(Float(), doc="Shape of the gaussian FWHM, PA, deg",
colname=['Maj', 'Min', 'PA'], units=['deg', 'deg', 'deg'])
self.size_skyE_def = List(Float(), doc="Error on shape of the gaussian FWHM, PA, deg",
colname=['E_Maj', 'E_Min', 'E_PA'], units=['deg', 'deg', 'deg'])
self.deconv_size_sky_def = List(Float(), doc="Deconvolved shape of the gaussian FWHM, PA, deg",
colname=['DC_Maj', 'DC_Min', 'DC_PA'], units=['deg', 'deg', 'deg'])
self.deconv_size_skyE_def = List(Float(), doc="Error on deconvolved shape of the gaussian FWHM, PA, deg",
colname=['E_DC_Maj', 'E_DC_Min', 'E_DC_PA'], units=['deg', 'deg', 'deg'])
self.size_sky_uncorr_def = List(Float(), doc="Shape in image plane of the gaussian FWHM, PA, deg",
colname=['Maj_img_plane', 'Min_img_plane', 'PA_img_plane'],
units=['deg', 'deg', 'deg'])
self.size_skyE_uncorr_def = List(Float(), doc="Error on shape in image plane of the gaussian FWHM, PA, deg",
colname=['E_Maj_img_plane', 'E_Min_img_plane', 'E_PA_img_plane'],
units=['deg', 'deg', 'deg'])
self.deconv_size_sky_uncorr_def = List(Float(), doc="Deconvolved shape in image plane of the gaussian FWHM, PA, deg",
colname=['DC_Maj_img_plane', 'DC_Min_img_plane', 'DC_PA_img_plane'],
units=['deg', 'deg', 'deg'])
self.deconv_size_skyE_uncorr_def = List(Float(), doc="Error on deconvolved shape in image plane of the gaussian FWHM, PA, deg",
colname=['E_DC_Maj_img_plane', 'E_DC_Min_img_plane', 'E_DC_PA_img_plane'],
units=['deg', 'deg', 'deg'])
self.rms_def = Float(doc="Island rms, Jy/beam", colname='Isl_rms', units='Jy/beam')
self.mean_def = Float(doc="Island mean, Jy/beam", colname='Isl_mean', units='Jy/beam')
self.total_flux_isl_def = Float(doc="Island total flux from sum of pixels", colname='Isl_Total_flux', units='Jy')
self.total_flux_islE_def = Float(doc="Error on island total flux from sum of pixels", colname='E_Isl_Total_flux', units='Jy')
self.gresid_rms_def = Float(doc="Island rms in Gaussian residual image", colname='Resid_Isl_rms', units='Jy/beam')
self.gresid_mean_def = Float(doc="Island mean in Gaussian residual image", colname='Resid_Isl_mean', units='Jy/beam')
self.sresid_rms_def = Float(doc="Island rms in Shapelet residual image", colname='Resid_Isl_rms', units='Jy/beam')
self.sresid_mean_def = Float(doc="Island mean in Shapelet residual image", colname='Resid_Isl_mean', units='Jy/beam')
self.wave_rms_def = Float(doc="Island rms in wavelet image, Jy/beam", colname='Wave_Isl_rms', units='Jy/beam')
self.wave_mean_def = Float(doc="Island mean in wavelet image, Jy/beam", colname='Wave_Isl_mean', units='Jy/beam')
self.jlevel_def = Int(doc="Wavelet number to which Gaussian belongs", colname='Wave_id')
self.spec_indx_def = Float(doc="Spectral index", colname='Spec_Indx', units=None)
self.e_spec_indx_def = Float(doc="Error in spectral index", colname='E_Spec_Indx', units=None)
self.specin_flux_def = List(Float(), doc="Total flux density per channel, Jy", colname=['Total_flux'], units=['Jy'])
self.specin_fluxE_def = List(Float(), doc="Error in total flux density per channel, Jy", colname=['E_Total_flux'], units=['Jy'])
self.specin_freq_def = List(Float(), doc="Frequency per channel, Hz", colname=['Freq'], units=['Hz'])
use_wcs = True
self.gaussian_idx = g_idx
self.gaus_num = 0 # stored later
self.island_id = isl_idx
self.jlevel = img.j
self.flag = flg
self.parameters = gaussian
p = gaussian
self.peak_flux = p[0]
self.centre_pix = p[1:3]
size = p[3:6]
if func.approx_equal(size[0], img.pixel_beam()[0]*1.1) and \
func.approx_equal(size[1], img.pixel_beam()[1]) and \
func.approx_equal(size[2], img.pixel_beam()[2]+90.0) or \
img.opts.fix_to_beam:
# Check whether fitted Gaussian is just the distorted pixel beam given as an
# initial guess (always set to [bm_maj*1.1, bm_min, bm_pa+90]) or if size was
# fixed to the beam. If so, reset the size to the undistorted beam. Note:
# these are sigma sizes, not FWHM sizes.
size = img.pixel_beam()
size = (size[0], size[1], size[2]+90.0) # adjust angle so that corrected_size() works correctly
size = func.corrected_size(size) # gives fwhm and P.A.
self.size_pix = size # FWHM in pixels and P.A. CCW from +y axis
# Use img.orig_beam for flux calculation and deconvolution on wavelet
# images, as img.beam has been altered to match the wavelet scale.
# Note: these are all FWHM sizes.
if img.waveletimage:
bm_pix = N.array(img.beam2pix(img.orig_beam))
else:
bm_pix = N.array(img.beam2pix(img.beam))
# Calculate fluxes, sky sizes, etc. All sizes are FWHM.
tot = p[0]*size[0]*size[1]/(bm_pix[0]*bm_pix[1])
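        # Total flux follows from the ratio of Gaussian integrals when all
        # sizes are FWHM: tot = peak * (maj*min)/(bmaj*bmin). Illustrative
        # numbers: a 0.1 Jy/beam peak with twice the beam area gives 0.2 Jy.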
if flg == 0:
# These are good Gaussians
errors = func.get_errors(img, p+[tot], img.islands[isl_idx].rms, fixed_to_beam=img.opts.fix_to_beam)
self.centre_sky = img.pix2sky(p[1:3])
self.centre_skyE = img.pix2coord(errors[1:3], self.centre_pix, use_wcs=use_wcs)
self.size_sky = img.pix2gaus(size, self.centre_pix, use_wcs=use_wcs) # FWHM in degrees and P.A. east from north
self.size_sky_uncorr = img.pix2gaus(size, self.centre_pix, use_wcs=False) # FWHM in degrees and P.A. east from +y axis
self.size_skyE = img.pix2gaus(errors[3:6], self.centre_pix, use_wcs=use_wcs, is_error=True)
self.size_skyE_uncorr = img.pix2gaus(errors[3:6], self.centre_pix, use_wcs=False, is_error=True)
gaus_dc, err = func.deconv2(bm_pix, size)
self.deconv_size_sky = img.pix2gaus(gaus_dc, self.centre_pix, use_wcs=use_wcs)
self.deconv_size_sky_uncorr = img.pix2gaus(gaus_dc, self.centre_pix, use_wcs=False)
self.deconv_size_skyE = img.pix2gaus(errors[3:6], self.centre_pix, use_wcs=use_wcs, is_error=True)
self.deconv_size_skyE_uncorr = img.pix2gaus(errors[3:6], self.centre_pix, use_wcs=False, is_error=True)
else:
# These are flagged Gaussians, so don't calculate sky values or errors
errors = [0]*7
self.centre_sky = [0., 0.]
self.centre_skyE = [0., 0.]
self.size_sky = [0., 0., 0.]
self.size_sky_uncorr = [0., 0., 0.]
            self.size_skyE = [0., 0., 0.]
self.size_skyE_uncorr = [0., 0., 0.]
self.deconv_size_sky = [0., 0., 0.]
self.deconv_size_sky_uncorr = [0., 0., 0.]
self.deconv_size_skyE = [0., 0., 0.]
self.deconv_size_skyE_uncorr = [0., 0., 0.]
self.total_flux = tot
self.total_fluxE = errors[6]
self.peak_fluxE = errors[0]
self.centre_pixE = errors[1:3]
self.size_pixE = errors[3:6]
self.rms = img.islands[isl_idx].rms
self.mean = img.islands[isl_idx].mean
self.wave_rms = 0.0 # set if needed in the wavelet operation
self.wave_mean = 0.0 # set if needed in the wavelet operation
self.total_flux_isl = img.islands[isl_idx].total_flux
self.total_flux_islE = img.islands[isl_idx].total_fluxE
PyBDSF-1.11.0/bdsf/image.py 0000664 0000000 0000000 00000016566 14650706641 0015222 0 ustar 00root root 0000000 0000000 """Module image.
Instances of class Image are the primary data holders for all PyBDSF
operations. They store the image itself together with some meta-information
(such as headers), options for processing modules and all data generated during
processing. A few convenience methods are also defined here for interactive
use: to allow viewing and output of the most important data, to allow listing
and setting of options, and to allow re-processing of Images (these methods are
used by the interactive IPython shell made by pybdsf).
This module also defines class Op, which is used as a base class for all PyBDSF
operations.
"""
from __future__ import print_function
from __future__ import absolute_import
import numpy as N
from .opts import *
class Image(object):
"""Image is a primary data container for PyBDSF.
All the run-time data (such as image data, mask, etc.)
is stored here. A number of type-checked properties
are defined for the most basic image attributes, such
as image data, mask, header, user options.
To allow transparent caching of large image data to disk,
the image data must be stored in attributes ending in
"_arr". Additionally, setting subarrays does not work
using the attributes directly (e.g., img.ch0_arr[0:100,0:100]
= 0.0 will not work). Instead, set the subarray values then set
the attribute (e.g., ch0[0:100,0:100] = 0.0; img.ch0_arr = ch0).
There is little sense in declaring all possible attributes
right here as it will introduce unneeded dependencies
between modules, thus most other attributes (like island lists,
gaussian lists, etc) are inserted at run-time by the specific
PyBDSF modules.
"""
def __init__(self, opts):
self._prev_opts = None
self.extraparams = {}
self.masked = False
self.completed_Ops = []
self.waveletimage = False
self._pi = False
self.do_cache = False
self.bbspatchnum = 0
self.blankpix = 0
self.use_io = ''
self.j = 0
self.freq_pars = [0.0, 0.0, 0.0]
self.filename = ''
self.logfilename = ''
self.resid_gaus_arr = None
self._is_interactive_shell = False
self.opts = Opts(opts)
def __setstate__(self, state):
"""Needed for multiprocessing"""
self.thresh_pix = state['thresh_pix']
self.minpix_isl = state['minpix_isl']
self.clipped_mean = state['clipped_mean']
def __getstate__(self):
"""Needed for multiprocessing"""
state = {}
state['thresh_pix'] = self.thresh_pix
state['minpix_isl'] = self.minpix_isl
state['clipped_mean'] = self.clipped_mean
return state
def __getattribute__(self, name):
from . import functions as func
if name.endswith("_arr"):
if self.do_cache:
map_data = func.retrieve_map(self, name)
if map_data is not None:
return map_data
else:
return object.__getattribute__(self, name)
else:
return object.__getattribute__(self, name)
else:
return object.__getattribute__(self, name)
def __setattr__(self, name, value):
from . import functions as func
if hasattr(self, 'do_cache'):
if self.do_cache and name.endswith("_arr") and isinstance(value, N.ndarray):
func.store_map(self, name, value)
else:
super(Image, self).__setattr__(name, value)
else:
super(Image, self).__setattr__(name, value)
def __delattr__(self, name):
from . import functions as func
if self.do_cache and name.endswith("_arr"):
func.del_map(self, name)
else:
super(Image, self).__delattr__(name)
def get_map(self, map_name):
"""Returns requested map."""
from . import functions as func
if self.do_cache:
map_data = func.retrieve_map(self, map_name)
else:
map_data = getattr(self, map_name)
return map_data
def put_map(self, map_name, map_data):
"""Stores requested map."""
from . import functions as func
if self.do_cache:
func.store_map(self, map_name, map_data)
else:
setattr(self, map_name, map_data)
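    # Sketch of the "_arr" convention described in the class docstring: to
    # modify a cached image array, edit it and then reassign the attribute so
    # that __setattr__ can store it again:
    #
    #   ch0 = img.ch0_arr
    #   ch0[0:100, 0:100] = 0.0
    #   img.ch0_arr = ch0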
def list_pars(self):
"""List parameter values."""
from . import interface
interface.list_pars(self)
def set_pars(self, **kwargs):
"""Set parameter values."""
from . import interface
interface.set_pars(self, **kwargs)
def process(self, **kwargs):
"""Process Image object"""
from . import interface
success = interface.process(self, **kwargs)
return success
def save_pars(self, savefile=None):
"""Save parameter values."""
from . import interface
interface.save_pars(self, savefile)
def load_pars(self, loadfile=None):
"""Load parameter values."""
from . import interface
import os
if loadfile is None or loadfile == '':
loadfile = self.opts.filename + '.pybdsf.sav'
if os.path.exists(loadfile):
timg, err = interface.load_pars(loadfile)
if timg is not None:
orig_filename = self.opts.filename
self.opts = timg.opts
self.opts.filename = orig_filename # reset filename to original
else:
if self._is_interactive_shell:
print("\n\033[31;1mERROR\033[0m: '"+\
loadfile+"' is not a valid parameter save file.")
else:
raise RuntimeError(str(err))
else:
if self._is_interactive_shell:
print("\n\033[31;1mERROR\033[0m: File '"+\
loadfile+"' not found.")
else:
raise RuntimeError('File not found')
def show_fit(self, **kwargs):
"""Show results of the fit."""
from . import plotresults
if not hasattr(self, 'nisl'):
print('Image has not been processed. Please run process_image first.')
return False
plotresults.plotresults(self, **kwargs)
return True
def export_image(self, **kwargs):
"""Export an internal image to a file."""
from . import interface
try:
result = interface.export_image(self, **kwargs)
return result
except RuntimeError as err:
if self._is_interactive_shell:
print("\n\033[31;1mERROR\033[0m: " + str(err))
else:
raise RuntimeError(str(err))
def write_catalog(self, **kwargs):
"""Write the Gaussian, source, or shapelet list to a file"""
from . import interface
try:
result = interface.write_catalog(self, **kwargs)
return result
except RuntimeError as err:
if self._is_interactive_shell:
print("\n\033[31;1mERROR\033[0m: " + str(err))
else:
raise RuntimeError(str(err))
class Op(object):
"""Common base class for all PyBDSF operations.
At the moment this class is empty and only defines placeholder
for method __call__, which should be redefined in all derived
classes.
"""
def __call__(self, img):
raise NotImplementedError("This method should be redefined")
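# A minimal Op subclass follows the pattern used throughout PyBDSF (a sketch;
# 'Op_example' is a hypothetical name):
#
#   class Op_example(Op):
#       def __call__(self, img):
#           # ... operate on img ...
#           img.completed_Ops.append('example')
#           return img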
PyBDSF-1.11.0/bdsf/interface.py 0000664 0000000 0000000 00000134610 14650706641 0016067 0 ustar 00root root 0000000 0000000 """Interface module.
The interface module handles all functions typically needed by the user in an
interactive environment such as IPython. Many are also used by the
custom IPython shell defined in pybdsf.
"""
from __future__ import print_function
from __future__ import absolute_import
try:
# For Python 2, use raw_input() for input()
input = raw_input
except NameError:
pass
def process(img, **kwargs):
"""Find and measure sources in an image.
This function is used by process_image in __init__.py and by process_image
in pybdsf. It is also used as a method of the Image object in image.py
to allow reprocessing of existing Image objects with the command
img.process().
Any options given as keyword arguments will override existing ones stored
in img.opts.
"""
from . import default_chain, _run_op_list
from .image import Image
from . import mylogger
from .functions import set_up_output_paths
import os
# Start up logger. We need to initialize it each time process() is
# called, in case the quiet or debug options have changed
_, basedir = set_up_output_paths(img.opts)
basename = os.path.basename(img.opts.filename) + '.pybdsf.log'
logfilename = os.path.join(basedir, basename)
img.log = ''
mylogger.init_logger(logfilename, quiet=img.opts.quiet,
debug=img.opts.debug)
add_break_to_logfile(logfilename)
mylog = mylogger.logging.getLogger("PyBDSF.Process")
mylog.info("Processing "+img.opts.filename)
try:
# set options if given
if len(kwargs) > 0:
set_pars(img, **kwargs)
except RuntimeError as err:
# Catch and log error
mylog.error(str(err))
# Re-throw error if the user is not in the interactive shell
if img._is_interactive_shell:
return False
else:
raise
# Run all the op's
try:
# Run op's in chain
img, op_chain = get_op_chain(img)
if op_chain is not None:
_run_op_list(img, op_chain)
img._prev_opts = img.opts.to_dict()
return True
except RuntimeError as err:
# Catch and log error
mylog.error(str(err))
# Re-throw error if the user is not in the interactive shell
if img._is_interactive_shell:
return False
else:
raise
except KeyboardInterrupt:
mylogger.userinfo(mylog, "\n\033[31;1mAborted\033[0m")
if img._is_interactive_shell:
return False
else:
raise
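# Example of reprocessing an existing Image with overridden options (a sketch;
# any option accepted by set_pars can be passed as a keyword):
#
#   success = img.process(thresh_isl=4.0, thresh_pix=6.0)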
def get_op_chain(img):
"""Determines the optimal Op chain for an Image object.
This is useful when reprocessing an Image object. For example,
if Gaussians were already fit, but the user now wants to use
shapelets, we do not need to re-run Op_gausfit, etc.
Note that any new options added to opts.py should also be
added here. If not, a full reprocessing will be done if the
new option is changed.
"""
from . import default_chain
Op_chain = default_chain[:]
Op_names = ['readimage',
'collapse',
'preprocess',
'rmsimage',
'threshold',
'islands',
'gausfit',
'wavelet_atrous',
'shapelets',
'gaul2srl',
'spectralindex',
'polarisation',
'make_residimage',
'psf_vary',
'outlist',
'cleanup']
prev_opts = img._prev_opts
if prev_opts is None:
return img, default_chain
new_opts = img.opts.to_dict()
# Set the hidden options, which should include any option whose change
# should not trigger a process_image action
hidden_opts = img.opts.get_names(group='hidden')
hidden_opts.append('advanced_opts')
hidden_opts.append('flagging_opts')
hidden_opts.append('multichan_opts')
hidden_opts.append('output_opts')
# Define lists of options for each Op. Some of these can be defined
# using the "group" parameter of each option.
#
# Op_readimage()
readimage_opts = ['filename', 'beam', 'trim_box', 'frequency',
'beam_spectrum', 'frequency_sp']
# Op_collapse()
collapse_opts = img.opts.get_names(group='multichan_opts')
collapse_opts.append('polarisation_do')
collapse_opts += readimage_opts
# Op_preprocess()
preprocess_opts = ['kappa_clip', 'polarisation_do']
preprocess_opts += collapse_opts
# Op_rmsimage()
rmsimage_opts = ['rms_box', 'rms_box_bright', 'adaptive_rms_box',
'mean_map', 'rms_map', 'adaptive_thresh', 'rms_box_bright']
rmsimage_opts += preprocess_opts
# Op_threshold()
threshold_opts = ['thresh', 'thresh_pix', 'thresh_isl']
threshold_opts += rmsimage_opts
# Op_islands()
islands_opts = threshold_opts
islands_opts.append('minpix_isl')
# Op_gausfit()
gausfit_opts = ['verbose_fitting']
gausfit_opts += islands_opts
gausfit_opts += img.opts.get_names(group='flagging_opts')
# Op_wavelet_atrous()
wavelet_atrous_opts = img.opts.get_names(group='atrous_do')
wavelet_atrous_opts.append('atrous_do')
wavelet_atrous_opts += gausfit_opts
# Op_shapelets()
shapelets_opts = img.opts.get_names(group='shapelet_do')
shapelets_opts.append('shapelet_do')
shapelets_opts += islands_opts
# Op_gaul2srl()
gaul2srl_opts = ['group_tol', 'group_by_isl', 'group_method']
gaul2srl_opts += gausfit_opts
gaul2srl_opts += wavelet_atrous_opts
# Op_spectralindex()
spectralindex_opts = img.opts.get_names(group='spectralindex_do')
spectralindex_opts.append('spectralindex_do')
spectralindex_opts += gaul2srl_opts
# Op_polarisation()
polarisation_opts = img.opts.get_names(group='polarisation_do')
polarisation_opts.append('polarisation_do')
polarisation_opts += gaul2srl_opts
# Op_make_residimage()
make_residimage_opts = ['fittedimage_clip']
make_residimage_opts += gausfit_opts
make_residimage_opts += wavelet_atrous_opts
make_residimage_opts += shapelets_opts
# Op_psf_vary()
psf_vary_opts = img.opts.get_names(group='psf_vary_do')
psf_vary_opts.append('psf_vary_do')
psf_vary_opts += gaul2srl_opts
# Op_outlist() and Op_cleanup() are always done.
# Find whether new opts differ from previous opts (and are not hidden
# opts, which should not be checked). If so, found = True and we reset
# the relevant image parameters and add the relevant Op to the Op_chain.
re_run = False
found = False
for k, v in prev_opts.items():
if v != new_opts[k] and k not in hidden_opts:
re_run = True
if k in readimage_opts:
if hasattr(img, 'use_io'): del img.use_io
if hasattr(img, 'image_arr'): del img.image_arr
while 'readimage' in img.completed_Ops:
img.completed_Ops.remove('readimage')
found = True
if k in collapse_opts:
if hasattr(img, 'mask_arr'): del img.mask_arr
if hasattr(img, 'ch0_arr'): del img.ch0_arr
while 'collapse' in img.completed_Ops:
img.completed_Ops.remove('collapse')
found = True
if k in preprocess_opts:
while 'preprocess' in img.completed_Ops:
img.completed_Ops.remove('preprocess')
found = True
if k in rmsimage_opts:
if hasattr(img, 'rms_arr'): del img.rms_arr
if hasattr(img, 'mean_arr'): del img.mean_arr
if hasattr(img, 'rms_Q_arr'): del img.rms_Q_arr
if hasattr(img, 'mean_Q_arr'): del img.mean_Q_arr
if hasattr(img, 'rms_U_arr'): del img.rms_U_arr
if hasattr(img, 'mean_U_arr'): del img.mean_U_arr
if hasattr(img, 'rms_V_arr'): del img.rms_V_arr
if hasattr(img, 'mean_V_arr'): del img.mean_V_arr
if hasattr(img, '_adapt_rms_isl_pos'): del img._adapt_rms_isl_pos
while 'rmsimage' in img.completed_Ops:
img.completed_Ops.remove('rmsimage')
found = True
if k in threshold_opts:
while 'threshold' in img.completed_Ops:
img.completed_Ops.remove('threshold')
found = True
if k in islands_opts:
if hasattr(img, 'islands'): del img.islands
while 'islands' in img.completed_Ops:
img.completed_Ops.remove('islands')
found = True
if k in gausfit_opts:
if hasattr(img, 'sources'): del img.sources
if hasattr(img, 'dsources'): del img.dsources
if hasattr(img, 'gaussians'): del img.gaussians
while 'gausfit' in img.completed_Ops:
img.completed_Ops.remove('gausfit')
found = True
if k in wavelet_atrous_opts:
if hasattr(img, 'atrous_gaussians'): del img.atrous_gaussians
if hasattr(img, 'islands'): del img.islands
if hasattr(img, 'sources'): del img.sources
if hasattr(img, 'dsources'): del img.dsources
if hasattr(img, 'gaussians'): del img.gaussians
while 'islands' in img.completed_Ops:
img.completed_Ops.remove('islands')
while 'gausfit' in img.completed_Ops:
img.completed_Ops.remove('gausfit')
while 'wavelet_atrous' in img.completed_Ops:
img.completed_Ops.remove('wavelet_atrous')
found = True
if k in shapelets_opts:
while 'shapelets' in img.completed_Ops:
img.completed_Ops.remove('shapelets')
found = True
if k in gaul2srl_opts:
while 'gaul2srl' in img.completed_Ops:
img.completed_Ops.remove('gaul2srl')
found = True
if k in spectralindex_opts:
while 'spectralindex' in img.completed_Ops:
img.completed_Ops.remove('spectralindex')
found = True
if k in polarisation_opts:
while 'polarisation' in img.completed_Ops:
img.completed_Ops.remove('polarisation')
found = True
if k in make_residimage_opts:
if hasattr(img, 'resid_gaus_arr'):
del img.resid_gaus_arr
img.resid_gaus_arr = None # set to init state
if hasattr(img, 'model_gaus_arr'): del img.model_gaus_arr
if hasattr(img, 'resid_shap_arr'): del img.resid_shap_arr
if hasattr(img, 'model_shap_arr'): del img.model_shap_arr
while 'make_residimage' in img.completed_Ops:
img.completed_Ops.remove('make_residimage')
found = True
if k in psf_vary_opts:
while 'psf_vary' in img.completed_Ops:
img.completed_Ops.remove('psf_vary')
found = True
if not found:
break
# If nothing has changed, ask if user wants to re-run
if not found and not re_run:
prompt = "Analysis appears to be up-to-date. Force reprocessing (y/n)? "
answ = raw_input_no_history(prompt)
while answ.lower() not in ['y', 'n', 'yes', 'no']:
answ = raw_input_no_history(prompt)
if answ.lower() in ['y', 'yes']:
re_run = True # Force re-run
else:
return img, None
# If a changed option is not in any of the above lists,
# force a re-run of all Ops.
if not found:
img.completed_Ops = []
if hasattr(img, 'use_io'): del img.use_io
if hasattr(img, 'image_arr'): del img.image_arr
if hasattr(img, 'mask_arr'): del img.mask_arr
if hasattr(img, 'ch0_arr'): del img.ch0_arr
if hasattr(img, 'rms_arr'): del img.rms_arr
if hasattr(img, 'mean_arr'): del img.mean_arr
if hasattr(img, 'rms_Q_arr'): del img.rms_Q_arr
if hasattr(img, 'mean_Q_arr'): del img.mean_Q_arr
if hasattr(img, 'rms_U_arr'): del img.rms_U_arr
if hasattr(img, 'mean_U_arr'): del img.mean_U_arr
if hasattr(img, 'rms_V_arr'): del img.rms_V_arr
if hasattr(img, 'mean_V_arr'): del img.mean_V_arr
if hasattr(img, 'islands'): del img.islands
if hasattr(img, 'sources'): del img.sources
if hasattr(img, 'dsources'): del img.dsources
if hasattr(img, 'gaussians'): del img.gaussians
if hasattr(img, 'atrous_gaussians'): del img.atrous_gaussians
if hasattr(img, 'resid_gaus_arr'): del img.resid_gaus_arr
if hasattr(img, 'model_gaus_arr'): del img.model_gaus_arr
if hasattr(img, 'resid_shap_arr'): del img.resid_shap_arr
if hasattr(img, 'model_shap_arr'): del img.model_shap_arr
if hasattr(img, '_adapt_rms_isl_pos'): del img._adapt_rms_isl_pos
return img, Op_chain
while 'outlist' in img.completed_Ops:
img.completed_Ops.remove('outlist')
while 'cleanup' in img.completed_Ops:
img.completed_Ops.remove('cleanup')
for completed_Op in img.completed_Ops:
if completed_Op in Op_names:
Op_indx = Op_names.index(completed_Op)
Op_names.pop(Op_indx)
Op_chain.pop(Op_indx)
return img, Op_chain
def load_pars(filename):
"""Load parameters from a save file or dictionary.
If a file is given, it must be a pickled opts dictionary.
filename - name of options file to load or a dictionary of opts.
Returns None (and original error) if no file can be loaded successfully.
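Example (illustrative file name):
>>> timg, err = load_pars('image.fits.pybdsf.sav')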
"""
from .image import Image
from . import mylogger
try:
import cPickle as pickle
except ImportError:
import pickle
# First, check if input is a dictionary
if isinstance(filename, dict):
timg = Image(filename)
return timg, None
else:
try:
pkl_file = open(filename, 'rb')
pars = pickle.load(pkl_file)
pkl_file.close()
timg = Image(pars)
print("--> Loaded parameters from file '" + filename + "'.")
return timg, None
except Exception as err:
return None, err
def save_pars(img, savefile=None, quiet=False):
"""Save parameters to a file.
The save file is a "pickled" opts dictionary.
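Example (illustrative file name): save_pars(img) writes
'<filename>.pybdsf.sav' to the output directory, while
save_pars(img, 'run1.sav') writes to 'run1.sav'.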
"""
try:
import cPickle as pickle
except ImportError:
import pickle
from . import tc
import os  # needed for os.path below
import sys
if savefile is None or savefile == '':
basename = os.path.basename(img.opts.filename) + '.pybdsf.sav'
savefile = os.path.join(img.basedir, basename)
# convert opts to dictionary
pars = img.opts.to_dict()
output = open(savefile, 'wb')
pickle.dump(pars, output, protocol=0)
output.close()
if not quiet:
print("--> Saved parameters to file '" + savefile + "'.")
def list_pars(img, opts_list=None, banner=None, use_groups=True):
"""Lists all parameters for the Image object.
opts_list - a list of the parameter names to list;
if None, all parameters are used.
banner - banner text to place at top of listing.
use_groups - whether to use the group information for each
parameter.
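Example (illustrative):
>>> list_pars(img, opts_list=['rms_box', 'rms_map'], banner='RMS options')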
"""
from . import tc
import sys
# Get all options as a list sorted by name
opts = img.opts.to_list()
# Filter list
if opts_list is not None:
opts_temp = []
for o in opts:
if o[0] in opts_list:
opts_temp.append(o)
opts = opts_temp
# Move filename, infile, outfile to front of list
for o in opts[:]:  # iterate over a copy, since opts is modified below
if o[0] == 'filename' or o[0] == 'infile' or o[0] == 'outfile':
opts.remove(o)
opts.insert(0, o)
# Now group options with the same "group" together.
if use_groups:
opts = group_opts(opts)
# Finally, print options, values, and doc strings to screen
print_opts(opts, img, banner=banner)
def set_pars(img, **kwargs):
"""Set parameters using arguments instead of using a dictionary.
Allows partial names for parameters as long as they are unique. Parameters
are set to default values if par = ''.
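Examples (illustrative values; the partial name assumes 'thresh_i'
matches only the thresh_isl option):
>>> set_pars(img, thresh_isl=4.0)  # full name
>>> set_pars(img, thresh_i=4.0)    # partial name
>>> set_pars(img, thresh_isl='')   # reset to the default value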
"""
import re
import sys
from .image import Image
# Enumerate all options
opts = img.opts.get_names()
# Check that parameters are valid options and are unique
full_key = []
for i, key in enumerate(kwargs):
chk_key = checkpars(opts, key)
if chk_key == []:
raise RuntimeError("Input parameter '" + key + "' not recognized.")
if len(chk_key) > 1 and key not in opts:
raise RuntimeError("Input parameter '" + key + "' matches to more than one "\
"possible parameter:\n " + "\n ".join(chk_key))
if key in opts:
full_key.append(key)
else:
full_key.append(chk_key[0])
# Build options dictionary
pars = {}
for i, key in enumerate(kwargs):
if kwargs[key] == '':
temp_img = Image({'filename':''})
opt_names = temp_img.opts.get_names()
for k in opt_names:
if key == k:
kwargs[key] = temp_img.opts.__getattribute__(k)
pars.update({full_key[i]: kwargs[key]})
# Finally, set the options
img.opts.set_opts(pars)
def group_opts(opts):
"""Sorts options by group (as defined in opts.py).
Returns a list of options, with suboptions arranged in a list inside the
main list and directly following the main options. Options belonging to the
"hidden" group are excluded from the returned list (as defined in opts.py).
"""
groups = []
gp = []
for i in range(len(opts)):
grp = opts[i][1].group()
if grp is not None and grp not in groups:
groups.append(grp)
groups.sort()
# Now, make a list for each group with its options. Don't include
# "hidden" options, as they should never be seen by the user.
for g in groups:
g_list = []
for i in range(len(opts)):
if isinstance(opts[i], tuple):
if g == str(opts[i][1].group()):
g_list.append(opts[i])
for gs in g_list:
opts.remove(gs)
for i in range(len(opts)):
if g == str(opts[i][0]) and g != 'hidden':
opts.insert(i+1, g_list)
break
return opts
def print_opts(grouped_opts_list, img, banner=None):
"""Print options to screen.
Options can be sorted by group (defined in opts.py) previously defined by
group_opts. Output of grouped items is suppressed if parent option is
False. The layout is as follows:
[20 spaces par name with ...] = [at least 49 spaces for value]
[at least 49 spaces for doc]
When more than one line is required for the doc, the next line is:
[25 blank spaces][at least 47 spaces for doc]
As in casapy, print non-defaults in blue, options with suboptions in
the '47m' (light-background) color, and suboptions in green. Option
values are printed in bold to help distinguish them from the
descriptions. NOTE: in iTerm, one needs to set the bold color in the
profiles to white, as it defaults to red, which is a bit hard on the
eyes in this case.
"""
from .image import Image
import os
from . import functions as func
termy, termx = func.getTerminalSize() # note: returns row, col -> y, x
minwidth = 28 # minimum width for parameter names and values
# Define colors for output
dc = '\033[1;34m' # Blue: non-default option text color
ec = '\033[0;47m' # expandable option text color
sc = '\033[0;32m' # Green: suboption text color
nc = '\033[0m' # normal text color
ncb = '\033[1m' # normal text color bold
if banner is not None:
print(banner)
spcstr = ' ' * minwidth # spaces string for second or later lines
infix = nc + ': ' + nc # infix character used to separate values from comments
print('=' * termx) # division string for top of parameter listing
for indx, o in enumerate(grouped_opts_list):
if isinstance(o, tuple):
# Print main options, which are always tuples, before printing
# suboptions (if any).
k = o[0]
v = o[1]
val = img.opts.__getattribute__(k)
v1 = v2 = ''
if val == v._default:
# value is default
v1 = ncb
v2 = nc
else:
# value is non-default
v1 = dc
v2 = nc
if isinstance(val, str):
valstr = v1 + repr(val) + v2
if k == 'filename':
# Since we can check whether filename is valid,
# do so here and print in red if not.
if not os.path.exists(val):
valstr = '\033[31;1m' + repr(val) + nc
width_par_val = max(minwidth, len(k) + len(str(val)) + 5)
else:
if isinstance(val, float):
val = round_float(val)
if isinstance(val, tuple):
val = round_tuple(val)
valstr = v1 + str(val) + v2
width_par_val = max(minwidth, len(k) + len(str(val)) + 4)
width_desc = max(termx - width_par_val - 3, 44)
# Get the option description text from the doc string, which
# is defined in opts.py. By convention, print_opts will only
# show the short description; help('option_name') will
# print both the short and long description. The versions
# are separated in the doc string by '\n', which is split
# on here:
desc_text = wrap(str(v.doc()).split('\n')[0], width_desc)
fmt = '%' + str(minwidth) + 's' + infix + '%44s'
# Now loop over lines of description
if indx < len(grouped_opts_list)-1:
# Here we check if next entry in options list is a tuple or a
# list. If it is a list, then the current option has
# suboptions and should be in the ec color. Since we check the
# next option, we can't do this if we let indx go to the end.
if isinstance(grouped_opts_list[indx+1], tuple):
parvalstr = nc + k + nc + ' ..'
else:
parvalstr = ec + k + nc + ' ..'
else:
# Since this is the last entry in the options list and is a
# tuple, it cannot be an expandable option, so make it nc color
parvalstr = nc + k + nc + ' ..'
if "'" in valstr:
len_without_formatting = len(k) + len(str(val)) + 5
else:
len_without_formatting = len(k) + len(str(val)) + 4
for i in range(len_without_formatting, minwidth):
parvalstr += '.'
parvalstr += ' ' + valstr
if "'" not in valstr:
parvalstr += ' '
for dt_indx, dt in enumerate(desc_text):
if dt_indx == 0:
print(fmt % (parvalstr.ljust(minwidth), dt.ljust(44)))
else:
print(nc + spcstr + ' %44s' % dt.ljust(44))
else:
# Print suboptions, indented 2 spaces from main options in sc color
parent_opt = grouped_opts_list[indx-1]
parent_val = img.opts.__getattribute__(parent_opt[0])
if parent_val == True:
for og in o:
k = og[0]
v = og[1]
val = img.opts.__getattribute__(k)
v1 = v2 = ''
if val == v._default:
# value is default
v1 = ncb
v2 = nc
else:
# value is non-default
v1 = dc
v2 = nc
if isinstance(val, str):
valstr = v1 + repr(val) + v2
width_par_val = max(minwidth, len(k) + len(str(val)) + 7)
else:
if isinstance(val, float):
val = round_float(val)
if k == 'beam_spectrum' and val is not None:
val = round_list_of_tuples(val)
if k == 'frequency_sp' and val is not None:
val = round_list(val)
valstr = v1 + str(val) + v2
width_par_val = max(minwidth, len(k) + len(str(val)) + 6)
width_desc = max(termx - width_par_val - 3, 44)
desc_text = wrap(str(v.doc()).split('\n')[0], width_desc)
fmt = ' ' + '%' + str(minwidth) + 's' + infix + '%44s'
parvalstr = sc + k + nc + ' ..'
if "'" in valstr:
len_without_formatting = len(k) + len(str(val)) + 7
else:
len_without_formatting = len(k) + len(str(val)) + 6
for i in range(len_without_formatting, minwidth):
parvalstr += '.'
parvalstr += ' ' + valstr
if "'" not in valstr:
parvalstr += ' '
for dt_indx, dt in enumerate(desc_text):
if dt_indx == 0:
print(fmt % (parvalstr.ljust(minwidth-2), dt.ljust(44)))
else:
print(nc + spcstr + ' %44s' % dt.ljust(44))
def wrap(text, width=80):
"""Wraps text to given width and returns list of lines."""
lines = []
for paragraph in text.split('\n'):
line = []
len_line = 0
for word in paragraph.split(' '):
word = word.strip()
len_word = len(word)
if len_line + len_word <= width:
line.append(word)
len_line += len_word + 1
else:
lines.append(' '.join(line))
line = [word]
len_line = len_word + 1
lines.append(' '.join(line))
return lines
def checkpars(lines, regex):
"""Checks that parameters are unique"""
import re
result = []
for l in lines:
match = re.match(regex,l)
if match:
result += [l]
return result
def in_ipython():
"""Checks if interpreter is IPython."""
try:
__IPYTHON__
except NameError:
return False
else:
return True
def raw_input_no_history(prompt):
"""Removes user input from readline history."""
import readline
userinput = input(prompt)
if userinput != '':
readline.remove_history_item(readline.get_current_history_length()-1)
return userinput
# The following functions just make the printing of
# parameters look better
def round_tuple(val):
valstr_list = []
for v in val:
vstr = '%s' % (round(v, 5))
if len(vstr) > 7:
vstr = '%.5f' % (v,)
valstr_list.append(vstr)
valstr = '(' + ','.join(valstr_list) + ')'
return valstr
def round_float(val):
vstr = '%s' % (round(val, 5))
if len(vstr) > 7 and val < 1e3:
vstr = '%.5f' % (val,)
elif len(vstr) > 7 and val >= 1e3:
vstr = '%.2e' % (val,)
return vstr
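# e.g. round_float(0.123456789) -> '0.12346'; round_float(12345.678) -> '1.23e+04'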
def round_list(val):
valstr_list = []
for v in val:
valstr_list.append('%.2e' % (v,))
valstr = '[' + ','.join(valstr_list) + ']'
return valstr
def round_list_of_tuples(val):
valstr_list_tot = []
for l in val:
valstr_list = []  # reset for each tuple so earlier values do not accumulate
for v in l:
vstr = '%s' % (round(v, 5))
if len(vstr) > 7:
vstr = '%.5f' % (v,)
valstr_list.append(vstr)
valstr = '(' + ','.join(valstr_list) + ')'
valstr_list_tot.append(valstr)
valstr = '[' + ','.join(valstr_list_tot) + ']'
return valstr
# The following functions give convenient access to the output functions in
# output.py
def export_image(img, outfile=None, img_format='fits', pad_image = False,
img_type='gaus_resid', mask_dilation=0, clobber=False):
"""Write an image to a file. Returns True if successful, False if not.
outfile - name of resulting file; if None, file is
named automatically.
img_type - type of image to export; see below
img_format - format of resulting file: 'fits' or 'casa'
pad_image - pad image to the original size before writing?
mask_dilation - number of dilation iterations to apply to
the island mask (img_type='island_mask' only)
clobber - overwrite existing file?
The following images may be exported:
'ch0' - image used for source detection
'rms' - rms map image
'mean' - mean map image
'pi' - polarized intensity image
'gaus_resid' - Gaussian model residual image
'gaus_model' - Gaussian model image
'shap_resid' - Shapelet model residual image
'shap_model' - Shapelet model image
'psf_major' - PSF major axis FWHM image (FWHM in arcsec)
'psf_minor' - PSF minor axis FWHM image (FWHM in arcsec)
'psf_pa' - PSF position angle image (degrees east of north)
'psf_ratio' - PSF peak-to-total flux ratio (in units of 1/beam)
'psf_ratio_aper' - PSF peak-to-aperture flux ratio (in units of 1/beam)
'island_mask' - Island mask image (0 = outside island, 1 = inside island)
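Example (illustrative file name):
>>> export_image(img, img_type='island_mask', img_format='fits',
... outfile='mask.fits', mask_dilation=2, clobber=True)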
"""
import os
from . import functions as func
from .const import fwsig
from . import mylogger
mylog = mylogger.logging.getLogger("PyBDSF."+img.log+"ExportImage")
# First some checking:
if not 'gausfit' in img.completed_Ops and 'gaus' in img_type:
print('\033[91mERROR\033[0m: Gaussians have not been fit. Please run process_image first.')
return False
elif not 'shapelets' in img.completed_Ops and 'shap' in img_type:
print('\033[91mERROR\033[0m: Shapelets have not been fit. Please run process_image first.')
return False
elif not 'polarisation' in img.completed_Ops and 'pi' in img_type:
print('\033[91mERROR\033[0m: Polarization properties have not been calculated. Please run process_image first.')
return False
elif not 'psf_vary' in img.completed_Ops and 'psf' in img_type:
print('\033[91mERROR\033[0m: PSF variations have not been calculated. Please run process_image first.')
return False
elif not 'collapse' in img.completed_Ops and 'ch0' in img_type:
print('\033[91mERROR\033[0m: ch0 image has not been calculated. Please run process_image first.')
return False
elif not 'rmsimage' in img.completed_Ops and ('rms' in img_type or 'mean' in img_type):
print('\033[91mERROR\033[0m: Mean and rms maps have not been calculated. Please run process_image first.')
return False
elif not 'make_residimage' in img.completed_Ops and ('resid' in img_type or 'model' in img_type):
print('\033[91mERROR\033[0m: Residual and model maps have not been calculated. Please run process_image first.')
return False
format = img_format.lower()
if format not in ['fits', 'casa']:
print('\033[91mERROR\033[0m: img_format must be "fits" or "casa"')
return False
filename = outfile
if filename is None or filename == '':
filename = img.imagename + '_' + img_type + '.' + format
if os.path.exists(filename) and not clobber:
print('\033[91mERROR\033[0m: File exists and clobber = False.')
return False
if format == 'fits':
use_io = 'fits'
if format == 'casa':
use_io = 'rap'
bdir = ''
try:
if img_type == 'ch0':
func.write_image_to_file(use_io, filename,
img.ch0_arr, img, bdir, pad_image,
clobber=clobber)
elif img_type == 'rms':
func.write_image_to_file(use_io, filename,
img.rms_arr, img, bdir, pad_image,
clobber=clobber)
elif img_type == 'mean':
func.write_image_to_file(use_io, filename,
img.mean_arr, img, bdir, pad_image,
clobber=clobber)
elif img_type == 'pi':
func.write_image_to_file(use_io, filename,
img.ch0_pi_arr, img, bdir, pad_image,
clobber=clobber)
elif img_type == 'psf_major':
func.write_image_to_file(use_io, filename,
img.psf_vary_maj_arr*fwsig, img, bdir, pad_image,
clobber=clobber)
elif img_type == 'psf_minor':
func.write_image_to_file(use_io, filename,
img.psf_vary_min_arr*fwsig, img, bdir, pad_image,
clobber=clobber)
elif img_type == 'psf_pa':
func.write_image_to_file(use_io, filename,
img.psf_vary_pa_arr, img, bdir, pad_image,
clobber=clobber)
elif img_type == 'psf_ratio':
func.write_image_to_file(use_io, filename,
img.psf_vary_ratio_arr, img, bdir, pad_image,
clobber=clobber)
elif img_type == 'psf_ratio_aper':
func.write_image_to_file(use_io, filename,
img.psf_vary_ratio_aper_arr, img, bdir, pad_image,
clobber=clobber)
elif img_type == 'gaus_resid':
im = img.resid_gaus_arr
func.write_image_to_file(use_io, filename,
im, img, bdir, pad_image,
clobber=clobber)
elif img_type == 'gaus_model':
im = img.model_gaus_arr
func.write_image_to_file(use_io, filename,
im, img, bdir, pad_image,
clobber=clobber)
elif img_type == 'shap_resid':
func.write_image_to_file(use_io, filename,
img.resid_shap_arr, img, bdir, pad_image,
clobber=clobber)
elif img_type == 'shap_model':
func.write_image_to_file(use_io, filename,
img.model_shap_arr, img, bdir, pad_image,
clobber=clobber)
elif img_type == 'island_mask':
import numpy as N
import scipy.ndimage as nd
island_mask_bool = img.pyrank + 1 > 0
if mask_dilation > 0:
# Dilate the mask by specified number of iterations
island_mask_bool = nd.binary_dilation(island_mask_bool,
iterations=mask_dilation)
# Perform a binary closing to remove small holes/gaps. The
# structure array is chosen to be about the size of the
# beam (assuming a normally sampled psf), so that holes/gaps
# smaller than the beam are removed.
pbeam = int(round(img.beam2pix(img.beam)[0] * 1.5))
island_mask_bool = nd.binary_closing(island_mask_bool,
structure=N.ones((pbeam, pbeam)))
# Check for telescope, needed for CASA clean masks
if img._telescope is None:
print('\033[91mWARNING\033[0m: Telescope is unknown. Mask may not work correctly in CASA.')
island_mask = N.array(island_mask_bool, dtype=N.float32)
func.write_image_to_file(use_io, filename,
island_mask, img, bdir, pad_image,
clobber=clobber, is_mask=True)
else:
print("\n\033[91mERROR\033[0m: img_type not recognized.")
return False
if filename == 'SAMP':
print('--> Image sent to SAMP hub')
else:
print('--> Wrote file ' + repr(filename))
if use_io == 'rap':
# remove the temporary fits file used as a casacore template
import os
os.remove(filename+'.fits')
return True
except RuntimeError as err:
# Catch and log error
mylog.error(str(err))
# Re-throw error if the user is not in the interactive shell
if img._is_interactive_shell:
return False
else:
raise
except KeyboardInterrupt:
mylogger.userinfo(mylog, "\n\033[31;1mAborted\033[0m")
if img._is_interactive_shell:
return False
else:
raise
def write_catalog(img, outfile=None, format='bbs', srcroot=None, catalog_type='gaul',
bbs_patches=None, incl_chan=False, incl_empty=False, clobber=False,
force_output=False, correct_proj=True, bbs_patches_mask=None):
"""Write the Gaussian, source, or shapelet list to a file. Returns True if
successful, False if not.
outfile - name of resulting file; if None, file is
named automatically. If 'SAMP', the table is sent to a
SAMP hub (which must already be running).
catalog_type - type of catalog
"gaul" - Gaussian list
"srl" - Source list
"shap" - Shapelet list ("fits" format only)
format - format of output list. Supported formats are:
"fits" - FITS binary table
"ascii" - ASCII text file
"bbs" - BBS sky model (Gaussian list only)
"ds9" - ds9 region file
"star" - AIPS STAR file (Gaussian list only)
"kvis" - kvis file (Gaussian list only)
"sagecal" - SAGECAL file (Gaussian list only)
srcroot - root for source and patch names (BBS/ds9 only);
if None, the srcroot is chosen automatically
bbs_patches - type of patches to use:
None - no patches
"gaussian" - each Gaussian gets its own patch
"single" - all Gaussians are put into a single
patch
"source" - sources are grouped by source into patches
"mask" - use a Boolean mask to define the patches
bbs_patches_mask - file name of mask file if bbs_patches="mask"
incl_chan - Include fluxes for each channel?
incl_empty - Include islands without any valid Gaussians (source list only)?
force_output - Force the creation of a catalog, even if it is empty
correct_proj - Correct source parameters for image projection effects (BBS only)?
clobber - Overwrite existing file?
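Example (illustrative file name):
>>> write_catalog(img, outfile='catalog.fits', format='fits',
... catalog_type='srl', clobber=True)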
"""
from . import output
# First some checking:
if not 'gausfit' in img.completed_Ops:
print('\033[91mERROR\033[0m: Image has not been fit. Please run process_image first.')
return False
if catalog_type == 'shap' and not 'shapelets' in img.completed_Ops:
print('\033[91mERROR\033[0m: Image has not been decomposed into shapelets. Please run process_image first.')
return False
if catalog_type == 'srl' and not 'gaul2srl' in img.completed_Ops:
print('\033[91mERROR\033[0m: Gaussians have not been grouped into sources. Please run process_image first.')
return False
format = format.lower()
patch = bbs_patches
filename = outfile
if isinstance(patch, str):
patch = patch.lower()
if format not in ['fits', 'ascii', 'bbs', 'ds9', 'star',
'kvis', 'sagecal', 'csv', 'casabox']:
print('\033[91mERROR\033[0m: format must be "fits", '\
'"ascii", "ds9", "star", "kvis", "csv", "casabox", or "bbs"')
return False
if patch not in [None, 'gaussian', 'single', 'source', 'mask']:
print('\033[91mERROR\033[0m: patch must be None, '\
'"gaussian", "source", "single", or "mask"')
return False
if patch == 'mask':
if bbs_patches_mask is None:
print('\033[91mERROR\033[0m: if patch is "mask", bbs_patches_mask must be set to the file name of the mask file')
return False
if catalog_type not in ['gaul', 'srl', 'shap']:
print('\033[91mERROR\033[0m: catalog_type must be "gaul", '\
'"srl", or "shap"')
return False
if catalog_type == 'shap' and format != 'fits':
print("\033[91mERROR\033[0m: Only format = 'fits' is supported with shapelet output.")
return False
if (len(img.sources) == 0 and not incl_empty) or (len(img.sources) == 0 and len(img.dsources) == 0 and incl_empty):
if not force_output:
print('No sources were found in the image. Output file not written.')
return False
if filename == '':
filename = None
# Now go format by format and call appropriate function
if filename == 'samp' or filename == 'SAMP':
import tempfile
from . import functions as func
import os
if not hasattr(img,'samp_client'):
s, private_key = func.start_samp_proxy()
img.samp_client = s
img.samp_key = private_key
# Broadcast fits table to SAMP Hub
tfile = tempfile.NamedTemporaryFile(delete=False)
filename = output.write_fits_list(img, filename=tfile.name,
incl_chan=incl_chan, incl_empty=incl_empty,
clobber=True, objtype=catalog_type)
table_name = 'PyBDSF '+ catalog_type + ' table'
if catalog_type == 'srl':
img.samp_srl_table_url = 'file://' + os.path.abspath(tfile.name)
if catalog_type == 'gaul':
img.samp_gaul_table_url = 'file://' + os.path.abspath(tfile.name)
func.send_fits_table(img.samp_client, img.samp_key, table_name, tfile.name)
print('--> Table sent to SAMP hub')
return True
if format == 'fits':
filename = output.write_fits_list(img, filename=filename,
incl_chan=incl_chan, incl_empty=incl_empty,
clobber=clobber, objtype=catalog_type)
if filename is None:
print('\033[91mERROR\033[0m: File exists and clobber = False.')
return False
else:
print('--> Wrote FITS file ' + repr(filename))
return True
if format == 'ascii' or format == 'csv':
filename = output.write_ascii_list(img, filename=filename,
incl_chan=incl_chan, incl_empty=incl_empty,
sort_by='index', format = format,
clobber=clobber, objtype=catalog_type)
if filename is None:
print('\033[91mERROR\033[0m: File exists and clobber = False.')
return False
else:
print('--> Wrote ASCII file ' + repr(filename))
return True
if format == 'bbs':
if catalog_type != 'gaul':
print("\033[91mERROR\033[0m: Only catalog_type = 'gaul' is supported with BBS files.")
return False
filename = output.write_bbs_gaul(img, filename=filename,
srcroot=srcroot, incl_empty=incl_empty,
patch=patch, correct_proj=correct_proj,
sort_by='flux',
clobber=clobber)
if filename is None:
print('\033[91mERROR\033[0m: File exists and clobber = False.')
return False
else:
print('--> Wrote BBS sky model ' + repr(filename))
return True
if format == 'sagecal':
if catalog_type != 'gaul':
print("\033[91mERROR\033[0m: Only catalog_type = 'gaul' is supported with Sagecal files.")
return False
filename = output.write_lsm_gaul(img, filename=filename,
srcroot=srcroot, incl_empty=incl_empty,
patch=patch,
sort_by='flux',
clobber=clobber)
if filename is None:
print('\033[91mERROR\033[0m: File exists and clobber = False.')
return False
else:
print('--> Wrote Sagecal lsm file ' + repr(filename))
return True
if format == 'ds9':
filename = output.write_ds9_list(img, filename=filename,
srcroot=srcroot, incl_empty=incl_empty,
clobber=clobber, objtype=catalog_type)
if filename is None:
print('\033[91mERROR\033[0m: File exists and clobber = False.')
return False
else:
print('--> Wrote ds9 region file ' + repr(filename))
return True
if format == 'star':
if catalog_type != 'gaul':
print("\033[91mERROR\033[0m: Only catalog_type = 'gaul' is supported with star files.")
return False
filename = output.write_star(img, filename=filename,
clobber=clobber)
if filename is None:
print('\033[91mERROR\033[0m: File exists and clobber = False.')
return False
else:
print('--> Wrote AIPS STAR file ' + repr(filename))
return True
if format == 'kvis':
if catalog_type != 'gaul':
print("\033[91mERROR\033[0m: Only catalog_type = 'gaul' is supported with kvis files.")
return False
filename = output.write_kvis_ann(img, filename=filename,
clobber=clobber)
if filename is None:
print('\033[91mERROR\033[0m: File exists and clobber=False.')
return False
else:
print('--> Wrote kvis file ' + repr(filename))
return True
if format == 'casabox':
filename = output.write_casa_gaul(img, filename=filename,
incl_empty=incl_empty, clobber=clobber)
if filename is None:
print('\033[91mERROR\033[0m: File exists and clobber=False.')
return False
else:
print('--> Wrote CASA clean box file ' + filename)
return True
def add_break_to_logfile(logfile):
with open(logfile, 'a') as f:
f.write('\n' + '='*72 + '\n')
PyBDSF-1.11.0/bdsf/islands.py 0000664 0000000 0000000 00000043235 14650706641 0015566 0 ustar 00root root 0000000 0000000 """Module islands.
Defines operation Op_islands which does island detection.
Current implementation uses scipy.ndimage operations for island detection.
While it's implemented to work for images of arbitrary dimensionality,
the bug in the current version of scipy (0.6) often causes crashes
(or just wrong results) for 3D inputs.
If this (scipy.ndimage.label) isn't fixed by the time we need 3D source
extraction, one will have to adopt my old pixel-runs algorithm for 3D data.
Check out islands.py rev. 1362 from repository for it.
"""
from __future__ import absolute_import
from __future__ import division
import numpy as N
import scipy.ndimage as nd
from .image import *
from . import mylogger
from . import functions as func
from .output import write_islands
from .readimage import Op_readimage
from .preprocess import Op_preprocess
from .rmsimage import Op_rmsimage
from .threshold import Op_threshold
from .collapse import Op_collapse
import os
class Op_islands(Op):
"""Detect islands of emission in the image
All detected islands are stored in the list img.islands,
where each individual island is represented as an instance
of class Island.
The option to detect islands on a different "detection"
image is also available. This option is useful for example
when a primary beam correction is used -- it is generally
better to detect sources on the uncorrected image, but
to measure them on the corrected image.
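For example (illustrative file names):
img = bdsf.process_image('pbcor_image.fits',
detection_image='flat_noise_image.fits')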
Prerequisites: module rmsimage should be run first.
"""
def __call__(self, img):
mylog = mylogger.logging.getLogger("PyBDSM."+img.log+"Islands")
opts = img.opts
minsize = opts.minpix_isl
if minsize is None:
minsize = int(img.pixel_beamarea()/3.0) # 1/3 of beam area in pixels
if minsize < 6:
minsize = 6 # Need at least 6 pixels to obtain good fits
mylogger.userinfo(mylog, "Minimum number of pixels per island", '%i' %
minsize)
img.minpix_isl = minsize
maxsize = opts.maxpix_isl
if maxsize is None:
maxsize = N.inf
img.maxpix_isl = maxsize
if opts.detection_image != '':
# Use a different image for island detection. The detection
# image and the measurement image must have the same shape
# and be registered. Otherwise, one could reproject the
# detection image using, e.g., the Kapteyn package.
#
# First, set up an Image object and run a limited
# op_chain.
from . import _run_op_list
mylogger.userinfo(mylog, "\nDetermining islands from detection image")
det_chain, det_opts = self.setpara_bdsm(img, opts.detection_image)
det_img = Image(det_opts)
det_img.log = 'Detection image'
success = _run_op_list(det_img, det_chain)
if not success:
return
# Check that the ch0 images are the same size
ch0_map = img.ch0_arr
det_ch0_map = det_img.ch0_arr
det_shape = det_ch0_map.shape
ch0_shape = ch0_map.shape
if det_shape != ch0_shape:
raise RuntimeError("Detection image shape does not match that of input image.")
# Save the rms and mean maps derived from the detection image
img.detection_mean_arr = det_img.mean_arr
img.detection_rms_arr = det_img.rms_arr
# Run through islands and correct the image and rms, mean and max values
corr_islands = []
mean_map = img.mean_arr
rms_map = img.rms_arr
for i, isl in enumerate(det_img.islands):
islcp = isl.copy(img.pixel_beamarea(), image=ch0_map[tuple(isl.bbox)], mean=mean_map[tuple(isl.bbox)], rms=rms_map[tuple(isl.bbox)])
islcp.island_id = i
corr_islands.append(islcp)
img.islands = corr_islands
img.nisl = len(img.islands)
img.pyrank = det_img.pyrank
img.minpix_isl = det_img.minpix_isl
if opts.savefits_det_rmsim or opts.output_all:
resdir = os.path.join(img.basedir, 'background')
os.makedirs(resdir, exist_ok=True)
func.write_image_to_file(img.use_io, img.imagename + '.detection_rmsd_I.fits',
img.detection_rms_arr, img, resdir)
mylog.info('%s %s' % ('Writing ', os.path.join(resdir, img.imagename+'.detection_rmsd_I.fits')))
if opts.savefits_det_meanim or opts.output_all:
resdir = os.path.join(img.basedir, 'background')
os.makedirs(resdir, exist_ok=True)
func.write_image_to_file(img.use_io, img.imagename + '.detection_mean_I.fits',
img.detection_mean_arr, img, resdir)
mylog.info('%s %s' % ('Writing ', os.path.join(resdir, img.imagename+'.detection_mean_I.fits')))
mylogger.userinfo(mylog, "\nContinuing processing using primary image")
else:
if opts.src_ra_dec is not None:
mylogger.userinfo(mylog, "Constructing islands at user-supplied source locations")
img.islands = self.coords_to_isl(img, opts)
else:
img.islands = self.ndimage_alg(img, opts)
img.nisl = len(img.islands)
mylogger.userinfo(mylog, "Number of islands found", '%i' %
len(img.islands))
ch0_map = img.ch0_arr
ch0_shape = ch0_map.shape
pyrank = N.zeros(ch0_shape, dtype=N.int32)
for i, isl in enumerate(img.islands):
isl.island_id = i
pyrank[tuple(isl.bbox)] += N.invert(isl.mask_active) * (i + 1)
pyrank -= 1 # align pyrank values with island ids and set regions outside of islands to -1
img.pyrank = pyrank
if opts.output_all:
write_islands(img)
if opts.savefits_rankim or opts.output_all:
resdir = os.path.join(img.basedir, 'misc')
os.makedirs(resdir, exist_ok=True)
func.write_image_to_file(img.use_io, img.imagename + '_pyrank.fits', img.pyrank, img, resdir)
mylog.info('%s %s' % ('Writing ', os.path.join(resdir, img.imagename+'_pyrank.fits')))
img.completed_Ops.append('islands')
return img
def ndimage_alg(self, img, opts):
"""Island detection using scipy.ndimage
Use scipy.ndimage.label to detect islands of emission in the image.
An island is defined as a group of tightly connected (8-connectivity
for 2D images) pixels with emission.
The following cuts are applied:
- pixel is considered to have emission if it is 'thresh_isl' times
higher than RMS.
- Island should have at least 'minsize' active pixels
- There should be at least 1 pixel in the island which is 'thresh_pix'
times higher than noise (peak clip).
Uses the following quantities (from img and opts):
image, mask: arrays with image data and mask
mean, rms: arrays with mean & rms maps
thresh_isl: threshold for 'active pixels'
thresh_pix: threshold for peak
minsize: minimal acceptable island size
Returns a list of Island objects.
"""
# Islands detection
image = img.ch0_arr
mask = img.mask_arr
rms = img.rms_arr
mean = img.mean_arr
thresh_isl = opts.thresh_isl
thresh_pix = img.thresh_pix
# Here act_pixels is True where there is significant emission
if img.masked:
act_pixels = ~(mask.copy())
act_pixels[~mask] = (image[~mask]-mean[~mask])/thresh_isl >= rms[~mask]
else:
act_pixels = (image-mean)/thresh_isl >= rms
# Find dimension of image
rank = len(image.shape)
# Generates matrix for connectivity, in this case, 8-conn
connectivity = nd.generate_binary_structure(rank, rank)
# Here labels = matrix with value = (initial) island number
labels, count = nd.label(act_pixels, connectivity)
# Here slices has limits of bounding box of each such island
slices = nd.find_objects(labels)
img.island_labels = labels
# Apply cuts on island size and peak value
pyrank = N.zeros(image.shape, dtype=N.int32)
res = []
for idx, s in enumerate(slices):
idx += 1 # nd.labels indices are counted from 1
isl_size = (labels[s] == idx).sum() # number of pixels inside bounding box which are in island
isl_peak = nd.maximum(image[s], labels[s], idx)
isl_maxposn = tuple(N.array(N.unravel_index(N.nanargmax(image[s]), image[s].shape)) +
N.array((s[0].start, s[1].start)))
if (isl_size >= img.minpix_isl) and (isl_size <= img.maxpix_isl) and (isl_peak - mean[isl_maxposn])/thresh_pix > rms[isl_maxposn]:
isl = Island(image, mask, mean, rms, labels, s, idx, img.pixel_beamarea())
res.append(isl)
pyrank[tuple(isl.bbox)] += N.invert(isl.mask_active)*idx // idx
return res
def coords_to_isl(self, img, opts):
"""Construct islands around given coordinates with given size.
Returns a list of island objects.
"""
coords = opts.src_ra_dec # list of RA and Dec tuples
isl_radius_pix = opts.src_radius_pix
if isl_radius_pix is None:
isl_radius_pix = img.beam2pix(img.beam)[0] # beam major-axis FWHM in pixels (twice the half-max radius)
res = []
for idx, coord in enumerate(coords):
idx += 1 # nd.labels indices are counted from 1
isl_posn_pix = img.sky2pix(coord)
image = img.ch0_arr
mask = img.mask_arr
rms = img.rms_arr
mean = img.mean_arr
labels = func.make_src_mask(image.shape, isl_posn_pix, isl_radius_pix)
if img.masked:
aper_mask = N.where(labels.astype(bool) & ~mask)
else:
aper_mask = N.where(labels.astype(bool))
if N.size(aper_mask) >= img.minpix_isl and N.size(aper_mask) <= img.maxpix_isl:
labels[aper_mask] = idx
s = [slice(max(0, isl_posn_pix[0] - isl_radius_pix - 1),
min(image.shape[0], isl_posn_pix[0] + isl_radius_pix + 1)),
slice(max(0, isl_posn_pix[1] - isl_radius_pix - 1),
min(image.shape[1], isl_posn_pix[1] + isl_radius_pix + 1))]
isl = Island(image, mask, mean, rms, labels, s, idx, img.pixel_beamarea())
res.append(isl)
return res
def setpara_bdsm(self, img, det_file):
chain = [Op_readimage(), Op_collapse(), Op_preprocess, Op_rmsimage(),
Op_threshold(), Op_islands()]
opts = img.opts.to_dict()
opts['filename'] = det_file
opts['detection_image'] = ''
opts['polarisation_do'] = False
opts['rmsmean_map_filename'] = opts['rmsmean_map_filename_det']
opts['det_rmsmean_map_filename'] = None
ops = []
for op in chain:
if isinstance(op, type):
ops.append(op())
else:
ops.append(op)
return ops, opts
class Island(object):
"""Instances of this class represent islands of emission in the image.
It is primarily a container for all of the data describing an island.
"""
def __init__(self, img, mask, mean, rms, labels, bbox, idx,
beamarea, origin=None, noise_mask=None, copy=False):
"""Create Island instance.
Parameters:
img, mask, mean, rms: arrays describing the image
labels: labels array from scipy.ndimage
bbox: slices defining the bounding box of the island
idx: label index of this island
beamarea: beam area in pixels
"""
# Add attribute definitions needed for output
self.island_id_def = Int(doc="Island id, starting from 0", colname='Isl_id')
self.shapelet_basis_def = String(doc="Coordinate system for shapelet decomposition (cartesian/polar)",
colname='Basis', units=None)
self.shapelet_beta_def = Float(doc="Value of shapelet scale beta", colname='Beta', units=None)
self.shapelet_nmax_def = Int(doc="Maximum value of shapelet order", colname='NMax', units=None)
self.shapelet_posn_sky_def = List(Float(), doc="Posn (RA, Dec in deg) of shapelet centre",
colname=['RA', 'DEC'], units=['deg', 'deg'])
self.shapelet_posn_skyE_def = List(Float(), doc="Error on sky coordinates of shapelet centre",
colname=['E_RA', 'E_DEC'], units=['deg', 'deg'])
self.shapelet_cf_def = NArray(doc="Coefficient matrix of the shapelet decomposition",
colname='Coeff_matrix', units=None)
if not copy:
# We make bbox slightly bigger
self.oldbbox = bbox
self.oldidx = idx
bbox = self.__expand_bbox(bbox, img.shape)
origin = [b.start for b in bbox] # easier in case ndim > 2
data = img[tuple(bbox)]
bbox_rms_im = rms[tuple(bbox)]
bbox_mean_im = mean[tuple(bbox)]
# Create (inverted) masks
# Note that mask_active is the island mask; mask_noisy marks only
# the noisy pixels in the island image. If you want to mask the
# noisy pixels, set the final mask to:
# mask = mask_active + mask_noisy
isl_mask = (labels[tuple(bbox)] == idx)
noise_mask = (labels[tuple(bbox)] == 0)
N.logical_or(noise_mask, isl_mask, noise_mask)
# Invert masks
N.logical_not(isl_mask, isl_mask)
N.logical_not(noise_mask, noise_mask)
if isinstance(mask, N.ndarray):
noise_mask[mask[tuple(bbox)]] = True
isl_mask[mask[tuple(bbox)]] = True
else:
if origin is None:
origin = [b.start for b in bbox]
isl_mask = mask
if noise_mask is None:
noise_mask = mask
data = img
bbox_rms_im = rms
bbox_mean_im = mean
self.oldbbox = bbox
self.oldidx = idx
# Finish initialization
isl_size = N.sum(~isl_mask)
self.island_id = idx
self.bbox = bbox
self.origin = origin
self.image = data
self.mask_active = isl_mask
self.mask_noisy = noise_mask
self.shape = data.shape
self.size_active = isl_size
self.max_value = N.max(self.image[~self.mask_active])
in_bbox_and_unmasked = N.where(~N.isnan(bbox_rms_im))
self.rms = bbox_rms_im[in_bbox_and_unmasked].mean()
in_bbox_and_unmasked = N.where(~N.isnan(bbox_mean_im))
self.mean = bbox_mean_im[in_bbox_and_unmasked].mean()
self.islmean = bbox_mean_im[in_bbox_and_unmasked].mean()
self.total_flux = N.nansum(self.image[in_bbox_and_unmasked])/beamarea
pixels_in_isl = N.sum(~N.isnan(self.image[self.mask_active])) # number of unmasked pixels assigned to current island
self.total_fluxE = func.nanmean(bbox_rms_im[in_bbox_and_unmasked]) * N.sqrt(pixels_in_isl/beamarea) # Jy
self.border = self.get_border()
self.gaul = []
self.fgaul = []
self.sources = []
self.gresid_mean = 0.0
self.gresid_rms = 0.0
def __setstate__(self, state):
"""Needed for multiprocessing"""
self.mean = state['mean']
self.rms = state['rms']
self.image = state['image']
self.islmean = state['islmean']
self.mask_active = state['mask_active']
self.mask_noisy = state['mask_noisy']
self.size_active = state['size_active']
self.shape = state['shape']
self.origin = state['origin']
self.island_id = state['island_id']
self.oldidx = state['oldidx']
self.bbox = state['bbox']
def __getstate__(self):
"""Needed for multiprocessing"""
state = {}
state['mean'] = self.mean
state['rms'] = self.rms
state['image'] = self.image
state['islmean'] = self.islmean
state['mask_active'] = self.mask_active
state['mask_noisy'] = self.mask_noisy
state['size_active'] = self.size_active
state['shape'] = self.shape
state['origin'] = self.origin
state['island_id'] = self.island_id
state['oldidx'] = self.oldidx
state['bbox'] = self.bbox
return state
# Do map etc in case of ndim image
def __expand_bbox(self, bbox, shape):
"""Expand bbox of the image by 1 pixel"""
def __expand(bbox, shape):
return slice(int(max(0, bbox.start - 1)), int(min(shape, bbox.stop + 1)))
ebbox = [__expand(b, shape[i]) for i, b in enumerate(bbox)]
return ebbox
def copy(self, pixel_beamarea, image=None, mean=None, rms=None):
mask = self.mask_active
noise_mask = self.mask_noisy
if image is None:
image = self.image
if mean is None:
mean = N.zeros(mask.shape, dtype=N.float32) + self.mean
if rms is None:
rms = N.zeros(mask.shape, dtype=N.float32) + self.rms
bbox = self.bbox
idx = self.oldidx
origin = self.origin
return Island(image, mask, mean, rms, None, bbox, idx, pixel_beamarea,
origin=origin, noise_mask=noise_mask, copy=True)
def get_border(self):
""" From all valid island pixels, generate the border."""
mask = ~self.mask_active
border = N.transpose(N.asarray(N.where(mask ^ nd.binary_erosion(mask)))) + self.origin
return N.transpose(N.array(border))
PyBDSF-1.11.0/bdsf/make_residimage.py 0000664 0000000 0000000 00000022545 14650706641 0017240 0 ustar 00root root 0000000 0000000 """Module make_residimage.
It calculates the residual image from the lists of fitted Gaussians and shapelets.
"""
from __future__ import absolute_import
import numpy as N
from scipy import stats # for skew and kurtosis
from .image import *
from .shapelets import *
from . import mylogger
class Op_make_residimage(Op):
"""Creates an image from the fitted gaussians
or shapelets.
The resulting model image is stored in the
resid_gaus or resid_shap attribute.
Prerequisites: module gausfit or shapelets should
be run first.
"""
def __call__(self, img):
from . import functions as func
from copy import deepcopy as cp
import os
mylog = mylogger.logging.getLogger("PyBDSM."+img.log+"ResidImage")
mylog.info("Calculating residual image after subtracting reconstructed gaussians")
shape = img.ch0_arr.shape
thresh= img.opts.fittedimage_clip
resid_gaus = cp(img.ch0_arr)
model_gaus = N.zeros(shape, dtype=N.float32)
for g in img.gaussians:
C1, C2 = g.centre_pix
if hasattr(g, 'wisland_id') and img.waveletimage:
isl = img.islands[g.wisland_id]
else:
isl = img.islands[g.island_id]
b = self.find_bbox(thresh*isl.rms, g)
bbox = N.s_[max(0, int(C1-b)):min(shape[0], int(C1+b+1)),
max(0, int(C2-b)):min(shape[1], int(C2+b+1))]
x_ax, y_ax = N.mgrid[bbox]
ffimg = func.gaussian_fcn(g, x_ax, y_ax)
resid_gaus[bbox] = resid_gaus[bbox] - ffimg
model_gaus[bbox] = model_gaus[bbox] + ffimg
# Apply mask to model and resid images
if hasattr(img, 'rms_mask'):
mask = img.rms_mask
else:
mask = img.mask_arr
if isinstance(img.mask_arr, N.ndarray):
pix_masked = N.where(img.mask_arr == True)
model_gaus[pix_masked] = N.nan
resid_gaus[pix_masked] = N.nan
img.model_gaus_arr = model_gaus
img.resid_gaus_arr = resid_gaus
if img.opts.output_all or img.opts.savefits_residim:
if img.waveletimage:
resdir = img.basedir + '/wavelet/residual/'
else:
resdir = img.basedir + '/residual/'
if not os.path.exists(resdir): os.makedirs(resdir)
func.write_image_to_file(img.use_io, img.imagename + '.resid_gaus.fits', resid_gaus, img, resdir)
mylog.info('%s %s' % ('Writing', resdir+img.imagename+'.resid_gaus.fits'))
if img.opts.output_all or img.opts.savefits_modelim:
if img.waveletimage:
moddir = img.basedir + '/wavelet/model/'
else:
moddir = img.basedir + '/model/'
if not os.path.exists(moddir): os.makedirs(moddir)
func.write_image_to_file(img.use_io, img.imagename + '.model.fits', (img.ch0_arr - resid_gaus), img, moddir)
mylog.info('%s %s' % ('Writing', moddir+img.imagename+'.model.fits'))
### residual rms and mean per island
for isl in img.islands:
resid = resid_gaus[tuple(isl.bbox)]
self.calc_resid_mean_rms(isl, resid, type='gaus')
# Calculate some statistics for the Gaussian residual image
non_masked = N.where(~N.isnan(img.ch0_arr))
mean = N.mean(resid_gaus[non_masked], axis=None)
std_dev = N.std(resid_gaus[non_masked], axis=None)
skew = stats.skew(resid_gaus[non_masked], axis=None)
kurt = stats.kurtosis(resid_gaus[non_masked], axis=None)
stat_msg = "Statistics of the Gaussian residual image:\n"
stat_msg += " mean: %.3e (Jy/beam)\n" % mean
stat_msg += " std. dev: %.3e (Jy/beam)\n" % std_dev
stat_msg += " skew: %.3f\n" % skew
stat_msg += " kurtosis: %.3f" % kurt
mylog.info(stat_msg)
# Now residual image for shapelets
if img.opts.shapelet_do:
mylog.info("Calculating residual image after subtracting reconstructed shapelets")
shape = img.ch0_arr.shape
fimg = N.zeros(shape, dtype=N.float32)
for isl in img.islands:
if hasattr(isl, 'shapelet_beta'):
if isl.shapelet_beta > 0: # make sure shapelet has nonzero scale for this island
mask=isl.mask_active
cen=isl.shapelet_centre-N.array(isl.origin)
basis, beta, nmax, cf = isl.shapelet_basis, isl.shapelet_beta, \
isl.shapelet_nmax, isl.shapelet_cf
image_recons=reconstruct_shapelets(isl.shape, mask, basis, beta, cen, nmax, cf)
fimg[tuple(isl.bbox)] += image_recons
model_shap = fimg
resid_shap = img.ch0_arr - fimg
if img.opts.shapelet_gresid:
# also subtract Gaussian model image
shape = img.ch0_arr.shape
thresh= img.opts.fittedimage_clip
model_gaus = N.zeros(shape, dtype=N.float32)
for isl in img.islands:
for g in isl.gaul:
C1, C2 = g.centre_pix
b = self.find_bbox(thresh*isl.rms, g)
bbox = N.s_[max(0, int(C1-b)):min(shape[0], int(C1+b+1)),
max(0, int(C2-b)):min(shape[1], int(C2+b+1))]
x_ax, y_ax = N.mgrid[bbox]
ffimg = func.gaussian_fcn(g, x_ax, y_ax)
model_gaus[bbox] = model_gaus[bbox] + ffimg
resid_shap -= model_gaus
# Apply mask to model and resid images
if hasattr(img, 'rms_mask'):
mask = img.rms_mask
else:
mask = img.mask_arr
if isinstance(mask, N.ndarray):
pix_masked = N.where(mask == True)
model_shap[pix_masked] = N.nan
resid_shap[pix_masked] = N.nan
img.model_shap_arr = model_shap
img.resid_shap_arr = resid_shap
if img.opts.output_all:
func.write_image_to_file(img.use_io, img.imagename + '.resid_shap.fits', resid_shap, img, resdir)
mylog.info('%s %s' % ('Writing ', resdir+img.imagename+'.resid_shap.fits'))
### shapelet residual rms and mean per island
for isl in img.islands:
resid = resid_shap[tuple(isl.bbox)]
self.calc_resid_mean_rms(isl, resid, type='shap')
# Calculate some statistics for the Shapelet residual image
non_masked = N.where(~N.isnan(img.ch0_arr))
mean = N.mean(resid_shap[non_masked], axis=None)
std_dev = N.std(resid_shap[non_masked], axis=None)
skew = stats.skew(resid_shap[non_masked], axis=None)
kurt = stats.kurtosis(resid_shap[non_masked], axis=None)
mylog.info("Statistics of the Shapelet residual image:")
mylog.info(" mean: %.3e (Jy/beam)" % mean)
mylog.info(" std. dev: %.3e (Jy/beam)" % std_dev)
mylog.info(" skew: %.3f" % skew)
mylog.info(" kurtosis: %.3f" % kurt)
img.completed_Ops.append('make_residimage')
return img
def find_bbox(self, thresh, g):
"""Calculate bounding box for gaussian.
This function calculates the size of the box for evaluating the
Gaussian, so that the value of the Gaussian is smaller than the
threshold everywhere outside the box.
Parameters:
thresh: threshold
g: Gaussian object
"""
from math import ceil, sqrt, log
A = g.peak_flux
S = g.size_pix[0]
if A == 0.0:
return ceil(S*1.5)
if thresh/A >= 1.0 or thresh/A <= 0.0:
return ceil(S*1.5)
return ceil(S*sqrt(-2*log(thresh/A)))
def calc_resid_mean_rms(self, isl, resid, type):
"""Inserts mean and rms of residual image into isl, src, and gaussians
type - specifies 'gaus' or 'shap'
"""
if len(isl.gaul) == 0:
resid = N.zeros(isl.shape, dtype=N.float32)
ind = N.where(~isl.mask_active)
resid = resid[ind]
if type == 'gaus':
isl.gresid_rms = N.std(resid)
isl.gresid_mean = N.mean(resid)
else:
isl.sresid_rms = N.std(resid)
isl.sresid_mean = N.mean(resid)
if hasattr(isl, 'sources'):
for src in isl.sources:
if type == 'gaus':
src.gresid_rms = N.std(resid)
src.gresid_mean = N.mean(resid)
else:
src.sresid_rms = N.std(resid)
src.sresid_mean = N.mean(resid)
for g in src.gaussians:
if type == 'gaus':
g.gresid_rms = N.std(resid)
g.gresid_mean = N.mean(resid)
else:
g.sresid_rms = N.std(resid)
g.sresid_mean = N.mean(resid)
if hasattr(isl, 'dsources'):
for dsrc in isl.dsources: # Handle dummy sources (if any)
if type == 'gaus':
dsrc.gresid_rms = N.std(resid)
dsrc.gresid_mean = N.mean(resid)
else:
dsrc.sresid_rms = N.std(resid)
dsrc.sresid_mean = N.mean(resid)
PyBDSF-1.11.0/bdsf/multi_proc.py 0000664 0000000 0000000 00000017105 14650706641 0016303 0 ustar 00root root 0000000 0000000 """Multiprocessing module to handle parallelization.
This module can optionally update a statusbar and can divide tasks
between cores using weights (so that each core gets a set of tasks with
the same total weight).
Adapted from a module by Brian Refsdal at SAO, available at AstroPython
(http://www.astropython.org/snippet/2010/3/Parallel-map-using-multiprocessing).
"""
from __future__ import print_function
import traceback
import sys
import numpy
_multi = False
_ncpus = 1
try:
# May raise ImportError
import multiprocessing
# Set spawn method to "fork". This is needed for macOS on Python 3.8+ where the
# default has been changed to "spawn", causing problems (see the discussion at
# https://github.com/ipython/ipython/issues/12396)
if sys.platform == 'darwin':
if sys.version_info[0] == 3 and sys.version_info[1] >= 8:
multiprocessing.set_start_method('fork')
_multi = True
# May raise NotImplementedError
_ncpus = min(multiprocessing.cpu_count(), 8)
except Exception:  # multiprocessing may be missing, or cpu_count()/set_start_method() unsupported
pass
__all__ = ('parallel_map',)
def worker(f, ii, chunk, out_q, err_q, lock, bar, bar_state):
"""
A worker function that maps an input function over a
slice of the input iterable.
:param f : callable function that accepts argument from iterable
:param ii : process ID
:param chunk: slice of input iterable
:param out_q: thread-safe output queue
:param err_q: thread-safe queue to populate on exception
:param lock : thread-safe lock to protect a resource
( useful in extending parallel_map() )
:param bar: statusbar to update during fit
:param bar_state: statusbar state dictionary
"""
vals = []
# iterate over slice
for val in chunk:
try:
result = f(val)
except Exception as e:
etype, exc_value, tbk = sys.exc_info()  # avoid shadowing the loop variable 'val'
print('Thread raised exception',e)
print('Traceback of thread is:')
print('-------------------------')
traceback.print_tb(tbk)
print('-------------------------')
err_q.put(e)
return
vals.append(result)
# update statusbar
if bar is not None:
if bar_state['started']:
bar.pos = bar_state['pos']
bar.spin_pos = bar_state['spin_pos']
bar.started = bar_state['started']
increment = bar.increment()
bar_state['started'] = bar.started
bar_state['pos'] += increment
bar_state['spin_pos'] += increment
if bar_state['spin_pos'] >= 4:
bar_state['spin_pos'] = 0
# output the result and task ID to output queue
out_q.put( (ii, vals) )
def run_tasks(procs, err_q, out_q, num):
"""
A function that executes populated processes and processes
the resultant array. Checks error queue for any exceptions.
:param procs: list of Process objects
:param out_q: thread-safe output queue
:param err_q: thread-safe queue to populate on exception
:param num : length of resultant array
"""
# function to terminate processes that are still running.
die = (lambda vals : [val.terminate() for val in vals
if val.exitcode is None])
try:
for proc in procs:
proc.start()
for proc in procs:
proc.join()
except Exception as e:
# kill all slave processes on ctrl-C
die(procs)
raise e
if not err_q.empty():
# kill all on any exception from any one slave
die(procs)
raise err_q.get()
# Processes finish in arbitrary order. Process IDs double
# as index in the resultant array.
results = [None] * num
for i in range(num):
idx, result = out_q.get()
results[idx] = result
# Remove extra dimension added by array_split
result_list = []
for result in results:
result_list += result
return result_list
def parallel_map(function, sequence, numcores=None, bar=None, weights=None):
"""
A parallelized version of the native Python map function that
utilizes the Python multiprocessing module to divide and
conquer a sequence.
parallel_map does not yet support multiple argument sequences.
:param function: callable function that accepts argument from iterable
:param sequence: iterable sequence
:param numcores: number of cores to use (if None, all are used)
:param bar: statusbar to update during fit
:param weights: weights to use when splitting the sequence
"""
if not callable(function):
raise TypeError("input function '%s' is not callable" %
repr(function))
if not numpy.iterable(sequence):
raise TypeError("input '%s' is not iterable" %
repr(sequence))
sequence = numpy.array(list(sequence), dtype=object)
size = len(sequence)
if not _multi or size == 1:
results = list(map(function, sequence))
if bar is not None:
bar.stop()
return results
# Set default number of cores to use. Try to leave one core free for pyplot.
if numcores is None:
numcores = _ncpus - 1
if numcores > _ncpus - 1:
numcores = _ncpus - 1
if numcores < 1:
numcores = 1
# Returns a started SyncManager object which can be used for sharing
# objects between processes. The returned manager object corresponds
# to a spawned child process and has methods which will create shared
# objects and return corresponding proxies.
manager = multiprocessing.Manager()
# Create FIFO queue and lock shared objects and return proxies to them.
# The manager handles a server process that manages shared objects that
# each slave process has access to. Bottom line -- thread-safe.
out_q = manager.Queue()
err_q = manager.Queue()
lock = manager.Lock()
bar_state = manager.dict()
if bar is not None:
bar_state['pos'] = bar.pos
bar_state['spin_pos'] = bar.spin_pos
bar_state['started'] = bar.started
# If the sequence has fewer elements than numcores, only use
# len(sequence) processes
if size < numcores:
numcores = size
# group sequence into numcores-worth of chunks
if weights is None or numcores == size:
# No grouping specified (or there are as many cores as
# tasks), so divide into equal chunks
sequence = numpy.array_split(sequence, numcores)
else:
# Group so that each group has roughly an equal sum of weights
weight_per_core = numpy.sum(weights)/float(numcores)
cut_values = []
temp_sum = 0.0
for indx, weight in enumerate(weights):
temp_sum += weight
if temp_sum > weight_per_core:
cut_values.append(indx+1)
temp_sum = weight
if len(cut_values) > numcores - 1:
cut_values = cut_values[0:numcores-1]
sequence = numpy.array_split(sequence, cut_values)
# Make sure there are no empty chunks at the end of the sequence
while len(sequence[-1]) == 0:
sequence.pop()
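# Worked example of the weighted split above (hypothetical numbers):
# weights = [5, 1, 1, 1] with numcores = 2 gives weight_per_core = 4.0.
# The running sum exceeds 4.0 at indices 0 and 1, so cut_values = [1, 2]
# is truncated to [1], and array_split yields one chunk holding the
# single heavy task (weight 5) and one holding the three light tasks.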
procs = [multiprocessing.Process(target=worker,
args=(function, ii, chunk, out_q, err_q, lock, bar, bar_state))
for ii, chunk in enumerate(sequence)]
try:
results = run_tasks(procs, err_q, out_q, len(sequence))
if bar is not None:
if bar.started:
bar.stop()
return results
except KeyboardInterrupt:
for proc in procs:
if proc.exitcode is None:
proc.terminate()
proc.join()
raise
PyBDSF-1.11.0/bdsf/mylogger.py 0000664 0000000 0000000 00000010753 14650706641 0015755 0 ustar 00root root 0000000 0000000 """ WARNING, ERROR, and CRITICAL are always output to screen and to log file.
INFO and USERINFO always go to the log file. DEBUG goes to log file if debug is
True. USERINFO goes to screen only if quiet is False.
Use as follows:
mylog = mylogger.logging.getLogger("name")
mylog.info('info') --> print to logfile, but not to screen
mylogger.userinfo(mylog, 'info') --> print to screen (if quiet==False)
and to logfile
"""
import logging
from socket import gethostname
import copy
def init_logger(logfilename, quiet=False, debug=False):
logging.USERINFO = logging.INFO + 1
logging.addLevelName(logging.USERINFO, 'USERINFO')
logger = logging.root
logger.setLevel(logging.DEBUG)
# First remove any existing handlers (in case PyBDSM has been run
# before in this session but the quiet or debug options have changed)
while len(logger.handlers) > 0:
logger.removeHandler(logger.handlers[0])
# File handlers
fh = ColorStripperHandler(logfilename)
if debug:
# For log file and debug on, print name and levelname
fh.setLevel(logging.DEBUG)
fmt1 = MultiLineFormatter('%(asctime)s %(name)-20s:: %(levelname)-8s: '\
'%(message)s',
datefmt='%a %d-%m-%Y %H:%M:%S')
else:
# For log file and debug off, don't print name and levelname as
# they have no meaning to the user.
fh.setLevel(logging.INFO)
fmt1 = MultiLineFormatter('%(asctime)s:: %(levelname)-8s: %(message)s',
datefmt='%a %d-%m-%Y %H:%M:%S')
fh.setFormatter(fmt1)
logger.addHandler(fh)
# Console handler for warning, error, and critical: format includes levelname
# ANSI colors are used
ch = logging.StreamHandler()
ch.setLevel(logging.WARNING)
fmt2 = logging.Formatter('\033[31;1m%(levelname)s\033[0m: %(message)s')
ch.setFormatter(fmt2)
logger.addHandler(ch)
# Console handler for USERINFO only: format does not include levelname
# (the user does not need to see the levelname, as it has no meaning to them)
# ANSI colors are allowed
chi = logging.StreamHandler()
chi.addFilter(InfoFilter())
if quiet:
# prints nothing, since filter lets only USERINFO through
chi.setLevel(logging.WARNING)
else:
# prints only USERINFO
chi.setLevel(logging.USERINFO)
fmt3 = logging.Formatter('%(message)s')
chi.setFormatter(fmt3)
logger.addHandler(chi)
class InfoFilter(logging.Filter):
# Lets only USERINFO through
def filter(self, rec):
return rec.levelno == logging.USERINFO
class MultiLineFormatter(logging.Formatter):
def format(self, record):
s = logging.Formatter.format(self, record)
header, footer = s.split(record.message)
nocolor_header = strip_color(header)
s = s.replace('\n', '\n' + ' '*len(nocolor_header))
return s
def userinfo(mylog, desc_str, val_str=''):
"""Writes a nicely formatted string to the log file and console
mylog = logger
desc_str = description string / message
val_str = value string
Message is constructed as:
'desc_str .... : val_str'
"""
bc = '\033[1;34m' # Blue
nc = '\033[0m' # Normal text color
if val_str == '':
sep = ''
if desc_str[:1] == '\n':
bc += '\n'
desc_str = desc_str[1:]
desc_str = bc + '--> ' + desc_str + nc
else:
sep = ' : '
if len(desc_str) < 40:
desc_str += ' '
if len(desc_str) < 40:
while len(desc_str) < 41:
desc_str += '.'
else:
while len(desc_str) < 41:
desc_str += ' '
mylog.log(logging.USERINFO, desc_str+sep+val_str)
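# For example (dot padding approximate), the call
#     userinfo(mylog, 'Number of islands', '12')
# logs a line of the form
#     Number of islands ....................... : 12
# while a call without val_str is rendered as a blue '--> ' message.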
class ColorStripperHandler(logging.FileHandler):
def emit(self, record):
"""Strips ANSI color codes from file stream"""
myrecord = copy.copy(record)
nocolor_msg = strip_color(myrecord.msg)
myrecord.msg = nocolor_msg
logging.FileHandler.emit(self, myrecord)
def strip_color(msg):
"""Strips specific ANSI color codes from an input string
The color codes are hard-coded to those used above
in userinfo() and in WARNING, ERROR, and CRITICAL.
"""
nocolor_msg = ''
a = msg.split('\033[1;34m')
for b in a:
c = b.split('\033[0m')
for d in c:
e = d.split('\033[31;1m')
for f in e:
nocolor_msg += f
return nocolor_msg
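# For example:
#     strip_color('\033[1;34m--> Opened image\033[0m')
# returns '--> Opened image', with both hard-coded codes removed.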
PyBDSF-1.11.0/bdsf/nat/ 0000775 0000000 0000000 00000000000 14650706641 0014332 5 ustar 00root root 0000000 0000000 PyBDSF-1.11.0/bdsf/nat/__init__.py 0000664 0000000 0000000 00000255306 14650706641 0016456 0 ustar 00root root 0000000 0000000 # Adapted for numpy/ma/cdms2 by convertcdms.py
"""---------------------------------------------------------------------------------------------
INTRODUCTION TO NGMATH
The ngmath library is a collection of interpolators and approximators for one-dimensional, two-dimensional
and three-dimensional data. The packages, which were obtained from NCAR, are:
natgrid -- a two-dimensional random data interpolation package based on Dave Watson's nngridr.
dsgrid -- a three-dimensional random data interpolator based on a simple inverse distance weighting
algorithm.
fitgrid -- an interpolation package for one-dimensional and two-dimensional gridded data based on
Alan Cline's Fitpack. Fitpack uses splines under tension to interpolate in one and two
dimensions.
csagrid -- an approximation package for one-dimensional, two-dimensional and three-dimensional random
data based on David Fulker's Splpack. csagrid uses cubic splines to calculate its
approximation function.
cssgrid -- an interpolation package for random data on the surface of a sphere based on the work of
Robert Renka. cssgrid uses cubic splines to calculate its interpolation function.
shgrid -- an interpolation package for random data in 3-space based on the work of Robert Renka.
shgrid uses a modified Shepard's algorithm to calculate its interpolation function.
COMPARISON OF NGMATH PACKAGES
Three-dimensional packages -- shgrid, csagrid and dsgrid.
shgrid is probably the package of choice for interpolation. It uses a least squares fit of biquadratics
to construct its interpolation function. The interpolation function will pass through the original data
points.
csagrid uses a least squares fit of cubic splines to calculate its approximation function: the calculated
surface will not necessarily pass through the original data points. The algorithm can become unstable in data
sparse regions.
dsgrid uses a weighted average algorithm and is stable in all cases, but the resultant interpolation is
not usually smooth and execution time is very slow. dsgrid is probably best used when csagrid and shgrid
fail or for comparative purposes.
Two-dimensional packages -- natgrid, fitgrid, csagrid and dsgrid.
natgrid is the package of choice in most cases. It implements a very stable algorithm and has parameters
for adjusting the smoothness of the output surface.
fitgrid offers user-settable parameters for specifying derivatives along the boundary of the output grid
which are not available in natgrid.
csagrid produces an approximate two-dimensional surface which may be smoother than that produced by fitgrid
and natgrid.
dsgrid is not recommended for two-dimensional surfaces. natgrid is superior in all respects.
One-dimensional packages -- fitgrid and csagrid.
fitgrid is definitely the package of choice. It has many features not available in csagrid, such as
interpolating parametric curves, finding integrals, handling periodic functions, allowing smoothing that
varies from linear to a full cubic spline interpolation and specifying slopes at the end points.
Interpolation on a sphere -- cssgrid.
cssgrid is designed specifically for interpolating on a sphere. It uses cubic splines to calculate an
interpolation function.
NATGRID PACKAGE
natgrid implements a natural neighbor interpolation method. The input for the interpolation is a set
of randomly spaced two-dimensional coordinates with functional values at those coordinates; the output is a
set of interpolated values at coordinates in a user specified rectangular grid. The coordinates in the output
grid must be monotonic in each coordinate direction, but need not be evenly spaced. It is also possible to
interpolate at a single point.
natgrid uses a weighted average method that is much more sophisticated than the inverse distance weighted
average used by dsgrid. One distinguishing quality of natural neighbor interpolation is the way in which
a set of neighboring points (the natural neighbor) is selected to use for interpolating at a point. The
natural neighbor selection process avoids the problems common to methods based on choosing a fixed number
of neighboring points, or all points within a fixed distance. Another distinguishing quality of natural
neighbor interpolation is the way that the weights are calculated for the functional values at the natural
neighbor coordinates. These weights are based on proportionate area, rather than distances.
The method of finding the natural neighbors and calculating area-based weights to produce interpolated
values is called natural neighbor linear interpolation. This produces an interpolation surface that has a
continous slope at all points, except at the original input points. The result of natural neighbor linear
interpolation can be visualized as producing a snugly fit sheet stretched over all of the input points.
The interpolation method in natgrid also allows for natural neighbor linear interpolation augmented by
blending in gradient estimates. This is called natural neighbor nonlinear interpolation. It produces an
interpolation surface that has a continuous slope at all locations; two tautness parameters can be set by
the user to control the apparent smoothness of the output surface.
NATGRID CONTENTS
Access through Python to the natgrid package from NCAR's ngmath distribution is provided directly through the module
natgridmodule.so which was generated as a Python C language extension in order to export the natgrid functions
from the original C language library to Python.
REQUIRED FILE
natgridmodule.so -- the Python interface to the ngmath natgrid package.
USEFUL FILES
nat.py -- the object oriented interface including a general help package.
natgridtest.py -- the code to test nat.py and to write documentation.
USAGE
This module is designed to be used in two ways. One is through the use of the object oriented interface to the underlying
functions. This approach is recommended for users not already familiar with the original natgrid distribution because
it simplifies the calls to the routines. The other method uses the original functions calling them directly from Python.
------------------- OBJECT ORIENTED APPROACH ----------------
The nat module contains the Natgrid class and its single method, rgrd, which provides access to all the natgrid
functions. The object oriented approach has been organized as a two step process.
STEP 1.
To make an instance, r, type:
import nat
r = nat.Natgrid(xi, yi, xo, yo)
or
r = nat.Natgrid(xi, yi, xo, yo, listOutput = 'yes')
where xi, yi and xo, yo are the input and output grid coordinate arrays. The optional listOutput must
be set to anything except 'no' if xo, yo are in list format as explained below. It is the responsibility
of the user to set listOutput if the output is in the list form.
The input grid must be organized in a list format always. The size of the xi array and the yi array are
necessarily equal. For example, if there are n randomly spaced input data points, there
are n values in xi and n values in yi.
There are two possible formats for the output grid. The output grid coordinate arrays may be a list like
the input array or it may be a rectangular grid. The choice between the two possibilities is made according
to requirements in subsequent calls to the method function. The first choice is required if the subsequent
call is to the single point mode interpolation. The list can have one or more points. Of course, the list
could describe a rectangular grid. For example, a rectangular grid with 10 x values and 20 y values can be
rewritten in list form with 200 x values and 200 y values. However, this form requires calling the slower
single point interpolator. The second choice is most efficient for the basic interpolation to a rectangular
output grid. The output grid must be monotonic but need not be equally spaced.
The grid coordinate arrays can be single precision (numpy.float32) or double precision (numpy.float64). The
decision on whether to call for a single or a double precision computation subsequently is made by looking at
the type of these arrays.
To look at the default settings for the control parameters and a brief description of their properties, type
r.printDefaultParameterTable()
To change a setting type the new value. For example, to set igr to 1, type
r.igr = 1
To find a value without printing the table, type the name. For example, to examine the value of hor, type
r.hor
To check the settings type
r.printInstanceParameterTable() -- prints in tabular form the parameters used in subsequent calls to the method
function rgrd.
or
printStoredParameters() -- prints the parameters in memory which may differ from the above if the user
has made more than one instance of the Natgrid class.
STEP 2.
natgrid is restricted to two dimensions. Consequently, it is the user's responsibility to reduce the processing of
higher dimensional data to a sequence of calls using only two dimensional data.
The computations are divided into two groups depending on whether the output arrays are in list form or in rectilinear
grid form. If they are in list format the single point mode is called to interpolate to those individual points. This is
the only process possible. On the other hand, if the output goes to a rectangular grid there are more choices. In
addition to carrying out linear and nonlinear interpolations, it is possible to request aspects and slopes. The aspect
at a point on the interpolated surface is the direction of steepest descent. The slope is the value of the partial
derivative taken in the direction of the aspect. The slope is measured as an angle that is zero in a horizontal surface
and positive below the horizontal.
The following examples cover the basic computations. They start with an indication of the appropriate STEP 1.
Example 1: the basic natural neighbor linear interpolation
As STEP 1 make an instance, r, with:
import nat
r = nat.Natgrid(xi, yi, xo, yo)
where the xo, yo grid is rectilinear as explained above in STEP 1.
Then call the primary interpolation computation to regrid the input data, dataIn, on the grid (xi, yi) to
the output data, dataOut, on the grid (xo, yo), with
dataOut = r.rgrd( dataIn )
The computation is either single or double precision as determined by the precision submitted in the grid
description in STEP 1.
It is also possible to request a wrap in the input grid and the input data in the longitude direction, assumed
to be the yi grid coordinate, by adding a keyword as
dataOut = r.rgrd( dataIn, wrap = 'yes' )
Example 2: natural neighbor linear interpolation returning the aspect and the slope.
As STEP 1 make an instance, r, with:
import nat
r = nat.Natgrid(xi, yi, xo, yo)
where the xo, yo grid is rectilinear as explained above in STEP 1.
Then call the primary interpolation computation to regrid the input data, dataIn, on the grid (xi, yi) to
the output data, dataOut, on the grid (xo, yo), while asking for the aspect and the slope on this output grid, with
dataOut, a, s = r.rgrd( dataIn, aspectSlope = 'yes' )
where a is the aspect, the direction of the steepest descent in degrees measured from 'north' and s is the
slope in degrees measured from the horizontal. Necessarily, these are arrays aligned with the rectilinear
output grid, xo, yo.
The computation is either single or double precision as determined by the precision submitted in the grid
description in STEP 1.
It is also possible to request a wrap in the input grid and the input data in the longitude direction, assumed
to be the yi grid coordinate, by adding a keyword as
dataOut, a, s = r.rgrd( dataIn, aspectSlope = 'yes', wrap = 'yes' )
Example 3: the basic natural neighbor nonlinear interpolation
The procedure for the nonlinear interpolation differs from the linear case in the need to set the control
parameter igr. Follow Example 1 and insert the following statement after making the instance, r.
r.igr = 1
Example 4: natural neighbor nonlinear interpolation returning the aspect and the slope.
The procedure for the nonlinear interpolation differs from the linear case in the need to set the control
parameter igr. Follow Example 2 and insert the following statement after making the instance, r.
r.igr = 1
Example 5: single point mode natural neighbor linear interpolation
As STEP 1 make an instance, r, with:
import nat
r = nat.Natgrid(xi, yi, xo, yo, listOutput = 'yes')
where the xo, yo output grid is in the list form (not a rectangular output grid) as explained above in
STEP 1.
To call the single point mode interpolation computation to regrid the input data, dataIn, on the grid (xi, yi)
to the output data, dataOut, on the grid (xo, yo), type
dataOut = r.rgrd( dataIn )
The computation is either single or double precision as determined by the precision submitted in the grid
description in STEP 1. In the single point mode it is not possible to request the aspect and the slope.
Example 6: single point mode natural neighbor nonlinear interpolation
The procedure for the nonlinear interpolation differs from the linear case in the need to set the control
parameter igr. Follow Example 5 and insert the following statement after making the instance, r.
r.igr = 1
------------------- ORIGINAL FUNCTION APPROACH -----------------
The module natgridmodule.so exports the following functions to Python from the original ngmath C library:
Single precision procedures:
natgrids - primary function for gridding.
seti - set int parameter values.
geti - retrieve values for int parameters.
setr - set float parameter values.
getr - retrieve values for float parameters
setc - set char parameter values.
getc - retrieve values for char parameters.
getaspects - get aspect values, if calculated by setting sdi = 1.
getslopes - get slope values, if calculated by setting sdi = 1.
pntinits - initiate single point mode.
pnts - interpolate at a single point.
pntend - terminate single point mode.
Double precision procedures:
natgridd - primary function for gridding.
setrd - set float parameter values.
getrd - retrieve values for float parameters
getaspectd - get aspect values, if calculated by setting sdi = 1.
getsloped - get slope values, if calculated by setting sdi = 1.
pntinitd - initiate single point mode.
pntd - interpolate at a single point.
pntendd - terminate single point mode.
Information on the use of the routines is available by importing natgridmodule and printing the docstring
of interest. For example, documentation for the routine natgrids is obtained by typing
import natgridmodule
print(natgridmodule.natgrids.__doc__)
This same information is available in the help package.
A description of the control parameters is not in the natgridmodule documentation. It can be found by typing
import nat
nat.printParameterTable()
The documentation associated with the natgridmodule.so, such as the docstrings, describes the C code.
DOCUMENTATION
Documentation is provided through Python's docstrings, essentially Python style program
comments. A help package provides instructions on the use of the natgrid module. A table of contents
is printed to the screen by typing
nat.help()
after importing nat.
A hard copy of all the pertinent 'docstring' documentation written to the file natgridmodule.doc can
be produced by typing
nat.document()
As an alternative to using the help package, online documentation for the natgrids function, for example,
is available directly from the natgrids docstring by typing
import natgridmodule
print(natgridmodule.natgrids.__doc__)
TESTING
To run a test of the natgrid computations and to get a copy of this documentation, type
cdat natgridtest.py
--------------------------------------------------------------------------------------------------------------"""
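# A minimal sketch of the object oriented interface described above. The
# point values and grids are illustrative only; within PyBDSF the package
# is imported as bdsf.nat rather than the plain 'import nat' shown in the
# docstring.
#
#     import numpy
#     from bdsf.nat import Natgrid
#     xi = numpy.random.uniform(0., 10., 50).astype(numpy.float32)
#     yi = numpy.random.uniform(0., 10., 50).astype(numpy.float32)
#     dataIn = (numpy.sin(xi) * numpy.cos(yi)).astype(numpy.float32)
#     xo = numpy.linspace(0., 10., 21).astype(numpy.float32)
#     yo = numpy.linspace(0., 10., 21).astype(numpy.float32)
#     r = Natgrid(xi, yi, xo, yo)
#     dataOut = r.rgrd(dataIn)   # 21 x 21 natural neighbor interpolation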
from __future__ import print_function
# import string, math, sys, numpy, cdms2, natgridmodule
import string, math, sys, numpy
from . import natgridmodule
# writeTestcase = 'yes'
# try:
# import cdms2
# except ImportError:
# print 'Can not write test case results to netCDF files without module cdms'
# writeTestcase = 'no'
writeTestcase = 'no'
usefilled = 'yes'
try:
import numpy.ma
except ImportError:
print('Can not convert from numpy.ma array to numpy array without module numpy.ma')
usefilled = 'no'
debug = 0
class Natgrid:
#-------------------------------------------------------------------------------------------------------------
#
# Contents of Natgrid class
#
#
# Natgrid class
# __init__ -- initialization
# rgrd -- the regridder called from Python
#
# rgrdPrimary -- called by rgrd if the output grid is monotonically increasing
# rgrdSinglePoint -- called by rgrd if the output grid is random or single point mode is selected
# setInstanceParameters -- sets the C values to the instance values
#
#---------------------------------------------------------------------------------------------------------------
def __init__(self, xi, yi, xo, yo, listOutput = 'no'):
""" --------------------------------------------------------------------------------------------------------
routine: __init__ for class Natgrid
purpose: init makes an instance of the Natgrid class while performing the following:
1. checks the argument list for the correct types.
2. selects single or double precision computation.
3. assigns the coordinate grid arrays to self data.
4. assigns default control parameter values from the parameter dictionary.
usage: r = nat.Natgrid(xi, yi, xo, yo)
or
r = nat.Natgrid(xi, yi, xo, yo, listOutput = 'yes')
where xi, yi and xo, yo are the input and output grid coordinate arrays. The optional listOutput is
set to anything except 'no' if xo, yo are in list format as explained below.
The input grid must be organized in a list format always. The size of the xi array and the yi array are
necessarily equal. For example, if there are n randomly spaced input data points, there
are n values in xi and n values in yi.
There are two possible formats for the output grid. The output grid coordinate arrays may be a list like
the input array or it may be a rectangular grid. The choice between the two possibilities is made according
to requirements in subsequent calls to the method function. The first choice is required if the subsequent
call is to the single point mode interpolation. The list can have one or more points. Of course, the list
could describe a rectangular grid. For example, a rectangular grid with 10 x values and 20 y values can be
rewritten in list form with 200 x values and 200 y values. However, this form requires calling the slower
single point interpolator. The second choice is most efficient for the basic interpolation to a rectangular
output grid. The output grid must be monotonic but need not be equally spaced.
Note: the index in the data associated with y varies the fastest.
definition: __init__(self, xi, yi, xo, yo, listOutput = 'no'):
--------------------------------------------------------------------------------------------------------"""
# ---- check the input grid argument list
try:
size = len(xi)
except:
msg = 'CANNOT CREATE INSTANCE - The first argument must be an array'
raise TypeError(msg)
if size < 4:
msg = 'CANNOT CREATE INSTANCE - The length of the input x coordinate grid must be greater than 3'
raise ValueError(msg)
try:
size = len(yi)
except:
msg = 'CANNOT CREATE INSTANCE - The third argument must be an array'
raise TypeError(msg)
if size < 4:
msg = 'CANNOT CREATE INSTANCE - The length of the input y coordinate grid must be greater than 3'
raise ValueError(msg)
# set the self data for the input grid
self.nxi = len(xi)
self.nyi = len(yi)
if self.nxi != self.nyi:
msg = 'CANNOT CREATE INSTANCE - The length of the input x and y coordinate grids must be equal'
raise ValueError(msg)
self.xi = xi
self.yi = yi
# ---- check the output grid argument list
try:
size = len(xo)
except:
msg = 'CANNOT CREATE INSTANCE - The second argument must be an array'
raise TypeError(msg)
try:
size = len(yo)
except:
msg = 'CANNOT CREATE INSTANCE - The fourth argument must be an array'
raise TypeError(msg)
# set the self data for the output grid
self.nxo = len(xo)
self.nyo = len(yo)
if listOutput == 'no':
self.xo, self.yo, monotonic, self.xreverse, self.yreverse = checkdim(xo, yo) # monotonicity check
if monotonic == 'no':
msg = 'CANNOT CREATE INSTANCE - Rectangular output grid must be monotonic'
raise ValueError(msg)
self.listOutput = 'no'
else:
if self.nxo != self.nyo:
msg = 'CANNOT CREATE INSTANCE - The list type output arrays must have the same length'
raise ValueError(msg)
else:
self.xo = xo
self.yo = yo
self.xreverse = 'no'
self.yreverse = 'no'
self.listOutput = 'yes'
# select the interpolation routines from the single or the double precision group - majority rules here
numberSingles = 0
numberDoubles = 0
if xi.dtype.char == 'f':
numberSingles = numberSingles + 1
else:
numberDoubles = numberDoubles + 1
if xo.dtype.char == 'f':
numberSingles = numberSingles + 1
else:
numberDoubles = numberDoubles + 1
if yi.dtype.char == 'f':
numberSingles = numberSingles + 1
else:
numberDoubles = numberDoubles + 1
if yo.dtype.char == 'f':
numberSingles = numberSingles + 1
else:
numberDoubles = numberDoubles + 1
if debug == 1:
print('number Singles and Doubles : ', numberSingles, numberDoubles)
if numberSingles >= numberDoubles:
self.group = 'single'
if numberSingles < 4:
sendmsg('Changing all the coordinate grid types to float32')
xi = xi.astype(numpy.float32)
xo = xo.astype(numpy.float32)
yi = yi.astype(numpy.float32)
yo = yo.astype(numpy.float32)
else:
self.group = 'double'
if numberDoubles < 4:
sendmsg('Changing all the coordinate grid types to float64')
xi = xi.astype(numpy.float64)
xo = xo.astype(numpy.float64)
yi = yi.astype(numpy.float64)
yo = yo.astype(numpy.float64)
# set the parameter instance data to the default values
defaultDict = Natgrid.makeDefaultParameterTable(self)
self.adf = eval(defaultDict['adf'][2])
self.alg = eval(defaultDict['alg'][2])
self.asc = eval(defaultDict['asc'][2])
self.bI = eval(defaultDict['bI'][2])
self.bJ = eval(defaultDict['bJ'][2])
self.dup = eval(defaultDict['dup'][2])
self.ext = eval(defaultDict['ext'][2])
self.hor = eval(defaultDict['hor'][2])
self.igr = eval(defaultDict['igr'][2])
self.magx = eval(defaultDict['magx'][2])
self.magy = eval(defaultDict['magy'][2])
self.magz = eval(defaultDict['magz'][2])
self.non = eval(defaultDict['non'][2])
self.nul = eval(defaultDict['nul'][2])
self.rad = eval(defaultDict['rad'][2])
self.sdi = eval(defaultDict['sdi'][2])
self.upd = eval(defaultDict['upd'][2])
self.ver = eval(defaultDict['ver'][2])
def rgrd(self, dataIn, aspectSlope = 'no', wrap = 'no'):
""" --------------------------------------------------------------------------------------------------------
routine: rgrd
purpose: Perform one of the following:
1. natural neighbor linear interpolation to a rectilinear grid
2. natural neighbor linear interpolation to a rectilinear grid returning aspects and slopes
3. natural neighbor linear interpolation to a list of points in the single point mode
4. natural neighbor nonlinear interpolation to a rectilinear grid
5. natural neighbor nonlinear interpolation to a rectilinear grid returning aspects and slopes
6. natural neighbor nonlinear interpolation to a list of points in the single point mode
Each of the computations can be single or double precision. The choice is made by examining the precision
in the grid coordinate arrays. In addition, the choice of the single point mode is determined by the
setting of the listOutput parameter in creating an instance of the Natgrid class.
Assuming that the instance, r, has been constructed, the choice between a linear or a nonlinear
computation is made with the control parameter igr. The default calls for a linear calculation. To
call for a nonlinear one, type
r.igr = 1
usage: To interpolate the input data, dataIn, to the output data, dataOut, on the output grid, type
dataOut = r.rgrd(dataIn)
If the output grid is rectangular, it is possible to request the associated aspects and slopes with
dataOut, aspect, slope = r.rgrd(dataIn, aspectSlope = 'yes')
For global latitude-longitude grids, it is also possible to request a wrap in the input grid and the input
data in the longitude direction, assumed to be the yi grid coordinate, (with or without associated aspects
and slopes) with
dataOut, aspect, slope = r.rgrd(dataIn, wrap = 'yes')
or
dataOut, aspect, slope = r.rgrd(dataIn, aspectSlope = 'yes', wrap = 'yes')
definition: rgrd(self, dataIn, aspectSlope = 'no', wrap = 'no'):
--------------------------------------------------------------------------------------------------------"""
if self.nxi != len(dataIn):
msg = 'CANNOT CREATE INSTANCE - The length of the input coordinate grids and the data must be equal'
raise ValueError(msg)
if usefilled == 'yes':
dataIn = numpy.ma.filled(dataIn)
# set the instance values of the parameters in the c code
Natgrid.setInstanceParameters(self)
if wrap == 'yes':
self.xi, self.yi, dataIn = Natgrid.wrapAll(self, self.xi, self.yi, dataIn)
self.nxi = len(self.xi)
self.nyi = len(self.yi)
if dataIn.dtype.char == 'f': # single precision
if self.group == 'double': # change the grid type to match dataIn
self.group = 'single' # change the grid type to match dataIn
self.xi = self.xi.astype(numpy.float32)
self.xo = self.xo.astype(numpy.float32)
self.yi = self.yi.astype(numpy.float32)
self.yo = self.yo.astype(numpy.float32)
else: # double precision
if self.group == 'single': # change the grid type to match dataIn
self.group = 'double' # change the grid type to match dataIn
self.xi = self.xi.astype(numpy.float64)
self.xo = self.xo.astype(numpy.float64)
self.yi = self.yi.astype(numpy.float64)
self.yo = self.yo.astype(numpy.float64)
if self.listOutput == 'no': # output grid is rectangular
t = Natgrid.rgrdPrimary(self, dataIn, aspectSlope)
else: # output grid is a list
t = Natgrid.rgrdSinglePoint(self, dataIn)
return t
def rgrdPrimary(self, dataIn, aspectSlope):
""" #-------------------------------------------------------------------
#
#
#-------------------------------------------------------------------------"""
if aspectSlope != 'no':
self.sdi = 1 # calculate aspects and slopes
# set the instance values of the parameters in the c code
#Natgrid.setInstanceParameters(self)
if dataIn.dtype.char == 'f': # single precision
if debug == 1:
print('In rgrdPrimary calling natgrids')
dataOut, ier = natgridmodule.natgrids(self.nxi, self.xi, self.yi, dataIn, self.nxo, self.nyo, self.xo, self.yo)
if ier != 0:
msg = 'Error in return from natgrids call with -- ' + Natgrid.errorTable(self)[ier]
raise ValueError(msg)
if aspectSlope != 'no':
nxo = self.nxo
nyo = self.nyo
a = numpy.zeros((nxo, nyo), numpy.float32)
for i in range(nxo):
for j in range(nyo):
uvtemp, ier = natgridmodule.getaspects(i, j)
if ier != 0:
msg = 'Error in return from getaspects call with -- ' + Natgrid.errorTable(self)[ier]
raise ValueError(msg)
a[i,j] = uvtemp # return aspect in degrees
s = numpy.zeros((nxo, nyo), numpy.float32)
for i in range(nxo):
for j in range(nyo):
uvtemp, ier = natgridmodule.getslopes(i, j)
if ier != 0:
msg = 'Error in return from getslopes call with -- ' + Natgrid.errorTable(self)[ier]
raise ValueError(msg)
s[i,j] = uvtemp # return slope in degrees
else: # double precision
if debug == 1:
print('In rgrdPrimary calling natgridd')
dataOut, ier = natgridmodule.natgridd(self.nxi, self.xi, self.yi, dataIn, self.nxo, self.nyo, self.xo, self.yo)
if ier != 0:
msg = 'Error in return from natgridd call with -- ' + Natgrid.errorTable(self)[ier]
raise ValueError(msg)
if aspectSlope != 'no':
nxo = self.nxo
nyo = self.nyo
a = numpy.zeros((nxo, nyo), numpy.float64)
for i in range(nxo):
for j in range(nyo):
uvtemp, ier = natgridmodule.getaspectd(i, j)
if ier != 0:
msg = 'Error in return from getaspectd call with -- ' + Natgrid.errorTable(self)[ier]
raise ValueError(msg)
a[i,j] = uvtemp # return aspect in degrees
s = numpy.zeros((nxo, nyo), numpy.float64)
for i in range(nxo):
for j in range(nyo):
uvtemp, ier = natgridmodule.getsloped(i, j)
if ier != 0:
msg = 'Error in return from getsloped call with -- ' + Natgrid.errorTable(self)[ier]
raise ValueError(msg)
s[i,j] = uvtemp # return slope in degrees
# determine whether the order of the returned arrays needs to be reversed
if (self.xreverse == 'yes') or (self.yreverse == 'yes'):
needReverse = 'yes'
else:
needReverse = 'no'
# construct the tuple for the return of what was calculated
if aspectSlope != 'no':
if needReverse == 'yes':
dataOut = Natgrid.reverseData(self, dataOut)
a = Natgrid.reverseData(self, a)
s = Natgrid.reverseData(self, s)
returnList = [dataOut]
returnList.append(a)
returnList.append(s)
return tuple(returnList)
else:
if needReverse == 'yes':
dataOut = Natgrid.reverseData(self, dataOut)
return dataOut
def rgrdSinglePoint(self, dataIn):
""" #-------------------------------------------------------------------
#
#
#-------------------------------------------------------------------------"""
self.sdi = 0 # turn off calculation of aspect and slope
if dataIn.dtype.char == 'f': # single precision
if debug == 1:
print('In rgrdSinglePoint using single precision computation')
natgridmodule.pntinits(self.nxi, self.xi, self.yi, dataIn)
dataOut = numpy.zeros((self.nxo), numpy.float32)
for i in range(self.nxo):
dataOut[i] = natgridmodule.pnts(self.xo[i], self.yo[i])
natgridmodule.pntend()
else: # double precision
if debug == 1:
print('In rgrdSinglePoint using double precision computation')
natgridmodule.pntinitd(self.nxi, self.xi, self.yi, dataIn)
dataOut = numpy.zeros((self.nxo), numpy.float64)
for i in range(self.nxo):
dataOut[i] = natgridmodule.pntd(self.xo[i], self.yo[i])
natgridmodule.pntendd()
return dataOut
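# Single point mode sketch (illustrative; xo_list and yo_list are
# hypothetical names): constructing the instance with list-form output
# coordinates routes rgrd through this method, e.g.
#     r = Natgrid(xi, yi, xo_list, yo_list, listOutput = 'yes')
#     dataOut = r.rgrd(dataIn)   # one value per (xo_list, yo_list) point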
def reverseData(self, data):
#------------------------------------------------------------------------------
#
# purpose: reverse the order of the data if the output grid submitted was not increasing
#
# usage:
#
# returned: parameters
#
#------------------------------------------------------------------------------
if self.xreverse == 'yes':
data = data[::-1,:]
if self.yreverse == 'yes':
data = data[:, ::-1]
return data
def wrapAll(self, lat, lon, data):
#------------------------------------------------------------------------------
#
# purpose: adds a wrap in longitude to the linear form of the input data
#
# usage:
#
# passed: lat -- the latitude array
# lon -- the longitude array, which requires a large wrap for natgrid
# data -- the data at the associated linear set of points
#
# returned: lat, lon and data differing from the input by the wrap
#
#
#------------------------------------------------------------------------------
if debug == 1:
print('entering wrapAll with array lengths: ', len(lat))
# Make a wrapped grid and wrapped data
lonList = list(lon) # make Python lists as intermediate step
latList = list(lat)
dataList = list(data)
maxlon = max(lonList) # set up the wrap ranges in longitude
minlon = min(lonList)
distance = (maxlon - minlon)/4. # wrap first and last quarter of points
minlonLow = minlon
minlonHigh = minlon + distance
maxlonLow = maxlon - distance
maxlonHigh = maxlon
for i in range(len(lonList)): # wrap the Python lists
value = lonList[i]
if (value >= minlonLow) and (value < minlonHigh):
lonList.append(value + 360.)
latList.append(latList[i])
dataList.append(dataList[i])
elif (value > maxlonLow) and (value <= maxlonHigh):
lonList.append(value - 360.)
latList.append(latList[i])
dataList.append(dataList[i])
if self.group == 'single': # single precision
lon = numpy.array(lonList, numpy.float32) # convert to numpy arrays
lat = numpy.array(latList, numpy.float32)
data = numpy.array(dataList, numpy.float32)
else: # double precision
lon = numpy.array(lonList, numpy.float64) # convert to numpy arrays
lat = numpy.array(latList, numpy.float64)
data = numpy.array(dataList, numpy.float64)
if debug == 1:
print('leaving wrapAll with array lengths: ', len(lat))
return lat, lon, data
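# Illustrative wrap (hypothetical values): for lon = [0., 90., 180., 270.]
# the wrap distance is (270. - 0.)/4. = 67.5, so the point at 0. (first
# quarter) is duplicated at 360. and the point at 270. (last quarter) is
# duplicated at -90., along with the matching lat and data values.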
#---------------------------------------------------------------------------------
# **************** Control parameter manipulation functions ********************
#---------------------------------------------------------------------------------
def parameterNames(self):
#------------------------------------------------------------------------------
#
# purpose: produce a list of the natgrid parameters
#
# usage: parameters = parameterNames(self)
#
# passed: self
#
# returned: parameters
#
#------------------------------------------------------------------------------
parameters = ['name', '----', 'adf', 'alg', 'asc', 'bI', 'bJ', 'dup', 'ext', 'hor', 'igr', 'magx',
'magy', 'magz', 'non', 'nul', 'rad', 'sdi', 'upd', 'ver', 'xas', 'yas', 'zas' ]
return parameters
def parameterType(self):
#--------------------------------------------------------------------------------
#
# purpose: produce a dictionary connecting parameter names and their data types
#
# usage: typeDict = parameterType(self)
#
# passed: self
#
# returned: typeDict
#
#---------------------------------------------------------------------------------
typeDict = {
'adf':'int', 'alg':'char', 'asc':'int', 'bI':'float', 'bJ':'float', 'dup':'int', 'ext':'int',
'hor':'float', 'igr':'int', 'magx':'float', 'magy':'float', 'magz':'float', 'non':'int', 'nul':'float',
'rad':'int', 'sdi':'int', 'upd':'int', 'ver':'float', 'xas':'float', 'yas':'float', 'zas':'float' }
return typeDict
def makeDefaultParameterTable(self):
#-----------------------------------------------------------------------------------
#
# purpose: construct the dictionary which is the default control parameters table
#
# usage: makeDefaultParameterTable()
#
# passed: self
#
# returned: parameterDict
#
#----------------------------------------------------------------------------------
parameterDict = {
'name':('type ', ' legal values ',' default values ',' description '),
'----':('-----', '--------------------','-----------------','------------------------------------------------------------'),
'adf': ('int ','0 = no or 1 = yes ',' 0 ','produce data file of algorithmic info for display? (see alg)'),
'alg': ('char ','any file name ',' "nnalg.dat" ','file name for algorithmic display tool (see adf) '),
'asc': ('int ','0 = no or 1 = yes ',' 1 ','is automatic scaling allowed? '),
'bI': ('float','>= 1. ',' 1.5 ','tautness increasing effect of the gradients by increasing bI'),
'bJ': ('float','>= 1. ',' 7.0 ','tautness decreasing breadth of region affected by gradients '),
'dup': ('int ','0 = yes or 1 = no ',' 1 ','are duplicate input coordinates allowed? '),
'ext': ('int ','0 = no or 1 = yes ',' 1 ','is extrapolation allowed outside the convex hull? '),
'hor': ('float','>= 0. ',' -1.0 ','amount of horizontal overlap from outside current region '),
'igr': ('int ','0 = no or 1 = yes ',' 0 ','are gradients to be computed? '),
'magx':('float','> 0. ',' 1.0 ','scale factor for x coordinate values '),
'magy':('float','> 0. ',' 1.0 ','scale factor for y coordinate values '),
'magz':('float','> 0. ',' 1.0 ','scale factor for z coordinate values '),
'non': ('int ','0 = yes or 1 = no ',' 0 ','are interpolated values allowed to be negative? '),
'nul': ('float','any float ',' 0.0 ','value for points outside the convex hull if no extrapolation'),
'rad': ('int ','0 = rad or 1 = deg ',' 0 ','are slopes and aspects returned in radians or degrees? '),
'sdi': ('int ','0 = no or 1 = yes ',' 0 ','are slopes and aspects to be computed? '),
'upd': ('int ','0=N to S or 1=S to N',' 1 ','does the output array go from N to S or S to N? '),
'ver': ('float','>= 0. ',' -1.0 ','amount of vertical overlap from outside current region '),
'xas': ('float','> 0. ',' 0.0 ','scale used by automatic scaling of x in last interpolation '),
'yas': ('float','> 0. ',' 0.0 ','scale used by automatic scaling of y in last interpolation '),
'zas': ('float','> 0. ',' 0.0 ','scale used by automatic scaling of z in last interpolation ') }
return parameterDict
def makeInstanceParameterTable(self):
#----------------------------------------------------------------------------------
#
# purpose: construct the dictionary which is the instance control parameters table
#
# usage: makeInstanceParameterTable(self)
#
# passed: self
#
# returned: parameterDict
#
#----------------------------------------------------------------------------------
parameterDict = {
'name':('type ', ' legal values ',' Values ',' description '),
'----':('-----', '-------------------','----------------','------------------------------------------------------------'),
'adf': ('int ','0 = no or 1 = yes ', eval('self.adf') ,'produce data file of algorithmic info for display? (see alg)'),
'alg': ('char ','any file name ', eval('self.alg') ,'file name for algorithmic display tool (see adf) '),
'asc': ('int ','0 = no or 1 = yes ', eval('self.asc') ,'is automatic scaling allowed? '),
'bI': ('float','>= 1. ', eval('self.bI') ,'tautness increasing effect of the gradients by increasing bI'),
'bJ': ('float','>= 1. ', eval('self.bJ') ,'tautness decreasing breadth of region affected by gradients '),
'dup': ('int ','0 = yes or 1 = no ', eval('self.dup') ,'are duplicate input coordinates allowed? '),
'ext': ('int ','0 = no or 1 = yes ', eval('self.ext') ,'is extrapolation allowed outside the convex hull? '),
'hor': ('float','>= 0. ', eval('self.hor') ,'amount of horizontal overlap from outside current region '),
'igr': ('int ','0 = no or 1 = yes ', eval('self.igr') ,'are gradients to be computed? '),
'magx':('float','> 0. ', eval('self.magx'),'scale factor for x coordinate values '),
'magy':('float','> 0. ', eval('self.magy'),'scale factor for y coordinate values '),
'magz':('float','> 0. ', eval('self.magz'),'scale factor for z coordinate values '),
'non': ('int ','0 = yes or 1 = no ', eval('self.non') ,'are interpolated values allowed to be negative? '),
'nul': ('float','any float ', eval('self.nul') ,'value for points outside the convex hull if no extrapolation'),
'rad': ('int ','0 = rad or 1 = deg ', eval('self.rad') ,'are slopes and aspects returned in radians or degrees? '),
'sdi': ('int ','0 = no or 1 = yes ', eval('self.sdi') ,'are slopes and aspects to be computed? '),
'upd': ('int ','0=N to S or 1=S to N', eval('self.upd') ,'does the output array go from N to S or S to N? '),
'ver': ('float','>= 0. ', eval('self.ver') ,'amount of vertical overlap from outside current region '),
'xas': ('float','> 0. ',' 0.0 ','scale used by automatic scaling of x in last interpolation'),
'yas': ('float','> 0. ',' 0.0 ','scale used by automatic scaling of y in last interpolation'),
'zas': ('float','> 0. ',' 0.0 ','scale used by automatic scaling of z in last interpolation') }
return parameterDict
def printDefaultParameterTable(self):
""" --------------------------------------------------------------------------------------------------------
purpose: print the value of all the parameters
usage: r.printDefaultParameterTable()
where r is an instance of Natgrid
passed: self
returned: None
--------------------------------------------------------------------------------------------------------"""
names = Natgrid.parameterNames(self)
names = names[2:]
parameterDict = Natgrid.makeDefaultParameterTable(self)
for item in names:
items = (item, parameterDict[item][0], parameterDict[item][1], parameterDict[item][2], parameterDict[item][3])
print('%-7.7s %-6.6s %-12.12s %-15.15s %s' % items)
return
def printInstanceParameterTable(self):
""" --------------------------------------------------------------------------------------------------------
purpose: print the value of all the parameters
usage: r.printInstanceParameterTable()
where r is an instance of Natgrid
passed: self
returned: None
--------------------------------------------------------------------------------------------------------"""
names = Natgrid.parameterNames(self)
names = names[2:]
parameterDict = Natgrid.makeInstanceParameterTable(self)
for item in names:
items = (item, parameterDict[item][0], parameterDict[item][1], parameterDict[item][2], parameterDict[item][3])
print('%-7.7s %-6.6s %-12.12s %-7.7s %s' % items)
return
def printInstanceParameters(self):
""" --------------------------------------------------------------------------------------------------------
purpose: print the values of the current natgrid control parameters in c code
usage: r.printInstanceParameters()
where r is an instance of Natgrid
passed: self
returned: None
--------------------------------------------------------------------------------------------------------"""
names = Natgrid.parameterNames(self)
names = names[2:]
typeDict = Natgrid.parameterType(self)
for name in names:
if typeDict[name] == 'int':
print('Currently, %s = %d' % (name, eval('self.' + name)))
elif typeDict[name] == 'char':
print('Currently, %s = %s' % (name, eval('self.' + name)))
elif typeDict[name] == 'float':
print('Currently, %s = %f' % (name, eval('self.' + name)))
elif typeDict[name] == 'double':
print('Currently, %s = %f' % (name, eval('self.' + name)))
return None
def setInstanceParameters(self):
#---------------------------------------------------------------------------
#
# purpose: set the instance values of the current natgrid control parameters in c code
#
# usage: r.setInstanceParameters()
#
# where r is an instance of Natgrid
#
# passed: self
#
# returned: None
#
#----------------------------------------------------------------------------
names = Natgrid.parameterNames(self)
names = names[2:-3] # the -3 eliminates the nonsettable xas, yas and zas
typeDict = Natgrid.parameterType(self)
# set the current values for the natgrid control parameters
for name in names:
if typeDict[name] == 'int':
natgridmodule.seti(name, eval('self.' + name))
elif typeDict[name] == 'char':
natgridmodule.setc(name, eval('self.' + name))
elif typeDict[name] == 'float':
natgridmodule.setr(name, eval('self.' + name))
elif typeDict[name] == 'double':
natgridmodule.setrd(name, eval('self.' + name))
return None
#---------------------------------------------------------------------------------
# ***************************** Error Table ************************************
#---------------------------------------------------------------------------------
def errorTable(self):
""" --------------------------------------------------------------------------------------------------------
purpose: construct the dictionary which provides access to error messages
usage: errorDict = r.errorTable()
where r is an instance of Natgrid
returned: errorDict
--------------------------------------------------------------------------------------------------------"""
errorDict = {
1: 'Insufficient data in gridded region to triangulate',
2: 'Duplicate input data coordinates are not allowed',
3: 'Unable to open file for writing algorithmic data',
4: 'WARNING: The ratio of vertical to horizontal scales too large for gradients. Rescale if gradients required',
5: 'WARNING: The ratio of vertical to horizontal scales too small for gradients. Rescale if gradients required',
6: 'WARNING: The ratio of x to y-axis breadth too extreme. Change proportions or rescale. Gradients disabled',
7: 'Unable to allocate storage for ivector',
8: 'Unable to allocate storage for dvector',
9: 'Unable to allocate storage for **imatrix',
10: 'Unable to allocate storage for imatrix[]',
11: 'Unable to allocate storage for **fmatrix',
12: 'Unable to allocate storage for fmatrix[]',
13: 'Unable to allocate storage for **dmatrix',
14: 'Unable to allocate storage for dmatrix[]',
15: 'Unable to allocate storage for raw data',
16: 'Unable to allocate storage for a simplex',
17: 'Unable to allocate storage for temp',
18: 'Unable to allocate storage for neig',
19: 'Slopes have not been computed, set sdi',
20: 'Row argument out of range',
21: 'Column argument out of range',
22: 'Aspects have not been computed, set sdi',
23: 'Parameter name not known',
24: 'Can not open error file',
25: 'Automatic scaling done - distorted aspects not returned. Rescale data or set magx, magy and magz appropriately',
26: 'Automatic scaling done - distorted slopes not returned. Rescale data or set magx, magy and magz appropriately',
27: 'Coordinate is outside the gridded region for a single point interpolation',
28: 'Can not compute aspects and slopes in conjunction with single point interpolation mode',
29: 'Fortran DOUBLE PRECISION entries not supported on UNICOS',
30: 'Error number out of range' }
return errorDict
#---------------------------------------------------------------------------------
# *************************** magic functions *********************************
#---------------------------------------------------------------------------------
def __setattr__(self, name, value):
#---------------------------------------------------------------------------------
#
# purpose: '__setattr__' is called on every assignment to an instance attribute.
# Consequently, it must put the value in through the __dict__ to avoid
# calling itself and setting up an infinite recursion loop. It sets the
# attribute called name to value in two steps.
# One -- set the global C code control parameter
# Two -- set the instance self data control parameter
#
# usage: x.name = value
#
# passed : name and value
#
# returned: None
#
#---------------------------------------------------------------------------------
typeDict = Natgrid.parameterType(self)
if name in typeDict.keys():
if typeDict[name] == 'int':
natgridmodule.seti(name, value)
self.__dict__[name] = value
elif typeDict[name] == 'char':
natgridmodule.setc(name, value)
self.__dict__[name] = value
elif typeDict[name] == 'float':
natgridmodule.setr(name, value)
self.__dict__[name] = value
elif typeDict[name] == 'double':
natgridmodule.setrd(name, value)
self.__dict__[name] = value
else:
self.__dict__[name] = value
return None
def __getattr__(self, name):
#---------------------------------------------------------------------------------
#
# purpose: '__getattr__' is called only if a referenced attribute can not be found
# in the instance. It gets the attribute from natgridmodule if possible.
#
# usage: x.name -- name is the object and not a string repr
#
# passed : name
#
# returned: x.name
#
#---------------------------------------------------------------------------------
typeDict = Natgrid.parameterType(self)
if name in typeDict.keys():
if typeDict[name] == 'int':
value = natgridmodule.geti(name)
elif typeDict[name] == 'char':
value = natgridmodule.getc(name)
elif typeDict[name] == 'float':
value = natgridmodule.getr(name)
elif typeDict[name] == 'double':
value = natgridmodule.getrd(name)
else:
raise AttributeError(name)
return value
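# For example, with an instance r of Natgrid:
#     r.igr = 1   # __setattr__ stores the value and calls natgridmodule.seti('igr', 1)
#     r.xas       # not instance data, so __getattr__ fetches it via natgridmodule.getr('xas')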
#---------------------------------------------------------------------------------
# *******************************************************************
# **************** end of magic functions **************************
# *******************************************************************
#---------------------------------------------------------------------------------
def printParameterTable():
""" --------------------------------------------------------------------------------------------------------
routine: printParameterTable
purpose: print the control parameter table using the default values from outside the Natgrid class
usage: import nat
nat.printParameterTable()
passed: nothing
returned: None
definition: printParameterTable():
--------------------------------------------------------------------------------------------------------"""
names = ['name', '----', 'adf', 'alg', 'asc', 'bI', 'bJ', 'dup', 'ext', 'hor', 'igr', 'magx',
'magy', 'magz', 'non', 'nul', 'rad', 'sdi', 'upd', 'ver', 'xas', 'yas', 'zas' ]
parameterDict = {
'name':('type ', ' legal values ',' default values ',' description '),
'----':('-----', '--------------------','-----------------','------------------------------------------------------------'),
'adf': ('int ','0 = no or 1 = yes ',' 0 ','produce data file of algorithmic info for display? (see alg)'),
'alg': ('char ','any file name ',' "nnalg.dat" ','file name for algorithmic display tool (see adf) '),
'asc': ('int ','0 = no or 1 = yes ',' 1 ','is automatic scaling allowed? '),
'bI': ('float','>= 1. ',' 1.5 ','tautness increasing effect of the gradients by increasing bI'),
'bJ': ('float','>= 1. ',' 7.0 ','tautness decreasing breadth of region affected by gradients '),
'dup': ('int ','0 = yes or 1 = no ',' 1 ','are duplicate input coordinates allowed? '),
'ext': ('int ','0 = no or 1 = yes ',' 1 ','is extrapolation allowed outside the convex hull? '),
'hor': ('float','>= 0. ',' -1.0 ','amount of horizontal overlap from outside current region '),
'igr': ('int ','0 = no or 1 = yes ',' 0 ','are gradients to be computed? '),
'magx':('float','> 0. ',' 1.0 ','scale factor for x coordinate values '),
'magy':('float','> 0. ',' 1.0 ','scale factor for y coordinate values '),
'magz':('float','> 0. ',' 1.0 ','scale factor for z coordinate values '),
'non': ('int ','0 = yes or 1 = no ',' 0 ','are interpolated values allowed to be negative? '),
'nul': ('float','any float ',' 0.0 ','value for points outside the convex hull if no extrapolation'),
'rad': ('int ','0 = rad or 1 = deg ',' 0 ','are slopes and aspects returned in radians or degrees? '),
'sdi': ('int ','0 = no or 1 = yes ',' 0 ','are slopes and aspects to be computed? '),
'upd': ('int ','0=N to S or 1=S to N',' 1 ','does the output array go from N to S or S to N? '),
'ver': ('float','>= 0. ',' -1.0 ','amount of vertical overlap from outside current region '),
'xas': ('float','> 0. ',' 0.0 ','scale used by automatic scaling of x in last interpolation '),
'yas': ('float','> 0. ',' 0.0 ','scale used by automatic scaling of y in last interpolation '),
'zas': ('float','> 0. ',' 0.0 ','scale used by automatic scaling of z in last interpolation ') }
for item in names:
items = (item, parameterDict[item][0], parameterDict[item][1], parameterDict[item][2], parameterDict[item][3])
print('%-7.7s %-6.6s %-12.12s %-15.15s %s' % items)
return
def printStoredParameters():
""" --------------------------------------------------------------------------------------------------------
routine: printStoredParameters
purpose: print the values of the current natgrid control parameters in the C code. The call
to the method function rgrd will change them to the instance values.
usage: import nat
nat.printStoredParameters()
passed: nothing
returned: None
definition: printStoredParameters():
--------------------------------------------------------------------------------------------------------"""
names = ['name', '----', 'adf', 'alg', 'asc', 'bI', 'bJ', 'dup', 'ext', 'hor', 'igr', 'magx',
'magy', 'magz', 'non', 'nul', 'rad', 'sdi', 'upd', 'ver', 'xas', 'yas', 'zas' ]
names = names[2:]
typeDict = {
'adf':'int', 'alg':'char', 'asc':'int', 'bI':'float', 'bJ':'float', 'dup':'int', 'ext':'int',
'hor':'float', 'igr':'int', 'magx':'float', 'magy':'float', 'magz':'float', 'non':'int', 'nul':'float',
'rad':'int', 'sdi':'int', 'upd':'int', 'ver':'float', 'xas':'float', 'yas':'float', 'zas':'float' }
for item in names:
if typeDict[item] == 'int':
print(' %s = %d' % (item, natgridmodule.geti(item)))
elif typeDict[item] == 'char':
print(' %s = %s' % (item, natgridmodule.getc(item)))
elif typeDict[item] == 'float':
print(' %s = %f' % (item, natgridmodule.getr(item)))
elif typeDict[item] == 'double':
print(' %s = %f' % (item, natgridmodule.getrd(item)))
return None
def checkdim(x, y):
#------------------------------------------------------------------------------------------
#
# purpose: determine whether the coordinate grid is random or monotonically increasing
#
# usage:
#
# returned: x, y, monotonic, xreverse, yreverse
#
#-------------------------------------------------------------------------------------------
xsize = len(x)
if x[0] > x[xsize - 1]:
x = x[::-1]
xreverse = 'yes'
else:
xreverse = 'no'
xmonotonic = 'yes' # monotonic and possibly reversed to make it monotonically increasing
for n in range(1, xsize):
if x[n] < x[n - 1]:
xmonotonic = 'no' # not monotonic so return the original grid
ysize = len(y)
if y[0] > y[ysize - 1]:
y = y[::-1]
yreverse = 'yes'
else:
yreverse = 'no'
ymonotonic = 'yes' # monotonic and possibly reversed to make it monotonically increasing
for n in range(1, ysize):
if y[n] < y[n - 1]:
ymonotonic = 'no' # not monotonic so return the original grid
if xmonotonic == 'yes' and ymonotonic == 'yes': # if both are monotonic the grid is monotonic
monotonic = 'yes'
else:
monotonic = 'no'
if xreverse == 'yes': # return vectors to their original state
x = x[::-1]
xreverse = 'no'
if yreverse == 'yes':
y = y[::-1]
yreverse = 'no'
# note that x and y may be returned reversed as necessary only if monotonic is set to yes
return x, y, monotonic, xreverse, yreverse
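# A minimal sketch of checkdim (arrays are illustrative): a decreasing x
# axis is still monotonic, and per the note above it is returned reversed
# into increasing order with xreverse set accordingly:
#     x, y, monotonic, xrev, yrev = checkdim([3., 2., 1.], [0., 1., 2.])
#     # monotonic == 'yes', xrev == 'yes', yrev == 'no'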
#---------------------------------------------------------------------------------
# ********************************************************************************
# ********************************************************************************
#---------------------------------------------------------------------------------
def sendOutput(output, msg, value = None):
""" #---------------------------------------------------------------------------------
#
# purpose: send the same message to the screen and to a file
#
# passed : output - the open output file, msg - the string, value - optional number
#
# returned: return
#
#---------------------------------------------------------------------------------"""
if value is None:
print(msg)
output.write(msg + '\n')
else:
print(msg, repr(value))
output.write(msg + ' %15.11e\n' % (value,))
return None
def document():
""" #-------------------------------------------------------------------------
#
# purpose: 'document' writes the docstrings contained in the natgrid module
# to a file as documentation for the user
#
# usage: import nat
# nat.document()
#
# passed : nothing
#
# returned: nothing
#
#-------------------------------------------------------------------------"""
import nat
std = sys.stdout # save sys.stdout to allow reassigning later
sys.stdout = open( 'natgrid.doc', 'w')
print('**********************************************************************************************\n')
print('**************************** Overview of the CDAT interface to natgrid ***********************\n')
print('**********************************************************************************************\n')
print(nat.__doc__)
print()
print()
print(' ******************** Instructions for use of the natgrids function **************************')
print(natgridmodule.natgrids.__doc__)
print()
print(' ******************** Instructions for use of the seti function **************************')
print(natgridmodule.seti.__doc__)
print()
print(' ******************** Instructions for use of the geti function **************************')
print(natgridmodule.geti.__doc__)
print()
print(' ******************** Instructions for use of the setr function **************************')
print(natgridmodule.setr.__doc__)
print()
print(' ******************** Instructions for use of the getr function **************************')
print(natgridmodule.getr.__doc__)
print()
print(' ******************** Instructions for use of the setc function **************************')
print(natgridmodule.setc.__doc__)
print()
print(' ******************** Instructions for use of the getc function **************************')
print(natgridmodule.getc.__doc__)
print()
print(' ******************** Instructions for use of the getaspects function **************************')
print(natgridmodule.getaspects.__doc__)
print()
print(' ******************** Instructions for use of the getslopes function **************************')
print(natgridmodule.getslopes.__doc__)
print()
print(' ******************** Instructions for use of the pntinits function **************************')
print(natgridmodule.pntinits.__doc__)
print()
print(' ******************** Instructions for use of the pnts function **************************')
print(natgridmodule.pnts.__doc__)
print()
print(' ******************** Instructions for use of the pntend function **************************')
print(natgridmodule.pntend.__doc__)
print()
print(' ******************** Instructions for use of the natgridd function **************************')
print(natgridmodule.natgridd.__doc__)
print()
print(' ******************** Instructions for use of the setrd function **************************')
print(natgridmodule.setrd.__doc__)
print()
print(' ******************** Instructions for use of the getrd function **************************')
print(natgridmodule.getrd.__doc__)
print()
print(' ******************** Instructions for use of the getaspectd function **************************')
print(natgridmodule.getaspectd.__doc__)
print()
print(' ******************** Instructions for use of the getsloped function **************************')
print(natgridmodule.getsloped.__doc__)
print()
print(' ******************** Instructions for use of the pntinitd function **************************')
print(natgridmodule.pntinitd.__doc__)
print()
print(' ******************** Instructions for use of the pntd function **************************')
print(natgridmodule.pntd.__doc__)
print()
print(' ******************** Instructions for use of the pntendd function **************************')
print(natgridmodule.pntendd.__doc__)
print()
sys.stdout = std
return None
def sendmsg(msg, value1 = None, value2 = None):
""" #---------------------------------------------------------------------------------
#
# purpose: send the same message to the screen
#
# passed : msg - the string
# value1, value2 - optional numbers associated with the string
#
# returned: return
#
#---------------------------------------------------------------------------------"""
print('*******************************************************************')
if value1 is None:
print(msg)
elif value2 is None:
print(msg, value1)
else:
print(msg, value1, value2)
print('*******************************************************************')
return None
def help(choice = None):
import nat
if choice is None: # get instructions for use of help
print(""" ----------------------------------------------------------------------------------------
INSTRUCTIONS ON USE OF THE OBJECT ORIENTED INTERFACE TO THE NATGRID PACKAGE FROM NGMATH
This module is built as one class, Natgrid, which sports a single method called rgrd.
To get instructions on making an instance of Natgrid, type
nat.help('Natgrid')
To get instructions on using the control parameters, type
nat.help('parameters')
To print the table describing the control parameters, type
nat.help('table')
To get instructions on performing a regridding, type
nat.help('regrid')
To get instructions on calculating slopes and aspects, type
nat.help('aspectSlope')
To get instructions on using the single point computational mode, type
nat.help('singlePoint')
INSTRUCTIONS ON USE OF ORIGINAL NATGRID PACKAGE FROM NGMATH
This module is built as an interface to natgridmodule.so which exports the following functions:
Single precision procedures:
natgrids - primary function for gridding.
seti - set int parameter values.
geti - retrieve values for int parameters.
setr - set float parameter values.
getr - retrieve values for float parameters
setc - set char parameter values.
getc - retrieve values for char parameters.
getaspects - get aspect values, if calculated.
getslopes - get slope values, if calculated.
pntinits - initiate single point mode.
pnts - interpolate at a single point.
pntend - terminate single point mode.
Double precision procedures:
natgridd - primary function for gridding.
setrd - set float parameter values.
getrd - retrieve values for float parameters
getaspectd - get aspect values, if calculated.
getsloped - get slope values, if calculated.
pntinitd - initiate single point mode.
pntd - interpolate at a single point.
pntendd - terminate single point mode.
It is feasible to use these functions directly without this module. Information is available
through their docstrings. For example, to get the docstring for the routine natgrids, follow this
procedure at the Python prompt:
import natgridmodule
print(natgridmodule.natgrids.__doc__)
or simply type
nat.help('natgrids')
------------------------------------------------------------------------------------------------------""")
elif choice == 'Natgrid':
print(""" ----------------------------------------------------------------------------------------
To make an instance, r, type:
import nat
r = nat.Natgrid(xi, yi, xo, yo)
or
r = nat.Natgrid(xi, yi, xo, yo, listOutput = 'yes')
where xi, yi and xo, yo are the input and output grid coordinate arrays. The optional listOutput is
set to anything except 'no' if xo, yo are in list format as explained below.
The input grid must be organized in a list format always. The size of the xi array and the yi array are
necessarily equal. For example, if there are n randomly spaced input data points, there
are n values in xi and n values in yi.
There are two possible formats for the output grid. The output grid coordinate arrays may be a list like
the input array or it may be a rectangular grid. The choice between the two possibilities is made according
to requirements in subsequent calls to the method function. The first choice is required if the subsequent
call is to the single point mode interpolation. The list can have one or more points. Of course, the list
could describe a rectangular grid. For example, a rectangular grid with 10 x values and 20 y values can be
rewritten in list form with 200 x values and 200 y values. However, this form requires calling the slower
single point interpolator. The second choice is most efficient for the basic interpolation to a rectangular
output grid. The output grid must be monotonic but need not be equally spaced.
The grid coordinate arrays can be single precision (numpy.float32) or double precision (numpy.float64). The
decision on whether to call for a single or a double precision computation subsequently is made by looking at
the type of these arrays.
--------------------------------------------------------------------------------------------------------------------""")
elif choice == 'parameters':
print(""" ----------------------------------------------------------------------------------------
In the absence of an instance of the class Natgrid, a description of the control parameters can be found
by typing
import nat
nat.printParameterTable()
The control parameters are easily available within the class. First make an instance, r, type:
import nat
r = nat.Natgrid(xi, yi, xo, yo)
To change a setting, type the new value. For example, to set igr to 1, type
r.igr = 1
To find an individual value, type the name. For example, to examine the value of hor, type
r.hor
To check the settings type
r.printInstanceParameterTable() -- prints the table with values and a description of the parameters
used in subsequent calls to the method function rgrd
or
r.printInstanceParameters() -- prints a list of the parameter values used in subsequent calls to
the rgrd method
nat.printStoredParameters() -- prints the parameters in memory which may differ from the above if the
user has made more than one instance of the Natgrid class.
--------------------------------------------------------------------------------------------------------------------""")
elif choice == 'table':
printParameterTable()
#-----------------------------------------------------------------------------------------------------
elif choice == 'regrid':
print(""" ----------------------------------------------------------------------------------------
natgrid is restricted to two dimensions. Consequently, it is the user's responsibility to reduce the processing
of higher dimensional data to a sequence of calls using only two dimensional data. A description of the basic
natural neighbor linear interpolation and nonlinear interpolations follows.
Make an instance, r, with:
import nat
r = nat.Natgrid(xi, yi, xo, yo)
where the xo, yo grid is rectilinear as explained in the help choice 'Natgrid'.
r.igr = 1 -- in order to set up the computation for nonlinear interpolation. The default value
for igr calls for a linear interpolation.
Then call the primary interpolation computation to regrid the input data, dataIn, on the grid (xi, yi) to
the output data, dataOut, on the grid (xo, yo), with
dataOut = r.rgrd( dataIn )
When dealing with global data described on a latitude-longitude grid, it is also possible to request a wrap
in the input grid and the input data in the longitude direction, assumed to be the yi grid coordinate, with
dataOut = r.rgrd(dataIn, wrap = 'yes')
The computation is either single or double precision as determined by the precision submitted in making
the instance.
--------------------------------------------------------------------------------------------------------------------""")
elif choice == 'aspectSlope':
print(""" ----------------------------------------------------------------------------------------
natgrid is restricted to two dimensions. Consequently, it is the user's responsibility to reduce the processing
of higher dimensional data to a sequence of calls using only two dimensional data. A description of the basic
natural neighbor linear and nonlinear interpolations returning the aspect and the slope at the output grid
points follows.
First make an instance, r, with:
import nat
r = nat.Natgrid(xi, yi, xo, yo)
where the xo, yo grid is rectilinear as explained in the help choice 'Natgrid'.
r.igr = 1 -- in order to set up the computation for nonlinear interpolation. The default value
for igr calls for a linear interpolation.
Then call the primary interpolation computation to regrid the input data, dataIn, on the grid (xi, yi) to
the output data, dataOut, on the grid (xo, yo), while asking for the aspect and the slope on this output grid, with
dataOut, a, s = r.rgrd( dataIn, aspectSlope = 'yes' )
where a is the aspect, the direction of the steepest descent in degrees measured from 'north' and s is the
slope in degrees measured from the horizontal. Necessarily, these are arrays aligned with the rectilinear
output grid, xo, yo.
It is also possible to request a wrap in the input grid and the input data in the longitude direction, assumed
to be the yi grid coordinate, by adding a keyword as
dataOut, a, s = r.rgrd( dataIn, aspectSlope = 'yes', wrap = 'yes' )
The computation is either single or double precision as determined by the precision submitted in making
the instance.
--------------------------------------------------------------------------------------------------------------------""")
elif choice == 'singlePoint':
print(""" ----------------------------------------------------------------------------------------
natgrid is restricted to two dimensions. Consequently, it is the user's responsibility to reduce the processing
of higher dimensional data to a sequence of calls using only two dimensional data. A description of the single
point natural neighbor linear and nonlinear interpolations follows.
First make an instance, r, with:
import nat
r = nat.Natgrid(xi, yi, xo, yo, listOutput = 'yes')
where the xo, yo output grid is in the list form (not a rectangular output grid) as explained
in the help choice 'Natgrid'.
r.igr = 1 -- in order to set up the computation for nonlinear interpolation. The default value
for igr calls for a linear interpolation.
Then call the single point mode interpolation computation to regrid the input data, dataIn, on the grid (xi, yi)
to the output data, dataOut, on the grid (xo, yo), type
dataOut = r.rgrd( dataIn )
The single point mode is slow, but it provides a choice where the interpolation is to one or more points
rather than to a complete rectangular grid.
The computation is either single or double precision as determined by the precision submitted in making
the instance.
--------------------------------------------------------------------------------------------------------------------""")
elif choice == 'natgrids':
print(natgridmodule.natgrids.__doc__)
elif choice == 'seti':
print(natgridmodule.seti.__doc__)
elif choice == 'geti':
print(natgridmodule.geti.__doc__)
elif choice == 'setr':
print(natgridmodule.setr.__doc__)
elif choice == 'getr':
print(natgridmodule.getr.__doc__)
elif choice == 'setc':
print(natgridmodule.setc.__doc__)
elif choice == 'getc':
print(natgridmodule.getc.__doc__)
elif choice == 'getaspects':
print(natgridmodule.getaspects.__doc__)
elif choice == 'getslopes':
print(natgridmodule.getslopes.__doc__)
elif choice == 'pntinits':
print(natgridmodule.pntinits.__doc__)
elif choice == 'pnts':
print(natgridmodule.pnts.__doc__)
elif choice == 'pntend':
print(natgridmodule.pntend.__doc__)
elif choice == 'natgridd':
print(natgridmodule.natgridd.__doc__)
elif choice == 'setrd':
print(natgridmodule.setrd.__doc__)
elif choice == 'getrd':
print(natgridmodule.getrd.__doc__)
elif choice == 'getaspectd':
print(natgridmodule.getaspectd.__doc__)
elif choice == 'getsloped':
print(natgridmodule.getsloped.__doc__)
elif choice == 'pntinitd':
print(natgridmodule.pntinitd.__doc__)
elif choice == 'pntd':
print(natgridmodule.pntd.__doc__)
elif choice == 'pntendd':
print(natgridmodule.pntendd.__doc__)
else:
print('Your request is not in help. The help choices are: ')
print('Natgrid, parameters, table, regrid, aspectSlope, singlePoint, natgrids, seti, geti, setr, getr, setc, getc, getaspects, getslopes, pntinits, pnts, pntend, natgridd, setrd, getrd, getaspectd, getsloped, pntinitd, pntd, pntendd')
return None
PyBDSF-1.11.0/bdsf/opts.py 0000664 0000000 0000000 00000312107 14650706641 0015113 0 ustar 00root root 0000000 0000000 """PyBDSF options
Options are essentially user-controllable parameters passed into PyBDSF
operations, and allow for end-users to control the exact details of how
calculations are done.
The doc string should give a short description of the option, followed by a
line break ('\n') then a long, detailed description. The short description can
then be split off using "str(v.doc()).split('\n')[0]".
The group string can be used to group suboptions under a parent option. The
group string should be the name of the parent option, which must be Bool
(except for the "hidden" group, which will suppress listing of the option; the
option can still be set as normal).
In general it's better to specify newly added options directly in this file, so
one can oversee them all. But it's also possible to extend it at run-time, and
under some circumstances (e.g. pybdsf installed system-wide, and there is no
way to modify this file) this might be the only option to do so. An example of
such extension follows:
==== file newmodule.py ====
from image import Op
class Op_new_op(Op):
## do something useful here
## we need to add option my_new_opt
pass
## this will extend Opts class at runtime and ensure that
## type-checking works properly.
Opts.my_new_opt = Float(33, doc="docstring")
"""
from __future__ import absolute_import
from .tc import Int, Float, Bool, String, Tuple, Enum, \
Option, NArray, Instance, tInstance, List, Any, TCInit, tcError
try:
# For Python 2
basestring = basestring
except NameError:
basestring = str
class Opts(object):
"""Class Opts -- user-controllable parameters."""
advanced_opts = Bool(False,
doc = "Show advanced options")
atrous_do = Bool(False,
doc = "Decompose Gaussian residual image "\
"into multiple scales\n"\
"If True, then the Gaussian-subtracted "\
"residual image is decomposed into multiple "\
"scales using an a-trous wavelet transform.\n"\
"This option is most useful when there is "\
"significant extended emission in the image. "\
"If the image contains only point sources, "\
"it is best to set this to Fasle.")
beam = Option(None, Tuple(Float(), Float(), Float()),
doc = "FWHM of restoring beam. Specify as (maj, "\
"min, pos ang E of N) in degrees. "\
"E.g., beam = (0.06, 0.02, 13.3). None => "\
"get from header\n"\
"For more than one channel, use the beam_spectrum "\
"parameter. "\
"If the beam is not given "\
"by the user, then it is looked for in the "\
"image header. If not found, then an error "\
"is raised. PyBDSF will not work without "\
"knowledge of the restoring beam.")
filename = String(doc = "Input image file name\n"\
"The input image can be a FITS or CASA 2-, "\
"3-, or 4-D cube.")
flagging_opts = Bool(False,
doc = "Show options for Gaussian flagging\n"\
"Gaussians which are likely in error "\
"(e.g., very small or very large Gaussians) "\
"are flagged according to a number of criteria, "\
"which the user may control. "\
"Flags are cumulative (i.e., if multiple "\
"flagging criteria are met, the respective "\
"flag values are added to produce the final "\
"flag value). Flag values are defined as follows:\n"\
"If flag_minsnr: flag + 1\n"\
"If flag_maxsnr: flag + 2\n"\
"If flag_bordersize: flag + 4 (x) or 8 (y)\n"\
"If flag_maxsize_isl: flag + 16 (x) or 32 (y)\n"\
"If flag_maxsize_bm: flag + 64\n"\
"If flag_minsize_bm: flag + 128\n"\
"If flag_maxsize_fwhm: flag + 256")
frequency = Option(None, Float(),
doc = "Frequency in Hz of input image. "\
"E.g., frequency = 74e6. None => get from header.\n"\
"For more than one channel, use the frequency_sp "\
"parameter. If the frequency is not given "\
"by the user, then it is looked for in the "\
"image header. If not found, then an error "\
"is raised. PyBDSF will not work without "\
"knowledge of the frequency.")
interactive = Bool(False,
doc = "Use interactive mode\n"\
"In interactive mode, plots are displayed at "\
"various stages of the processing so that "\
"the user may check the progress of the fit.\n"\
"First, plots of the rms and mean background images are "\
"displayed along with the islands found, before "\
"fitting of Gaussians takes place. The user should "\
"verify that the islands and maps are reasonable "\
"before preceding.\n"\
"Next, if atrous_do is True, the fits to each "\
"wavelet scale are shown. The wavelet fitting "\
"may be truncated at the current scale if "\
"desired.\nLastly, the final results are shown.")
mean_map = Enum('default', 'zero', 'const', 'map',
doc = "Background mean map: 'default' => calc whether "\
"to use or not, 'zero' => 0, 'const' => "\
"clipped mean, 'map' => use 2-D map\n"\
"This parameter determines "\
"how the background mean map is computed "\
"and how it is used further.\nIf 'const', then "\
"the value of the clipped "\
"mean of the entire image (set by the kappa_clip "\
"option) is used as the "\
"background mean map.\nIf 'zero', then a value "\
"of zero is used.\nIf 'map', then "\
"the 2-dimensional mean map is computed and used. "\
"The resulting mean map is largely determined by "\
"the value of the rms_box parameter (see the "\
"rms_box parameter for more information).\nIf "\
"'default', then PyBDSF will attempt to "\
"determine automatically whether to use "\
"a 2-dimensional map or a constant one as "\
"follows. First, "\
"the image is assumed to be confused if "\
"bmpersrc_th < 25 or the ratio of the "\
"clipped mean to rms (clipped mean/clipped rms) "\
"is > 0.1, else the image is not confused. "\
"Next, the mean map is checked to "\
"see if its spatial variation is significant. If "\
"so, then a 2-D map is used and, if not, "\
"then the mean map is set to either 0.0 or a "\
"constant depending on whether the image is "\
"thought to be confused or not.\nGenerally, "\
"'default' works well. However, if there is "\
"significant extended emission in the image, "\
"it is often necessary to force the use of a "\
"constant mean map using either 'const' or "\
"'mean'.")
multichan_opts = Bool(False,
doc = "Show options for multi-channel "\
"images")
output_opts = Bool(False,
doc = "Show output options")
polarisation_do = Bool(False,
doc = "Find polarisation properties\n"\
"First, if pi_fit = True, source detection is done on the polarized intensity "\
"(PI) image and sources not detected in "\
"the Stokes I image are identified. The thresholds for island "\
"detection can be controlled using the pi_thresh_isl and "\
"pi_thresh_pix parameters.\n"\
"Next, for any such PI-only sources, "\
"plus all sources detected in the Stokes I image, "\
"the flux densities in each of the other Stokes images are found. "\
"Flux densities are calculated by fitting for the normalization of the Gaussians "\
"found from the Stokes I or PI images."\
"Lastly, the polarisation fraction and angle for each source "\
"are calculated.\n"\
"For linearly polarised emission, the signal and noise "\
"add vectorially, giving a Rice distribution "\
"(Vinokur 1965) instead of a Gaussian one. To correct "\
"for this, a bias is estimated and removed from the "\
"polarisation fraction using the same method used for the "\
"NVSS catalog (see ftp://ftp.cv.nrao.edu/pub/nvss/catalog.ps). "\
"Errors on the linear and total polarisation fractions "\
"and polarisation angle are estimated using the debiased "\
"polarised flux density and standard error propagation. See "\
"Sparks & Axon (1999) for a more detailed treatment.")
psf_vary_do = Bool(False,
doc = "Calculate PSF variation across image")
rm_do = Bool(False,
doc = "Find rotation measure properties",
group = 'hidden')
rms_box = Option(None, Tuple(Int(), Int()),
doc = "Box size, step size for rms/mean map "\
"calculation. Specify as (box, step) in "\
"pixels. E.g., rms_box = (40, 10) => box "\
"of 40x40 pixels, step of 10 pixels. "\
"None => calculate inside program\n"\
"This is a tuple of two integers and is probably the "\
"most important input parameter for PyBDSF. The first "\
"integer, boxsize, is the size of the 2-D sliding box "\
"for calculating the rms and mean over the entire image. "\
"The second, stepsize, is the number of pixels by which "\
"this box is moved for the next measurement. If None, "\
"then suitable values are calculated internally.\n"\
"In general, it is best to choose a box size that "\
"corresponds to the typical scale of artifacts in the "\
"image, such as those that are common around bright "\
"sources. Too small of a box size will effectively "\
"raise the local rms near a source so much that a "\
"source may not be fit at all; too large a box size "\
"can result in underestimates of the rms due to "\
"oversmoothing. A step size of 1/3 "\
"to 1/4 of the box size usually works well.\n"\
"If adaptive_rms_box is True, the rms_box parameter "\
"sets the large-scale box size that is used far "\
"from bright sources.")
rms_map = Enum(None, True, False,
doc = "Background rms map: True => "\
"use 2-D rms map; False => use constant rms; " \
"None => calculate inside program\n"\
"If True, then the 2-D background rms image is "\
"computed and used. If False, then a constant value is "\
"assumed (use rms_value to force the rms to a specific "\
"value). If None, then the 2-D rms image is calculated, and "\
"if the variation is statistically significant then it "\
"is taken, else a constant value is assumed. The rms image "\
"used for each channel in computing the spectral index "\
"follows what was done for the channel-collapsed image.\n"\
"Generally, None works well. However, if there is "\
"significant extended emission in the image, "\
"it is often necessary to force the use of a "\
"constant rms map by setting rms_map = False.")
shapelet_do = Bool(False,
doc = "Decompose islands into shapelets\n"\
"If True, then each island is decomposed using shapelets, "\
"However, at the moment, output of the shapelet parameters "\
"is not supported.")
spectralindex_do = Bool(False,
doc = "Calculate spectral indices (for multi-channel image)\n"\
"If True, then for a multi-channel image, spectral indices "\
"are calculated for all Gaussians and sources which are "\
"detected in the channel-collapsed image.\nFrequencies "\
"can be specified manually using frequency_sp.")
thresh = Enum(None, "hard", "fdr",
doc = "Type of thresholding: " \
"None => calculate inside program, 'fdr' => use "\
"false detection rate algorithm, 'hard' => "\
"use sigma clipping\nIf thresh = 'hard', "\
"then a hard threshold is assumed, given by thresh_pix. "\
"If thresh = 'fdr', then the False Detection Rate algorithm of "\
"Hancock et al. (2002) is used to calculate the value of "\
"thresh_pix. If thresh is None, then the false detection "\
"probability is first calculated, and if the number of false "\
"source pixels is more than fdr_ratio times the estimated "\
"number of true source pixels, then the 'fdr' threshold "\
"option is chosen, else the 'hard' threshold option is "\
"chosen.")
thresh_isl = Float(3,
doc = "Threshold for the island boundary in number of sigma "\
"above the mean. Determines extent of island used for fitting\n"\
"This parameter determines the region to which fitting "\
"is done. A higher value will produce smaller islands, "\
"and hence smaller regions that are considered in the "\
"fits. A lower value will produce larger islands. "\
"Use the thresh_pix parameter to set the detection "
"threshold for sources. Generally, thresh_isl should "\
"be lower than thresh_pix.\n"
"Only regions "\
"above the absolute threshold will be used. "\
"The absolute threshold is calculated as abs_thr = "\
"mean + thresh_isl * rms. Use the mean_map "\
"and rms_map parameters to control the way "\
"the mean and rms are determined.")
thresh_pix = Float(5,
doc = "Source detection threshold: threshold for the "\
"island peak in number of sigma "\
"above the mean. If "\
"false detection rate thresholding is used, "\
"this value is ignored and thresh_pix is "\
"calculated inside the program\n"\
"This parameter sets the overall detection threshold "\
"for islands (i.e. thresh_pix = 5 will find all sources "\
"with peak flux densities per beam of 5-sigma or greater). Use the "\
"thresh_isl parameter to control how much of each island "\
"is used in fitting. Generally, thresh_pix should be larger "\
"than thresh_isl.\n"
"Only islands "\
"with peaks above the absolute threshold will be used. "\
"The absolute threshold is calculated as abs_thr = "\
"mean + thresh_pix * rms. Use the mean_map "\
"and rms_map parameters to control the way "\
"the mean and rms are determined.")
adaptive_rms_box = Bool(False,
doc = "Use adaptive rms_box when determining rms and "\
"mean maps\n"\
"If True, the rms_box is reduced in size near "\
"bright sources and enlarged far from them. "\
"This scaling attempts to account for possible "\
"strong artifacts around bright sources while "\
"still acheiving accurate background rms and "\
"mean values when extended sources are present.\n"\
"This option is generally slower than non-"\
"adaptive scaling.\n"\
"Use the rms_box parameter to set the large-"\
"scale rms_box and the rms_box_bright parameter "\
"to set the small-scale rms_box. The threshold "\
"for bright sources can be set with the "\
"adaptive_thresh parameter.")
#--------------------------------ADVANCED OPTIONS--------------------------------
split_isl = Bool(True,
doc = "Split island if it is too large, has a large "\
"convex deficiency and it opens well.\n"\
"If it doesn't open well, then isl.mean = "\
"isl.clipped_mean, and is taken for fitting. "\
"Splitting, if needed, is always done for "\
"wavelet images",
group = 'advanced_opts')
splitisl_maxsize = Float(50.0,
doc = "If island size in beam area is more than this, "\
"consider splitting island. Min value is 50",
group = 'advanced_opts')
splitisl_size_extra5 = Float(0.1,
doc = "Fraction of island area for 5x5 opening to "\
"be used.\nWhen deciding to split an island, "\
"if the smallest extra sub islands while opening "\
"with a 5x5 footprint add up to at least this "\
"fraction of the island area, and if the largest "\
"sub island is less than 75% the size of the "\
"largest when opened with a 3x3 footprint, a "\
"5x5 opening is taken.",
group = 'hidden')
splitisl_frac_bigisl3 = Float(0.8,
doc = "Fraction of island area for 3x3 opening to "\
"be used.\nWhen deciding to split an island, "\
"if the largest sub island when opened with a "\
"3x3 footprint is less than this fraction of the "\
"island area, then a 3x3 opening is considered.",
group = 'hidden')
peak_fit = Bool(True,
doc = "Find and fit peaks of large islands iteratively\n"\
"When enabled, PyBDSF will identify and "\
"fit peaks of emission in "\
"large islands iteratively (the size of islands for which "\
"peak fitting is done is controlled with the "\
"peak_maxsize option), using a maximum of 10 "\
"Gaussians per iteration. Enabling this option will "\
"generally speed up fitting, but may result in "\
"somewhat higher residuals.",
group = 'advanced_opts')
peak_maxsize = Float(30.0,
doc = "If island size in beam area is more than this, "\
"attempt to fit peaks iteratively (if "\
"peak_fit = True). Min value is 30",
group = 'advanced_opts')
fdr_alpha = Float(0.05,
doc = "Alpha for FDR algorithm for thresholds\n"\
"If thresh is 'fdr', then the estimate of fdr_alpha "\
"(see Hancock et al. 2002 for details) is stored "\
"in this parameter.",
group = "advanced_opts")
fdr_ratio = Float(0.1,
doc = "For thresh = None; " \
"if #false_pix / #source_pix < fdr_ratio, " \
"thresh = 'hard' else thresh = 'fdr'",
group = "advanced_opts")
kappa_clip = Option(None, Float(),
doc = "Kappa for clipped mean and rms. None => calculate "\
"inside program\n"\
"The value of this is the factor used for Kappa-alpha "\
"clipping, as in AIPS. For an image with few source "\
"pixels added on to (Gaussian) noise pixels, the "\
"dispersion of the underlying noise will need to be "\
"determined. This is done iteratively, whereby the actual "\
"dispersion is first computed. Then, all pixels whose "\
"value exceeds kappa clip times this rms are excluded and "\
"the rms is computed again. This process is repeated until "\
"no more pixels are excluded. For well behaved noise "\
"statistics, this process will converge to the true noise "\
"rms with a value for this parameter ~3-5. A large "\
"fraction of source pixels, less number of pixels in total, "\
"or significant non-gaussianity of the underlying noise "\
"will all lead to non-convergence.",
group = "advanced_opts")
bmpersrc_th = Option(None, Float(),
doc = "Theoretical estimate of number of beams " \
"per source. None => calculate inside program\n"\
"Its value is calculated inside the program if its "\
"value is given as None as N/[n*(alpha-1)], where N "\
"is the total number of pixels in the image, n is "\
"the number of pixels in the image whose value is "\
"greater than 5 times the clipped rms, and alpha is "\
"the slope of the differential source counts "\
"distribution, assumed to be 2.5. The value of "\
"bmpersrc_th is used to estimate the average separation "\
"in pixels between two sources, which in turn is used "\
"to estimate the boxsize for calculating the background "\
"rms and mean images. In addition, if the value is below "\
"25 (or the ratio of clipped mean to clipped rms of the "\
"image is greater than 0.1), the image is assumed to be "\
"confused and hence the background mean is put to zero.",
group = "advanced_opts")
spline_rank = Enum(3, 1, 2, 4,
doc = "Rank of the interpolating function for rms/mean map\n"\
"This is an integer and is the order of the interpolating "\
"spline function to interpolate the background rms and "\
"mean map over the entire image.",
group = "advanced_opts")
minpix_isl = Option(None, Int(),
doc = "Minimum number of pixels with emission per island "\
"(minimum is 6 pixels). "\
"None -> calculate inside program\n"\
"This is an integer and is the minimum number of pixels "\
"in an island for "\
"the island to be included. If None, the number of "\
"pixels is set to 1/3 of the area of an unresolved source "\
"using the beam and pixel size information in the "\
"image header. It is set to 6 pixels for all "\
"wavelet images.",
group = "advanced_opts")
maxpix_isl = Option(None, Int(),
doc = "Maximum number of pixels with emission per island. "\
"None -> no limit\n"\
"This is an integer and is the maximum number of pixels "\
"in an island for the island to be included.",
group = "advanced_opts")
rms_value = Option(None, Float(),
doc = "Value of constant rms in "\
"Jy/beam to use if rms_map = False. "\
"None => calculate inside program",
group = "advanced_opts")
aperture = Option(None, Float(),
doc = "Radius of aperture in pixels inside which aperture fluxes are measured "\
"for each source. None => no aperture fluxes measured\n" \
"This is a float and sets the radius (in pixels) inside "\
"which the aperture flux is measured for each source. "\
"Depending on the value of aperture_posn, the aperture is centered either "\
"on the centroid or the peak of the source. Errors are calculated "\
"from the mean of the rms map inside the aperture.",
group = "advanced_opts")
aperture_posn = Enum('centroid', 'peak',
doc = "Position the aperture (if aperture is not None) on: "\
"'centroid' or 'peak' of the source.\n"\
"This parameter determines how the aperture is "\
"positioned relative to the source. If 'centroid', "\
"the aperture is centered on the source centroid. If "\
"'peak', the aperture is centered on the source peak. "\
"If aperture=None (i.e., no aperture radius is specified), "\
"this parameter is ignored.",
group = "advanced_opts")
src_ra_dec = Option(None, List(Tuple(Float(), Float())),
doc = "List of source positions at which fitting is done. "\
"E.g., src_ra_dec = [(197.1932, 47.9188), (196.5573, 42.4852)].\n"\
"This parameter defines the center positions at which "\
"fitting will be done. The size of the region used for "\
"the fit is given by the src_radius_pix parameter. "\
"Positions should be given as a list of RA and Dec, "\
"in degrees, one set per source. These positions will "\
"override the normal island finding module.",
group = "advanced_opts")
src_radius_pix = Option(None, Float(),
doc = "Radius of the island (if src_ra_dec is not None) in pixels. "\
"None => radius is set to the FWHM of the beam major axis.\n"\
"This parameter determines the size of the region used "\
"to fit the source positions specified by the src_ra_dec "\
"parameter.",
group = "advanced_opts")
ini_gausfit = Enum('default', 'simple', 'nobeam',
doc = "Initial guess for Gaussian "\
"parameters: 'default', 'simple', or 'nobeam'\n"\
"These are three different ways of estimating the initial "\
"guess for fitting of Gaussians to an island of emission.\n"\
"If 'default', the number of Gaussians is "\
"estimated from the number of peaks in the island. An initial "\
"guess is made for the parameters of these Gaussians before "\
"final fitting is done. This method should produce the best "\
"results when there are no large sources present.\n"\
"If 'simple', the maximum allowable number of Gaussians per island "\
"is set to 25, and no initial guess for the gaussian parameters "\
"is made.\nLastly, the 'nobeam' method is similar to the "\
"'default' method, but no information about the beam is "\
"used. This method is best used when source sizes are "\
"expected to be very different from the beam and is generally "\
"slower than the other methods.\n"\
"For wavelet images, the value used for the original "\
"image is used for wavelet order j <= 3 and 'nobeam' for "\
"higher orders.",
group = "advanced_opts")
ini_method = Enum('intensity', 'curvature',
doc = "Method by which inital guess for fitting of Gaussians "\
"is chosen: 'intensity' or 'curvature'\n"\
"If 'intensity', the inital guess described in the help for "\
"the ini_gausfit parameter is calculated using the intensity "\
"(ch0) image. If 'curvature', it is done using the curvature "\
"map (see Hancock et al. 2012).",
group = "advanced_opts")
fix_to_beam = Bool(False,
doc = "Fix major and minor axes and PA of Gaussians to beam?\n"\
"If True, then during fitting the major and minor axes "\
"and PA of the Gaussians are fixed to the beam. Only the "\
"amplitude and position are fit. If False, all parameters "\
"are fit.\n"\
"Note that when this option is activated, as a "\
"consequence of using fewer free parameters, the estimated errors on the "\
"peak and total flux densities are a factor of sqrt(2) lower "\
"compared to the case in which all parameters are fit (see "\
"Condon 1997). Additionally, the reported errors on the major "\
"and minor axes and the PA are zero.",
group = "advanced_opts")
fittedimage_clip = Float(0.1,
doc = "Sigma for clipping Gaussians " \
"while creating fitted image\n"\
"When the residual image is being made after Gaussian "\
"decomposition, the model images for each fitted Gaussian "\
"are constructed up to a size 2b, such that the amplitude "\
"of the Gaussian falls to a value of fitted_image_clip times "\
"the local rms, b pixels from the peak.",
group = "advanced_opts")
check_outsideuniv = Bool(False,
doc = "Check for pixels outside the "\
"universe\n"\
"If True, then the coordinate of each pixel is examined "\
"to check if it is outside the universe, which may "\
"happen when, e.g., an all sky image is made with SIN "\
"projection (commonly done at LOFAR earlier). When found, "\
"these pixels are blanked (since imaging software do not "\
"do this on their own). Note that this process takes a "\
"lot of time, as every pixel is checked in case weird "\
"geometries and projections are used",
group = "advanced_opts")
trim_box = Option(None, Tuple(Float(), Float(), Float(), Float()),
doc = "Do source detection on only a part of the image. "\
"Specify as (xmin, xmax, ymin, ymax) in pixels. "\
"E.g., trim_box = (120, 840, 15, 895). None => "\
"use entire image",
group = "advanced_opts")
stop_at = Enum(None, 'isl', 'read',
doc = "Stops after: 'isl' = island finding step or "\
"'read' = image reading step",
group = "advanced_opts")
group_by_isl = Bool(False,
doc = "Group all Gaussians in each island into a single "\
"source\n"\
"If True, all Gaussians in the island belong to a "\
"single source. If False, grouping is controlled "\
"by the group_tol parameter.",
group = "advanced_opts")
group_method = Enum('intensity', 'curvature',
doc = "Group Gaussians into sources using 'intensity' map "\
"or 'curvature' map\n"\
"Gaussians are deemed to be a part of "\
"the same source if: 1. no pixel on the line joining "\
"the centers of any pair of Gaussians has a (Gaussian-"\
"reconstructed) value less than the island threshold, and "\
"2. the centers are separated by a distance less than "\
"half the sum of their FWHMs along the line joining them.\n"\
"If 'curvature', the above comparisons are done on the "\
"curature map (see Hancock et al. 2012). If 'intensity', "\
"the comparisons are done on the intensity map.",
group = "advanced_opts")
group_tol = Float(1.0,
doc = "Tolerance for grouping of Gaussians into sources: "\
"larger values will result in larger sources\n"\
"Sources are created by "\
"grouping nearby Gaussians as follows: (1) If the "\
"difference between the minimum value between two "\
"Gaussians and the lower of the peak flux densities of "\
"the Gaussians in an island is less than "\
"group_tol * thresh_isl * rms_clip, "\
"and (2) if the centres are seperated by a distance less "\
"than 0.5*group_tol of the sum of their fwhms along the "\
"PA of the line joining them, they belong to the "\
"same island.",
group = "advanced_opts")
blank_limit = Option(None, Float(),
doc = "Limit in Jy/beam below which pixels are blanked. "\
"None => no such blanking is done\n"\
"All pixels in the ch0 image with a value less than the "\
"specified limit and with at least 4 neighboring pixels "\
"with values also less than this limit are blanked. "\
"If None, any such pixels are left unblanked. "\
"Pixels with a value of NaN are always blanked.",
group = "advanced_opts")
detection_image = String(doc = "Detection image file name used only for detecting "\
"islands of emission. Source measurement is still done "\
"on the main image\n"\
"The detection image can be a FITS or CASA 2-, "\
"3-, or 4-D cube. The detection image and the main"\
"image must have the same size and be registered.",
group = "advanced_opts")
rmsmean_map_filename = List(None,
doc = "Filenames of FITS files to use as the mean and rms maps, "\
"given as a list [, ]\n"\
"If supplied, the internally generated mean and rms maps "\
"are not used.",
group = 'advanced_opts')
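# A hedged usage sketch (file names are illustrative): supply externally
# generated mean and rms maps in place of the internal ones:
#     import bdsf
#     img = bdsf.process_image('image.fits',
#                              rmsmean_map_filename=['mean.fits', 'rms.fits'])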
rmsmean_map_filename_det = List(None,
doc = "Filenames of FITS files to use as the mean and rms maps "\
"when a detection image is specified, "\
"given as a list [, ]\n"\
"If supplied, the internally generated mean and rms maps "\
"are not used.",
group = 'advanced_opts')
do_mc_errors = Bool(False,
doc = "Estimate uncertainties for 'M'-type sources using Monte "\
"Carlo method\n"\
"If True, uncertainties on the sizes and "\
"positions of 'M'-type sources "\
"due to uncertainties in the constituent Gaussians are "\
"estimated using a Monte Carlo technique. These "\
"uncertainties are added in quadrature with those "\
"calculated using Condon (1997). If False, "\
"these uncertainties are ignored, and errors are "\
"calculated using Condon (1997) only.\n"\
"Enabling this option will result in longer run "\
"times if many 'M'-type sources are present, but "\
"should give better estimates of the uncertainites, "
"particularly for complex sources composed of many "\
"Gaussians.",
group = "advanced_opts")
ncores = Option(None, Int(),
doc = "Number of cores to use during fitting, None => "\
"use all\n"\
"Sets the number of cores to use during fitting.",
group = "advanced_opts")
do_cache = Bool(False,
doc = "Cache internally derived images to disk\n" \
"This option controls whether internally "\
"derived images are stored in memory or are "\
"cached to disk. Caching can reduce the amount "\
"of memory used, and is therefore useful when "\
"analyzing large images.",
group = "advanced_opts")
#--------------------------------ADAPTIVE RMS_BOX OPTIONS--------------------------------
rms_box_bright = Option(None, Tuple(Int(), Int()),
doc = "Box size, step size for rms/mean map "\
"calculation near bright sources. Specify as (box, step) in "\
"pixels. None => calculate inside program\n"\
"This parameter sets the box and step sizes "\
"to use near bright sources (determined by the "\
"adaptive_thresh parameter). The large-scale "\
"box size is set with the rms_box parameter.",
group = "adaptive_rms_box")
adaptive_thresh = Option(None, Float(),
doc = "Sources with pixels "\
"above adaptive_thresh*clipped_rms will be considered as "\
"bright sources (i.e., with potential artifacts). "\
"Minimum is 10.0. "\
"None => calculate inside program\n"\
"This parameter sets the SNR above which "\
"sources may be affected by strong artifacts "\
"Sources that meet the SNR threshold will use the "\
"small-scale rms_box (which helps to exclude artifacts) "\
"if their sizes at a threshold of 10.0 is less "\
"than 25 beam areas.\n"
"If None, the threshold is varied from 500 "\
"to 50 to attempt to obtain at least 5 candidate "\
"bright sources.",
group = "adaptive_rms_box")
#--------------------------------A-TROUS OPTIONS--------------------------------
atrous_jmax = Int(0,
doc = 'Max allowed wavelength order, 0 => calculate '\
'inside program\n'\
'This is an integer which is the maximum order of '\
'the a-trous wavelet decomposition. If 0 (or <0 or '\
'>15), then the value is determined within the '\
'program. The value of this parameter is then '\
'estimated as the (lower) rounded off value of '\
'ln[(nm-l)/(l-1) + 1]/ln2 + 1 where nm is the '\
'minimum of the residual image size (n, m) in pixels '\
'and l is the length of the filter a-trous lpf (see '\
'the atrous_lpf parameter for more info).\nA sensible '\
'value of jmax is such that the size of the kernel is '\
'not more than 3-4 times smaller than the smallest image '\
'dimension.',
group = "atrous_do")
atrous_lpf = Enum('b3', 'tr',
doc = "Low pass filter, either 'b3' or "\
"'tr', for B3 spline or Triangle\n"\
"This is the low pass filter, which can be "\
"either the B3 spline or the Triangle function, which "\
"is used to generate the a-trous wavelets. The B3 "\
"spline is [1, 4, 6, 4, 1] and the triangle is "\
"[1, 2, 1], normalised so that the sum is unity. The "\
"lengths of the filters are hence 5 and 3 respectively.",
group = "atrous_do")
atrous_bdsm_do = Bool(True,
doc = "Perform source extraction on each wavelet "\
"scale\n"\
"If True, fitting is done on each wavelet scale "\
"(or sum of scales if atrous_sum is True). If False, "\
"no fitting is done.",
group = "atrous_do")
atrous_orig_isl = Bool(False,
doc = "Restrict wavelet Gaussians to islands found "\
"in original image\n"\
"If True, all wavelet Gaussians must lie within "\
"the boundaries of islands found in the original "\
"image. If False, new islands that are found only in "\
"the wavelet images are included in the final "\
"fit.",
group = "atrous_do")
atrous_sum = Bool(True,
doc = "Fit to the sum of remaining wavelet scales\n"\
"If True, fitting is done on an image that is the sum "\
"of the remaining wavelet scales. Using the sum will "\
"generally result in improved signal. If False, "\
"fitting is done on only the wavelet scale under "\
"consideration.",
group = "atrous_do")
use_scipy_fft = Bool(True,
doc = "Use fast SciPy FFT for convolution\n"\
"If True, the SciPy FFT function will be used instead "\
"of the custom version. The SciPy version is much "\
"faster but also uses much more memory.",
group = "atrous_do")
#--------------------------------FLAGGING OPTIONS--------------------------------
flag_smallsrc = Bool(False,
doc = "Flag sources smaller than "\
"flag_minsize_bm times beam area\n"\
"If True, "\
"then fitted Gaussians whose size is less than "\
"flag_minsize_bm times the synthesized beam area are "\
"flagged. When "\
"combining Gaussians into sources, an "\
"error is raised if a 2x2 box with the peak of "\
"the Gaussian does not have all four pixels "\
"belonging to the source. Usually this means "\
"that the Gaussian is an artifact or has a very "\
"small size. \nIf False, then if either of the sizes "\
"of the fitted Gaussian is zero, then the "\
"Gaussian is flagged.\nIf the image is barely Nyquist "\
"sampled, this flag is best set to False. This "\
"flag is automatically set to False while "\
"decomposing wavelet images into Gaussians. ",
group = "flagging_opts")
flag_minsnr = Float(0.6,
doc = "Flag Gaussian if peak is less than flag_minsnr "\
"times thresh_pix times local rms\n"\
"Any fitted Gaussian whose peak is less than "\
"flag_minsnr times thresh_pix times the local rms "\
"is flagged. The flag value is increased by 1.",
group = "flagging_opts")
flag_maxsnr = Float(1.5,
doc = "Flag Gaussian if peak is greater than "\
"flag_maxsnr times image value at the peak\n"\
"Any fitted Gaussian whose peak is greater than "\
"flag_maxsnr times the image value at the peak "\
"is flagged. The flag value is increased by 2.",
group = "flagging_opts")
flag_maxsize_isl = Float(2.0,
doc = "Flag Gaussian if x, y bounding box "\
"around sigma-contour is factor times island bbox\n"\
"Any fitted Gaussian whose maximum x-dimension is "\
"larger than flag_maxsize_isl times the x-dimension "\
"of the island (and likewise for the y-dimension) is "\
"flagged. The flag value is increased by 16 (for x) "\
"and 32 (for y).",
group = "flagging_opts")
flag_maxsize_fwhm = Float(0.5,
doc = "Flag Gaussian if fwhm-contour times factor extends beyond island\n"\
"Any fitted Gaussian whose contour of flag_maxsize_fwhm times the fwhm "\
"falls outside the island is "\
"flagged. The flag value is increased by 256.",
group = "flagging_opts")
flag_bordersize = Int(0,
doc = "Flag Gaussian if centre is outside border "\
"- flag_bordersize pixels\n"\
"Any fitted Gaussian whose centre is border pixels "\
"outside the island bounding box is flagged. The flag "\
"value is increased by 4 (for x) and 8 (for y).",
group = "flagging_opts")
flag_maxsize_bm = Float(25.0,
doc = "Flag Gaussian if area greater than "\
"flag_maxsize_bm times beam area\n"\
"Any fitted "\
"Gaussian whose size is greater than flag_maxsize_"\
"bm times the synthesized beam is flagged. The "\
"flag value is increased by 64.",
group = "flagging_opts")
flag_minsize_bm = Float(0.7,
doc = "Flag Gaussian if flag_smallsrc = True "\
"and area smaller than flag_minsize_bm times "\
"beam area\n"\
"If flag_smallsrc is "\
"True, then any fitted Gaussian whose size "\
"is less than flag_maxsize_bm times the "\
"synthesized beam is flagged. The Gaussian "\
"flag is increased by 128.",
group = "flagging_opts")
#-----------------------------MULTICHANNEL OPTIONS--------------------------------
beam_spectrum = Option(None, List(Tuple(Float(), Float(), Float())),
doc = "FWHM of synthesized beam per channel. Specify as "\
"[(bmaj_ch1, bmin_ch1, bpa_ch1), (bmaj_ch2, "\
"bmin_ch2, bpa_ch2), etc.] in degrees. E.g., "\
"beam_spectrum = [(0.01, 0.01, 45.0), (0.02, "\
"0.01, 34.0)] for two channels. None => all "\
"equal to beam\n"\
"If None, then the channel-dependent "\
"restoring beam is either assumed to be a constant or "\
"to scale with frequency, depending on whether the "\
"parameter beam_sp_derive is False or True.",
group = "multichan_opts")
frequency_sp = Option(None, List(Float()),
doc = "Frequency in Hz of channels in input image when "\
"more than one channel is present. "\
"E.g., frequency_sp = [74e6, 153e6]. "\
"None => get from header\n"\
"If the frequency is not given "\
"by the user, then it is looked for in the "\
"image header. If not found, then an error "\
"is raised. PyBDSF will not work without the "\
"knowledge of the frequency.",
group = "multichan_opts")
beam_sp_derive = Bool(True,
doc = "If True and beam_spectrum is None, then "\
"assume header beam is for lowest frequency and scales "\
"with frequency for channels\n"\
"If True and the parameter beam_spectrum is None, then "\
"we assume that the beam in the header is for the lowest "\
"frequency of the image cube and scale accordingly to "\
"calculate the beam per channel. If False, then a "\
"constant value of the beam is taken instead.",
group = "multichan_opts")
collapse_mode = Enum('average', 'single', 'file',
doc = "Collapse method: 'average', "\
"'single', or 'file'. If 'file', use a user-provided "\
"file, else either average channels or take single "\
"channel to perform source detection on\n"\
"This parameter determines whether, when multiple "\
"channels are present, the source extraction is "\
"done on a single channel or an average of many "\
"channels.",
group = 'multichan_opts')
collapse_file = String(None,
doc = "If collapse_mode is 'file' then use this file "\
"as the ch0 image. The image supplied can be a FITS or CASA 2-, "\
"3-, or 4-D cube. The detection image and the main "\
"image must have the same size and be registered.",
group = 'multichan_opts')
collapse_ch0 = Int(0,
doc = "Number of the channel for source extraction, "\
"if collapse_mode = 'single', starting from 0",
group = 'multichan_opts')
collapse_av = List(None,
doc = "List of channels to average if collapse_mode "\
"= 'average', starting from 0. E.g., collapse_av "\
"= [0, 1, 5]. [] => all\n"\
"This parameter is a list of channels to be averaged "\
"to produce the continuum image for performing source "\
"extraction, if collapse_mode is 'average'. If the "\
"value is an empty list ([]), then all channels are used. Else, the "\
"value is a Python list of channel numbers, starting "\
"from 0 (i.e., the first channel has number 0, the "\
"second has number 1, etc.).",
group = 'multichan_opts')
collapse_wt = Enum('unity', 'rms',
doc = "Weighting: 'unity' or 'rms'. "\
"Average channels with weights = 1 or 1/rms_clip^2 if " \
"collapse_mode = 'average'\n"\
"When collapse_mode is 'average', then if this value "\
"is 'unity', the channels given by collapse_av are "\
"averaged with unit weights and if 'rms', then they "\
"are averaged with weights which are inverse square "\
"of the clipped rms of each channel image.",
group = 'multichan_opts')
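# Illustrative multichannel configuration ('cube.fits' is an example
# filename; the parameters are the multichan_opts defined above):
#   img = bdsf.process_image('cube.fits', collapse_mode='average',
#                            collapse_av=[0, 1, 5], collapse_wt='rms')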
#-----------------------------OUTPUT OPTIONS--------------------------------
plot_islands = Bool(False,
doc = 'Make separate plots of each island during '\
'fitting (for large images, this may take '\
'a long time and a lot of memory)',
group = "output_opts")
plot_allgaus = Bool(False,
doc = 'Make a plot of all Gaussians at the end',
group = "output_opts")
output_all = Bool(False,
doc = "Write out all files automatically to directory "\
"'outdir/filename_pybdsm'",
group = "output_opts")
opdir_overwrite = Enum('overwrite', 'append',
doc = "'overwrite'/'append': If output_all=True, "\
"delete existing "\
"files or append a new directory",
group = "output_opts")
bbs_patches = Enum(None, 'single', 'gaussian', 'source', 'mask',
doc = "For BBS format, type of patch to use: None "\
"=> no patches. "\
"'single' => all Gaussians in one patch. "\
"'gaussian' => each Gaussian gets its own "\
"patch. 'source' => all Gaussians belonging "\
"to a single source are grouped into one patch. "\
"'mask' => use mask file specified by bbs_patches_mask\n"\
"When the Gaussian catalogue is written as a "\
"BBS-readable sky file, this determines whether "\
"all Gaussians are in a single patch, there are "\
"no patches, all Gaussians for a given source "\
"are in a separate patch, each Gaussian gets "\
"its own patch, or a mask image is used to define "\
"the patches.\n"\
"If you wish to have patches defined by island, "\
"then set group_by_isl = True (under advanced_opts) "\
"before fitting to force all Gaussians in an "\
"island to be in a single source. Then set "\
"bbs_patches='source' when writing the catalog.",
group = "output_opts")
bbs_patches_mask = Option(None, String(),
doc = "Name of the mask file (of same size as input "\
"image) that defines the patches if bbs_patches "\
"= 'mask'\nA mask file may be used to define the "\
"patches in the output BBS sky model. The mask "\
"image should be 1 inside the patches and 0 "\
"elsewhere and should be the same size as the "\
"input image (before any trim_box is applied). Any "\
"Gaussians that fall outside of the patches "\
"will be ignored and will not appear in the "\
"output sky model.",
group = "output_opts")
solnname = Option(None, String(),
doc = "Name of the run, to be prepended "\
"to the name of the output directory. E.g., "\
"solname='Run_1'",
group = "output_opts")
indir = Option(None, String(),
doc = "Directory of input FITS files. None => get "\
"from filename",
group = "output_opts")
outdir = Option(None, String(),
doc = "Directory to use for all output files "\
"(including log files). None => parent directory of the "\
"input filename.",
group = "output_opts")
savefits_residim = Bool(False,
doc = "Save residual image as fits file",
group = "output_opts")
savefits_rmsim = Bool(False,
doc = "Save background rms image as fits file",
group = "output_opts")
savefits_meanim = Bool(False,
doc = "Save background mean image as fits file",
group = "output_opts")
savefits_modelim = Bool(False,
doc = "Save Gaussian model image as fits file",
group = "output_opts")
savefits_det_rmsim = Bool(False,
doc = "Save detection background rms image as fits file",
group = "output_opts")
savefits_det_meanim = Bool(False,
doc = "Save detection background mean image as fits file",
group = "output_opts")
savefits_rankim = Bool(False,
doc = "Save island rank image as fits file",
group = "output_opts")
savefits_normim = Bool(False,
doc = "Save norm image as fits file",
group = "output_opts")
print_timing = Bool(False,
doc = "Print basic timing information",
group = "output_opts")
verbose_fitting = Bool(False,
doc = "Print out extra information " \
"during fitting",
group = "output_opts")
quiet = Bool(False,
doc = "Suppress text output to screen. Output is "\
"still sent to the log file as usual",
group = "output_opts")
#------------------------POLARISATION OPTIONS------------------------------
pi_fit = Bool(True,
doc = "Check the polarized intesity (PI) image for "\
"sources not found in Stokes I\n"\
"If True, the polarized intensity image is "\
"searched for sources not present in the Stokes "\
"I image. If any such sources are found, they are "\
"added to the the Stokes I source lists. Use the "\
"pi_thresh_pix and pi_thresh_isl parameters to "\
"control island detection in the PI image.",
group = "polarisation_do")
pi_thresh_isl = Option(None, Float(),
doc = "Threshold for PI island boundary in number of sigma "\
"above the mean. None => use thresh_isl\n"\
"This parameter determines the region to which fitting "\
"is done in the polarized intensity (PI) image. "\
"A higher value will produce smaller islands, "\
"and hence smaller regions that are considered in the "\
"fits. A lower value will produce larger islands. "\
"Use the pi_thresh_pix parameter to set the detection "
"threshold for sources. Generally, pi_thresh_isl should "\
"be lower than pi_thresh_pix.",
group = "polarisation_do")
pi_thresh_pix = Option(None, Float(),
doc = "Source detection threshold for PI image: threshold for the "\
"island peak in number of sigma "\
"above the mean. None => use thresh_pix\n"\
"This parameter sets the overall detection threshold "\
"for islands in the polarized intensity (PI) image "\
"(i.e. pi_thresh_pix = 5 will find all sources "\
"with peak flux densities per beam of 5-sigma or greater). Use the "\
"pi_thresh_isl parameter to control how much of each island "\
"is used in fitting. Generally, pi_thresh_pix should be larger "\
"than pi_thresh_isl.",
group = "polarisation_do")
#-----------------------------PSF VARY OPTIONS--------------------------------
psf_generators = Enum('calibrators', 'field',
doc = "PSF generators: 'calibrators' or 'field'\n"\
" If 'calibrator', only one source is taken per "\
"facet, and sources between psf_snrtop and maximum "\
"SNR are primary Voronoi generators. If 'field', "\
"all sources between psf_snrbot and psf_snrtop are "\
"secondary generators to be used in tessellating. "\
"Currently, the 'field' option is not implemented.",
group = "hidden")
psf_nsig = Float(3.0,
doc = "Kappa for clipping within each bin\n"\
"When constructing a set of 'unresolved' sources "\
"for psf estimation, the (clipped) median, rms and "\
"mean of major and minor axis sizes of Gaussians versus "\
"SNR within each bin is calculated using kappa = "\
"psf_nsig.",
group = "psf_vary_do")
psf_over = Int(2,
doc = "Factor of nyquist sample for binning bmaj, "\
"etc. vs SNR",
group = "psf_vary_do")
psf_kappa2 = Float(2.0,
doc = "Kappa for clipping for analytic fit\n"\
"When iteratively arriving at a statistically "\
"probable set of 'unresolved' sources, the fitted "\
"major and minor axis sizes versus SNR are binned "\
"and fitted with analytical functions. Those "\
"Gaussians which are within psf_kappa2 times "\
"the fitted rms from the fitted median are then "\
"considered 'unresolved' and are used further to "\
"estimate the PSFs.",
group = "psf_vary_do")
psf_smooth = Option(None, Float(),
doc = "Size of Gaussian to use for smoothing of "\
"interpolated images in arcsec. None => no "\
"smoothing",
group = "psf_vary_do")
psf_snrcut = Float(10.0,
doc = "Minimum SNR for statistics\n"\
"Only Gaussians with SNR greater than this are "\
"considered for processing. The minimum value is 5.0",
group = "psf_vary_do")
psf_snrtop = Float(0.15,
doc = "Fraction of SNR > snrcut as primary generators\n"\
"If psf_generators is 'calibrator', then the peak "\
"pixels of Gaussians which are the psf_snrtop "\
"fraction of SNR are taken as Voronoi generators. If "\
"psf_generators is 'field', then peak pixels of "\
"Gaussians which are between psf_snrbot and psf_snrtop "\
"fraction of the highest SNR are taken.",
group = "psf_vary_do")
psf_snrbot = Float(0.20,
doc = "Fraction of SNR > snrcut as all generators\n"\
"If psf_generators is 'field', then all sources which "\
"are between a fraction psf_snrbot and a fraction "\
"psf_snrtop of the highest SNR Gaussians are taken as "\
"Voronoi generators. That is, for a value of 0.2, the "\
"top 20% (in terms of SNR) of Gaussians are taken.",
group = "hidden")
psf_snrcutstack = Float(15.0,
doc = "Unresolved sources with higher SNR "\
"taken for stacked psfs\n"\
"Only Gaussians with SNR greater than this are used for "\
"estimating psf images in each tile.",
group = "psf_vary_do")
psf_gencode = Enum('list', 'file',
doc = "'list'/'file': Take primary "\
"gens from Gaussian list or file\n"\
"This is a string which can be either of 'list' or "\
"'file' (default is 'list'; 'file' not implemented "\
"yet). If psf_generators is 'calibrators', then the "\
"generators used for Voronoi tessellation of the "\
"image are either taken from a file if psf gencode is "\
"'file' or are determined from the data if psf gencode "\
"is 'list' (see psf_snrcut and psf_snrtop). The maximum "\
"pixel for each source is used as the generator. For "\
"'file' to be used, a list of good sources whose "\
"psfs are believed to close to theoretical (e.g. strong "\
"calibrators) need to be supplied with the metadata.",
group = "hidden")
psf_primarygen = String('',
doc = "Filename for primary gens if psf_gencode='file'\n"\
"This is the filename with the generators if psf_gencode "\
"is 'file'. This is not yet implemented.",
group = "hidden")
psf_itess_method = Int(0,
doc = "0 = normal, 1 = 0 + round, 2 = LogSNR, "\
"3 = SqrtLogSNR\n"\
"This is an integer which can be 0, 1, 2 or 3 "\
"(default is 0), which corresponds to a tessellation "\
"method. "\
"If 0, 2 or 3, then the weights used for Voronoi "\
"tessellation are unity, log(SNR) and sqrt[log(SNR)] where "\
"SNR is the signal to noise ratio of the generator "\
"in a tile. If 1, then the image is tessellated such "\
"that each tile has smooth boundaries instead of straight "\
"lines, using pixel-dependent weights.",
group = "psf_vary_do")
psf_tess_sc = Enum('s', 'c',
doc = "('s')imple/('c')omplicated - normal "\
"or approximate (fuzzy)\n"\
"If 's', then each pixel can only belong to one Voronoi "\
"tile. If 'c', then we do a fuzzy tessellation where border "\
"pixels can belong to more than one tile. However, we do "\
"not yet process the result of fuzzy tessellation and hence "\
"it is advisable to use 's'.",
group = "hidden")
psf_tess_fuzzy = Float(0.05,
doc = "Fraction of overlap for fuzzy tessellation\n"\
"If psf_tess_sc is 'c', then this determines the fraction "\
"of overlap between adjacent tiles for fuzzy tessellation.",
group = "hidden")
psf_use_shap = Bool(False,
doc = "Use shapelets for PSF variation",
group = "hidden")
psf_high_snr = Option(None, Float(),
doc = "SNR above which all sources are taken to be unresolved. "\
"E.g., psf_high_snr = 20.0. None => no such selection is made\n"\
"Gaussians with SNR greater than this are "\
"used to determine the PSF variation, even if they are deemed "\
"to be resolved. This corrects for the unreliability at high SNRs in the "\
"algorithm used to find unresolved sources. The minimum value is 20.0",
group = "psf_vary_do")
psf_stype_only = Bool(True,
doc = "Restrict sources to "\
"be only of type 'S'",
group = "psf_vary_do")
psf_fwhm = Option(None, Tuple(Float(), Float(), Float()),
doc = "FWHM of the PSF. Specify as (maj, "\
"min, pos ang E of N) in degrees. "\
"E.g., psf_fwhm = (0.06, 0.02, 13.3). None => "\
"estimate from image\n"\
"If the size of the PSF is specified with this option, "\
"the PSF and its variation acrosss the image are "\
"assumed to be constant and are not estimated "\
"from the image. Instead, all sources "\
"are deconvolved with the specified PSF.",
group = "psf_vary_do")
#-----------------------------SHAPELET OPTIONS--------------------------------
shapelet_basis = Enum("cartesian", "polar",
doc = "Basis set for shapelet decomposition: "\
"'cartesian' or 'polar'\n"\
"If shapelet decomposition is done, this determines "\
"the type of shapelet basis used. Currently however, "\
"only cartesian is supported.",
group = "shapelet_do")
shapelet_fitmode = Enum("fit", None,
doc = "Calculate shapelet coeff's by fitting ('fit') "\
"or integrating (None)\n"\
"If shapelet do is True, then this determines the "\
"method of calculating shapelet coefficients. If None, "\
"then these are calculated by integrating (actually, "\
"by summing over pixels, which introduces errors due to "\
"discretisation). If 'fit', then the coefficients are "\
"found by least-squares fitting of the shapelet basis "\
"functions to the image.",
group = "shapelet_do")
shapelet_gresid = Bool(False,
doc = "Use Gaussian residual image for shapelet "\
"decomposition?\n"\
"If True, then the shapelet decomposition is done "\
"on the Gaussian residual image rather that the "\
"ch0 image.",
group = "shapelet_do")
#-------------------------SPECTRAL INDEX OPTIONS--------------------------------
flagchan_rms = Bool(True,
doc = "Flag channels before (averaging and) "\
"extracting spectral index, if their rms is "\
"more than 5 (clipped) sigma outside the median "\
"rms over all channels, but only if <= 10% of "\
"channels\n"\
"If True, then the clipped rms and median (r and m) "\
"of the clipped rms of each channel is calculated. "\
"Those channels whose clipped rms is greater than "\
"4r away from m are flagged prior to averaging and "\
"calculating spectral indices from the image cube. "\
"However, these channels are flagged only if the "\
"total number of these bad channels does not exceed "\
"10% of the total number of channels themselves.",
group = "spectralindex_do")
flagchan_list = List(None,
doc = "List of channels to flag before (averaging and) "\
"extracting spectral index\n"\
"This parameter is a list of channels to be flagged. "\
"Flagged channels will not be used during fitting. If the "\
"value is an empty list ([]), then all channels are used. Else, the "\
"value is a Python list of channel numbers, starting "\
"from 0 (i.e., the first channel has number 0, the "\
"second has number 1, etc.).",
group = 'spectralindex_do')
flagchan_snr = Bool(True,
doc = "Flag channels that do not meet SNR criterion "\
"set by specind_snr\n"\
"If True, then channels (after averaging if needed) "\
"will be flagged and will not be used during fitting.",
group = "spectralindex_do")
specind_maxchan = Int(0,
doc = "Maximum number of channels to average for "\
"a given source when when attempting to meet target SNR. "\
"1 => no averaging; 0 => no maximum\n"\
"If spectralindex_do is True, then for a given source, "\
"if the flux densities in each channel are below a threshold, "\
"then this determines the maximum number of channels to "\
"average.",
group = "spectralindex_do")
specind_snr = Float(3.0,
doc = "Target SNR to use when fitting power law. If "\
"there is insufficient SNR, neighboring channels "\
"are averaged to attempt to obtain the target SNR. "\
"Channels with SNRs below this will be flagged if "\
"flagchan_snr = True\n"\
"The maximum allowable number of channels to average "\
"is determined by the specind_maxchan parameter.",
group = "spectralindex_do")
#-------------------------HIDDEN OPTIONS--------------------------------
debug = Bool(False,
doc = "Print debug info to the logfile",
group = "hidden")
outfile = Option(None, String(),
doc = "Output file name. None => file is named "\
"automatically; 'SAMP' => send to SAMP hub "\
"(e.g., to TOPCAT, ds9, or Aladin)",
group = 'hidden')
broadcast = Bool(False,
doc = "Broadcast Gaussian and source IDs and "\
"coordinates to SAMP hub when a Gaussian is "\
"clicked?\nNote that for the "\
"IDs to be useful, a catalog must have been sent "\
"to the SAMP hub previously using the write_catalog "\
"task (with outfile = 'SAMP').",
group = 'hidden')
clobber = Bool(False,
doc = "Overwrite existing file?",
group = 'hidden')
format = Enum('fits', 'ds9', 'ascii', 'bbs', 'star', 'kvis', 'sagecal', 'csv', 'casabox',
doc = "Format of output catalog: 'bbs', "\
"'ds9', 'fits', 'star', 'kvis', 'ascii', 'csv', 'casabox', or 'sagecal'\n"\
"The following formats are supported:\n"\
"'bbs' - BlackBoard Selfcal sky model format "\
"(Gaussian list only)\n"\
"'ds9' - ds9 region format\n"\
"'fits' - FITS catalog format, readable by many "\
"software packages, including IDL, TOPCAT, Python, "\
"fv, Aladin, etc.\n"\
"'star' - AIPS STAR format (Gaussian list only)\n"\
"'kvis' - kvis format (Gaussian list only)\n"\
"'ascii' - simple text file\n"\
"'sagecal' - SAGECAL format (Gaussian list only)\n"\
"Catalogues with the 'fits' and 'ascii' formats "\
"include all available information (see headers "\
"of the output file for column definitions). The "\
"other formats include only a subset of the full "\
"information.",
group = 'hidden')
srcroot = Option(None, String(),
doc = "Root name for entries in the output catalog "\
"(BBS format only). None => use image file name",
group = 'hidden')
incl_chan = Bool(False,
doc = "Include flux densities from each channel "\
"(if any)?",
group = 'hidden')
incl_empty = Bool(False,
doc = "Include islands without any valid Gaussians "\
"(source list only)?\n"\
"If True, islands for which Gaussian fitting "\
"failed will be included in the output catalog. "\
"In these cases, the source IDs "\
"are negative.",
group = 'hidden')
force_output = Bool(False,
doc = "Force creation of output file, even if the "\
"catalog is empty?\n"\
"If True, the output catalog will be created, "\
"even if there are no sources. In this case, "\
"the catalog will have a header but no entries.",
group = 'hidden')
catalog_type = Enum('srl', 'gaul', 'shap',
doc = "Type of catalog to write: 'gaul' - Gaussian "\
"list, 'srl' - source list (formed "\
"by grouping Gaussians), 'shap' - shapelet "\
"list (FITS format only)",
group = 'hidden')
correct_proj = Bool(True,
doc = "Correct source parameters for image projection (BBS format only)?\n"\
"If True, the source parameters in the output catalog will be "\
"corrected for first-order projection effects. If False, "\
"no correction is done. In this case, the position angle "\
"is relative to the +y axis, NOT true north, and source sizes "\
"are calculated assuming a constant pixel scale (equal to the "
"scale at the image center).\n "\
"If True, the position angle and source size "\
"are corrected using the average pixel size and "
"angle offset (between the +y axis and north) at "\
"the location of the source center.",
group = 'hidden')
img_format = Enum('fits', 'casa',
doc = "Format of output image: 'fits' or 'casa'",
group = 'hidden')
img_type = Enum('gaus_resid', 'shap_resid', 'rms', 'mean', 'gaus_model',
'shap_model', 'ch0', 'pi', 'psf_major', 'psf_minor',
'psf_pa', 'psf_ratio', 'psf_ratio_aper', 'island_mask',
doc = "Type of image to export: 'gaus_resid', "\
"'shap_resid', 'rms', 'mean', 'gaus_model', "\
"'shap_model', 'ch0', 'pi', 'psf_major', "\
"'psf_minor', 'psf_pa', 'psf_ratio', 'psf_ratio_aper', "\
"'island_mask'\nThe following images "\
"can be exported:\n"\
"'ch0' - image used for source detection\n"\
"'rms' - rms map image\n"\
"'mean' - mean map image\n"\
"'pi' - polarized intensity image\n"\
"'gaus_resid' - Gaussian model residual image\n"\
"'gaus_model' - Gaussian model image\n"\
"'shap_resid' - Shapelet model residual image\n"\
"'shap_model' - Shapelet model image\n"\
"'psf_major' - PSF major axis FWHM image (FWHM in arcsec)\n"\
"'psf_minor' - PSF minor axis FWHM image (FWHM in arcsec)\n"\
"'psf_pa' - PSF position angle image (degrees east of north)\n"\
"'psf_ratio' - PSF peak-to-total flux ratio (in units of 1/beam)\n"\
"'psf_ratio_aper' - PSF peak-to-aperture flux ratio (in units of 1/beam)\n"\
"'island_mask' - Island mask image (0 = outside island, 1 = inside island)",
group = 'hidden')
mask_dilation = Int(0,
doc = "Number of iterations to use for island-mask dilation. "\
"0 => no dilation\nThis option determines the number of "\
"dilation iterations to use when making the island mask. "\
"More iterations implies larger masked regions (one iteration "\
"expands the size of features in the mask by one pixel in all "\
"directions). After dilation, a closing operation is performed "\
"(using a structure array the size of the beam) to remove gaps "\
"and holes in the mask that are smaller than the beam.",
group = "hidden")
pad_image = Bool(False,
doc = "Pad image (with zeros) to original size\nIf True, the output "\
"image is padded to be the same size as the original "\
"image (without any trimming defined by the trim_box "\
"parameter). If False, the output image will have the "\
"size specified by the trim_box parameter.",
group = "hidden")
ch0_image = Bool(True,
doc = "Show the ch0 image. This is the image used for "\
"source detection",
group = "hidden")
rms_image = Bool(True,
doc = "Show the background rms image",
group = "hidden")
mean_image = Bool(True,
doc = "Show the background mean image",
group = "hidden")
ch0_islands = Bool(True,
doc = "Show the ch0 image with islands and Gaussians "\
"(if any) overplotted",
group = "hidden")
ch0_flagged = Bool(False,
doc = "Show the ch0 image with flagged Gaussians "\
"(if any) overplotted",
group = "hidden")
gresid_image = Bool(True,
doc = "Show the Gaussian residual image",
group = "hidden")
sresid_image = Bool(False,
doc = "Show the shapelet residual image",
group = "hidden")
gmodel_image = Bool(True,
doc = "Show the Gaussian model image",
group = "hidden")
smodel_image = Bool(False,
doc = "Show the shapelet model image",
group = "hidden")
pi_image = Bool(False,
doc = "Show the polarized intensity image",
group = "hidden")
source_seds = Bool(False,
doc = "Plot the source SEDs and best-fit spectral "\
"indices (if image was processed with "\
"spectralindex_do = True). "\
"Sources may be chosen by ID with the 'c' key "\
"or, if ch0_islands = True, by picking a source with "\
"the mouse",
group = "hidden")
psf_major = Bool(False,
doc = "Show the PSF major axis variation (values are "\
"FWHM in arcsec)",
group = "hidden")
psf_minor = Bool(False,
doc = "Show the FWHM of PSF minor axis variation (values are "\
"FWHM in arcsec)",
group = "hidden")
psf_pa = Bool(False,
doc = "Show the PSF position angle variation (values are "\
"angle E from N in degrees)",
group = "hidden")
def __init__(self, values = None):
"""Build an instance of Opts and (possibly)
initialize some variables.
Parameters:
values: dictionary of key->value for initialization
of variables
"""
TCInit(self)
if values is not None:
self.set_opts(values)
def _parse_string_as_bool(self, bool_string):
"""
'private' helper that parses a string containing a bool
representation, as defined in the parameter set/otdb
implementation
"""
true_chars = ['t', 'T', 'y', 'Y', '1']
false_chars = ['f', 'F', 'n', 'N', '0']
if bool_string[0] in true_chars:
return True
if bool_string[0] in false_chars:
return False
raise tcError(
"Supplied string cannot be parsed as a bool: {0}".format(bool_string))
def set_opts(self, opts):
"""Set multiple variables at once.
opts should be dictionary of name->value
"""
opts = dict(opts)
for k, v in opts.items():
try:
# Fix for LOFAR parameter set integration:
# if the attribute is a bool but the supplied value is a
# string, try to parse the string as a bool
if hasattr(self, k):
if isinstance(self.__getattribute__(k), bool):
if isinstance(v, bool) or v is None:
# just enter the bool into the parameter
pass
elif isinstance(v, str):
# Try to parse it as a parameter set bool string
v = self._parse_string_as_bool(v)
else:
# raise error
raise tcError("unknown type for bool variable")
if v == "none":
v = None
self.__setattr__(k, v)
except tcError as e:
# Catch and re-raise as a RuntimeError
raise RuntimeError(
'Parameter "{0}" is not defined properly. \n {1}'.format(k
, str(e)))
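# Illustrative usage (with option names defined in this class):
#   opts = Opts()
#   opts.set_opts({'quiet': 'true', 'psf_snrcut': 10.0})
# The string 'true' is parsed to a bool via _parse_string_as_bool above.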
def set_default(self, opt_names = None):
"""Set one or more opts to default value.
opt_names should be a list of opt names as strings, but can be
a string of a single opt name.
If None, set all opts to default values."""
if opt_names is None:
TCInit(self)
else:
if isinstance(opt_names, str):
opt_names = [opt_names]
for k in opt_names:
if isinstance(k, str):
self.__delattr__(k)
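# Illustrative usage:
#   opts.set_default('psf_snrcut')        # reset a single option
#   opts.set_default(['quiet', 'debug'])  # reset several options
#   opts.set_default()                    # reset all options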
def info(self):
"""Pretty-print current values of options"""
## enumerate all options
opts = self.to_list()
res = ""
fmt = "%20s = %5s ## %s\n"
for k, v in opts:
res += fmt % (k, str(self.__getattribute__(k)),
str(v.doc()).split('\n')[0])
return res
def to_list(self, group=None):
"""Returns a sorted list of (name, TC object) tuples for all opts.
If the group name is specified, only opts that belong to that group
are returned.
"""
from . import tc
opts_list = []
for k, v in self.__class__.__dict__.items():
if isinstance(v, tc.TC):
if group is not None:
if v.group() == group:
opts_list.append((k, v))
else:
opts_list.append((k, v))
opts_list = sorted(opts_list)
return opts_list
def to_dict(self):
"""Returns a dictionary of names and values for all opts."""
from . import tc
opts_dict = {}
for k, v in self.__class__.__dict__.items():
if isinstance(v, tc.TC):
opts_dict.update({k: self.__getattribute__(k)})
return opts_dict
def get_names(self, group=None):
"""Returns a sorted list of names for all opts.
If the group name is specified, only opts that belong to that group
are returned.
"""
from . import tc
opts_list = []
for k, v in self.__class__.__dict__.items():
if isinstance(v, tc.TC):
if group is not None:
if v.group() == group:
opts_list.append(k)
else:
opts_list.append(k)
opts_list = sorted(opts_list)
return opts_list
def __setstate__(self, state):
self.set_opts(state)
def __getstate__(self):
from . import tc
state = {}
for k, v in self.__class__.__dict__.items():
if isinstance(v, tc.TC):
state.update({k: self.__getattribute__(k)})
return state
PyBDSF-1.11.0/bdsf/output.py 0000664 0000000 0000000 00000136735 14650706641 0015501 0 ustar 00root root 0000000 0000000 """Module output.
Defines functions that write the results of source detection in a
variety of formats. These are then used as methods of Image objects
and/or are called by the outlist operation if output_all is True.
"""
from __future__ import print_function
from __future__ import absolute_import
from .image import Op
class Op_outlist(Op):
"""Write out list of Gaussians
All available output lists are currently generated.
"""
def __call__(self, img):
if img.opts.output_all:
import os
if len(img.gaussians) > 0:
dir = img.basedir + '/catalogues/'
if not os.path.exists(dir):
os.makedirs(dir)
self.write_bbs(img, dir)
self.write_lsm(img, dir)
self.write_gaul(img, dir)
self.write_srl(img, dir)
self.write_aips(img, dir)
self.write_kvis(img, dir)
self.write_ds9(img, dir, objtype='gaul')
self.write_ds9(img, dir, objtype='srl')
self.write_gaul_FITS(img, dir)
self.write_srl_FITS(img, dir)
if not os.path.exists(img.basedir + '/misc/'):
os.makedirs(img.basedir + '/misc/')
self.write_opts(img, img.basedir + '/misc/')
self.save_opts(img, img.basedir + '/misc/')
img.completed_Ops.append('outlist')
def write_bbs(self, img, dir):
""" Writes the gaussian list as a bbs-readable file"""
if 'bbsname' in img.extraparams:
name = img.extraparams['bbsname']
else:
name = img.imagename
fname = dir + name + '.sky_in'
# Write Gaussian list
write_bbs_gaul(img, filename=fname, srcroot=img.opts.srcroot,
patch=img.opts.bbs_patches, sort_by='flux',
clobber=True, incl_empty=img.opts.incl_empty,
correct_proj=img.opts.correct_proj)
def write_lsm(self, img, dir):
""" Writes the gaussian list as an SAGECAL file"""
fname = dir + img.imagename + '.lsm'
write_lsm_gaul(img, filename=fname, sort_by='indx',
clobber=True,
incl_empty=img.opts.incl_empty)
def write_gaul(self, img, dir):
""" Writes the gaussian list as an ASCII file"""
fname = dir + img.imagename + '.gaul'
write_ascii_list(img, filename=fname, sort_by='indx',
clobber=True, objtype='gaul',
incl_empty=img.opts.incl_empty)
def write_srl(self, img, dir):
""" Writes the source list as an ASCII file"""
fname = dir + img.imagename + '.srl'
write_ascii_list(img, filename=fname, sort_by='indx',
clobber=True, objtype='srl',
incl_empty=img.opts.incl_empty)
def write_aips(self, img, dir):
""" Writes the gaussian list an AIPS STAR file"""
fname = dir + img.imagename + '.star'
write_star(img, filename=fname, sort_by='indx',
clobber=True)
def write_kvis(self, img, dir):
""" Writes the gaussian list as a kvis file"""
fname = dir + img.imagename + '.kvis.ann'
write_kvis_ann(img, filename=fname, sort_by='indx',
clobber=True)
def write_ds9(self, img, dir, objtype='gaul'):
""" Writes the gaussian list as a ds9 region file"""
fname = dir + img.imagename + '.' + objtype + '.ds9.reg'
write_ds9_list(img, filename=fname, srcroot=img.opts.srcroot,
clobber=True, deconvolve=False, objtype=objtype,
incl_empty=img.opts.incl_empty,)
def write_gaul_FITS(self, img, dir):
""" Writes the gaussian list as FITS binary table"""
fname = dir + img.imagename+'.gaul.FITS'
write_fits_list(img, filename=fname, sort_by='indx',
clobber=True, objtype='gaul',
incl_empty=img.opts.incl_empty,)
def write_srl_FITS(self, img, dir):
""" Writes the source list as FITS binary table"""
fname = dir + img.imagename+'.srl.FITS'
write_fits_list(img, filename=fname, sort_by='indx',
clobber=True, objtype='srl',
incl_empty=img.opts.incl_empty, incl_chan=img.opts.incl_chan)
def write_shap_FITS(self, img, dir):
""" Writes the shapelet list as a FITS file"""
fname = dir + img.imagename + '.shap.FITS'
write_fits_list(img, filename=fname, sort_by='indx',
clobber=True, objtype='shap')
def write_opts(self, img, dir):
""" Writes input parameters to a text file."""
import inspect
from . import mylogger
mylog = mylogger.logging.getLogger("PyBDSM."+img.log+"Output")
fname = 'parameters_used'
f = open(dir+fname, 'w')
mylog.info('Writing '+dir+fname)
for attr in inspect.getmembers(img.opts):
if attr[0][0] != '_':
if isinstance(attr[1], (int, str, bool, float, type(None), tuple, list)):
f.write('%-40s' % attr[0])
f.write(repr(attr[1])+'\n')
# Also print the values derived internally. They are all stored
# in img with the same name (e.g., img.opts.beam --> img.beam)
if hasattr(img, attr[0]):
used = img.__getattribute__(attr[0])
if used != attr[1] and isinstance(used, (int, str, bool, float,
type(None), tuple,
list)):
f.write('%-40s' % ' Value used')
f.write(repr(used)+'\n')
f.close()
def save_opts(self, img, dir):
""" Saves input parameters to a PyBDSM save file."""
from . import interface
from . import mylogger
mylog = mylogger.logging.getLogger("PyBDSM."+img.log+"Output")
fname = 'parameters.sav'
mylog.info('Writing '+dir+fname)
interface.save_pars(img, dir+fname, quiet=True)
def ra2hhmmss(deg):
"""Convert RA coordinate (in degrees) to HH MM SS"""
from math import modf
if deg < 0:
deg += 360.0
x, hh = modf(deg/15.)
x, mm = modf(x*60)
ss = x*60
return (int(hh), int(mm), ss)
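# Illustrative examples:
#   ra2hhmmss(202.5) -> (13, 30, 0.0)
#   ra2hhmmss(-7.5)  -> (23, 30, 0.0)  (negative RAs are wrapped to [0, 360))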
def dec2ddmmss(deg):
"""Convert DEC coordinate (in degrees) to DD MM SS"""
from math import modf
sign = (-1 if deg < 0 else 1)
x, dd = modf(abs(deg))
x, ma = modf(x*60)
sa = x*60
return (int(dd), int(ma), sa, sign)
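# Illustrative example (the sign is returned separately as the fourth element):
#   dec2ddmmss(-30.25) -> (30, 15, 0.0, -1)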
def B1950toJ2000(Bcoord):
""" Precess using Aoki et al. 1983. Same results as NED to ~0.2asec """
from math import sin, cos, pi, sqrt, asin, acos
import numpy as N
rad = 180.0/pi
ra, dec = Bcoord
A = N.array([-1.62557e-6, -0.31919e-6, -0.13843e-6])
M = N.array([[0.9999256782, 0.0111820609, 0.0048579479], [-0.0111820610, 0.9999374784, -0.0000271474],
[-0.0048579477, -0.0000271765, 0.9999881997]])
r0 = N.zeros(3)
r0[0] = cos(dec/rad) * cos(ra/rad)
r0[1] = cos(dec/rad) * sin(ra/rad)
r0[2] = sin(dec/rad)
r0A = N.sum(r0*A)
r1 = r0 - A + r0A*r0
r = N.sum(M.transpose()*r1, axis=1)
rscal = sqrt(N.sum(r*r))
decj = asin(r[2]/rscal)*rad
d1 = r[0] / rscal / cos(decj/rad)
d2 = r[1] / rscal / cos(decj/rad)
raj = acos(d1)*rad
if d2 < 0.0:
raj = 360.0 - raj
Jcoord = [raj, decj]
return Jcoord
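# Illustrative usage (input and output coordinates are in degrees):
#   raj, decj = B1950toJ2000([ra_b1950, dec_b1950])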
def write_bbs_gaul(img, filename=None, srcroot=None, patch=None,
incl_primary=True, sort_by='flux',
clobber=False, incl_empty=False, correct_proj=True):
"""Writes Gaussian list to a BBS sky model"""
from . import mylogger
import os
mylog = mylogger.logging.getLogger("PyBDSM.write_gaul")
if int(img.equinox) != 2000 and int(img.equinox) != 1950:
mylog.warning('Equinox of input image is not J2000 or B1950. '
'Sky model may not be appropriate for BBS.')
if int(img.equinox) == 1950:
mylog.warning('Equinox of input image is B1950. Coordinates '
'will be precessed to J2000.')
outl, outn, patl = list_and_sort_gaussians(img, patch=patch,
root=srcroot, sort_by=sort_by)
outstr_list = make_bbs_str(img, outl, outn, patl, incl_empty=incl_empty,
correct_proj=correct_proj)
if filename is None:
filename = img.imagename + '.sky_in'
if os.path.exists(filename) and not clobber:
return None
mylog.info('Writing ' + filename)
f = open(filename, 'w')
for s in outstr_list:
f.write(s)
f.close()
return filename
def write_lsm_gaul(img, filename=None, srcroot=None, patch=None,
incl_primary=True, sort_by='flux',
clobber=False, incl_empty=False):
"""Writes Gaussian list to a SAGECAL lsm sky model"""
from . import mylogger
import os
mylog = mylogger.logging.getLogger("PyBDSM.write_gaul")
if int(img.equinox) != 2000 and int(img.equinox) != 1950:
mylog.warning('Equinox of input image is not J2000 or B1950. '
'Sky model may not be appropriate for Sagecal.')
if int(img.equinox) == 1950:
mylog.warning('Equinox of input image is B1950. Coordinates '
'will be precessed to J2000.')
outl, outn, patl = list_and_sort_gaussians(img, patch=patch,
root=srcroot, sort_by=sort_by)
outstr_list = make_lsm_str(img, outl, outn, incl_empty=incl_empty)
if filename is None:
filename = img.imagename + '.lsm'
if os.path.exists(filename) and not clobber:
return None
mylog.info('Writing ' + filename)
f = open(filename, 'w')
for s in outstr_list:
f.write(s)
f.close()
return filename
def write_ds9_list(img, filename=None, srcroot=None, deconvolve=False,
clobber=False, incl_empty=False, objtype='gaul'):
"""Writes Gaussian list to a ds9 region file"""
from . import mylogger
import os
mylog = mylogger.logging.getLogger("PyBDSM."+img.log+"Output")
if objtype == 'gaul':
outl, outn, patl = list_and_sort_gaussians(img, patch=None)
elif objtype == 'srl':
root = img.parentname
outl = [img.sources]
if incl_empty:
# Append the dummy sources for islands without any unflagged Gaussians
outl[0] += img.dsources
outn = []
for src in img.sources:
outn.append(root + '_i' + str(src.island_id) + '_s' +
str(src.source_id))
if incl_empty:
# Append the dummy sources for islands without any unflagged Gaussians
for dsrc in img.dsources:
outn.append(root + '_i' + str(dsrc.island_id) + '_s' +
str(dsrc.source_id))
outn = [outn]
outstr_list = make_ds9_str(img, outl, outn, deconvolve=deconvolve,
objtype=objtype, incl_empty=incl_empty)
if filename is None:
filename = img.imagename + '.' + objtype + '.reg'
if os.path.exists(filename) and not clobber:
return None
mylog.info('Writing ' + filename)
f = open(filename, "w")
for s in outstr_list:
f.write(s)
f.close()
return filename
def write_ascii_list(img, filename=None, sort_by='indx', format='ascii',
incl_chan=False, incl_empty=False, clobber=False, objtype='gaul'):
"""Writes Gaussian list to an ASCII file"""
from . import mylogger
import os
mylog = mylogger.logging.getLogger("PyBDSM."+img.log+"Output")
if objtype == 'gaul':
outl, outn, patl = list_and_sort_gaussians(img, patch=None, sort_by=sort_by)
elif objtype == 'srl':
outl = [img.sources]
if incl_empty:
# Append the dummy sources for islands without any unflagged Gaussians
outl[0] += img.dsources
outstr_list = make_ascii_str(img, outl, objtype=objtype, incl_chan=incl_chan,
incl_empty=incl_empty, format=format)
if filename is None:
if objtype == 'gaul':
filename = img.imagename + '.gaul'
elif objtype == 'srl':
filename = img.imagename + '.srl'
if os.path.exists(filename) and not clobber:
return None
mylog.info('Writing ' + filename)
f = open(filename, "w")
for s in outstr_list:
f.write(s)
f.close()
return filename
def write_casa_gaul(img, filename=None, incl_empty=False, clobber=False):
"""Writes a clean box file for use in casapy"""
from . import mylogger
import os
mylog = mylogger.logging.getLogger("PyBDSM."+img.log+"Output")
outl, outn, patl = list_and_sort_gaussians(img, patch=None)
outstr_list = make_casa_str(img, outl)
if filename is None:
filename = img.imagename + '.box'
if os.path.exists(filename) and not clobber:
return None
mylog.info('Writing ' + filename)
f = open(filename, "w")
for s in outstr_list:
f.write(s)
f.close()
return filename
def write_fits_list(img, filename=None, sort_by='index', objtype='gaul',
incl_chan=False, incl_empty=False, clobber=False):
""" Write as FITS binary table.
"""
from . import mylogger
import os
import numpy as N
from astropy.io import fits as pyfits
from ._version import __version__
mylog = mylogger.logging.getLogger("PyBDSM."+img.log+"Output")
if objtype == 'gaul':
outl, outn, patl = list_and_sort_gaussians(img, patch=None, sort_by=sort_by)
elif objtype == 'srl':
outl = [img.sources]
if incl_empty:
# Append the dummy sources for islands without any unflagged Gaussians
outl[0] += img.dsources
elif objtype == 'shap':
outl = [[isl for isl in img.islands if hasattr(isl, 'shapelet_nmax')]]
nmax = 0
if objtype == 'shap':
# loop over shapelets and get maximum size of coefficient matrix
for isl in outl[0]:
if hasattr(isl, 'shapelet_nmax'):
if isl.shapelet_nmax > nmax:
nmax = isl.shapelet_nmax
nmax += 1
if img.opts.aperture is not None:
incl_aper = True
else:
incl_aper = False
if len(outl[0]) > 0:
cvals, cnames, cformats, cunits = make_output_columns(outl[0][0], fits=True,
objtype=objtype,
incl_spin=img.opts.spectralindex_do,
incl_chan=incl_chan,
incl_pol=img.opts.polarisation_do,
incl_aper=incl_aper,
incl_empty=incl_empty,
nmax=nmax, nchan=img.nchan)
out_list = make_fits_list(img, outl, objtype=objtype, nmax=nmax, incl_empty=incl_empty, incl_chan=incl_chan)
col_list = []
for ind, col in enumerate(out_list):
list1 = pyfits.Column(name=cnames[ind], format=cformats[ind],
unit=cunits[ind], array=N.array(col))
col_list.append(list1)
if len(col_list) == 0:
col_list = [pyfits.Column(name='Blank', format='1J')]
tbhdu = pyfits.BinTableHDU.from_columns(col_list)
if objtype == 'gaul':
tbhdu.header.add_comment('Gaussian list for '+img.filename)
elif objtype == 'srl':
tbhdu.header.add_comment('Source list for '+img.filename)
elif objtype == 'shap':
tbhdu.header.add_comment('Shapelet list for '+img.filename)
tbhdu.header.add_comment('Generated by PyBDSM version %s'
% (__version__, ))
freq = "%.5e" % img.frequency
tbhdu.header.add_comment('Reference frequency of the detection ("ch0") image: %s Hz' % freq)
tbhdu.header.add_comment('Equinox : %s' % img.equinox)
tbhdu.header['INIMAGE'] = (img.filename, 'Filename of image')
tbhdu.header['FREQ0'] = (float(freq), 'Reference frequency')
tbhdu.header['EQUINOX'] = (img.equinox, 'Equinox')
for key in img.header.keys():
if key in ['HISTORY', 'COMMENT', '']:
continue
tbhdu.header.add_comment('%s = %s' % (key, repr(img.header[key])))
if filename is None:
filename = img.imagename + '.' + objtype + '.fits'
if os.path.exists(filename) and not clobber:
return None
mylog.info('Writing ' + filename)
try:
tbhdu.writeto(filename, overwrite=True)
except TypeError:
# The "overwrite" argument was added in astropy v1.3, so fall back to "clobber"
# if it doesn't work
tbhdu.writeto(filename, clobber=True)
return filename
def write_kvis_ann(img, filename=None, sort_by='indx',
clobber=False):
from . import mylogger
import os
mylog = mylogger.logging.getLogger("PyBDSM."+img.log+"Output")
if filename is None:
filename = img.imagename + '.kvis.ann'
if os.path.exists(filename) and not clobber:
return None
f = open(filename, 'w')
mylog.info('Writing '+filename)
f.write("### KVis annotation file\n\n")
f.write("color green\n\n")
outl, outn, patl = list_and_sort_gaussians(img, patch=None, sort_by=sort_by)
for g in outl[0]:
iidx = g.island_id
# kvis does not correct for position-dependent angle or pixel scale
# for region files, so we must use the uncorrected values
ra, dec = g.centre_sky
shape = g.size_sky_uncorr
# Avoid shadowing the built-in str()
text_line = 'text %10.5f %10.5f %d\n' % (ra, dec, iidx)
f.write(text_line)
ellipse_line = 'ellipse %10.5f %10.5f %10.7f %10.7f %10.4f\n' % \
(ra, dec, shape[0], shape[1], shape[2])
f.write(ellipse_line)
f.close()
return filename
def write_star(img, filename=None, sort_by='indx',
clobber=False):
from .output import ra2hhmmss, dec2ddmmss
from . import mylogger
import os
mylog = mylogger.logging.getLogger("PyBDSM."+img.log+"Output")
if filename is None:
filename = img.imagename + '.star'
if os.path.exists(filename) and not clobber:
return None
f = open(filename, 'w')
mylog.info('Writing '+filename)
outl, outn, patl = list_and_sort_gaussians(img, patch=None, sort_by=sort_by)
for g in outl[0]:
A = g.peak_flux
ra, dec = g.centre_sky
shape = g.size_sky_uncorr
# convert to canonical representation
ra = ra2hhmmss(ra)
dec = dec2ddmmss(dec)
decsign = ('-' if dec[3] < 0 else '+')
# Avoid shadowing the built-in str()
star_line = '%2i %2i %6.3f ' \
'%c%2i %2i %6.3f ' \
'%9.4f %9.4f %7.2f ' \
'%2i %13.7f %10s\n' % \
(ra[0], ra[1], ra[2],
decsign, dec[0], dec[1], dec[2],
shape[0]*3600, shape[1]*3600, shape[2],
4, A, '')
f.write(star_line)
f.close()
return filename
def make_bbs_str(img, glist, gnames, patchnames, objtype='gaul',
incl_empty=False, correct_proj=True):
"""Makes a list of string entries for a BBS sky model."""
from .output import ra2hhmmss
from .output import dec2ddmmss
import numpy as N
outstr_list = []
freq = "%.5e" % img.frequency
if len(patchnames) == 0:
# Handle empty list: just write default header
outstr_list.append("format = Name, Type, Ra, Dec, I, Q, U, V, "
"MajorAxis, MinorAxis, Orientation, "
"ReferenceFrequency='"+freq+"', "
"SpectralIndex='[]'\n\n")
elif patchnames[0] is None:
outstr_list.append("format = Name, Type, Ra, Dec, I, Q, U, V, "
"MajorAxis, MinorAxis, Orientation, "
"ReferenceFrequency='"+freq+"', "
"SpectralIndex='[]'\n\n")
else:
outstr_list.append("format = Name, Type, Patch, Ra, Dec, I, Q, U, V, "
"MajorAxis, MinorAxis, Orientation, "
"ReferenceFrequency='"+freq+"', "
"SpectralIndex='[]'\n\n")
if objtype == 'shap':
raise RuntimeError("Shapelets not yet supported in the BBS format.")
else:
patchname_last = ''
for pindx, patch_name in enumerate(patchnames): # loop over patches
if patch_name is not None and patch_name != patchname_last:
outstr_list.append(', , ' + patch_name + ', 00:00:00, +00.00.00\n')
patchname_last = patch_name
gaussians_in_patch = glist[pindx]
names_in_patch = gnames[pindx]
for gindx, g in enumerate(gaussians_in_patch):
if g.gaus_num >= 0 or (g.gaus_num < 0 and incl_empty):
src_name = names_in_patch[gindx]
ra, dec = g.centre_sky
if img.equinox == 1950:
ra, dec = B1950toJ2000([ra, dec])
ra = ra2hhmmss(ra)
sra = str(ra[0]).zfill(2)+':'+str(ra[1]).zfill(2)+':'+str("%.6f" % (ra[2])).zfill(6)
dec = dec2ddmmss(dec)
decsign = ('-' if dec[3] < 0 else '+')
sdec = decsign+str(dec[0]).zfill(2)+'.'+str(dec[1]).zfill(2)+'.'+str("%.6f" % (dec[2])).zfill(6)
total = str("%.3e" % (g.total_flux))
if correct_proj:
deconv = list(g.deconv_size_sky)
else:
deconv = list(g.deconv_size_sky_uncorr)
if deconv[0] == 0.0 and deconv[1] == 0.0:
stype = 'POINT'
deconv[2] = 0.0
else:
stype = 'GAUSSIAN'
deconv1 = str("%.5e" % (deconv[0]*3600.0))
deconv2 = str("%.5e" % (deconv[1]*3600.0))
deconv3 = str("%.5e" % (deconv[2]))
deconvstr = deconv1 + ', ' + deconv2 + ', ' + deconv3
specin = '-0.8'
if 'spectralindex' in img.completed_Ops:
if g.spec_indx is not None and N.isfinite(g.spec_indx):
specin = str("%.3e" % (g.spec_indx))
sep = ', '
if img.opts.polarisation_do:
Q_flux = str("%.3e" % (g.total_flux_Q))
U_flux = str("%.3e" % (g.total_flux_U))
V_flux = str("%.3e" % (g.total_flux_V))
else:
Q_flux = '0.0'
U_flux = '0.0'
V_flux = '0.0'
if patch_name is None:
outstr_list.append(src_name + sep + stype + sep + sra + sep +
sdec + sep + total + sep + Q_flux + sep +
U_flux + sep + V_flux + sep +
deconvstr + sep + freq + sep +
'[' + specin + ']\n')
else:
outstr_list.append(src_name + sep + stype + sep + patch_name +
sep + sra + sep + sdec + sep + total + sep +
Q_flux + sep + U_flux + sep + V_flux + sep +
deconvstr + sep + freq + sep +
'[' + specin + ']\n')
else:
outstr_list.pop()
return outstr_list
def make_lsm_str(img, glist, gnames, incl_empty=False):
"""Makes a list of string entries for a SAGECAL sky model."""
from .output import ra2hhmmss
from .output import dec2ddmmss
import numpy as N
from ._version import __version__
outstr_list = ["# SAGECAL sky model\n"]
freq = "%.5e" % img.frequency
outstr_list.append('# Generated by PyBDSM version %s\n'
% (__version__, ))
outstr_list.append("# Name | RA (hr,min,sec) | DEC (deg,min,sec) | I | Q | U | V | SI | RM | eX | eY | eP | freq0\n\n")
for gindx, g in enumerate(glist[0]):
if g.gaus_num >= 0 or (g.gaus_num < 0 and incl_empty):
src_name = gnames[0][gindx]
ra, dec = g.centre_sky
if img.equinox == 1950:
ra, dec = B1950toJ2000([ra, dec])
ra = ra2hhmmss(ra)
sra = str(ra[0]).zfill(2)+' '+str(ra[1]).zfill(2)+' '+str("%.6f" % (ra[2])).zfill(6)
dec = dec2ddmmss(dec)
decsign = ('-' if dec[3] < 0 else '+')
sdec = decsign+str(dec[0]).zfill(2)+' '+str(dec[1]).zfill(2)+' '+str("%.6f" % (dec[2])).zfill(6)
total = str("%.3e" % (g.total_flux))
deconv = list(g.deconv_size_sky)
if deconv[0] == 0.0 and deconv[1] == 0.0:
sname = 'P' + src_name
deconv[2] = 0.0
else:
sname = 'G' + src_name
# Make sure Gaussian is not 1-D, as SAGECAL cannot handle these
if deconv[0] < 1e-5:
deconv[0] = 1e-5
if deconv[1] < 1e-5:
deconv[1] = 1e-5
# The following conversions are taken from the SAGECAL script "convert_skymodel.py"
deconv1 = str("%.5e" % (deconv[0]*N.pi/180.0/2.0))
deconv2 = str("%.5e" % (deconv[1]*N.pi/180.0/2.0))
deconv3 = str("%.5e" % (N.pi/2-(N.pi-deconv[2]/180.0*N.pi)))
deconvstr = deconv1 + ' ' + deconv2 + ' ' + deconv3
specin = '-0.8'
if 'spectralindex' in img.completed_Ops:
if g.spec_indx is not None and N.isfinite(g.spec_indx):
specin = str("%.3e" % (g.spec_indx))
sep = ' '
if img.opts.polarisation_do:
Q_flux = str("%.3e" % g.total_flux_Q)
U_flux = str("%.3e" % g.total_flux_U)
V_flux = str("%.3e" % g.total_flux_V)
else:
Q_flux = '0.0'
U_flux = '0.0'
V_flux = '0.0'
outstr_list.append(sname + sep + sra + sep +
sdec + sep + total + sep + Q_flux + sep +
U_flux + sep + V_flux + sep +
specin + sep + '0' + sep + deconvstr + sep +
freq + sep + '\n')
return outstr_list
def make_ds9_str(img, glist, gnames, deconvolve=False, objtype='gaul', incl_empty=False):
"""Makes a list of string entries for a ds9 region file."""
from . import mylogger
outstr_list = []
if img.equinox is None:
equinox = 'fk5'
else:
if int(img.equinox) == 2000:
equinox = 'fk5'
elif int(img.equinox) == 1950:
equinox = 'fk4'
else:
mylog = mylogger.logging.getLogger("PyBDSM.write_ds9")
mylog.warning('Equinox of input image is not J2000 or B1950. '
'Regions may not be correct.')
equinox = 'fk5'
outstr_list.append('# Region file format: DS9 version 4.0\nglobal color=green '
'font="helvetica 10 normal" select=1 highlite=1 edit=1 '
'move=1 delete=1 include=1 fixed=0 source\n'+equinox+'\n')
for gindx, g in enumerate(glist[0]):
if objtype == 'gaul':
objid = g.gaus_num
else:
objid = g.source_id
if objid >= 0 or (objid < 0 and incl_empty):
src_name = gnames[0][gindx]
if objtype == 'gaul':
ra, dec = g.centre_sky
else:
ra, dec = g.posn_sky_centroid
# ds9 does not correct for position-dependent angle or pixel scale
# for region files, so we must use the uncorrected values
if deconvolve:
deconv = g.deconv_size_sky_uncorr
else:
deconv = g.size_sky_uncorr
if deconv[0] == 0.0 and deconv[1] == 0.0:
deconv[2] = 0.0
region = 'point(' + str(ra) + ',' + str(dec) + \
') # point=cross width=2 text={' + src_name + '}\n'
else:
# ds9 can't handle 1-D Gaussians, so make sure they are 2-D
if deconv[0] < 1.0/3600.0:
deconv[0] = 1.0/3600.0
if deconv[1] < 1.0/3600.0:
deconv[1] = 1.0/3600.0
region = 'ellipse(' + str(ra) + ',' + str(dec) + ',' + \
str(deconv[0]*3600.0) + '",' + str(deconv[1]*3600.0) + \
'",' + str(deconv[2]+90.0) + ') # text={' + src_name + '}\n'
outstr_list.append(region)
return outstr_list
def make_ascii_str(img, glist, objtype='gaul', format='ascii', incl_empty=False,
incl_chan=False):
"""Makes a list of string entries for an ascii region file."""
from ._version import __version__
outstr_list = []
freq = "%.5e" % img.frequency
if objtype == 'gaul':
outstr_list.append('# Gaussian list for '+img.filename+'\n')
elif objtype == 'srl':
outstr_list.append('# Source list for '+img.filename+'\n')
outstr_list.append('# Generated by PyBDSM version %s\n'
% (__version__, ))
outstr_list.append('# Reference frequency of the detection ("ch0") image: %s Hz\n' % freq)
outstr_list.append('# Equinox : %s \n\n' % img.equinox)
if img.opts.aperture is not None:
incl_aper = True
else:
incl_aper = False
for i, g in enumerate(glist[0]):
cvals, cnames, cformats, cunits = make_output_columns(g, fits=False,
objtype=objtype,
incl_spin=img.opts.spectralindex_do,
incl_chan=incl_chan,
incl_pol=img.opts.polarisation_do,
incl_aper=incl_aper,
incl_empty=incl_empty,
nchan=img.nchan)
if cvals is not None:
cformats[-1] += "\n"
if format == 'ascii':
if i == 0:
outstr_list.append("# " + " ".join(cnames) + "\n")
outstr_list.append(" ".join(cformats).format(*cvals))
else:
if i == 0:
outstr_list.append("# " + ", ".join(cnames) + "\n")
outstr_list.append(", ".join(cformats).format(*cvals))
return outstr_list
def make_fits_list(img, glist, objtype='gaul', nmax=30, incl_empty=False,
incl_chan=False):
from . import functions as func
out_list = []
if img.opts.aperture is not None:
incl_aper = True
else:
incl_aper = False
for g in glist[0]:
cvals, ext1, ext2, ext3 = make_output_columns(g, fits=True, objtype=objtype,
incl_spin=img.opts.spectralindex_do,
incl_chan=incl_chan,
incl_pol=img.opts.polarisation_do,
incl_aper=incl_aper,
incl_empty=incl_empty,
nmax=nmax, nchan=img.nchan)
if cvals is not None:
out_list.append(cvals)
out_list = func.trans_gaul(out_list)
return out_list
def make_casa_str(img, glist):
"""Makes a list of string entries for a casa region file."""
from . import functions as func
outstr_list = ['#CRTFv0 CASA Region Text Format version 0\n']
scale = 2.0 # scale box to 2 times FWHM of Gaussian
for gindx, g in enumerate(glist[0]):
x, y = g.centre_pix
ellx, elly = func.drawellipse(g)
blc = [min(ellx), min(elly)]
trc = [max(ellx), max(elly)]
blc[0] -= (x - blc[0]) * scale
blc[1] -= (y - blc[1]) * scale
trc[0] += (trc[0] - x) * scale
trc[1] += (trc[1] - y) * scale
blc_sky = img.pix2sky(blc)
trc_sky = img.pix2sky(trc)
blc_sky_str = convert_radec_str(blc_sky[0], blc_sky[1])
trc_sky_str = convert_radec_str(trc_sky[0], trc_sky[1])
# Format is: box [[blc_ra, blc_dec], [trc_ra, trc_dec]]
# Note that we use gindx rather than g.gaus_num so that
# all Gaussians will have a unique id, even if wavelet
# Gaussians are included.
outstr_list.append('box [[' + ', '.join(blc_sky_str) + '], [' +
', '.join(trc_sky_str) + ']] coord=J2000\n')
return outstr_list
def write_islands(img):
import numpy as N
import os
# Write out island properties for reference, since achaar doesn't work.
filename = img.basedir + '/misc/'
if not os.path.exists(filename):
os.makedirs(filename)
filename = filename + 'island_file'
if img.j == 0:
f = open(filename, 'w')
f.write('Wavelet# Island_id bbox origin shape mask_active mask_noisy size_active mean rms max_value ngaul gresid_mean ' +
'gresid_rms resid_rms resid_mean nsource \n')
else:
f = open(filename, 'a')
for isl in img.islands:
f.write('%5i %5i %5i %5i %5i %5i %5i %5i %5i %5i %10i %10i %10i %.3e %.3e %.3e %5i %.3e %.3e %5i \n'
% (img.j, isl.island_id, isl.bbox[0].start, isl.bbox[0].stop, isl.bbox[1].start, isl.bbox[1].stop,
isl.origin[0], isl.origin[1], isl.shape[0], isl.shape[1], N.sum(~isl.mask_active), N.sum(~isl.mask_noisy),
isl.size_active, isl.mean, isl.rms, isl.max_value, len(isl.gaul), isl.gresid_mean, isl.gresid_rms,
len(isl.sources)))
f.close()
def get_src(src_list, srcid):
"""Returns the source for srcid or None if not found"""
for src in src_list:
if src.source_id == srcid:
return src
return None
def convert_radec_str(ra, dec):
"""Takes ra, dec in degrees and returns BBS/CASA strings"""
ra = ra2hhmmss(ra)
sra = str(ra[0]).zfill(2)+':'+str(ra[1]).zfill(2)+':'+str("%.3f" % (ra[2])).zfill(6)
dec = dec2ddmmss(dec)
decsign = ('-' if dec[3] < 0 else '+')
sdec = decsign+str(dec[0]).zfill(2)+'.'+str(dec[1]).zfill(2)+'.'+str("%.3f" % (dec[2])).zfill(6)
return sra, sdec
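# Illustrative example:
#   convert_radec_str(180.0, -30.25) -> ('12:00:00.000', '-30.15.00.000')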
def list_and_sort_gaussians(img, patch=None, root=None,
sort_by='index'):
"""Returns sorted lists of Gaussians and their names and patch names.
patch - can be "single", "gaussian", "source", or None
Returns (outlist, outnames, patchnames)
outlist is [[g1, g2, g3], [g4], ...]
outnames is [['root_i2_s1_g1', 'root_i2_s1_g2', 'root_i2_s1_g3'], ...]
patchnames is ['root_patch_s1', 'root_patch_s2', ...]
The names are root_iXX_sXX_gXX (or wXX_iXX_sXX_gXX for wavelet Gaussians)
"""
import numpy as N
from . import functions as func
# Define lists
if root is None:
root = img.parentname
gauslist = []
gausname = []
outlist = []
outnames = []
patchnames = []
patchnames_sorted = []
gausflux = [] # fluxes of Gaussians
gausindx = [] # indices of Gaussians
patchflux = [] # total flux of each patch
patchindx = [] # indices of sources
patchnums = [] # number of patch from mask
# If a mask image is to be used to define patches, read it in and
# make a rank image from it
use_mask = False
if patch not in ['single', 'gaussian', 'source', None]:
mask_file = img.opts.bbs_patches_mask
patches_mask, hdr = func.read_image_from_file(mask_file, img, img.indir)
use_mask = True
act_pixels = patches_mask[0, 0]
rank = len(act_pixels.shape)
import scipy.ndimage as nd
connectivity = nd.generate_binary_structure(rank, rank)
mask_labels, count = nd.label(act_pixels, connectivity)
src_list = img.sources
for src in src_list:
for g in src.gaussians:
gauslist.append(g)
gausflux.append(g.total_flux)
gausindx.append(g.gaus_num)
jstr = '_w' + str(g.jlevel)
gausname.append(root + jstr + '_i' + str(src.island_id) + '_s' +
str(src.source_id) + '_g' + str(g.gaus_num))
if patch == 'gaussian':
outlist.append(gauslist)
outnames.append(gausname)
patchnames.append(root + '_patch' + jstr + '_g' + str(g.gaus_num))
patchflux.append(N.sum(gausflux))
patchindx.append(g.gaus_num)
gauslist = [] # reset for next Gaussian
gausname = []
gausflux = []
gausindx = []
if use_mask:
patchnums.append(mask_labels[g.centre_pix[0], g.centre_pix[1]])
if patch == 'source':
sorted_gauslist = list(gauslist)
sorted_gausname = list(gausname)
if sort_by == 'flux':
# Sort Gaussians by flux within each source
indx = N.argsort(N.array(gausflux)).tolist()
indx.reverse()
elif sort_by == 'index':
# Sort Gaussians by index within each source
indx = N.argsort(N.array(gausindx)).tolist()
else:
# Unrecognized property --> Don't sort
indx = range(len(gausindx))
for i, si in enumerate(indx):
sorted_gauslist[i] = gauslist[si]
sorted_gausname[i] = gausname[si]
outlist.append(sorted_gauslist)
outnames.append(sorted_gausname)
patchnames.append(root + '_patch' + '_s' + str(src.source_id))
patchflux.append(N.sum(gausflux))
patchindx.append(src.source_id)
gauslist = [] # reset for next source
gausname = []
gausflux = []
if use_mask:
unique_patch_ids = set(patchnums)
# Check if there is a patch with id = 0. If so, this means there were
# some Gaussians that fell outside of the regions in the patch
# mask file.
if 0 in unique_patch_ids:
from . import mylogger
mylog = mylogger.logging.getLogger("PyBDSM.write_gaul")
mylog.warning('Some sources fall outside of the regions '
'defined in the mask file. These sources are not '
'included in the output sky model.')
for p in unique_patch_ids:
if p != 0:
# patchnums is a plain list, so convert it to an array for
# element-wise comparison
in_patch = N.where(N.array(patchnums) == p)
outlist.append(N.array(gauslist)[in_patch].tolist())
outnames.append(N.array(gausname)[in_patch].tolist())
patchnames.append('patch_'+str(p))
patchflux.append(N.sum(N.array(gausflux)[in_patch]))
patchindx.append(p)
# Sort
if patch == 'single' or patch is None:
outlist = [list(gauslist)]
outlist_sorted = [list(gauslist)]
outnames = [list(gausname)]
outnames_sorted = [list(gausname)]
if patch == 'single':
patchnames = [root + '_patch']
else:
patchnames = [None]
if sort_by == 'flux':
# Sort by Gaussian flux
indx = N.argsort(N.array(gausflux)).tolist()
indx.reverse()
elif sort_by == 'index':
# Sort by Gaussian index
indx = N.argsort(N.array(gausindx)).tolist()
else:
# Unrecognized property --> Don't sort
indx = list(range(len(gausindx)))
for i, si in enumerate(indx):
outlist_sorted[0][i] = outlist[0][si]
outnames_sorted[0][i] = outnames[0][si]
patchnames_sorted = list(patchnames)
else:
outlist_sorted = list(outlist)
outnames_sorted = list(outnames)
patchnames_sorted = list(patchnames)
if sort_by == 'flux':
# Sort by patch flux
indx = N.argsort(N.array(patchflux)).tolist()
indx.reverse()
elif sort_by == 'index':
# Sort by source index
indx = N.argsort(N.array(patchindx)).tolist()
else:
# Unrecognized property --> Don't sort
indx = list(range(len(patchindx)))
for i, si in enumerate(indx):
outlist_sorted[i] = outlist[si]
outnames_sorted[i] = outnames[si]
patchnames_sorted[i] = patchnames[si]
return (outlist_sorted, outnames_sorted, patchnames_sorted)
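# Illustrative sketch (not called by PyBDSF) of the argsort-based reordering
# used above: one list is reordered by a parallel list of keys, descending
# for 'flux'-style sorts. The values below are toy numbers.
def _example_sort_by_key():
    import numpy as N
    names = ['g1', 'g2', 'g3']
    flux = [0.1, 0.7, 0.3]
    indx = N.argsort(N.array(flux)).tolist()
    indx.reverse()  # descending, as for sort_by='flux'
    return [names[si] for si in indx]  # -> ['g2', 'g3', 'g1']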
def make_output_columns(obj, fits=False, objtype='gaul', incl_spin=False,
incl_chan=False, incl_pol=False, incl_aper=False,
incl_empty=False, nmax=30, nchan=1):
"""Returns a list of column names, formats, and units for Gaussian, Source, or Shapelet"""
import numpy as N
# First, define a list of columns in order desired, using the names of
# the attributes of the object
if objtype == 'gaul':
names = ['gaus_num', 'island_id', 'source_id', 'jlevel',
'centre_sky', 'centre_skyE', 'total_flux',
'total_fluxE', 'peak_flux', 'peak_fluxE',
'centre_pix', 'centre_pixE', 'size_sky', 'size_skyE',
'size_sky_uncorr', 'size_skyE_uncorr',
'deconv_size_sky', 'deconv_size_skyE',
'deconv_size_sky_uncorr', 'deconv_size_skyE_uncorr',
'total_flux_isl', 'total_flux_islE', 'rms',
'mean', 'gresid_rms', 'gresid_mean', 'wave_rms', 'wave_mean',
'code']
elif objtype == 'srl':
if incl_aper:
infix = ['aperture_flux', 'aperture_fluxE']
else:
infix = []
names = ['source_id', 'island_id', 'posn_sky_centroid',
'posn_sky_centroidE', 'total_flux',
'total_fluxE',
'peak_flux_max', 'peak_flux_maxE'] + infix + \
['posn_sky_max', 'posn_sky_maxE',
'posn_pix_centroid', 'posn_pix_centroidE', 'posn_pix_max',
'posn_pix_maxE',
'size_sky', 'size_skyE',
'size_sky_uncorr', 'size_skyE_uncorr',
'deconv_size_sky', 'deconv_size_skyE',
'deconv_size_sky_uncorr', 'deconv_size_skyE_uncorr',
'total_flux_isl', 'total_flux_islE',
'rms_isl', 'mean_isl', 'gresid_rms',
'gresid_mean', 'code']
elif objtype == 'shap':
names = ['island_id', 'shapelet_posn_sky', 'shapelet_posn_skyE',
'shapelet_basis', 'shapelet_beta', 'shapelet_nmax', 'shapelet_cf']
else:
print('Object type unrecognized.')
return (None, None, None, None)
if incl_spin:
names += ['spec_indx', 'e_spec_indx']
if incl_chan:
names += ['specin_flux', 'specin_fluxE', 'specin_freq']
if incl_pol:
names += ['total_flux_Q', 'total_fluxE_Q', 'total_flux_U', 'total_fluxE_U',
'total_flux_V', 'total_fluxE_V', 'lpol_fraction', 'lpol_fraction_loerr',
'lpol_fraction_hierr', 'cpol_fraction', 'cpol_fraction_loerr',
'cpol_fraction_hierr', 'tpol_fraction', 'tpol_fraction_loerr',
'tpol_fraction_hierr', 'lpol_angle', 'lpol_angle_err']
cnames = []
cformats = []
cunits = []
cvals = []
skip_next = False
for n, name in enumerate(names):
if hasattr(obj, name):
if name in ['specin_flux', 'specin_fluxE', 'specin_freq']:
# As these are variable length lists, they must
# (unfortunately) be treated differently.
val = obj.__getattribute__(name)
colname = obj.__dict__[name+'_def']._colname
units = obj.__dict__[name+'_def']._units
for i in range(nchan):
if i < len(val):
cvals.append(val[i])
cnames.append(colname[0]+'_ch'+str(i+1))
cunits.append(units[0])
else:
cvals.append(N.NaN)
cnames.append(colname[0]+'_ch'+str(i+1))
cunits.append(units[0])
else:
if not skip_next:
val = obj.__getattribute__(name)
colname = obj.__dict__[name+'_def']._colname
units = obj.__dict__[name+'_def']._units
if units is None:
units = ' '
if isinstance(val, list) or isinstance(val, tuple):
# This is a list, so handle it differently. We assume the next
# entry will have the errors, and they are interleaved to be
# in the order (val, error).
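# e.g., a position list such as centre_sky followed by centre_skyE
# produces value/error pairs per coordinate (RA, RA error, Dec, Dec error).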
next_name = names[n+1]
val_next = obj.__getattribute__(next_name)
colname_next = obj.__dict__[next_name+'_def']._colname
units_next = obj.__dict__[next_name+'_def']._units
if units_next is None:
units_next = ' '
for i in range(len(val)):
cvals.append(val[i])
cvals.append(val_next[i])
cnames.append(colname[i])
cnames.append(colname_next[i])
cunits.append(units[i])
cunits.append(units_next[i])
skip_next = True
elif isinstance(val, N.ndarray):
# This is a numpy array, so flatten it
tarr = val.flatten()
tarr2 = N.resize(tarr, nmax**2)
tarr2[tarr.shape[0]:] = N.NaN
cvals.append(tarr2)
cnames.append(colname)
cunits.append(units)
else:
cvals.append(val)
cnames.append(colname)
cunits.append(units)
else:
skip_next = False
for i, v in enumerate(cvals):
if fits:
if isinstance(v, int):
cformats.append('J')
elif isinstance(v, float) or isinstance(v, N.float32) or isinstance(v, N.float64):
cformats.append('D')
elif isinstance(v, str):
cformats.append('A')
elif isinstance(v, N.ndarray):
cformats.append('%iD' % (nmax**2,))
else:
raise RuntimeError("Format not supported.")
else:
if isinstance(v, int):
cformats.append('{'+str(i)+':4d}')
elif isinstance(v, float) or isinstance(v, N.float32) or isinstance(v, N.float64):
cformats.append('{'+str(i)+':.14f}')
elif isinstance(v, str):
cformats.append('{'+str(i)+':4s}')
else:
raise RuntimeError("Format not supported.")
if objtype == 'gaul':
if obj.gaus_num < 0 and not incl_empty:
return (None, cnames, cformats, cunits)
if objtype == 'srl':
if obj.source_id < 0 and not incl_empty:
return (None, cnames, cformats, cunits)
return (cvals, cnames, cformats, cunits)
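# Illustrative sketch (not used by the pipeline) of the FITS column-format
# mapping applied above: ints map to 'J', floats to 'D', strings to 'A', and
# flattened arrays to '<n>D' with n = nmax**2. Toy values; nmax assumed 30.
def _example_fits_formats(nmax=30):
    import numpy as N
    vals = [1, 2.5, 'S', N.zeros(nmax**2)]
    formats = []
    for v in vals:
        if isinstance(v, int):
            formats.append('J')
        elif isinstance(v, float):
            formats.append('D')
        elif isinstance(v, str):
            formats.append('A')
        elif isinstance(v, N.ndarray):
            formats.append('%iD' % (nmax**2,))
    return formats  # -> ['J', 'D', 'A', '900D']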
PyBDSF-1.11.0/bdsf/plotresults.py 0000664 0000000 0000000 00000071366 14650706641 0016537 0 ustar 00root root 0000000 0000000 """Plotting module
This module is used to display fit results.
"""
from __future__ import print_function
from __future__ import absolute_import
from .image import *
from . import has_pl
if has_pl:
import matplotlib.pyplot as pl
import matplotlib.cm as cm
import matplotlib.patches as mpatches
from matplotlib.widgets import Button
from matplotlib.patches import Ellipse
from matplotlib.lines import Line2D
from matplotlib import collections
from math import log10
from . import functions as func
from .const import fwsig
import os
import warnings
import numpy as N
warnings.simplefilter(action='ignore', category=FutureWarning)
def plotresults(img, ch0_image=True, rms_image=True, mean_image=True,
ch0_islands=True, gresid_image=True, sresid_image=False,
gmodel_image=True, smodel_image=False, pyramid_srcs=False,
source_seds=False, ch0_flagged=False, pi_image=False,
psf_major=False, psf_minor=False, psf_pa=False, broadcast=False):
"""Show the results of a fit."""
global img_ch0, img_rms, img_mean, img_gaus_mod, img_shap_mod
global img_gaus_resid, img_shap_resid, pixels_per_beam, pix2sky
global vmin, vmax, vmin_cur, vmax_cur, ch0min, ch0max, img_pi
global low, fig, images, src_list, srcid_cur, sky2pix, markers
global img_psf_maj, img_psf_min, img_psf_pa, do_broadcast, samp_client
global samp_key, samp_gaul_table_url, samp_srl_table_url
if not has_pl:
print("\033[31;1mWARNING\033[0m: Matplotlib not found. Plotting is disabled.")
return
if hasattr(img, 'samp_client'):
samp_client = img.samp_client
samp_key = img.samp_key
if hasattr(img, 'samp_srl_table_url'):
samp_srl_table_url = img.samp_srl_table_url
else:
samp_srl_table_url = None
if hasattr(img, 'samp_gaul_table_url'):
samp_gaul_table_url = img.samp_gaul_table_url
else:
samp_gaul_table_url = None
else:
samp_client = None
samp_key = None
samp_srl_table_url = None
samp_gaul_table_url = None
do_broadcast = broadcast
# Define the images. The images are used both by imshow and by the
# on_press() and coord_format event handlers
pix2sky = img.pix2sky
sky2pix = img.sky2pix
gfactor = 2.0 * N.sqrt(2.0 * N.log(2.0))
pixels_per_beam = 2.0 * N.pi * (img.beam2pix(img.beam)[0]
* img.beam2pix(img.beam)[1]) / gfactor**2
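# Worked example (hypothetical beam): for a 6"x6" FWHM beam on 1.5" pixels,
# the FWHM is 4 pixels on each axis and gfactor = 2*sqrt(2*ln(2)) ~ 2.3548,
# so pixels_per_beam = 2*pi*4*4/gfactor**2 ~ 18.1 pixels per beam area.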
# Construct lists of images, titles, etc.
images = []
titles = []
names = []
markers = []
img_gaus_mod = None # default needed for key press event
img_shap_mod = None # default needed for key press event
if ch0_image:
img_ch0 = img.ch0_arr
images.append(img_ch0)
titles.append('Original (ch0) Image\n(arbitrary logarithmic scale)')
names.append('ch0')
if ch0_islands:
img_ch0 = img.ch0_arr
images.append(img_ch0)
if hasattr(img, 'ngaus'):
if hasattr(img, 'ch0_pi_arr'):
ch0_str = 'Islands (hatched boundaries; red = PI only) and\nGaussians'
else:
ch0_str = 'Islands (hatched boundaries) and\nGaussians'
if hasattr(img, 'atrous_gaussians'):
ch0_str += ' (red = wavelet)'
titles.append(ch0_str)
else:
titles.append('Islands (hatched boundaries)')
names.append('ch0')
if ch0_flagged:
if not hasattr(img, 'ngaus'):
print('Image was not fit with Gaussians. Skipping display of flagged Gaussians.')
else:
img_ch0 = img.ch0_arr
images.append(img_ch0)
titles.append('Flagged Gaussians')
names.append('ch0')
if pi_image:
if not hasattr(img, 'ch0_pi_arr'):
print('Polarization module not run. Skipping PI image.')
else:
img_pi = img.ch0_pi_arr
images.append(img_pi)
titles.append('Polarized Intensity Image')
names.append('ch0_pi')
if rms_image:
img_rms = img.rms_arr
images.append(img_rms)
titles.append('Background rms Image')
names.append('rms')
if gresid_image:
if not hasattr(img, 'ngaus'):
print('Image was not fit with Gaussians. Skipping residual Gaussian image.')
else:
img_gaus_resid = img.resid_gaus_arr
images.append(img_gaus_resid)
titles.append('Gaussian Residual Image')
names.append('gaus_resid')
if gmodel_image:
if not hasattr(img, 'ngaus'):
print('Image was not fit with Gaussians. Skipping model Gaussian image.')
else:
img_gaus_mod = img.model_gaus_arr
images.append(img_gaus_mod)
titles.append('Gaussian Model Image')
names.append('gaus_mod')
if mean_image:
img_mean = img.mean_arr
images.append(img_mean)
titles.append('Background mean Image')
names.append('mean')
if sresid_image:
if not img.opts.shapelet_do:
print('Image was not decomposed into shapelets. Skipping residual shapelet image.')
else:
img_shap_resid = img.resid_shap_arr
images.append(img_shap_resid)
titles.append('Shapelet Residual Image')
names.append('shap_resid')
if smodel_image:
if not img.opts.shapelet_do:
print('Image was not decomposed into shapelets. Skipping model shapelet image.')
else:
img_shap_mod = img.model_shap_arr
images.append(img_shap_mod)
titles.append('Shapelet Model Image')
names.append('shap_mod')
if source_seds:
if not img.opts.spectralindex_do:
print('Source SEDs were not fit. Skipping source SED plots.')
else:
src_list = img.sources
sed_src = get_src(src_list, 0)
if sed_src is None:
print('No sources found. Skipping source SED plots.')
else:
images.append('seds')
titles.append('')
names.append('seds')
srcid_cur = 0
if pyramid_srcs:
if not img.opts.atrous_do:
print('Image was not decomposed into wavelets. Skipping wavelet images.')
else:
# Get the unique j levels and store them. Only make subplots for
# occupied j levels
print('Pyramidal source plots not yet supported.')
# j_list = []
# for p in img.pyrsrcs:
# for l in p.jlevels:
# j_list.append(l)
# j_set = set(j_list)
# j_with_gaus = list(j_set)
# index_first_waveplot = len(images)
# for i in range(len(j_with_gaus)):
# images.append('wavelets')
# names.append('pyrsrc'+str(i))
if psf_major or psf_minor or psf_pa:
if not img.opts.psf_vary_do:
print('PSF variation not calculated. Skipping PSF variation images.')
else:
if psf_major:
img_psf_maj = img.psf_vary_maj_arr*fwsig
images.append(img_psf_maj)
titles.append('PSF Major Axis FWHM (pixels)')
names.append('psf_maj')
if psf_minor:
img_psf_min = img.psf_vary_min_arr*fwsig
images.append(img_psf_min)
titles.append('PSF Minor Axis FWHM (pixels)')
names.append('psf_min')
if psf_pa:
img_psf_pa = img.psf_vary_pa_arr
images.append(img_psf_pa)
titles.append('PSF Pos. Angle (degrees)')
names.append('psf_pa')
if not images:
print('No images to display.')
return
im_mean = img.clipped_mean
im_rms = img.clipped_rms
if img.resid_gaus_arr is None:
low = 1.1*abs(img.min_value)
else:
low = N.max([1.1*abs(img.min_value),1.1*abs(N.nanmin(img.resid_gaus_arr))])
if low <= 0.0:
low = 1E-6
vmin_est = im_mean - im_rms*5.0 + low
if vmin_est <= 0.0:
vmin = N.log10(low)
else:
vmin = N.log10(vmin_est)
vmax = N.log10(im_mean + im_rms*30.0 + low)
ch0min = vmin
ch0max = N.log10(img.max_value + low)
vmin_cur = vmin
vmax_cur = vmax
origin = 'lower'
colours = ['m', 'b', 'c', 'g', 'y', 'k'] # reserve red ('r') for wavelets
styles = ['-', '-.', '--']
print('=' * 72)
print('NOTE -- With the mouse pointer in plot window:')
print(' Press "i" ........ : Get integrated flux densities and mean rms')
print(' values for the visible portion of the image')
print(' Press "m" ........ : Change min and max scaling values')
print(' Press "n" ........ : Show / hide island IDs')
print(' Press "0" ........ : Reset scaling to default')
if 'seds' in images:
print(' Press "c" ........ : Change source for SED plot')
if ch0_islands and hasattr(img, 'ngaus'):
print(' Click Gaussian ... : Print Gaussian and source IDs (zoom_rect mode, ')
print(' toggled with the "zoom" button and indicated in ')
print(' the lower right corner, must be off)')
if 'seds' in images:
print(' The SED plot will also show the chosen source.')
print('_' * 72)
if len(images) > 1:
numx = 2
else:
numx = 1
numy = int(N.ceil(float(len(images))/float(numx)))
fig = pl.figure(figsize=(max(15, 10.0*float(numy)/float(numx)), 10.0))
fig.canvas.manager.set_window_title('PyBDSM Fit Results for '+ img.filename)
gray_palette = cm.gray
gray_palette.set_bad('k')
for i, image in enumerate(images):
if image != 'wavelets' and image != 'seds':
if i == 0:
cmd = 'ax' + str(i+1) + ' = pl.subplot(' + str(numx) + \
', ' + str(numy) + ', ' + str(i+1) + ')'
else:
cmd = 'ax' + str(i+1) + ' = pl.subplot(' + str(numx) + \
', ' + str(numy) + ', ' + str(i+1) + ', sharex=ax1' + \
', sharey=ax1)'
exec(cmd)
if 'PSF' in titles[i]:
im = image
else:
im = N.log10(image + low)
if 'Islands' in titles[i]:
island_offsets_x = []
island_offsets_y = []
border_color = []
ax = pl.gca()
for iisl, isl in enumerate(img.islands):
xb, yb = isl.border
if hasattr(isl, '_pi'):
for c in range(len(xb)):
border_color.append('r')
else:
for c in range(len(xb)):
border_color.append('#afeeee')
island_offsets_x += xb.tolist()
island_offsets_y += yb.tolist()
marker = ax.text(N.max(xb)+2, N.max(yb), str(isl.island_id),
color='#afeeee', clip_on=True)
marker.set_visible(not marker.get_visible())
markers.append(marker)
# draw the gaussians with one colour per source or island
# (if gaul2srl was not run)
if hasattr(img, 'nsrc'):
nsrc = len(isl.sources)
for isrc in range(nsrc):
col = colours[int(isrc % 6)]
style = styles[int(isrc/6 % 3)]
src = isl.sources[isrc]
for g in src.gaussians:
if hasattr(g, 'valid'):
valid = g.valid
else:
valid = True
if g.jlevel == 0 and valid and g.gaus_num >= 0:
gidx = g.gaus_num
e = Ellipse(xy=g.centre_pix, width=g.size_pix[0],
height=g.size_pix[1], angle=g.size_pix[2]+90.0)
ax.add_artist(e)
e.set_picker(3)
e.set_clip_box(ax.bbox)
e.set_facecolor(col)
e.set_alpha(0.5)
e.gaus_id = gidx
e.src_id = src.source_id
e.jlevel = g.jlevel
e.isl_id = g.island_id
e.tflux = g.total_flux
e.pflux = g.peak_flux
e.centre_sky = g.centre_sky
if len(img.islands) > 0:
island_offsets = list(zip(N.array(island_offsets_x), N.array(island_offsets_y)))
isl_borders = collections.AsteriskPolygonCollection(4, offsets=island_offsets, color=border_color,
transOffset=ax.transData, sizes=(10.0,))
ax.add_collection(isl_borders)
if hasattr(img, 'gaussians'):
for atrg in img.gaussians:
if atrg.jlevel > 0 and atrg.gaus_num >= 0:
col = 'r'
style = '-'
gidx = atrg.gaus_num
e = Ellipse(xy=atrg.centre_pix, width=atrg.size_pix[0], height=atrg.size_pix[1], angle=atrg.size_pix[2]+90.0)
ax.add_artist(e)
e.set_picker(3)
e.set_clip_box(ax.bbox)
e.set_edgecolor(col)
e.set_facecolor('none')
e.set_alpha(0.8)
e.gaus_id = gidx
e.src_id = atrg.source_id
e.jlevel = atrg.jlevel
e.isl_id = atrg.island_id
e.tflux = atrg.total_flux
e.pflux = atrg.peak_flux
e.centre_sky = atrg.centre_sky
if 'Flagged' in titles[i]:
for iisl, isl in enumerate(img.islands):
ax = pl.gca()
style = '-'
for ig, g in enumerate(isl.fgaul):
col = colours[ig % 6]
ellx, elly = func.drawellipse(g)
gline, = ax.plot(ellx, elly, color = col,
linestyle = style, picker=3)
gline.flag = g.flag
if 'PSF' in titles[i]:
cmd = 'ax' + str(i+1) + ".imshow(N.transpose(im), origin=origin, "\
"interpolation='nearest', cmap=gray_palette)"
else:
cmd = 'ax' + str(i+1) + ".imshow(N.transpose(im), origin=origin, "\
"interpolation='nearest',vmin=vmin, vmax=vmax, cmap=gray_palette)"
exec(cmd)
cmd = 'ax' + str(i+1) + '.format_coord = format_coord_'+names[i]
exec(cmd)
pl.title(titles[i])
elif image == 'seds':
cmd = 'ax' + str(i+1) + ' = pl.subplot(' + str(numx) + \
', ' + str(numy) + ', ' + str(i+1) + ')'
exec(cmd)
ax = pl.gca()
plot_sed(sed_src, ax)
elif image == 'wavelets':
if i == index_first_waveplot:
for j in range(len(j_with_gaus)):
cmd = 'ax' + str(j+i+1) + ' = pl.subplot(' + str(numx) + \
', ' + str(numy) + ', ' + str(j+i+1) + ', sharex=ax1, '+\
'sharey=ax1)'
exec(cmd)
pl.title('Pyramidal Sources for\nWavelet Scale J = ' +
str(j_with_gaus[j]))
for pyr in img.pyrsrcs:
for iisl, isl in enumerate(pyr.islands):
jj = pyr.jlevels[iisl]
jindx = j_with_gaus.index(jj)
col = colours[pyr.pyr_id % 6]
ind = N.where(~isl.mask_active)
cmd = "ax" + str(jindx + index_first_waveplot + 1) + \
".plot(ind[0]+isl.origin[0], "\
"ind[1]+isl.origin[1], '.', color=col)"
exec(cmd)
fig.canvas.mpl_connect('key_press_event', on_press)
fig.canvas.mpl_connect('pick_event', on_pick)
pl.show()
pl.close('all')
def on_pick(event):
global images, srcid_cur, samp_client, samp_key, do_broadcast, samp_gaul_table_url, samp_srl_table_url
g = event.artist
if hasattr(g, 'gaus_id'):
gaus_id = g.gaus_id
src_id = g.src_id
isl_id = g.isl_id
tflux = g.tflux
pflux = g.pflux
wav_j = g.jlevel
if wav_j == 0:
print('Gaussian #' + str(gaus_id) + ' (in src #' + str(src_id) + \
', isl #' + str(isl_id) + '): F_tot = ' + str(round(tflux,4)) + \
' Jy, F_peak = ' + str(round(pflux,4)) + ' Jy/beam')
else:
print('Gaussian #' + str(gaus_id) + ' (in src #' + str(src_id) + \
', isl #' + str(isl_id) + ', wav #' + str(wav_j) + \
'): F_tot = ' + str(round(tflux,3)) + ' Jy, F_peak = ' + \
str(round(pflux,4)) + ' Jy/beam')
# Transmit src_id, gaus_id, and coordinates to SAMP Hub (if we are connected)
if do_broadcast and samp_key is not None:
if samp_gaul_table_url is not None:
func.send_highlight_row(samp_client, samp_key, samp_gaul_table_url, gaus_id)
if samp_srl_table_url is not None:
func.send_highlight_row(samp_client, samp_key, samp_srl_table_url, src_id)
func.send_coords(samp_client, samp_key, g.centre_sky)
# Change source SED
# First check that SEDs are being plotted and that the selected Gaussian
# is from the zeroth wavelet image
has_sed = False
if 'seds' in images and wav_j == 0:
has_sed = True
if not has_sed:
return
ax_indx = images.index('seds')
sed_src = get_src(src_list, src_id)
if srcid_cur == src_id:
return
srcid_cur = src_id
axes_list = fig.get_axes()
for axindx, ax in enumerate(axes_list):
if images[axindx] == 'seds':
plot_sed(sed_src, ax)
else:
print('Flagged Gaussian (flag = ' + str(g.flag) + '; use "' + \
"help 'flagging_opts'" + '" for flag meanings)')
pl.draw()
def on_press(event):
"""Handle keypresses"""
from .interface import raw_input_no_history
import numpy
global img_ch0, img_rms, img_mean, img_gaus_mod, img_shap_mod
global pixels_per_beam, vmin, vmax, vmin_cur, vmax_cur, img_pi
global ch0min, ch0max, low, fig, images, src_list, srcid_cur
global markers
if event.key == '0':
print('Resetting limits to defaults (%.4f -- %.4f Jy/beam)' \
% (pow(10, vmin)-low,
pow(10, vmax)-low))
axes_list = fig.get_axes()
for axindx, ax in enumerate(axes_list):
if images[axindx] != 'wavelets' and images[axindx] != 'seds':
im = ax.get_images()[0]
im.set_clim(vmin, vmax)
vmin_cur = vmin
vmax_cur = vmax
pl.draw()
if event.key == 'm':
# Modify scaling
# First check that there are images to modify
has_image = False
for im in images:
if isinstance(im, numpy.ndarray):
has_image = True
if not has_image:
return
minscl = 'a'
while isinstance(minscl, str):
try:
if minscl == '':
minscl = pow(10, vmin_cur) - low
break
minscl = float(minscl)
except ValueError:
prompt = "Enter min value (current = %.4f Jy/beam) : " % (pow(10, vmin_cur)-low,)
try:
minscl = raw_input_no_history(prompt)
except RuntimeError:
print('Sorry, unable to change scaling.')
return
minscl = N.log10(minscl + low)
maxscl = 'a'
while isinstance(maxscl, str):
try:
if maxscl == '':
maxscl = pow(10, vmax_cur) - low
break
maxscl = float(maxscl)
except ValueError:
prompt = "Enter max value (current = %.4f Jy/beam) : " % (pow(10, vmax_cur)-low,)
try:
maxscl = raw_input_no_history(prompt)
except RuntimeError:
print('Sorry, unable to change scaling.')
return
maxscl = N.log10(maxscl + low)
if maxscl <= minscl:
print('Max value must be greater than min value!')
return
axes_list = fig.get_axes()
for axindx, ax in enumerate(axes_list):
if images[axindx] != 'wavelets' and images[axindx] != 'seds':
im = ax.get_images()[0]
im.set_clim(minscl, maxscl)
vmin_cur = minscl
vmax_cur = maxscl
pl.draw()
if event.key == 'c':
# Change source SED
# First check that SEDs are being plotted
has_sed = False
if 'seds' in images:
has_sed = True
if not has_sed:
return
srcid = 'a'
while isinstance(srcid, str):
try:
if srcid == '':
srcid = srcid_cur
break
srcid = int(srcid)
except ValueError:
prompt = "Enter source ID (current = %i) : " % (srcid_cur,)
try:
srcid = raw_input_no_history(prompt)
except RuntimeError:
print('Sorry, unable to change source.')
return
ax_indx = images.index('seds')
sed_src = get_src(src_list, srcid)
if sed_src is None:
print('Source not found!')
return
srcid_cur = srcid
axes_list = fig.get_axes()
for axindx, ax in enumerate(axes_list):
if images[axindx] == 'seds':
plot_sed(sed_src, ax)
pl.draw()
if event.key == 'i':
# Print info about visible region
has_image = False
axes_list = fig.get_axes()
# Get limits of visible region
for axindx, ax in enumerate(axes_list):
if images[axindx] != 'wavelets' and images[axindx] != 'seds':
xmin, xmax = ax.get_xlim()
ymin, ymax = ax.get_ylim()
has_image = True
break
if not has_image:
return
# Clip the visible region to the image boundaries and cast to int, since
# the axis limits are floats and cannot be used directly as array indices
xmin = max(0, int(xmin))
xmax = min(img_ch0.shape[0], int(xmax))
ymin = max(0, int(ymin))
ymax = min(img_ch0.shape[1], int(ymax))
flux = N.nansum(img_ch0[xmin:xmax, ymin:ymax])/pixels_per_beam
mask = N.isnan(img_ch0[xmin:xmax, ymin:ymax])
num_pix_unmasked = float(N.sum(~mask))
mean_rms = N.nansum(img_rms[xmin:xmax, ymin:ymax])/num_pix_unmasked
mean_map_flux = N.nansum(img_mean[xmin:xmax, ymin:ymax])/pixels_per_beam
if img_gaus_mod is None:
gaus_mod_flux = 0.0
else:
gaus_mod_flux = N.nansum(img_gaus_mod[xmin:xmax, ymin:ymax])/pixels_per_beam
print('Visible region (%i:%i, %i:%i) :' % (xmin, xmax, ymin, ymax))
print(' ch0 flux density from sum of pixels ... : %f Jy'\
% (flux,))
print(' Background mean map flux density ...... : %f Jy'\
% (mean_map_flux,))
print(' Gaussian model flux density ........... : %f Jy'\
% (gaus_mod_flux,))
if img_shap_mod is not None:
shap_mod_flux = N.nansum(img_shap_mod[xmin:xmax, ymin:ymax])/pixels_per_beam
print(' Shapelet model flux density ........... : %f Jy'\
% (shap_mod_flux,))
print(' Mean rms (from rms map) ............... : %f Jy/beam'\
% (mean_rms,))
if event.key == 'n':
# Show/Hide island numbers
if markers:
for marker in markers:
marker.set_visible(not marker.get_visible())
pl.draw()
# The following functions add ra, dec and flux density to the
# coordinates in the lower-right-hand corner of the figure window.
# Since each axis needs its own function (to return its particular
# flux), we need a separate function for each subplot.
def format_coord_ch0(x, y):
"""Custom coordinate format for ch0 image"""
global img_ch0
im = img_ch0
coord_str = make_coord_str(x, y, im)
return coord_str
def format_coord_ch0_pi(x, y):
"""Custom coordinate format for ch0 image"""
global img_pi
im = img_pi
coord_str = make_coord_str(x, y, im)
return coord_str
def format_coord_rms(x, y):
"""Custom coordinate format for rms image"""
global img_rms
im = img_rms
coord_str = make_coord_str(x, y, im)
return coord_str
def format_coord_mean(x, y):
"""Custom coordinate format for mean image"""
global img_mean
im = img_mean
coord_str = make_coord_str(x, y, im)
return coord_str
def format_coord_gaus_mod(x, y):
"""Custom coordinate format for Gaussian model image"""
global img_gaus_mod
im = img_gaus_mod
coord_str = make_coord_str(x, y, im)
return coord_str
def format_coord_shap_mod(x, y):
"""Custom coordinate format for shapelet model image"""
global img_shap_mod
im = img_shap_mod
coord_str = make_coord_str(x, y, im)
return coord_str
def format_coord_gaus_resid(x, y):
"""Custom coordinate format for Gaussian residual image"""
global img_gaus_resid
im = img_gaus_resid
coord_str = make_coord_str(x, y, im)
return coord_str
def format_coord_shap_resid(x, y):
"""Custom coordinate format for shapelet residual image"""
global img_shap_resid
im = img_shap_resid
coord_str = make_coord_str(x, y, im)
return coord_str
def format_coord_psf_maj(x, y):
"""Custom coordinate format for PSF major image"""
global img_psf_maj
im = img_psf_maj
coord_str = make_coord_str(x, y, im, unit='arcsec')
return coord_str
def format_coord_psf_min(x, y):
"""Custom coordinate format for PSF minor image"""
global img_psf_min
im = img_psf_min
coord_str = make_coord_str(x, y, im, unit='arcsec')
return coord_str
def format_coord_psf_pa(x, y):
"""Custom coordinate format for PSF pos. ang. image"""
global img_psf_pa
im = img_psf_pa
coord_str = make_coord_str(x, y, im, unit='degrees')
return coord_str
def xy_to_radec_str(x, y):
"""Converts x, y in image coords to a sexigesimal string"""
from .output import ra2hhmmss, dec2ddmmss
global pix2sky
ra, dec = pix2sky([x, y])
ra = ra2hhmmss(ra)
sra = str(ra[0]).zfill(2)+':'+str(ra[1]).zfill(2)+':'+str("%.1f" % (ra[2])).zfill(3)
dec = dec2ddmmss(dec)
decsign = ('-' if dec[3] < 0 else '+')
sdec = decsign+str(dec[0]).zfill(2)+':'+str(dec[1]).zfill(2)+':'+str("%.1f" % (dec[2])).zfill(3)
return sra, sdec
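# Minimal sketch (not called anywhere) of the zero-padded sexagesimal
# formatting used in xy_to_radec_str, for a hypothetical position:
def _example_radec_format():
    ra = (12, 30, 45.6)  # hours, minutes, seconds
    dec = (41, 16, 9.0, 1)  # degrees, minutes, seconds, sign
    sra = str(ra[0]).zfill(2) + ':' + str(ra[1]).zfill(2) + ':' + str("%.1f" % (ra[2])).zfill(3)
    decsign = ('-' if dec[3] < 0 else '+')
    sdec = decsign + str(dec[0]).zfill(2) + ':' + str(dec[1]).zfill(2) + ':' + str("%.1f" % (dec[2])).zfill(3)
    return sra, sdec  # -> ('12:30:45.6', '+41:16:9.0')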
def make_coord_str(x, y, im, unit='Jy/beam'):
"""Makes the x, y, ra, dec, flux string"""
rastr, decstr = xy_to_radec_str(x, y)
col = int(x + 0.5)
row = int(y + 0.5)
numcols, numrows = im.shape
if col >= 0 and col < numcols\
and row >= 0 and row < numrows:
z = im[col, row]
return 'x=%1.1f, y=%1.1f, RA=%s, Dec=%s, F=%+1.4f %s' % (x, y, rastr, decstr, z, unit)
else:
return 'x=%1.1f, y=%1.1f' % (x, y)
def plot_sed(src, ax):
"""Plots the SED for source 'src' to axis 'ax'"""
global sky2pix
global fig
ax.cla()
norm = src.spec_norm
spin = src.spec_indx
espin = src.e_spec_indx
y = N.array(src.specin_flux)
ey = N.array(src.specin_fluxE)
x = N.array(src.specin_freq)
ax.errorbar(N.log10(x/1e6), N.log10(y), yerr=ey/y/N.log(10.0), fmt='bo')  # error on log10(y) is ey/(y*ln(10))
ax.plot(N.log10(x/1e6), N.log10(norm)+N.log10(x/src.specin_freq0)*spin,
'-g', label="alpha = %.2f" % (spin,))
pos = sky2pix(src.posn_sky_centroid)
xpos = int(pos[0])
ypos = int(pos[1])
pl.title('SED of source #'+str(src.source_id)+'\n'
+'(x = '+str(xpos)+', y = '+str(ypos)+')')
pl.xlabel('log Frequency (MHz)')
pl.ylabel('log Flux Density (Jy)')
pl.legend()
def get_src(src_list, srcid):
"""Returns the source for srcid or None if not found"""
for src in src_list:
if src.source_id == srcid:
return src
return None
PyBDSF-1.11.0/bdsf/polarisation.py 0000664 0000000 0000000 00000075672 14650706641 0016647 0 ustar 00root root 0000000 0000000 """Module polarisation.
This module finds the Q, U, and V fluxes, the total, linear, and circular
polarisation fractions and the linear polarisation angle of each source identified
by gaul2srl. The position angle is defined from North, with positive angles
towards East.
"""
from __future__ import absolute_import
from .image import *
from .islands import *
from .gaul2srl import *
from .preprocess import Op_preprocess
from .rmsimage import Op_rmsimage
from .threshold import Op_threshold
from .islands import Op_islands
from .gausfit import Op_gausfit
from .gaul2srl import Op_gaul2srl
from .make_residimage import Op_make_residimage
from .const import fwsig
from . import mylogger
import numpy as N
from . import functions as func
from . import statusbar
class Op_polarisation(Op):
""" Finds the flux in each Stokes and calculates the polarisation fraction
and angle.
Fluxes are calculated by summing all nonmasked pixels assigned to
the Gaussian. If a pixel contains contributions from two or more
Gaussians, its flux is divided between the Gaussians by the ratio of
fluxes that they contribute to the pixel. Errors on the fluxes are
derived by summing the same pixels in the rms maps in quadrature.
The results are stored in the Gaussian and Source structures.
Fits are also done to the polarized intensity (PI) image to
determine if there are any islands of emission that lie outside
those found in the I image. If there are, they are fit and the
process above is done for them too.
For linearly polarised emission, the signal and noise add
vectorially, giving a Rice distribution (Vinokur 1965) instead of a
Gaussian one. To correct for this, a bias is estimated and removed
from the polarisation fraction using the same method used for the
NVSS catalog (see ftp://ftp.cv.nrao.edu/pub/nvss/catalog.ps). Errors
on the linear and total polarisation fractions and polarisation
angle are estimated using the debiased polarised flux and standard
error propagation. See Sparks & Axon (1999) for a more detailed
treatment.
Prerequisites: module gaul2srl should be run first."""
def __call__(self, img):
mylog = mylogger.logging.getLogger("PyBDSM."+img.log+"Polarisatn")
if img.opts.polarisation_do:
mylog.info('Extracting polarisation properties for all sources')
pols = ['I', 'Q', 'U', 'V']
# Run gausfit and gual2srl on PI image to look for polarized sources
# undetected in I
fit_PI = img.opts.pi_fit
n_new = 0
ch0_pi = N.sqrt(img.ch0_Q_arr**2 + img.ch0_U_arr**2)
img.ch0_pi_arr = ch0_pi
if fit_PI:
from . import _run_op_list
mylogger.userinfo(mylog, "\nChecking PI image for new sources")
mask = img.mask_arr
# Set up image object for PI image.
pi_chain, pi_opts = self.setpara_bdsm(img)
pimg = Image(pi_opts)
pimg.beam = img.beam
pimg.pixel_beam = img.pixel_beam
pimg.pixel_beamarea = img.pixel_beamarea
pimg.log = 'PI.'
pimg.basedir = img.basedir
pimg.imagename = img.imagename
pimg.frequency = img.frequency
pimg.equinox = img.equinox
pimg.shape = img.shape
pimg.pix2beam = img.pix2beam
pimg.beam2pix = img.beam2pix
pimg.pix2gaus = img.pix2gaus
pimg.gaus2pix = img.gaus2pix
pimg.pix2sky = img.pix2sky
pimg.sky2pix = img.sky2pix
pimg.pix2coord = img.pix2coord
pimg.wcs_obj = img.wcs_obj
pimg.mask_arr = mask
pimg.masked = img.masked
pimg.ch0_arr = ch0_pi
pimg._pi = True
success = _run_op_list(pimg, pi_chain)
if not success:
return
img.pi_islands = pimg.islands
img.pi_gaussians = pimg.gaussians
img.pi_sources = pimg.sources
# Now check for new sources in the PI image that are not
# found in the Stokes I image. If any new sources are found,
# adjust their IDs to follow after those found in I.
new_isl = []
new_src = []
new_gaus = []
n_new_src = 0
if len(img.islands) == 0:
isl_id = 0
src_id = 0
gaus_id = 0
else:
isl_id = img.islands[-1].island_id
src_id = img.sources[-1].source_id
gaus_id = img.gaussians[-1].gaus_num
for pi_isl in pimg.islands:
new_sources = []
for pi_src in pi_isl.sources:
if img.pyrank[int(img.sky2pix(pi_src.posn_sky_max)[0]),
int(img.sky2pix(pi_src.posn_sky_max)[1])] == -1:
src_id += 1
pi_src._pi = True
pi_src.island_id = isl_id
pi_src.source_id = src_id
pi_src.spec_indx = N.NaN
pi_src.e_spec_indx = N.NaN
pi_src.spec_norm = N.NaN
pi_src.specin_flux = [N.NaN]
pi_src.specin_fluxE = [N.NaN]
pi_src.specin_freq = [N.NaN]
pi_src.specin_freq0 = N.NaN
for gaus in pi_src.gaussians:
gaus.island_id = isl_id
gaus.source_id = src_id
gaus.spec_indx = N.NaN
gaus.e_spec_indx = N.NaN
gaus.spec_norm = N.NaN
gaus.specin_flux = [N.NaN]
gaus.specin_fluxE = [N.NaN]
gaus.specin_freq = [N.NaN]
gaus.specin_freq0 = N.NaN
new_sources.append(pi_src)
new_src.append(pi_src)
n_new_src += 1
for g in pi_src.gaussians:
gaus_id += 1
new_gaus.append(g)
g.gaus_num = gaus_id
if len(new_sources) > 0:
isl_id += 1
pi_isl.sources = new_sources
pi_isl.island_id = isl_id
pi_isl._pi = True
new_isl.append(pi_isl)
n_new = len(new_isl)
mylogger.userinfo(mylog, "New sources found in PI image", '%i (%i total)' %
(n_new, img.nsrc+n_new))
if n_new > 0:
img.islands += new_isl
img.sources += new_src
img.gaussians += new_gaus
img.nsrc += n_new_src
renumber_islands(img)
bar = statusbar.StatusBar('Calculating polarisation properties .... : ', 0, img.nsrc)
if not img.opts.quiet:
bar.start()
for isl in img.islands:
isl_bbox = isl.bbox
ch0_I = img.ch0_arr[tuple(isl_bbox)]
ch0_Q = img.ch0_Q_arr[tuple(isl_bbox)]
ch0_U = img.ch0_U_arr[tuple(isl_bbox)]
ch0_V = img.ch0_V_arr[tuple(isl_bbox)]
ch0_images = [ch0_I, ch0_Q, ch0_U, ch0_V]
for i, src in enumerate(isl.sources):
# For each source, assume the morphology does not change
# across the Stokes cube. This assumption allows us to fit
# the Gaussians of each source to each Stokes image by
# simply fitting only the overall normalizations of the
# individual Gaussians.
#
# First, fit all source Gaussians to each Stokes image:
x, y = N.mgrid[isl_bbox]
gg = src.gaussians
fitfix = N.ones(len(gg)) # fit only normalization
srcmask = isl.mask_active
total_flux = N.zeros((4, len(fitfix)), dtype=N.float32) # array of fluxes: N_Stokes x N_Gaussians
errors = N.zeros((4, len(fitfix)), dtype=N.float32) # array of fluxes: N_Stokes x N_Gaussians
for sind, image in enumerate(ch0_images):
if (sind == 0 and hasattr(src, '_pi')) or sind > 0: # fit Stokes I only for PI sources; Q, U, and V are always fit
p, ep = func.fit_mulgaus2d(image, gg, x, y, srcmask, fitfix)
for ig in range(len(fitfix)):
bm_pix = N.array([img.pixel_beam()[0], img.pixel_beam()[1], img.pixel_beam()[2]])
total_flux[sind, ig] = p[ig*6]*p[ig*6+3]*p[ig*6+4]/(bm_pix[0]*bm_pix[1])
p = N.insert(p, N.arange(len(fitfix))*6+6, total_flux[sind])
if sind > 0:
rms_img = img.__getattribute__('rms_'+pols[sind]+'_arr')
else:
rms_img = img.rms_arr
if len(rms_img.shape) > 1:
rms_isl = rms_img[tuple(isl.bbox)].mean()
else:
rms_isl = rms_img
errors[sind] = func.get_errors(img, p, rms_isl)[6]
# Now, assign fluxes to each Gaussian.
src_flux_I = 0.0
src_flux_Q = 0.0
src_flux_U = 0.0
src_flux_V = 0.0
src_flux_I_err_sq = 0.0
src_flux_Q_err_sq = 0.0
src_flux_U_err_sq = 0.0
src_flux_V_err_sq = 0.0
for ig, gaussian in enumerate(src.gaussians):
init_gaus_attr(gaussian)
flux_I = total_flux[0, ig]
flux_I_err = abs(errors[0, ig])
flux_Q = total_flux[1, ig]
flux_Q_err = abs(errors[1, ig])
flux_U = total_flux[2, ig]
flux_U_err = abs(errors[2, ig])
flux_V = total_flux[3, ig]
flux_V_err = abs(errors[3, ig])
if hasattr(src, '_pi'):
gaussian.total_flux = flux_I
gaussian.total_fluxE = flux_I_err
gaussian.total_flux_Q = flux_Q
gaussian.total_flux_U = flux_U
gaussian.total_flux_V = flux_V
gaussian.total_fluxE_Q = flux_Q_err
gaussian.total_fluxE_U = flux_U_err
gaussian.total_fluxE_V = flux_V_err
if hasattr(src, '_pi'):
src_flux_I += flux_I
src_flux_I_err_sq += flux_I_err**2
src_flux_Q += flux_Q
src_flux_U += flux_U
src_flux_V += flux_V
src_flux_Q_err_sq += flux_Q_err**2
src_flux_U_err_sq += flux_U_err**2
src_flux_V_err_sq += flux_V_err**2
# Calculate and store polarisation fractions and angle for each Gaussian in the island
# For this we need the I flux, which we can just take from g.total_flux and src.total_flux
flux_I = gaussian.total_flux
flux_I_err = gaussian.total_fluxE
stokes = [flux_I, flux_Q, flux_U, flux_V]
stokes_err = [flux_I_err, flux_Q_err, flux_U_err, flux_V_err]
lpol_frac, lpol_frac_loerr, lpol_frac_hierr = self.calc_lpol_fraction(stokes, stokes_err) # linear pol fraction
lpol_ang, lpol_ang_err = self.calc_lpol_angle(stokes, stokes_err) # linear pol angle
cpol_frac, cpol_frac_loerr, cpol_frac_hierr = self.calc_cpol_fraction(stokes, stokes_err) # circular pol fraction
tpol_frac, tpol_frac_loerr, tpol_frac_hierr = self.calc_tpol_fraction(stokes, stokes_err) # total pol fraction
gaussian.lpol_fraction = lpol_frac
gaussian.lpol_fraction_loerr = lpol_frac_loerr
gaussian.lpol_fraction_hierr = lpol_frac_hierr
gaussian.cpol_fraction = cpol_frac
gaussian.cpol_fraction_loerr = cpol_frac_loerr
gaussian.cpol_fraction_hierr = cpol_frac_hierr
gaussian.tpol_fraction = tpol_frac
gaussian.tpol_fraction_loerr = tpol_frac_loerr
gaussian.tpol_fraction_hierr = tpol_frac_hierr
gaussian.lpol_angle = lpol_ang
gaussian.lpol_angle_err = lpol_ang_err
# Store fluxes for each source in the island
init_src_attr(src)
if hasattr(src, '_pi'):
src.total_flux = src_flux_I
src.total_fluxE = N.sqrt(src_flux_I_err_sq)
src.total_flux_Q = src_flux_Q
src.total_flux_U = src_flux_U
src.total_flux_V = src_flux_V
src.total_fluxE_Q = N.sqrt(src_flux_Q_err_sq)
src.total_fluxE_U = N.sqrt(src_flux_U_err_sq)
src.total_fluxE_V = N.sqrt(src_flux_V_err_sq)
# Calculate and store polarisation fractions and angle for each source in the island
# For this we need the I flux, which we can just take from g.total_flux and src.total_flux
src_flux_I = src.total_flux
src_flux_I_err = src.total_fluxE
stokes = [src_flux_I, src_flux_Q, src_flux_U, src_flux_V]
stokes_err = [src_flux_I_err, N.sqrt(src_flux_Q_err_sq), N.sqrt(src_flux_U_err_sq), N.sqrt(src_flux_V_err_sq)]
lpol_frac, lpol_frac_loerr, lpol_frac_hierr = self.calc_lpol_fraction(stokes, stokes_err) # linear pol fraction
lpol_ang, lpol_ang_err = self.calc_lpol_angle(stokes, stokes_err) # linear pol angle
cpol_frac, cpol_frac_loerr, cpol_frac_hierr = self.calc_cpol_fraction(stokes, stokes_err) # circular pol fraction
tpol_frac, tpol_frac_loerr, tpol_frac_hierr = self.calc_tpol_fraction(stokes, stokes_err) # total pol fraction
src.lpol_fraction = lpol_frac
src.lpol_fraction_loerr = lpol_frac_loerr
src.lpol_fraction_hierr = lpol_frac_hierr
src.cpol_fraction = cpol_frac
src.cpol_fraction_loerr = cpol_frac_loerr
src.cpol_fraction_hierr = cpol_frac_hierr
src.tpol_fraction = tpol_frac
src.tpol_fraction_loerr = tpol_frac_loerr
src.tpol_fraction_hierr = tpol_frac_hierr
src.lpol_angle = lpol_ang
src.lpol_angle_err = lpol_ang_err
if bar.started:
bar.increment()
bar.stop()
img.completed_Ops.append('polarisation')
####################################################################################
def calc_lpol_fraction(self, stokes, err):
""" Calculate linear polarisation fraction and error from:
stokes = [I, Q, U, V] and err = [Ierr, Qerr, Uerr, Verr]
"""
I, Q, U, V = stokes
Ierr, Qerr, Uerr, Verr = err
QUerr = N.mean([Qerr, Uerr])
stokes_lpol = [I, Q, U, 0.0]
err_lpol = [Ierr, Qerr, Uerr, 0.0]
lfrac, loerr, uperr, Iup, Qup, Uup, Vup = self.estimate_err_frac_with_limits(stokes_lpol, err_lpol)
# If all are detections, debias and use error propagation instead
if not Iup and not Qup and not Uup:
lpol = N.sqrt(Q**2 + U**2)
lpol_debiased = self.debias(lpol, QUerr) # debias (to first order)
if lpol_debiased > 0.0:
lfrac = lpol_debiased / I
dlfrac = lfrac * N.sqrt((Ierr/I)**2 + (Q*Qerr/lpol_debiased**2)**2 + (U*Uerr/lpol_debiased**2)**2)
else:
# if debiased fraction is consistent with zero, estimate a ballpark error with biased value
lfrac = 0.0
lpolsq = Q**2 + U**2
dlfrac = N.sqrt(lpolsq) / I * N.sqrt((Ierr/I)**2 + (Q*Qerr/lpolsq)**2 + (U*Uerr/lpolsq)**2)
loerr = dlfrac
uperr = dlfrac
lfrac, loerr, uperr = self.check_frac(lfrac, loerr, uperr)
return lfrac, loerr, uperr
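####################################################################################
# Illustrative example (not called by the pipeline; toy Stokes values) of the
# calculation above: a well-detected source with I = 10, Q = U = 1 (Jy) has a
# debiased linear polarisation fraction near sqrt(Q**2 + U**2)/I ~ 0.14.
def _example_lpol_fraction(self):
    stokes = [10.0, 1.0, 1.0, 0.0]  # hypothetical [I, Q, U, V] in Jy
    err = [0.1, 0.05, 0.05, 0.01]   # hypothetical rms errors in Jy
    return self.calc_lpol_fraction(stokes, err)  # -> (~0.14, ~0.005, ~0.005)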
####################################################################################
def calc_cpol_fraction(self, stokes, err):
""" Calculate circular polarisation fraction and error from:
stokes = [I, Q, U, V] and err = [Ierr, Qerr, Uerr, Verr]
"""
I, Q, U, V = stokes
Ierr, Qerr, Uerr, Verr = err
stokes_cpol = [I, 0.0, 0.0, V]
err_cpol = [Ierr, 0.0, 0.0, Verr]
cfrac, loerr, uperr, Iup, Qup, Uup, Vup = self.estimate_err_frac_with_limits(stokes_cpol, err_cpol)
# If all are detections, debias and use error propagation instead
if not Iup and not Vup:
cfrac = abs(V) / I
dcfrac = cfrac * N.sqrt((Ierr/I)**2 + (Verr/V)**2)
loerr = dcfrac
uperr = dcfrac
cfrac, loerr, uperr = self.check_frac(cfrac, loerr, uperr)
return cfrac, loerr, uperr
####################################################################################
def calc_tpol_fraction(self, stokes, err):
""" Calculate total polarisation fraction and error from:
stokes = [I, Q, U, V] and err = [Ierr, Qerr, Uerr, Verr]
"""
I, Q, U, V = stokes
Ierr, Qerr, Uerr, Verr = err
QUerr = N.mean([Qerr, Uerr])
tfrac, loerr, uperr, Iup, Qup, Uup, Vup = self.estimate_err_frac_with_limits(stokes, err)
# If all are detections, debias and use error propagation instead
if not Iup and not Qup and not Uup and not Vup:
lpol = N.sqrt(Q**2 + U**2)
lpol_debiased = self.debias(lpol, QUerr)
tpol_debiased = N.sqrt(Q**2 + U**2 + V**2) - (lpol - lpol_debiased) # debias (to first order)
if tpol_debiased > 0.0:
tfrac = tpol_debiased / I
dtfrac = tfrac * N.sqrt((Ierr/I)**2 + (Q*Qerr/tpol_debiased**2)**2 + (U*Uerr/tpol_debiased**2)**2 + (V*Verr/tpol_debiased**2)**2)
else:
# if debiased fraction is consistent with zero, estimate a ballpark error with biased value
tfrac = 0.0
tpolsq = Q**2 + U**2 + V**2
dtfrac = N.sqrt(tpolsq) / I * N.sqrt((Ierr/I)**2 + (Q*Qerr/tpolsq)**2 + (U*Uerr/tpolsq)**2 + (V*Verr/tpolsq)**2)
loerr = dtfrac
uperr = dtfrac
tfrac, loerr, uperr = self.check_frac(tfrac, loerr, uperr)
return tfrac, loerr, uperr
####################################################################################
def calc_lpol_angle(self, stokes, err, sig=3.0):
""" Calculate linear polarisation angle and error (in degrees) from:
stokes = [I, Q, U, V] and err = [Ierr, Qerr, Uerr, Verr]
"""
I, Q, U, V = stokes
Ierr, Qerr, Uerr, Verr = err
if abs(Q) < sig*abs(Qerr) and abs(U) < sig*abs(Uerr):
return 0.0, 0.0
ang = 0.5 * N.arctan2(U, Q) * 180.0 / N.pi
dang = 0.5 / (1.0 + (U/Q)**2) * N.sqrt((Uerr/Q)**2 + (U*Qerr/Q**2)**2) * 180.0 / N.pi
return ang, dang
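####################################################################################
# Illustrative example (not called by the pipeline; toy Stokes values) of the
# angle calculation above: equal Q and U give 0.5*arctan2(U, Q) = 22.5 deg,
# and 5% errors on Q and U propagate to an angle error of about 1 deg.
def _example_lpol_angle(self):
    stokes = [10.0, 1.0, 1.0, 0.0]  # hypothetical [I, Q, U, V] in Jy
    err = [0.1, 0.05, 0.05, 0.01]   # hypothetical rms errors in Jy
    return self.calc_lpol_angle(stokes, err)  # -> (22.5, ~1.0)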
####################################################################################
def debias(self, pflux, QUerr):
""" Debiases the linearly polarised flux using the same method
used for the NVSS catalog (see ftp://ftp.cv.nrao.edu/pub/nvss/catalog.ps).
"""
data_table=N.array([[1.253,1.2530], [1.256,1.1560], [1.266,1.0660], [1.281,0.9814],
[1.303,0.9030], [1.330,0.8304], [1.364,0.7636], [1.402,0.7023],
[1.446,0.6462], [1.495,0.5951], [1.549,0.5486], [1.606,0.5064],
[1.668,0.4683], [1.734,0.4339], [1.803,0.4028], [1.875,0.3749],
[1.950,0.3498], [2.027,0.3273], [2.107,0.3070], [2.189,0.2888],
[2.272,0.2724], [2.358,0.2576], [2.444,0.2442], [2.532,0.2321],
[2.621,0.2212], [2.711,0.2112], [2.802,0.2021], [2.894,0.1938],
[2.986,0.1861], [3.079,0.1791], [3.173,0.1726], [3.267,0.1666],
[3.361,0.1610], [3.456,0.1557], [3.551,0.1509], [3.646,0.1463],
[3.742,0.1420], [3.838,0.1380], [3.934,0.1342], [4.031,0.1306]])
pnorm = pflux / QUerr
if pnorm <= data_table[0,0]:
bias = data_table[0,1]
else:
if pnorm >= data_table[-1,0]:
bias = 1.0 / (2.0 * pnorm) + 1.0 / (8.0 * pnorm**3)
pnorm = pnorm - bias
bias = 1.0 / (2.0 * pnorm) + 1.0 / (8.0 * pnorm**3)
else:
bias = N.interp(pnorm, data_table[:,0], data_table[:,1])
pflux_debiased = pflux - bias * QUerr
return pflux_debiased
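####################################################################################
# Illustrative example (not called by the pipeline; toy fluxes) of the NVSS
# debiasing above: for a biased polarised flux of 3 mJy and QUerr = 1 mJy,
# pnorm = 3.0 falls inside the lookup table, where the bias is ~0.185*QUerr,
# so the debiased flux is roughly 2.8 mJy.
def _example_debias(self):
    return self.debias(3.0e-3, 1.0e-3)  # -> ~2.8e-3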
def check_frac(self, frac, loerr, uperr):
if frac < 0.0:
frac = 0.0
if frac > 1.0:
frac = 1.0
if loerr < 0.0:
loerr = frac
if frac + uperr > 1.0:
uperr = 1.0 - frac
return frac, loerr, uperr
####################################################################################
def setpara_bdsm(self, img):
chain = [Op_preprocess, Op_rmsimage(), Op_threshold(), Op_islands(),
Op_gausfit(), Op_gaul2srl(), Op_make_residimage()]
opts = img.opts.to_dict()
if img.opts.pi_thresh_isl is not None:
opts['thresh_isl'] = img.opts.pi_thresh_isl
if img.opts.pi_thresh_pix is not None:
opts['thresh_pix'] = img.opts.pi_thresh_pix
opts['thresh'] = 'hard'
opts['polarisation_do'] = False
opts['filename'] = ''
opts['detection_image'] = ''
opts['output_all'] = False
ops = []
for op in chain:
if isinstance(op, type):
ops.append(op())
else:
ops.append(op)
return ops, opts
def estimate_err_frac_with_limits(self, stokes, err, sig=3.0):
"""Estimate reasonable errors on polarization fraction when upper
limits are present.
"""
I, Q, U, V = stokes
Ierr, Qerr, Uerr, Verr = err
Iup = False
Qup = False
Uup = False
Vup = False
if abs(I) < sig * abs(Ierr):
Iup = True
if abs(Q) < sig * abs(Qerr):
Q = 0.0
Qup = True
if abs(U) < sig * abs(Uerr):
U = 0.0
Uup = True
if abs(V) < sig * abs(Verr):
V = 0.0
Vup = True
pol = N.sqrt(Q**2 + U**2 + V**2)
frac = pol / I
if frac < 0.0:
frac = 0.0
if frac > 1.0:
frac = 1.0
if Iup:
if Qup and Uup and Vup:
frac = 0.0
loerr = 0.0
uperr = 1.0
else:
loerr = frac - N.sqrt((abs(Q) - Qerr)**2 + (abs(U) - Uerr)**2 + (abs(V) - Verr)**2) / abs(Ierr)
uperr = 1.0 - frac
else:
loerr = frac - N.sqrt((abs(Q) - Qerr)**2 + (abs(U) - Uerr)**2 + (abs(V) - Verr)**2) / (I + Ierr)
uperr = N.sqrt((abs(Q) + Qerr)**2 + (abs(U) + Uerr)**2 + (abs(V) + Verr)**2) / (I - Ierr) - frac
if loerr < 0.0:
loerr = frac
if frac + uperr > 1.0:
uperr = 1.0 - frac
return frac, loerr, uperr, Iup, Qup, Uup, Vup
def double_bbox(self, bbox, shape):
"""Expand bbox of the island by factor of 2
bbox is isl.bbox
shape is img.shape
"""
def expand(bbox, shape):
    # use an integer width so the resulting slices can index numpy arrays
    bbox_width = int((bbox.stop - bbox.start)/2.0)
    return slice(max(0, bbox.start - bbox_width), min(shape, bbox.stop + bbox_width))
return list(map(expand, bbox, shape))  # list, since map is lazy in Python 3
def renumber_islands(img):
"""Renumbers island_ids (after, e.g., removing one)
Also renumbers the pyrank image.
"""
for i, isl in enumerate(img.islands):
isl.island_id = i
for g in isl.gaul:
g.island_id = i
for dg in isl.dgaul:
dg.island_id = i
if i == 0:
img.pyrank[tuple(isl.bbox)] = N.invert(isl.mask_active) - 1
else:
img.pyrank[tuple(isl.bbox)] = N.invert(isl.mask_active) * isl.island_id - isl.mask_active
gaussian_list = [g for isl in img.islands for g in isl.gaul]
img.gaussians = gaussian_list
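# Minimal sketch (not used by the pipeline) of the pyrank encoding applied in
# renumber_islands: unmasked island pixels carry the island_id and masked
# pixels carry -1 (toy one-dimensional mask):
def _example_pyrank_encoding():
    import numpy as N
    mask_active = N.array([True, False, False, True])
    island_id = 3
    # N.invert on a boolean array is a logical NOT
    return N.invert(mask_active) * island_id - mask_active  # -> [-1, 3, 3, -1]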
def init_gaus_attr(gaussian):
### Insert polarization attributes
gaussian.total_flux_Q_def = Float(doc="Total flux density (Jy), Stokes Q", colname='Total_Q',
units='Jy')
gaussian.total_fluxE_Q_def = Float(doc="Error in total flux density (Jy), Stokes Q", colname='E_Total_Q',
units='Jy')
gaussian.total_flux_U_def = Float(doc="Total flux density (Jy), Stokes U", colname='Total_U',
units='Jy')
gaussian.total_fluxE_U_def = Float(doc="Error in total flux density (Jy), Stokes U", colname='E_Total_U',
units='Jy')
gaussian.total_flux_V_def = Float(doc="Total flux density (Jy), Stokes V", colname='Total_V',
units='Jy')
gaussian.total_fluxE_V_def = Float(doc="Error in total flux density (Jy), Stokes V", colname='E_Total_V',
units='Jy')
gaussian.lpol_fraction_def = Float(doc="Linear polarisation fraction",
colname='Linear_Pol_frac', units=None)
gaussian.lpol_fraction_loerr_def = Float(doc="Linear polarisation fraction low error",
colname='Elow_Linear_Pol_frac', units=None)
gaussian.lpol_fraction_hierr_def = Float(doc="Linear polarisation fraction high error",
colname='Ehigh_Linear_Pol_frac', units=None)
gaussian.cpol_fraction_def = Float(doc="Circular polarisation fraction",
colname='Circ_Pol_Frac', units=None)
gaussian.cpol_fraction_loerr_def = Float(doc="Circular polarisation fraction low error",
colname='Elow_Circ_Pol_Frac', units=None)
gaussian.cpol_fraction_hierr_def = Float(doc="Circular polarisation fraction high error",
colname='Ehigh_Circ_Pol_Frac', units=None)
gaussian.tpol_fraction_def = Float(doc="Total polarisation fraction",
colname='Total_Pol_Frac', units=None)
gaussian.tpol_fraction_loerr_def = Float(doc="Total polarisation fraction low error",
colname='Elow_Total_Pol_Frac', units=None)
gaussian.tpol_fraction_hierr_def = Float(doc="Total polarisation fraction high error",
colname='Ehigh_Total_Pol_Frac', units=None)
gaussian.lpol_angle_def = Float(doc="Polarisation angle (deg from North towards East)",
colname='Linear_Pol_Ang', units='deg')
gaussian.lpol_angle_err_def = Float(doc="Polarisation angle error (deg)",
colname='E_Linear_Pol_Ang', units='deg')
def init_src_attr(source):
### Insert polarization attributes
source.total_flux_Q_def = Float(doc="Total flux density (Jy), Stokes Q", colname='Total_Q',
units='Jy')
source.total_fluxE_Q_def = Float(doc="Error in total flux density (Jy), Stokes Q", colname='E_Total_Q',
units='Jy')
source.total_flux_U_def = Float(doc="Total flux density (Jy), Stokes U", colname='Total_U',
units='Jy')
source.total_fluxE_U_def = Float(doc="Error in total flux density (Jy), Stokes U", colname='E_Total_U',
units='Jy')
source.total_flux_V_def = Float(doc="Total flux density (Jy), Stokes V", colname='Total_V',
units='Jy')
source.total_fluxE_V_def = Float(doc="Error in total flux density (Jy), Stokes V", colname='E_Total_V',
units='Jy')
source.lpol_fraction_def = Float(doc="Linear polarisation fraction",
colname='Linear_Pol_frac', units=None)
source.lpol_fraction_loerr_def = Float(doc="Linear polarisation fraction low error",
colname='Elow_Linear_Pol_frac', units=None)
source.lpol_fraction_hierr_def = Float(doc="Linear polarisation fraction high error",
colname='Ehigh_Linear_Pol_frac', units=None)
source.cpol_fraction_def = Float(doc="Circular polarisation fraction",
colname='Circ_Pol_Frac', units=None)
source.cpol_fraction_loerr_def = Float(doc="Circular polarisation fraction low error",
colname='Elow_Circ_Pol_Frac', units=None)
source.cpol_fraction_hierr_def = Float(doc="Circular polarisation fraction high error",
colname='Ehigh_Circ_Pol_Frac', units=None)
source.tpol_fraction_def = Float(doc="Total polarisation fraction",
colname='Total_Pol_Frac', units=None)
source.tpol_fraction_loerr_def = Float(doc="Total polarisation fraction low error",
colname='Elow_Total_Pol_Frac', units=None)
source.tpol_fraction_hierr_def = Float(doc="Total polarisation fraction high error",
colname='Ehigh_Total_Pol_Frac', units=None)
source.lpol_angle_def = Float(doc="Polarisation angle (deg from North towards East)",
colname='Linear_Pol_Ang', units='deg')
source.lpol_angle_err_def = Float(doc="Polarisation angle error (deg)",
colname='E_Linear_Pol_Ang', units='deg')
PyBDSF-1.11.0/bdsf/preprocess.py 0000664 0000000 0000000 00000015537 14650706641 0016322 0 ustar 00root root 0000000 0000000 """Module preprocess
Calculates some basic statistics of the image and sets up processing
parameters for PyBDSM.
"""
from __future__ import absolute_import
import numpy as N
from . import _cbdsm
from .image import *
from math import pi, sqrt, log
from . import const
from . import functions as func
from . import mylogger
class Op_preprocess(Op):
"""Preprocessing -- calculate some basic statistics and set
processing parameters. Should assume that pixels outside the universe
are blanked in QC ? """
def __call__(self, img):
mylog = mylogger.logging.getLogger("PyBDSM."+img.log+"Preprocess")
bstat = func.bstat
if img.opts.kappa_clip is None:
kappa = -img.pixel_beamarea()
else:
kappa = img.opts.kappa_clip
if img.opts.polarisation_do:
pols = ['I', 'Q', 'U', 'V']
ch0images = [img.ch0_arr, img.ch0_Q_arr, img.ch0_U_arr, img.ch0_V_arr]
img.clipped_mean_QUV = []
img.clipped_rms_QUV = []
else:
pols = ['I'] # assume I is always present
ch0images = [img.ch0_arr]
if hasattr(img, 'rms_mask'):
mask = img.rms_mask
else:
mask = img.mask_arr
opts = img.opts
for ipol, pol in enumerate(pols):
image = ch0images[ipol]
### basic stats
mean, rms, cmean, crms, cnt = bstat(image, mask, kappa)
if cnt > 198:
    # sigma clipping did not converge; fall back to the unclipped values
    cmean = mean
    crms = rms
if pol == 'I':
if func.approx_equal(crms, 0.0, rel=None):
raise RuntimeError('Clipped rms appears to be zero. Check for regions '\
'with values of 0 and\nblank them (with NaNs) '\
'or use trim_box to exclude them.')
img.raw_mean = mean
img.raw_rms = rms
img.clipped_mean = cmean
img.clipped_rms = crms
mylog.info('%s %.4f %s %.4f %s ' % ("Raw mean (Stokes I) = ", mean*1000.0, \
'mJy and raw rms = ',rms*1000.0, 'mJy'))
mylog.info('%s %.4f %s %s %.4f %s ' % ("sigma clipped mean (Stokes I) = ", cmean*1000.0, \
'mJy and ','sigma clipped rms = ',crms*1000.0, 'mJy'))
else:
img.clipped_mean_QUV.append(cmean)
img.clipped_rms_QUV.append(crms)
mylog.info('%s %s %s %.4f %s %s %.4f %s ' % ("sigma clipped mean (Stokes ", pol, ") = ", cmean*1000.0, \
'mJy and ','sigma clipped rms = ',crms*1000.0, 'mJy'))
image = img.ch0_arr
# Check if pixels are outside the universe
if opts.check_outsideuniv:
mylogger.userinfo(mylog, "Checking for pixels outside the universe")
noutside_univ = self.outside_univ(img)
img.noutside_univ = noutside_univ
frac_blank = round(float(noutside_univ)/float(image.shape[0]*image.shape[1]),3)
mylogger.userinfo(mylog, "Number of additional pixels blanked", str(noutside_univ)
+' ('+str(frac_blank*100.0)+'%)')
else:
noutside_univ = 0
# If needed, (re)mask the image
if noutside_univ > 0:
mask = N.isnan(img.ch0_arr)
masked = mask.any()
img.masked = masked
if masked:
img.mask_arr = mask
img.blankpix = N.sum(mask)
### max/min pixel value & coordinates
shape = image.shape[0:2]
if mask is not None:
img.blankpix = N.sum(mask)
if img.blankpix == 0:
max_idx = image.argmax()
min_idx = image.argmin()
else:
max_idx = N.nanargmax(image)
min_idx = N.nanargmin(image)
img.maxpix_coord = N.unravel_index(max_idx, shape)
img.minpix_coord = N.unravel_index(min_idx, shape)
img.max_value = image.flat[max_idx]
img.min_value = image.flat[min_idx]
### Solid angle of the image
cdelt = N.array(img.wcs_obj.acdelt[:2])
img.omega = N.product(shape)*abs(N.product(cdelt))/(180.*180./pi/pi)
### Total flux in ch0 image
if 'atrous' in img.filename or img._pi or img.log == 'Detection image':
# Don't do this estimate for atrous wavelet images
# or polarized intensity image,
# as it doesn't give the correct flux. Also, ignore
# the flux in the detection image, as it's likely
# wrong (e.g., not corrected for the primary beam).
img.ch0_sum_jy = 0
else:
im_flux = N.nansum(image)/img.pixel_beamarea() # Jy
img.ch0_sum_jy = im_flux
mylogger.userinfo(mylog, 'Flux from sum of (non-blank) pixels',
'%.3f Jy' % (im_flux,))
### if image seems confused, then take background mean as zero instead
alpha_sourcecounts = 2.5 # approx diff src count slope. 2.2?
if opts.bmpersrc_th is None:
if mask is not None:
unmasked = N.where(~img.mask_arr)
n = (image[unmasked] >= 5.*crms).sum()
else:
n = (image >= 5.*crms).sum()
if n <= 0:
n = 1
mylog.info('No pixels in image > 5-sigma.')
mylog.info('Taking number of pixels above 5-sigma as 1.')
img.bmpersrc_th = N.product(shape)/((alpha_sourcecounts-1.)*n)
mylog.info('%s %6.2f' % ('Estimated bmpersrc_th = ', img.bmpersrc_th))
else:
img.bmpersrc_th = opts.bmpersrc_th
mylog.info('%s %6.2f' % ('Taking default bmpersrc_th = ', img.bmpersrc_th))
confused = False
if opts.mean_map == 'default':
if img.bmpersrc_th <= 25. or cmean/crms >= 0.1:
confused = True
img.confused = confused
mylog.info('Parameter confused is '+str(img.confused))
img.completed_Ops.append('preprocess')
return img
def outside_univ(self,img):
""" Checks if a pixel is outside the universe and is not blanked,
and blanks it. (fits files written by CASA dont do this). """
noutside = 0
n, m = img.ch0_arr.shape
for i in range(n):
for j in range(m):
out = False
err = ''
pix1 = (i,j)
try:
    skyc = img.pix2sky(pix1)
    pix2 = img.sky2pix(skyc)
    if abs(pix1[0]-pix2[0]) > 0.5 or abs(pix1[1]-pix2[1]) > 0.5:
        out = True
except RuntimeError as e:
    # In Python 3 the exception variable is unbound once the except
    # block ends, so save its message for the check below
    err = str(e)
if out or ("8" in err):
noutside += 1
ch0 = img.ch0_arr
ch0[pix1] = float("NaN")
img.ch0_arr = ch0
return noutside
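# Illustrative sketch (hypothetical numbers; not called by the pipeline) of
# the confusion estimate in Op_preprocess: for a 1024x1024 image with n = 500
# pixels above 5-sigma and a differential source-count slope of 2.5, the
# expected number of beams per source is Npix/((alpha - 1)*n) ~ 1400, well
# above the confusion threshold of 25.
def _example_bmpersrc_th():
    alpha_sourcecounts = 2.5
    npix = 1024 * 1024
    n = 500
    return npix / ((alpha_sourcecounts - 1.) * n)  # ~1398.1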
PyBDSF-1.11.0/bdsf/psf_vary.py 0000664 0000000 0000000 00000142561 14650706641 0015764 0 ustar 00root root 0000000 0000000 from __future__ import print_function
from __future__ import absolute_import
import numpy as N
from astropy.io import fits as pyfits
from .image import *
from . import mylogger
from copy import deepcopy as cp
from . import has_pl
if has_pl:
import matplotlib.pyplot as pl
import scipy
import scipy.signal as S
from . import _cbdsm
from . import functions as func
from . import _pytesselate as _pytess
from . import shapelets as sh
from scipy.optimize import leastsq
from . import nat
from math import *
from . import statusbar
from .const import fwsig
from . import multi_proc as mp
import itertools
try:
from itertools import izip as zip
except ImportError: # will be 3.x series
pass
class Op_psf_vary(Op):
"""Computes variation of psf across the image """
def __call__(self, img):
if img.opts.psf_vary_do:
mylog = mylogger.logging.getLogger("PyBDSM."+img.log+"Psf_Vary")
mylogger.userinfo(mylog, '\nEstimating PSF variations')
opts = img.opts
dir = img.basedir + '/misc/'
plot = False # debug figures
image = img.ch0_arr
if img.nisl == 0:
mylog.warning("No islands found. Skipping PSF variation estimation.")
img.completed_Ops.append('psf_vary')
return
if opts.psf_fwhm is not None:
# User has specified a constant PSF to use, so skip PSF fitting/etc.
psf_maj = opts.psf_fwhm[0] # FWHM in deg
psf_min = opts.psf_fwhm[1] # FWHM in deg
psf_pa = opts.psf_fwhm[2] # PA in deg
mylogger.userinfo(mylog, 'Using constant PSF (major, minor, pos angle)',
                                  '(%.5e, %.5e, %s) degrees' % (psf_maj, psf_min,
round(psf_pa, 1)))
else:
            # User did not specify a constant PSF to use, so estimate it
over = 2
generators = opts.psf_generators; nsig = opts.psf_nsig; kappa2 = opts.psf_kappa2
snrtop = opts.psf_snrtop; snrbot = opts.psf_snrbot; snrcutstack = opts.psf_snrcutstack
gencode = opts.psf_gencode; primarygen = opts.psf_primarygen; itess_method = opts.psf_itess_method
tess_sc = opts.psf_tess_sc; tess_fuzzy= opts.psf_tess_fuzzy
bright_snr_cut = opts.psf_high_snr
s_only = opts.psf_stype_only
if opts.psf_snrcut < 5.0:
mylogger.userinfo(mylog, "Value of psf_snrcut too low; increasing to 5")
snrcut = 5.0
else:
snrcut = opts.psf_snrcut
img.psf_snrcut = snrcut
if opts.psf_high_snr is not None:
if opts.psf_high_snr < 10.0:
mylogger.userinfo(mylog, "Value of psf_high_snr too low; increasing to 10")
high_snrcut = 10.0
else:
high_snrcut = opts.psf_high_snr
else:
high_snrcut = opts.psf_high_snr
img.psf_high_snr = high_snrcut
wtfns=['unity', 'roundness', 'log10', 'sqrtlog10']
if 0 <= itess_method < 4: tess_method=wtfns[itess_method]
else: tess_method='unity'
### now put all relevant gaussian parameters into a list
ngaus = img.ngaus
nsrc = img.nsrc
num = N.zeros(nsrc, dtype=N.int32)
peak = N.zeros(nsrc)
xc = N.zeros(nsrc)
yc = N.zeros(nsrc)
bmaj = N.zeros(nsrc)
bmin = N.zeros(nsrc)
bpa = N.zeros(nsrc)
code = N.array(['']*nsrc);
rms = N.zeros(nsrc)
src_id_list = []
for i, src in enumerate(img.sources):
src_max = 0.0
for gmax in src.gaussians:
# Take only brightest Gaussian per source
if gmax.peak_flux > src_max:
src_max = gmax.peak_flux
g = gmax
num[i] = i
peak[i] = g.peak_flux
xc[i] = g.centre_pix[0]
yc[i] = g.centre_pix[1]
bmaj[i] = g.size_pix[0]
bmin[i] = g.size_pix[1]
bpa[i] = g.size_pix[2]
code[i] = img.sources[g.source_id].code
rms[i] = img.islands[g.island_id].rms
gauls = (num, peak, xc, yc, bmaj, bmin, bpa, code, rms)
tr_gauls = self.trans_gaul(gauls)
# takes gaussians with code=S and snr > snrcut.
if s_only:
tr = [n for n in tr_gauls if n[1]/n[8]>snrcut and n[7] == 'S']
else:
tr = [n for n in tr_gauls if n[1]/n[8]>snrcut]
g_gauls = self.trans_gaul(tr)
# computes statistics of fitted sizes. Same as psfvary_fullstat.f in fBDSM.
bmaj_a, bmaj_r, bmaj_ca, bmaj_cr, ni = _cbdsm.bstat(bmaj, None, nsig)
bmin_a, bmin_r, bmin_ca, bmin_cr, ni = _cbdsm.bstat(bmin, None, nsig)
bpa_a, bpa_r, bpa_ca, bpa_cr, ni = _cbdsm.bstat(bpa, None, nsig)
# get subset of sources deemed to be unresolved. Same as size_ksclip_wenss.f in fBDSM.
flag_unresolved = self.get_unresolved(g_gauls, img.beam, nsig, kappa2, over, img.psf_high_snr, plot)
if len(flag_unresolved) == 0:
mylog.warning('Insufficient number of sources to determine PSF variation.\nTry changing the PSF options or specify a (constant) PSF with the "psf_fwhm" option')
return
# see how much the SNR-weighted sizes of unresolved sources differ from the synthesized beam.
wtsize_beam_snr = self.av_psf(g_gauls, img.beam, flag_unresolved)
# filter out resolved sources
tr_gaul = self.trans_gaul(g_gauls)
tr = [n for i, n in enumerate(tr_gaul) if flag_unresolved[i]]
g_gauls = self.trans_gaul(tr)
mylogger.userinfo(mylog, 'Number of unresolved sources', str(len(g_gauls[0])))
# get a list of voronoi generators. vorogenS has values (and not None) if generators='field'.
vorogenP, vorogenS = self.get_voronoi_generators(g_gauls, generators, gencode, snrcut, snrtop, snrbot, snrcutstack)
mylogger.userinfo(mylog, 'Number of generators for PSF variation', str(len(vorogenP[0])))
if len(vorogenP[0]) < 3:
mylog.warning('Insufficient number of generators')
return
mylogger.userinfo(mylog, 'Tesselating image')
# group generators into tiles
tile_prop = self.edit_vorogenlist(vorogenP, frac=0.9)
# tesselate the image
volrank, vorowts = self.tesselate(vorogenP, vorogenS, tile_prop, tess_method, tess_sc, tess_fuzzy, \
generators, gencode, image.shape)
if opts.output_all:
func.write_image_to_file(img.use_io, img.imagename + '.volrank.fits', volrank, img, dir)
tile_list, tile_coord, tile_snr = tile_prop
ntile = len(tile_list)
bar = statusbar.StatusBar('Determining PSF variation ............... : ', 0, ntile)
mylogger.userinfo(mylog, 'Number of tiles for PSF variation', str(ntile))
# For each tile, calculate the weighted averaged psf image. Also for all the sources in the image.
cdelt = list(img.wcs_obj.acdelt[0:2])
factor=3.
psfimages, psfcoords, totpsfimage, psfratio, psfratio_aper = self.psf_in_tile(image, img.beam, g_gauls, \
cdelt, factor, snrcutstack, volrank, tile_prop, plot, img)
npsf = len(psfimages)
if opts.psf_use_shap:
if opts.psf_fwhm is None:
# use totpsfimage to get beta, centre and nmax for shapelet decomposition. Use nmax=5 or 6
mask=N.zeros(totpsfimage.shape, dtype=bool)
(m1, m2, m3)=func.moment(totpsfimage, mask)
betainit=sqrt(m3[0]*m3[1])*2.0 * 1.4
tshape = totpsfimage.shape
cen = N.array(N.unravel_index(N.argmax(totpsfimage), tshape))+[1,1]
cen = tuple(cen)
nmax = 12
basis = 'cartesian'
betarange = [0.5,sqrt(betainit*max(tshape))]
beta, error = sh.shape_varybeta(totpsfimage, mask, basis, betainit, cen, nmax, betarange, plot)
if error == 1: print(' Unable to find minimum in beta')
# decompose all the psf images using the beta from above
nmax=12; psf_cf=[]
for i in range(npsf):
psfim = psfimages[i]
cf = sh.decompose_shapelets(psfim, mask, basis, beta, cen, nmax, mode='')
psf_cf.append(cf)
if img.opts.quiet == False:
bar.increment()
bar.stop()
# transpose the psf image list
xt, yt = N.transpose(tile_coord)
tr_psf_cf = N.transpose(N.array(psf_cf))
                # Interpolate the coefficients across the image. Note that scipy
                # interpolation for irregular grids is poor (it doesn't even pass
                # through some of the points), so for now we fit a polynomial.
compress = 100.0
x, y = N.transpose(psfcoords)
if len(x) < 3:
mylog.warning('Insufficient number of tiles to do interpolation of PSF variation')
return
psf_coeff_interp, xgrid, ygrid = self.interp_shapcoefs(nmax, tr_psf_cf, psfcoords, image.shape, \
compress, plot)
psfshape = psfimages[0].shape
skip = 5
aa = self.create_psf_grid(psf_coeff_interp, image.shape, xgrid, ygrid, skip, nmax, psfshape, \
basis, beta, cen, totpsfimage, plot)
img.psf_images = aa
else:
if opts.psf_fwhm is None:
if ntile < 4:
mylog.warning('Insufficient number of tiles to do interpolation of PSF variation')
return
else:
# Fit stacked PSFs with Gaussians and measure aperture fluxes
bm_pix = N.array([img.pixel_beam()[0]*fwsig, img.pixel_beam()[1]*fwsig, img.pixel_beam()[2]])
psf_maj = N.zeros(npsf)
psf_min = N.zeros(npsf)
psf_pa = N.zeros(npsf)
if img.opts.quiet == False:
bar.start()
for i in range(ntile):
psfim = psfimages[i]
mask = N.zeros(psfim.shape, dtype=bool)
x_ax, y_ax = N.indices(psfim.shape)
maxv = N.max(psfim)
p_ini = [maxv, (psfim.shape[0]-1)/2.0*1.1, (psfim.shape[1]-1)/2.0*1.1, bm_pix[0]/fwsig*1.3,
bm_pix[1]/fwsig*1.1, bm_pix[2]*2]
para, ierr = func.fit_gaus2d(psfim, p_ini, x_ax, y_ax, mask)
### first extent is major
if para[3] < para[4]:
para[3:5] = para[4:2:-1]
para[5] += 90
### clip position angle
para[5] = divmod(para[5], 180)[1]
psf_maj[i] = para[3]
psf_min[i] = para[4]
posang = para[5]
while posang >= 180.0:
posang -= 180.0
psf_pa[i] = posang
if img.opts.quiet == False:
bar.increment()
bar.stop()
# Interpolate Gaussian parameters
if img.aperture is None:
psf_maps = [psf_maj, psf_min, psf_pa, psfratio]
else:
psf_maps = [psf_maj, psf_min, psf_pa, psfratio, psfratio_aper]
nimgs = len(psf_maps)
bar = statusbar.StatusBar('Interpolating PSF images ................ : ', 0, nimgs)
if img.opts.quiet == False:
bar.start()
map_list = mp.parallel_map(func.eval_func_tuple,
zip(itertools.repeat(self.interp_prop),
psf_maps, itertools.repeat(psfcoords),
itertools.repeat(image.shape)), numcores=opts.ncores,
bar=bar)
if img.aperture is None:
psf_maj_int, psf_min_int, psf_pa_int, psf_ratio_int = map_list
else:
psf_maj_int, psf_min_int, psf_pa_int, psf_ratio_int, psf_ratio_aper_int = map_list
# Smooth if desired
if img.opts.psf_smooth is not None:
sm_scale = img.opts.psf_smooth / img.pix2beam([1.0, 1.0, 0.0])[0] / 3600.0 # pixels
if img.opts.aperture is None:
psf_maps = [psf_maj_int, psf_min_int, psf_pa_int, psf_ratio_int]
else:
psf_maps = [psf_maj_int, psf_min_int, psf_pa_int, psf_ratio_int, psf_ratio_aper_int]
nimgs = len(psf_maps)
bar = statusbar.StatusBar('Smoothing PSF images .................... : ', 0, nimgs)
if img.opts.quiet == False:
bar.start()
map_list = mp.parallel_map(func.eval_func_tuple,
zip(itertools.repeat(self.blur_image),
psf_maps, itertools.repeat(sm_scale)), numcores=opts.ncores,
bar=bar)
if img.aperture is None:
psf_maj_int, psf_min_int, psf_pa_int, psf_ratio_int = map_list
else:
psf_maj_int, psf_min_int, psf_pa_int, psf_ratio_int, psf_ratio_aper_int = map_list
# Make sure all smoothed, interpolated images are ndarrays
psf_maj_int = N.array(psf_maj_int)
psf_min_int = N.array(psf_min_int)
psf_pa_int = N.array(psf_pa_int)
psf_ratio_int = N.array(psf_ratio_int)
if img.aperture is None:
psf_ratio_aper_int = N.zeros(psf_maj_int.shape, dtype=N.float32)
else:
psf_ratio_aper_int = N.array(psf_ratio_aper_int, dtype=N.float32)
# Blank with NaNs if needed
mask = img.mask_arr
if isinstance(mask, N.ndarray):
pix_masked = N.where(mask == True)
psf_maj_int[pix_masked] = N.nan
psf_min_int[pix_masked] = N.nan
psf_pa_int[pix_masked] = N.nan
psf_ratio_int[pix_masked] = N.nan
psf_ratio_aper_int[pix_masked] = N.nan
# Store interpolated images. The major and minor axis images are
# the sigma in units of arcsec, the PA image in units of degrees east of
# north, the ratio images in units of 1/beam.
img.psf_vary_maj_arr = psf_maj_int * img.pix2beam([1.0, 1.0, 0.0])[0] * 3600.0 # sigma in arcsec
img.psf_vary_min_arr = psf_min_int * img.pix2beam([1.0, 1.0, 0.0])[0] * 3600.0 # sigma in arcsec
img.psf_vary_pa_arr = psf_pa_int
img.psf_vary_ratio_arr = psf_ratio_int # in 1/beam
img.psf_vary_ratio_aper_arr = psf_ratio_aper_int # in 1/beam
if opts.output_all:
func.write_image_to_file(img.use_io, img.imagename + '.psf_vary_maj.fits', img.psf_vary_maj_arr*fwsig, img, dir)
func.write_image_to_file(img.use_io, img.imagename + '.psf_vary_min.fits', img.psf_vary_min_arr*fwsig, img, dir)
func.write_image_to_file(img.use_io, img.imagename + '.psf_vary_pa.fits', img.psf_vary_pa_arr, img, dir)
func.write_image_to_file(img.use_io, img.imagename + '.psf_vary_ratio.fits', img.psf_vary_ratio_arr, img, dir)
func.write_image_to_file(img.use_io, img.imagename + '.psf_vary_ratio_aper.fits', img.psf_vary_ratio_aper_arr, img, dir)
# Loop through source and Gaussian lists and deconvolve the sizes using appropriate beam
bar2 = statusbar.StatusBar('Correcting deconvolved source sizes ..... : ', 0, img.nsrc)
if img.opts.quiet == False:
bar2.start()
for src in img.sources:
src_pos = img.sky2pix(src.posn_sky_centroid)
src_pos_int = (int(src_pos[0]), int(src_pos[1]))
gaus_c = img.gaus2pix(src.size_sky, src.posn_sky_centroid)
if opts.psf_fwhm is None:
gaus_bm = [psf_maj_int[src_pos_int]*fwsig, psf_min_int[src_pos_int]*fwsig, psf_pa_int[src_pos_int]]
else:
# Use user-specified constant PSF instead
gaus_bm = img.beam2pix(opts.psf_fwhm)
gaus_dc, err = func.deconv2(gaus_bm, gaus_c)
src.deconv_size_sky = img.pix2gaus(gaus_dc, src_pos)
src.deconv_size_skyE = [0.0, 0.0, 0.0]
for g in src.gaussians:
gaus_c = img.gaus2pix(g.size_sky, src.posn_sky_centroid)
gaus_dc, err = func.deconv2(gaus_bm, gaus_c)
g.deconv_size_sky = img.pix2gaus(gaus_dc, g.centre_pix)
g.deconv_size_skyE = [0.0, 0.0, 0.0]
if img.opts.quiet == False:
bar2.spin()
if img.opts.quiet == False:
bar2.increment()
bar2.stop()
img.completed_Ops.append('psf_vary')
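        # Typical usage from a normal Python session (a sketch; 'image.fits' is
        # a placeholder filename, and the option and image-type names below are
        # assumed to be the standard PyBDSF ones):
        #
        #   import bdsf
        #   img = bdsf.process_image('image.fits', psf_vary_do=True)
        #   img.export_image(img_type='psf_major')  # interpolated PSF major axis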
##################################################################################################
def trans_gaul(self, q):
" transposes a tuple of .gaul values "
y=[]
for i in range(len(q[0])):
elem=[]
for j in range(len(q)):
elem.append(q[j][i])
y.append(elem)
return y
##################################################################################################
def bindata(self, over, num): #ptpbin,nbin,ptplastbin, same as get_bins in fBDSM.
if num <= 100: ptpbin=num/5
if num > 100: ptpbin=num/10
if num > 1000: ptpbin=num/20
if ptpbin % 2 == 1: ptpbin=ptpbin+1
if num < 10: ptpbin=num
ptpbin = float(ptpbin) # cast to float to avoid integer division errors
nbin=int((num-ptpbin)/(ptpbin/over)+1)
ptplastbin=int((num-1)-(nbin-1)*ptpbin/over)
nbin=nbin+1
return ptpbin, nbin, ptplastbin
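        # Worked example of the binning above (values follow from the code,
        # not from fBDSM): for num=200 sources and over=2,
        #   ptpbin     = 200/10 = 20            (already even)
        #   nbin       = int((200-20)/(20/2) + 1) + 1 = 20
        #   ptplastbin = int(199 - 18*20/2) = 19
        # i.e. ~20 overlapping bins of ~20 points each, stepping by 10 points.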
##################################################################################################
def bin_and_stats_ny(self, x,y,over,ptpbin,nbin,ptplastbin,nsig):
import math
n1=N.array(range(nbin))+1 # bin number
n2=N.array([ptpbin]*nbin); n2[nbin-2]=ptplastbin; n2[nbin-1]=ptpbin/over
n3=N.array([ptpbin]*nbin, dtype=float); n3[nbin-1]=float(over)*(len(x)-ptpbin/2)/(nbin-1)
xval=N.zeros(nbin)
meany=N.zeros(nbin); stdy=N.zeros(nbin); mediany=N.zeros(nbin)
for i in range(nbin):
lb=int(round(1+(n1[i]-1)*n3[i]/over+(1-1))-1) # -1 for python indexing
ub=int(round(1+(n1[i]-1)*n3[i]/over+(n2[i]-1))-1) # -1 for python indexing
x1=x[lb:ub+1]; y1=y[lb:ub+1]
# do calcmedianclip2vec.f for code=YYN
if len(x1) > 0 and len(y1) > 0:
nout=100; niter=0
while nout>0 and niter<6:
med1=N.median(y1[:])
med2=10.**(N.median(N.log10(x1[:])))
medstd=0 # calcmedianstd.f
for j in y1: medstd += (j-med1)*(j-med1)
medstd=math.sqrt(medstd/len(y1)) #
av1=N.mean(y1); std1=func.std(y1)
av2=N.mean(x1); std2=func.std(x1)
# get_medianclip_vec2
z=N.transpose([x1, y1])
z1=N.transpose([n for n in z if abs(n[1]-med1)<=nsig*medstd])
nout=len(x1)-len(z1[0])
x1=z1[0]; y1=z1[1];
niter+=1
xval[i]=med2;
meany[i]=av1; stdy[i]=std1; mediany[i]=med1
if stdy[nbin-1]/mediany[nbin-1] > stdy[nbin-2]/mediany[nbin-2]:
stdy[nbin-1]=stdy[nbin-2]/mediany[nbin-2]*mediany[nbin-1]
return xval, meany, stdy, mediany
##################################################################################################
def LM_fit(self, x, y, err, funct, order=0):
if funct == func.poly:
p0=N.array([y[N.argmax(x)]] + [0]*order)
if funct == func.wenss_fit:
p0=N.array([y[N.argmax(x)]] + [1.])
res=lambda p, x, y, err: (y-funct(p, x))/err
(p, flag)=leastsq(res, p0, args=(x, y, err))
return p
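        # Example of the pattern above with synthetic data (a sketch, assuming
        # func.poly(p, x) evaluates the polynomial sum_i p[i]*x**i; the callers
        # above pass log10 of the SNR as x):
        #
        #   x = N.array([1.0, 1.5, 2.0, 2.5])
        #   y = N.array([1.2, 1.1, 1.05, 1.0])
        #   err = N.ones(4)
        #   p = self.LM_fit(x, y, err, func.poly, order=1)  # linear fit in x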
##################################################################################################
def fit_bins_func(self, x,y,over,ptpbin,nbin,ptplastbin,nsig): # sub_size_ksclip
import math
(xval,meany,stdy,medy)=self.bin_and_stats_ny(x,y,over,ptpbin,nbin,ptplastbin,nsig)
yfit=stdy/medy
err=N.array([1.]*nbin)
if ptplastbin > 0:
err[nbin-2]=err[0]*math.sqrt(1.0*ptpbin/ptplastbin)
err[nbin-1]=err[0]*math.sqrt(1.0*ptpbin*over/ptplastbin)
        i=0
        while i < nbin and not N.isfinite(yfit[i]):
            # skip unusable leading bins (yfit = stdy/medy can be nan or inf)
            i += 1
        sind = i
        if sind > 0.25*nbin:
            sind=int(round(0.25*nbin))-1
s_c=self.LM_fit(xval[sind:],yfit[sind:],err[sind:], func.wenss_fit)
err[:]=1.
s_cm=self.LM_fit(N.log10(xval),medy,err,func.poly, order=1)
if len(xval) >= 3:
s_dm=self.LM_fit(N.log10(xval),medy,err,func.poly, order=2)
else:
s_dm = (N.array([s_cm[0], s_cm[1], 0.0]), 0)
if ptpbin<75: s_dm=N.append(s_cm[:], [0.])
return s_c, s_dm
##################################################################################################
def get_unresolved(self, g_gauls, beam, nsig, kappa2, over, bright_snr_cut=20.0, plot=False):
""""Gets subset of unresolved sources
Also flags as unresolved all sources with SNRs above
bright_cut_snr, since fitting below is unreliable for bright
sources.
"""
num=len(g_gauls[0])
if num < 10:
# Too few sources to do fitting
return []
b1=N.asarray(g_gauls[4])/(beam[0]*3600.)
b2=N.asarray(g_gauls[5])/(beam[1]*3600.)
s1=N.asarray(g_gauls[1])/N.array(g_gauls[8])
snr=N.array(s1)
index=snr.argsort()
snr=snr[index]
nmaj=N.array(b1)[index]
nmin=N.array(b2)[index]
# if plot: pl.figure()
f_sclip=N.zeros((2,num), dtype=bool)
for idx, nbeam in enumerate([nmaj, nmin]):
xarr=N.copy(snr)
yarr=N.copy(nbeam)
niter=0; nout=num; noutold=nout*2
while niter<10 and nout >0.75*num:
(ptpbin, nbin, ptplastbin)=self.bindata(over,nout) # get_bins in fBDSM
(s_c,s_dm) = self.fit_bins_func(xarr,yarr,over,ptpbin,nbin,ptplastbin,nsig) # size_ksclip_wenss in fBDSM
noutold = len(xarr)
z = N.transpose([xarr, yarr, s_dm[0]+s_dm[1]*N.log10(xarr)+s_dm[2]*(N.log10(xarr)**2.), \
N.sqrt(s_c[0]*s_c[0]+s_c[1]*s_c[1]/(xarr*xarr)) ])
                z1 = N.transpose([n for n in z if abs(n[1]-n[2])/(n[2]*n[3]) < kappa2])
                nout = len(z1[0])
                xarr = z1[0]; yarr = z1[1]
                niter += 1
            # flag in the 'unresolved' sense: True => unresolved
logsnr=N.log10(snr)
dumr = N.sqrt(s_c[0]*s_c[0]+s_c[1]*s_c[1]/(snr*snr))
med = s_dm[0]+s_dm[1]*logsnr+s_dm[2]*(logsnr*logsnr)
f_sclip[idx] = N.abs((nbeam-med)/(med*dumr)) < N.array([kappa2]*num)
f_s = f_sclip[0]*f_sclip[1]
# Add bright sources
if bright_snr_cut is not None:
if bright_snr_cut < 20.0:
bright_snr_cut = 20.0
bright_srcs = N.where(snr >= bright_snr_cut)
if len(bright_srcs[0]) > 0:
f_s[bright_srcs] = True
# now make plots
# if plot:
# bb=[b1, b2]
# pl.subplot(211+idx)
# pl.semilogx(s1, bb[idx], 'og')
# f0=f_sclip[idx][index.argsort()]
# sf=[n for i, n in enumerate(s1) if f0[i]]
# b1f=[n for i, n in enumerate(bb[idx]) if f0[i]]
# pl.semilogx(sf, b1f, 'or')
# pl.semilogx(snr,med,'-')
# pl.semilogx(snr,med+med*dumr*(N.array([kappa2]*num)),'-')
# pl.semilogx(snr,med-med*dumr*(N.array([kappa2]*num)),'-')
# pl.title(' axis ' + str(idx))
#
return f_s[index.argsort()]
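        # In summary, a source is flagged unresolved when, for both axes,
        #   |size/beam - med(SNR)| / (med(SNR) * sigma(SNR)) < kappa2
        # where med(SNR) is the quadratic fit in log10(SNR) (s_dm) and
        # sigma(SNR) the envelope fit (s_c) from fit_bins_func, and all
        # sources above bright_snr_cut are additionally flagged unresolved.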
##################################################################################################
def av_psf(self, g_gauls, beam, flag):
""" calculate how much the SNR-weighted sizes of unresolved sources differs from the
synthesized beam. Same as av_psf.f in fBDSM."""
from math import sqrt
bmaj = N.asarray(g_gauls[4])
bmin = N.asarray(g_gauls[5])
bpa = N.asarray(g_gauls[6])
wt = N.asarray(g_gauls[1])/N.asarray(g_gauls[8])
flagwt = wt*flag
sumwt = N.sum(flagwt)
w1 = N.sum(flagwt*flagwt)
wtavbm = N.array([N.sum(bmaj*flagwt), N.sum(bmin*flagwt), N.sum(bpa*flagwt)])/sumwt
dumrar = N.array([N.sum(bmaj*bmaj*flagwt), N.sum(bmin*bmin*flagwt), N.sum(bpa*bpa*flagwt)])
dd = sumwt*sumwt-w1
wtstdbm = N.sqrt((dumrar - wtavbm*wtavbm*sumwt)*sumwt/dd)
avpa = N.sum(bpa*flagwt-180.0*flagwt*N.array(bpa >= 90))/sumwt
stdpa = N.sum(bpa*flagwt+(180.0*180.0-360.0*bpa)*flagwt*N.array(bpa >= 90))
stdpa = sqrt(abs((stdpa-avpa*avpa*sumwt)*sumwt/dd))
if stdpa < wtstdbm[2]:
wtstdbm[2] = stdpa
wtavbm[2] = avpa
return (wtavbm - N.array([beam[0]*3600.0, beam[1]*3600.0, beam[2]]))/wtstdbm
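        # The estimators above are the standard frequency-weighted mean and
        # scatter with weights w = SNR:
        #   <b>    = sum(w_i * b_i) / sum(w_i)
        #   var(b) = [sum(w_i * b_i^2) - <b>^2 * sum(w_i)] * sum(w_i)
        #            / [sum(w_i)^2 - sum(w_i^2)]
        # with position angles >= 90 deg folded by -180 deg before averaging.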
##################################################################################################
def get_voronoi_generators(self, g_gauls, generators, gencode, snrcut, snrtop, snrbot, snrcutstack):
"""This gets the list of all voronoi generators. It is either the centres of the brightest
sources, or is imported from metadata (in future)."""
from math import sqrt
num=len(g_gauls[0])
snr=N.asarray(g_gauls[1])/N.asarray(g_gauls[8])
index=snr.argsort()
snr_incr = snr[index]
snr = snr_incr[::-1]
x = N.asarray(g_gauls[2])[index]
y = N.asarray(g_gauls[3])[index]
cutoff = 0
if generators == 'calibrators' or generators == 'field':
if gencode != 'file':
gencode = 'list'
if gencode == 'list':
cutoff = int(round(num*(snrtop)))
if cutoff > len(snr):
cutoff = len(snr)
# Make sure we don't fall below snrcutstack (SNR cut for stacking of PSFs), since
# it makes no sense to make tiles with generators that fall below this cut.
if snr[cutoff-1] < snrcutstack:
cutoff = num - snr_incr.searchsorted(snrcutstack)
if generators == 'calibrators':
if gencode == 'file':
raise NotImplementedError("gencode=file not yet implemented.")
x1 = x.tolist()
y1 = y.tolist()
x1.reverse()
y1.reverse()
snr1 = snr.tolist()
vorogenP = N.asarray([x1[0:cutoff], y1[0:cutoff], snr1[0:cutoff]])
vorogenS = None
return vorogenP, vorogenS
##################################################################################################
def edit_vorogenlist(self, vorogenP, frac):
""" Edit primary voronoi generator list. Each tile has a tile centre and can
have more than one generator to be averaged. tile_list is a list of arrays, indexed
by the tile number and each array is an array of numbers in the ngen list which are
the generators in that tile. xtile, ytile and snrtile are arrays of length number_of_tiles
and have x,y,snr of each tile. Group together generators
if closer than a fraction of dist to third closest."""
xgen, ygen, snrgen = vorogenP
flag = N.zeros(len(xgen))
coord = N.array([xgen,ygen]).transpose()
tile_list = []
tile_coord = []; tile_snr = []
for i in range(len(xgen)):
dist = N.array([func.dist_2pt(coord[i], t) for t in coord])
# dist = N.array(map(lambda t: func.dist_2pt(coord[i], t), coord))
indi = N.argsort(dist)
sortdist = dist[indi]
if sortdist[1] < frac * sortdist[2]: # first is the element itself
if flag[indi[1]] + flag[i] == 0: # not already deleted from other pair
tile_list.append([i, indi[1]])
tile_coord.append((coord[i]*snrgen[i]+coord[indi[1]]*snrgen[indi[1]])/(snrgen[i]+snrgen[indi[1]]))
tile_snr.append(snrgen[i]+snrgen[indi[1]])
flag[i] = 1
flag[indi[1]] = 1
else:
if len(dist) > 3:
if sortdist[1]+sortdist[2] < 2.0*frac*sortdist[3]: # for 3 close-by sources
in1=indi[1]
in2=indi[2]
if flag[in1]+flag[in2]+flag[i] == 0: # not already deleted from others
tile_list.append([i, in1, in2])
tile_coord.append((coord[i]*snrgen[i]+coord[in1]*snrgen[in1]+coord[in2]*snrgen[in2]) \
/(snrgen[i]+snrgen[in1]+snrgen[in2]))
tile_snr.append(snrgen[i]+snrgen[in1]+snrgen[in2])
flag[i] = 1
flag[in1] = 1
flag[in2] = 1
else:
tile_list.append([i])
tile_coord.append(coord[i])
tile_snr.append(snrgen[i])
# Assign any leftover generators
for i in range(len(xgen)):
if flag[i] == 0:
tile_list.append([i])
tile_coord.append(coord[i])
tile_snr.append(snrgen[i])
return tile_list, tile_coord, tile_snr
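        # Small worked example of the grouping rule (hypothetical positions):
        # generators at (0,0), (2,0) and (100,100) with frac=0.9. For the
        # first one the sorted distances are [0, 2, ~141.4]; since
        # 2 < 0.9 * 141.4, generators 0 and 1 are merged into a single tile
        # centred on their SNR-weighted mean position, while the generator at
        # (100,100) remains a tile of its own.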
##################################################################################################
def tess_simple(self, vorogenP, wts, tess_sc, tess_fuzzy, shape):
""" Simple tesselation """
xgen, ygen, snrgen = vorogenP
volrank = _pytess.pytess_simple(shape[0], shape[1], xgen, ygen, snrgen, \
wts, tess_fuzzy, tess_sc)
return volrank
##################################################################################################
def tess_roundness(self, vorogenP, tess_sc, tess_fuzzy, shape):
""" Tesselation, modified to make the tiles more round. """
xgen, ygen, snrgen = vorogenP
volrank = _pytess.pytess_roundness(shape[0], shape[1], xgen, ygen, snrgen, \
tess_fuzzy, tess_sc)
return volrank
##################################################################################################
def pixintile(self, tilecoord, pixel, tess_method, wts, tess_sc, tess_fuzzy):
""" This has routines to find out which tile a given pixel belongs to. """
if tess_method == 'roundness':
#tilenum = pytess_roundness(tilecoord, pixel, wts, tess_sc, tess_fuzzy)
print(" Not yet implemented !!!! ")
return 0
else:
xgen, ygen = tilecoord
xgen = N.asarray(xgen)
ygen = N.asarray(ygen)
ngen = len(xgen)
i,j = pixel
dist = N.sqrt((i-xgen)*(i-xgen)+(j-ygen)*(j-ygen))/wts
minind = dist.argmin()
if tess_sc == 's':
tilenum=minind
else:
print(" Not yet implemented !!!! ")
return tilenum
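        # Example (a sketch; 's' selects the simple nearest-generator branch
        # used above):
        #
        #   op = Op_psf_vary()
        #   tilecoord = ([10.0, 50.0], [10.0, 50.0])  # two generators
        #   wts = N.array([1.0, 1.0])
        #   op.pixintile(tilecoord, (12, 11), 'unity', wts, 's', 0.0)  # -> 0
        #
        # i.e. the pixel is assigned to the generator nearest in
        # weight-scaled distance.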
##################################################################################################
def tesselate(self, vorogenP, vorogenS, tile_prop, tess_method, tess_sc, tess_fuzzy, generators, gencode, shape):
""" Various ways of tesselating. If generators='calibrator', no need to tesselate, just get
modified list based on very nearby sources. If generators='field' then tesselate. The image
is tesselated based on tile_prop. """
wtfn={'unity' : lambda x : N.ones(len(x)), \
'log10' : N.log10, \
'sqrtlog10' : lambda x : N.sqrt(N.log10(x)), \
'roundness' : N.array}
tile_list, tile_coord, tile_snr = tile_prop
xt = self.trans_gaul(tile_coord)[0]
yt = self.trans_gaul(tile_coord)[1]
vorogenT = xt, yt, tile_snr
wt_fn = wtfn[tess_method]
wts = wt_fn(tile_snr)
if tess_method == 'roundness':
volrank = self.tess_roundness(vorogenT, tess_sc, tess_fuzzy, shape)
else:
volrank = self.tess_simple(vorogenT, wts, tess_sc, tess_fuzzy, shape)
return volrank, wts
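        # Example of the weighting step (a sketch): with tess_method='log10'
        # and tile SNRs of [10., 100., 1000.], wts = [1., 2., 3.], so brighter
        # tiles claim proportionally larger cells in the weighted tesselation.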
##################################################################################################
def edit_tile(self, ltnum, g_gauls, flag_unresolved, snrcutstack, volrank, tile_prop, tess_sc, \
tess_fuzzy, wts, tess_method, plot):
""" Looks at tiles with no (or one) unresolved source inside it and deletes it and recomputes
the tiling. For now, does not recompute since we wont use the rank for those pixels anyway."""
if ltnum > 1: raise NotImplementedError("NOT YET IMPLEMENTED FOR LTNUM>1")
tile_list, tile_coord, tile_snr = tile_prop
tr_gaul = self.trans_gaul(g_gauls)
tr=[n for i, n in enumerate(tr_gaul) if flag_unresolved[i] and n[1]/n[8] >= snrcutstack]
ntile = len(tile_list)
ngenpertile=N.zeros(ntile)
for itile in range(ntile):
tile_gauls = [n for n in tr if volrank[int(round(n[2])),int(round(n[3]))]-1 \
== itile]
ngenpertile[itile]=len(tile_gauls)
new_n = N.sum(ngenpertile >= ltnum)
# prepare list of good tiles to pass to pixintile
goodtiles = N.array(N.where(ngenpertile >= ltnum)[0])
new_n = len(goodtiles)
tile_coord_n = [n for i,n in enumerate(tile_coord) if i in goodtiles]
wts_n = [n for i,n in enumerate(wts) if i in goodtiles]
r2t = N.zeros(ntile, dtype=int)
entry = -1
for itile in range(ntile):
if ngenpertile[itile] >= ltnum:
r2t[itile] = itile
else:
pixel = tile_coord[itile]
tilenum = self.pixintile(self.trans_gaul(tile_coord_n), pixel, tess_method, wts_n, tess_sc, tess_fuzzy)
r2t[itile] = tilenum
for itile in range(new_n):
num = N.sum(r2t == itile)
if num == 0:
minarr = -999
while minarr != itile:
arr = N.where(r2t > itile)[0]
minarr = r2t[arr].min()-1
for i in arr: r2t[i]=r2t[i]-1
n_tile_list = []; n_tile_coord = []; n_tile_snr = []
for itile in range(new_n):
ind = N.where(r2t == itile)[0]; ind1 = []
for i in ind: ind1 = ind1 + tile_list[i]
n_tile_list.append(ind1)
snrs = N.array([tile_snr[i] for i in ind])
coords = N.array([tile_coord[i] for i in ind])
n_tile_snr.append(N.sum(snrs))
n_tile_coord.append(N.sum([snrs[i]*coords[i] for i in range(len(snrs))], 0)/N.sum(snrs))
ngenpertile=N.zeros(new_n)
for itile in range(new_n):
tile_gauls = [n for n in tr if r2t[volrank[int(round(n[2])),int(round(n[3]))]-1] \
== itile]
ngenpertile[itile]=len(tile_gauls)
tile_prop = n_tile_list, n_tile_coord, n_tile_snr
return ngenpertile, tile_prop, r2t
##################################################################################################
def stackpsf(self, image, beam, g_gauls, wts, cdelt, factor):
""" Stacks all the images of sources in the gaussian list gauls from image, out to
a factor times the beam size. Currently the mask is for the whole image but need to
modify it for masks for each gaussian. These gaussians are supposed to be relatively
        isolated unresolved sources. Cut out an image a bit bigger than fac X beam and shift
        it to the nearest half pixel and then add.
Does not handle masks etc well at all. Masks for image for blanks, masks for \
islands, etc."""
gxcens_pix = g_gauls[2]
gycens_pix = g_gauls[3]
peak = g_gauls[1]
psfimsize = int(round(max(beam[0], beam[1])/max(cdelt[0], cdelt[1]) * factor)) # fac X fwhm; fac ~ 2
psfimage = N.zeros((psfimsize, psfimsize), dtype=N.float32)
cs2=cutoutsize2 = int(round(psfimsize*(1. + 2./factor)/2.)) # size/2. factor => to avoid edge effects etc
cc = cutoutcen_ind=[cs2, cs2]
cpsf=cen_psf_ind = N.array([int(int(round(psfimsize))/2)]*2)
wt=0.
num=len(gxcens_pix)
for isrc in range(num): # MASK !!!!!!!!!!!
wt += wts[isrc]
gcp=N.array([gxcens_pix[isrc], gycens_pix[isrc]])
gcen_ind=gcp-1
rc=rcen_ind = N.asarray(N.round(gcen_ind), dtype=int)
shift=cc-(gcen_ind-(rc-cs2))
cutimage = image[rc[0]-cs2:rc[0]+cs2,rc[1]-cs2:rc[1]+cs2]
if len(cutimage.shape) == 3: cutimage=cutimage[:,:,0]
if 0 not in cutimage.shape:
if sum(sum(N.isnan(cutimage))) == 0:
im_shift = func.imageshift(cutimage, shift)
im_shift = im_shift/peak[isrc]*wts[isrc]
subim_shift = im_shift[cc[0]-cpsf[0]:cc[0]-cpsf[0]+psfimsize,cc[1]-cpsf[1]:cc[1]-cpsf[1]+psfimsize]
if subim_shift.shape == psfimage.shape:
# Check shapes, as they can differ if source is near edge of image.
# If they do differ, don't use that source (may be distorted).
psfimage += subim_shift
psfimage = psfimage/wt
return psfimage
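        # The loop above is a standard shift-and-add stack: each cutout is
        # shifted by the sub-pixel offset of its Gaussian centre, normalized
        # by its peak flux and weighted by SNR before summing, so the result
        # approximates the local PSF with unit peak. E.g. for two sources
        # with weights 2 and 1 (hypothetical numbers),
        #   psfimage = (2*im1/peak1 + 1*im2/peak2) / 3.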
##################################################################################################
def psf_in_tile(self, image, beam, g_gauls, cdelt, factor, snrcutstack, volrank, \
tile_prop, plot, img):
""" For each tile given by tile_prop, make a list of all gaussians in the constituent tesselations
and pass it to stackpsf with a weight for each gaussian, to calculate the average psf per tile.
Should define weights inside a tile to include closure errors """
mylog = mylogger.logging.getLogger("PyBDSM."+img.log+"Psf_Vary")
tile_list, tile_coord, tile_snr = tile_prop
tr_gaul = self.trans_gaul(g_gauls)
tr=[n for i, n in enumerate(tr_gaul)]# if n[1]/n[8] >= snrcutstack]
ntile = len(tile_list)
psfimages = []
psfcoords = []
psfratio = [] # ratio of peak flux to total flux
psfratio_aper = [] # ratio of peak flux to aperture flux
srcpertile = N.zeros(ntile)
snrpertile = N.zeros(ntile)
xt, yt = N.transpose(tile_coord)
if plot:
pl.figure(None)
            colours=['b','g','r','c','m','y','k']*(len(xt)//7+1)
pl.axis([0.0, image.shape[0], 0.0, image.shape[1]])
pl.title('Tesselated image with tile centres and unresolved sources')
for i in range(ntile):
pl.plot([xt[i]], [yt[i]], 'D'+colours[i])
pl.text(xt[i], yt[i], str(i))
for itile in range(ntile):
tile_gauls = [n for n in tr if volrank[int(round(n[2])),int(round(n[3]))]-1 \
== itile]
t_gauls = self.trans_gaul(tile_gauls)
srcpertile[itile] = len(tile_gauls)
if plot:
pl.plot(t_gauls[2], t_gauls[3], 'x'+'k', mew=1.3)#colours[itile])
for i, ig in enumerate(t_gauls[2]):
xx=[xt[itile], ig]
yy=[yt[itile], t_gauls[3][i]]
pl.plot(xx,yy,'-'+colours[itile])
wts = N.asarray(t_gauls[1])/N.asarray(t_gauls[8]) # wt is SNR
snrpertile[itile] = sum(wts)
mylog.info('PSF tile #%i (center = %i, %i): %i unresolved sources, SNR = %.1f' %
(itile, xt[itile], yt[itile], srcpertile[itile], snrpertile[itile]))
a = self.stackpsf(image, beam, t_gauls, wts, cdelt, factor)
psfimages.append(a)
psfcoords.append([sum(N.asarray(t_gauls[2])*wts)/sum(wts), sum(N.asarray(t_gauls[3])*wts)/sum(wts)])
# Find peak/total flux ratio for sources in tile. If an aperture is given,
# use the aperture flux as well.
# t_gauls[0] is source_id
src_ratio = []
src_wts = []
src_ratio_aper = []
src_wts_aper = []
for gt in tile_gauls:
src = img.sources[gt[0]]
if img.aperture is not None:
src_ratio_aper.append(src.peak_flux_max / src.aperture_flux)
src_wts_aper.append(src.total_flux / src.aperture_fluxE)
src_ratio.append(src.peak_flux_max / src.total_flux)
src_wts.append(src.total_flux / src.total_fluxE)
if img.aperture is not None:
psfratio_aper.append(sum(N.asarray(src_ratio_aper)*src_wts_aper)/sum(src_wts_aper))
else:
psfratio_aper.append(0.0)
psfratio.append(sum(N.asarray(src_ratio)*src_wts)/sum(src_wts))
totpsfimage = psfimages[0]*snrpertile[0]
for itile in range(1,ntile):
totpsfimage += psfimages[itile]*snrpertile[itile]
totpsfimage = totpsfimage/sum(snrpertile)
if plot:
pl.imshow(N.transpose(volrank), origin='lower', interpolation='nearest'); pl.colorbar()
if plot:
pl.figure(None)
pl.clf()
ax = pl.subplot(1,1,1)
pax = ax.get_position()
start = N.array((pax.xmin, pax.ymin))
stop = N.array((pax.xmax, pax.ymax))
plaxis = pl.axis([0, image.shape[0], 0, image.shape[1]])
pl.title('Stacked psf for each tile')
for itile in range(ntile):
im=psfimages[itile]
sz=0.07
spt = int(round(snrpertile[itile]*10))/10.
titl='n='+str(int(round(srcpertile[itile])))+'; SNR='+str(spt)
posn=[psfcoords[itile][0], psfcoords[itile][1]]
normposn=N.array(stop-start, dtype=float)/N.array(image.shape[0:2])*posn+start
a=pl.axes([normposn[0]-sz/2., normposn[1]-sz/2., sz, sz])
pl.contour(im,15)
pl.title(titl, fontsize='small')
pl.setp(a, xticks=[], yticks=[])
pl.show()
return psfimages, psfcoords, totpsfimage, psfratio, psfratio_aper
##################################################################################################
def interp_shapcoefs(self, nmax, tr_psf_cf, psfcoords, imshape, compress, plot):
"""Interpolate using natgrid.
Check to see if variation is significant.
"""
x, y = N.transpose(psfcoords)
index = [(i,j) for i in range(nmax+1) for j in range(nmax+1-i)]
xi=x
yi=y
xo=N.arange(0.0,round(imshape[0]), round(compress))
yo=N.arange(0.0,round(imshape[1]), round(compress))
rgrid=nat.Natgrid(xi,yi,xo,yo)
p={}
for coord in index:
            z = N.array(tr_psf_cf[coord]) # else natgrid can't deal with non-contiguous memory
p[coord] = rgrid.rgrd(z)
# if plot:
# for i,coord in enumerate(index):
# if i % 36 == 0:
# pl.figure(None)
# pl.clf()
# title = 'Interpolated shapelet coefficients'
# if i>0: title = title+' (cont)'
# pl.suptitle(title)
# pl.subplot(6,6,(i%36)+1)
# pl.title(str(coord))
# pl.plot(xi/compress, yi/compress, 'xk')
# pl.imshow(p[coord], interpolation='nearest')
# pl.colorbar()
return p, xo, yo
##################################################################################################
def interp_prop(self, prop, psfcoords, imshape, compress=1):
"""Interpolate using natgrid.
Should check to see if variation is significant.
"""
x, y = N.transpose(psfcoords)
xi=x
yi=y
xo=N.arange(0.0,round(imshape[0]), round(compress))
yo=N.arange(0.0,round(imshape[1]), round(compress))
rgrid=nat.Natgrid(xi,yi,xo,yo)
prop_int = rgrid.rgrd(prop)
return prop_int
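        # interp_prop depends on the optional natgrid wrapper (nat). A rough
        # scipy-only equivalent, for illustration (linear rather than
        # natural-neighbour interpolation, so results will differ slightly):
        #
        #   from scipy.interpolate import griddata
        #   xo, yo = N.mgrid[0:imshape[0]:compress, 0:imshape[1]:compress]
        #   prop_int = griddata((x, y), prop, (xo, yo), method='linear')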
##################################################################################################
def create_psf_grid(self, psf_coeff_interp, imshape, xgrid, ygrid, skip, nmax, psfshape, basis, beta,
cen, totpsfimage, plot):
""" Creates a image with the gridded interpolated psfs. xgrid and ygrid are 1d numpy arrays
with the x and y coordinates of the grids. """
# if plot:
# plnum=N.zeros(2)
# for i in range(2):
# dum=pl.figure(None)
# plnum[i]=dum.number
# pl.clf()
# if i == 0: pl.suptitle('Gridded psfs')
# if i == 1: pl.suptitle('Gridded residual psfs')
# ax = pl.subplot(1,1,1)
# plaxis = pl.axis([0, imshape[0], 0, imshape[1]])
# pax = ax.get_position()
# start = N.array((pax.xmin, pax.ymin))
# stop = N.array((pax.xmax, pax.ymax))
# sz=0.07
        mask=N.zeros(psfshape, dtype=bool) # right now doesn't matter
xg=xgrid[::skip+1]
yg=ygrid[::skip+1]
index = [(i,j) for i in range(0,len(xgrid),skip+1) for j in range(0,len(ygrid),skip+1)]
xy = [(i,j) for i in xgrid[::skip+1] for j in ygrid[::skip+1]]
blah=[]
for i, coord in enumerate(index):
maxpsfshape = [0, 0]
for k in psf_coeff_interp:
if k[0]+1 > maxpsfshape[0]:
maxpsfshape[0] = k[0]+1
if k[1]+1 > maxpsfshape[1]:
maxpsfshape[1] = k[1]+1
cf = N.zeros(maxpsfshape)
for k in psf_coeff_interp:
cf[k]=psf_coeff_interp[k][coord]
cf = N.transpose(cf)
psfgridim = sh.reconstruct_shapelets(psfshape, mask, basis, beta, cen, nmax, cf)
blah.append(psfgridim)
# if plot:
# for j in range(2):
# pl.figure(plnum[j])
# posn = [xy[i][0], xy[i][1]]
# normposn =N.array(stop-start, dtype=float)/N.array(imshape[0:2])*posn+start
# a=pl.axes([normposn[0]-sz/2., normposn[1]-sz/2., sz, sz])
# if j == 0: pl.contour(psfgridim,15)
# if j == 1: pl.contour(psfgridim-totpsfimage,15)
# pl.setp(a, xticks=[], yticks=[])
# pl.colorbar()
# if plot:
# pl.figure(plnum[0])
# pl.figure(plnum[1])
#
return blah
##################################################################################################
def blur_image(self, im, n, ny=None) :
""" blurs the image by convolving with a gaussian kernel of typical
size n. The optional keyword argument ny allows for a different
size in the y direction.
"""
from scipy.ndimage import gaussian_filter
sx = n
if ny is not None:
sy = ny
else:
sy = n
improc = gaussian_filter(im, [sy, sx])
return improc
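# Example usage of blur_image (a sketch; 'psf_maj_map' is a placeholder 2-D
# ndarray): smooth with a circular Gaussian of sigma = 3 pixels, or an
# elliptical one with sigma_x = 5 and sigma_y = 2:
#
#   sm = Op_psf_vary().blur_image(psf_maj_map, 3)
#   sm = Op_psf_vary().blur_image(psf_maj_map, 5, ny=2)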
PyBDSF-1.11.0/bdsf/pybdsf.py 0000664 0000000 0000000 00000067133 14650706641 0015423 0 ustar 00root root 0000000 0000000 """Interactive PyBDSF shell.
This module initializes the interactive PyBDSF shell, which is a customized
IPython environment. It should be called from the terminal prompt using the
command "pybdsf".
"""
from __future__ import print_function
import bdsf
from bdsf.image import Image
import pydoc
import sys
import inspect
###############################################################################
# Functions needed only in the custom IPython shell are defined here. Other
# functions used by both the custom shell and normal Python or IPython
# environments are defined in interface.py.
#
# Before starting the IPython shell, we need to define all the functions and
# variables that we want in the namespace. Note that we adopt the convention
# for this UI of using lines of 72 characters max for doc strings and the
# start-up banner. However, the parameter list will fill the entire available
# terminal width to consume as few vertical lines as possible.
global _img
_img = Image({'filename':''})
_img._is_interactive_shell = True
T = True
F = False
true = True
false = False
def inp(cur_cmd=None):
"""List inputs for current task.
If a task is given as an argument, inp sets the current task
to the given task. If no task is given, inp lists the parameters
of the current task.
"""
global _img
success = _set_pars_from_prompt()
if not success:
return
if cur_cmd is not None:
if not hasattr(cur_cmd, 'arg_list'):
print('\033[31;1mERROR\033[0m: not a valid task')
return
_set_current_cmd(cur_cmd)
else:
if not hasattr(_img, '_current_cmd'):
print('\033[31;1mERROR\033[0m: no task is set')
return
bdsf.interface.list_pars(_img, opts_list=_img._current_cmd_arg_list,
banner=_img._current_cmd_desc,
use_groups=_img._current_cmd_use_groups)
def go(cur_cmd=None):
"""Executes the current task.
If a task is given as an argument, go executes the given task,
even if it is not the current task. The current task is not
changed in this case.
"""
global _img
success = _set_pars_from_prompt()
if not success:
return
if cur_cmd is None:
if not hasattr(_img, '_current_cmd'):
print('\033[31;1mERROR\033[0m: no task is set')
return
cur_cmd = _img._current_cmd
if not hasattr(cur_cmd, 'arg_list'):
print('\033[31;1mERROR\033[0m: not a valid task')
return
cur_cmd()
def default(cur_cmd=None):
"""Resets all parameters for a task to their default values.
If a task name is given (e.g., "default show_fit"), the
parameters for that task are reset. If no task name is
given, the parameters of the current task are reset.
"""
global _img
if cur_cmd is None:
if not hasattr(_img, '_current_cmd'):
print('\033[31;1mERROR\033[0m: no task is set')
return
cur_cmd = _img._current_cmd
if hasattr(cur_cmd, 'arg_list'):
opts_list = cur_cmd.arg_list
else:
print('\033[31;1mERROR\033[0m: not a valid task')
return
_img.opts.set_default(opts_list)
_replace_vals_in_namespace(opt_names=opts_list)
def tget(filename=None):
"""Load processing parameters from a parameter save file.
A file name may be given (e.g., "tget 'savefile.sav'"), in which case the
parameters are loaded from the file specified. If no file name is given,
the parameters are loaded from the file 'pybdsf.last' if it exists.
Normally, the save file is created by the tput command (try "help tput"
for more info).
The save file is a "pickled" python dictionary which can be loaded into
python and edited by hand. See the pickle module for more information.
Below is an example of how to edit a save file by hand:
BDSF [1]: import pickle
BDSF [2]: with open('savefile.sav', 'r') as savefile:
BDSF [3]: pars = pickle.load(savefile)
BDSF [4]: pars['rms_box'] = (80, 20) --> change rms_box parameter
BDSF [5]: with open('savefile.sav', 'w') as savefile:
BDSF [6]: pickle.dump(pars, savefile) --> save changes
"""
try:
import cPickle as pickle
except ImportError:
import pickle
import os
global _img
# Check whether user has given a task name as input (as done in casapy).
# If so, reset filename to None.
if hasattr(filename, 'arg_list'):
filename = None
if filename is None or filename == '':
if os.path.isfile('pybdsf.last'):
filename = 'pybdsf.last'
else:
print('\033[31;1mERROR\033[0m: No file name given and '\
'"pybdsf.last" not found.\nPlease specify a file to load.')
return
if os.path.isfile(filename):
try:
pkl_file = open(filename, 'rb')
pars = pickle.load(pkl_file)
pkl_file.close()
_img.opts.set_opts(pars)
_replace_vals_in_namespace()
print("--> Loaded parameters from file '" + filename + "'.")
except:
print("\033[31;1mERROR\033[0m: Could not read file '" + \
filename + "'.")
else:
print("\033[31;1mERROR\033[0m: File '" + filename + "' not found.")
def tput(filename=None, quiet=False):
"""Save processing parameters to a file.
A file name may be given (e.g., "tput 'savefile.sav'"), in which case the
parameters are saved to the file specified. If no file name is given, the
parameters are saved to the file 'pybdsf.last'. The saved parameters can
be loaded using the tget command (try "help tget" for more info).
The save file is a "pickled" python dictionary which can be loaded into
python and edited by hand. See the pickle module for more information.
Below is an example of how to edit a save file by hand:
BDSF [1]: import pickle
BDSF [2]: with open('savefile.sav', 'r') as savefile:
BDSF [3]: pars = pickle.load(savefile)
BDSF [4]: pars['rms_box'] = (80, 20) --> change rms_box parameter
BDSF [5]: with open('savefile.sav', 'w') as savefile:
BDSF [6]: pickle.dump(pars, savefile) --> save changes
"""
try:
import cPickle as pickle
except ImportError:
import pickle
global _img
success = _set_pars_from_prompt()
if not success:
return
if filename is None or filename == '':
filename = 'pybdsf.last'
# convert opts to dictionary
pars = _img.opts.to_dict()
output = open(filename, 'wb')
pickle.dump(pars, output, protocol=0)
output.close()
if not quiet:
print("--> Saved parameters to file '" + filename + "'.")
def _set_pars_from_prompt():
"""Gets parameters and value and stores them in _img.
To do this, we extract all the valid parameter names
and values from the f_globals directory. Then, use
set_pars() to set them all.
Returns True if successful, False if not.
"""
global _img
f = sys._getframe(len(inspect.stack())-2)
f_dict = f.f_globals
# Check through all possible options and
# build options dictionary
opts = _img.opts.to_dict()
user_entered_opts = {}
for k, v in opts.items():
if k in f_dict:
if f_dict[k] == '':
# Set option to default value in _img and namespace
_img.opts.set_default(k)
f_dict[k] = _img.opts.__getattribute__(k)
user_entered_opts.update({k: f_dict[k]})
# Finally, set the options
try:
_img.opts.set_opts(user_entered_opts)
return True
except RuntimeError as err:
# If an opt fails to set, replace its value in the namespace
# with its current value in _img. Then print error so user knows.
err_msg = str(err)
err_msg_trim = err_msg.split('(')[0]
indx1 = err_msg_trim.find('"') + 1
indx2 = err_msg_trim.find('"', indx1)
k = err_msg_trim[indx1:indx2]
orig_opt_val = opts[k]
f_dict[k] = orig_opt_val
print('\033[31;1mERROR\033[0m: ' + err_msg_trim + \
'\nResetting to previous value.')
return False
def _replace_vals_in_namespace(opt_names=None):
"""Replaces opt values in the namespace with the ones in _img.
opt_names - list of option names to replace (can be string if only one)
"""
global _img
f = sys._getframe(len(inspect.stack())-2)
f_dict = f.f_globals
if opt_names is None:
opt_names = _img.opts.get_names()
if isinstance(opt_names, str):
opt_names = [opt_names]
for opt_name in opt_names:
if opt_name in f_dict:
f_dict[opt_name] = _img.opts.__getattribute__(opt_name)
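# Both helpers above reach the interactive namespace by walking the call
# stack. A minimal standalone illustration of the same idea (hypothetical
# helper; the real depth bookkeeping is what the two functions above do):
#
#   import sys, inspect
#
#   def outermost_globals():
#       """Return the globals of the outermost frame on the stack."""
#       return sys._getframe(len(inspect.stack()) - 1).f_globals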
def _set_current_cmd(cmd):
"""Sets information about current command in img.
This function is used to emulate a casapy interface.
"""
global _img
cmd_name = cmd.__name__
doc = cmd.__doc__
_img._current_cmd = cmd
_img._current_cmd_name = cmd_name
_img._current_cmd_desc = cmd_name.upper() + ': ' + doc.split('\n')[0]
_img._current_cmd_arg_list = cmd.arg_list
_img._current_cmd_use_groups = cmd.use_groups
###############################################################################
# Next, we define the tasks such that they may be called directly by
# the user if so desired. These functions simply pass on the user-
# specified arguments to the appropriate Image method. Here we also
# define the detailed doc strings used by help, and, after each task
# definition, we define its list of arguments and whether it should
# use the opts 'group' attribute, both needed when inp is called. If
# a new parameter is added to a task, it needs to be added to opts.py
# and to the list of arguments for the task below (the "arg_list")
# attribute.
def process_image(**kwargs):
"""Find and measure sources in an image.
There are many possible parameters and options for process_image. Use
"inp process_image" to list them. To get more information about a
parameter, use help. E.g.,
> help 'rms_box'
When process_image is executed, PyBDSF performs the following steps in
order:
1. Reads in the image.
2. Calculates basic statistics of the image and stores them in the Image
object. Calculates sensible values of processing parameters and stores
them. First calculates mean and rms, with and without (3-sigma)
clipping, min and max pixel and values, solid angle. Hereafter, rms
indicates the 3-sigma clipped measure. Next, the number of beams per
source is calculated (see help on algorithms for details), using a
sensible estimate of boxsize and stepsize (which can be set using the
rms_box parameter). Finally, the thresholds are set. They can either be
hard-thresholded (by the user or set as 5-sigma for pixel threshold and
3-sigma for island boundaries internally) or can be calculated using the
False Detection Rate (FDR) method using an user defined value for
alpha. If the user does not specify whether hard thresholding or FDR
should be applied, one or the other is chosen internally based on the
ratio of expected false pixels and true pixels (the choice is written
out in the log file).
3. Calculates rms image. 3-sigma clipped rms and mean are calculated
inside boxes of size boxsize in steps of stepsize. Intermediate values
are calculated using bilinear interpolation (it was seen that bicubic
spline did not yield appreciably better results but is also
available). Depending on the resulting statistics (see help on
algorithms for details), we either adopt the rms image or a constant rms
in the following analysis.
4. Identifies islands of contiguous emission. First all pixels greater
than the pixel threshold are identified (and sorted by descending flux
order). Next, starting from each of these pixels, all contiguous pixels
(defined by 8-connectivity, i.e., the surrounding eight pixels) higher
than the island boundary threshold are identified as belonging to one
island, accounting properly for overlaps of islands.
5. Fit multiple gaussians and/or shapelets to each island. For each
island, the subimages of emission and rms are cut out. The number of
multiple gaussians to be fit can be determined by three different
methods (see help on algorithms for details). With initial guesses
corresponding to these peaks, gaussians are simultaneously fit to the
    island using the Levenberg-Marquardt algorithm. Sensible criteria for bad
solutions are defined. If multiple gaussians are fit and one of them is
a bad solution then the number of gaussians is decreased by one and fit
again, till all solutions in the island are good (or zero in number, in
which case its flagged). After the final fit to the island, the
deconvolved size is computed assuming the theoretical beam and the
statistics in the source area and in the island are computed and
stored. Errors on each of the fitted parameters are computed using the
formulae in Condon (1997). Finally all good solutions are written into
the gaussian catalog as an ascii and binary file. If shapelets are
required, the program calculates optimal nmax, beta and the centre, and
stores these and the shapelet coefficients in a file.
"""
global _img
success = _set_pars_from_prompt()
if not success:
return
# Save current command, as it might be overwritten when process
# is called by the user directly and is not the current command.
cur_cmd = _img._current_cmd
# Run process. Note that process automatically picks up options
# from the Image object, so we don't need to get_task_kwargs as
# we do for the other tasks.
success = _img.process(**kwargs)
# Now restore parameters and save to pybdsf.last
if success:
_set_current_cmd(cur_cmd)
tput(quiet=True)
task_list = _img.opts.get_names()
process_image.arg_list = task_list
process_image.use_groups = True
def show_fit(**kwargs):
"""Show results of fit.
Selected plots are displayed to give the user a quick overview of the
results of the fit. The plots may be zoomed, saved to a file, etc. using
the controls at the bottom of the plot window.
In addition, the following commands are available:
Press "i" ........ : Get integrated flux densities and mean rms
values for the visible portion of the image
Press "m" ........ : Change min and max scaling values
Press "n" ........ : Show / hide island IDs
Press "0" ........ : Reset scaling to default
Press "c" ........ : Change source for SED plot
Click Gaussian ... : Print Gaussian and source IDs (zoom_rect mode,
toggled with the "zoom" button and indicated in
the lower right corner, must be off)
The SED plot will also show the chosen source.
Parameters: ch0_image, rms_image, mean_image, ch0_islands,
gresid_image, sresid_image, gmodel_image,
smodel_image, source_seds, ch0_flagged, pi_image,
psf_major, psf_minor, psf_pa, broadcast
For more information about a parameter, use help. E.g.,
> help 'ch0_image'
"""
global _img
success = _set_pars_from_prompt()
if not success:
return
img_kwargs = _get_task_kwargs(show_fit)
for k in kwargs:
# If user enters an argument, use it instead of
# that in _img
img_kwargs[k] = kwargs[k]
try:
success = _img.show_fit(**img_kwargs)
if success:
tput(quiet=True)
except KeyboardInterrupt:
print("\n\033[31;1mAborted\033[0m")
show_fit.arg_list = ['ch0_image', 'rms_image', 'mean_image', 'ch0_islands',
'gresid_image', 'sresid_image', 'gmodel_image',
'smodel_image', 'source_seds', 'ch0_flagged', 'pi_image',
'psf_major', 'psf_minor', 'psf_pa', 'broadcast']
show_fit.use_groups = False
def write_catalog(**kwargs):
"""Write the Gaussian, source, or shapelet list to a file.
The lists can be written in a number of formats. The information
included in the output file varies with the format used. Use
"help 'format'" for more information.
Parameters: outfile, format, srcroot, bbs_patches, incl_chan, clobber,
catalog_type, incl_empty, correct_proj, bbs_patches_mask
For more information about a parameter, use help. E.g.,
> help 'bbs_patches'
"""
global _img
success = _set_pars_from_prompt()
if not success:
return
img_kwargs = _get_task_kwargs(write_catalog)
for k in kwargs:
# If user enters an argument, use it instead of
# that in _img
img_kwargs[k] = kwargs[k]
try:
success = _img.write_catalog(**img_kwargs)
if success:
tput(quiet=True)
except KeyboardInterrupt:
print("\n\033[31;1mAborted\033[0m")
write_catalog.arg_list = ['bbs_patches', 'format', 'outfile', 'srcroot',
'incl_chan', 'clobber', 'catalog_type', 'incl_empty',
'correct_proj', 'bbs_patches_mask']
write_catalog.use_groups = False
def export_image(**kwargs):
"""Write an image to disk.
Parameters: outfile, img_type, img_format, mask_dilation, pad_image, clobber
For more information about a parameter, use help. E.g.,
> help 'img_type'
"""
global _img
success = _set_pars_from_prompt()
if not success:
return
img_kwargs = _get_task_kwargs(export_image)
for k in kwargs:
# If user enters an argument, use it instead of
# that in _img
img_kwargs[k] = kwargs[k]
try:
success = _img.export_image(**img_kwargs)
if success:
tput(quiet=True)
except KeyboardInterrupt:
print("\n\033[31;1mAborted\033[0m")
export_image.arg_list = ['outfile', 'img_type', 'img_format', 'mask_dilation',
'pad_image', 'clobber']
export_image.use_groups = False
def _get_task_kwargs(task):
"""Returns dictionary of keyword arguments from _img for the given task."""
global _img
arg_list = task.arg_list
kwargs = {}
for a in arg_list:
kwargs.update({a: _img.opts.__getattribute__(a)})
return kwargs
###############################################################################
# Customize the help system for PyBDSF. The user can type "help task" to get
# help on a task (it prints the doc string) or "help 'opt'" to get help on
# a option (it prints the doc string defined in opts.py).
class bdsmDocHelper(pydoc.Helper):
def help(self, request):
global _img
topbar = '_' * 72 + '\n' # 72-character divider
if hasattr(request, '__name__'):
pydoc.pager(topbar + 'Help on ' + pydoc.text.bold(request.__name__)
+ ':\n\n' + pydoc.getdoc(request))
else:
opts = _img.opts.__class__.__dict__
try:
opt = opts[request]
desc_list = str(opt.doc()).split('\n')
desc = '\n\n'.join(desc_list)
default_val = opt._default
if isinstance(default_val, str):
valstr = "'" + default_val + "'"
else:
valstr = str(default_val)
default_val_text = 'Default value: ' + valstr
if opt.group() is not None and opt.group() != 'hidden':
group_text = '\nBelongs to group: ' + opt.group()
else:
group_text = ''
desc_text = bdsf.interface.wrap(desc, 72)
desc_text = '\n'.join(desc_text)
pydoc.pager(topbar + 'Help on the ' + pydoc.text.bold(request)
+ ' parameter:\n\n' + default_val_text
+ group_text
+ '\n\n' + desc_text)
except(KeyError):
print("Parameter '" + request + "' not recognized.")
pydoc.help = bdsmDocHelper(sys.stdin, sys.stdout)
###############################################################################
# Now run the IPython shell with this namespace and a customized autocompleter.
# The custom autocompleter is below. It adds task, command, and option names and
# a few common values to ipython's autocompleter. It also adds files in the
# local directory when they might be needed (but only if the user has started
# to enter a string -- this behavior is to help avoid entering filenames as
# non-strings; this is also done for the help autocomplete).
def _opts_completer(self, event):
""" Returns a list of strings with possible completions."""
import os
import glob
from bdsf.image import Image
img = Image({'filename':''})
opts = img.opts.get_names()
# Split the command entered by user when TAB was pressed
# and check for up to three components (from e.g. "par = val",
# which gives cmd1 = "par", cmd2 = "=", and cmd3 = "val")
cmd1 = (event.line).rsplit(None)[0]
if len((event.line).rsplit(None)) > 1:
cmd2 = (event.line).rsplit(None)[1]
else:
cmd2 = ''
if len((event.line).rsplit(None)) > 2:
cmd3 = (event.line).rsplit(None)[2]
else:
cmd3 = ''
# First, check to see if user has entered a parameter name
# and an equals sign. If so, check parameter type. If Enum
# or Option, match only to the allowable values.
# Allowable values are available from v._type.values if v is
# type Enum (v has no attribute _type.values if not).
if "=" in cmd1 or "=" in cmd2:
par_vals = []
if "=" in cmd1:
cmd3 = cmd1.split('=')[1]
cmd1 = cmd1.split('=')[0]
if cmd1 in opts:
from bdsf.tc import tcEnum, tcOption
v = img.opts.__class__.__dict__[cmd1]
partype = v._type
if isinstance(partype, tcOption):
par_vals = ['None']
elif isinstance(partype, tcEnum):
if ('"' in cmd2 or "'" in cmd2 or
'"' in cmd3 or "'" in cmd3):
par_vals = v._type.values
if not isinstance(par_vals, list):
par_vals = list(par_vals)
if None in par_vals:
# Remove None from list
pindx = par_vals.index(None)
par_vals.pop(pindx)
else:
if None in v._type.values:
par_vals.append('None')
if True in v._type.values:
par_vals.append('True')
if False in v._type.values:
par_vals.append('False')
elif v._default == True or v._default == False:
par_vals = ['True', 'False']
if cmd1 == 'filename' or cmd1 == 'outfile':
if ('"' in cmd2 or "'" in cmd2 or
'"' in cmd3 or "'" in cmd3):
# Also add files in current directory
found = [f.replace('\\','/') for f in glob.glob('*')]
if len(found) > 0:
for fnd in found:
par_vals.append(fnd)
return par_vals
elif cmd1 == 'inp' or cmd1 == 'go':
# Match task names only
cmds = ['process_image', 'write_catalog', 'export_image', 'show_fit']
return cmds
elif cmd1 == 'cd' or cmd1 == 'tput' or cmd1 == 'tget' or '!' in cmd1:
# Match to files in current directory (force use of ' or " with
# tput and tget, as filename must be a string).
files = []
found = [f.replace('\\','/') for f in glob.glob('*')]
if len(found) > 0:
for fnd in found:
files.append(fnd)
        if (cmd1 == 'tput' or cmd1 == 'tget') and not ('"' in cmd2 or
                                                       "'" in cmd2):
# User has not (yet) started to enter a string, so don't
# return filenames
return []
return files
elif cmd1 == 'help':
if '"' in cmd2 or "'" in cmd2:
# User has started to enter a string:
# Match to parameter names, as they must be strings
par_vals = opts
return par_vals
else:
# User has not started to enter a string:
# Match to commands + tasks only
cmds = ['process_image', 'write_catalog', 'export_image',
'show_fit', 'go', 'inp', 'tget', 'tput', 'default',
'changelog']
return cmds
else:
# Match to parameter, task, and command names only
# Add command names
opts.append('inp')
opts.append('go')
opts.append('tget')
opts.append('tput')
opts.append('default')
opts.append('help')
# Add task names
opts.append('process_image')
opts.append('show_fit')
opts.append('write_catalog')
opts.append('export_image')
return opts
def main():
    # Define the welcome banner to print on startup.
from bdsf._changelog import changelog
from bdsf._version import __version__
divider1 = '=' * 72 + '\n'
divider2 = '_' * 72 + '\n'
banner = '\nPyBDSF version ' + __version__ + '\n'\
+ divider1 + 'PyBDSF commands\n'\
' inp task ............ : Set current task and list parameters\n'\
" par = val ........... : Set a parameter (par = '' sets it to default)\n"\
' Autocomplete (with TAB) works for par and val\n'\
' go .................. : Run the current task\n'\
' default ............. : Set current task parameters to default values\n'\
" tput ................ : Save parameter values\n"\
" tget ................ : Load parameter values\n"\
'PyBDSF tasks\n'\
' process_image ....... : Process an image: find sources, etc.\n'\
' show_fit ............ : Show the results of a fit\n'\
' write_catalog ....... : Write out list of sources to a file\n'\
' export_image ........ : Write residual/model/rms/mean image to a file\n'\
'PyBDSF help\n'\
' help command/task ... : Get help on a command or task\n'\
' (e.g., help process_image)\n'\
" help 'par' .......... : Get help on a parameter (e.g., help 'rms_box')\n"\
' help changelog ...... : See list of recent changes\n'\
+ divider2
# Go ahead and set the current task to process_image, so that the user does not
# need to enter "inp process_image" as the first step (the first task needed
# after startup will almost always be process_image).
_set_current_cmd(process_image)
from IPython.terminal.embed import InteractiveShellEmbed
# Use the traitlets config
from traitlets.config.loader import Config
from IPython.terminal.prompts import Prompts, Token
cfg = Config()
class CustomPrompt(Prompts):
def in_prompt_tokens(self, cli=None):
return [
(Token.Prompt, 'BDSF ['),
(Token.PromptNum, str(self.shell.execution_count)),
(Token.Prompt, ']: '),
]
def out_prompt_tokens(self):
return [
(Token.OutPrompt, ''),
]
cfg.TerminalInteractiveShell.prompts_class = CustomPrompt
cfg.InteractiveShellEmbed.autocall = 2
user_ns = globals()
user_ns.update(locals())
ipshell = InteractiveShellEmbed(config=cfg, banner1=banner,
user_ns=user_ns)
ipshell.set_hook('complete_command', _opts_completer, re_key = '.*')
ipshell()
PyBDSF-1.11.0/bdsf/readimage.py 0000664 0000000 0000000 00000061464 14650706641 0016053 0 ustar 00root root 0000000 0000000 """Module readimage.
Defines operation Op_readimage, which initializes the image and WCS.
The current implementation tries to reduce the input file to 2D if
possible, as this currently makes the most sense. One more important thing
to note: in its default configuration, pyfits reads data
in non-native format, so we have to convert it before use. See
read_image_from_file in functions.py for details.
Lastly, WCS and spectral information are stored in the WCS
object img.wcs_obj.
"""
from __future__ import absolute_import
import numpy as N
from .image import *
from . import functions as func
from . import mylogger
import sys
import shutil
import tempfile
import time
import os
class Op_readimage(Op):
"""Image file loader
Loads image and configures wcslib machinery for it.
"""
def __call__(self, img):
mylog = mylogger.logging.getLogger("PyBDSM." + img.log + "Readimage")
if img.opts.filename == '':
raise RuntimeError('Image file name not specified.')
# Check for trailing "/" in file name (since CASA images are directories).
# Although the general rule is to not alter the values in opts (only the
# user should be able to alter these), in this case there is no harm in
# replacing the file name in opts with the '/' trimmed off.
if img.opts.filename[-1] == '/':
img.opts.filename = img.opts.filename[:-1]
img.filename = img.opts.filename
# Determine indir if not explicitly given by user (in img.opts.indir)
if img.opts.indir is None:
indir = os.path.dirname(img.filename)
if indir == '':
indir = './'
img.indir = indir
else:
img.indir = img.opts.indir
# Set up output paths, etc.
parentname, basedir = func.set_up_output_paths(img.opts)
img.parentname = parentname # root name for constructing output files
img.imagename = img.parentname + '.pybdsf' # root name of output images (e.g., rms image)
img.outdir = basedir # path of parent output directory
img.basedir = os.path.join(basedir, img.parentname+'_pybdsf') # used for opts.output_all
if img.opts.solnname is not None:
# Add solname (if any) to basedir
img.basedir += img.opts.solnname
# Read in data and header
img.use_io = ''
image_file = os.path.basename(img.opts.filename)
result = func.read_image_from_file(image_file, img, img.indir)
if result is None:
raise RuntimeError("Cannot open file " + repr(image_file) + ". " + img._reason)
else:
data, hdr = result
# Check whether caching is to be used. If it is, set up a
# temporary directory. The temporary directory will be
# removed automatically upon exit.
        img.do_cache = bool(img.opts.do_cache)
if img.do_cache:
mylog.info('Using disk caching.')
tmpdir = os.path.join(img.outdir, img.parentname+'_tmp')
if not os.path.exists(tmpdir):
os.makedirs(tmpdir)
img._tempdir_parent = TempDir(tmpdir)
img.tempdir = TempDir(tempfile.mkdtemp(dir=tmpdir))
            import atexit
atexit.register(shutil.rmtree, img._tempdir_parent, ignore_errors=True)
else:
img.tempdir = None
# Store data and header in img. If polarisation_do = False, only store pol == 'I'
img.nchan = data.shape[1]
img.nstokes = data.shape[0]
mylogger.userinfo(mylog, 'Image size',
str(data.shape[-2:]) + ' pixels')
mylogger.userinfo(mylog, 'Number of channels',
'%i' % data.shape[1])
mylogger.userinfo(mylog, 'Number of Stokes parameters',
'%i' % data.shape[0])
if img.opts.polarisation_do and data.shape[0] == 1:
img.opts.polarisation_do = False
mylog.warning('Image has Stokes I only. Polarisation module disabled.')
if img.opts.polarisation_do or data.shape[0] == 1:
img.image_arr = data
else:
img.image_arr = data[0, :].reshape(1, data.shape[1], data.shape[2], data.shape[3])
img.header = hdr
img.shape = data.shape
img.j = 0
### initialize wcs conversion routines
self.init_wcs(img)
self.init_beam(img)
self.init_freq(img)
year, code = self.get_equinox(img)
if year is None:
mylog.info('Equinox not found in image header. Assuming J2000.')
img.equinox = 2000.0
else:
mylog.info('Equinox of image is %f.' % year)
img.equinox = year
if img.opts.output_all:
# Set up directory to write output to
opdir = img.opts.opdir_overwrite
if opdir not in ['overwrite', 'append']:
img.opts.opdir_overwrite = 'append'
if opdir == 'append':
mylog.info('Appending output files to directory ' + img.basedir)
img.basedir = os.path.join(img.basedir, time.strftime("%d%b%Y_%H.%M.%S"))
else:
mylog.info('Overwriting output files (if any) in directory ' + img.basedir)
if os.path.isdir(img.basedir):
os.system("rm -fr " + img.basedir + '/*')
# Make the final output directory
if not os.path.exists(img.basedir):
os.makedirs(img.basedir)
del data
img.completed_Ops.append('readimage')
return img
def init_wcs(self, img):
"""Initialize wcs pixel <=> sky conversion routines.
"""
from math import pi
import warnings
hdr = img.header
try:
from astropy.wcs import WCS
from astropy.wcs import FITSFixedWarning
with warnings.catch_warnings():
warnings.filterwarnings("ignore",category=DeprecationWarning)
warnings.filterwarnings("ignore",category=FITSFixedWarning)
t = WCS(hdr)
t.wcs.fix()
except ImportError as err:
import warnings
with warnings.catch_warnings():
warnings.filterwarnings("ignore",category=DeprecationWarning)
from pywcs import WCS
t = WCS(hdr)
t.wcs.fix()
acdelt = [abs(hdr['cdelt1']), abs(hdr['cdelt2'])]
# Here we define p2s and s2p to allow celestial coordinate
# transformations. Transformations for other axes (e.g.,
        # spectral) are stripped out.
def p2s(self, xy):
xy = list(xy)
for i in range(self.naxis-2):
xy.append(0)
if hasattr(self, 'wcs_pix2world'):
try:
xy_arr = N.array([xy[0:2]])
sky = self.wcs_pix2world(xy_arr, 0)
                except Exception:
xy_arr = N.array([xy])
sky = self.wcs_pix2world(xy_arr, 0)
else:
xy_arr = N.array([xy])
sky = self.wcs_pix2sky(xy_arr, 0)
return sky.tolist()[0][0:2]
def s2p(self, rd):
rd = list(rd)
for i in range(self.naxis-2):
rd.append(1) # For some reason, 0 gives nans with astropy in some situations
if hasattr(self, 'wcs_world2pix'):
try:
rd_arr = N.array([rd[0:2]])
pix = self.wcs_world2pix(rd_arr, 0)
                except Exception:
rd_arr = N.array([rd])
pix = self.wcs_world2pix(rd_arr, 0)
else:
rd_arr = N.array([rd])
pix = self.wcs_sky2pix(rd_arr, 0)
return pix.tolist()[0][0:2]
# Here we define functions to transform Gaussian parameters (major axis,
# minor axis, pos. angle) from the image plane to the celestial sphere.
# These transforms are valid only at the Gaussian's center and ignore
# any change across the extent of the Gaussian.
def gaus2pix(x, location=None, use_wcs=True):
""" Converts Gaussian parameters in deg to pixels.
x - (maj [deg], min [deg], pa [deg])
location - specifies the location in pixels (x, y) for which
transform is desired
Input beam angle should be degrees CCW from North.
The output beam angle is degrees CCW from the +y axis of the image.
"""
if use_wcs:
bmaj, bmin, bpa = x
brot = self.get_rot(img, location) # rotation delta CCW (in degrees) between N and +y axis of image
s1 = self.angdist2pixdist(img, bmaj, bpa, location=location)
s2 = self.angdist2pixdist(img, bmin, bpa + 90.0, location=location)
th = bpa + brot
s1, s2, th = func.fix_gaussian_axes(s1, s2, th)
return (s1, s2, th)
else:
return img.beam2pix(x)
def pix2gaus(x, location=None, use_wcs=True, is_error=False):
""" Converts Gaussian parameters in pixels to deg.
x - (maj [pix], min [pix], pa [deg])
location - specifies the location in pixels (x, y) for which
transform is desired
Input beam angle should be degrees CCW from the +y axis of the image.
The output beam angle is degrees CCW from North.
Set is_error = True when x contains the errors on the parameters instead of
the parameters themselves.
"""
if use_wcs:
s1, s2, th = x
if s1 == 0.0 and s2 == 0.0:
return (0.0, 0.0, 0.0)
bmaj = self.pixdist2angdist(img, s1, th, location=location)
bmin = self.pixdist2angdist(img, s2, th + 90.0, location=location)
bpa = th
if not is_error:
# Adjust the PA by the rotation delta and fix cases where
# major and minor axes are swapped
brot = self.get_rot(img, location) # rotation delta CCW (in degrees) between N and +y axis of image
bpa = th - brot
bmaj, bmin, bpa = func.fix_gaussian_axes(bmaj, bmin, bpa)
return (bmaj, bmin, bpa)
else:
return img.pix2beam(x, is_error=is_error)
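        # Illustrative round trip (values hypothetical): a 30" x 20" Gaussian
        # at PA 10 deg, converted to pixel units and back at pixel location (x, y):
        #   s1, s2, th = gaus2pix((30./3600., 20./3600., 10.), location=(x, y))
        #   maj, min, pa = pix2gaus((s1, s2, th), location=(x, y))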
def pix2coord(pix, location=None, use_wcs=True):
"""Converts size along x and y (in pixels) to size in RA and Dec (in degrees)
Currently, this function is only used to convert errors on x, y position
to errors in RA and Dec.
"""
if use_wcs:
# Account for projection effects
x, y = pix
brot = self.get_rot(img, location) # rotation delta CCW (in degrees) between N and +y axis of image
ra_dist_pix = N.sqrt( (x * N.cos(brot * N.pi / 180.0))**2 + (y * N.sin(brot * N.pi / 180.0))**2 )
dec_dist_pix = N.sqrt( (x * N.sin(brot * N.pi / 180.0))**2 + (y * N.cos(brot * N.pi / 180.0))**2 )
s1 = self.pixdist2angdist(img, ra_dist_pix, 90.0 - brot, location=location)
s2 = self.pixdist2angdist(img, dec_dist_pix, 0.0 - brot, location=location)
else:
x, y = pix
                # acdelt (defined above) holds (|cdelt1|, |cdelt2|)
                s1 = abs(x) * acdelt[0]
                s2 = abs(y) * acdelt[1]
return (s1, s2)
if hasattr(t, 'wcs_pix2world'):
instancemethod = type(t.wcs_pix2world)
else:
instancemethod = type(t.wcs_pix2sky)
if sys.version_info[0] > 2:
t.p2s = instancemethod(p2s, t)
else:
t.p2s = instancemethod(p2s, t, WCS)
if hasattr(t, 'wcs_world2pix'):
instancemethod = type(t.wcs_world2pix)
else:
instancemethod = type(t.wcs_sky2pix)
if sys.version_info[0] > 2:
t.s2p = instancemethod(s2p, t)
else:
t.s2p = instancemethod(s2p, t, WCS)
img.wcs_obj = t
img.wcs_obj.acdelt = acdelt
img.pix2sky = t.p2s
img.sky2pix = t.s2p
img.gaus2pix = gaus2pix
img.pix2gaus = pix2gaus
img.pix2coord = pix2coord
def init_beam(self, img):
"""Initialize beam parameters, and conversion routines
to convert beam to/from pixel coordinates"""
from .const import fwsig
mylog = mylogger.logging.getLogger("PyBDSM.InitBeam")
hdr = img.header
cdelt1, cdelt2 = img.wcs_obj.acdelt[0:2]
### define beam conversion routines:
def beam2pix(x):
""" Converts beam in deg to pixels. Use when no dependence on
position is appropriate.
Input beam angle should be degrees CCW from North at image center.
The output beam angle is degrees CCW from the +y axis of the image.
"""
bmaj, bmin, bpa = x
s1 = abs(bmaj / cdelt1)
s2 = abs(bmin / cdelt2)
th = bpa
return (s1, s2, th)
def pix2beam(x, is_error=False):
""" Converts beam in pixels to deg. Use when no dependence on
position is appropriate.
Input beam angle should be degrees CCW from the +y axis of the image.
The output beam angle is degrees CCW from North at image center.
Set is_error = True when x contains the errors on the parameters instead of
the parameters themselves.
"""
s1, s2, th = x
bmaj = abs(s1 * cdelt1)
bmin = abs(s2 * cdelt2)
bpa = th
if not is_error:
bmaj, bmin, bpa = func.fix_gaussian_axes(bmaj, bmin, bpa)
return [bmaj, bmin, bpa]
def pixel_beam():
"""Returns the beam in sigma units in pixels"""
pbeam = beam2pix(img.beam)
return (pbeam[0]/fwsig, pbeam[1]/fwsig, pbeam[2])
def pixel_beamarea():
"""Returns the beam area in pixels"""
pbeam = beam2pix(img.beam)
return 1.1331 * pbeam[0] * pbeam[1]
### Get the beam information from the header
found = False
if img.opts.beam is not None:
beam = img.opts.beam
else:
try:
beam = (hdr['BMAJ'], hdr['BMIN'], hdr['BPA'])
found = True
            except Exception:
                ### try to see if AIPS has put the beam in HISTORY, as it usually does
for h in hdr['HISTORY']:
# Check if h is a string or a FITS Card object (long headers are
# split into Cards as of PyFITS 3.0.4)
if not isinstance(h, str):
hstr = h.value
else:
hstr = h
if N.all(['BMAJ' in hstr, 'BMIN' in hstr, 'BPA' in hstr, 'CLEAN' in hstr]):
try:
dum, dum, dum, bmaj, dum, bmin, dum, bpa = hstr.split()
except ValueError:
try:
dum, dum, bmaj, dum, bmin, dum, bpa, dum, dum = hstr.split()
except ValueError:
break
beam = (float(bmaj), float(bmin), float(bpa))
found = True
if not found: raise RuntimeError("No beam information found in image header.")
### convert beam into pixels (at image center)
pbeam = beam2pix(beam)
pbeam = (pbeam[0] / fwsig, pbeam[1] / fwsig, pbeam[2]) # IN SIGMA UNITS
### and store it
img.pix2beam = pix2beam
img.beam2pix = beam2pix
img.beam = beam # FWHM size in degrees
img.pixel_beam = pixel_beam # IN SIGMA UNITS in pixels
img.pixel_beamarea = pixel_beamarea
mylogger.userinfo(mylog, 'Beam shape (major, minor, pos angle)',
'(%.5e, %.5e, %s) degrees' % (beam[0], beam[1],
round(beam[2], 1)))
def init_freq(self, img):
"""Initialize frequency parameters and store them.
Basically, PyBDSM uses two frequency parameters:
img.frequency - the reference frequency in Hz of the ch0 image
img.freq_pars - the crval, crpix, and cdelt values for the
frequency axis in Hz
If the input frequency info (in the WCS) is not in Hz, it is
converted.
"""
try:
from astropy.wcs import WCS
except ImportError as err:
import warnings
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
from pywcs import WCS
mylog = mylogger.logging.getLogger("PyBDSM.InitFreq")
if img.opts.frequency_sp is not None and img.image_arr.shape[1] > 1:
# If user specifies multiple frequencies, then let
# collapse.py do the initialization
img.frequency = img.opts.frequency_sp[0]
img.freq_pars = (0.0, 0.0, 0.0)
mylog.info('Using user-specified frequencies.')
elif img.opts.frequency is not None and img.image_arr.shape[1] == 1:
img.frequency = img.opts.frequency
img.freq_pars = (img.frequency, 0.0, 0.0)
mylog.info('Using user-specified frequency.')
else:
spec_indx = img.wcs_obj.wcs.spec
if spec_indx == -1:
# No frequency axis; check header instead
hdr = img.header
if 'RESTFREQ' in hdr:
img.frequency = hdr['RESTFREQ']
img.freq_pars = (img.frequency, 0.0, 0.0)
elif 'FREQ' in hdr:
img.frequency = hdr['FREQ']
img.freq_pars = (img.frequency, 0.0, 0.0)
else:
raise RuntimeError('No frequency information found in image header.')
else:
# Here we define p2f and f2p to allow pixel to frequency
# transformations. Transformations for other axes (e.g.,
                # celestial) are stripped out.
#
# First, convert frequency to Hz if needed:
img.wcs_obj.wcs.sptr('FREQ-???')
def p2f(self, spec_pix):
spec_list = [0] * self.naxis
spec_list[spec_indx] = spec_pix
spec_pix_arr = N.array([spec_list])
if hasattr(self, 'wcs_pix2world'):
freq = self.wcs_pix2world(spec_pix_arr, 0)
else:
freq = self.wcs_pix2sky(spec_pix_arr, 0)
return freq.tolist()[0][spec_indx]
def f2p(self, freq):
freq_list = [0] * self.naxis
freq_list[spec_indx] = freq
freq_arr = N.array([freq_list])
if hasattr(self, 'wcs_world2pix'):
pix = self.wcs_world2pix(freq_arr, 0)
else:
pix = self.wcs_sky2pix(freq_arr, 0)
return pix.tolist()[0][spec_indx]
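                # Illustrative use, once bound below: img.wcs_obj.p2f(i) gives
                # the frequency in Hz of channel i; img.wcs_obj.f2p(freq) is
                # the inverse transform.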
if hasattr(img.wcs_obj, 'wcs_pix2world'):
instancemethod = type(img.wcs_obj.wcs_pix2world)
else:
instancemethod = type(img.wcs_obj.wcs_pix2sky)
if sys.version_info[0] > 2:
img.wcs_obj.p2f = instancemethod(p2f, img.wcs_obj)
else:
img.wcs_obj.p2f = instancemethod(p2f, img.wcs_obj, WCS)
if hasattr(img.wcs_obj, 'wcs_world2pix'):
instancemethod = type(img.wcs_obj.wcs_world2pix)
else:
instancemethod = type(img.wcs_obj.wcs_sky2pix)
if sys.version_info[0] > 2:
img.wcs_obj.f2p = instancemethod(f2p, img.wcs_obj)
else:
img.wcs_obj.f2p = instancemethod(f2p, img.wcs_obj, WCS)
if img.opts.frequency is not None:
img.frequency = img.opts.frequency
else:
img.frequency = img.wcs_obj.p2f(0)
def get_equinox(self, img):
"""Gets the equinox from the header.
Returns float year with code, where code is:
        -1 - EQUINOX, EPOCH or RADECSYS keyword not found in header
         0 - EQUINOX found as a numeric value
         1 - EPOCH keyword used for equinox (not recommended)
         2 - EQUINOX found as 'B1950'
         3 - EQUINOX found as 'J2000'
         4 - EQUINOX derived from value of RADECSYS keyword
             ('ICRS', 'FK5' ==> 2000.0; 'FK4' ==> 1950.0)
"""
code = -1
year = None
hdr = img.header
if 'EQUINOX' in hdr:
year = hdr['EQUINOX']
if isinstance(year, str): # Check for 'J2000' or 'B1950' values
tst = year[:1]
if (tst == 'J') or (tst == 'B'):
year = float(year[1:])
if tst == 'J': code = 3
if tst == 'B': code = 2
else:
code = 0
else:
if 'EPOCH' in hdr: # Check EPOCH if EQUINOX not found
year = float(hdr['EPOCH'])
code = 1
else:
if 'RADECSYS' in hdr:
                    radecsys = hdr['RADECSYS']
                    code = 4
                    if radecsys[:3] == 'ICR': year = 2000.0
                    if radecsys[:3] == 'FK5': year = 2000.0
                    if radecsys[:3] == 'FK4': year = 1950.0
return year, code
def get_rot(self, img, location=None):
"""Returns CCW rotation angle (in degrees) between N and +y axis of image
location specifies the location in pixels (x, y) for which angle is desired
"""
if location is None:
x1 = img.image_arr.shape[2] / 2.0
y1 = img.image_arr.shape[3] / 2.0
else:
x1, y1 = location
ra, dec = img.pix2sky([x1, y1])
delta_dec = self.pixdist2angdist(img, 1.0, 0.0, location=[x1, y1]) # approx. size in degrees of 1 pixel
if dec + delta_dec > 90.0:
# shift towards south instead
delta_dec *= -1.0
x2, y2 = img.sky2pix([ra, dec + delta_dec])
try:
rot_ang_rad = N.arctan2(y2-y1, x2-x1) - N.pi / 2.0
if delta_dec < 0.0:
rot_ang_rad -= N.pi
        except Exception:
rot_ang_rad = 0.0
return rot_ang_rad * 180.0 / N.pi
def angdist2pixdist(self, img, angdist, pa, location=None):
"""Returns the distance in pixels for a given angular distance in degrees
pa - position angle in degrees east of north
location - x and y location of center
"""
if location is None:
x1 = int(img.image_arr.shape[2] / 2.0)
y1 = int(img.image_arr.shape[3] / 2.0)
else:
x1, y1 = location
pa_pix = self.get_rot(img, location)
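        # Sample two points 10 pixels on either side of the location along
        # the given position angle; the ratio of their angular to pixel
        # separation gives the local plate scale along that direction.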
x0 = x1 - 10.0 * N.sin( (pa + pa_pix) * N.pi / 180.0 )
y0 = y1 - 10.0 * N.cos( (pa + pa_pix) * N.pi / 180.0 )
ra0, dec0 = img.pix2sky([x0, y0])
x2 = x1 + 10.0 * N.sin( (pa + pa_pix) * N.pi / 180.0 )
y2 = y1 + 10.0 * N.cos( (pa + pa_pix) * N.pi / 180.0 )
ra2, dec2 = img.pix2sky([x2, y2])
angdist12 = func.angsep(ra0, dec0, ra2, dec2) # degrees
pixdist12 = N.sqrt( (x0 - x2)**2 + (y0 - y2)**2 ) # pixels
if angdist12 > 0.0:
result = angdist * pixdist12 / angdist12
if N.isnan(result) or result <= 0.0:
result = N.mean(img.wcs_obj.acdelt[0:2])
else:
result = N.mean(img.wcs_obj.acdelt[0:2])
return result
def pixdist2angdist(self, img, pixdist, pa, location=None):
"""Returns the angular distance in degrees for a given distance in pixels
pa - position angle in degrees CCW from +y axis
location - x and y location of center
"""
if location is None:
x1 = int(img.image_arr.shape[2] / 2.0)
y1 = int(img.image_arr.shape[3] / 2.0)
else:
x1, y1 = location
x0 = x1 - pixdist / 2.0 * N.sin(pa * N.pi / 180.0)
y0 = y1 - pixdist / 2.0 * N.cos(pa * N.pi / 180.0)
ra0, dec0 = img.pix2sky([x0, y0])
x2 = x1 + pixdist / 2.0 * N.sin(pa * N.pi / 180.0)
y2 = y1 + pixdist / 2.0 * N.cos(pa * N.pi / 180.0)
ra2, dec2 = img.pix2sky([x2, y2])
angdist12 = func.angsep(ra0, dec0, ra2, dec2) # degrees
return angdist12
class TempDir(str):
"""Container for temporary directory for image caching.
Directory is deleted when garbage collected/zero references """
def __del__(self):
import os
if os.path.exists(self.__str__()):
shutil.rmtree(self.__str__())
PyBDSF-1.11.0/bdsf/rmsimage.py 0000664 0000000 0000000 00000135516 14650706641 0015741 0 ustar 00root root 0000000 0000000 """Module rmsimage.
Defines operation Op_rmsimage which calculates mean and
rms maps.
The current implementation will handle both 2D and 3D images;
for the 3D case, it will calculate maps for each plane (i.e.,
Stokes images).
"""
from __future__ import absolute_import
import numpy as N
import scipy.ndimage as nd
from . import _cbdsm
from .image import Op, Image, NArray, List
from . import const
from . import mylogger
import os
from . import functions as func
from . import multi_proc as mp
import itertools
try:
from itertools import izip as zip
except ImportError: # will be 3.x series
pass
from .functions import read_image_from_file
class Op_rmsimage(Op):
"""Calculate rms & noise maps
Prerequisites: Module preprocess should be run first.
"""
def __call__(self, img):
mylog = mylogger.logging.getLogger("PyBDSM."+img.log+"RMSimage")
mylogger.userinfo(mylog, "Calculating background rms and mean images")
if img.opts.polarisation_do:
pols = ['I', 'Q', 'U', 'V']
ch0_images = [img.ch0_arr, img.ch0_Q_arr, img.ch0_U_arr, img.ch0_V_arr]
cmeans = [img.clipped_mean] + img.clipped_mean_QUV
crmss = [img.clipped_rms] + img.clipped_rms_QUV
else:
pols = ['I'] # assume I is always present
ch0_images = [img.ch0_arr]
cmeans = [img.clipped_mean]
crmss = [img.clipped_rms]
mask = img.mask_arr
opts = img.opts
cdelt = N.array(img.wcs_obj.acdelt[:2])
# Determine box size for rms/mean map calculations.
# If user specifies rms_box, use it. Otherwise, use either an
# adaptive binning scheme that shrinks the box near
# the brightest sources or estimate rms_box from bright sources.
#
# The adaptive scheme calculates the rms/mean map
# at two different scales:
# 1) using a large rms_box, set by size of largest source
# 2) using a small rms_box, set by size of largest bright source
# Then, the rms and mean values at a given point are determined
# by a weighted average of the values in the maps at the two
# scales.
fwsig = const.fwsig
min_adapt_threshold = 10.0
if opts.adaptive_thresh is None:
adapt_thresh = 50.0
start_thresh = 500.0
else:
adapt_thresh = opts.adaptive_thresh
if adapt_thresh < min_adapt_threshold:
adapt_thresh = min_adapt_threshold
opts.adaptive_thresh = min_adapt_threshold
start_thresh = adapt_thresh
brightsize = None
isl_pos = []
do_adapt = img.opts.adaptive_rms_box
img.use_rms_map = None
img.mean_map_type = None
        # 'size' of brightest source, estimated from the SNR of the maximum
        # pixel value; falls back to the beam size alone if the estimate
        # fails (e.g., for a non-positive argument to the log)
        from math import sqrt, log
        kappa1 = 3.0
        try:
            brightsize = int(round(2.*img.beam[0]/cdelt[0]/fwsig*
                                   sqrt(2.*log(img.max_value/(kappa1*crmss[0])))))
        except (ValueError, ZeroDivisionError):
            brightsize = int(round(2.*img.beam[0]/cdelt[0]/fwsig))
mylog.info('Estimated size of brightest source (pixels) = '+str(brightsize))
# Using clipped mean and rms and a starting threshold of 500 sigma,
# search for bright sources. If fewer than 5 are found, reduce
# threshold until limit set by adapt_thresh is hit.
cmean = cmeans[0]
crms = crmss[0]
image = ch0_images[0]
shape = image.shape
isl_size_bright = []
isl_area_highthresh = []
isl_peak = []
max_isl_brightsize = 0.0
threshold = start_thresh
if do_adapt:
mylogger.userinfo(mylog, "Using adaptive scaling of rms_box")
while len(isl_size_bright) < 5 and threshold >= adapt_thresh:
isl_size_bright=[]
isl_maxposn = []
if img.masked:
act_pixels = ~(mask.copy())
act_pixels[~mask] = (image[~mask]-cmean)/threshold >= crms
else:
act_pixels = (image-cmean)/threshold >= crms
threshold *= 0.8
rank = len(image.shape)
connectivity = nd.generate_binary_structure(rank, rank)
labels, count = nd.label(act_pixels, connectivity)
slices = nd.find_objects(labels)
for idx, s in enumerate(slices):
isl_size_bright.append(max([s[0].stop-s[0].start, s[1].stop-s[1].start]))
size_area = (labels[s] == idx+1).sum()/img.pixel_beamarea()*2.0
isl_area_highthresh.append(size_area)
isl_maxposn.append(tuple(N.array(N.unravel_index(N.argmax(image[s]), image[s].shape))+\
N.array((s[0].start, s[1].start))))
isl_peak.append(nd.maximum(image[s], labels[s], idx+1))
# Check islands found above at thresh_isl threshold to determine if
# the bright source is embedded inside a large island or not. If it is,
# exclude it from the bright-island list. Also find the size of the
# largest island at this threshold to set the large-scale rms_box
bright_threshold = threshold
threshold = 10.0
if img.masked:
act_pixels = ~(mask.copy())
act_pixels[~mask] = (image[~mask]-cmean)/threshold >= crms
else:
act_pixels = (image-cmean)/threshold >= crms
rank = len(image.shape)
connectivity = nd.generate_binary_structure(rank, rank)
labels, count = nd.label(act_pixels, connectivity)
slices = nd.find_objects(labels)
isl_size = []
isl_size_highthresh = []
isl_size_lowthresh = []
isl_snr = []
thratio = threshold/bright_threshold
for idx, s in enumerate(slices):
isl_area_lowthresh = (labels[s] == idx+1).sum()/img.pixel_beamarea()*2.0
isl_maxposn_lowthresh = tuple(N.array(N.unravel_index(N.argmax(image[s]), image[s].shape))+
N.array((s[0].start, s[1].start)))
isl_size += [s[0].stop-s[0].start, s[1].stop-s[1].start]
if do_adapt and isl_maxposn_lowthresh in isl_maxposn:
bright_indx = isl_maxposn.index(isl_maxposn_lowthresh)
if isl_area_lowthresh < 25.0 or isl_area_lowthresh/isl_area_highthresh[bright_indx] < 8.0:
isl_pos.append(isl_maxposn_lowthresh)
isl_size_lowthresh.append(max([s[0].stop-s[0].start, s[1].stop-s[1].start]))
isl_size_highthresh.append(isl_size_bright[bright_indx])
isl_snr.append(isl_peak[bright_indx]/crms)
if len(isl_size) == 0:
max_isl_size = 0.0
else:
max_isl_size = max(isl_size)
mylog.info('Maximum extent of largest 10-sigma island using clipped rms (pixels) = '+str(max_isl_size))
if len(isl_size_highthresh) == 0:
max_isl_size_highthresh = 0.0
max_isl_size_lowthresh = 0.0
else:
max_isl_size_highthresh = max(isl_size_highthresh)
max_isl_size_lowthresh = max(isl_size_lowthresh)
avg_max_isl_size = (max_isl_size_highthresh + max_isl_size_lowthresh) / 2.0
if hasattr(img, '_adapt_rms_isl_pos'):
isl_pos = img._adapt_rms_isl_pos # set isl_pos to existing value (for wavelet analysis)
if len(isl_pos) == 0:
# No bright sources found
do_adapt = False
else:
img._adapt_rms_isl_pos = isl_pos
min_size_allowed = int(img.pixel_beam()[0]*9.0)
if opts.rms_box is None or (opts.rms_box_bright is None and do_adapt):
if do_adapt:
bsize = int(max(brightsize, min_size_allowed, max_isl_size_highthresh*2.0))
else:
bsize = int(max(brightsize, min_size_allowed, max_isl_size*2.0))
bsize2 = int(max(min(image.shape)/10.0, max_isl_size*5.0))
if bsize < min_size_allowed:
bsize = min_size_allowed
if bsize % 10 == 0: bsize += 1
if bsize2 < min_size_allowed:
bsize2 = min_size_allowed
if bsize2 % 10 == 0: bsize2 += 1
bstep = int(round(min(bsize/3., min(shape)/10.)))
bstep2 = int(round(min(bsize2/3., min(shape)/10.)))
if opts.rms_box_bright is None:
img.rms_box_bright = (bsize, bstep)
else:
img.rms_box_bright = opts.rms_box_bright
if opts.rms_box is None:
img.rms_box = (bsize2, bstep2)
else:
img.rms_box = opts.rms_box
else:
if do_adapt:
img.rms_box_bright = opts.rms_box_bright
img.rms_box = opts.rms_box
else:
img.rms_box_bright = opts.rms_box
img.rms_box = opts.rms_box
if opts.kappa_clip is None:
kappa = -img.pixel_beamarea()
else:
kappa = img.opts.kappa_clip
if do_adapt:
map_opts = (kappa, img.rms_box_bright, opts.spline_rank)
else:
map_opts = (kappa, img.rms_box, opts.spline_rank)
for ipol, pol in enumerate(pols):
data = ch0_images[ipol]
mean = N.zeros(data.shape, dtype=N.float32)
rms = N.zeros(data.shape, dtype=N.float32)
if len(pols) > 1:
pol_txt = ' (' + pol + ')'
else:
pol_txt = ''
## calculate rms/mean maps if needed
if ((opts.rms_map is not False) or (opts.mean_map not in ['zero', 'const'])) and img.rms_box[0] > min(image.shape)/4.0:
# rms box is too large - just use constant rms and mean
self.output_rmsbox_size(img)
mylogger.userinfo(mylog, 'Size of rms_box larger than 1/4 of image size')
mylogger.userinfo(mylog, 'Using constant background rms and mean')
img.use_rms_map = False
img.mean_map_type = 'const'
else:
if opts.rmsmean_map_filename is not None and len(opts.rmsmean_map_filename)!=0:
# from astropy.io import fits as pyfits
def CheckShape(A):
if len(A.shape)!=4:
raise RuntimeError("Array shape should be len 4 (nch,npol,nx,ny)")
if A.shape[0]!=1:
raise RuntimeError("Array should be single channel")
if A.shape[1]!=1:
raise RuntimeError("Array should be single pol")
mean_fits_name,rms_fits_name=opts.rmsmean_map_filename
mylogger.userinfo(mylog, "Skipping mean and rms image computation as external images supplied")
mylogger.userinfo(mylog, " Opening mean image: %s"%mean_fits_name)
# mean = pyfits.open(mean_fits_name, mode="readonly")[0].data
mean, hdr = read_image_from_file(mean_fits_name, img, img.indir)
CheckShape(mean); mean = mean[0,0]
mylogger.userinfo(mylog, " Opening rms image: %s"%rms_fits_name)
# rms = pyfits.open(rms_fits_name, mode="readonly")[0].data
rms, hdr = read_image_from_file(rms_fits_name, img, img.indir)
CheckShape(rms); rms = rms[0,0]
elif (opts.rms_map is not False) or (opts.mean_map not in ['zero', 'const']):
if len(data.shape) == 2: ## 2d case
mean, rms = self.calculate_maps(img, data, mean, rms, mask, map_opts, do_adapt=do_adapt,
bright_pt_coords=isl_pos, rms_box2=img.rms_box,
logname="PyBDSM."+img.log, ncores=img.opts.ncores)
elif len(data.shape) == 3: ## 3d case
if not isinstance(mask, N.ndarray):
mask = N.zeros(data.shape[0], dtype=bool)
for i in range(data.shape[0]):
## iterate each plane
mean, rms = self.calculate_maps(img, data[i], mean[i], rms[i], mask[i], map_opts,
do_adapt=do_adapt, bright_pt_coords=isl_pos,
rms_box2=img.rms_box, logname="PyBDSM."+img.log,
ncores=img.opts.ncores)
else:
mylog.critical('Image shape not handleable' + pol_txt)
raise RuntimeError("Can't handle array of this shape" + pol_txt)
self.output_rmsbox_size(img)
if do_adapt:
mylogger.userinfo(mylog, 'Number of sources using small scale', str(len(isl_pos)))
mylog.info('Background rms and mean images computed' + pol_txt)
## check if variation of rms/mean maps is significant enough:
# check_rmsmap() sets img.use_rms_map
# check_meanmap() sets img.mean_map_type
if pol == 'I':
if opts.rms_map is None and img.use_rms_map is None:
if do_adapt and len(isl_pos) > 0:
# Always use 2d map if there is at least one bright
# source and adaptive scaling is desired
img.use_rms_map = True
else:
self.check_rmsmap(img, rms)
elif opts.rms_map is not None:
img.use_rms_map = opts.rms_map
if img.use_rms_map is False:
mylogger.userinfo(mylog, 'Using constant background rms')
else:
mylogger.userinfo(mylog, 'Using 2D map for background rms')
if opts.mean_map == 'default' and img.mean_map_type is None:
self.check_meanmap(img, rms)
elif opts.mean_map != 'default':
img.mean_map_type = opts.mean_map
if img.mean_map_type != 'map':
mylogger.userinfo(mylog, 'Using constant background mean')
else:
mylogger.userinfo(mylog, 'Using 2D map for background mean')
## if rms map is insignificant, or rms_map==False use const value
if img.use_rms_map is False:
if opts.rms_value is None:
rms[:] = crmss[ipol]
else:
rms[:] = opts.rms_value
mylogger.userinfo(mylog, 'Value of background rms' + pol_txt,
'%.2e Jy/beam' % rms[0][0])
else:
rms_min = N.nanmin(rms)
rms_max = N.nanmax(rms)
mylogger.userinfo(mylog, 'Min/max values of background rms map' + pol_txt,
'(%.2e, %.2e) Jy/beam' % (rms_min, rms_max))
if img.mean_map_type != 'map':
if opts.mean_map == 'zero':
val = 0.0
else:
val = img.clipped_mean
mean[:] = val
mylogger.userinfo(mylog, 'Value of background mean' + pol_txt,
str(round(val,5))+' Jy/beam')
else:
mean_min = N.nanmin(mean)
mean_max = N.nanmax(mean)
mylogger.userinfo(mylog, 'Min/max values of background mean map' + pol_txt,
'(%.2e, %.2e) Jy/beam' % (mean_min, mean_max))
if pol == 'I':
# Apply mask to mean_map and rms_map by setting masked values to NaN
if isinstance(mask, N.ndarray):
pix_masked = N.where(mask == True)
mean[pix_masked] = N.nan
rms[pix_masked] = N.nan
img.mean_arr = mean
img.rms_arr = rms
if opts.savefits_rmsim or opts.output_all:
if img.waveletimage:
resdir = img.basedir + '/wavelet/background/'
else:
resdir = img.basedir + '/background/'
if not os.path.exists(resdir): os.makedirs(resdir)
func.write_image_to_file(img.use_io, img.imagename + '.rmsd_I.fits', rms, img, resdir)
mylog.info('%s %s' % ('Writing ', resdir+img.imagename+'.rmsd_I.fits'))
if opts.savefits_meanim or opts.output_all:
if img.waveletimage:
resdir = img.basedir + '/wavelet/background/'
else:
resdir = img.basedir + '/background/'
if not os.path.exists(resdir): os.makedirs(resdir)
func.write_image_to_file(img.use_io, img.imagename + '.mean_I.fits', mean, img, resdir)
mylog.info('%s %s' % ('Writing ', resdir+img.imagename+'.mean_I.fits'))
if opts.savefits_normim or opts.output_all:
if img.waveletimage:
resdir = img.basedir + '/wavelet/background/'
else:
resdir = img.basedir + '/background/'
if not os.path.exists(resdir): os.makedirs(resdir)
zero_pixels = N.where(rms <= 0.0)
rms_nonzero = rms.copy()
                    rms_nonzero[zero_pixels] = N.nan
func.write_image_to_file(img.use_io, img.imagename + '.norm_I.fits', (image-mean)/rms_nonzero, img, resdir)
mylog.info('%s %s' % ('Writing ', resdir+img.imagename+'.norm_I.fits'))
else:
img.__setattr__('mean_'+pol+'_arr', mean)
img.__setattr__('rms_'+pol+'_arr', rms)
img.completed_Ops.append('rmsimage')
return img
def check_rmsmap(self, img, rms):
"""Calculates the statistics of the rms map and decides, when
rms_map=None, whether to take the map (if variance
is significant) or a constant value
"""
from math import sqrt
mylog = mylogger.logging.getLogger("PyBDSM."+img.log+"Rmsimage.Checkrms ")
cdelt = img.wcs_obj.acdelt[:2]
bm = (img.beam[0], img.beam[1])
        fw_pix = sqrt(N.prod(bm)/abs(N.prod(cdelt)))
if img.masked:
unmasked = N.where(~img.mask_arr)
stdsub = N.std(rms[unmasked])
maxrms = N.max(rms[unmasked])
else:
stdsub = N.std(rms)
maxrms = N.max(rms)
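        # Expected scatter if the noise were uniform: each box contains
        # roughly (rms_box / fw_pix)**2 independent beams, and the standard
        # deviation of an rms estimate from N samples is ~ rms / sqrt(2 N)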
rms_expect = img.clipped_rms/sqrt(2)/img.rms_box[0]*fw_pix
mylog.debug('%s %10.6f %s' % ('Standard deviation of rms image = ', stdsub*1000.0, 'mJy'))
mylog.debug('%s %10.6f %s' % ('Expected standard deviation = ', rms_expect*1000.0, 'mJy'))
if stdsub > 1.1*rms_expect:
img.use_rms_map = True
mylogger.userinfo(mylog, 'Variation in rms image significant')
else:
img.use_rms_map = False
mylogger.userinfo(mylog, 'Variation in rms image not significant')
return img
def check_meanmap(self, img, mean):
"""Calculates the statistics of the mean map and decides, when
mean_map=None, whether to take the map (if variance
is significant) or a constant value
"""
from math import sqrt
mylog = mylogger.logging.getLogger("PyBDSM."+img.log+"Rmsimage.Checkmean ")
cdelt = img.wcs_obj.acdelt[:2]
bm = (img.beam[0], img.beam[1])
        fw_pix = sqrt(N.prod(bm)/abs(N.prod(cdelt)))
if img.masked:
unmasked = N.where(~img.mask_arr)
stdsub = N.std(mean[unmasked])
maxmean = N.max(mean[unmasked])
else:
stdsub = N.std(mean)
maxmean = N.max(mean)
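        # Expected scatter if the noise were uniform: the standard deviation
        # of a mean estimate from N ~ (rms_box / fw_pix)**2 independent
        # beams is ~ rms / sqrt(N)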
rms_expect = img.clipped_rms/img.rms_box[0]*fw_pix
mylog.debug('%s %10.6f %s' % ('Standard deviation of mean image = ', stdsub*1000.0, 'mJy'))
mylog.debug('%s %10.6f %s' % ('Expected standard deviation = ', rms_expect*1000.0, 'mJy'))
# For mean map, use a higher threshold than for the rms map, as radio images
# should rarely, if ever, have significant variations in the mean
if stdsub > 5.0*rms_expect:
img.mean_map_type = 'map'
mylogger.userinfo(mylog, 'Variation in mean image significant')
else:
if img.confused:
img.mean_map_type = 'zero'
else:
img.mean_map_type = 'const'
mylogger.userinfo(mylog, 'Variation in mean image not significant')
return img
def calculate_maps(self, img, data, mean, rms, mask, map_opts, do_adapt,
bright_pt_coords=[], rms_box2=None,
logname=None, ncores=None):
"""Calls map_2d and checks for problems"""
mylog = mylogger.logging.getLogger("PyBDSM."+img.log+"Rmsimage.Calcmaps ")
rms_ok = False
mylog = mylogger.logging.getLogger("PyBDSM."+img.log+"Rmsimage.Calcmaps ")
opts = img.opts
kappa = map_opts[0]
spline_rank = opts.spline_rank
while not rms_ok:
self.map_2d(data, mean, rms, mask, *map_opts, do_adapt=do_adapt,
bright_pt_coords=bright_pt_coords, rms_box2=rms_box2,
logname=logname, ncores=ncores)
if img.masked:
test = N.any(rms[~img.mask_arr] < 0.0)
else:
test = N.any(rms < 0.0)
if test:
rms_ok = False
if (opts.rms_box_bright is None and do_adapt) or (opts.rms_box is None and not do_adapt):
# Increase box by 20%
if do_adapt:
new_width = int(img.rms_box_bright[0]*1.2)
if new_width == img.rms_box_bright[0]:
new_width = img.rms_box_bright[0] + 1
new_step = int(new_width/3.0)
img.rms_box_bright = (new_width, new_step)
if img.rms_box_bright[0] > min(img.ch0_arr.shape)/4.0:
mylogger.userinfo(mylog, 'Size of rms_box_bright larger than 1/4 of image size')
mylogger.userinfo(mylog, 'Using constant background rms and mean')
img.use_rms_map = False
img.rms_box = img.rms_box_bright
img.mean_map_type = 'const'
rms_ok = True
else:
map_opts = (kappa, img.rms_box_bright, spline_rank)
else:
new_width = int(img.rms_box[0]*1.2)
if new_width == img.rms_box[0]:
new_width = img.rms_box[0] + 1
new_step = int(new_width/3.0)
img.rms_box = (new_width, new_step)
if img.rms_box[0] > min(img.ch0_arr.shape)/4.0:
mylogger.userinfo(mylog, 'Size of rms_box larger than 1/4 of image size')
mylogger.userinfo(mylog, 'Using constant background rms and mean')
img.use_rms_map = False
img.mean_map_type = 'const'
rms_ok = True
else:
map_opts = (kappa, img.rms_box, spline_rank)
else:
# User has specified box size, use order=1 to prevent negatives
if spline_rank > 1:
mylog.warning('Negative values found in rms map interpolated with spline_rank = %i' % spline_rank)
mylog.warning('Using spline_rank = 1 (bilinear interpolation) instead')
spline_rank = 1
if do_adapt:
map_opts = (kappa, img.rms_box_bright, spline_rank)
else:
map_opts = (kappa, img.rms_box, spline_rank)
else:
raise RuntimeError('RMS map has negative values')
else:
rms_ok = True
return mean, rms
def map_2d(self, arr, out_mean, out_rms, mask=False,
kappa=3, box=None, interp=1, do_adapt=False,
bright_pt_coords=None, rms_box2=None, logname='', ncores=None):
"""Calculate mean&rms maps and store them into provided arrays
Parameters:
arr: 2D array with data
out_mean, out_rms: 2D arrays where to store calculated maps
mask: mask
kappa: clipping value for rms/mean calculations
box: tuple of (box_size, box_step) for calculating map
        rms_box2: large-scale box size
interp: order of interpolating spline used to interpolate
calculated map
do_adapt: use adaptive binning
"""
mask_small = mask
axes, mean_map1, rms_map1 = self.rms_mean_map(arr, mask_small, kappa, box, ncores)
ax = [self.remap_axis(ashp, axv) for (ashp, axv) in zip(arr.shape, axes)]
ax = N.meshgrid(*ax[-1::-1])
pt_src_scale = box[0]
if do_adapt:
out_rms2 = N.zeros(rms_map1.shape, dtype=N.float32)
out_mean2 = N.zeros(rms_map1.shape, dtype=N.float32)
# Generate rms/mean maps on large scale
box2 = rms_box2
axes2, mean_map2, rms_map2 = self.rms_mean_map(arr, mask, kappa, box2, ncores)
# Interpolate to get maps on small scale grid
axes2mod = axes2[:]
axes2mod[0] = axes2[0]/arr.shape[0]*mean_map1.shape[0]
axes2mod[1] = axes2[1]/arr.shape[1]*mean_map1.shape[1]
ax2 = [self.remap_axis(ashp, axv) for (ashp, axv) in zip(out_rms2.shape, axes2mod)]
ax2 = N.meshgrid(*ax2[-1::-1])
nd.map_coordinates(rms_map2, ax2[-1::-1], order=interp, output=out_rms2)
nd.map_coordinates(mean_map2, ax2[-1::-1], order=interp, output=out_mean2)
rms_map = out_rms2
mean_map = out_mean2
# For each bright source, find nearest points and weight them towards
# the small scale maps.
xscale = float(arr.shape[0])/float(out_rms2.shape[0])
yscale = float(arr.shape[1])/float(out_rms2.shape[1])
scale = [xscale, yscale]
size = 15
for bright_pt in bright_pt_coords:
bbox, src_center = self.make_bright_src_bbox(bright_pt, scale, size, out_rms2.shape)
bbox_xsize = bbox[0].stop-bbox[0].start
bbox_ysize = bbox[1].stop-bbox[1].start
src_center[0] -= bbox[0].start
src_center[1] -= bbox[1].start
weights = N.ones((bbox_xsize, bbox_ysize))
# Taper weights to zero where small-scale value is within a factor of
# 2 of large-scale value. Use distance to center of the box
# to determine taper value. This tapering prevents the use of the
# small-scale box beyond the range of artifacts.
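                # The taper applied below falls linearly from 1 toward 0 at
                # half the diagonal of the box.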
low_vals_ind = N.where(rms_map1[tuple(bbox)]/out_rms2[tuple(bbox)] < 2.0)
if len(low_vals_ind[0]) > 0:
dist_to_cen = []
for (x,y) in zip(low_vals_ind[0],low_vals_ind[1]):
dist_to_cen.append(N.sqrt( (x-src_center[0])**2 +
(y-src_center[1])**2 ))
                    min_dist_to_cen = N.min(dist_to_cen)
                    for x in range(bbox_xsize):
                        for y in range(bbox_ysize):
                            dist_to_cen = N.sqrt( (x-src_center[0])**2 +
                                                  (y-src_center[1])**2 )
                            if dist_to_cen >= min_dist_to_cen:
                                weights[x,y] = 1.0 - dist_to_cen/N.sqrt(bbox_xsize**2+bbox_ysize**2)*2.0
rms_map[tuple(bbox)] = rms_map1[tuple(bbox)]*weights + out_rms2[tuple(bbox)]*(1.0-weights)
mean_map[tuple(bbox)] = mean_map1[tuple(bbox)]*weights + out_mean2[tuple(bbox)]*(1.0-weights)
else:
rms_map = rms_map1
mean_map = mean_map1
# Interpolate to image coords
mylog = mylogger.logging.getLogger(logname+"Rmsimage")
nd.map_coordinates(rms_map, ax[-1::-1], order=interp, output=out_rms)
nd.map_coordinates(mean_map, ax[-1::-1], order=interp, output=out_mean)
# Apply mask to mean_map and rms_map by setting masked values to NaN
if isinstance(mask, N.ndarray):
pix_masked = N.where(mask == True)
out_mean[pix_masked] = N.nan
out_rms[pix_masked] = N.nan
def rms_mean_map(self, arr, mask=False, kappa=3, box=None, ncores=None):
"""Calculate map of the mean/rms values
Parameters:
arr: 2D array with data
mask: mask
kappa: clipping for calculating rms/mean within each box
box: box parameters (box_size, box_step)
Returns:
        axes: list of 2 arrays with coordinates of boxes along each axis
mean_map: map of mean values
rms_map: map of rms values
Description:
This function calculates clipped mean and rms maps for the array.
        The algorithm is a moving-window algorithm, where mean & rms are
        calculated within a window of size (box_size x box_size), and the
        window is stepped within the image by steps of box_step.
        Special care is taken for the borders of the image -- outer borders
        (where the box doesn't fit properly) are given one extra round with a box
        applied to the border of the image. Additionally, outer values are
        extrapolated to cover the whole image size, to simplify further processing.
        See also the routine 'remap_axis' for 'inverting' the axes array.
Example:
for an input image of 100x100 pixels calling rms_mean_map with default
box parameters (50, 25) will result in the following:
axes = [array([ 0. , 24.5, 49.5, 74.5, 99. ]),
array([ 0. , 24.5, 49.5, 74.5, 99. ])]
mean_map = <5x5 array>
rms_map = <5x5 array>
rms_map[1,1] is calculated for arr[0:50, 0:50]
rms_map[2,1] is calculated for arr[25:75, 0:50]
...etc...
rms_map[0,0] is extrapolated as .5*(rms_map[0,1] + rms_map[1,0])
rms_map[0,1] is extrapolated as rms_map[1,1]
"""
mylog = mylogger.logging.getLogger("PyBDSM.RmsMean")
if box is None:
box = (50, 25)
if box[0] < box[1]:
raise RuntimeError('Box size is less than step size.')
        # Some math first: boxcount is the number of boxes along each axis,
        # bounds is non-zero for axes which have extra pixels beyond the last box
BS, SS = box
imgshape = N.array(arr.shape)
        # If box size is less than 10% of the image, use simple extrapolation to
# derive the edges of the mean and rms maps; otherwise, use padded
# versions of arr and mask to derive the mean and rms maps
if float(BS)/float(imgshape[0]) < 0.1 and \
float(BS)/float(imgshape[1]) < 0.1:
use_extrapolation = True
else:
use_extrapolation = False
if use_extrapolation:
            boxcount = 1 + (imgshape - BS)//SS
bounds = N.asarray((boxcount-1)*SS + BS < imgshape, dtype=int)
mapshape = 2 + boxcount + bounds
else:
            boxcount = 1 + imgshape//SS
bounds = N.asarray((boxcount-1)*SS < imgshape, dtype=int)
mapshape = boxcount + bounds
pad_border_size = int(BS/2.0)
new_shape = (arr.shape[0] + 2*pad_border_size, arr.shape[1]
+ 2*pad_border_size)
arr_pad = self.pad_array(arr, new_shape)
if mask is None:
mask_pad = None
else:
mask_pad = self.pad_array(mask, new_shape)
# Make arrays for calculated data
mapshape = [int(ms) for ms in mapshape]
boxcount = [int(bc) for bc in boxcount]
mean_map = N.zeros(mapshape, dtype=N.float32)
rms_map = N.zeros(mapshape, dtype=N.float32)
        axes = [N.zeros(alen, dtype=N.float32) for alen in mapshape]
# Step 1: internal area of the image
# Make a list of coordinates to send to process_mean_rms_maps()
coord_list = []
ind_list = []
for i in range(boxcount[0]):
for j in range(boxcount[1]):
if use_extrapolation:
coord_list.append((i+1, j+1))
else:
coord_list.append((i, j))
ind_list.append([i*SS, i*SS+BS, j*SS, j*SS+BS])
# Now call the parallel mapping function. Returns a list of [mean, rms]
# for each coordinate.
if use_extrapolation:
cm_cr_list = mp.parallel_map(func.eval_func_tuple,
zip(itertools.repeat(self.process_mean_rms_maps),
ind_list, itertools.repeat(mask), itertools.repeat(arr),
itertools.repeat(kappa)), numcores=ncores)
else:
cm_cr_list = mp.parallel_map(func.eval_func_tuple,
zip(itertools.repeat(self.process_mean_rms_maps),
ind_list, itertools.repeat(mask_pad), itertools.repeat(arr_pad),
itertools.repeat(kappa)), numcores=ncores)
for i, co in enumerate(coord_list):
cm, cr = cm_cr_list[i]
mean_map[co] = cm
rms_map[co] = cr
# Check if all regions have too few unmasked pixels
if mask is not None and N.size(N.where(mean_map != N.inf)) == 0:
raise RuntimeError("No unmasked regions from which to determine "\
"mean and rms maps")
# Step 2: borders of the image
if bounds[0]:
coord_list = []
ind_list = []
for j in range(boxcount[1]):
if use_extrapolation:
coord_list.append((-2, j+1))
ind_list.append([-BS, arr.shape[0], j*SS,j*SS+BS])
else:
coord_list.append((-1, j))
ind_list.append([-BS, arr_pad.shape[0], j*SS,j*SS+BS])
if use_extrapolation:
cm_cr_list = mp.parallel_map(func.eval_func_tuple,
zip(itertools.repeat(self.process_mean_rms_maps),
ind_list, itertools.repeat(mask), itertools.repeat(arr),
itertools.repeat(kappa)), numcores=ncores)
else:
cm_cr_list = mp.parallel_map(func.eval_func_tuple,
zip(itertools.repeat(self.process_mean_rms_maps),
ind_list, itertools.repeat(mask_pad), itertools.repeat(arr_pad),
itertools.repeat(kappa)), numcores=ncores)
for i, co in enumerate(coord_list):
cm, cr = cm_cr_list[i]
mean_map[co] = cm
rms_map[co] = cr
if bounds[1]:
coord_list = []
ind_list = []
for i in range(boxcount[0]):
if use_extrapolation:
coord_list.append((i+1, -2))
ind_list.append([i*SS,i*SS+BS, -BS,arr.shape[1]])
else:
coord_list.append((i, -1))
ind_list.append([i*SS,i*SS+BS, -BS,arr_pad.shape[1]])
if use_extrapolation:
cm_cr_list = mp.parallel_map(func.eval_func_tuple,
zip(itertools.repeat(self.process_mean_rms_maps),
ind_list, itertools.repeat(mask), itertools.repeat(arr),
itertools.repeat(kappa)), numcores=ncores)
else:
cm_cr_list = mp.parallel_map(func.eval_func_tuple,
zip(itertools.repeat(self.process_mean_rms_maps),
ind_list, itertools.repeat(mask_pad), itertools.repeat(arr_pad),
itertools.repeat(kappa)), numcores=ncores)
for i, co in enumerate(coord_list):
cm, cr = cm_cr_list[i]
mean_map[co] = cm
rms_map[co] = cr
if bounds.all():
if use_extrapolation:
ind = [-BS,arr.shape[0], -BS,arr.shape[1]]
self.for_masked(mean_map, rms_map, mask, arr, ind,
kappa, [-2, -2])
else:
ind = [-BS,arr_pad.shape[0], -BS,arr_pad.shape[1]]
self.for_masked(mean_map, rms_map, mask_pad, arr_pad, ind,
kappa, [-1, -1])
# Step 3: correct(extrapolate) borders of the image
        def correct_borders(amap):
            amap[0, :] = amap[1, :]
            amap[:, 0] = amap[:, 1]
            amap[-1, :] = amap[-2, :]
            amap[:, -1] = amap[:, -2]
            amap[0, 0] = (amap[1, 0] + amap[0, 1])/2.
            amap[-1, 0] = (amap[-2, 0] + amap[-1, 1])/2.
            amap[0, -1] = (amap[0, -2] + amap[1, -1])/2.
            amap[-1, -1] = (amap[-2, -1] + amap[-1, -2])/2.
if use_extrapolation:
correct_borders(mean_map)
correct_borders(rms_map)
# Step 4: fill in coordinate axes
for i in range(2):
if use_extrapolation:
axes[i][1:boxcount[i]+1] = (N.arange(boxcount[i])*SS
+ BS/2. - .5)
if bounds[i]:
axes[i][-2] = imgshape[i] - BS/2. - .5
else:
axes[i][0:boxcount[i]] = N.arange(boxcount[i])*SS - .5
if bounds[i]:
axes[i][-2] = imgshape[i] - .5
axes[i][-1] = imgshape[i] - 1
# Step 5: fill in boxes with < 5 unmasked pixels (set to values of
# N.inf)
unmasked_boxes = N.where(mean_map != N.inf)
if N.size(unmasked_boxes,1) < mapshape[0]*mapshape[1]:
mean_map = self.fill_masked_regions(mean_map)
rms_map = self.fill_masked_regions(rms_map)
return axes, mean_map, rms_map
def process_mean_rms_maps(self, ind, mask, arr, kappa):
"""Finds mean and rms for one region of an input arr"""
cm, cr = self.for_masked_mp(mask, arr, ind,
kappa)
return cm, cr
def fill_masked_regions(self, themap, magic=N.inf):
"""Fill masked regions (defined where values == magic) in themap.
"""
masked_boxes = N.where(themap == magic) # locations of masked regions
for i in range(N.size(masked_boxes,1)):
num_unmasked = 0
x, y = masked_boxes[0][i], masked_boxes[1][i]
delx = dely = 1
while num_unmasked == 0:
x1 = x - delx
if x1 < 0: x1 = 0
x2 = x + 1 + delx
if x2 > themap.shape[0]: x2 = themap.shape[0]
y1 = y - dely
if y1 < 0: y1 = 0
y2 = y + 1 + dely
if y2 > themap.shape[1]: y2 = themap.shape[1]
cutout = themap[x1:x2, y1:y2].ravel()
goodcutout = cutout[cutout != magic]
num_unmasked = len(goodcutout)
if num_unmasked > 0:
themap[x, y] = N.nansum(goodcutout)/float(len(goodcutout))
delx += 1
dely += 1
themap[N.where(N.isnan(themap))] = 0.0
return themap
def pad_array(self, arr, new_shape):
"""Returns a padded array by mirroring around the edges."""
# Assume that padding is the same for both axes and is equal
# around all edges.
half_size = int((new_shape[0] - arr.shape[0]) / 2)
arr_pad = N.zeros( (new_shape), dtype=arr.dtype)
# left band
band = arr[:half_size, :]
arr_pad[:half_size, half_size:-half_size] = N.flipud( band )
# right band
band = arr[-half_size:, :]
arr_pad[-half_size:, half_size:-half_size] = N.flipud( band )
# bottom band
band = arr[:, :half_size]
arr_pad[half_size:-half_size, :half_size] = N.fliplr( band )
# top band
band = arr[:, -half_size:]
arr_pad[half_size:-half_size, -half_size:] = N.fliplr( band )
# central band
arr_pad[half_size:-half_size, half_size:-half_size] = arr
# bottom left corner
band = arr[:half_size,:half_size]
arr_pad[:half_size,:half_size] = N.flipud(N.fliplr(band))
# top right corner
band = arr[-half_size:,-half_size:]
arr_pad[-half_size:,-half_size:] = N.flipud(N.fliplr(band))
# top left corner
band = arr[:half_size,-half_size:]
arr_pad[:half_size,-half_size:] = N.flipud(N.fliplr(band))
# bottom right corner
band = arr[-half_size:,:half_size]
arr_pad[-half_size:,:half_size] = N.flipud(N.fliplr(band))
return arr_pad
def for_masked(self, mean_map, rms_map, mask, arr, ind, kappa, co):
        bstat = func.bstat  # _cbdsm.bstat
a, b, c, d = ind; i, j = co
if mask is None:
m, r, cm, cr, cnt = bstat(arr[a:b, c:d], mask, kappa)
if cnt > 198: cm = m; cr = r
mean_map[i, j], rms_map[i, j] = cm, cr
else:
pix_unmasked = N.where(mask[a:b, c:d] == False)
npix_unmasked = N.size(pix_unmasked,1)
if npix_unmasked > 20: # find clipped mean/rms
m, r, cm, cr, cnt = bstat(arr[a:b, c:d], mask[a:b, c:d], kappa)
if cnt > 198: cm = m; cr = r
mean_map[i, j], rms_map[i, j] = cm, cr
else:
if npix_unmasked > 5: # just find simple mean/rms
cm = N.mean(arr[pix_unmasked])
cr = N.std(arr[pix_unmasked])
mean_map[i, j], rms_map[i, j] = cm, cr
else: # too few unmasked pixels --> set mean/rms to inf
mean_map[i, j], rms_map[i, j] = N.inf, N.inf
def for_masked_mp(self, mask, arr, ind, kappa):
bstat = func.bstat #_cbdsm.bstat
a, b, c, d = ind
if mask is None:
m, r, cm, cr, cnt = bstat(arr[a:b, c:d], mask, kappa)
if cnt > 198: cm = m; cr = r
else:
pix_unmasked = N.where(mask[a:b, c:d] == False)
npix_unmasked = N.size(pix_unmasked,1)
if npix_unmasked > 20: # find clipped mean/rms
m, r, cm, cr, cnt = bstat(arr[a:b, c:d], mask[a:b, c:d], kappa)
if cnt > 198: cm = m; cr = r
else:
if npix_unmasked > 5: # just find simple mean/rms
cm = N.mean(arr[pix_unmasked])
cr = N.std(arr[pix_unmasked])
else: # too few unmasked pixels --> set mean/rms to inf
cm = N.inf
cr = N.inf
return cm, cr
def remap_axis(self, size, arr):
"""Invert axis mapping done by rms_mean_map
rms_mean_map 'compresses' axes by returning short arrays with
        coordinates of the boxes. This routine 'inverts' this compression
        by calculating coordinates of each pixel of the original array
        within the compressed array.
Parameters:
size: size of the original (and resulting) array
arr : 'compressed' axis array from rms_mean_map
Example:
the following 'compressed' axis (see example in rms_mean_map):
ax = array([ 0. , 24.5, 49.5, 74.5, 99. ])
will be remapped as:
        print(remap_axis(100, ax))
[ 0. 0.04081633 0.08163265 0.12244898 ....
...............................................
3.91836735 3.95918367 4. ]
        which means that pixel 0 in the original image corresponds to pixel
        0 in the rms/mean map array (which is a 5x5 array), while
        pixel 1 of the original image has a coordinate of 0.04081633 in the
        compressed image (i.e., it has no exact counterpart, and its value
        should be obtained by interpolation)
"""
from math import floor, ceil
res = N.zeros(size, dtype=N.float32)
for i in range(len(arr) - 1):
i1 = arr[i]
i2 = arr[i+1]
t = N.arange(ceil(i1), floor(i2)+1, dtype=float)
res[int(ceil(i1)):int(floor(i2))+1] = i + (t-i1)/(i2-i1)
return res
def make_bright_src_bbox(self, coord, scale, size, shape):
"""Returns bbox given coordinates of center and scale"""
xindx = int(coord[0]/scale[0])
yindx = int(coord[1]/scale[1])
xlow = xindx - int(size/2.0)
if xlow < 0:
xlow = 0
xhigh = xindx + int(size/2.0) + 1
if xhigh > shape[0]:
xhigh = shape[0]
ylow = yindx - int(size/2.0)
if ylow < 0:
ylow = 0
yhigh = yindx + int(size/2.0) + 1
if yhigh > shape[1]:
yhigh = shape[1]
src_center = [xindx, yindx]
return [slice(xlow, xhigh, None), slice(ylow, yhigh, None)], src_center
def output_rmsbox_size(self, img):
"""Prints rms/mean box size"""
opts = img.opts
do_adapt = opts.adaptive_rms_box
mylog = mylogger.logging.getLogger("PyBDSM."+img.log+"RMSimage")
if (opts.rms_map is not False) or (opts.mean_map not in ['zero', 'const']):
if do_adapt:
if opts.rms_box_bright is None:
mylogger.userinfo(mylog, 'Derived rms_box (box size, step size)',
'(' + str(img.rms_box_bright[0]) + ', ' +
str(img.rms_box_bright[1]) + ') pixels (small scale)')
else:
mylogger.userinfo(mylog, 'Using user-specified rms_box',
'(' + str(img.rms_box_bright[0]) + ', ' +
str(img.rms_box_bright[1]) + ') pixels (small scale)')
if opts.rms_box is None:
mylogger.userinfo(mylog, 'Derived rms_box (box size, step size)',
'(' + str(img.rms_box[0]) + ', ' +
str(img.rms_box[1]) + ') pixels (large scale)')
else:
mylogger.userinfo(mylog, 'Using user-specified rms_box',
'(' + str(img.rms_box[0]) + ', ' +
str(img.rms_box[1]) + ') pixels (large scale)')
else:
if opts.rms_box is None:
mylogger.userinfo(mylog, 'Derived rms_box (box size, step size)',
'(' + str(img.rms_box[0]) + ', ' +
str(img.rms_box[1]) + ') pixels')
else:
mylogger.userinfo(mylog, 'Using user-specified rms_box',
'(' + str(img.rms_box[0]) + ', ' +
str(img.rms_box[1]) + ') pixels')
PyBDSF-1.11.0/bdsf/shapefit.py 0000664 0000000 0000000 00000014516 14650706641 0015734 0 ustar 00root root 0000000 0000000 """Module shapelets
This will do all the shapelet analysis of islands in an image
"""
from __future__ import absolute_import
from .image import *
from .islands import *
from .shapelets import *
from . import mylogger
from . import statusbar
from . import multi_proc as mp
import itertools
try:
from itertools import izip as zip
except ImportError: # will be 3.x series
pass
from . import functions as func
from .gausfit import find_bbox
class Op_shapelets(Op):
""" Get the image and mask from each island and send it to
shapelet programs, which can then also be called separately """
def __call__(self, img):
mylog = mylogger.logging.getLogger("PyBDSM."+img.log+"Shapefit")
bar = statusbar.StatusBar('Decomposing islands into shapelets ...... : ', 0, img.nisl)
opts = img.opts
if img.opts.shapelet_do:
if img.nisl == 0:
mylog.warning("No islands found. Skipping shapelet decomposition.")
img.completed_Ops.append('shapelets')
return
if not opts.quiet:
bar.start()
# Set up multiprocessing. First create a simple copy of the Image
# object that contains the minimal data needed.
opts_dict = opts.to_dict()
img_simple = Image(opts_dict)
img_simple.pixel_beamarea = img.pixel_beamarea
img_simple.pixel_beam = img.pixel_beam
img_simple.thresh_pix = img.thresh_pix
img_simple.minpix_isl = img.minpix_isl
img_simple.clipped_mean = img.clipped_mean
img_simple.shape = img.ch0_arr.shape
# Now call the parallel mapping function. Returns a list of
# [beta, centre, nmax, basis, cf] for each island
shap_list = mp.parallel_map(func.eval_func_tuple,
zip(itertools.repeat(self.process_island),
img.islands, itertools.repeat(img_simple),
itertools.repeat(opts)), numcores=opts.ncores,
bar=bar)
for id, isl in enumerate(img.islands):
beta, centre, nmax, basis, cf = shap_list[id]
isl.shapelet_beta=beta
isl.shapelet_centre=centre
isl.shapelet_posn_sky=img.pix2sky(centre)
isl.shapelet_posn_skyE=[0.0, 0.0, 0.0]
isl.shapelet_nmax=nmax
isl.shapelet_basis=basis
isl.shapelet_cf=cf
img.completed_Ops.append('shapelets')
def process_island(self, isl, img, opts=None):
"""Processes a single island.
Returns shapelet parameters.
"""
if opts is None:
opts = img.opts
if opts.shapelet_gresid:
shape = img.shape
thresh= opts.fittedimage_clip
model_gaus = N.zeros(shape, dtype=N.float32)
for g in isl.gaul:
C1, C2 = g.centre_pix
b = find_bbox(thresh*isl.rms, g)
bbox = N.s_[max(0, int(C1-b)):min(shape[0], int(C1+b+1)),
max(0, int(C2-b)):min(shape[1], int(C2+b+1))]
x_ax, y_ax = N.mgrid[bbox]
ffimg = func.gaussian_fcn(g, x_ax, y_ax)
model_gaus[bbox] = model_gaus[bbox] + ffimg
arr = isl.image - isl.islmean - model_gaus[tuple(isl.bbox)]
else:
arr = isl.image - isl.islmean
mask = isl.mask_active
basis = opts.shapelet_basis
beam_pix = img.pixel_beam()
mode = opts.shapelet_fitmode
if mode != 'fit':
mode = ''
fixed = (0,0,0)
(beta, centre, nmax) = self.get_shapelet_params(arr, mask, basis, beam_pix, fixed, N.array(isl.origin), mode)
cf = decompose_shapelets(arr, mask, basis, beta, centre, nmax, mode)
return [beta, tuple(N.array(centre) + N.array(isl.origin)), nmax, basis, cf]
def get_shapelet_params(self, image, mask, basis, beam_pix, fixed, ori, mode, beta=None, cen=None, nmax=None):
""" This takes as input an image, its mask (false=valid), basis="cartesian"/"polar",
fixed=(i,j,k) where i,j,k =0/1 to calculate or take as fixed for (beta, centre, nmax),
beam_pix has the beam in (pix_fwhm, pix_fwhm, deg),
beta (the scale), cen (centre of basis expansion), nmax (max order). The output
is an updated set of values of (beta, centre, nmax). If fixed is 1 and the value is not
specified as an argument, then fixed is taken as 0."""
from math import sqrt, log, floor
from . import functions as func
import numpy as N
if fixed[0]==1 and beta is None: fixed[0]=0
if fixed[1]==1 and cen is None: fixed[1]=0
if fixed[2]==1 and nmax is None: fixed[2]=0
if fixed[0]*fixed[1]==0:
(m1, m2, m3)=func.moment(image, mask)
if fixed[0]==0:
try:
beta = sqrt(m3[0]*m3[1])*2.0
except ValueError:
beta = 0.5
if beta == 0.0:
beta = 0.5
if fixed[1]==0:
cen=m2
if fixed[2]==0:
(n, m)=image.shape
nmax=int(round(sqrt(1.0*n*n+m*m)/beam_pix[1]))-1
nmax=min(max(nmax*2+2,10),10) # totally ad hoc
npix = N.product(image.shape)-N.sum(mask)
if nmax*nmax >= n*m : nmax = int(floor(sqrt(npix-1))) # -1 is for when n*m is a perfect square
if mode == 'fit': # make sure npara <= npix
nmax_max = int(round(0.5*(-3+sqrt(1+8*npix))))
nmax=min(nmax, nmax_max)
betarange=[0.5,sqrt(beta*max(n,m))] # min, max
if fixed[1]==0:
cen=shape_findcen(image, mask, basis, beta, nmax, beam_pix) # + check_cen_shapelet
#print 'First Centre = ',cen,N.array(cen)+ori
from time import time
t1 = time()
if fixed[0]==0:
beta, err=shape_varybeta(image, mask, basis, beta, cen, nmax, betarange, plot=False)
t2 = time()
#print 'TIME ',t2-t1, '\n'
#print 'Final Beta = ',beta, err
if fixed[1]==0 and fixed[0]==0:
cen=shape_findcen(image, mask, basis, beta, nmax, beam_pix) # + check_cen_shapelet
#print 'Final Cen = ',N.array(cen)+ori
return beta, cen, nmax
PyBDSF-1.11.0/bdsf/shapelets.py 0000664 0000000 0000000 00000032374 14650706641 0016123 0 ustar 00root root 0000000 0000000 """Module shapelets.
nmax => J = 0..nmax; hence nmax+1 orders calculated.
ordermax = nmax+1; range(ordermax) has all the values of n
Order n => J=n, where J=0 is the gaussian.
"""
from __future__ import print_function
from __future__ import absolute_import
import numpy as N
try:
from astropy.io import fits as pyfits
except ImportError as err:
import pyfits
from scipy.optimize import leastsq
def decompose_shapelets(image, mask, basis, beta, centre, nmax, mode):
""" Decomposes image (with mask) and beta, centre (2-tuple) , nmax into basis
shapelets and returns the coefficient matrix cf.
Mode is 'fit' or 'integrate' for method finding coeffs. If fit then integrated
values are taken as initial guess.
"""
# bad = False
# if (beta < 0 or beta/max(image.shape) > 5 or \
# (max(N.abs(list(centre)))-max(image.shape)/2) > 10*max(image.shape)): bad = True
hc = shapelet_coeff(nmax, basis)
ordermax=nmax+1
Bset=N.zeros((ordermax, ordermax, image.shape[0], image.shape[1]), dtype=N.float32)
cf = N.zeros((ordermax,ordermax)) # coefficient matrix, will fill up only lower triangular part.
index = [(i,j) for i in range(ordermax) for j in range(ordermax-i)] # i=0->nmax, j=0-nmax-i
for coord in index:
B = shapelet_image(basis, beta, centre, hc, coord[0], coord[1], image.shape)
if mode == 'fit': Bset[coord[0] , coord[1], ::] = B
m = N.copy(mask)
for i, v in N.ndenumerate(mask): m[i] = not v
cf[coord] = N.sum(image*B*m)
if mode == 'fit':
npix = N.product(image.shape)-N.sum(mask)
npara = (nmax+1)*(nmax+2)*0.5
cfnew = fit_shapeletbasis(image, mask, cf, Bset)
recon1 = reconstruct_shapelets(image.shape, mask, basis, beta, centre, nmax, cf)
recon2 = reconstruct_shapelets(image.shape, mask, basis, beta, centre, nmax, cfnew)
if N.std(recon2) < 1.2*N.std(recon1): cf = cfnew
return cf
def fit_shapeletbasis(image, mask, cf0, Bset):
""" Fits the image to the shapelet basis functions to estimate shapelet coefficients
instead of integrating it out. This should avoid the problems of digitisation and hence
non-orthonormality. """
from . import functions as func
ma = N.where(~mask.flatten())
cfshape = cf0.shape
res=lambda p, image, Bset, cfshape, mask_flat : (image.flatten()-func.shapeletfit(p, Bset, cfshape))[ma]
if len(ma[0]) <= 5:
# Not enough unmasked pixels (degrees of freedom) for a least-squares fit
cf = cf0
else:
(cf, flag)=leastsq(res, cf0.flatten(), args=(image, Bset, cfshape, ma))
cf = cf.reshape(cfshape)
return cf
def reconstruct_shapelets(size, mask, basis, beta, centre, nmax, cf):
""" Reconstructs a shapelet image of size, for pixels which are unmasked, for a given
beta, centre, nmax, basis and the shapelet coefficient matrix cf. """
rimage = N.zeros(size, dtype=N.float32)
hc = []
hc = shapelet_coeff(nmax, basis)
index = [(i,j) for i in range(nmax) for j in range(nmax-i)]
for coord in index:
B = shapelet_image(basis, beta, centre, hc, coord[0], coord[1], size)
rimage += B*cf[coord]
return rimage
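# Illustrative sketch (not part of the original module): a decompose /
# reconstruct round trip on a synthetic Gaussian blob. The values of beta,
# centre and nmax are hypothetical; mode='' selects the pure integration
# method. Assumes the module's functions are importable when this runs.
import numpy as N
x, y = N.mgrid[0:32, 0:32]
blob = N.exp(-((x - 16.)**2 + (y - 16.)**2)/(2.*3.**2)).astype(N.float32)
mask = N.zeros(blob.shape, dtype=bool)  # no pixels masked
cf = decompose_shapelets(blob, mask, 'cartesian', 3.0, (16., 16.), 6, mode='')
model = reconstruct_shapelets(blob.shape, mask, 'cartesian', 3.0, (16., 16.), 6, cf)
resid_rms = N.std(blob - model)         # should be much smaller than N.std(blob)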
def shapelet_image(basis, beta, centre, hc, nx, ny, size):
""" Takes basis, beta, centre (2-tuple), hc matrix, x, y, size and returns the image of the shapelet of
order nx,ny on an image of size size. Does what getcartim.f does in fBDSM. nx,ny -> 0-nmax
Centre is by Python convention, for retards who count from zero. """
from math import sqrt,pi
try:
from scipy import factorial
except ImportError:
try:
from scipy.misc.common import factorial
except ImportError:
try:
from scipy.misc import factorial
except ImportError:
from scipy.special import factorial
hcx = hc[nx,:]
hcy = hc[ny,:]
ind = N.array([nx,ny])
fact = factorial(ind)
dumr1 = N.sqrt((2.0**(ind))*sqrt(pi)*fact)
x = (N.arange(size[0],dtype=float)-centre[0])/beta
y = (N.arange(size[1],dtype=float)-centre[1])/beta
dumr3 = N.zeros(size[0])
for i in range(size[0]):
for j in range(ind[0]+1):
dumr3[i] += hcx[j]*(x[i]**j)
B_nx = N.exp(-0.50*x*x)*dumr3/dumr1[0]/sqrt(beta)
dumr3 = N.zeros(size[1])
for i in range(size[1]):
for j in range(ind[1]+1):
dumr3[i] += hcy[j]*(y[i]**j)
B_ny = N.exp(-0.50*y*y)*dumr3/dumr1[1]/sqrt(beta)
return N.outer(B_nx,B_ny)
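# Illustrative sketch (not part of the original module): on a grid that is
# large compared to beta, the sampled basis images are nearly orthonormal,
# i.e. sum(B_ij * B_kl) ~ delta_ik * delta_jl. All numbers are hypothetical.
import numpy as N
hc = shapelet_coeff(5, 'cartesian')
B00 = shapelet_image('cartesian', 2.0, (32., 32.), hc, 0, 0, (64, 64))
B11 = shapelet_image('cartesian', 2.0, (32., 32.), hc, 1, 1, (64, 64))
print(N.sum(B00*B00), N.sum(B00*B11))  # ~1.0 and ~0.0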
def shape_findcen(image, mask, basis, beta, nmax, beam_pix): # + check_cen_shapelet
""" Finds the optimal centre for shapelet decomposition. Minimising various
combinations of c12 and c21, as done in the literature, doesn't work for all cases.
Hence, for the c1 image, we find the zero crossing for every vertical line
and for the c2 image, the zero crossing for every horizontal line, and then
we find intersection point of these two. This seems to work even for highly
non-gaussian cases. """
from . import functions as func
import sys
hc = []
hc = shapelet_coeff(nmax, basis)
msk=N.zeros(mask.shape, dtype=bool)
for i, v in N.ndenumerate(mask): msk[i] = not v
n,m = image.shape
cf12 = N.zeros(image.shape, dtype=N.float32)
cf21 = N.zeros(image.shape, dtype=N.float32)
index = [(i,j) for i in range(n) for j in range(m)]
for coord in index:
if msk[coord]:
B12 = shapelet_image(basis, beta, coord, hc, 0, 1, image.shape)
cf12[coord] = N.sum(image*B12*msk)
if coord==(27,51): dumpy = B12
B21 = shapelet_image(basis, beta, coord, hc, 1, 0, image.shape)
cf21[coord] = N.sum(image*B21*msk)
else:
cf12[coord] = None
cf21[coord] = None
(xmax,ymax) = N.unravel_index(image.argmax(),image.shape) # FIX with mask
if xmax in [1,n] or ymax in [1,m]:
(m1, m2, m3) = func.moment(mask)
xmax,ymax = N.round(m2)
# in high snr area, get zero crossings for each horizontal and vertical line for c1, c2 resp
tr_mask=mask.transpose()
tr_cf21=cf21.transpose()
try:
(x1,y1) = getzeroes_matrix(mask, cf12, ymax, xmax) # y1 is array of zero crossings
(y2,x2) = getzeroes_matrix(tr_mask, tr_cf21, xmax, ymax) # x2 is array of zero crossings
# find nominal intersection pt as integers
xind=N.where(x1==xmax)
yind=N.where(y2==ymax)
xind=xind[0][0]
yind=yind[0][0]
# now take 2 before and 2 after, fit straight lines, get proper intersection
ninter=5
if xind<3 or yind<3 or xind>n-2 or yind>m-2:
ninter = 3
xft1 = x1[xind-(ninter-1)//2:xind+(ninter-1)//2+1]
yft1 = y1[xind-(ninter-1)//2:xind+(ninter-1)//2+1]
xft2 = x2[yind-(ninter-1)//2:yind+(ninter-1)//2+1]
yft2 = y2[yind-(ninter-1)//2:yind+(ninter-1)//2+1]
sig = N.ones(ninter, dtype=float)
smask1=N.array([r == 0 for r in yft1])
smask2=N.array([r == 0 for r in xft2])
cen=[0.]*2
if sum(smask1) < ninter and sum(smask2) < ninter:
[c1, m1], errors = func.fit_mask_1d(xft1, yft1, sig, smask1, func.poly, do_err=False, order=1)
[c2, m2], errors = func.fit_mask_1d(yft2, xft2, sig, smask2, func.poly, do_err=False, order=1)
if 1.0 - m1*m2 == 0.0:
error = 1
else:
# intersect the straight-line fits col = c1 + m1*row and row = c2 + m2*col
cen[0] = (c2 + m2*c1)/(1.0 - m1*m2)
cen[1] = c1 + m1*cen[0]
error = shapelet_check_centre(image, mask, cen, beam_pix)
else:
error = 1
except:
error = 1
if error > 0:
#print 'Error '+str(error)+' in finding centre, will take 1st moment instead.'
(m1, m2, m3) = func.moment(image, mask)
cen = m2
return cen
def getzeroes_matrix(mask, cf, cen, cenx):
""" For a matrix cf, and a mask, this returns two vectors; x is the x-coordinate
and y is the interpolated y-coordinate where the matrix cf croses zero. If there
is no zero-crossing, y is zero for that column x. """
x = N.arange(cf.shape[0], dtype=N.float32)
y = N.zeros(cf.shape[0], dtype=N.float32)
# import pylab as pl
# pl.clf()
# pl.imshow(cf, interpolation='nearest')
# ii = N.random.randint(100); pl.title(' zeroes' + str(ii))
# print 'ZZ ',cen, cenx, ii
for i in range(cf.shape[0]):
l = [mask[i,j] for j in range(cf.shape[1])]
npts = len(l)-sum(l)
#print 'npts = ',npts
if npts > 3 and not N.isnan(cf[i,cen]):
mrow=mask[i,:]
if sum(l) == 0:
low=0
up=cf.shape[1]-1
else:
low = mrow.nonzero()[0][mrow.nonzero()[0].searchsorted(cen)-1]
#print 'mrow = ',i, mrow, low,
try:
up = mrow.nonzero()[0][mrow.nonzero()[0].searchsorted(cen)]
#print 'up1= ', up
except IndexError:
if mrow.nonzero()[0].searchsorted(cen) == len(mrow.nonzero()[0]):
up = len(mrow)
#print 'up2= ', up,
else:
raise
#print
low += 1; up -= 1
npoint = up-low+1
xfn = N.arange(npoint)+low
yfn = cf[i,xfn]
root, error = shapelet_getroot(xfn, yfn, x[i], cenx, cen)
if error != 1:
y[i] = root
else:
y[i] = 0.0
else:
y[i] = 0.0
return x,y
def shapelet_getroot(xfn, yfn, xco, xcen, ycen):
""" This finds the root for finding the shapelet centre. If there are multiple roots, takes
that which closest to the 'centre', taken as the intensity barycentre. This is the python
version of getroot.f of anaamika."""
from . import functions as func
root=None
npoint=len(xfn)
error=0
if npoint == 0:
error = 1
elif yfn.max()*yfn.min() >= 0.:
error=1
minint=0; minintold=0
for i in range(1,npoint):
if yfn[i-1]*yfn[i] < 0.:
if minintold == 0: # so take nearest to centre
if abs(yfn[i-1]) < abs(yfn[i]):
minint=i-1
else:
minint=i
else:
dnew=func.dist_2pt([xco,xfn[i]], [xcen,ycen])
dold=func.dist_2pt([xco,xfn[minintold]], [xcen,ycen])
if dnew <= dold:
minint=i
else:
minint=minintold
minintold=minint
if minint < 1 or minint > npoint: error=1
if error != 1:
low=minint-min(2,minint)
up=minint+min(2,npoint-1-minint) # clamp the fitting window to valid (0-based) array indices
nfit=up-low+1
xfit=xfn[low:low+nfit]
yfit=yfn[low:low+nfit]
sig=N.ones(nfit)
smask=N.zeros(nfit, dtype=bool)
xx=[i for i in range(low,low+nfit)]
[c, m], errors = func.fit_mask_1d(xfit, yfit, sig, smask, func.poly, do_err=False, order=1)
root=-c/m
if root < xfn[low] or root > xfn[up]: error=1
return root, error
def shapelet_check_centre(image, mask, cen, beam_pix):
"Checks if the calculated centre for shapelet decomposition is sensible. """
from math import pi
error = 0
n, m = image.shape
x, y = round(cen[0]), round(cen[1])
if x <= 0 or x >= n or y <= 0 or y >= m: error = 1
if error == 0:
if not mask[int(round(x)),int(round(y))]: error = 2
if error > 0:
if (N.product(mask.shape)-sum(sum(mask)))/(pi*0.25*beam_pix[0]*beam_pix[1]) < 2.5:
error = error*10 # expected to fail since source is too small
return error
def shape_varybeta(image, mask, basis, betainit, cen, nmax, betarange, plot):
""" Shapelet decomposes and then reconstructs an image with various values of beta
and looks at the residual rms vs beta to estimate the optimal value of beta. """
from . import _cbdsm
nbin = 30
delta = (2.0*betainit-betainit/2.0)/nbin
beta_arr = betainit/4.0+N.arange(nbin)*delta
beta_arr = N.arange(0.5, 6.05, 0.05)
nbin = len(beta_arr)
res_rms=N.zeros(nbin)
for i in range(len(beta_arr)):
cf = decompose_shapelets(image, mask, basis, beta_arr[i], cen, nmax, mode='')
im_r = reconstruct_shapelets(image.shape, mask, basis, beta_arr[i], cen, nmax, cf)
im_res = image - im_r
ind = N.where(~mask)
res_rms[i] = N.std(im_res[ind])
minind = N.argmin(res_rms)
if minind > 1 and minind < nbin:
beta = beta_arr[minind]
error = 0
else:
beta = betainit
error = 1
# if plot:
# pl.figure()
# pl.plot(beta_arr,res_rms,'*-')
# pl.xlabel('Beta')
# pl.ylabel('Residual rms')
return beta, error
def shapelet_coeff(nmax=20,basis='cartesian'):
""" Computes shapelet coefficient matrix for cartesian and polar
hc=shapelet_coeff(nmax=10, basis='cartesian') or
hc=shapelet_coeff(10) or hc=shapelet_coeff().
hc(nmax) will be a nmax+1 X nmax+1 matrix."""
import numpy as N
order=nmax+1
if basis == 'polar':
raise NotImplementedError("Polar shapelets not yet implemented.")
hc=N.zeros([order,order])
hnm1=N.zeros(order); hn=N.zeros(order)
hnm1[0]=1.0; hn[0]=0.0; hn[1]=2.0
hc[0]=hnm1
hc[1]=hn
for ind in range(3,order+1):
n=ind-2
hnp1=-2.0*n*hnm1
hnp1[1:] += 2.0*hn[:order-1]
hc[ind-1]=hnp1
hnm1=hn
hn=hnp1
return hc
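# Illustrative sketch (not part of the original module): the recurrence above
# generates physicists' Hermite polynomial coefficients in increasing powers
# of x (H0=1, H1=2x, H2=4x^2-2, ...), which can be checked against numpy:
import numpy as N
from numpy.polynomial.hermite import herm2poly
hc = shapelet_coeff(3)
for order_n in range(4):
    e = N.zeros(order_n + 1)
    e[order_n] = 1.0
    assert N.allclose(hc[order_n, :order_n + 1], herm2poly(e))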
PyBDSF-1.11.0/bdsf/sourcecounts.py 0000664 0000000 0000000 00000030453 14650706641 0016663 0 ustar 00root root 0000000 0000000 """Sourcecounts
s is the flux in Jy and n is the number of sources with flux > s per steradian
"""
import numpy as N
s=N.array([ 9.9999997e-05, 0.00010328281, 0.00010667340, 0.00011017529, 0.00011379215, 0.00011752774, 0.00012138595, \
0.00012537083, 0.00012948645, 0.00013373725, 0.00013812761, 0.00014266209, 0.00014734542, 0.00015218249, 0.00015717837, \
0.00016233824, 0.00016766752, 0.00017317173, 0.00017885664, 0.00018472817, 0.00019079246, 0.00019705582, 0.00020352470, \
0.00021020604, 0.00021710672, 0.00022423393, 0.00023159511, 0.00023919797, 0.00024705040, 0.00025516062, 0.00026353705, \
0.00027218851, 0.00028112394, 0.00029035273, 0.00029988447, 0.00030972913, 0.00031989696, 0.00033039862, 0.00034124497, \
0.00035244724, 0.00036401744, 0.00037596744, 0.00038830977, 0.00040105727, 0.00041422324, 0.00042782145, 0.00044186602, \
0.00045637166, 0.00047135353, 0.00048682719, 0.00050280854, 0.00051931484, 0.00053636299, 0.00055397081, 0.00057215663, \
0.00059093948, 0.00061033899, 0.00063037529, 0.00065106933, 0.00067244272, 0.00069451780, 0.00071731757, 0.00074086577, \
0.00076518703, 0.00079030672, 0.00081625103, 0.00084304705, 0.00087072275, 0.00089930650, 0.00092882907, 0.00095932081, \
0.00099081360, 0.0010233402, 0.0010569346, 0.0010916317, 0.0011274681, 0.0011644807, 0.0012027085, 0.0012421905, \
0.0012829694, 0.0013250869, 0.0013685870, 0.0014135153, 0.0014599183, 0.0015078448, 0.0015573446, 0.0016084694, \
0.0016612725, 0.0017158090, 0.0017721358, 0.0018303118, 0.0018903976, 0.0019524558, 0.0020165513, 0.0020827511, \
0.0021511239, 0.0022217415, 0.0022946771, 0.0023700071, 0.0024478103, 0.0025281659, 0.0026111610, 0.0026968806, \
0.0027854142, 0.0028768543, 0.0029712960, 0.0030688383, 0.0031695808, 0.0032736324, 0.0033810998, 0.0034920950, \
0.0036067341, 0.0037251366, 0.0038474260, 0.0039737299, 0.0041041803, 0.0042389128, 0.0043780687, 0.0045217923, \
0.0046702349, 0.0048235501, 0.0049818982, 0.0051454445, 0.0053143604, 0.0054888208, 0.0056690089, 0.0058551119, \
0.0060473247, 0.0062458473, 0.0064508831, 0.0066626542, 0.0068813767, 0.0071072797, 0.0073405989, 0.0075815772, \
0.0078304661, 0.0080875214, 0.0083530201, 0.0086272340, 0.0089104511, 0.0092029646, 0.0095050810, 0.0098171150, \
0.010139393, 0.010472251, 0.010816036, 0.011171106, 0.011537833, 0.011916599, 0.012307799, 0.012711842, 0.013129148, \
0.013560154, 0.014005309, 0.014465077, 0.014939931, 0.015430382, 0.015936933, 0.016460113, 0.017000468, 0.017558562, \
0.018134978, 0.018730316, 0.019345198, 0.019980265, 0.020636180, 0.021313628, 0.022013316, 0.022735972, 0.023482339, \
0.024253221, 0.025049411, 0.025871737, 0.026721058, 0.027598262, 0.028504262, 0.029440004, 0.030406466, 0.031404655, \
0.032435611, 0.033500414, 0.034600168, 0.035736032, 0.036909178, 0.038120817, 0.039372254, 0.040664773, 0.041999724, \
0.043378498, 0.044802535, 0.046273317, 0.047792386, 0.049361322, 0.050981764, 0.052655403, 0.054383982, 0.056169309, \
0.058013245, 0.059917714, 0.061884668, 0.063916229, 0.066014484, 0.068181612, 0.070419893, 0.072731644, 0.075119294, \
0.077585325, 0.080132306, 0.082762904, 0.085479856, 0.088286005, 0.091184273, 0.094177686, 0.097269312, 0.10046248, \
0.10376048, 0.10716674, 0.11068483, 0.11431842, 0.11807128, 0.12194734, 0.12595065, 0.13008538, 0.13435584, 0.13876650, \
0.14332195, 0.14802694, 0.15288639, 0.15790530, 0.16308904, 0.16844295, 0.17397262, 0.17968382, 0.18558250, 0.19167484, \
0.19796717, 0.20446607, 0.21117832, 0.21811092, 0.22527111, 0.23266634, 0.24030435, 0.24819310, 0.25634068, 0.26475587, \
0.27344733, 0.28242409, 0.29169556, 0.30127138, 0.31116158, 0.32137644, 0.33192664, 0.34282318, 0.35407743, 0.36570114, \
0.37770644, 0.39010584, 0.40291208, 0.41613895, 0.42980003, 0.44390959, 0.45848233, 0.47353345, 0.48907870, 0.50513422, \
0.52171689, 0.53884387, 0.55653316, 0.57480311, 0.59367281, 0.61316204, 0.63329101, 0.65408045, 0.67555267, 0.69772983, \
0.72063506, 0.74429214, 0.76872587, 0.79396176, 0.82002604, 0.84694600, 0.87474972, 0.90346611, 0.93312526, 0.96375805, \
0.99539644, 1.0280730, 1.0618227, 1.0966804, 1.1326823, 1.1698662, 1.2082708, 1.2479361, 1.2889036, 1.3312160, 1.3749173, \
1.4200534, 1.4666711, 1.5148191, 1.5645479, 1.6159091, 1.6689565, 1.7237452, 1.7803327, 1.8387777, 1.8991414, 1.9614867, \
2.0258787, 2.0923846, 2.1610713, 2.2320154, 2.3052883, 2.3809667, 2.4591296, 2.5398583, 2.6232371, 2.7093532, 2.7982962, \
2.8901591, 2.9850378, 3.0830312, 3.1842413, 3.2887743, 3.3967385, 3.5082474, 3.6234167, 3.7423668, 3.8652217, 3.9921098, \
4.1231637, 4.2585196, 4.3983188, 4.5427074, 4.6918364, 4.8458605, 5.0049415, 5.1692443, 5.3389411, 5.5142026, 5.6952238, \
5.8821878, 6.0752892, 6.2747297, 6.4807177, 6.6934676, 6.9132018, 7.1401496, 7.3745475, 7.6166406, 7.8666806, 8.1249294, \
8.3916559, 8.6671391, 8.9516649, 9.2455320, 9.5490456, 9.8625231, 10.186292, 10.520689, 10.866064, 11.222776, 11.591200, \
11.971718, 12.364727, 12.770638, 13.189876, 13.622874, 14.070073, 14.531968, 15.009026, 15.501744, 16.010639, 16.536238, \
17.079092, 17.639769, 18.218849, 18.816940, 19.434666, 20.072670, 20.731619, 21.412201, 22.115124, 22.841122, 23.590954, \
24.365402, 25.165274, 25.991404, 26.844654, 27.725914, 28.636105, 29.576176, 30.547108, 31.549913, 32.585640, 33.655365, \
34.760208, 35.901321, 37.079857, 38.297119, 39.554344, 40.852840, 42.193966, 43.579117, 45.009739, 46.487324, 48.013420, \
49.589611, 51.217548, 52.898926, 54.635498, 56.429081, 58.281548, 60.194820, 62.170906, 64.211861, 66.319824, 68.496979, \
70.745613, 73.068062, 75.466751, 77.944183, 80.502945, 83.145714, 85.875237, 88.694359, 91.606033, 94.613190, 97.719162, \
100.92711, 104.24036, 107.66238, 111.19673, 114.84712, 118.61734, 122.51133, 126.53315, 130.68700, 134.97722, 139.40826, \
143.98479, 148.71155, 153.59348, 158.63567, 163.84338, 169.22206, 174.77731, 180.51492, 186.44090, 192.56142, 198.88284, \
205.41180, 212.15511, 219.11977, 226.31306, 233.74251, 241.41557, 249.34081, 257.52621, 265.98032, 274.71198, 283.73026, \
293.04462, 302.66473, 312.60065, 322.86276, 333.46173, 344.40869, 355.71500, 367.39246, 379.45328, 391.91003, 404.77573, \
418.06375, 431.78802, 445.96283, 460.60297, 475.72372, 491.34085, 507.47067, 524.13000, 541.33624, 559.10730, 577.46179, \
596.41876, 615.99811, 636.21954, 657.10541, 678.67700, 700.95673, 723.96783, 747.73438, 772.28113, 797.63373, 823.81854, \
850.86298, 878.79529, 907.64453, 937.44080, 968.21527, 1000.0000])
n=N.array([ 3.7709775e+10, 3.6065767e+10, 3.4493432e+10, 3.2989649e+10, 3.1551425e+10, 3.0175900e+10, \
2.8860342e+10, 2.7602137e+10, \
2.6398808e+10, 2.5247922e+10, 2.4147204e+10, 2.3094475e+10, 2.2087643e+10, 2.1124704e+10, 2.0203747e+10, 1.9322939e+10, \
1.8480527e+10, 1.7674846e+10, 1.6904289e+10, 1.6167328e+10, 1.5462490e+10, 1.4788384e+10, 1.4143675e+10, 1.3527065e+10, \
1.2937335e+10, 1.2373316e+10, 1.1833886e+10, 1.1317971e+10, 1.0824550e+10, 1.0352640e+10, 9.9013028e+09, 9.4696428e+09, \
9.0568028e+09, 8.6619587e+09, 8.2843305e+09, 7.9231647e+09, 7.5777439e+09, 7.2473825e+09, 6.9314243e+09, 6.6292444e+09, \
6.3402342e+09, 6.0638244e+09, 5.7994639e+09, 5.5466291e+09, 5.3048166e+09, 5.0735457e+09, 4.8523587e+09, 4.6408141e+09, \
4.4384916e+09, 4.2449897e+09, 4.0599278e+09, 3.8829297e+09, 3.7136481e+09, 3.5517468e+09, 3.3969042e+09, 3.2488120e+09, \
3.1071754e+09, 2.9717143e+09, 2.8421588e+09, 2.7182515e+09, 2.5997458e+09, 2.4864064e+09, 2.3780086e+09, 2.2743360e+09, \
2.1751834e+09, 2.0803535e+09, 1.9896579e+09, 1.9029162e+09, 1.8199575e+09, 1.7406141e+09, 1.6647299e+09, 1.5921536e+09, \
1.5227420e+09, 1.4563558e+09, 1.3928644e+09, 1.3321405e+09, 1.2740643e+09, 1.2185199e+09, 1.1653979e+09, 1.1145907e+09, \
1.0659987e+09, 1.0195252e+09, 9.7507763e+08, 9.3256806e+08, 8.9191149e+08, 8.5302746e+08, 8.1583853e+08, 7.8027117e+08, \
7.4625421e+08, 7.1372032e+08, 6.8260474e+08, 6.5284576e+08, 6.2438406e+08, 5.9716326e+08, 5.7112922e+08, 5.4623008e+08, \
5.2241651e+08, 4.9964106e+08, 4.7785866e+08, 4.5702573e+08, 4.3710147e+08, 4.1804544e+08, 3.9982026e+08, 3.8238954e+08, \
3.6571878e+08, 3.4977482e+08, 3.3452595e+08, 3.1994208e+08, 3.0599382e+08, 2.9265363e+08, 2.7989501e+08, 2.6769266e+08, \
2.5602224e+08, 2.4486062e+08, 2.3418562e+08, 2.2397598e+08, 2.1421147e+08, 2.0487264e+08, 1.9594099e+08, 1.8739867e+08, \
1.7922877e+08, 1.7141509e+08, 1.6394203e+08, 1.5679477e+08, 1.4995909e+08, 1.4342146e+08, 1.3716880e+08, 1.3118874e+08, \
1.2546940e+08, 1.1999951e+08, 1.1476796e+08, 1.0976452e+08, 1.0497919e+08, 1.0040248e+08, 96025304., 91838968., \
87835200., 84005912., 80343576., 76840880., 73490912., 70286984., 67222736., 64292076., 61489172., 58808476., \
56244648., 53792588., 51447432., 49204512., 47059380., 45007768., 43045600., 41168972., 39374160., 37657620., \
36015888., 34445724., 32944024., 31507790., 30134168., 28820430., 27563966., 26362278., 25212982., 24113790., \
23062518., 22057078., 21095472., 20175804., 19296216., 18454972., 17650402., 16880912., 16144966., 15441105., \
14767931., 14124105., 13508346., 12919433., 12356192., 11817510., 11302309., 10809571., 10338324., 9887611.0, \
9456547.0, 9044277.0, 8649980.0, 8272873.0, 7912207.0, 7567264.5, 7237360.0, 6921837.5, 6620071.0, 6331461.0, \
6055433.0, 5791438.5, 5538953.0, 5297479.5, 5066528.5, 4845647.0, 4634395.5, 4432353.0, 4239119.0, 4054309.2, \
3877556.2, 3708509.5, 3546832.0, 3392203.5, 3244316.0, 3102876.0, 2967602.0, 2838228.0, 2729847.5, 2624870.5, \
2524750.2, 2429229.0, 2338061.0, 2251017.0, 2167880.5, 2088448.4, 2012529.5, 1939942.6, 1870518.1, 1804095.8, \
1740523.8, 1679660.2, 1621370.6, 1565526.9, 1512157.9, 1460823.1, 1411600.0, 1364385.6, 1319083.4, 1275602.0, \
1233855.0, 1193760.2, 1155241.0, 1118223.9, 1082639.1, 1048421.7, 1015509.1, 983842.56, 953365.38, 924024.94, \
895770.81, 868555.00, 842332.44, 817144.38, 792764.06, 769256.56, 746584.44, 724711.62, 703604.50, 683230.62, \
663559.44, 644562.06, 626210.06, 608477.38, 591338.81, 574770.50, 558749.50, 543254.06, 528263.38, 513757.69, \
499717.94, 486126.28, 473019.56, 460262.88, 447906.47, 435935.03, 424334.22, 413089.53, 402187.88, 391616.53, \
381363.44, 371416.84, 361765.66, 352399.28, 343307.47, 334480.50, 325909.12, 317584.28, 309497.50, 301640.47, \
294005.56, 286584.88, 279402.72, 272383.66, 265559.03, 258922.31, 252467.16, 246187.56, 240077.75, 234132.17, \
228345.47, 222712.61, 217228.62, 211888.83, 206688.67, 201623.84, 196690.11, 191883.45, 187200.03, 182636.05, \
178187.92, 173852.23, 169645.80, 165521.64, 161500.73, 157580.05, 153756.70, 150027.80, 146390.59, 142842.50, \
139380.91, 136003.44, 132707.70, 129491.38, 126352.36, 123288.48, 120297.67, 117378.02, 114527.58, 111744.49, \
109027.01, 106373.41, 103781.99, 101262.79, 98789.008, 96373.047, 94013.438, 91708.680, 89457.398, 87258.211, \
85109.805, 83010.930, 80960.391, 78956.891, 76999.320, 75086.586, 73217.594, 71391.312, 69606.703, 67862.789, \
66158.609, 64493.254, 62865.801, 61275.387, 59728.344, 58208.258, 56722.930, 55271.520, 53853.266, 52467.410, \
51113.223, 49789.961, 48496.941, 47233.500, 45998.977, 44792.723, 43614.117, 42462.578, 41337.504, 40238.328, \
39164.488, 38115.469, 37090.699, 36089.668, 35111.887, 34156.848, 33228.004, 32316.406, 31426.256, 30557.111, \
29708.504, 28880.010, 28071.193, 27281.650, 26510.949, 25758.721, 25024.562, 24308.115, 23608.990, 22926.832, \
22261.293, 21612.029, 20978.699, 20360.971, 19758.527, 19171.037, 18598.217, 18039.732, 17495.309, 16966.436, \
16448.930, 15944.685, 15453.382, 14974.762, 14508.550, 14054.481, 13612.296, 13181.744, 12762.577, 12354.543, \
11957.408, 11570.935, 11194.892, 10829.060, 10473.206, 10127.119, 9790.5850, 9463.3916, 9145.3301, 8836.2021, \
8535.8027, 8243.9434, 7961.2437, 7685.7393, 7418.2314, 7158.5264, 6906.4458, 6661.8105, 6424.4482, 6194.1807, \
5970.8477, 5754.2710, 5544.2944, 5340.7573, 5143.5054, 4952.3828, 4767.2373, 4587.9229, 4414.2944, 4246.2085, \
4083.5212, 3926.0977, 3773.8032, 3626.5049, 3484.0715, 3346.3752, 3213.5771, 3084.9297, 2960.6602, 2840.6472, \
2724.7744, 2612.9258, 2504.9900, 2400.8569, 2300.4167, 2203.5654, 2110.1995, 2020.2166, 1933.5188, 1850.0120, \
1769.5944, 1692.1769, 1617.6688, 1545.9810, 1477.0260, 1410.7202, 1346.9801, 1285.7245, 1226.8739, 1170.3518, \
1116.1688, 1064.0614, 1014.0633, 966.10516, 920.11682, 876.03217, 833.78497, 793.31201, 754.55164, 717.44275, \
681.92755, 647.94806, 615.44952, 584.37762, 554.67981, 526.30505, 499.20432, 473.32895, 448.63220, 425.07007, \
402.59656, 381.16980, 360.74893, 341.31854, 322.78470, 305.14084, 288.35059, 272.37881, 257.19098, 242.75432, \
229.03673, 216.00752, 203.63695, 191.89633])
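# The tabulated s values above appear to be in mJy (they span ~1e-4 to 1e3);
# the division below converts them to Jy, matching the units stated in the
# module docstring. (The interpretation of the raw units is an assumption.)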
s=s/1000.0
PyBDSF-1.11.0/bdsf/spectralindex.py 0000664 0000000 0000000 00000072174 14650706641 0017002 0 ustar 00root root 0000000 0000000 """Module Spectral index.
This module calculates spectral indices for Gaussians and sources for a multichannel cube.
"""
from __future__ import print_function
from __future__ import absolute_import
import numpy as N
from .image import Op
from . import mylogger
from copy import deepcopy as cp
from . import functions as func
from . import statusbar
class Op_spectralindex(Op):
"""Computes spectral index of every gaussian and every source.
First do a quick fit to all channels to determine whether averaging over
frequency is needed to obtain desired SNR (set by img.opts.specind_snr).
This averaging should be done separately for both Gaussians and
sources. For S and C sources, averaging only needs to be done once
(as the sources have only one Gaussian).
For M sources, averaging is needed twice: once to obtain the desired
SNR for the faintest Gaussian in the source, and once to obtain the
desired SNR for the source as a whole.
If averaging is needed for a given source, don't let the
number of resulting channels fall below 2. If it is not possible
to obtain the desired SNR in 2 or more channels, set spec_indx of
Gaussian/source to NaN.
"""
def __call__(self, img):
global bar1
mylog = mylogger.logging.getLogger("PyBDSM."+img.log+"SpectIndex")
img.mylog = mylog
if img.opts.spectralindex_do:
mylogger.userinfo(mylog, '\nExtracting spectral indices for all ch0 sources')
shp = img.image_arr.shape
if shp[1] > 1:
# calc freq, beam_spectrum for nchan channels
self.freq_beamsp_unav(img)
sbeam = img.beam_spectrum
freqin = img.freq
# calc initial channel flags if needed
iniflags = self.iniflag(img)
img.specind_iniflags = iniflags
good_chans = N.where(iniflags == False)
unav_image = img.image_arr[0][good_chans]
unav_freqs = freqin[good_chans]
nmax_to_avg = img.opts.specind_maxchan
nchan = unav_image.shape[0]
mylogger.userinfo(mylog, 'Number of channels remaining after initial flagging', str(nchan))
if nmax_to_avg == 0:
nmax_to_avg = nchan
# calculate the rms map of each unflagged channel
bar1 = statusbar.StatusBar('Determining rms for channels in image ... : ', 0, nchan)
if img.opts.quiet == False:
bar1.start()
rms_spec = self.rms_spectrum(img, unav_image) # bar1 updated here
bar2 = statusbar.StatusBar('Calculating spectral indices for sources : ', 0, img.nsrc)
c_wts = img.opts.collapse_wt
snr_desired = img.opts.specind_snr
if img.opts.quiet == False and img.opts.verbose_fitting == False:
bar2.start()
for src in img.sources:
isl = img.islands[src.island_id]
isl_bbox = isl.bbox
# Fit each channel with ch0 Gaussian(s) of the source,
# allowing only the normalization to vary.
chan_images = unav_image[:, isl_bbox[0], isl_bbox[1]]
chan_rms = rms_spec[:, isl_bbox[0], isl_bbox[1]]
beamlist = img.beam_spectrum
unavg_total_flux, e_unavg_total_flux = self.fit_channels(img, chan_images, chan_rms, src, beamlist)
# Check for upper limits and mask. gaus_mask is array of (N_channels x N_gaussians)
# and is True if measured flux is upper limit. n_good_chan_per_gaus is array of N_gaussians
# that gives number of unmasked channels for each Gaussian.
gaus_mask, n_good_chan_per_gaus = self.mask_upper_limits(unavg_total_flux, e_unavg_total_flux, snr_desired)
# Average if needed and fit again
# First find flux of faintest Gaussian of source and use it to estimate rms_desired
gflux = []
for g in src.gaussians:
gflux.append(g.peak_flux)
rms_desired = min(gflux)/snr_desired
total_flux = unavg_total_flux
e_total_flux = e_unavg_total_flux
freq_av = unav_freqs
nchan = chan_images.shape[0]
nchan_prev = nchan
while min(n_good_chan_per_gaus) < 2 and nchan > 2:
avimages, beamlist, freq_av, crms_av = self.windowaverage_cube(chan_images, rms_desired, chan_rms,
c_wts, sbeam, freqin, nmax_to_avg=nmax_to_avg)
total_flux, e_total_flux = self.fit_channels(img, avimages, crms_av, src, beamlist)
gaus_mask, n_good_chan_per_gaus = self.mask_upper_limits(total_flux, e_total_flux, snr_desired)
nchan = avimages.shape[0]
if nchan == nchan_prev:
break
nchan_prev = nchan
rms_desired *= 0.8
# Now fit Gaussian fluxes to obtain spectral indices.
# Only fit if there are detections (at specified sigma threshold)
# in at least two bands. If not, don't fit and set spec_indx
# and error to NaN.
for ig, gaussian in enumerate(src.gaussians):
npos = len(N.where(total_flux[:, ig] > 0.0)[0])
if img.opts.verbose_fitting:
if img.opts.flagchan_snr:
print('Gaussian #%i : averaged to %i channels, of which %i meet SNR criterion' % (gaussian.gaus_num,
len(total_flux[:, ig]), n_good_chan_per_gaus[ig]))
else:
print('Gaussian #%i : averaged to %i channels, all of which will be used' % (gaussian.gaus_num,
len(total_flux[:, ig])))
if (img.opts.flagchan_snr and n_good_chan_per_gaus[ig] < 2) or npos < 2:
gaussian.spec_indx = N.NaN
gaussian.e_spec_indx = N.NaN
gaussian.spec_norm = N.NaN
gaussian.specin_flux = [N.NaN]
gaussian.specin_fluxE = [N.NaN]
gaussian.specin_freq = [N.NaN]
gaussian.specin_freq0 = N.NaN
else:
if img.opts.flagchan_snr:
good_fluxes_ind = N.where(gaus_mask[:, ig] == False)
else:
good_fluxes_ind = range(len(freq_av))
fluxes_to_fit = total_flux[:, ig][good_fluxes_ind]
e_fluxes_to_fit = e_total_flux[:, ig][good_fluxes_ind]
freqs_to_fit = freq_av[good_fluxes_ind]
fit_res = self.fit_specindex(freqs_to_fit, fluxes_to_fit, e_fluxes_to_fit)
gaussian.spec_norm, gaussian.spec_indx, gaussian.e_spec_indx = fit_res
gaussian.specin_flux = fluxes_to_fit.tolist()
gaussian.specin_fluxE = e_fluxes_to_fit.tolist()
gaussian.specin_freq = freqs_to_fit.tolist()
gaussian.specin_freq0 = N.median(freqs_to_fit)
# Next fit total source fluxes for spectral index.
if len(src.gaussians) > 1:
# First, check unaveraged SNRs for total source.
src_total_flux = N.zeros((chan_images.shape[0], 1))
src_e_total_flux = N.zeros((chan_images.shape[0], 1))
src_total_flux[:,0] = N.sum(unavg_total_flux, 1) # sum over all Gaussians in source to obtain total fluxes in each channel
src_e_total_flux[:,0] = N.sqrt(N.sum(N.power(e_unavg_total_flux, 2.0), 1))
src_mask, n_good_chan = self.mask_upper_limits(src_total_flux, src_e_total_flux, snr_desired)
# Average if needed and fit again
rms_desired = src.peak_flux_max/snr_desired
total_flux = unavg_total_flux
e_total_flux = e_unavg_total_flux
freq_av = unav_freqs
nchan = chan_images.shape[0]
nchan_prev = nchan
while n_good_chan < 2 and nchan > 2:
avimages, beamlist, freq_av, crms_av = self.windowaverage_cube(chan_images, rms_desired, chan_rms,
c_wts, sbeam, freqin, nmax_to_avg=nmax_to_avg)
total_flux, e_total_flux = self.fit_channels(img, avimages, crms_av, src, beamlist)
src_total_flux = N.sum(total_flux, 1) # sum over all Gaussians in source to obtain total fluxes in each channel
src_e_total_flux = N.sqrt(N.sum(N.power(e_total_flux, 2.0), 1))
src_mask, n_good_chan = self.mask_upper_limits(src_total_flux, src_e_total_flux, snr_desired)
nchan = avimages.shape[0]
if nchan == nchan_prev:
break
nchan_prev = nchan
rms_desired *= 0.8
# Now fit source for spectral index.
src_total_flux = src_total_flux.reshape((src_total_flux.shape[0],))
src_e_total_flux = src_e_total_flux.reshape((src_e_total_flux.shape[0],))
src_mask = src_mask.reshape((src_mask.shape[0],))
if img.opts.verbose_fitting:
if img.opts.flagchan_snr:
print('Source #%i : averaged to %i channels, of which %i meet SNR criterion' % (src.source_id,
len(src_total_flux), nchan))
else:
print('Source #%i : averaged to %i channels, all of which will be used' % (src.source_id,
len(src_total_flux)))
npos = len(N.where(src_total_flux > 0.0)[0])
if isinstance(n_good_chan, int):
n_good_chan = [n_good_chan]
if (img.opts.flagchan_snr and n_good_chan[0] < 2) or npos < 2:
src.spec_indx = N.NaN
src.e_spec_indx = N.NaN
src.spec_norm = N.NaN
src.specin_flux = [N.NaN]
src.specin_fluxE = [N.NaN]
src.specin_freq = [N.NaN]
src.specin_freq0 = N.NaN
else:
if img.opts.flagchan_snr:
good_fluxes_ind = N.where(src_mask == False)
else:
good_fluxes_ind = range(len(freq_av))
fluxes_to_fit = src_total_flux[good_fluxes_ind]
e_fluxes_to_fit = src_e_total_flux[good_fluxes_ind]
freqs_to_fit = freq_av[good_fluxes_ind]
# if len(freqs_to_fit.shape) == 2:
# freqs_to_fit = freqs_to_fit.reshape((freqs_to_fit.shape[0],))
# if len(fluxes_to_fit.shape) == 2:
# fluxes_to_fit = fluxes_to_fit.reshape((fluxes_to_fit.shape[0],))
# if len(e_fluxes_to_fit.shape) == 2:
# e_fluxes_to_fit = e_fluxes_to_fit.reshape((e_fluxes_to_fit.shape[0],))
fit_res = self.fit_specindex(freqs_to_fit, fluxes_to_fit, e_fluxes_to_fit)
src.spec_norm, src.spec_indx, src.e_spec_indx = fit_res
src.specin_flux = fluxes_to_fit.tolist()
src.specin_fluxE = e_fluxes_to_fit.tolist()
src.specin_freq = freqs_to_fit.tolist()
src.specin_freq0 = N.median(freqs_to_fit)
else:
src.spec_norm = src.gaussians[0].spec_norm
src.spec_indx = src.gaussians[0].spec_indx
src.e_spec_indx = src.gaussians[0].e_spec_indx
src.specin_flux = src.gaussians[0].specin_flux
src.specin_fluxE = src.gaussians[0].specin_fluxE
src.specin_freq = src.gaussians[0].specin_freq
src.specin_freq0 = src.gaussians[0].specin_freq0
if bar2.started:
bar2.increment()
if bar2.started:
bar2.stop()
img.completed_Ops.append('spectralindex')
else:
mylog.warning('Image has only one channel. Spectral index module disabled.')
img.opts.spectralindex_do = False
####################################################################################
def flagchans_rmschan(self, crms, zeroflags, iniflags, cutoff):
""" Calculate clipped rms (r1) of the rms as fn of channel, crms, with zeroflags
applied and kappa=cutoff. Then exclude crms=0 (for NaN mages etc) and get ch.s
which are more than cutoff*r1 away from median of rms. If this is less than 10 %
of all channels, flag them.
"""
# crms_rms and median dont include rms=0 channels
nchan = len(crms)
mean, rms, cmean, crms_rms, cnt = func.bstat(crms, zeroflags, cutoff)
zeroind = N.where(crms==0)[0]
median = N.median(N.delete(crms, zeroind))
badind = N.where(N.abs(N.delete(crms, zeroind) - median)/crms_rms >=cutoff)[0]
frac = len(badind)/(nchan - len(zeroind))
if frac <= 0.1:
badind = N.where(N.abs(crms - median)/crms_rms >=cutoff)[0]
iniflags[badind] = True
return iniflags
####################################################################################
def iniflag(self, img):
""" Calculate clipped rms of every channel, and then median and clipped rms of this rms distribution.
Exclude channels where rms=0 (all pixels 0 or blanked) and of the remaining, if outliers beyond 5 sigma
are less then 10 % of number of channels, flag them. This is done only when flagchan_rms = True.
If False, only rms=0 (meaning, entire channel image is zero or blanked) is flagged."""
image = img.image_arr
nchan = image.shape[1]
iniflags = N.zeros(nchan, bool)
zeroflags = N.zeros(nchan, bool)
crms = img.channel_clippedrms
# First, check whether user has specified any channels to flag
if img.opts.flagchan_list is not None:
for chan in img.opts.flagchan_list:
zeroflags[chan] = True
# Next, flag channels with rms = 0
for ichan in range(nchan):
if crms[ichan] == 0: zeroflags[ichan] = True
iniflags = cp(zeroflags)
# Lastly, flag outliers
if img.opts.flagchan_rms:
iniflags = self.flagchans_rmschan(crms, zeroflags, iniflags, 4.0)
return iniflags
####################################################################################
def freq_beamsp_unav(self, img):
""" Defines img.beam_spectrum and img.freq for the unaveraged cube. """
# Find the channel frequencies
shp = img.image_arr.shape
img.freq = N.zeros(shp[1])
crval, cdelt, crpix = img.freq_pars
if img.wcs_obj.wcs.spec == -1 and \
img.opts.frequency_sp is None:
raise RuntimeError("Frequency info not found in header "\
"and frequencies not specified by user")
else:
if img.opts.frequency_sp is None:
for ichan in range(shp[1]):
img.freq[ichan] = img.wcs_obj.p2f(ichan)
else:
if len(img.opts.frequency_sp) != shp[1]:
raise RuntimeError("Number of channels does not match number "\
"of frequencies specified by user")
for ichan in range(shp[1]):
img.freq[ichan] = img.opts.frequency_sp[ichan]
# Find the channel beam shapes
sbeam = img.opts.beam_spectrum
if sbeam is not None and len(sbeam) != shp[1]:
sbeam = None # sanity check
if sbeam is None:
sbeam = []
hdr = img.header
try:
# search for channel beams in the image header
for ichan in range(shp[1]):
sbeam.append((hdr['BMAJ{}'.format(ichan+1)],
hdr['BMIN{}'.format(ichan+1)],
hdr['BPA{}'.format(ichan+1)]))
except KeyError:
# Channel beam info not found. Use constant beam or one scaled with
# frequency
if img.opts.beam_sp_derive:
# Adjust channel beam sizes assuming that the beam scales as 1/nu
# Note: beam is (major, minor, pos. angle)
for ichan in range(shp[1]):
sbeam.append((img.beam[0] * img.freq[0] / img.freq[ichan],
img.beam[1] * img.freq[0] / img.freq[ichan],
img.beam[2]))
else:
sbeam = [img.beam] * shp[1]
img.beam_spectrum = sbeam
####################################################################################
def rms_spectrum(self, img, image):
from .rmsimage import Op_rmsimage
global bar1
mylog = img.mylog
nchan = image.shape[0]
rms_map = img.use_rms_map
if img.opts.kappa_clip is None:
kappa = -img.pixel_beamarea()
else:
kappa = img.opts.kappa_clip
map_opts = (kappa, img.rms_box, img.opts.spline_rank)
if rms_map:
rms_spec = N.zeros(image.shape, dtype=N.float32)
mean = N.zeros(image.shape[1:], dtype=N.float32)
rms = N.zeros(image.shape[1:], dtype=N.float32)
median_rms = N.zeros(nchan)
for ichan in range(nchan):
if bar1.started:
bar1.increment()
dumi = Op_rmsimage()
mask = N.isnan(image[ichan])
Op_rmsimage.map_2d(dumi, image[ichan], mean, rms, mask, *map_opts)
rms_spec[ichan,:,:] = rms
median_rms[ichan] = N.median(rms)
else:
rms_spec = N.zeros(image.shape, dtype=N.float32)
for ichan in range(nchan):
if bar1.started:
bar1.increment()
rms_spec[ichan,:,:] = img.channel_clippedrms[ichan]
median_rms = rms_spec
if bar1.started:
bar1.stop()
str1 = " ".join(["%9.4e" % n for n in img.channel_clippedrms])
if rms_map:
mylog.debug('%s %s ' % ('Median rms of channels : ', str1))
mylog.info('RMS image made for each channel')
else:
mylog.debug('%s %s ' % ('RMS of channels : ', str1))
mylog.info('Clipped rms calculated for each channel')
return rms_spec
####################################################################################
def fit_specindex(self, freqarr, fluxarr, efluxarr, do_log=False):
""" Fits spectral index to data.
do_log is True/False implies you fit spectral index in logFlux vs logFreq space or not."""
from . import functions as func
import math
x = freqarr
flux = fluxarr
eflux = efluxarr
f0 = N.median(x)
mask = N.zeros(len(fluxarr), dtype=bool)
nan_errors = N.isnan(efluxarr)
mask[nan_errors] = 1
if do_log:
x = N.log10(x/f0); y = N.log10(flux); sig = N.abs(eflux/flux)/2.303
funct = func.poly
else:
x = x/f0; y = flux; sig = eflux
funct = func.sp_in
spin, espin = func.fit_mask_1d(x, y, sig, mask, funct, do_err=True, order=1)
if do_log:
spin[0] = math.pow(10.0, spin[0])
espin[0] = spin[0]*math.log(10.0)*espin[0]
return spin[0], spin[1], espin[1]
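# Illustrative sketch (not part of the original module): the model fit above
# (for do_log=False) is a power law S(nu) = S0 * (nu/nu0)**alpha with nu0 the
# median frequency. This standalone version uses scipy's curve_fit in place
# of func.fit_mask_1d; all channel frequencies and fluxes are hypothetical.
import numpy as N
from scipy.optimize import curve_fit
freqs = N.array([120e6, 140e6, 160e6, 180e6])  # Hz
fluxes = N.array([1.30, 1.15, 1.04, 0.95])     # Jy
efluxes = N.array([0.05, 0.05, 0.05, 0.05])
f0 = N.median(freqs)
popt, pcov = curve_fit(lambda x, s0, a: s0*x**a, freqs/f0, fluxes,
                       sigma=efluxes, p0=[1.0, -0.8])
spec_norm, spec_indx = popt
e_spec_indx = N.sqrt(pcov[1, 1])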
########################################################################################
def windowaverage_cube(self, imagein, rms_desired, chanrms, c_wts, sbeam,
freqin, n_min=2, nmax_to_avg=10):
"""Average neighboring channels of cube to obtain desired rms in at least n_min channels
The clipped rms of each channel is compared to the desired rms. If the
clipped rms is too high, the channel is averaged with as many neighboring
channels as necessary to obtain at least the desired rms. This is done
until at least n_min channels are OK. The averaging is done first at
the frequency extremes, so that the frequency range of the resulting averaged
flux array is maximized.
For example, if the desired rms is 0.1 and the list of rms's is:
[0.2, 0.2, 0.3, 0.2, 0.2]
the resulting channels that will be averaged are:
[[0, 1], [2], [3, 4]]
"""
from math import sqrt
from .collapse import avspc_blanks
# chan_list is a list of lists of channels to average. E.g., if we have
# 5 channels and we want to average only the first 2:
# chan_list = [[0,1], [2], [3], [4]]
if len(chanrms.shape) ==3:
crms = N.mean(N.nanmean(chanrms, axis=1), axis=1)
else:
crms = chanrms
chan_list = self.get_avg_chan_list(rms_desired, crms, nmax_to_avg)
n_new = len(chan_list)
beamlist = []
crms_av = N.zeros(n_new)
freq_av = N.zeros(n_new)
imageout = N.zeros((n_new, imagein.shape[1], imagein.shape[2]), dtype=N.float32)
for ichan, avg_list in enumerate(chan_list):
if len(avg_list) > 1:
imageout[ichan], dum = avspc_blanks(avg_list, imagein, crms, c_wts)
chan_slice = slice(avg_list[0], avg_list[-1]+1)
beamlist.append(tuple(N.mean(sbeam[chan_slice], axis=0)))
freq_av[ichan] = N.mean(freqin[chan_slice])
crms_av[ichan] = 1.0/sqrt(N.sum(1.0/crms[chan_slice]**2))
else:
imageout[ichan] = imagein[avg_list[0]]
beamlist.append(sbeam[avg_list[0]])
freq_av[ichan] = N.mean(freqin[avg_list[0]])
crms_av[ichan] = 1.0/sqrt(N.sum(1.0/crms[avg_list[0]]**2))
return imageout, beamlist, freq_av, crms_av
def get_avg_chan_list(self, rms_desired, chanrms, nmax_to_avg):
"""Returns a list of channels to average to obtain given rms_desired
in at least 2 channels"""
end = 0
chan_list = []
nchan = len(chanrms)
good_ind = N.where(N.array(chanrms)/rms_desired < 1.0)[0]
num_good = len(good_ind)
if num_good < 2:
# Average channels at start of list
rms_avg = chanrms[0]
while rms_avg > rms_desired:
end += 1
chan_slice = slice(0, end)
rms_avg = 1.0/N.sqrt(N.sum(1.0/N.array(chanrms)[chan_slice]**2))
if end == nchan or end == nmax_to_avg:
break
if end == 0:
end = 1
chan_list.append(range(end))
if end == nchan:
# This means all channels are averaged into one. If this happens,
# instead average first half and second half to get two channels
# and return.
chan_list = [range(0, int(float(nchan)/2.0)), range(int(float(nchan)/2.0), nchan)]
return chan_list
# Average channels at end of list
rms_avg = chanrms[-1]
end = nchan
start = nchan
while rms_avg > rms_desired:
start -= 1
chan_slice = slice(start, end)
rms_avg = 1.0/N.sqrt(N.sum(1.0/chanrms[chan_slice]/chanrms[chan_slice]))
if end-start == nmax_to_avg:
break
if start <= max(chan_list[0]):
# This means we cannot get two averaged channels with desired rms,
# so just average remaining channels
chan_list.append(range(max(chan_list[0]), nchan))
else:
# First append any channels between those averaged at the start
# and those at the end
for i in range(max(chan_list[0])+1, start):
chan_list.append([i])
if start < end:
chan_list.append(range(start, end))
else:
# No averaging needed
for i in range(nchan):
chan_list.append([i])
return chan_list
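# Illustrative sketch (not part of the original module): with a (hypothetical)
# desired rms of 0.15 and per-channel rms [0.2, 0.2, 0.3, 0.2, 0.2], channels
# are averaged from the frequency extremes first, reproducing the grouping
# shown in the windowaverage_cube docstring:
import numpy as N
op = Op_spectralindex()
groups = op.get_avg_chan_list(0.15, N.array([0.2, 0.2, 0.3, 0.2, 0.2]), 10)
# groups -> [range(0, 2), [2], range(3, 5)], i.e. [[0, 1], [2], [3, 4]]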
def fit_channels(self, img, chan_images, clip_rms, src, beamlist):
"""Fits normalizations of Gaussians in source to multiple channels
If unresolved, the size of the Gaussians are adjusted to match the
channel's beam size (given by beamlist) before fitting.
Returns array of total fluxes (N_channels x N_Gaussians) and array
of errors (N_channels x N_Gaussians).
"""
from . import functions as func
from .const import fwsig
isl = img.islands[src.island_id]
isl_bbox = isl.bbox
nchan = chan_images.shape[0]
x, y = N.mgrid[isl_bbox]
gg = src.gaussians
fitfix = N.ones(len(gg)) # fit only normalization
srcmask = isl.mask_active
total_flux = N.zeros((nchan, len(fitfix))) # array of fluxes: N_channels x N_Gaussians
errors = N.zeros((nchan, len(fitfix))) # array of fluxes: N_channels x N_Gaussians
for cind in range(nchan):
image = chan_images[cind]
gg_adj = self.adjust_size_by_freq(img.beam, beamlist[cind], gg)
p, ep = func.fit_mulgaus2d(image, gg_adj, x, y, srcmask, fitfix, adj=True)
pbeam = img.beam2pix(beamlist[cind])
bm_pix = (pbeam[0]/fwsig, pbeam[1]/fwsig, pbeam[2]) # IN SIGMA UNITS
for ig in range(len(fitfix)):
total_flux[cind, ig] = p[ig*6]*p[ig*6+3]*p[ig*6+4]/(bm_pix[0]*bm_pix[1])
p = N.insert(p, N.arange(len(fitfix))*6+6, total_flux[cind])
rms_isl = N.nanmean(clip_rms[cind])
if N.isnan(rms_isl):
# If the channel rms is all NaNs, use the average rms value over all
# channels instead
rms_isl = N.nanmean(clip_rms)
if not N.isnan(rms_isl):
errors[cind] = func.get_errors(img, p, rms_isl, bm_pix=(bm_pix[0]*fwsig, bm_pix[1]*fwsig, bm_pix[2]))[6]
self.reset_size(gg)
return total_flux, errors
def adjust_size_by_freq(self, beam_ch0, beam, gg):
"""Adjust size of unresolved Gaussians to match the channel's beam size"""
gg_adj = []
for g in gg:
g.size_pix_adj = g.size_pix[:]
if g.deconv_size_sky[0] == 0.0:
g.size_pix_adj[0] *= beam[0] / beam_ch0[0]
if g.deconv_size_sky[1] == 0.0:
g.size_pix_adj[1] *= beam[1] / beam_ch0[1]
gg_adj.append(g)
return gg_adj
def reset_size(self, gg):
"""Reset size of unresolved Gaussians to match the ch0 beam size"""
for g in gg:
if hasattr(g, 'size_pix_adj'): del g.size_pix_adj
def mask_upper_limits(self, total_flux, e_total_flux, threshold):
"""Returns mask of upper limits"""
mask = N.zeros(total_flux.shape, dtype=bool)
if len(total_flux.shape) == 1:
is_src = True
ndet = 0
ncomp = 1
else:
is_src = False
ndet = N.zeros((total_flux.shape[1]), dtype=int)
ncomp = len(ndet)
for ig in range(ncomp):
for ichan in range(total_flux.shape[0]):
if is_src:
meas_flux = total_flux[ichan]
e_meas_flux = e_total_flux[ichan]
else:
meas_flux = total_flux[ichan, ig]
e_meas_flux = e_total_flux[ichan, ig]
if meas_flux < threshold * e_meas_flux:
# Upper limit
if is_src:
mask[ichan] = True
else:
mask[ichan, ig] = True
else:
# Detection
if is_src:
ndet += 1
mask[ichan] = False
else:
ndet[ig] += 1
mask[ichan, ig] = False
return mask, ndet
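# Illustrative sketch (not part of the original module): channels whose flux
# falls below threshold * error are flagged as upper limits. The flux array
# below (3 channels x 1 Gaussian) is hypothetical.
import numpy as N
op = Op_spectralindex()
flux = N.array([[1.0], [0.2], [0.9]])
eflux = N.array([[0.1], [0.1], [0.1]])
mask, ndet = op.mask_upper_limits(flux, eflux, 3.0)
# mask -> [[False], [True], [False]]; ndet -> [2]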
PyBDSF-1.11.0/bdsf/statusbar.py 0000664 0000000 0000000 00000006271 14650706641 0016140 0 ustar 00root root 0000000 0000000 """Display an animated statusbar"""
from __future__ import absolute_import
import sys
import os
from . import functions as func
class StatusBar():
# class variables:
# max: number of total items to be completed
# pos: number of completed items
# spin_pos: current position in array of busy_chars
# inc: amount of items to increment completed 'pos' by
# (shared resource)
# comp: amount of '=' to display in the progress bar
# started: whether or not the statusbar has been started
# color: color of text
def __init__(self, text, pos=0, max=100, color='\033[0m'):
self.text = text
self.pos = pos
self.max = max
self.busy_char = '|'
self.spin_pos = 0
self.inc = 0
self.started = 0
self.color = color
self.__getsize()
if max > 0:
self.comp = int(float(self.pos) / self.max * self.columns)
else:
self.comp = 0
# find number of columns in terminal
def __getsize(self):
try:
rows, columns = func.getTerminalSize()
except ValueError:
rows = columns = 0
if int(columns) > self.max + 2 + 44 + (len(str(self.max))*2 + 2):
self.columns = self.max
else:
# note: -2 is for brackets, -44 for 'Fitting islands...' text, rest is for pos/max text
self.columns = int(columns) - 2 - 44 - (len(str(self.max))*2 + 2)
return
# redraw progress bar
def __print(self):
self.__getsize()
sys.stdout.write('\x1b[1G')
if self.max == 0:
sys.stdout.write(self.color + self.text + '[] 0/0\033[0m\n')
else:
sys.stdout.write(self.color + self.text + '[' + '=' * self.comp + self.busy_char + '-'*(self.columns - self.comp - 1) + '] ' + str(self.pos) + '/' + str(self.max) + '\033[0m')
sys.stdout.write('\x1b[' + str(self.comp + 2 + 44) + 'G')
sys.stdout.flush()
return
# spin the spinner by one increment
def spin(self):
busy_chars = ['|','/','-','\\']
self.spin_pos += 1
if self.spin_pos >= len(busy_chars):
self.spin_pos = 0
# display the busy spinning icon
self.busy_char = busy_chars[self.spin_pos]
sys.stdout.write(self.color + busy_chars[self.spin_pos] + '\x1b[1D' + '\033[0m')
sys.stdout.flush()
# increment number of completed items
def increment(self):
self.inc = 1
if (self.pos + self.inc) >= self.max:
self.pos = self.max
self.comp = self.columns
self.busy_char = ''
self.__print()
return 0
else:
self.pos += self.inc
self.inc = 0
self.spin()
self.comp = int(float(self.pos) / self.max \
* self.columns)
self.__print()
return 1
def start(self):
self.started = 1
self.__print()
def stop(self):
if self.started:
self.pos = self.max
self.comp = self.columns
self.busy_char = ''
self.__print()
sys.stdout.write('\n')
self.started = 0
return 0
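# Illustrative sketch (not part of the original module): typical driver-side
# usage, mirroring how the fitting modules drive the bar; time.sleep stands
# in for real per-item work.
import time
bar = StatusBar('Processing items ......................... : ', 0, 5)
bar.start()
for _ in range(5):
    time.sleep(0.1)
    bar.increment()
bar.stop()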
PyBDSF-1.11.0/bdsf/tc.py 0000664 0000000 0000000 00000050146 14650706641 0014536 0 ustar 00root root 0000000 0000000 """Defines some basic facilities for handling typed values.
It's quite a basic and limited implementation, tailored specifically for
use in the PyBDSM user options and derived properties. For a user
option, one can define a group that is used when listing the options to
the screen. For a property (e.g., flux density), one can define the
column name to be used on output and the associated units.
For a much more generic and capable implementation I can recommend
to look at Enthought Traits package:
http://code.enthought.com/projects/traits
Defined are:
- a number of tc-handlers which allow one to type-check and/or cast
values to a specific type (tcCType, tcEnum, tcTuple,
tcOption, tcInstance, tcList, tcAny). These aren't really
intended for use by end-users.
- class TC, which implements a concept of type-checked property
with default value.
- a number of wrappers around TC to simplify its usage (Int,
Float, Bool, String, Tuple, Enum, Option, NArray, Instance,
tInstance, List, Any)
Usage:
For most needs it's enough to use the wrapper interface.
One important remark -- class containing tc-variables should be
new-style class, thus you should explicitly inherit from 'object'
for Python < 2.6.
Example:
from tc import Int, Float, Bool, String, Tuple, Enum, \\
Option, NArray, Instance, Any, TCInit
class tst(object):
intval = Int(doc="Integer value")
boolval = Bool(True, "Some boolean flag")
op_type = Enum("op1", "op2", doc="Some enumerated value")
def __init__(self):
TCInit(self) ### this is optional
v = tst()
v.intval = 1 # OK
v.intval = "33" # OK, casted to 33
v.intval = "failure" # FAILS
v.op_type= "op2" # OK
v.op_type= "op3" # FAILS
"""
try:
import exceptions
except ImportError:
import builtins as exceptions
import types
_sequence_types = (list, tuple)
_class_types = (type, type)
_basic_types = (bool, int, int,
float, complex,
bytes, str)
############################################################
## Wrappers around TC to simplify its usage for end-users
############################################################
def Int(value=0, doc=None, group=None, colname=None, units=None):
"""Create tc-value of type int"""
return TC(value, tcCType(int), doc, group, colname, units)
def Float(value=0., doc=None, group=None, colname=None, units=None):
"""Create tc-value of type float"""
return TC(value, tcCType(float), doc, group, colname, units)
def Bool(value=False, doc=None, group=None):
"""Create tc-value of type bool"""
return TC(value, tcCType(bool), doc, group)
def String(value='', doc=None, group=None, colname=None, units=None):
"""Create tc-value of type string"""
return TC(value, tcCType(str), doc, group, colname, units)
def Tuple(*values, **kws):
"""Create tc-value of type tuple.
Parameters:
values: zero or more arguments
kws: keyword arguments. Currently only 'doc' and 'group'
are recognized
If the first item of values is a tuple, it's used as the
default value. The remaining arguments are used to build
type constraints and should be TC values.
Examples:
Tuple((1,2,3)) # tuple of 3 integers, default = (1,2,3)
Tuple(Int(3), Float(2)) # tuple of int&float, default = (3, 2.0)
Tuple((1,2), Int(3), Float(2)) # tuple of int+float, default = (1, 2.0)
"""
doc = kws.pop('doc', None)
group = kws.pop('group', None)
if len(values) == 0:
return TC((), tcTuple(), doc, group)
default = None
if isinstance(values[0], tuple):
default, values = values[0], values[1:]
if default is None:
default = tuple([x._default for x in values])
if len(values) == 0:
values = [tc_from(x) for x in default]
return TC(default, tcTuple(*values), doc, group)
def Enum(*values, **kws):
"""Create tc-value of type enum.
Parameters:
values: list or tuple of valid values
kws: keyword arguments. Currently only 'doc' and 'group'
are recognized
Default value is taken to be values[0].
Examples:
Enum(3, [1,2,3]) # enum of 1,2,3 with default of 3
Enum(1,2,3) # enum of 1,2,3 with default of 1
"""
default = values[0]
if (len(values) == 2) and (type(values[1]) in _sequence_types):
values = values[1]
doc = kws.pop('doc', None)
group = kws.pop('group', None)
return TC(default, tcEnum(*values), doc, group)
def Option(value, type=None, doc=None, group=None):
"""Creates optional tc-value.
Parameters:
value, type: default value and type
doc: doc-string for the value
group: group designation for the value
"""
if type is None:
type = tc_from(value)
if isinstance(value, TC):
value = value._default
return TC(value, tcOption(type), doc, group)
def NArray(value=None, or_none=True, doc=None, group=None, colname=None,
units=None):
"""Creates tc-value which holds Numpy arrays
Parameters:
value: default value
or_none: if 'None' is valid value
group: group designation for the value
colname: name of column if quantity is to be output
units: units if quantity is to be output
"""
try:
import numpy as N
    except ImportError:
raise tcError("Can't create tc-value of type NArray " \
"without access to numpy module")
return Instance(value, N.ndarray, or_none, doc, group, colname, units)
def Instance(value, type=None, or_none=True, doc=None, group=None,
colname=None, units=None):
"""Creates tc-value which holds instances of specific class.
Parameters:
value, type: default value and type
or_none: flag if 'None' is valid value for this variable
group: group designation for the value
colname: name of column if quantity is to be output
units: units if quantity is to be output
Examples:
Instance(instance, class)
Instance(instance)
Instance(class)
"""
if type is None:
if isinstance(value, _class_types):
value, type = None, value
else:
type = value.__class__
return TC(value, tcInstance(type, or_none), doc, group, colname, units)
def tInstance(type, or_none=False):
"""Create tc-handler for values which are instances of
the specific class.
    This function is useless on its own; it should be
    used to create an Instance constraint for compound tc-values.
    It's especially useful for classes which have non-trivial
constructors.
Parameters:
type: target type/class
or_none: flag if 'None' is valid value for this variable
Example: we want to define tc-variable holding a list of objects
    List(Instance(slice, or_none=False)) ## FAILS, no default value
List(Instance(slice)) ## works, but list MAY contain None's
List(tInstance(slice)) ## GOOD
"""
if not isinstance(type, _class_types):
type = type.__class__
return tcInstance(type, or_none)
def List(value, type=None, doc=None, group=None, colname=None, units=None):
"""Creates tc-value which represents a list, where each element
    obeys specific type-constraints.
Parameters:
doc: docstring for the object
value, type: default value and type
group: parameter group to which the option belongs
colname: name of column if quantity is to be output
units: units if quantity is to be output
Examples:
List(Int()) # list of integers, default value is []
List([1,2], Int()) # list of integers, default value is [1,2]
    Just one more warning -- List always has a default value
    ([] in the simplest case), and this default value is shared
    between the instances, so be careful not to modify it.
    Counter-example for it:
    class tst(object):
        l = List(Int())
    x1 = tst()
    x2 = tst() # both instances share the default value
    x1.l.append(1)
    print(x2.l) # this will print [1]
    x1.l = [2]
    print(x2.l) # still [1], as x1 has its own local value now
"""
if type is None:
value, type = [], tc_from(value)
return TC(value, tcList(type), doc, group, colname, units)
def Any(value=None, doc=None, group=None):
"""Creates tc-value of arbitrary type
(e.g. no type-checking is done)
"""
return TC(value, tcAny(), doc, group)
def TCInit(obj):
"""Initialize tc-variables in the new instance"""
TC.set_property_names(obj.__class__)
obj._tc_values = {}
############################################################
## Exception type
############################################################
class tcError(exceptions.Exception):
"""Custom exception type to simplify exception handling"""
pass
############################################################
## TC -- type-checked variable
############################################################
class TC(object):
"""TC is an implementation of the typed-checked value.
The primary usage pattern is via class attributes:
class Test(object): ### MUST be new-style object
value1 = Int(3)
value2 = Tuple(Int(5), Option(Any()))
test = Test()
    print(test.value1)
test.value2 = (3, None)
    An important restriction -- it may only be used with
    new-style objects (i.e. objects derived from 'object'
    or 'type'), and the attribute should be defined in the
    class of the object.
"""
def __init__(self, value, _type=None, doc=None, group=None, colname=None,
units=None):
"""Create typed-checked object.
Parameters:
value: default value
_type: type specification (instance of tcHandler) or None
doc: docstring for the object
group: parameter group to which the option belongs
colname: name of column if quantity is to be output
units: units if quantity is to be output
"""
if _type is not None:
self._type = _type
else:
self._type = tc_from(value)
self._default = self._type.cast(value)
self._name = None # name is unknown atm
self._group = group
self._doc = doc
self._colname = colname
self._units = units
self.__doc__ = "default value is %s (%s)" % \
(str(self._default), self._type.info())
if doc is not None:
self.__doc__ += "\n" + doc
def __get__(self, instance, cls):
"""Get a value from instance (or return default value)"""
if instance is None:
return self
try:
return instance._tc_values[self]
        except (AttributeError, KeyError):
return self._default
def __set__(self, instance, value):
"""Set a value"""
try:
values = instance._tc_values
        except AttributeError:
values = instance._tc_values = {}
if not self._name:
self.set_property_names(instance.__class__)
values[self] = self._type.cast(value, self._name,
instance.__class__.__name__)
def __delete__(self, instance):
"""Revert value to default"""
try:
del instance._tc_values[self]
        except (AttributeError, KeyError):
pass
def cast(self, value, *args):
"""See tcHandler.cast"""
return self._type.cast(value, *args)
def info(self):
"""Return description of tc-value"""
return self.__doc__
def doc(self):
"""Return short description of tc-value"""
return self._doc
def group(self):
"""Return group designation of tc-value"""
return self._group
def colname(self):
"""Return column name designation of tc-value"""
return self._colname
def units(self):
"""Return units designation of tc-value"""
return self._units
@staticmethod
def set_property_names(klass):
"""Scan class definition and update _name for all
TC objects defined there"""
for k,v in klass.__dict__.items():
if isinstance(v, TC):
v._name = k
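# Illustrative behaviour of the descriptor protocol above (the class below is
# hypothetical, not part of this module): reads fall back to the shared
# default until __set__ stores a per-instance value in _tc_values.
#   class C(object):
#       x = Int(5)
#   c = C()
#   c.x         # -> 5 (the shared default)
#   c.x = "7"   # cast to 7 and stored in c._tc_values
#   del c.x     # reverts to the default 5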
############################################################
## tcHandler and derived handlers for the specific
## types/values
############################################################
class tcHandler(object):
"""Base class for all tc-handlers"""
def cast(self, value, *args):
"""Check that provided value meets type requirements
or cast it to the specific type.
"""
        self.error(reprx(value), *args)
def is_valid(self, value):
"""Check if provided value can be safely casted to the
proper type"""
try:
self.cast(value)
return True
        except Exception:
return False
def info(self):
"""A description of a valid values"""
return "value of unknown type"
def error(self, value, *args):
if len(args) == 2 and args[0]:
error = "Failed to set property %s of class %s " \
"to a value of %s; expected %s." % \
(args[0], args[1], value, self.info())
else:
error = "A value of %s can't be casted to %s" % \
(value, self.info())
raise tcError(error, value, self.info(), *args)
############################################################
class tcAny(tcHandler):
"""Allows any values of any type"""
def cast(self, value, *args):
return value
def info(self):
return "any value"
############################################################
class tcCType(tcHandler):
"""Ensures that value has a specific python type
This handler implements so-called casting-approach, where
it will accept all values which can be converted to the
required type by the means of casting operation. For
example:
v = tcCType(int)
print v.cast(3) # casted to 3
print v.cast(3.3) # casted to 3
print v.cast("3") # casted to 3
"""
def __init__(self, _type):
"""Creates tcType handler.
Parameters:
_type: Python type object or a value of a reqired type
"""
if not isinstance(_type, type):
_type = type(_type)
self.type = _type
def cast(self, value, *args):
if type(value) is self.type:
return value
try:
return self.type(value)
        except Exception:
self.error("%s (%s)" % (str_type(value), reprx(value)),
*args)
def info(self):
return "a value of %s" % str_type(self.type)
############################################################
class tcEnum(tcHandler):
"""Ensures that a value is a member of a specified list of values"""
def __init__(self, *values):
"""Creates a tcEnum handler.
Parameters:
values: list or tuple of all legal values
Description:
The list of values can be provided as a list/tuple of values
or just specified in-line. So that ''tcEnum([1,2,3])'' and
''tcEnum(1,2,3)'' are equivalent.
"""
if len(values) == 1 and type(values[0]) in _sequence_types:
values = values[0]
self.values = values
def cast(self, value, *args):
if value in self.values:
return value
self.error(repr(value), *args)
def info(self):
res = "a value of %s" % \
" or ".join([repr(x) for x in self.values])
return res
############################################################
class tcTuple(tcHandler):
"""Ensures that a value is a tuple of specified length,
with elements that are of specified type
"""
def __init__(self, *args):
"""Creates a tcTuple handler.
Parameters:
args: list of tuple components
Description:
Each tuple component should be either a specific
tc-handler or a value which can be converted to it
(by the means of tc_from function)
"""
self.tcs = tuple([tc_from(x) for x in args])
def cast(self, value, *args):
try:
if type(value) in _sequence_types:
if len(value) == len(self.tcs):
res = []
for i, h in enumerate(self.tcs):
res.append(h.cast(value[i]))
return tuple(res)
        except Exception:
pass
self.error(reprx(value), *args)
def info(self):
res = "a tuple of the form: (%s)" % \
", ".join([x.info() for x in self.tcs])
return res
############################################################
class tcOption(tcHandler):
"""Implements an optional value: None or a value
restricted by another tcHandler"""
def __init__(self, _type):
"""Creates tcOption handler.
Parameters:
        _type: tc-handler, Python type object or a value of
        a required type
"""
self.type = tc_from(_type)
def cast(self, value, *args):
try:
if value is None:
return value
return self.type.cast(value)
        except Exception:
self.error("%s (%s)" % (str_type(value), reprx(value)),
*args)
def info(self):
return self.type.info() + " or None"
############################################################
class tcInstance(tcHandler):
"""Ensures that a value belongs to a specified python
    class or type (or one of its subclasses).
"""
def __init__(self, klass, or_none=True):
"""Creates tcInstance handler.
Parameters:
klass: Python class, type or an instance of python class
or_none: whether we should accept None as a valid value
(defaults to True)
"""
if not isinstance(klass, _class_types):
klass = klass.__class__
self.klass = klass
self.or_none = or_none
def cast(self, value, *args):
if (value is None) and self.or_none:
return value
if isinstance(value, self.klass):
return value
self.error(reprx(value), *args)
def info(self):
res = "an instance of " + str_type(self.klass)
if self.or_none:
res += " or None"
return res
############################################################
class tcList(tcHandler):
"""Ensures that a value is a list containing elements of
a specified kind. It also ensures that any change made
to the list does't violate the list type constrains.
"""
def __init__(self, kind):
"""Creates tcList handler.
Parameters:
kind: tc-handler constraining elements of the list
"""
self.type = tc_from(kind)
def cast(self, value, *args):
if isinstance(value, _sequence_types):
v = [self.type.cast(x, *args) for x in value]
return list(v)
self.error(reprx(value), *args)
def info(self):
return "a list where each element is " + self.type.info()
############################################################
def tc_from(v):
"""tc_from tries to guess an appropriate tc-handler for the
provided object.
    The basic logic is as follows:
    - a TC object results in its internal type constraint
    - for instances and type objects of the basic numeric
      types we use the tcCType handler
    - a list of values results in a tcEnum handler
    - a tuple of values results in a tcTuple handler
    - a value of None results in a tcAny handler
"""
if isinstance(v, TC):
return v._type
if isinstance(v, tcHandler):
return v
if v in _basic_types:
return tcCType(v)
if type(v) in _basic_types:
return tcCType(v)
if type(v) is list:
return tcEnum(v)
if type(v) is tuple:
return tcTuple(*v)
if v is None:
return tcAny()
error = "Can't create tc-handler for a value of %s (%s)" %\
(str_type(v), reprx(v))
raise tcError(error)
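# A quick sketch of the guessing rules above (the values are illustrative):
#   tc_from(int)        -> tcCType(int)      # a basic type object
#   tc_from(3.5)        -> tcCType(float)    # an instance of a basic type
#   tc_from([1, 2, 3])  -> tcEnum(1, 2, 3)   # a list becomes an enumeration
#   tc_from((1, 2.0))   -> tcTuple(1, 2.0)   # a tuple gets per-element handlers
#   tc_from(None)       -> tcAny()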
############################################################
def str_type(v):
"""Pretty-print type of v"""
if isinstance(v, _class_types):
return repr(v)[1:-1]
else:
return repr(type(v))[1:-1]
############################################################
def reprx(v):
"""Pretty-print value of v"""
    # types.InstanceType only exists under Python 2 (old-style classes)
    inst_type = getattr(types, 'InstanceType', None)
    if inst_type is not None and type(v) is inst_type:
        return v.__class__.__name__
    else:
        return repr(v)
PyBDSF-1.11.0/bdsf/threshold.py 0000664 0000000 0000000 00000010773 14650706641 0016126 0 ustar 00root root 0000000 0000000 """Module threshold.
Defines operation Op_threshold. If the option 'thresh' is defined
as 'fdr' then the value of thresh_pix is estimated using the
False Detection Rate algorithm (using the user-defined value
of fdr_alpha). If thresh is None, then the false detection
probability is first calculated, and if the number of false source
pixels is more than fdr_ratio times the estimated number of true source
pixels, then FDR is chosen, else the hard threshold option is chosen.
Masked images aren't handled properly yet.
"""
from __future__ import absolute_import
import numpy as N
from .image import Op, Image, NArray
from math import sqrt,pi,log
from scipy.special import erfc
from . import const
from . import mylogger
class Op_threshold(Op):
"""Calculates FDR threshold if necessary.
Prerequisites: Module preprocess and rmsimage should be run first.
"""
def __call__(self, img):
mylog = mylogger.logging.getLogger("PyBDSM."+img.log+"Threshold ")
data = img.ch0_arr
mask = img.mask_arr
opts = img.opts
size = N.product(img.ch0_arr.shape)
sq2 = sqrt(2)
if img.opts.thresh is None:
source_p = self.get_srcp(img)
cutoff = 5.0
false_p = 0.5*erfc(cutoff/sq2)*size
if false_p < opts.fdr_ratio*source_p:
img.thresh = 'hard'
mylogger.userinfo(mylog, "Expected 5-sigma-clipped false detection rate < fdr_ratio")
mylogger.userinfo(mylog, "Using sigma-clipping ('hard') thresholding")
else:
img.thresh = 'fdr'
mylogger.userinfo(mylog, "Expected 5-sigma-clipped false detection rate > fdr_ratio")
mylogger.userinfo(mylog, "Using FDR (False Detection Rate) thresholding")
mylog.debug('%s %g' % ("Estimated number of source pixels (using sourcecounts.py) is ",source_p))
mylog.debug('%s %g' % ("Number of false positive pixels expected for 5-sigma is ",false_p))
mylog.debug("Threshold for pixels set to : "+str.swapcase(img.thresh))
else:
img.thresh = img.opts.thresh
if img.thresh=='fdr':
cdelt = img.wcs_obj.acdelt[:2]
bm = (img.beam[0], img.beam[1])
area_pix = int(round(N.product(bm)/(abs(N.product(cdelt))* \
pi/(4.0*log(2.0)))))
s0 = 0
for i in range(area_pix):
s0 += 1.0/(i+1)
slope = opts.fdr_alpha/s0
            # sort the erfc-based p-values of the normalised image into a descending vector
v = N.sort(0.5*erfc(N.ravel((data-img.mean_arr)/img.rms_arr)/sq2))[::-1]
pcrit = None
for i,x in enumerate(v):
if x < slope*i/size:
pcrit = x
break
if pcrit is None:
raise RuntimeError("FDR thresholding failed. Please check the input image for problems.")
dumr1 = 1.0-2.0*pcrit
dumr = 8.0/3.0/pi*(pi-3.0)/(4.0-pi)
# approx for inv(erfc)
sigcrit = sqrt(-2.0/pi/dumr-log(1.0-dumr1*dumr1)/2.0+ \
sqrt((2.0/pi/dumr+log(1.0-dumr1*dumr1)/2.0)* \
(2.0/pi/dumr+log(1.0-dumr1*dumr1)/2.0)- \
log(1.0-dumr1*dumr1)/dumr))*sq2
if pcrit == 0.0:
img.thresh = 'hard'
else:
img.thresh_pix = sigcrit
mylogger.userinfo(mylog, "FDR threshold (replaces thresh_pix)", str(round(sigcrit, 4)))
else:
img.thresh_pix = opts.thresh_pix
img.completed_Ops.append('threshold')
return img
def get_srcp(self, img):
from . import sourcecounts as sc
fwsig = const.fwsig
cutoff = 5.0
spin = -0.80
freq = img.frequency
bm = (img.beam[0], img.beam[1])
cdelt = img.wcs_obj.acdelt[:2]
x = 2.0*pi*N.product(bm)/abs(N.product(cdelt))/(fwsig*fwsig)*img.omega
smin_L = img.clipped_rms*cutoff*((1.4e9/freq)**spin)
scflux = sc.s
scnum = sc.n
index = 0
for i,s in enumerate(scflux):
if s < smin_L:
index = i
break
n1 = scnum[index]; n2 = scnum[-1]
s1 = scflux[index]; s2 = scflux[-1]
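        # The two points (s1, n1) and (s2, n2) pin down a power-law model of
        # the cumulative source counts, N(>s) = A * s**(1-alpha) / (alpha-1),
        # so alpha = 1 - log(n1/n2)/log(s1/s2). Illustrative numbers only:
        # n1=1e4 at s1=1 mJy and n2=1e2 at s2=100 mJy give alpha = 2.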
alpha = 1.0-log(n1/n2)/log(s1/s2)
A = (alpha-1.0)*n1/(s1**(1.0-alpha))
source_p = x*A*((cutoff*img.clipped_rms)**(1.0-alpha)) \
/((1.0-alpha)*(1.0-alpha))
return source_p
PyBDSF-1.11.0/bdsf/wavelet_atrous.py 0000664 0000000 0000000 00000076075 14650706641 0017205 0 ustar 00root root 0000000 0000000 """Compute a-trous wavelet transform of the gaussian residual image.
Do source extraction on this if asked.
"""
from __future__ import print_function
from __future__ import absolute_import
import numpy as N
from .image import *
from . import mylogger
import os
from . import has_pl
if has_pl:
import matplotlib.pyplot as pl
from math import log, floor, sqrt
from .const import fwsig
from copy import deepcopy as cp
from . import functions as func
import gc
from numpy import array, product
import scipy.signal
from .preprocess import Op_preprocess
from .rmsimage import Op_rmsimage
from .threshold import Op_threshold
from .islands import Op_islands
from .gausfit import Op_gausfit, Gaussian
from .gaul2srl import Op_gaul2srl
from .make_residimage import Op_make_residimage
from .interface import raw_input_no_history
from . import statusbar
try:
import pyfftw.interfaces
pyfftw.interfaces.cache.enable()
N.fft.fftn = pyfftw.interfaces.numpy_fft.fftn
N.fft.ifftn = pyfftw.interfaces.numpy_fft.ifftn
scipy.signal.signaltools.fftn = pyfftw.interfaces.scipy_fftpack.fftn
scipy.signal.signaltools.ifftn = pyfftw.interfaces.scipy_fftpack.ifftn
has_pyfftw = True
except ImportError:
has_pyfftw = False
class Op_wavelet_atrous(Op):
"""Compute a-trous wavelet transform of the gaussian residual image."""
def __call__(self, img):
mylog = mylogger.logging.getLogger("PyBDSF." + img.log + "Wavelet")
if img.opts.atrous_do:
if img.nisl == 0:
mylog.warning("No islands found. Skipping wavelet decomposition.")
img.completed_Ops.append('wavelet_atrous')
return
mylog.info("Decomposing gaussian residual image into a-trous wavelets")
bdir = img.basedir + '/wavelet/'
if img.opts.output_all:
if not os.path.isdir(bdir):
os.makedirs(bdir)
if not os.path.isdir(bdir + '/residual/'):
os.makedirs(bdir + '/residual/')
if not os.path.isdir(bdir + '/model/'):
os.makedirs(bdir + '/model/')
dobdsm = img.opts.atrous_bdsm_do
filter = {'tr': {'size': 3, 'vec': [1. / 4, 1. / 2, 1. / 4], 'name': 'Triangle'},
'b3': {'size': 5, 'vec': [1. / 16, 1. / 4, 3. / 8, 1. / 4, 1. / 16], 'name': 'B3 spline'}}
if dobdsm:
wchain, wopts = self.setpara_bdsm(img)
n, m = img.ch0_arr.shape
# Calculate residual image that results from normal (non-wavelet) Gaussian fitting
Op_make_residimage()(img)
resid = img.resid_gaus_arr
lpf = img.opts.atrous_lpf
if lpf not in ['b3', 'tr']:
lpf = 'b3'
jmax = img.opts.atrous_jmax
            l = len(filter[lpf]['vec'])  # base filter length (3 for 'tr', 5 for 'b3')
if jmax < 1 or jmax > 15: # determine jmax
# Check if largest island size is
# smaller than 1/3 of image size. If so, use it to determine jmax.
min_size = min(resid.shape)
max_isl_shape = (0, 0)
for isl in img.islands:
if isl.image.shape[0] * isl.image.shape[1] > max_isl_shape[0] * max_isl_shape[1]:
max_isl_shape = isl.image.shape
if max_isl_shape != (0, 0) and min(max_isl_shape) < min(resid.shape) / 3.0:
min_size = min(max_isl_shape) * 4.0
else:
min_size = min(resid.shape)
jmax = int(floor(log((min_size / 3.0 * 3.0 - l) / (l - 1) + 1) / log(2.0) + 1.0)) + 1
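                # The support of the a-trous kernel at scale j is
                # l + (l - 1) * (2**j - 1); the line above picks the largest j
                # whose kernel still fits (with some margin) inside min_size,
                # and the check below drops one more scale if it does not.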
if min_size * 0.55 <= (l + (l - 1) * (2 ** (jmax) - 1)):
jmax = jmax - 1
img.wavelet_lpf = lpf
img.wavelet_jmax = jmax
mylog.info("Using " + filter[lpf]['name'] + ' filter with J_max = ' + str(jmax))
img.atrous_islands = []
img.atrous_gaussians = []
img.atrous_sources = []
img.atrous_opts = []
img.resid_wavelets_arr = cp(img.resid_gaus_arr)
im_old = img.resid_wavelets_arr
total_flux = 0.0
ntot_wvgaus = 0
stop_wav = False
pix_masked = N.where(N.isnan(resid))
jmin = 1
if img.opts.ncores is None:
numcores = 1
else:
numcores = img.opts.ncores
for j in range(jmin, jmax + 1): # extra +1 is so we can do bdsm on cJ as well
mylogger.userinfo(mylog, "\nWavelet scale #" + str(j))
im_new = self.atrous(im_old, filter[lpf]['vec'], lpf, j, numcores=numcores, use_scipy_fft=img.opts.use_scipy_fft)
                im_new[pix_masked] = N.nan  # since fftconvolve won't work with blanked pixels
if img.opts.atrous_sum:
w = im_new
else:
w = im_old - im_new
im_old = im_new
suffix = 'w' + repr(j)
filename = img.imagename + '.atrous.' + suffix + '.fits'
if img.opts.output_all:
func.write_image_to_file('fits', filename, w, img, bdir)
mylog.info('%s %s' % ('Wrote ', img.imagename + '.atrous.' + suffix + '.fits'))
# now do bdsm on each wavelet image.
if dobdsm:
wopts['filename'] = filename
wopts['basedir'] = bdir
box = img.rms_box[0]
y1 = (l + (l - 1) * (2 ** (j - 1) - 1))
bs = max(5 * y1, box) # changed from 10 to 5
if bs > min(n, m) / 2:
wopts['rms_map'] = False
wopts['mean_map'] = 'const'
wopts['rms_box'] = None
else:
wopts['rms_box'] = (bs, bs/3)
if hasattr(img, '_adapt_rms_isl_pos'):
bs_bright = max(5 * y1, img.rms_box_bright[0])
if bs_bright < bs/1.5:
wopts['adaptive_rms_box'] = True
wopts['rms_box_bright'] = (bs_bright, bs_bright/3)
else:
wopts['adaptive_rms_box'] = False
if j <= 3:
wopts['ini_gausfit'] = 'default'
else:
wopts['ini_gausfit'] = 'nobeam'
wid = (l + (l - 1) * (2 ** (j - 1) - 1))
b1, b2 = img.pixel_beam()[0:2]
b1 = b1 * fwsig
b2 = b2 * fwsig
cdelt = img.wcs_obj.acdelt[:2]
wimg = Image(wopts)
wimg.beam = (sqrt(wid * wid + b1 * b1) * cdelt[0] * 2.0, sqrt(wid * wid + b2 * b2) * cdelt[1] * 2.0, 0.0)
wimg.orig_beam = img.beam
wimg.pixel_beam = img.pixel_beam
wimg.pixel_beamarea = img.pixel_beamarea
wimg.log = 'Wavelet.'
wimg.basedir = img.basedir
wimg.extraparams['bbsprefix'] = suffix
wimg.extraparams['bbsname'] = img.imagename + '.wavelet'
wimg.extraparams['bbsappend'] = True
wimg.bbspatchnum = img.bbspatchnum
wimg.waveletimage = True
wimg.j = j
wimg.indir = img.indir
if hasattr(img, '_adapt_rms_isl_pos'):
wimg._adapt_rms_isl_pos = img._adapt_rms_isl_pos
self.init_image_simple(wimg, img, w, '.atrous.' + suffix)
for op in wchain:
op(wimg)
gc.collect()
if isinstance(op, Op_islands) and img.opts.atrous_orig_isl:
if wimg.nisl > 0:
# Find islands that do not share any pixels with
# islands in original ch0 image.
good_isl = []
# Make original rank image boolean; rank counts from 0, with -1 being
# outside any island
orig_rankim_bool = N.array(img.pyrank + 1, dtype=bool)
# Multiply rank images
old_islands = orig_rankim_bool * (wimg.pyrank + 1) - 1
# Exclude islands that don't overlap with a ch0 island.
valid_ids = set(old_islands.flatten())
for idx, wvisl in enumerate(wimg.islands):
if idx in valid_ids:
wvisl.valid = True
good_isl.append(wvisl)
else:
wvisl.valid = False
wimg.islands = good_isl
wimg.nisl = len(good_isl)
mylogger.userinfo(mylog, "Number of islands found", '%i' % wimg.nisl)
# Renumber islands:
for wvindx, wvisl in enumerate(wimg.islands):
wvisl.island_id = wvindx
if isinstance(op, Op_gausfit):
# If opts.atrous_orig_isl then exclude Gaussians outside of
# the original ch0 islands
nwvgaus = 0
if img.opts.atrous_orig_isl:
gaul = wimg.gaussians
tot_flux = 0.0
if img.ngaus == 0:
gaus_id = -1
else:
gaus_id = img.gaussians[-1].gaus_num
for g in gaul:
if not hasattr(g, 'valid'):
g.valid = False
if not g.valid:
try:
isl_id = img.pyrank[int(g.centre_pix[0] + 1), int(g.centre_pix[1] + 1)]
except IndexError:
isl_id = -1
if isl_id >= 0:
isl = img.islands[isl_id]
gcenter = (int(g.centre_pix[0] - isl.origin[0]),
int(g.centre_pix[1] - isl.origin[1]))
if not isl.mask_active[gcenter]:
gaus_id += 1
gcp = Gaussian(img, g.parameters[:], isl.island_id, gaus_id)
gcp.gaus_num = gaus_id
gcp.wisland_id = g.island_id
gcp.jlevel = j
g.valid = True
isl.gaul.append(gcp)
isl.ngaus += 1
img.gaussians.append(gcp)
nwvgaus += 1
tot_flux += gcp.total_flux
else:
g.valid = False
g.jlevel = 0
else:
g.valid = False
g.jlevel = 0
vg = []
for g in wimg.gaussians:
if g.valid:
vg.append(g)
wimg.gaussians = vg
mylogger.userinfo(mylog, "Number of valid wavelet Gaussians", str(nwvgaus))
else:
# Keep all Gaussians and merge islands that overlap
tot_flux = check_islands_for_overlap(img, wimg)
# Now renumber the islands and adjust the rank image before going to next wavelet image
renumber_islands(img)
total_flux += tot_flux
if img.opts.interactive and has_pl:
dc = '\033[34;1m'
nc = '\033[0m'
print(dc + '--> Displaying islands and rms image...' + nc)
if max(wimg.ch0_arr.shape) > 4096:
print(dc + '--> Image is large. Showing islands only.' + nc)
wimg.show_fit(rms_image=False, mean_image=False, ch0_image=False,
ch0_islands=True, gresid_image=False, sresid_image=False,
gmodel_image=False, smodel_image=False, pyramid_srcs=False)
else:
wimg.show_fit()
prompt = dc + "Press enter to continue or 'q' stop fitting wavelet images : " + nc
answ = raw_input_no_history(prompt)
while answ != '':
if answ == 'q':
img.wavelet_jmax = j
stop_wav = True
break
answ = raw_input_no_history(prompt)
if len(wimg.gaussians) > 0:
img.resid_wavelets_arr = self.subtract_wvgaus(img.opts, img.resid_wavelets_arr, wimg.gaussians, wimg.islands)
if img.opts.atrous_sum:
im_old = self.subtract_wvgaus(img.opts, im_old, wimg.gaussians, wimg.islands)
if stop_wav:
break
pyrank = N.zeros(img.pyrank.shape, dtype=N.int32)
for i, isl in enumerate(img.islands):
isl.island_id = i
for g in isl.gaul:
g.island_id = i
for dg in isl.dgaul:
dg.island_id = i
pyrank[tuple(isl.bbox)] += N.invert(isl.mask_active) * (i + 1)
pyrank -= 1 # align pyrank values with island ids and set regions outside of islands to -1
img.pyrank = pyrank
img.ngaus += ntot_wvgaus
img.total_flux_gaus += total_flux
mylogger.userinfo(mylog, "Total flux density in model on all scales", '%.3f Jy' % img.total_flux_gaus)
if img.opts.output_all:
func.write_image_to_file('fits', img.imagename + '.atrous.cJ.fits',
im_new, img, bdir)
mylog.info('%s %s' % ('Wrote ', img.imagename + '.atrous.cJ.fits'))
func.write_image_to_file('fits', img.imagename + '.resid_wavelets.fits',
(img.ch0_arr - img.resid_gaus_arr + img.resid_wavelets_arr), img, bdir + '/residual/')
mylog.info('%s %s' % ('Wrote ', img.imagename + '.resid_wavelets.fits'))
func.write_image_to_file('fits', img.imagename + '.model_wavelets.fits',
(img.resid_gaus_arr - img.resid_wavelets_arr), img, bdir + '/model/')
mylog.info('%s %s' % ('Wrote ', img.imagename + '.model_wavelets.fits'))
img.completed_Ops.append('wavelet_atrous')
def atrous(self, image, filtvec, lpf, j, numcores=1, use_scipy_fft=True):
ff = filtvec[:]
for i in range(1, len(filtvec)):
ii = 1 + (2 ** (j - 1)) * (i - 1)
ff[ii:ii] = [0] * (2 ** (j - 1) - 1)
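        # e.g. for j=2 the 'tr' kernel [1/4, 1/2, 1/4] is dilated to
        # [1/4, 0, 1/2, 0, 1/4]: taps are spaced 2**(j-1) pixels apart
        # ("a trous" = "with holes"), doubling the smoothing scale per level.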
kern = N.outer(ff, ff)
unmasked = N.nan_to_num(image)
if use_scipy_fft:
im_new = scipy.signal.fftconvolve(unmasked, kern, mode='same')
else:
im_new = fftconvolve(unmasked, kern, mode='same', pad_to_power_of_two=False, numcores=numcores)
if im_new.shape != image.shape:
im_new = im_new[0:image.shape[0], 0:image.shape[1]]
return im_new
def setpara_bdsm(self, img):
chain = [Op_preprocess, Op_rmsimage(), Op_threshold(), Op_islands(),
Op_gausfit(), Op_gaul2srl(), Op_make_residimage()]
opts = {'thresh': 'hard'}
opts['thresh_pix'] = img.thresh_pix
opts['kappa_clip'] = 3.0
opts['rms_map'] = img.opts.rms_map
opts['mean_map'] = img.opts.mean_map
opts['thresh_isl'] = img.opts.thresh_isl
opts['minpix_isl'] = 6
opts['savefits_rmsim'] = False
opts['savefits_meanim'] = False
opts['savefits_rankim'] = False
opts['savefits_normim'] = False
opts['polarisation_do'] = False
opts['aperture'] = None
opts['group_by_isl'] = img.opts.group_by_isl
opts['quiet'] = img.opts.quiet
opts['ncores'] = img.opts.ncores
opts['flag_smallsrc'] = False
opts['flag_minsnr'] = 0.2
opts['flag_maxsnr'] = 1.2
opts['flag_maxsize_isl'] = 2.5
opts['flag_bordersize'] = 0
opts['flag_maxsize_bm'] = 50.0
opts['flag_minsize_bm'] = 0.2
opts['flag_maxsize_fwhm'] = 0.5
opts['bbs_patches'] = img.opts.bbs_patches
opts['filename'] = ''
opts['output_all'] = img.opts.output_all
opts['verbose_fitting'] = img.opts.verbose_fitting
opts['split_isl'] = False
opts['peak_fit'] = True
opts['peak_maxsize'] = 30.0
opts['detection_image'] = ''
opts['verbose_fitting'] = img.opts.verbose_fitting
ops = []
for op in chain:
if isinstance(op, type):
ops.append(op())
else:
ops.append(op)
return ops, opts
def init_image_simple(self, wimg, img, w, name):
wimg.ch0_arr = w
wimg.ch0_Q_arr = None
wimg.ch0_U_arr = None
wimg.ch0_V_arr = None
wimg.wcs_obj = img.wcs_obj
wimg.parentname = img.filename
wimg.filename = img.filename + name
wimg.imagename = img.imagename + name + '.pybdsf'
wimg.pix2sky = img.pix2sky
wimg.sky2pix = img.sky2pix
wimg.pix2beam = img.pix2beam
wimg.beam2pix = img.beam2pix
wimg.pix2gaus = img.pix2gaus
wimg.gaus2pix = img.gaus2pix
wimg.pix2coord = img.pix2coord
wimg.masked = img.masked
wimg.mask_arr = img.mask_arr
wimg.use_io = img.use_io
wimg.do_cache = img.do_cache
wimg.tempdir = img.tempdir
wimg.shape = img.shape
wimg.frequency = img.frequency
wimg.equinox = img.equinox
wimg.use_io = 'fits'
def subtract_wvgaus(self, opts, residim, gaussians, islands):
from . import functions as func
from .make_residimage import Op_make_residimage as opp
dummy = opp()
shape = residim.shape
thresh = opts.fittedimage_clip
for g in gaussians:
if g.valid:
C1, C2 = g.centre_pix
if hasattr(g, 'wisland_id'):
isl = islands[g.wisland_id]
else:
isl = islands[g.island_id]
b = opp.find_bbox(dummy, thresh * isl.rms, g)
bbox = N.s_[max(0, int(C1 - b)):min(shape[0], int(C1 + b + 1)),
max(0, int(C2 - b)):min(shape[1], int(C2 + b + 1))]
x_ax, y_ax = N.mgrid[bbox]
ffimg = func.gaussian_fcn(g, x_ax, y_ax)
residim[bbox] = residim[bbox] - ffimg
return residim
def morphfilter_pyramid(self, img, bdir):
from math import ceil, floor
jmax = img.wavelet_jmax
ind = [i for i, isl in enumerate(img.atrous_islands) if len(isl) > 0]
ind.reverse()
lpyr = []
img.npyrsrc = -1
if len(ind) > 0:
for i in ind:
isls = img.atrous_islands[i]
for isl in isls:
if i != ind[0]:
dumr = []
for pyrsrc in lpyr:
belongs = pyrsrc.belongs(img, isl)
if belongs:
dumr.append(pyrsrc.pyr_id)
if len(dumr) == 1:
dumr = dumr[0]
pyrsrc = lpyr[dumr]
pyrsrc.add_level(img, i, isl)
else:
pyrsrc = Pyramid_source(img, isl, i)
lpyr.append(pyrsrc)
else:
pyrsrc = Pyramid_source(img, isl, i)
lpyr.append(pyrsrc)
img.pyrsrcs = lpyr
if img.opts.plot_pyramid and has_pl:
pl.figure()
a = ceil(sqrt(jmax))
b = floor(jmax / a)
if a * b < jmax:
b += 1
colours = ['r', 'g', 'b', 'c', 'm', 'y', 'k']
sh = img.ch0_arr.shape
for pyr in img.pyrsrcs:
for iisl, isl in enumerate(pyr.islands):
jj = pyr.jlevels[iisl]
col = colours[pyr.pyr_id % 7]
pl.subplot(a, b, jj)
ind = N.where(~isl.mask_active)
pl.plot(ind[0] + isl.origin[0], ind[1] + isl.origin[1], '.', color=col)
pl.axis([0.0, sh[0], 0.0, sh[1]])
pl.title('J = ' + str(jj))
pl.savefig(bdir + img.imagename + '.pybdsf.atrous.pyramidsrc.png')
class Pyramid_source(object):
""" Pyramid_source is a source constructed out of multiple wavelet transform images. """
def __init__(self, img, island, level0):
img.npyrsrc = img.npyrsrc + 1
self.pyr_id = img.npyrsrc
self.islands = [island]
self.jlevels = [level0]
def belongs(self, img, isl):
from . import functions as func
# Get centroid of island (as integer)
mom = func.momanalmask_gaus(isl.image, isl.mask_active, 0, 1.0, False)
cen = N.array(mom[1:3]) + isl.origin
belong = False
# Check if lies within any island of self
for i, pyrisl in enumerate(self.islands):
if N.sum([pyrisl.bbox[j].start <= cen[j] < pyrisl.bbox[j].stop for j in range(2)]) == 2:
pix = tuple([cen[j] - pyrisl.origin[j] for j in range(2)])
if not pyrisl.mask_active[pix]:
belong = True
return belong
def add_level(self, img, level, isl):
self.islands.append(isl)
self.jlevels.append(level + 1)
Image.pyrsrcs = List(tInstance(Pyramid_source), doc="List of Pyramidal sources")
def fftconvolve(in1, in2, mode="full", pad_to_power_of_two=True, numcores=1):
"""Convolve two N-dimensional arrays using FFT. See convolve.
"""
s1 = array(in1.shape)
s2 = array(in2.shape)
    complex_result = (N.issubdtype(in1.dtype, N.complexfloating) or
                      N.issubdtype(in2.dtype, N.complexfloating))
size = s1 + s2 - 1
if pad_to_power_of_two:
# Use 2**n-sized FFT; it might improve performance
        fsize = (2 ** N.ceil(N.log2(size))).astype(int)
else:
# Padding to a power of two might degrade performance, too
fsize = size
if has_pyfftw:
IN1 = N.fft.fftn(in1, fsize, threads=numcores)
IN1 *= N.fft.fftn(in2, fsize, threads=numcores)
fslice = tuple([slice(0, int(sz)) for sz in size])
ret = N.fft.ifftn(IN1, threads=numcores)[fslice].copy()
else:
IN1 = N.fft.fftn(in1, fsize)
IN1 *= N.fft.fftn(in2, fsize)
fslice = tuple([slice(0, int(sz)) for sz in size])
ret = N.fft.ifftn(IN1)[fslice].copy()
del IN1
if not complex_result:
ret = ret.real
if mode == "full":
return ret
elif mode == "same":
if product(s1, axis=0) > product(s2, axis=0):
osize = s1
else:
osize = s2
return func.centered(ret, osize)
elif mode == "valid":
return func.centered(ret, abs(s2 - s1) + 1)
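# Output shapes for input shapes s1 and s2 in this implementation:
#   mode="full"  -> s1 + s2 - 1
#   mode="same"  -> the larger of s1/s2 (centered)
#   mode="valid" -> abs(s1 - s2) + 1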
def rebase_bbox(box, minv):
# return a new bounding box tuple where minv is subtracted from
# all the co-ordinate values
nbox = []
for i, sl in enumerate(box):
nbox.append(slice(sl.start-minv[i], sl.stop-minv[i], None))
return tuple(nbox)
def merge_bbox(box1, box2):
# For two bounding box tuples find the minimal n-dimensional space
# that encompasses both structures and make new bounding boxes in
# this co-ordinate system
minv = []
maxv = []
for sl1, sl2 in zip(box1, box2):
minv.append(min(sl1.start, sl2.start))
maxv.append(max(sl1.stop, sl2.stop))
nbox1 = rebase_bbox(box1, minv)
nbox2 = rebase_bbox(box2, minv)
dims = [y-x for x, y in zip(minv, maxv)]
fullbox = [slice(x, y, None) for x, y in zip(minv, maxv)]
return dims, nbox1, nbox2, N.array(minv), fullbox
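# Worked example (illustrative values): box1 = (slice(2, 5), slice(3, 7)) and
# box2 = (slice(0, 4), slice(5, 9)) give minv = [0, 3], maxv = [5, 9],
# dims = [5, 6], nbox1 = (slice(2, 5), slice(0, 4)),
# nbox2 = (slice(0, 4), slice(2, 6)), fullbox = [slice(0, 5), slice(3, 9)].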
def merge_islands(img, isl1, isl2):
"""Merge two islands into one
    The final island has the island_id of isl1. The Gaussians from isl2 are
    appended to those in the isl1 list, with numbering starting from the last
    number in img.gaussians (which is also updated with the isl2 Gaussians).
The merged island replaces isl1 in img.
"""
from .islands import Island
import scipy.ndimage as nd
shape, nbox1, nbox2, origin, fullbox = merge_bbox(isl1.bbox, isl2.bbox)
mask1 = N.zeros(shape, dtype=bool)
mask1[nbox1] = ~isl1.mask_active
mask2 = N.zeros(shape, dtype=bool)
mask2[nbox2] = ~isl2.mask_active
overlap_mask = N.logical_and(mask1, mask2)
if N.any(overlap_mask):
full_mask = N.logical_or(mask1, mask2)
image = img.ch0_arr
mask = img.mask_arr
rms = img.rms_arr
mean = img.mean_arr
rank = len(image.shape)
connectivity = nd.generate_binary_structure(rank, rank)
labels, count = nd.label(full_mask, connectivity)
slices = nd.find_objects(labels)
bbox = slices[0]
new_bbox = rebase_bbox(bbox, -origin)
idx = isl1.island_id
# labels array passed to Island must be capable of being
# indexed by new bounding box, so convert. Do the subtraction
# first to avoid an expensive operation over the whole array
labels = labels-1+idx
new_labels = N.zeros(image.shape)
new_labels[tuple(fullbox)] = labels
beamarea = img.pixel_beamarea()
merged_isl = Island(image, mask, mean, rms, new_labels, new_bbox, idx, beamarea)
# Add all the Gaussians to the merged island
merged_isl.gaul = isl1.gaul
merged_isl.dgaul = isl1.dgaul
copy_gaussians(img, merged_isl, isl2)
img.islands[idx] = merged_isl
def copy_gaussians(img, isl1, isl2):
"""Copies Gaussians from isl2 to isl1
img.gaussians is also updated
"""
if img.ngaus == 0:
gaus_id = -1
else:
gaus_id = img.gaussians[-1].gaus_num
for g in isl2.gaul:
gaus_id += 1
gcp = Gaussian(img, g.parameters[:], isl1.island_id, gaus_id)
gcp.gaus_num = gaus_id
gcp.jlevel = g.jlevel
if g.jlevel > 0:
# Preserve the wavelet rms and mean values if the isl2 Gaussian was fit to
# a wavelet image
gcp.wave_rms = g.rms
gcp.wave_mean = g.mean
else:
gcp.wave_rms = 0.0
gcp.wave_mean = 0.0
isl1.gaul.append(gcp)
img.ngaus += 1
img.gaussians.append(gcp)
def renumber_islands(img):
"""Renumbers island_ids (after, e.g., removing one)
Also renumbers the pyrank image.
"""
pyrank = N.zeros(img.pyrank.shape, dtype=N.int32)
for i, isl in enumerate(img.islands):
isl.island_id = i
for g in isl.gaul:
g.island_id = i
for dg in isl.dgaul:
dg.island_id = i
pyrank[tuple(isl.bbox)] += N.invert(isl.mask_active) * (i + 1)
pyrank -= 1 # align pyrank values with island ids and set regions outside of islands to -1
img.pyrank = pyrank
gaussian_list = [g for isl in img.islands for g in isl.gaul]
img.gaussians = gaussian_list
def check_islands_for_overlap(img, wimg):
"""Checks for overlaps between img and wimg islands"""
have_numexpr = True
try:
import numexpr as ne
    except ImportError:
have_numexpr = False
tot_flux = 0.0
bar = statusbar.StatusBar('Checking islands for overlap ............ : ', 0, len(wimg.islands))
# Make masks for regions that have islands
wpp = wimg.pyrank+1 # does not change, store for later
wav_rankim_bool = wpp > 0 # boolean
orig_rankim_bool = img.pyrank > -1
# Make "images" of island ids for overlaping regions
orig_islands = wav_rankim_bool * (img.pyrank + 1) - 1
if not img.opts.quiet:
bar.start()
for idx, wvisl in enumerate(wimg.islands):
if len(wvisl.gaul) > 0:
# Get unique island IDs. If an island overlaps with one
# in the original ch0 image, merge them together. If not,
# add the island as a new one.
wav_islands = orig_rankim_bool[tuple(wvisl.bbox)] * wpp[tuple(wvisl.bbox)] - 1
wav_ids = N.unique(wav_islands) # saves conversion to set and back
for wvg in wvisl.gaul:
tot_flux += wvg.total_flux
wvg.valid = True
if idx in wav_ids:
orig_idx = N.unique(orig_islands[tuple(wvisl.bbox)][wav_islands == idx])
if len(orig_idx) == 1:
merge_islands(img, img.islands[orig_idx[0]], wvisl)
else:
merge_islands(img, img.islands[orig_idx[0]], wvisl)
for oidx in orig_idx[1:]:
merge_islands(img, img.islands[orig_idx[0]], img.islands[oidx])
img.islands = [x for x in img.islands if x.island_id not in orig_idx[1:]]
renumber_islands(img)
# Now recalculate the overlap images, since the islands have changed
ipp = img.pyrank+1
if have_numexpr:
orig_islands = ne.evaluate('wav_rankim_bool * ipp - 1')
else:
orig_islands = wav_rankim_bool * ipp - 1
else:
isl_id = img.islands[-1].island_id + 1
new_isl = wvisl.copy(img.pixel_beamarea(), image=img.ch0_arr[tuple(wvisl.bbox)],
mean=img.mean_arr[tuple(wvisl.bbox)],
rms=img.rms_arr[tuple(wvisl.bbox)])
new_isl.gaul = []
new_isl.dgaul = []
new_isl.island_id = isl_id
img.islands.append(new_isl)
copy_gaussians(img, new_isl, wvisl)
if not img.opts.quiet:
bar.increment()
bar.stop()
return tot_flux
PyBDSF-1.11.0/cibuildwheel/ 0000775 0000000 0000000 00000000000 14650706641 0015272 5 ustar 00root root 0000000 0000000 PyBDSF-1.11.0/cibuildwheel/README.md 0000664 0000000 0000000 00000001150 14650706641 0016546 0 ustar 00root root 0000000 0000000 # Using `cibuildwheel`
The `cibuildwheel` tool is intended to be run on a CI server. However, for testing purposes, it is also possible to run `cibuildwheel` locally. In order to do so, create a Python virtual environment and install `cibuildwheel` in it. Next, go to the root directory of the project (i.e. the directory containing your `setup.py`, `setup.cfg`, and/or `pyproject.toml` file). Assuming you're on Linux, enter the following command:
```
cibuildwheel --platform linux
```
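
To restrict a local run to a subset of wheels while debugging, builds can be selected with the `CIBW_BUILD` environment variable (the build identifier below is only an example):
```
CIBW_BUILD="cp310-manylinux_x86_64" cibuildwheel --platform linux
```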
For more information, check the `cibuildwheel --help` output, or go to https://cibuildwheel.readthedocs.io/en/stable/ PyBDSF-1.11.0/cibuildwheel/before_all.sh 0000775 0000000 0000000 00000002033 14650706641 0017721 0 ustar 00root root 0000000 0000000 #!/bin/bash -eux
#
# This script should be called by `cibuildwheel` in the `before-all` stage.
#
# This script will download and untar the Boost C++ source files.
# `${BOOST_VERSION}` should be set as `x.y.z`; the untarred sources can be
# found in the directory `${BOOST_BUILD_DIR}/boost`.
# Both environment variables must have been set.
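#
# Example invocation (the values below are illustrative only):
#
#   BOOST_VERSION=1.82.0 BOOST_BUILD_DIR=$PWD/boost-build ./before_all.sh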
# Download and untar the Boost C++ source files
# Rename the source directory to `boost`
function download_and_untar_boost
{
major=$(echo "${BOOST_VERSION}" | cut -d. -f1)
minor=$(echo "${BOOST_VERSION}" | cut -d. -f2)
patch=$(echo "${BOOST_VERSION}" | cut -d. -f3)
name="boost"
long_name="${name}_${major}_${minor}_${patch}"
site="https://sourceforge.net"
directory="projects/${name}/files/${name}/${major}.${minor}.${patch}"
file="${long_name}.tar.bz2"
url="${site}/${directory}/${file}"
rm -rf "${BOOST_BUILD_DIR}"
mkdir -p "${BOOST_BUILD_DIR}"
cd "${BOOST_BUILD_DIR}"
curl -L -o - "${url}" | tar -xjf -
mv "${long_name}" "${name}"
}
set -o pipefail
download_and_untar_boost
PyBDSF-1.11.0/cibuildwheel/before_build.sh 0000775 0000000 0000000 00000002305 14650706641 0020252 0 ustar 00root root 0000000 0000000 #!/bin/bash -eux
#
# This script should be called by `cibuildwheel` in the `before-build` stage.
#
# This script will first install the oldest supported `numpy` to maximize
# portability. Next the Boost Python libraries will be built from source,
# including the bindings to NumPy. The Boost sources must be in the directory
# `${BOOST_BUILD_DIR}/boost`. The libraries will be installed in the directory
# `${BOOST_INSTALL_DIR}`. Both environment variables must have been set.
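#
# Example invocation (the values below are illustrative only):
#
#   BOOST_BUILD_DIR=$PWD/boost-build BOOST_INSTALL_DIR=$PWD/boost-install \
#     ./before_build.sh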
# Ensure we start with a clean slate
function cleanup
{
rm -rf "${BOOST_BUILD_DIR}/boost/bin.v2"
rm -rf "${BOOST_INSTALL_DIR}"
}
# Install oldest supported numpy
function install_numpy
{
pip install oldest-supported-numpy
}
# Build the Boost Python libraries
function build_boost_python
{
nproc=$(python -c 'import multiprocessing as mp; print(mp.cpu_count())')
inc_dir=$(python -c 'import sysconfig as sc; print(sc.get_path("include"))')
cd "${BOOST_BUILD_DIR}/boost"
./bootstrap.sh --prefix="${BOOST_INSTALL_DIR}" \
--with-libraries=python \
--with-toolset=gcc
./b2 -j"${nproc}" \
cxxflags="-fPIC -I${inc_dir}" \
link=static,shared \
install
}
set -o pipefail
cleanup
install_numpy
build_boost_python
PyBDSF-1.11.0/doc/ 0000775 0000000 0000000 00000000000 14650706641 0013377 5 ustar 00root root 0000000 0000000 PyBDSF-1.11.0/doc/Makefile 0000664 0000000 0000000 00000012705 14650706641 0015044 0 ustar 00root root 0000000 0000000 # Makefile for Sphinx documentation
#
# You can set these variables from the command line.
SPHINXOPTS =
SPHINXBUILD = sphinx-build
PAPER =
BUILDDIR = build
# Internal variables.
PAPEROPT_a4 = -D latex_paper_size=a4
PAPEROPT_letter = -D latex_paper_size=letter
ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source
# the i18n builder cannot share the environment and doctrees with the others
I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source
.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext
help:
@echo "Please use \`make ' where is one of"
@echo " html to make standalone HTML files"
@echo " dirhtml to make HTML files named index.html in directories"
@echo " singlehtml to make a single large HTML file"
@echo " pickle to make pickle files"
@echo " json to make JSON files"
@echo " htmlhelp to make HTML files and a HTML help project"
@echo " qthelp to make HTML files and a qthelp project"
@echo " devhelp to make HTML files and a Devhelp project"
@echo " epub to make an epub"
@echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
@echo " latexpdf to make LaTeX files and run them through pdflatex"
@echo " text to make text files"
@echo " man to make manual pages"
@echo " texinfo to make Texinfo files"
@echo " info to make Texinfo files and run them through makeinfo"
@echo " gettext to make PO message catalogs"
@echo " changes to make an overview of all changed/added/deprecated items"
@echo " linkcheck to check all external links for integrity"
@echo " doctest to run all doctests embedded in the documentation (if enabled)"
clean:
-rm -rf $(BUILDDIR)/*
html:
$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
@echo
@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
dirhtml:
$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
@echo
@echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
singlehtml:
$(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
@echo
@echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."
pickle:
$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
@echo
@echo "Build finished; now you can process the pickle files."
json:
$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
@echo
@echo "Build finished; now you can process the JSON files."
htmlhelp:
$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
@echo
@echo "Build finished; now you can run HTML Help Workshop with the" \
".hhp project file in $(BUILDDIR)/htmlhelp."
qthelp:
$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
@echo
@echo "Build finished; now you can run "qcollectiongenerator" with the" \
".qhcp project file in $(BUILDDIR)/qthelp, like this:"
@echo "# qcollectiongenerator $(BUILDDIR)/qthelp/PyBDSM.qhcp"
@echo "To view the help file:"
@echo "# assistant -collectionFile $(BUILDDIR)/qthelp/PyBDSM.qhc"
devhelp:
$(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
@echo
@echo "Build finished."
@echo "To view the help file:"
@echo "# mkdir -p $$HOME/.local/share/devhelp/PyBDSM"
@echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/PyBDSM"
@echo "# devhelp"
epub:
$(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
@echo
@echo "Build finished. The epub file is in $(BUILDDIR)/epub."
latex:
$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
@echo
@echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
@echo "Run \`make' in that directory to run these through (pdf)latex" \
"(use \`make latexpdf' here to do that automatically)."
latexpdf:
$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
@echo "Running LaTeX files through pdflatex..."
$(MAKE) -C $(BUILDDIR)/latex all-pdf
@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
text:
$(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
@echo
@echo "Build finished. The text files are in $(BUILDDIR)/text."
man:
$(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
@echo
@echo "Build finished. The manual pages are in $(BUILDDIR)/man."
texinfo:
$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
@echo
@echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo."
@echo "Run \`make' in that directory to run these through makeinfo" \
"(use \`make info' here to do that automatically)."
info:
$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
@echo "Running Texinfo files through makeinfo..."
make -C $(BUILDDIR)/texinfo info
@echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo."
gettext:
$(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale
@echo
@echo "Build finished. The message catalogs are in $(BUILDDIR)/locale."
changes:
$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
@echo
@echo "The overview file is in $(BUILDDIR)/changes."
linkcheck:
$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
@echo
@echo "Link check complete; look for any errors in the above output " \
"or in $(BUILDDIR)/linkcheck/output.txt."
doctest:
$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
@echo "Testing of doctests in the sources finished, look at the " \
"results in $(BUILDDIR)/doctest/output.txt."
PyBDSF-1.11.0/doc/anaamika_overview.doc 0000664 0000000 0000000 00000004052 14650706641 0017557 0 ustar 00root root 0000000 0000000
//_______________________________________________________________________________
// Description of source code tree
/*!
\page Anaamika Anaamika
\defgroup Anaamika Anaamika - Blob Detection and Source Measurement
The current version of BDSM is the Python version PyBDSM. It is currently maintained and developed at Sterrewacht Leiden.
\section anaamika_code_tree Organiation of the source code
The diagram below show the basic organisation of the source code directories:
\verbatim
lofarsoft
|-- data
|-- doc
|-- release
|-- build
|-- devel_common
|-- external
`-- src
|-- contrib
|-- CR-Tools
|-- DAL
`-- Anaamika <-- Project top-level directory
|-- implement
| |-- fBDSM
| |-- fits
| |-- shaplelets
| `-- PyBDSM
|-- apps
|-- data
|-- doc
`-- scripts
\endverbatim
In this:
- \c implement contains source code from which a library or a set of
libraries is build. The generic substructure is
\verbatim
implement
|-- Module1
|-- Module2
|
`-- ModuleN
\endverbatim
which allows for the creation of multiple smaller library (one per module),
as well as the creation of a single library based on the contents of the
various modules.
- \c apps contains application executables, typically C/C++ (or Fortran)
sources which are compile into a stand-alone program; most of the types
these source will link again the library created from the sources contained
in \c implement.
- \c scripts contains (shell) scripts. Depending one the point of view,
this also might be considerated the location for Python scripts, when
drawing the line between code being executed in a dynamic environment and
code resulting in a static executable originating from a compiled source.
- \c doc contains all sorts of documentation -- user manual, reference
manuals -- but also additional sources (such as this file) processed by
Doxygen.
*/
PyBDSF-1.11.0/doc/requirements.txt 0000664 0000000 0000000 00000000040 14650706641 0016655 0 ustar 00root root 0000000 0000000 sphinx-rtd-theme
setuptools_scm
PyBDSF-1.11.0/doc/source/ 0000775 0000000 0000000 00000000000 14650706641 0014677 5 ustar 00root root 0000000 0000000 PyBDSF-1.11.0/doc/source/HydraA_74MHz_fit.png 0000664 0000000 0000000 00003005660 14650706641 0020362 0 ustar 00root root 0000000 0000000 PNG
[binary content of doc/source/HydraA_74MHz_fit.png omitted]