mpop-1.5.0/ source tarball (pax global header comment = git commit 98b32ca1b1db6f0e3236fbc8dbcaa0f21d14c456)

==> mpop-1.5.0/.bumpversion.cfg <==
[bumpversion]
current_version = 1.5.0
commit = True
tag = True

[bumpversion:file:mpop/version.py]

==> mpop-1.5.0/.gitchangelog.rc <==
##
## Format
##
##   ACTION: [AUDIENCE:] COMMIT_MSG [!TAG ...]
##
## Description
##
##   ACTION is one of 'chg', 'fix', 'new'
##
##       It states WHAT the change is about.
##
##       'chg' is for refactoring, small improvements, cosmetic changes...
##       'fix' is for bug fixes
##       'new' is for new features, big improvements
##
##   AUDIENCE is optional and one of 'dev', 'usr', 'pkg', 'test', 'doc'
##
##       It states WHO is concerned by the change.
##
##       'dev'  is for developers (API changes, refactoring...)
##       'usr'  is for final users (UI changes)
##       'pkg'  is for packagers (packaging changes)
##       'test' is for testers (test-only related changes)
##       'doc'  is for the documentation team (doc-only changes)
##
##   COMMIT_MSG is ... well ... the commit message itself.
##
##   TAGs are additional adjectives, such as 'refactor', 'minor', 'cosmetic'.
##
##       They are preceded by a '!' or a '@' (prefer the former, as the
##       latter is wrongly interpreted on github). Commonly used tags are:
##
##       'refactor' is obviously for refactoring code only
##       'minor'    is for a trivial change (a typo, adding a comment)
##       'cosmetic' is for a cosmetic-driven change (re-indentation, 80-col...)
##       'wip'      is for partial functionality but complete subfunctionality
##
##   Example:
##
##       new: usr: support of bazaar implemented
##       chg: re-indented some lines !cosmetic
##       new: dev: updated code to be compatible with last version of killer lib.
##       fix: pkg: updated year of licence coverage.
##       new: test: added a bunch of tests around user usability of feature X.
##       fix: typo in spelling my name in comment. !minor
##
##   Please note that multi-line commit messages are supported; only the
##   first line is considered the "summary" of the commit message, so tags
##   and other rules apply to the summary only. The body of the commit
##   message will be displayed in the changelog without reformatting.

##
## ``ignore_regexps`` is a list of regexps
##
## Any commit whose full commit message matches any regexp listed here
## will be ignored and won't be reported in the changelog.
##
ignore_regexps = [
    r'@minor', r'!minor',
    r'@cosmetic', r'!cosmetic',
    r'@refactor', r'!refactor',
    r'@wip', r'!wip',
    r'^Merge commit .* into HEAD',
    r'^([cC]hg|[fF]ix|[nN]ew)\s*:\s*[p|P]kg:',
    r'^([cC]hg|[fF]ix|[nN]ew)\s*:\s*[d|D]ev:',
    r'^(.{3,3}\s*:)?\s*[fF]irst commit.?\s*$',
]

## ``section_regexps`` is a list of 2-tuples associating a string label and a
## list of regexps
##
## Commit messages will be classified in sections thanks to this. Section
## titles are the labels, and a commit is classified under a section if any
## of the regexps associated with it matches.
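## Editor's note (not part of the original file): a minimal doctest-style
## sketch of how a commit summary is matched against the section regexps
## defined just below, using only the standard-library ``re`` module. The
## sample commit subjects are hypothetical.
##
##     >>> import re
##     >>> new_re = r'^[nN]ew\s*:\s*((dev|use?r|pkg|test|doc)\s*:\s*)?([^\n]*)$'
##     >>> bool(re.match(new_re, "new: usr: support of bazaar implemented"))
##     True
##     >>> bool(re.match(new_re, "fix: typo in spelling my name in comment."))
##     False
##
## A subject matching none of the 'New', 'Changes' or 'Fix' patterns falls
## through to the catch-all 'Other' section (its label is paired with None).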
section_regexps = [
    ('New', [
        r'^[nN]ew\s*:\s*((dev|use?r|pkg|test|doc)\s*:\s*)?([^\n]*)$',
    ]),
    ('Changes', [
        r'^[cC]hg\s*:\s*((dev|use?r|pkg|test|doc)\s*:\s*)?([^\n]*)$',
    ]),
    ('Fix', [
        r'^([Bb]ug)?[fF]ix\s*:\s*((dev|use?r|pkg|test|doc)\s*:\s*)?([^\n]*)$',
    ]),
    ('Other', None  ## Match all lines
     ),
]

## ``body_process`` is a callable
##
## This callable will be given the original body, and the result will
## be used in the changelog.
##
## Available constructs are:
##
##   - any python callable that takes one txt argument and returns a txt
##     argument.
##
##   - ReSub(pattern, replacement): will apply regexp substitution.
##
##   - Indent(chars="  "): will indent the text with the given prefix.
##     Please remember that template engines also get to modify the text and
##     will usually indent the text themselves if needed.
##
##git log --pretty=format:"- %s%n%b" --since="$(git show -s --format=%ad `git rev-list --tags --max-count=1`)"
##
##   - Wrap(regexp=r"\n\n"): re-wrap text in separate paragraphs to fill
##     80 columns.
##
##   - noop: do nothing
##
##   - ucfirst: ensure the first letter is uppercase
##     (usually used in the ``subject_process`` pipeline)
##
##   - final_dot: ensure the text finishes with a dot
##     (usually used in the ``subject_process`` pipeline)
##
##   - strip: remove any spaces before or after the content of the string
##
## Additionally, you can `pipe` the provided filters, for instance:
#body_process = Wrap(regexp=r'\n(?=\w+\s*:)') | Indent(chars="  ")
#body_process = Wrap(regexp=r'\n(?=\w+\s*:)')
#body_process = noop
body_process = ReSub(r'(?m)\s*^Signed-off-by: .*$\s*', '')

## ``subject_process`` is a callable
##
## This callable will be given the original subject, and the result will
## be used in the changelog.
##
## Available constructs are those listed in the ``body_process`` doc.
subject_process = (strip |
                   ReSub(r'^([cC]hg|[fF]ix|[nN]ew)\s*:\s*((dev|use?r|pkg|test|doc)\s*:\s*)?([^\n@]*)(@[a-z]+\s+)*$', r'\4') |
                   ucfirst | final_dot)
## (a plain-``re`` sketch of this pipeline is given in the editor's note
## further down, as shown below)

## ``tag_filter_regexp`` is a regexp
##
## Tags that will be used for the changelog must match this regexp.
##
tag_filter_regexp = r'^v[0-9]+\.[0-9]+(\.[0-9]+)?$'

## ``unreleased_version_label`` is a string
##
## This label will be used as the changelog title for the last set of changes
## between the last valid tag and HEAD, if any.
unreleased_version_label = "%%version%% (unreleased)"

## ``output_engine`` is a callable
##
## This will change the output format of the generated changelog file.
##
## Available choices are:
##
##   - rest_py
##
##        Legacy pure python engine, outputs ReSTructured text.
##        This is the default.
##
##   - mustache(<template_name>)
##
##        Template name could be any of the available templates in
##        ``templates/mustache/*.tpl``.
##        Requires the python package ``pystache``.
##        Examples:
##           - mustache("markdown")
##           - mustache("restructuredtext")
##
##   - makotemplate(<template_name>)
##
##        Template name could be any of the available templates in
##        ``templates/mako/*.tpl``.
##        Requires the python package ``mako``.
##        Examples:
##           - makotemplate("restructuredtext")
##
output_engine = rest_py
#output_engine = mustache("restructuredtext")
#output_engine = mustache("markdown")
#output_engine = makotemplate("restructuredtext")

## ``include_merges`` is a boolean
##
## This option tells git-log whether to include merge commits in the log.
## The default is to include them.
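## Editor's note (not part of the original file): a minimal runnable sketch
## of what the ``subject_process`` pipeline defined above does, written with
## the standard-library ``re`` module instead of gitchangelog's
## ``strip | ReSub | ucfirst | final_dot`` helpers. The function name and
## the sample subject are hypothetical, for illustration only; the file's
## final setting follows right after this note.

import re as _re  # stdlib; underscore-prefixed to stay out of the config's namespace


def _subject_process_sketch(subject):
    """Mimic: strip | ReSub(...) | ucfirst | final_dot."""
    subject = subject.strip()                                   # strip
    subject = _re.sub(r'^([cC]hg|[fF]ix|[nN]ew)\s*:\s*'
                      r'((dev|use?r|pkg|test|doc)\s*:\s*)?'
                      r'([^\n@]*)(@[a-z]+\s+)*$',
                      r'\4', subject)                           # ReSub: keep group 4
    subject = subject[:1].upper() + subject[1:]                 # ucfirst
    if subject and not subject.endswith('.'):                   # final_dot
        subject += '.'
    return subject

## _subject_process_sketch("fix: dev: handle empty channel lists")
## would return 'Handle empty channel lists.'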
include_merges = False

==> mpop-1.5.0/.gitignore <==
build
doc/build
*.so
*.pyc
*~
RELEASE-VERSION
dist
mpop.egg-info

==> mpop-1.5.0/.travis.yml <==
language: python
python:
- '2.7'
install:
- pip install .
- pip install coveralls
- pip install pyorbital
script: coverage run --source=mpop setup.py test
sudo: false
after_success: coveralls
deploy:
  provider: pypi
  user: Martin.Raspaud
  password:
    secure: RuQzdaLTY4sryIzG8Hz1KWEsyYRxrLvbyfm7DurXDPcj2vsujRwJicNwBrJajIBkzZWwdmWE8db55BPWZwCsJtVUbE53vc742wSAcci2zzCgizSb/jjlDkwk1CE/PoMl4t3JsuIU6bklgw1Y1d4Xn4+BeZe8Blol5PD/FUovxfo=
  on:
    tags: true
    repo: mraspaud/mpop

==> mpop-1.5.0/LICENSE.txt <==
                    GNU GENERAL PUBLIC LICENSE
                       Version 3, 29 June 2007

 Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
 Everyone is permitted to copy and distribute verbatim copies
 of this license document, but changing it is not allowed.

                            Preamble

The GNU General Public License is a free, copyleft license for software and
other kinds of works.

The licenses for most software and other practical works are designed to take
away your freedom to share and change the works. By contrast, the GNU General
Public License is intended to guarantee your freedom to share and change all
versions of a program--to make sure it remains free software for all its
users. We, the Free Software Foundation, use the GNU General Public License
for most of our software; it applies also to any other work released this way
by its authors. You can apply it to your programs, too.

When we speak of free software, we are referring to freedom, not price. Our
General Public Licenses are designed to make sure that you have the freedom
to distribute copies of free software (and charge for them if you wish), that
you receive source code or can get it if you want it, that you can change the
software or use pieces of it in new free programs, and that you know you can
do these things.

To protect your rights, we need to prevent others from denying you these
rights or asking you to surrender the rights. Therefore, you have certain
responsibilities if you distribute copies of the software, or if you modify
it: responsibilities to respect the freedom of others.

For example, if you distribute copies of such a program, whether gratis or
for a fee, you must pass on to the recipients the same freedoms that you
received. You must make sure that they, too, receive or can get the source
code. And you must show them these terms so they know their rights.

Developers that use the GNU GPL protect your rights with two steps: (1)
assert copyright on the software, and (2) offer you this License giving you
legal permission to copy, distribute and/or modify it.

For the developers' and authors' protection, the GPL clearly explains that
there is no warranty for this free software. For both users' and authors'
sake, the GPL requires that modified versions be marked as changed, so that
their problems will not be attributed erroneously to authors of previous
versions.

Some devices are designed to deny users access to install or run modified
versions of the software inside them, although the manufacturer can do so.
This is fundamentally incompatible with the aim of protecting users' freedom
to change the software.
The systematic pattern of such abuse occurs in the area of products for individuals to use, which is precisely where it is most unacceptable. Therefore, we have designed this version of the GPL to prohibit the practice for those products. If such problems arise substantially in other domains, we stand ready to extend this provision to those domains in future versions of the GPL, as needed to protect the freedom of users. Finally, every program is threatened constantly by software patents. States should not allow patents to restrict development and use of software on general-purpose computers, but in those that do, we wish to avoid the special danger that patents applied to a free program could make it effectively proprietary. To prevent this, the GPL assures that patents cannot be used to render the program non-free. The precise terms and conditions for copying, distribution and modification follow. TERMS AND CONDITIONS 0. Definitions. "This License" refers to version 3 of the GNU General Public License. "Copyright" also means copyright-like laws that apply to other kinds of works, such as semiconductor masks. "The Program" refers to any copyrightable work licensed under this License. Each licensee is addressed as "you". "Licensees" and "recipients" may be individuals or organizations. To "modify" a work means to copy from or adapt all or part of the work in a fashion requiring copyright permission, other than the making of an exact copy. The resulting work is called a "modified version" of the earlier work or a work "based on" the earlier work. A "covered work" means either the unmodified Program or a work based on the Program. To "propagate" a work means to do anything with it that, without permission, would make you directly or secondarily liable for infringement under applicable copyright law, except executing it on a computer or modifying a private copy. Propagation includes copying, distribution (with or without modification), making available to the public, and in some countries other activities as well. To "convey" a work means any kind of propagation that enables other parties to make or receive copies. Mere interaction with a user through a computer network, with no transfer of a copy, is not conveying. An interactive user interface displays "Appropriate Legal Notices" to the extent that it includes a convenient and prominently visible feature that (1) displays an appropriate copyright notice, and (2) tells the user that there is no warranty for the work (except to the extent that warranties are provided), that licensees may convey the work under this License, and how to view a copy of this License. If the interface presents a list of user commands or options, such as a menu, a prominent item in the list meets this criterion. 1. Source Code. The "source code" for a work means the preferred form of the work for making modifications to it. "Object code" means any non-source form of a work. A "Standard Interface" means an interface that either is an official standard defined by a recognized standards body, or, in the case of interfaces specified for a particular programming language, one that is widely used among developers working in that language. 
The "System Libraries" of an executable work include anything, other than the work as a whole, that (a) is included in the normal form of packaging a Major Component, but which is not part of that Major Component, and (b) serves only to enable use of the work with that Major Component, or to implement a Standard Interface for which an implementation is available to the public in source code form. A "Major Component", in this context, means a major essential component (kernel, window system, and so on) of the specific operating system (if any) on which the executable work runs, or a compiler used to produce the work, or an object code interpreter used to run it. The "Corresponding Source" for a work in object code form means all the source code needed to generate, install, and (for an executable work) run the object code and to modify the work, including scripts to control those activities. However, it does not include the work's System Libraries, or general-purpose tools or generally available free programs which are used unmodified in performing those activities but which are not part of the work. For example, Corresponding Source includes interface definition files associated with source files for the work, and the source code for shared libraries and dynamically linked subprograms that the work is specifically designed to require, such as by intimate data communication or control flow between those subprograms and other parts of the work. The Corresponding Source need not include anything that users can regenerate automatically from other parts of the Corresponding Source. The Corresponding Source for a work in source code form is that same work. 2. Basic Permissions. All rights granted under this License are granted for the term of copyright on the Program, and are irrevocable provided the stated conditions are met. This License explicitly affirms your unlimited permission to run the unmodified Program. The output from running a covered work is covered by this License only if the output, given its content, constitutes a covered work. This License acknowledges your rights of fair use or other equivalent, as provided by copyright law. You may make, run and propagate covered works that you do not convey, without conditions so long as your license otherwise remains in force. You may convey covered works to others for the sole purpose of having them make modifications exclusively for you, or provide you with facilities for running those works, provided that you comply with the terms of this License in conveying all material for which you do not control copyright. Those thus making or running the covered works for you must do so exclusively on your behalf, under your direction and control, on terms that prohibit them from making any copies of your copyrighted material outside their relationship with you. Conveying under any other circumstances is permitted solely under the conditions stated below. Sublicensing is not allowed; section 10 makes it unnecessary. 3. Protecting Users' Legal Rights From Anti-Circumvention Law. No covered work shall be deemed part of an effective technological measure under any applicable law fulfilling obligations under article 11 of the WIPO copyright treaty adopted on 20 December 1996, or similar laws prohibiting or restricting circumvention of such measures. 
When you convey a covered work, you waive any legal power to forbid circumvention of technological measures to the extent such circumvention is effected by exercising rights under this License with respect to the covered work, and you disclaim any intention to limit operation or modification of the work as a means of enforcing, against the work's users, your or third parties' legal rights to forbid circumvention of technological measures. 4. Conveying Verbatim Copies. You may convey verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice; keep intact all notices stating that this License and any non-permissive terms added in accord with section 7 apply to the code; keep intact all notices of the absence of any warranty; and give all recipients a copy of this License along with the Program. You may charge any price or no price for each copy that you convey, and you may offer support or warranty protection for a fee. 5. Conveying Modified Source Versions. You may convey a work based on the Program, or the modifications to produce it from the Program, in the form of source code under the terms of section 4, provided that you also meet all of these conditions: a) The work must carry prominent notices stating that you modified it, and giving a relevant date. b) The work must carry prominent notices stating that it is released under this License and any conditions added under section 7. This requirement modifies the requirement in section 4 to "keep intact all notices". c) You must license the entire work, as a whole, under this License to anyone who comes into possession of a copy. This License will therefore apply, along with any applicable section 7 additional terms, to the whole of the work, and all its parts, regardless of how they are packaged. This License gives no permission to license the work in any other way, but it does not invalidate such permission if you have separately received it. d) If the work has interactive user interfaces, each must display Appropriate Legal Notices; however, if the Program has interactive interfaces that do not display Appropriate Legal Notices, your work need not make them do so. A compilation of a covered work with other separate and independent works, which are not by their nature extensions of the covered work, and which are not combined with it such as to form a larger program, in or on a volume of a storage or distribution medium, is called an "aggregate" if the compilation and its resulting copyright are not used to limit the access or legal rights of the compilation's users beyond what the individual works permit. Inclusion of a covered work in an aggregate does not cause this License to apply to the other parts of the aggregate. 6. Conveying Non-Source Forms. You may convey a covered work in object code form under the terms of sections 4 and 5, provided that you also convey the machine-readable Corresponding Source under the terms of this License, in one of these ways: a) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by the Corresponding Source fixed on a durable physical medium customarily used for software interchange. 
b) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by a written offer, valid for at least three years and valid for as long as you offer spare parts or customer support for that product model, to give anyone who possesses the object code either (1) a copy of the Corresponding Source for all the software in the product that is covered by this License, on a durable physical medium customarily used for software interchange, for a price no more than your reasonable cost of physically performing this conveying of source, or (2) access to copy the Corresponding Source from a network server at no charge. c) Convey individual copies of the object code with a copy of the written offer to provide the Corresponding Source. This alternative is allowed only occasionally and noncommercially, and only if you received the object code with such an offer, in accord with subsection 6b. d) Convey the object code by offering access from a designated place (gratis or for a charge), and offer equivalent access to the Corresponding Source in the same way through the same place at no further charge. You need not require recipients to copy the Corresponding Source along with the object code. If the place to copy the object code is a network server, the Corresponding Source may be on a different server (operated by you or a third party) that supports equivalent copying facilities, provided you maintain clear directions next to the object code saying where to find the Corresponding Source. Regardless of what server hosts the Corresponding Source, you remain obligated to ensure that it is available for as long as needed to satisfy these requirements. e) Convey the object code using peer-to-peer transmission, provided you inform other peers where the object code and Corresponding Source of the work are being offered to the general public at no charge under subsection 6d. A separable portion of the object code, whose source code is excluded from the Corresponding Source as a System Library, need not be included in conveying the object code work. A "User Product" is either (1) a "consumer product", which means any tangible personal property which is normally used for personal, family, or household purposes, or (2) anything designed or sold for incorporation into a dwelling. In determining whether a product is a consumer product, doubtful cases shall be resolved in favor of coverage. For a particular product received by a particular user, "normally used" refers to a typical or common use of that class of product, regardless of the status of the particular user or of the way in which the particular user actually uses, or expects or is expected to use, the product. A product is a consumer product regardless of whether the product has substantial commercial, industrial or non-consumer uses, unless such uses represent the only significant mode of use of the product. "Installation Information" for a User Product means any methods, procedures, authorization keys, or other information required to install and execute modified versions of a covered work in that User Product from a modified version of its Corresponding Source. The information must suffice to ensure that the continued functioning of the modified object code is in no case prevented or interfered with solely because modification has been made. 
If you convey an object code work under this section in, or with, or specifically for use in, a User Product, and the conveying occurs as part of a transaction in which the right of possession and use of the User Product is transferred to the recipient in perpetuity or for a fixed term (regardless of how the transaction is characterized), the Corresponding Source conveyed under this section must be accompanied by the Installation Information. But this requirement does not apply if neither you nor any third party retains the ability to install modified object code on the User Product (for example, the work has been installed in ROM). The requirement to provide Installation Information does not include a requirement to continue to provide support service, warranty, or updates for a work that has been modified or installed by the recipient, or for the User Product in which it has been modified or installed. Access to a network may be denied when the modification itself materially and adversely affects the operation of the network or violates the rules and protocols for communication across the network. Corresponding Source conveyed, and Installation Information provided, in accord with this section must be in a format that is publicly documented (and with an implementation available to the public in source code form), and must require no special password or key for unpacking, reading or copying. 7. Additional Terms. "Additional permissions" are terms that supplement the terms of this License by making exceptions from one or more of its conditions. Additional permissions that are applicable to the entire Program shall be treated as though they were included in this License, to the extent that they are valid under applicable law. If additional permissions apply only to part of the Program, that part may be used separately under those permissions, but the entire Program remains governed by this License without regard to the additional permissions. When you convey a copy of a covered work, you may at your option remove any additional permissions from that copy, or from any part of it. (Additional permissions may be written to require their own removal in certain cases when you modify the work.) You may place additional permissions on material, added by you to a covered work, for which you have or can give appropriate copyright permission. 
Notwithstanding any other provision of this License, for material you add to a covered work, you may (if authorized by the copyright holders of that material) supplement the terms of this License with terms: a) Disclaiming warranty or limiting liability differently from the terms of sections 15 and 16 of this License; or b) Requiring preservation of specified reasonable legal notices or author attributions in that material or in the Appropriate Legal Notices displayed by works containing it; or c) Prohibiting misrepresentation of the origin of that material, or requiring that modified versions of such material be marked in reasonable ways as different from the original version; or d) Limiting the use for publicity purposes of names of licensors or authors of the material; or e) Declining to grant rights under trademark law for use of some trade names, trademarks, or service marks; or f) Requiring indemnification of licensors and authors of that material by anyone who conveys the material (or modified versions of it) with contractual assumptions of liability to the recipient, for any liability that these contractual assumptions directly impose on those licensors and authors. All other non-permissive additional terms are considered "further restrictions" within the meaning of section 10. If the Program as you received it, or any part of it, contains a notice stating that it is governed by this License along with a term that is a further restriction, you may remove that term. If a license document contains a further restriction but permits relicensing or conveying under this License, you may add to a covered work material governed by the terms of that license document, provided that the further restriction does not survive such relicensing or conveying. If you add terms to a covered work in accord with this section, you must place, in the relevant source files, a statement of the additional terms that apply to those files, or a notice indicating where to find the applicable terms. Additional terms, permissive or non-permissive, may be stated in the form of a separately written license, or stated as exceptions; the above requirements apply either way. 8. Termination. You may not propagate or modify a covered work except as expressly provided under this License. Any attempt otherwise to propagate or modify it is void, and will automatically terminate your rights under this License (including any patent licenses granted under the third paragraph of section 11). However, if you cease all violation of this License, then your license from a particular copyright holder is reinstated (a) provisionally, unless and until the copyright holder explicitly and finally terminates your license, and (b) permanently, if the copyright holder fails to notify you of the violation by some reasonable means prior to 60 days after the cessation. Moreover, your license from a particular copyright holder is reinstated permanently if the copyright holder notifies you of the violation by some reasonable means, this is the first time you have received notice of violation of this License (for any work) from that copyright holder, and you cure the violation prior to 30 days after your receipt of the notice. Termination of your rights under this section does not terminate the licenses of parties who have received copies or rights from you under this License. If your rights have been terminated and not permanently reinstated, you do not qualify to receive new licenses for the same material under section 10. 9. 
Acceptance Not Required for Having Copies. You are not required to accept this License in order to receive or run a copy of the Program. Ancillary propagation of a covered work occurring solely as a consequence of using peer-to-peer transmission to receive a copy likewise does not require acceptance. However, nothing other than this License grants you permission to propagate or modify any covered work. These actions infringe copyright if you do not accept this License. Therefore, by modifying or propagating a covered work, you indicate your acceptance of this License to do so. 10. Automatic Licensing of Downstream Recipients. Each time you convey a covered work, the recipient automatically receives a license from the original licensors, to run, modify and propagate that work, subject to this License. You are not responsible for enforcing compliance by third parties with this License. An "entity transaction" is a transaction transferring control of an organization, or substantially all assets of one, or subdividing an organization, or merging organizations. If propagation of a covered work results from an entity transaction, each party to that transaction who receives a copy of the work also receives whatever licenses to the work the party's predecessor in interest had or could give under the previous paragraph, plus a right to possession of the Corresponding Source of the work from the predecessor in interest, if the predecessor has it or can get it with reasonable efforts. You may not impose any further restrictions on the exercise of the rights granted or affirmed under this License. For example, you may not impose a license fee, royalty, or other charge for exercise of rights granted under this License, and you may not initiate litigation (including a cross-claim or counterclaim in a lawsuit) alleging that any patent claim is infringed by making, using, selling, offering for sale, or importing the Program or any portion of it. 11. Patents. A "contributor" is a copyright holder who authorizes use under this License of the Program or a work on which the Program is based. The work thus licensed is called the contributor's "contributor version". A contributor's "essential patent claims" are all patent claims owned or controlled by the contributor, whether already acquired or hereafter acquired, that would be infringed by some manner, permitted by this License, of making, using, or selling its contributor version, but do not include claims that would be infringed only as a consequence of further modification of the contributor version. For purposes of this definition, "control" includes the right to grant patent sublicenses in a manner consistent with the requirements of this License. Each contributor grants you a non-exclusive, worldwide, royalty-free patent license under the contributor's essential patent claims, to make, use, sell, offer for sale, import and otherwise run, modify and propagate the contents of its contributor version. In the following three paragraphs, a "patent license" is any express agreement or commitment, however denominated, not to enforce a patent (such as an express permission to practice a patent or covenant not to sue for patent infringement). To "grant" such a patent license to a party means to make such an agreement or commitment not to enforce a patent against the party. 
If you convey a covered work, knowingly relying on a patent license, and the Corresponding Source of the work is not available for anyone to copy, free of charge and under the terms of this License, through a publicly available network server or other readily accessible means, then you must either (1) cause the Corresponding Source to be so available, or (2) arrange to deprive yourself of the benefit of the patent license for this particular work, or (3) arrange, in a manner consistent with the requirements of this License, to extend the patent license to downstream recipients. "Knowingly relying" means you have actual knowledge that, but for the patent license, your conveying the covered work in a country, or your recipient's use of the covered work in a country, would infringe one or more identifiable patents in that country that you have reason to believe are valid. If, pursuant to or in connection with a single transaction or arrangement, you convey, or propagate by procuring conveyance of, a covered work, and grant a patent license to some of the parties receiving the covered work authorizing them to use, propagate, modify or convey a specific copy of the covered work, then the patent license you grant is automatically extended to all recipients of the covered work and works based on it. A patent license is "discriminatory" if it does not include within the scope of its coverage, prohibits the exercise of, or is conditioned on the non-exercise of one or more of the rights that are specifically granted under this License. You may not convey a covered work if you are a party to an arrangement with a third party that is in the business of distributing software, under which you make payment to the third party based on the extent of your activity of conveying the work, and under which the third party grants, to any of the parties who would receive the covered work from you, a discriminatory patent license (a) in connection with copies of the covered work conveyed by you (or copies made from those copies), or (b) primarily for and in connection with specific products or compilations that contain the covered work, unless you entered into that arrangement, or that patent license was granted, prior to 28 March 2007. Nothing in this License shall be construed as excluding or limiting any implied license or other defenses to infringement that may otherwise be available to you under applicable patent law. 12. No Surrender of Others' Freedom. If conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot convey a covered work so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not convey it at all. For example, if you agree to terms that obligate you to collect a royalty for further conveying from those to whom you convey the Program, the only way you could satisfy both those terms and this License would be to refrain entirely from conveying the Program. 13. Use with the GNU Affero General Public License. Notwithstanding any other provision of this License, you have permission to link or combine any covered work with a work licensed under version 3 of the GNU Affero General Public License into a single combined work, and to convey the resulting work. 
The terms of this License will continue to apply to the part which is the covered work, but the special requirements of the GNU Affero General Public License, section 13, concerning interaction through a network will apply to the combination as such. 14. Revised Versions of this License. The Free Software Foundation may publish revised and/or new versions of the GNU General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Program specifies that a certain numbered version of the GNU General Public License "or any later version" applies to it, you have the option of following the terms and conditions either of that numbered version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of the GNU General Public License, you may choose any version ever published by the Free Software Foundation. If the Program specifies that a proxy can decide which future versions of the GNU General Public License can be used, that proxy's public statement of acceptance of a version permanently authorizes you to choose that version for the Program. Later license versions may give you additional or different permissions. However, no additional obligations are imposed on any author or copyright holder as a result of your choosing to follow a later version. 15. Disclaimer of Warranty. THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 16. Limitation of Liability. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. 17. Interpretation of Sections 15 and 16. If the disclaimer of warranty and limitation of liability provided above cannot be given local legal effect according to their terms, reviewing courts shall apply local law that most closely approximates an absolute waiver of all civil liability in connection with the Program, unless a warranty or assumption of liability accompanies a copy of the Program in return for a fee. END OF TERMS AND CONDITIONS How to Apply These Terms to Your New Programs If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms. To do so, attach the following notices to the program. 
It is safest to attach them to the start of each source file to most
effectively state the exclusion of warranty; and each file should have at
least the "copyright" line and a pointer to where the full notice is found.

    <one line to give the program's name and a brief idea of what it does.>
    Copyright (C) <year>  <name of author>

    This program is free software: you can redistribute it and/or modify it
    under the terms of the GNU General Public License as published by the
    Free Software Foundation, either version 3 of the License, or (at your
    option) any later version.

    This program is distributed in the hope that it will be useful, but
    WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
    General Public License for more details.

    You should have received a copy of the GNU General Public License along
    with this program. If not, see <http://www.gnu.org/licenses/>.

Also add information on how to contact you by electronic and paper mail.

If the program does terminal interaction, make it output a short notice like
this when it starts in an interactive mode:

    <program>  Copyright (C) <year>  <name of author>
    This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
    This is free software, and you are welcome to redistribute it under
    certain conditions; type `show c' for details.

The hypothetical commands `show w' and `show c' should show the appropriate
parts of the General Public License. Of course, your program's commands
might be different; for a GUI interface, you would use an "about box".

You should also get your employer (if you work as a programmer) or school,
if any, to sign a "copyright disclaimer" for the program, if necessary. For
more information on this, and how to apply and follow the GNU GPL, see
<http://www.gnu.org/licenses/>.

The GNU General Public License does not permit incorporating your program
into proprietary programs. If your program is a subroutine library, you may
consider it more useful to permit linking proprietary applications with the
library. If this is what you want to do, use the GNU Lesser General Public
License instead of this License. But first, please read
<http://www.gnu.org/philosophy/why-not-lgpl.html>.

==> mpop-1.5.0/MANIFEST.in <==
include doc/Makefile
include doc/source/*
include doc/examples/*.py
include etc/*
include LICENSE.txt
include README.rst
include MANIFEST.in

==> mpop-1.5.0/README <==
(symbolic link to README.rst)

==> mpop-1.5.0/README.rst <==
This is the Meteorological Post-Processing Package (MPoP).

A recent build of the documentation is available at
http://mpop.readthedocs.org/.

==> mpop-1.5.0/changelog.rst <==
Changelog
=========

v1.5.0 (2017-10-18)
-------------------

Fix
~~~

- Bugfix: Calibration of channel 3A reflectances was wrong! [Adam.Dybbroe]
- Bugfix: RGBs using AVHRR 3a/3b with Metop GDS were wrong, as the mask was
  not handled when stacking arrays. [Adam.Dybbroe]

Other
~~~~~

- Update changelog. [Martin Raspaud]
- Bump version: 1.4.0 → 1.5.0. [Martin Raspaud]
- Add zero seconds option for ninjotiff. [Martin Raspaud]
- Numexpr dependency fixed. [Adam.Dybbroe]
- Fix aapp-l1b reader handling orbit numbers < 10000. [Adam.Dybbroe]
- Add links to EUM docs in header documentation. [Adam.Dybbroe]
- Requires python2-numexpr. [Adam.Dybbroe]
- Fill out M07 masked values with M06, to partly mitigate missing
  reflectance data inside the VIIRS swath (an increasing problem with later
  CSPP versions). [Adam.Dybbroe]
- Numpy>=1.10 compliant.
  Return a copy (instead of a view) of the band geolocations when creating
  the SwathDefinition for the band area. [Adam.Dybbroe]

  Not really sure if this is where the real problem sits, but it seems to
  fix a problem with masking with newer numpy versions (>=1.10).

- Make eps-l1b reader numpy>=1.12 compatible. [Adam.Dybbroe]
- Change requirement to numexpr. [Adam.Dybbroe]
- Merge branch 'pre-master' of github.com:pytroll/mpop into pre-master.
  [Adam.Dybbroe]
- Fix geotiff transparency rules. [Martin Raspaud]
- Change rpm dependencies to use pre-packaged numexpr. [Martin Raspaud]
- Merge pull request #47 from tparker-usgs/master. [Martin Raspaud]

  Expand error message when geolocation or band data is missing

- Expand error message when geolocation or band data is missing.
  [Tom Parker]
- Merge branch 'pre-master' of github.com:pytroll/mpop into pre-master.
  [Adam.Dybbroe]
- Add 'channel' to prologue format dictionary. [Panu Lahtinen]
- Set default values for glob_epi and glob_pro. [Panu Lahtinen]
- Check that EPI and PRO patterns are available. [Panu Lahtinen]
- Handle missing PRO and EPI patterns. [Panu Lahtinen]
- Merge pull request #46 from pytroll/feature_adjust_aapp1b_masking.
  [Martin Raspaud]

  Remove double masking of 3A and 3B channels

- Remove double masking, which discards the previous mask and leaves NaNs
  unmasked. [Panu Lahtinen]
- Merge pull request #45 from pytroll/feature-preprecompute.
  [Martin Raspaud]

  Refactor mpop.projector and add a script for pre-calculating projection
  parameters

- Add more tests. [Panu Lahtinen]

  - test calc_nearest_params()
  - test calc_quick_params()
  - test calc_bilinear_params()
  - test calc_ewa_params()
  - test get_precompute_cache_fname()
  - test get_area_and_id()

- Remove a print statement. [Panu Lahtinen]
- Handle missing pyresample.ewa, some PEP8. [Panu Lahtinen]
- Add a module that can precalculate the projection parameters.
  [Panu Lahtinen]
- Split to more generic methods and functions. [Panu Lahtinen]
- Merge branch 'pre-master' of github.com:pytroll/mpop into pre-master.
  [Adam.Dybbroe]
- Pep8. [Adam.Dybbroe]

v1.4.0 (2017-02-20)
-------------------

- Update changelog. [Martin Raspaud]
- Bump version: 1.3.1 → 1.4.0. [Martin Raspaud]
- Merge pull request #44 from pytroll/feature_bilinear. [Martin Raspaud]

  Feature bilinear

- Clarify docstring - radius is also used with bilinear mode.
  [Panu Lahtinen]
- Test that bilinear interpolation is called. [Panu Lahtinen]
- Mock pyresample.bilinear, autopep8 the file. [Panu Lahtinen]
- Update docstring. [Panu Lahtinen]
- Move imports to top of the file. [Panu Lahtinen]
- Remove Python 2.6 from the tested versions. [Panu Lahtinen]
- Require Pillow version < 4.0.0 for Python 2.6. [Panu Lahtinen]
- Add mask to bilinear resampled data. [Panu Lahtinen]
- Fix naming. [Panu Lahtinen]
- Add implicit flag to receive un-masked parameters for bilinear
  interpolation. [Panu Lahtinen]
- Add bilinear interpolation. [Panu Lahtinen]
- Merge branch 'pre-master' of github.com:pytroll/mpop into pre-master.
  [Adam.Dybbroe]
- Clear cache before inserting EWA parameters. [Panu Lahtinen]
- Add EWA resampling. [Panu Lahtinen]
- Convert maximum extent to list before modification. [Martin Raspaud]
- Do the scaling before finalization in ninjotiff's finalize.
  [Martin Raspaud]
- Fix H8 template file's band names. [Martin Raspaud]
- Extract elevation from viirs-sdr + Fix viirs true color. [Adam.Dybbroe]

v1.3.1 (2016-12-12)
-------------------

- Update changelog. [Martin Raspaud]
- Bump version: 1.3.0 → 1.3.1.
  [Martin Raspaud]
- Update maximum_extent instead of just returning the first "geos" extent.
  [Panu Lahtinen]
- Return the area extent from the area definition, if the projection is
  "geos". [Panu Lahtinen]
- Fix 'sun_zen_correction_applied' not being set to true. [Martin Raspaud]
- Merge pull request #39 from mraspaud/pre-master. [Martin Raspaud]

  Turn RGB ninjotiff image into RGBA when fill_value is None

- Turn RGB ninjotiff image into RGBA when fill_value is None.
  [Martin Raspaud]

  This patch makes it possible to replicate the behaviour of saving PIL
  images.

v1.3.0 (2016-10-27)
-------------------

- Update changelog. [Martin Raspaud]
- Bump version: 1.2.1 → 1.3.0. [Martin Raspaud]
- Add bump and gitchangelog configs. [Martin Raspaud]
- Fix pep8 compliance. [Martin Raspaud]
- Use filenames for mipp only if the files are relevant. [Martin Raspaud]
- Handle time_slot tuples better by splitting them. [Martin Raspaud]

  This allows mpop to be backwards compatible for non mipp-based readers.

- Allow providing filenames to the mipp xrit reader. [Martin Raspaud]

  This allows mpop to use filenames as provided by trollduction.

- Make it possible to specify custom stretching of truecolor imagery.
  [Adam.Dybbroe]
- Merge branch 'pre-master' of github.com:pytroll/mpop into pre-master.
  [Adam.Dybbroe]
- Merge pull request #36 from khunger/feature-fill-value-substitution.
  [Martin Raspaud]

  New option fill_value_subst

- New option fill_value_subst. [Christian Kliche]

  This option can be used in conjunction with GeoImage.fill_value. Any
  occurrences of fill_value within the image data will be replaced with
  fill_value_subst before storing to the image file.

  Example trollduction configuration to use this feature:
  test.tif 1

- Merge pull request #37 from khunger/feature-xrit-sublon-metadata.
  [Martin Raspaud]

  Atmospheric correction and xrit metadata "sublon" in sat scene info

- Algorithm for atmospheric correction. [Christian Kliche]

  Added a new algorithm to the Channel class to apply atmospheric
  correction on a copy of the channel data, using given satellite zenith
  angle data. Creates a new channel containing the corrected data.

- Added xrit metadata "sublon" to sat scene info. [Christian Kliche]
- Bugfix fill_value in cf-output. [Adam.Dybbroe]
- Support PPS on I-band resolution. [Adam.Dybbroe]
- Bugfix platform naming. [Adam.Dybbroe]
- Merge branch 'pre-master' of github.com:pytroll/mpop into pre-master.
  [Adam.Dybbroe]
- Merge pull request #35 from khunger/feature-writer-options.
  [Martin Raspaud]

  Feature writer options

- Added tests for save with writer_options. [Christian Kliche]
- Fixed unit tests. [Christian Kliche]
- Changed parameter order for backwards compatibility. [Christian Kliche]
- GeoImage.save extended by writer_options dict. [Christian Kliche]

  Some dict keys for options used by GeoImage.save are defined in
  writer_options.py. All options within this dict will be forwarded to
  custom writers like the NinJoTiff writer module.

- GeoImage.save extended by writer_options dict. [Christian Kliche]

  Some dict keys for options used by GeoImage.save are defined in
  writer_options.py. All options within this dict will be forwarded to
  custom writers like the NinJoTiff writer module.

- Allow adding int, float and str attributes to the main info object.
  [Adam.Dybbroe]
- Merge branch 'pre-master' of github.com:pytroll/mpop into pre-master.
  [Adam.Dybbroe]
- Merge pull request #33 from meteoswiss-mdr/pre-master. [Martin Raspaud]

  H-SAF and Odyssey reader

- Add odyssey reader. [hau]
- Renamed hsaf reader.
  [hau]
- Merge branch 'pre-master' of https://github.com/meteoswiss-mdr/mpop into
  pre-master. [hau]
- Merge branch 'pre-master' of https://github.com/pytroll/mpop into
  pre-master. [hau]
- Add config file for reading hsaf data. [hau]
- Add new reader for HSAF h03 product. [hau]
- Small bugfix for hdf5 SEVIRI reader. [hau]
- Add option area_aggregation. [Adam.Dybbroe]

  Default is True for backward compatibility. If False, the band_axis
  parameter is obsolete and all bands are separated in 2d arrays. Writing
  goes faster this way.

- Merge branch 'pre-master' of github.com:pytroll/mpop into pre-master.
  [Adam.Dybbroe]
- Merge pull request #34 from ninahakansson/pre-master. [Martin Raspaud]

  Faster writing in cfscene.py for pps

- Merge branch 'pre-master' of https://github.com/pytroll/mpop into
  pre-master. [Nina.Hakansson]
- Faster writing with time_dimension by checking the fastest condition
  first. [Nina.Hakansson]

  The condition "(chn.area, chn.info['units']) in area_units" takes several
  seconds to check for an npp scene combined from some granules.

- Pep8 editorials. [Adam.Dybbroe]
- Adapt writing to new cfscene. [Adam.Dybbroe]
- Bugfix sun-sat angles: Sort geofiles before. [Adam.Dybbroe]
- Make writer able to have time dimension and flat band structure.
  [Adam.Dybbroe]

  Use time_dimension=True to use this way of storing data.

- Bugfix viirs geolocation. [Adam.Dybbroe]

  When geolocation granule files are not ordered in time, geolocation got
  wrong when calling the loader with a list of files.

- Add time_dimension option in CFScene writer. [Adam.Dybbroe]

  The time dimension is used in Diana (the visualisation system at SMHI)
  and in PPS.

- Fix netcdf file output. [Adam.Dybbroe]
- Merge branch 'pre-master' of github.com:pytroll/mpop into pre-master.
  [Adam.Dybbroe]
- Ensure proper handling of lower and uppercase epsg/EPSG init strings.
  [Panu Lahtinen]
- Merge branch 'pre-master' of https://github.com/pytroll/mpop into
  pre-master. [Panu Lahtinen]
- Get proper srs when using init=EPSG: in projection definition.
  [Panu Lahtinen]
- Change keyword argument to filenames. [Adam.Dybbroe]
- Adapt to pillow. [Adam.Dybbroe]
- Fix palettes normalised to 0-1 for mpop. [Adam.Dybbroe]
- Add imagery capability for OCA cloud parameters. [Adam.Dybbroe]

  Only the cloud top pressure (ctp) parameter is okay so far. Need to check
  effective radius and COT.

- Add the FY3 MERSI-I reader. [Adam.Dybbroe]
- Add mpef oca reader. [Adam.Dybbroe]
- Add the embedded palette to ninjotiff generation if not overridden.
  [Martin Raspaud]
- Merge pull request #30 from khunger/fix-read-area-calculation.
  [Martin Raspaud]

  More conservative approach to handle errors in area_def_names_to_extent()

- Use the reader's def area_extent if the calculation fails.
  [Christian Kliche]
- Merge pull request #32 from meteoswiss-mdr/pre-master. [Martin Raspaud]

  Parallax correction and high resolution winds

- Make use of sat_nr function in nwcsaf_msg.py. [hau]
- Option for estimating cth for parallax correction. [hau]

  Introduced an optional argument specifying whether cth should be
  estimated or not. Additionally introduced a small function to extract the
  satellite number.

- User choice of background color for day_microphysics. [hau]
- Added functionality for parallax correction.
  [hau]

  Added new functions:

    mpop/tools.py   -> estimate_cth
    mpop/scene.py   -> get_orbital, parallax_corr
    mpop/channel.py -> get_viewing_geometry, parallax_corr, vinc_vect

  estimate_cth: simple estimation of the cloud top height, comparing the
  10.8 micron temperature with a temperature profile.
  get_orbital: small wrapper to get the satellite orbital from pyorbital.
  parallax_corr (scene.py): performs parallax correction for all loaded
  channels.
  get_viewing_geometry: small function returning the viewing azimuth and
  elevation angles for the current channel.
  parallax_corr (channel.py): performs parallax correction for a single
  channel.
  vinc_vect: parallelized version of the vinc function.

- Copy the information of the palette for NWCSAF products. [hau]

  ... when reprojecting

- Add other satellite number definition to reader. [hau]

  Add 8 and 9 entries for Meteosat 8 and 9; before, only 08 and 09 were
  possible.

- Updated the _Calibrator call in msg_seviri_hdf.py. [hau]

  Updated the initialization of the _Calibrator function in
  mpop/satin/msg_seviri_hdf.py. msg_seviri_hdf.py uses the _Calibrator
  function in mipp/xrit/MSG.py, which was updated by Martin and now takes
  another number of input arguments:

  before Martin's change:
    class _Calibrator(object): def __init__(self, hdr, channel_name):
  after Martin's change:
    class _Calibrator(object): def __init__(self, hdr, channel_name, bits_per_pixel):

  So now the argument bits_per_pixel is set to 10.

- Merge branch 'test' into pre-master. [hau]
- Add file to read high resolution wind data from NWCSAF. [hau]
- Add code to process TRT. [hau]

  TRT is a MeteoSwiss tool to detect thunderstorm cells. The data can be
  processed with this file.

- Add a code file to process HRW data from NWC-SAF. [hau]
- New reader nwcsaf and modified scene.py. [hau]

  A new file for reading NWCSAF data is submitted; it can read CTTH, CMa,
  CT, CCR, CRPh, PC and SPhR. Added a small option to scene.py in order to
  specify which level specifies the desired format of the input file.

- Add reader for geo-hdf format EUMETSAT archive. [hau]

  geo-hdf is a possible data format that you can order from the EUMETSAT
  data archive. It makes it possible to specify smaller regions.

- Add cloud phase palette and palette2colormap function. [hau]
- Test commit for submodule. [hau]
- Merge pull request #31 from elfsprite/pre-master. [Martin Raspaud]

  Added S2A reader files to fork

- Tile definition is now downloaded and converted automatically.
  [Matias Takala]
- Added S2A reader files to fork. [Matias Takala]
- Fix typo. [Adam.Dybbroe]
- Make it possible to specify fill-value in overview_sun. [Adam.Dybbroe]
- Merge branch 'pre-master' of github.com:pytroll/mpop into pre-master.
  [Adam.Dybbroe]
- Fix bug of setting shape to viirs reader using foreign band name.
  [Martin Raspaud]
- Reorganize imports. [Martin Raspaud]
- Add mercator to the supported ninjo projections. [Martin Raspaud]
- Merge branch 'pre-master' of github.com:pytroll/mpop into pre-master.
  [Adam.Dybbroe]
- Merge branch 'pre-master' of github.com:pytroll/mpop into pre-master.
  [Lars Orum Rasmussen]
- Added a config template for sentinel-1b. [Lars Orum Rasmussen]
- Merge branch 'pre-master' of github.com:pytroll/mpop into pre-master.
  [Adam.Dybbroe]
- Ninjotiff.save now supports palette ('P') mode. [ras]
- New option to handle if data is scaled between 0 and 1. [ras]
- Set fill values as a kw arg. [Martin Raspaud]
- Use the reader's def area_extent if the calculation fails.
  [Christian Kliche]
- Keep mask when stacking segments in aapp1b. [Martin Raspaud]
- Get sun-sat angles for modise reading.
  [Adam.Dybbroe]
- Bugfix getting satellite zenith and azimuth, and document the
  get_sunsat_angles method. [Adam.Dybbroe]
- Merge branch 'pre-master' of github.com:pytroll/mpop into pre-master.
  [Adam.Dybbroe]
- Keep track of sun corrected channels for eg ears viirs. [Martin Raspaud]
- Fix viirs_compact to remove files even when crashing. [Martin Raspaud]
- Add method to retrieve the sun-satellite viewing geometry. [Adam.Dybbroe]
- Pep8. [Adam.Dybbroe]
- Fix the snow_age composite to right luminosity. [Martin Raspaud]
- Add MF's copyright notice for the luts. [Martin Raspaud]
- Use original luts for the snow_age composite. [Martin Raspaud]
- Fix projector test. [Martin Raspaud]
- Work around the problem coming at night when M9-channel would mask all
  the data away. [Panu Lahtinen]
- Add Snow Age RGB composite. [Panu Lahtinen]
- Handle cases where DNB and/or M channel data are requested but no such
  files are present. [Panu Lahtinen]
- Bugfix hdfeos. [Martin Raspaud]

  Checkfile was using an undefined variable.

- Finish integrating trollsift into hdfeos reader. [Martin Raspaud]

  Some parts were left unfinished. Should be fixed now.

- Fix new hdfeos reader to look for data on invalid input. [Martin Raspaud]

  The hdfeos reader would fail looking for data in standard places when an
  iterable of invalid files would be provided.

- Fix hdfeos reader to look for data on invalid input. [Martin Raspaud]

  The hdfeos reader would fail looking for data in standard places when an
  iterable of invalid files would be provided.

- Merge branch 'pre-master' of https://github.com/pytroll/mpop into
  pre-master. [Panu Lahtinen]
- Update config templates for MODIS to match recent updates in EOS reader.
  [Adam.Dybbroe]
- Merge branch 'pre-master' of github.com:pytroll/mpop into pre-master.
  [Adam.Dybbroe]
- Run travis tests in a container. [Martin Raspaud]
- Fix bug related to modis DR. [Adam.Dybbroe]
- Mask out dubious lon/lat values. [Adam.Dybbroe]
- Merge branch 'pre-master' of github.com:pytroll/mpop into pre-master.
  [Adam.Dybbroe]
- Added contributions from Christian (DWD) to ninjotiff. It supports input
  data in the [0.0, 1.0] range. [Lars Orum Rasmussen]
- Print more debug info concerning calibration. [Adam.Dybbroe]
- Fix formatting character in log message. [Panu Lahtinen]

v1.2.1 (2015-12-14)
-------------------

- Update changelog. [Martin Raspaud]
- Bump version: 1.2.0 → 1.2.1. [Martin Raspaud]
- Merge branch 'pre-master' [Martin Raspaud]
- Merge branch 'pre-master' [Martin Raspaud]

  Conflicts: doc/source/pp.rst

- Update changelog. [Martin Raspaud]
- Bump version: 1.1.0 → 1.2.0. [Martin Raspaud]
- Merge branch 'pre-master' [Martin Raspaud]

  Conflicts: mpop/version.py setup.py

v1.2.0 (2015-12-14)
-------------------

Fix
~~~

- Bugfix: converted MSG products should be saveable. [Martin Raspaud]
- Bugfix: satellite name in msg_hdf now supports missing number.
  [Martin Raspaud]
- Bugfix: misspelling. [Martin Raspaud]
- Bugfix: mipp_xrit: do not crash on unknown channels, just warn and skip.
  [Martin Raspaud]
- Bugfix: setup.py now includes the eps xml format description.
  [Martin Raspaud]
- Close all h5files in viirs_sdr, not only the last one. [Martin.Raspaud]
- Bugfix: close h5 files when done. [Martin Raspaud]

  Prior to h5py 3.0, the h5 files opened with h5py are not closed upon
  deletion, so we have to do it ourselves...

- Bugfix: area.id doesn't exist, use area.area_id. [Martin Raspaud]
- Bugfix: return when each file has been loaded independently.
  [Martin Raspaud]
- Bugfix: Do not crash on multiple non-nwc files.
- Bugfix: check start and end times from loaded channels only. [Martin Raspaud]
- Bugfix: viirs start and end times no longer rely on non-existent
  channels. [Martin Raspaud]
- Bugfix: type() doesn't support unicode, cast to str. [Martin Raspaud]

Other
~~~~~
- Update changelog. [Martin Raspaud]
- Bump version: 1.1.0 → 1.2.0. [Martin Raspaud]
- Add template parameters for NOAA-19 ears-nwc. [Adam.Dybbroe]

  Parameters needed if you want to load with time_interval only, without
  using the filename argument.
- Merge branch 'pre-master' of github.com:pytroll/mpop into pre-master. [Adam.Dybbroe]
- Merged (by hand) the sentinel1-feature branch. [Lars Orum Rasmussen]
- Added support for gdal's SetNoDataValue if fill_value is not
  None. [Lars Orum Rasmussen]
- Merge branch 'pre-master' of github.com:pytroll/mpop into pre-master. [Lars Orum Rasmussen]
- Added an RGB example. [Lars Orum Rasmussen]
- Don't use colormaps for 16-bit grayscale (Ninjo will fail on
  enhancements). [Lars Orum Rasmussen]

  For 16-bit IR, if specified, set min-is-white. For 16-bit grayscale, it
  seems that transparent pixels are forced to zero in Ninjo. Transparent
  pixels for 16-bit IR are handled badly.
- Add a template config for ears-nwc Metop-B reading. [Adam.Dybbroe]
- Fix bug when using the time_interval argument to load ears-nwc
  data. [Adam.Dybbroe]
- Add brightness temperature calibration to the IR bands. [Adam.Dybbroe]
- Update EARS config files for the new (2014) PPS product
  format. [Adam.Dybbroe]
- Remove the old FY3 mersi reader. [Adam.Dybbroe]
- Apply VIS/NIR calibration including the sun-zenith
  correction. [Adam.Dybbroe]
- Merge branch 'pre-master' of github.com:pytroll/mpop into pre-master. [Adam.Dybbroe]
- Now ninjotiff can list tags. [Lars Orum Rasmussen]

  Ninjo tags are now a dictionary.
- Add FY-3B template config file. [Adam.Dybbroe]
- Add a first draft FY3B VIRR reader. [Adam.Dybbroe]

  No calibration yet, but counts can be projected and displayed.
- Added contributions from Christian (DWD) to ninjotiff: now using
  tifffile.py, and support for RGBA. [Lars Orum Rasmussen]

  Changed scaling into a value range (so it works for me).
- Merge branch 'pre-master' of https://github.com/pytroll/mpop into pre-master. [Panu Lahtinen]
- Delete world_map.ascii. [Martin Raspaud]
- Read DNB using PyTables; separate read() into read_m() and
  read_dnb(). [Panu Lahtinen]
- Update coords2area_def with a preview mode. [Martin Raspaud]
- Merge branch 'pre-master' of https://github.com/pytroll/mpop into pre-master. [Panu Lahtinen]
- Remove debug printout from the pps reader. [Adam.Dybbroe]
- Support a list of files which will be concatenated; enables the use of
  granule data. [Panu Lahtinen]
- Fix for channel names and channel loading. [Panu Lahtinen]
- Added Himawari-8 config template file. [Martin Raspaud]
- Mask out 0-count areas in aapp_l1b. [Martin Raspaud]
- Support saving GeoImages in IO buffers. [Martin Raspaud]
- Add support for NOAA GAC and LAC data. [Martin Raspaud]
- Take care of fill_value in datasets. [Adam.Dybbroe]
- Merge branch 'pre-master' of github.com:pytroll/mpop into pre-master. [Adam.Dybbroe]
- Fix the sun zenith angle correction fix. [Martin Raspaud]
- Do not check the time_slot type. [Martin Raspaud]
- Bugfix ctth scaling: only keep the same datatype if the data are not
  scaled. [Adam.Dybbroe]
- Less debug info. [Adam.Dybbroe]
- Bugfix: the sun zenith correction can now take an additional keyword,
  and data are masked out accordingly (see the sketch below). [Adam.Dybbroe]
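A hedged sketch of the sun-zenith correction referred to in the last entry
above; the "limit" keyword and the masking behaviour are assumptions drawn
from the entry, not verified against the code::

    # Hedged sketch: the limit keyword and masking semantics are assumed.
    from datetime import datetime
    from mpop.satellites import PolarFactory

    time_slot = datetime(2015, 6, 1, 10, 30)
    scene = PolarFactory.create_scene("noaa", "19", "avhrr", time_slot,
                                      "12345")
    scene.load([0.6])

    # Correct the 0.6 um reflectances for the sun zenith angle; pixels
    # beyond the limit are masked out (assumed semantics).
    corrected = scene[0.6].sunzen_corr(time_slot, limit=85.)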
- Fix overview_sun: avoid reddish RGBs when the sun is very low (below the
  horizon). [Adam.Dybbroe]
- Read also the palette data etc. [Adam.Dybbroe]
- Merge branch 'pre-master' of github.com:pytroll/mpop into pre-master. [Adam.Dybbroe]
- Add orbit number info to the scene metadata upon loading
  (hdfeos). [Martin Raspaud]
- Hdfeos: the orbit number is now an int. [Martin Raspaud]
- Fix geolocation reading for multiple-file processing
  (hdfeos). [Martin Raspaud]
- Changed an error message to a warning. [Adam.Dybbroe]
- Fix hdf_eos to allow reading several granules. [Martin Raspaud]
- Enhance the dnb_overview so that pixels with solar contamination are
  masked out. [Adam.Dybbroe]
- Bring back the night_overview (= cloudtop). [Adam.Dybbroe]
- Comment out the night_overview. [Adam.Dybbroe]
- Merge branch 'pre-master' of github.com:pytroll/mpop into pre-master. [Adam.Dybbroe]
- Bugfix overview_sun. [Martin Raspaud]
- Use the builtin sunzen_corr for overview_sun. [Martin Raspaud]
- Switch to null-terminated strings in msg_hdf for nr
  products. [Martin Raspaud]
- Bugfix. [Adam.Dybbroe]
- Improve overview for viirs, and overview_sun. [Adam.Dybbroe]
- Re-introduce the fix for VIIRS bowtie deletions. [Adam.Dybbroe]
- Shout when both a list of file names and a time interval are used.
  Accept time_interval even for local files. [Adam.Dybbroe]
- Merge branch 'pre-master' of github.com:pytroll/mpop into pre-master. [Adam.Dybbroe]

  Conflicts: mpop/satin/nc_pps_l2.py
- Fixed incorrect production sources and geolocation file names for
  'local' products. [Panu Lahtinen]
- Added a unit test for the nc_pps_l2 reader, and adapted the reader a
  bit. [Adam.Dybbroe]
- Merge branch 'pre-master' of https://github.com/pytroll/mpop into pre-master. [Panu Lahtinen]
- Fixme reminder. [Adam.Dybbroe]
- Restructure how the data and geolocation files are listed and
  read. [Panu Lahtinen]
- Fixed workaround for DIMENSION_LIST attributes. [Panu Lahtinen]
- Minor fixes - thanks Panu! [Adam.Dybbroe]
- Cleaning up a bit, and pep8. [Adam.Dybbroe]
- Merge branch 'pre-master' of github.com:pytroll/mpop into pre-master. [Adam.Dybbroe]
- Updated reading to support both M and DNB channel data. [Panu Lahtinen]
- Adapt navigation to the compact_viirs dnb. [Martin Raspaud]
- Do not crash when an unknown channel is requested in
  msg_hdf. [Martin Raspaud]
- Fix template files. [Adam.Dybbroe]
- Fix template files for the NOAA satellites and Metop-A/B. [Adam.Dybbroe]
- Bugfix: treat cases when no geolocation is found for a
  product. [Adam.Dybbroe]
- More debug info. [Adam.Dybbroe]
- More debug info. [Adam.Dybbroe]
- Fix save function, and bugfix. [Adam.Dybbroe]
- More debug info and better exception handling - pps
  reader. [Adam.Dybbroe]
- Rewrote the netCDF4 PPS reader. [Adam.Dybbroe]
- Cleaning up unused code. [Adam.Dybbroe]
- Add the info attribute to NordRadCType. [Martin Raspaud]
- Fix filename search in msg_hdf. [Martin Raspaud]
- Fix extension problem in product search for msg_hdf. [Martin Raspaud]
- Replace pyhl with h5py in the msg_hdf reader. [Martin Raspaud]
- Bugfix ascat l2 reader. [Adam.Dybbroe]
- Trying to fix odd behaviour when loading a list of products. But it
  still doesn't work - needs a small refactoring. [Adam.Dybbroe]
- Added an option to select granules in a time interval (see the sketch
  below). [Adam.Dybbroe]
- More debug info - for the custom compositer. [Adam.Dybbroe]
- Merge pull request #17 from spareeth/pre-master. [Martin Raspaud]

  ASCAT SAR soil moisture level 2 netcdf data from EUMETSAT
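The granule selection by time interval mentioned a few entries above might
look like the following; a sketch only, assuming a reader that accepts the
time_interval keyword (the product name is illustrative)::

    # Hedged sketch of time_interval-based loading (EARS-NWC style).
    from datetime import datetime, timedelta
    from mpop.satellites import PolarFactory

    start_time = datetime(2015, 6, 1, 10, 0)
    scene = PolarFactory.create_scene("noaa", "19", "avhrr", start_time)

    # Load all granules falling inside the interval instead of one file.
    scene.load(["CT"],
               time_interval=(start_time,
                              start_time + timedelta(minutes=15)))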
- Add new reader and config files for ASCAT SAR soil moisture level 2
  netcdf data from EUMETSAT. [Sajid Pareeth]
- Added the possibility to read granule data from EARS, plus some PEP8
  work. [Panu Lahtinen]
- Avoid leaking memory. [Martin Raspaud]
- Bugfix. [Adam.Dybbroe]
- Raise an error if projection is attempted when the swath data doesn't
  have full geolocation. [Adam.Dybbroe]
- Remove one verbose debug printout. [Adam.Dybbroe]
- Adapt for the new PPS netCDF format modification (adding a dimension of
  length 1). [Adam.Dybbroe]
- Check for cloudtype=None. [Adam.Dybbroe]
- Add an option to provide the MSG filename to the load
  call. [Adam.Dybbroe]
- Check if the PPS file is bzipped, and handle it correctly. [Adam.Dybbroe]
- Fix orbit number attribute name in msg_hdf. [Martin Raspaud]
- Possible to pass a value range to save. [Lars Orum Rasmussen]
- The Chlorophyll-a palette is gone - now an exception is raised if it is
  asked for... [Adam.Dybbroe]
- Merge branch 'feature-osisaf-sst-reader' into pre-master. [Adam.Dybbroe]
- Adding a reader and palette support for OSISAF SST netCDF
  products. [Adam.Dybbroe]
- Fixed the external calibration "newer/older than data"
  message. [Panu Lahtinen]
- Fix ctth writing. [Martin Raspaud]
- Fixed typo. [Martin Raspaud]
- Add the orbit number to generated cloud product hdf
  files. [Martin Raspaud]
- Fix the new pyspectral calculator signature. [Martin Raspaud]
- Put back the mipp information in the template config
  files. [Martin Raspaud]
- Pyspectral now uses standard platform names. [Martin Raspaud]
- Merge branch 'pre-master' of https://github.com/mraspaud/mpop into pre-master. [Panu Lahtinen]
- Add the algorithm version to output cloud products. [Martin Raspaud]
- Exception handling for missing external calibration
  data. [Panu Lahtinen]
- Minor PEP8 tweaks. [Panu Lahtinen]
- Script to generate external calibration files for AVHRR
  instruments. [Panu Lahtinen]
- Support for external calibration coefficients for AVHRR. [Panu Lahtinen]
- Removed the obsolete "satname" and "number" from satellite configs,
  updated documentation. [Panu Lahtinen]
- Renamed satellite configs to conform to the OSCAR naming
  scheme. [Panu Lahtinen]
- Add luts to the pps products from msg format. [Martin Raspaud]
- Add metadata to nwcsaf products. [Martin Raspaud]
- Add \0 to palette strings. [Martin Raspaud]
- Fix pps format output for msg products. [Martin Raspaud]
- Remove the phase palette from msg products to avoid
  confusion. [Martin Raspaud]
- Bugfix, np.string -> np.string_ [Martin Raspaud]
- Change variable-length strings in h5 products to fixed. [Martin Raspaud]
- Fix some cloud product conversions. [Martin Raspaud]
- Fix MSG format to PPS format conversion. [Martin Raspaud]
- Merge branch 'pre-master' of github.com:mraspaud/mpop into pre-master. [Martin Raspaud]
- Merge pull request #16 from pnuu/simplified_platforms. [Martin Raspaud]

  Simplified platform names for reading custom composites
- Simplified platform names for reading custom composites. [Panu Lahtinen]
- Change: accept arbitrary kwargs for saving msg hdf
  products. [Martin Raspaud]
- Revert concatenation to its original place, in order to keep the tests
  working. [Martin Raspaud]
- Fix whole-globe area_extent for loading. [Martin Raspaud]
- Fix rpm building. [Martin Raspaud]
- Fix masking of lonlats in viirs_sdr. [Martin Raspaud]
- Fixing the pps-nc reader. [Adam Dybbroe]
- Clean temporary files after loading. [Adam Dybbroe]
- Pep8 stuff. [Adam Dybbroe]
- Fixed polar-stereographic projection bugs, thanks to Ron
  Goodson. [Lars Orum Rasmussen]
- Update changelog. [Martin Raspaud]
- Bump version: 1.0.2 → 1.1.0. [Martin Raspaud]
- Put config files in etc/pytroll. [Martin Raspaud]
- Fix version strings. [Martin.Raspaud]
- Don't close the h5 files too soon. [Martin Raspaud]
- Close h5 file upon reading. [Adam Dybbroe]
- Bugfix. [Adam Dybbroe]
- Try a more clever handling of the case where several level-1b files
  exist for a given satellite and orbit. [Adam Dybbroe]
- Print out matching files in debug. [Martin Raspaud]
- Bugfix. [Adam Dybbroe]
- Adding debug info. [Adam Dybbroe]
- Bugfix. [Adam Dybbroe]
- Merge branch 'pre-master' of https://github.com/mraspaud/mpop into pre-master. [Adam Dybbroe]
- Remove ugly print statements. [Martin Raspaud]
- Load the palettes also. [Martin Raspaud]
- AAPP1b: use operational coefficients for vis calibration per
  default. [Martin Raspaud]

  Falls back to pre-launch coefficients if not available; use
  load(..., pre_launch_coeffs=True) to force using the pre-launch
  coefficients.
- Correct npp name in h5 files. [Martin Raspaud]
- Add the pps v2014 h5 reader. [Martin Raspaud]
- Use h5py for lonlat reading also. [Martin Raspaud]
- Use h5py instead of netcdf for reading nc files. [Martin Raspaud]
- Fix orbit as int in the nc_pps loader. [Martin Raspaud]
- Add the overlay-from-config feature. [Martin Raspaud]
- Remove type testing for the orbit number. [Martin Raspaud]
- Merge branch 'pre-master' of https://github.com/mraspaud/mpop into pre-master. [Adam Dybbroe]
- Allowing kwargs. [Martin Raspaud]
- Add 10 km to the area extent on each side, to avoid tangent
  cases. [Martin Raspaud]
- The orbit doesn't have to be a string anymore. [Martin Raspaud]
- Fix multiple file loading for metop l1b data. [Martin Raspaud]
- Merge branch 'pre-master' of https://github.com/mraspaud/mpop into pre-master. [Adam Dybbroe]
- Implement save for all cloud products. [Martin Raspaud]
- Change option names to cloud_product_* and add lookup in
  os.environ. [Martin Raspaud]
- Some fixes to nc_pps_l2 for correct saving. [Martin Raspaud]
- Add saving to the cloudtype object. [Martin Raspaud]
- Add the save method to the cloudtype object. [Martin Raspaud]
- Rename the _md attribute to mda. [Martin Raspaud]
- Mask out bowtie-deleted pixels for Suomi-NPP products. [Martin Raspaud]
- When a file is provided in nc_pps_l2, just read that
  file. [Martin Raspaud]
- Fix nc_pps_l2 for filename input and PC readiness. [Martin Raspaud]
- ViirsSDR: fix not to crash on single file input. [Martin Raspaud]
- Fix aapp1b to be able to run both for a given filename and from
  config. [Martin Raspaud]
- Try loading according to config if the provided file doesn't work
  (aapp1b). [Martin Raspaud]
- Don't crash when reading a non-aapp1b file. [Martin Raspaud]
- Remove "/" from instrument names when loading custom
  composites. [Martin Raspaud]
- Don't say "generate lon lat" when returning a cached
  version. [Martin Raspaud]
- Nc_pps_l2: don't crash on multiple files, just go through them one at a
  time. [Martin Raspaud]
- Hdfeos: don't just exit when the filename doesn't match, try to look for
  files. [Martin Raspaud]
- Don't crash if the file doesn't match (hdfeos). [Martin Raspaud]
- Revert nc_reader back until the generalization is
  ready. [Martin Raspaud]
- Merge branch 'ppsv2014-reader' of github.com:mraspaud/mpop into ppsv2014-reader. [Martin Raspaud]
- Adding dataset attributes to pps reading. [Adam Dybbroe]
- Allow inputting a filename to the nc_pps_l2 reader. [Martin Raspaud]
- Merge branch 'pre-master' into ppsv2014-reader. [Martin Raspaud]
- Viirs reader fixes. [Martin Raspaud]
- Hdf_eos now uses 1 out of 4 available cores to interpolate
  data. [Martin Raspaud]
- Merge branch 'pre-master' of github.com:mraspaud/mpop into pre-master. [Martin Raspaud]
- Fixed bug, now handling fill_value better. [Lars Orum Rasmussen]
- More robust tiff header file decoder. [Lars Orum Rasmussen]
- Add dnb_overview as a standard product (dnb, dnb,
  10.8). [Martin Raspaud]
- Merge branch 'pre-master' of github.com:mraspaud/mpop into pre-master. [Martin Raspaud]
- Corrected the reader for SAFNWC/PPS v2014. [Sara.Hornquist]
- Allow multiresolution loading in the hdf eos reader. [Martin Raspaud]
- Revert back to the old nwcsaf-pps reader for hdf. [Adam Dybbroe]

  The reading of the new netcdf format is done with another reader!
- A new pps reader for the netCDF format of v2014. [Adam Dybbroe]
- Adding for new cloudmask and type formats... [Adam Dybbroe]
- Enhance the nwc-pps reader to support the v2014 format. [Adam Dybbroe]
- Merge branch 'pre-master' of https://github.com/mraspaud/mpop into pre-master. [Adam Dybbroe]
- Put the config object back in Projector. [Martin Raspaud]
- Fix area_file central search. [Martin Raspaud]
- Move the area_file search inside Projector. [Martin Raspaud]
- Error when the satellite config file is not found. [Martin Raspaud]
- Get rid of the funky logging style. [Martin Raspaud]
- Log the config file used to generate the scene. [Martin Raspaud]
- Support a filename list to load in the viirs_sdr
  loader. [Martin Raspaud]
- Add avhrr/3 as an alias for avhrr in the aapp reader. [Martin Raspaud]
- Fix name matching in hdfeos_l1b. [Martin Raspaud]

  The full name didn't work with fnmatch, take the basename instead.
- Allow hdfeos_l1b to read a batch of files. [Martin Raspaud]
- Add delitem, and code cleanup. [Martin Raspaud]
- Merge branch 'pre-master' of github.com:mraspaud/mpop into pre-master. [Martin Raspaud]
- Added a reader for SAFNWC/PPS v2014. [Sara.Hornquist]

  PPS v2014 has a different file format than previous SAFNWC/PPS versions.
- Aapp1b reader: be more clever when (re)reading. [Martin Raspaud]
- Merge branch 'pre-master' of https://github.com/mraspaud/mpop into pre-master. [Adam Dybbroe]

  Conflicts: mpop/satout/netcdf4.py
- Allow reading several files at once in viirs_compact. [Martin Raspaud]
- Allow reading several files at once in eps_l1b. [Martin Raspaud]
- Style: use "in" instead of has_key(). [Martin Raspaud]
- Adding a primitive umarf (native) format reader for
  meteosat. [Martin Raspaud]
- Add logging when an info field can't be saved to
  netcdf. [Martin Raspaud]
- Add a name to the area when loading aapp data. [Martin Raspaud]
- Merge branch 'pre-master' of github.com:mraspaud/mpop into pre-master. [Martin Raspaud]
- For PNG files, geo_image.tags will be saved as PNG
  metadata. [Lars Orum Rasmussen]
- Add a save method to cfscene objects. [Martin Raspaud]
- Don't take None as a filename when loading avhrr
  data. [Martin Raspaud]
- Allow loading a file directly for aapp1b and eps_l1b (see the sketch
  below). [Martin Raspaud]

  Just run global_data.load(..., filename="/path/to/myfile.1b")
- Merge branch 'pre-master' of github.com:mraspaud/mpop into pre-master. [Martin Raspaud]
- Viirs_sdr can now load depending on an area. [Martin Raspaud]
- Pep8 cosmetics. [Adam Dybbroe]
- Merge pull request #12 from pnuu/pre-master. [Martin Raspaud]

  Fixed "logger" to "LOGGER"
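The filename-based loading from the entry above, spelled out; the channel
list and the path are illustrative::

    # Sketch: bypass the configured search path and read this very file.
    from datetime import datetime
    from mpop.satellites import PolarFactory

    time_slot = datetime(2014, 1, 15, 12, 0)
    global_data = PolarFactory.create_scene("noaa", "19", "avhrr",
                                            time_slot, "12345")
    global_data.load([0.6, 0.9, 10.8], filename="/path/to/myfile.1b")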
- Fixed "logger" to "LOGGER". [Panu Lahtinen]
- Moving the pyspectral module import down to the function where
  pyspectral is used. [Adam Dybbroe]
- Merge branch 'smhi-premaster' into pre-master. [Adam Dybbroe]
- Fixing cloudtype product: palette projection. [Adam Dybbroe]
- Turned on debugging for geo-test. [Adam Dybbroe]
- Added debug printout for cloud product loading. [Adam Dybbroe]
- Make snow and microphysics transparent. [Martin Raspaud]
- Rename day_solar to snow. [Martin Raspaud]
- Keep the name of cloudtype products when projecting. [Martin Raspaud]
- Explicitly load parallax-corrected files if present. [Martin Raspaud]
- Adding logging for MSG cloud products loading. [Martin Raspaud]
- Fix the parallax file sorting problem, again. [Martin Raspaud]
- Merge branch 'pre-master' of github.com:mraspaud/mpop into pre-master. [Martin Raspaud]
- Merge branch 'pre-master' of https://github.com/mraspaud/mpop into pre-master. [Adam Dybbroe]
- Bugfix. [Adam Dybbroe]
- Merge branch '3.9reflectance' into pre-master. [Adam Dybbroe]

  Conflicts: mpop/channel.py, mpop/instruments/seviri.py,
  mpop/satin/mipp_xrit.py, setup.py
- Support for RGBs using the seviri 3.9 reflectance
  (pyspectral). [Adam Dybbroe]
- Adding a sun-corrected overview RGB. [Adam Dybbroe]
- Adding the "day microphysics" RGB. [Adam Dybbroe]
- Deriving the day-solar RGB using pyspectral to derive the 3.9
  reflectance. [Adam Dybbroe]
- Use "imp" to find input plugins. [Martin Raspaud]
- Cleanup trailing whitespace. [Martin Raspaud]
- Use cartesian coordinates for lon/lat computation in near-pole
  situations. [Martin Raspaud]
- Set the alpha channel to the same type as the other
  channels. [Martin Raspaud]
- Sort the filenames in get_best_products (msg_hdf). [Martin Raspaud]
- Merge branch 'pre-master' of github.com:mraspaud/mpop into pre-master. [Martin Raspaud]
- Merge branch 'pre-master' of https://github.com/mraspaud/mpop into pre-master. [Adam Dybbroe]
- Merge pull request #10 from pnuu/pre-master. [Martin Raspaud]

  Fixed failed merging. Thanks Pnuu.
- Fixed failed merging (removed "<<<<<<< HEAD" and ">>>>>>> upstream
  /pre-master" lines). [Panu Lahtinen]
- Merge branch 'pre-master' of https://github.com/mraspaud/mpop into pre-master. [Adam Dybbroe]
- Merge branch 'pre-master' of https://github.com/mraspaud/mpop into pre-master. [Adam Dybbroe]
- Merge branch 'pre-master' of https://github.com/mraspaud/mpop into pre-master. [Adam Dybbroe]
- Fix terra and aqua templates for the dual gain channels
  (13 & 14). [Adam Dybbroe]
- Read both parallax-corrected and usual cloudtype
  products. [Martin Raspaud]
- Merge branch 'pre-master' of github.com:mraspaud/mpop into pre-master. [Martin Raspaud]
- Merge pull request #9 from pnuu/pre-master. [Martin Raspaud]

  Possibility to get area_extent from area definition(s)
- Tests for mpop.satin.helper_functions.boundaries_to_extent. [Panu Lahtinen]
- Separated area definitions and boundary calculations. [Panu Lahtinen]
- Added a test whether the proj string is in '+'-format or
  not. [Panu Lahtinen]
- Re-ordered the tests. [Panu Lahtinen]
- Fixed incorrect expected values. [Panu Lahtinen]
- Test using area definitions instead of definition
  names. [Panu Lahtinen]
- Possibility to give also area definition objects to
  area_def_names_to_extent(), and log a warning if the area definition is
  not used. [Panu Lahtinen]
- Fixed import. [Panu Lahtinen]
- Added tests for mpop.satin.helper_functions. [Panu Lahtinen]
- Moved to mpop/tests/. [Panu Lahtinen]
- Merge remote-tracking branch 'upstream/pre-master' into pre-master. [Panu Lahtinen]

  Conflicts: mpop/satin/aapp1b.py
- Removed unneeded functions. [Panu Lahtinen]
- Test for area_def_names_to_extent(). [Panu Lahtinen]
- Removed unnecessary functions. [Panu Lahtinen]
- Removed swath reduction functions. [Panu Lahtinen]
- Reverted not to reduce swath data. [Panu Lahtinen]
- Added the possibility to do data reduction based on target area
  definition names. [Panu Lahtinen]
- Added area extent calculations based on given area definition
  names. [Panu Lahtinen]
- Helper functions for area extent and boundary calculations, and data
  reduction for swath data. [Panu Lahtinen]
- Test for mpop.satin.mipp_xrit.lonlat_to_geo_extent(). [Panu Lahtinen]
- Support for lon/lat-based area extents. [Panu Lahtinen]
- Add start and end time defaults for the images
  (runner). [Martin Raspaud]
- Merge branch 'pre-master' of https://github.com/mraspaud/mpop into pre-master. [Lars Orum Rasmussen]
- Do not mask out negative reflectances in viirs_sdr
  reading. [Martin Raspaud]
- Added navigation to the hrpt_hmf plugin. [Martin Raspaud]
- Started working on a new plugin version of hdfeos_l1b. [Martin Raspaud]
- Cleanup. [Martin Raspaud]
- Cleanup. [Martin Raspaud]
- Cleanup. [Martin Raspaud]
- Cleanup. [Martin Raspaud]
- Adding scene tests to the test suite. [Martin Raspaud]
- Revamped scene unittests. [Martin Raspaud]
- Don't crash on errors. [Martin Raspaud]
- Revamped projector tests. [Martin Raspaud]
- More geo_image testing. [Martin Raspaud]
- Don't use "super" in geo_image. [Martin Raspaud]
- Fix testing. [Martin Raspaud]
- Mock pyresample and mpop.projector in geo_image
  tests. [Martin Raspaud]
- More testing of geo_image. [Martin Raspaud]
- Add tests for geo_image. [Martin Raspaud]
- Merge branch 'unstable' of ssh://safe/data/proj/SAF/GIT/mpop into unstable. [Martin Raspaud]
- Mock gdal for geo_image tests. [Martin Raspaud]
- Added netCDF read support for four more projections. [Adam Dybbroe]
- Adding support for eqc in cf format. [Adam Dybbroe]
- Added config templates for GOES and MTSAT. [Lars Orum Rasmussen]
- Copied visir.night_overview to seviri.night_overview, so that
  night_overview.prerequisites is correct when night_overview is called
  from seviri.py. [ras]
- Cloudtop in seviri.py now takes the same arguments as cloudtop in
  visir.py. [Lars Orum Rasmussen]
- Fix saving as netcdf. [Martin Raspaud]
- Fix floating point tiff saving. [Martin Raspaud]
- Make pillow a requirement only if PIL is missing. [Martin Raspaud]
- Add some modules to mock in the documentation. [Martin Raspaud]
- Add pyorbital to the list of packages to install in
  travis. [Martin Raspaud]
- Merge branch 'feature-travis' into unstable. [Martin Raspaud]
- Test_projector doesn't pass. [Martin Raspaud]
- Test_projector ? [Martin Raspaud]
- Fix travis. [Martin Raspaud]
- Adding test_geoimage. [Martin Raspaud]
- Test_channel passes, test_image next. [Martin Raspaud]
- Test_pp_core crashes, test_channel on. [Martin Raspaud]
- Commenting out tests to find the culprit. [Martin Raspaud]
- Ok, last try for travis-ci. [Martin Raspaud]
- What is happening with travis? [Martin Raspaud]
- More fiddling to find out why travis-ci complains. [Martin Raspaud]
- Testing the simple test way (not coverage). [Martin Raspaud]
- Trying to add the tests package for travis-ci. [Martin Raspaud]
- Add the tests package. [Martin Raspaud]
- Prepare for travis-ci. [Martin Raspaud]
- Support 16-bit images (geotiff only at the moment). [Martin Raspaud]
- Merge pull request #8 from pnuu/pre-master. [Martin Raspaud]

  Sun zenith angle correction added.
- A section on mpop.tools added to the documentation. [Panu Lahtinen]
- Extra tests for sun_zen_corr(). [Panu Lahtinen]
- Typo. [Panu Lahtinen]
- Channel descriptions added. [Panu Lahtinen]
- Channel descriptions are added. [Panu Lahtinen]
- Clarification to the sunzen_corr_cos() description. [Panu Lahtinen]
- Test cases for channel.sunzen_corr(). [Panu Lahtinen]
- Sun zenith angle correction split into two functions. [Panu Lahtinen]
- Revert to the original version. [Panu Lahtinen]
- Initial commit of mpop.tools (with the sun zenith angle
  correction). [Panu Lahtinen]
- Sun zenith angle correction added. [Panu Lahtinen]
- Merge branch 'pre-master' of github.com:mraspaud/mpop into pre-master. [ras]
- Solve the multiple channel resolution with an automatic resampling
  radius. [Martin Raspaud]
- Add the "nprocs" option to projector objects and the scene's project
  method. [Martin Raspaud]
- Now saving the orbit number (if available) as a global attribute. [ras]
- Adding more files to be ignored. [ras]
- Merge branch 'pre-master' of github.com:mraspaud/mpop into pre-master. [ras]
- New reader for the hrpt level0 format. [Martin Raspaud]
- Fix no-calibration reading for aapp1b. [Martin Raspaud]
- Add the product name to the image info. [Martin Raspaud]
- Add some debugging info about missing pixels in
  viirs_sdr. [Martin Raspaud]
- Merge branch 'unstable' of /data/proj/SAF/GIT/mpop into unstable. [Martin Raspaud]
- Corrected a comment. [Adam Dybbroe]
- Fix for the M13 load problem - reported by
  stefano.cerino@gmail.com. [Adam Dybbroe]
- Use the number of scans to load the right amount of data in the compact
  viirs reader. [Martin Raspaud]
- Fix hook to be able to record both filename and uri. [Martin Raspaud]
- Protecting MPOP from netcdf4's unicode variables. [ras]
- Merge branch 'unstable' of /data/proj/SAF/GIT/mpop into unstable. [Martin Raspaud]
- Merge branch 'unstable' of /data/proj/SAF/GIT/mpop into unstable. [Adam Dybbroe]
- Adding a new convection RGB with co2 correction for
  SEVIRI. [Adam Dybbroe]
- Temporary hack to solve for hdf5 files with more than one granule per
  file. [Adam Dybbroe]
- Removing messaging code from saturn and adding a more generic "hook"
  argument. [Martin Raspaud]
- Bumped up version. [Martin Raspaud]
- Make viirs_compact scan number independent. [Martin Raspaud]
- Cleanup: marking some deprecated modules, removing an unfinished file,
  improving documentation. [Martin Raspaud]
- Adding the ears-viirs compact format reader.
  Untested. [Martin Raspaud]
- Code cleanup. [Martin Raspaud]
- Merge branch 'pre-master' into unstable. [Martin Raspaud]

  Conflicts: mpop/imageo/geo_image.py
- Night_color (should have been called night_overview) is the same as
  cloudtop. [Lars Orum Rasmussen]
- Bug fix from Bocheng. [Lars Orum Rasmussen]
- Night_overview is just like cloudtop. [Lars Orum Rasmussen]
- Now also handling polar satellites. [Lars Orum Rasmussen]
- Cosmetic. [Lars Orum Rasmussen]
- Fixed merge conflict. [Lars Orum Rasmussen]
- Trying out a chlorophyll product. [Lars Orum Rasmussen]
- Added a night overview composite. [Lars Orum Rasmussen]
- Better check for empty arrays. [Lars Orum Rasmussen]
- Fix logging. [Martin Raspaud]
- Fix backward compatibility in, and deprecate,
  image.py. [Martin Raspaud]
- Merge branch 'unstable' of /data/proj/SAF/GIT/mpop into unstable. [Martin Raspaud]
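The next few entries describe percentile-based stretching; schematically it
amounts to the following standalone sketch (not the actual mpop.imageo
implementation)::

    import numpy as np

    def percentile_stretch(data, cutoffs=(0.005, 0.005)):
        # Cut the histogram at the given left/right fractions with a
        # single numpy.percentile call, then stretch linearly to [0, 1].
        left, right = np.percentile(np.ma.compressed(data),
                                    [100.0 * cutoffs[0],
                                     100.0 * (1.0 - cutoffs[1])])
        return np.clip((data - left) / (right - left), 0.0, 1.0)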
- Calling numpy percentile only once when doing left and right
  cut-offs. [Adam Dybbroe]
- Add support for identifying npp directories by time-date as well as
  orbit number. [Adam Dybbroe]
- Fix histogram-equalization stretch test. [Adam Dybbroe]
- Bugfix in the histogram equalization function. [Adam Dybbroe]
- Using the percentile function to generate a histogram with a constant
  number of values in each bin. [Adam Dybbroe]
- Using the numpy.percentile function to cut the data in the linear
  stretch. [Adam Dybbroe]
- Fix histogram stretch unit test. [Adam Dybbroe]
- Correcting the histogram stretching. [Adam Dybbroe]

  The com_histogram function was in error when asking for "normed"
  histograms.
- Added a histogram method that makes a more populated histogram when the
  data are heavily skewed. Fixes a problem seen by Bocheng in DNB
  imagery. [Adam Dybbroe]
- Merge branch 'unstable' of /data/proj/SAF/GIT/mpop into unstable. [Adam Dybbroe]
- Don't remove GeolocationFlyweight _instances, but reset it, allowing for
  multiple "loads". [Adam Dybbroe]
- Add imageo.formats to the installation. [Martin Raspaud]
- AAPP loading bug fix. [Martin Raspaud]

  The aapp1b.py loader for aapp data was broken: it was loading both
  channels 3a and 3b each time, one of them being entirely masked. This of
  course created problems further down. Fixed by setting the non-loadable
  channel to None.
- Merge branch 'unstable' of /data/proj/SAF/GIT/mpop into unstable. [Martin Raspaud]
- Bugfix in the npp.cfg template. [Adam Dybbroe]
- Merge branch 'unstable' of /data/proj/SAF/GIT/mpop into unstable. [Adam Dybbroe]
- Fixing a bug concerning the identification of VIIRS geolocation files.
  The configuration specified in npp.cfg now overwrites what is actually
  written in the metadata header of the band files. [Adam Dybbroe]
- Make saturn posttroll capable. [Martin Raspaud]
- Bump up version number. [Martin Raspaud]
- Cosmetics. [Martin Raspaud]
- Fixing test cases. [Martin Raspaud]
- Merge branch 'unstable' of /data/proj/SAF/GIT/mpop into unstable. [Martin Raspaud]
- Remove a dummy test to boost projection performance. [Martin Raspaud]

  Mpop was checking in two different places whether the source and target
  areas were different, leading to pyresample expanding the area
  definitions to full lon/lat arrays when checking against a swath
  definition, and then running an allclose. This was inefficient, and the
  programming team decided that it is the user's task to know before
  projection whether the source and target areas are the same. In other
  words, the user should be at least a little smart.
- Update channel list for modis lvl2. [Martin Raspaud]
- Bump up version number: 1.0.0. [Martin Raspaud]
- Merge branch 'pre-master' into unstable. [Martin Raspaud]
- Merge branch 'unstable' of /data/proj/SAF/GIT/mpop into unstable. [Martin Raspaud]
- Cleanup. [Martin Raspaud]

v1.0.0 (2013-09-25)
-------------------
- Release v1.0.0. [Martin Raspaud]
- Changing palette name to something more intuitive. Allow the orbit
  number to equal None. [Adam Dybbroe]
- Fixing aqua/terra template config files for the dual gain channels
  (13 & 14). [Adam Dybbroe]
- Added Ninjo tiff example area definitions. [Lars Orum Rasmussen]
- Cosmetic. [Lars Orum Rasmussen]
- The Ninjo tiff writer now handles single
  channels. [Lars Orum Rasmussen]

  Ninjo tiff metadata can now all be passed as arguments.
- Better documentation. [Lars Orum Rasmussen]
- Merge branch 'unstable' of /data/proj/SAF/GIT/mpop into unstable. [Adam Dybbroe]
- Merge branch 'unstable' of /data/proj/SAF/GIT/mpop into unstable. [Martin Raspaud]

  Conflicts: etc/npp.cfg.template
- Merge branch 'pre-master' of https://github.com/mraspaud/mpop into pre-master. [Adam Dybbroe]
- Changed template to fit the new npp reader. [krl]
- Merge branch 'pre-master' of https://github.com/mraspaud/mpop into pre-master. [Adam Dybbroe]
- Updated the npp config file template with a geo_filename
  example. [Adam Dybbroe]
- Make overview consistent with the standard overview. [Adam Dybbroe]
- Updated the npp template to fit the new viirs reader using the (new)
  plugin-loader system. [Adam Dybbroe]
- Minor clean up. [Adam Dybbroe]
- Merge branch 'unstable' of /data/proj/SAF/GIT/mpop into unstable. [Adam Dybbroe]

  Conflicts: mpop/satin/viirs_sdr.py
- Fix version stuff. [Martin Raspaud]
- Merge branch 'feature-optimize_viirs' into unstable. [Martin Raspaud]
- Make viirs_sdr a plugin of the new format. [Martin Raspaud]
- Finalize the optimisation of the new viirs reader. [Martin Raspaud]
- Optimization ongoing. Mask issues. [Martin Raspaud]
- Clarify failure to load hrit data. [Martin Raspaud]
- Lunar stuff... [Adam Dybbroe]
- Fix install requires. [Martin Raspaud]
- Fix projector unit test. [Martin Raspaud]
- Merge branch 'pre-master' into unstable. [Martin Raspaud]
- Merge branch 'pre-master' of git://github.com/mraspaud/mpop into pre-master. [Martin Raspaud]
- Fixed (temporary?) misuse of Image.SAVE. [Lars Orum Rasmussen]
- Now the config reader is a singleton. [Lars Orum Rasmussen]
- Merge branch 'pre-master' into unstable. [Martin Raspaud]
- Merge branch 'pre-master' of git://github.com/mraspaud/mpop into pre-master. [Martin Raspaud]
- Tmplate -> template. [Lars Orum Rasmussen]
- Added support for saving in the Ninjo tiff
  format. [Lars Orum Rasmussen]
- Projector cleanup. [Martin Raspaud]
- Merge branch 'unstable' of /data/proj/SAF/GIT/mpop into unstable. [Martin Raspaud]
- New VIIRS reader. Better, faster, smarter (consuming less
  memory). [Adam Dybbroe]
- Fix area hashing. [Martin Raspaud]
- Fix install dependency. [Martin Raspaud]
- Merge branch 'pre-master' into unstable. [Martin Raspaud]
- Merge branch 'pre-master' of git://github.com/mraspaud/mpop into pre-master. [Martin Raspaud]

  Conflicts: doc/source/conf.py, setup.py
- Optimize. [Martin Raspaud]
- Remove the optional ahamap requirement. [Martin Raspaud]
- Manage the version number centrally. [Martin Raspaud]
- Merge branch 'release-v0.13.0' [Martin Raspaud]

  Conflicts: setup.py
- Merge branch 'pre-master' [Martin Raspaud]

  Conflicts: doc/source/conf.py, setup.py

v0.13.0 (2013-05-08)
--------------------
- Bump up version number for release. [Martin Raspaud]
- Merge branch 'pre-master' of git://github.com/mraspaud/mpop into pre-master. [Martin Raspaud]
- Bump up version number. [Martin Raspaud]
- Make the old plugin an info instead of a warning. [Martin Raspaud]
- Merge branch 'pre-master' of git://github.com/mraspaud/mpop into pre-master. [Martin Raspaud]
- Pep8. [Adam Dybbroe]
- Merge branch 'aapp1b' into unstable. [Adam Dybbroe]
- Don't mask out IR channel data where the count equals
  zero. [Adam Dybbroe]
- Fixing the masking of the IR calibrated Tbs - count=0 not
  allowed. [Adam Dybbroe]
- Make the vis channels masked arrays as well. [Adam Dybbroe]
- Checking whether the file format is post or pre v4. [Adam Dybbroe]

  If bandcor_2 < 0 we are at versions higher than 4. Masking a bit more
  strictly.
- Now handle data without a mask, and handle lons and lats without
  crashing. [Lars Orum Rasmussen]
- Read signed instead of unsigned (aapp1b). [Martin Raspaud]
- Style cleanup. [Martin Raspaud]
- Adding the calibration type as an option to the loader, so counts,
  radiances or tbs/refl can be returned. [Adam Dybbroe]
- Better show and more cosmetic. [Lars Orum Rasmussen]
- Making pylint happier and some cosmetic. [Lars Orum Rasmussen]
- No need for night_overview, use cloudtop with
  options. [Lars Orum Rasmussen]
- Now IR calibration returns a masked array. [Lars Orum Rasmussen]
- Added some options for the overview image and added a night
  overview. [Lars Orum Rasmussen]
- Finalize the aapp1b python-only reader. [Martin Raspaud]
- Working on an aapp l1b reader. [oananicola]
- Starting an aapp1b branch for directly reading aapp's l1b
  files. [Lars Orum Rasmussen]
- Adding a bit of debug info... [Adam Dybbroe]
- Adding the orbit number to the cloud mask object. [Adam Dybbroe]
- Channel cleanup and tests. [Martin Raspaud]
- Merge branch 'feature_plugins' into unstable. [Martin Raspaud]
- Make the orbit number a 5-character string (padding with
  '0'). [Martin Raspaud]
- New plugin implementation, backward compatible. [Martin Raspaud]
- Merge branch 'pre-master' of github.com:mraspaud/mpop into pre-master. [Lars Orum Rasmussen]
- Reverted to previous commit. [Lars Orum Rasmussen]
- Correct green-snow. [Martin Raspaud]

  Use 0.6 instead of 0.8.
- Now, if specified in the proj4 object, add the EPSG code to the tiff
  metadata. [Lars Orum Rasmussen]
- Added a poor man's version of Adam's DNB RGB
  image. [Lars Orum Rasmussen]

v0.12.1 (2013-03-14)
--------------------
- Cleanup. [Martin Raspaud]
- Add several cores for geoloc in eos. [Martin Raspaud]
- Bugfix hdfeos. [Martin Raspaud]
- Fix loading of terra aqua with multiple cores. [Martin Raspaud]
- Add dust, fog and ash composites to VIIRS. [Martin Raspaud]
- Enhance error messages. [Martin Raspaud]
- Merge branch 'unstable' of /data/proj/SAF/GIT/mpop into unstable. [Martin Raspaud]
- New template files for regional EARS (AVHRR and NWC) file
  support. [Adam Dybbroe]
- Minor cosmetics. [Adam Dybbroe]
- Make the orbit number a 5-character string (padding with
  '0'). [Martin Raspaud]
- Merge branch 'fixrtd' into unstable. [Martin Raspaud]
- Add pyresample to mock for doc building. [Martin Raspaud]
- Get rid of the np.inf error in rtd. [Martin Raspaud]
- Mock some imports for the documentation. [Martin Raspaud]
- Introducing a clip percentage for the SAR average
  product. [Lars Orum Rasmussen]
- Add a symlink from README.rst to README. [Martin Raspaud]
- Update the download link and README. [Martin Raspaud]

v0.12.0 (2013-01-10)
--------------------
- Bump up version number. [Martin Raspaud]
- Cosmetics. [Martin Raspaud]
- Merge branch 'unstable' of /data/proj/SAF/GIT/mpop into unstable. [Martin Raspaud]
- Merge branch 'unstable' of /data/proj/SAF/GIT/mpop into unstable. [Adam Dybbroe]
- Support for the calibrate option. [Adam Dybbroe]
- Add a template file for meteosat 10. [Martin Raspaud]
- Add debug messages to the hdf-eos loader. [Martin Raspaud]

v0.11.7 (2012-12-04)
--------------------

Fix
~~~
- Bugfix: allow more than one "-" in section names. [Martin Raspaud]
- Bugfix: read the aqua/terra orbit number from the file only if not
  already defined. [Martin Raspaud]
- Bugfix: fixed unittest case for wavelengths as lists. [Martin Raspaud]
- Bugfix: remove deprecated mviri testcases. [Martin Raspaud]
- Bugfix: backward compatibility with netcdf files. [Martin Raspaud]
- Bugfix: removed the old mviri compositer. [Martin Raspaud]
- Bugfix: when assembling, keep track of the object, not just the
  lon/lats. [Martin Raspaud]
- Bugfix: assembling scenes would unmask some
  lon/lats... [Martin Raspaud]
- Bugfix: handling of channels with different resolutions in
  assemble_segments. [Martin Raspaud]
- Bugfix: the runner crashed if called with an area not in the product
  list. [Martin Raspaud]
- Bugfix: the nwcsaf_pps reader was crashing if no file was
  found... [Martin Raspaud]
- Bugfix: pynav is not working in some cases, replace with
  pyorbital. [Martin Raspaud]
- Bugfix: can now add an overlay in monochromatic
  images. [Martin Raspaud]
- Bugfix: swath scene projection took forever from the second time
  on. [Martin Raspaud]

  The swath scene, when projected more than once, would recompute the
  nearest neighbours for every channel.

Other
~~~~~
- Support pnm image formats. [Martin Raspaud]
- The pps palette broke msg compatibility. Now there are two palettes, one
  for msg and one for pps. [Adam Dybbroe]
- Merge branch 'unstable' of /data/proj/SAF/GIT/mpop into unstable. [Adam Dybbroe]

  Conflicts: mpop/satin/viirs_sdr.py
- Adapted the viirs reader to handle aggregated granule
  files. [Adam Dybbroe]
- Fixing the nwcsaf-pps ctth height palette. [Adam Dybbroe]
- Take better care of the path (was uri) argument. [Martin Raspaud]
- Don't do url parsing in the hdfeos reader. [Martin Raspaud]
- Fix unit tests. [Martin Raspaud]
- Remove the deprecated append function in scene. [Martin Raspaud]
- Return when the hdf eos file is not located. [Martin Raspaud]
- Remove raveling in kd_tree. [Martin Raspaud]
- Make use of the new strftime in the viirs reader. [Martin Raspaud]
- Add a custom strftime (see the sketch below). [Martin Raspaud]

  This fixes a bug on Windows that prevents running strftime on strings
  that contain mapping key conversion specifiers.
- Catch the error if there is no file to load from. [Martin Raspaud]
- Add a proper logger in the hdfeos reader. [Martin Raspaud]
- Get the resolution from the filename for eos data. [Martin Raspaud]
- Introducing a stretch argument for the average
  product. [Lars Orum Rasmussen]
- Merge branch 'pre-master' of github.com:mraspaud/mpop into pre-master. [Lars Orum Rasmussen]
- Clean up. [Martin Raspaud]
- Bump up version number. [Martin Raspaud]
- Support passing a uri to the hdfeos reader. [Martin Raspaud]
- Fix the loading of BT for the VIIRS M13 channel. [Martin Raspaud]

  Has no scale and offset.
- Merge branch 'pre-master' of github.com:mraspaud/mpop into pre-master. [Lars Orum Rasmussen]
- Refactor the unsigned netcdf packing code. [Martin Raspaud]
- Merge branch 'pre-master' of github.com:mraspaud/mpop into pre-master. [Lars Orum Rasmussen]
- Support packing data as unsigned in netcdf. [Martin Raspaud]
- Replace auto mask and scale from netcdf4. [Martin Raspaud]

  Eats up too much memory.
- Merge branch 'pre-master' of github.com:mraspaud/mpop into pre-master. [Lars Orum Rasmussen]
- Feature: added a template for the electro-l
  satellite. [Martin Raspaud]
- Feature: taking care of missing data in the viirs reader, and allowing
  for radiance retrieval. [Martin Raspaud]
- Feature: last adjustments to the new netcdf format. [Martin Raspaud]
- Merge branch 'feature-netcdf-upgrade' into unstable. [Martin Raspaud]

  Conflicts: mpop/satout/cfscene.py, mpop/satout/netcdf4.py
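The custom strftime mentioned above works around strftime rejecting
"%(key)s" mapping keys on some platforms (notably Windows). A minimal
sketch of the idea, not the mpop implementation::

    import re
    from datetime import datetime

    def safe_strftime(time_object, format_string):
        # Escape "%(key)s" mapping keys as "%%(key)s" so that strftime
        # treats them as literal text instead of invalid directives.
        return time_object.strftime(re.sub(r"%\(", "%%(", format_string))

    safe_strftime(datetime(2012, 12, 4), "hrpt_%Y%m%d_%(band)s.l1b")
    # -> 'hrpt_20121204_%(band)s.l1b'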
- Merge branch 'unstable' into feature-netcdf-upgrade. [Martin Raspaud]
- Merge branch 'unstable' into feature-netcdf-upgrade. [Martin Raspaud]

  Conflicts: mpop/satin/mipp_xsar.py
- Work on the new netcdf format nearing completion. [Martin Raspaud]
- Feature: wrapping up the new netcdf format, cf-satellite
  0.2. [Martin Raspaud]
- Renamed some global attributes. [Martin Raspaud]
- Netcdf: working towards better matching the CF
  conventions. [Martin Raspaud]
- WIP: NetCDF cleaning. [Martin Raspaud]

  - scale_factor and add_offset are now single values.
  - vertical_perspective to geos.
- Merge branch 'unstable' into feature-netcdf-upgrade. [Martin Raspaud]
- Group channels by unit and area. [Martin Raspaud]
- Do not apply scale and offset when reading. [Martin Raspaud]
- WIP: updating the netcdf interface. [Martin Raspaud]
- Merge branch 'unstable' of /data/proj/SAF/GIT/mpop into unstable. [Martin Raspaud]
- Changed handling of "_FillValue" attributes. [Nina.Hakansson]

  Added a find_FillValue_tags function to search for "_FillValue"
  attributes. The "_FillValue" attributes are used and set when variables
  are created.
- Cosmetics. [Martin Raspaud]
- Merge branch 'unstable' of /data/proj/SAF/GIT/mpop into unstable. [Martin Raspaud]
- Fixing a bug concerning the viirs bandlist and the issue of preventing
  the loading of channels when only products are
  requested. [Adam Dybbroe]
- Fixing the VIIRS reader - it does not try to read SDR data if you only
  want to load a product. Minor fixes in the MODIS and AAPP1b
  readers. [Adam Dybbroe]
- Merge branch 'unstable' of /data/proj/SAF/GIT/mpop into unstable. [Martin Raspaud]
- Merge branch 'unstable' of /data/proj/SAF/GIT/mpop into unstable. [Adam Dybbroe]
- Bugfix in the viirs sdr reader. [Adam Dybbroe]
- Added an ir108 composite to Viirs. [Martin Raspaud]
- RUN: add the possibility to get prerequisites for a list of
  areas. [Martin Raspaud]
- Updating the area_id for the channel during viirs loading and assembling
  of segments. [Martin Raspaud]
- Area handling in viirs and assembling segments. [Martin Raspaud]
- Viirs true color should have a transparent
  background. [Martin Raspaud]
- Added enhancements to the image.__call__ function. [Martin Raspaud]
- Fixing the runner to warn for missing functions (instead of
  crashing). [Martin Raspaud]
- Merge branch 'unstable' of /data/proj/SAF/GIT/mpop into unstable. [Martin Raspaud]

  Conflicts: mpop/satin/viirs_sdr.py
- Bug fix green-snow RGB. [Adam Dybbroe]
- Cleaning up a bit in the viirs reader. [Adam Dybbroe]
- Temporary fix to deal with scale factors (in the CLASS archive these are
  not tuples of 2 but 6). Taken from an old fix in the npp-support
  branch. [Adam Dybbroe]
- Merge branch 'unstable' of /data/proj/SAF/GIT/mpop into unstable. [Adam Dybbroe]
- Support for bzip2-compressed NWCSAF products (EARS-NWC); see the sketch
  below. [Adam Dybbroe]
- More flexible viirs reading, and fixes to viirs
  composites. [Martin Raspaud]
- Added a stereographic projection translation. [Lars Orum Rasmussen]
- Added modist as a valid name for 'eos1'. [Lars Orum Rasmussen]
- Added night_microphysics. [Lars Orum Rasmussen]
- Added a stretch option. [Lars Orum Rasmussen]
- Merge branch 'unstable' of /data/proj/SAF/GIT/mpop into unstable. [Martin Raspaud]
- Feature: new function to create an image from a
  scene. [Martin Raspaud]
- Fixed a new npp template config file, with a geo_filename
  example. [Adam Dybbroe]
- Adding a 500 meter scan area. [Adam Dybbroe]
- Fixing a bug in geolocation reading and removing the old style viirs
  composite file. [Adam Dybbroe]
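The bzip2 support mentioned above boils down to transparently unpacking
compressed granules before reading; a self-contained sketch, not the
reader's actual code::

    import bz2
    import os
    import tempfile

    def bunzip_if_needed(filename):
        # EARS-NWC granules may arrive as *.bz2: unpack to a temporary
        # file for the hdf reader, otherwise use the file as-is.
        if not filename.endswith(".bz2"):
            return filename
        handle, tmpname = tempfile.mkstemp(
            suffix=os.path.basename(filename)[:-4])
        with os.fdopen(handle, "wb") as unpacked:
            unpacked.write(bz2.BZ2File(filename).read())
        return tmpname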
- Using a template from the configuration file to find the geolocation
  file to read - for all VIIRS bands. [Adam Dybbroe]
- Fixed a bug in hr_natural and added a dnb method. [Adam Dybbroe]
- Fixing bow-tie effects and geolocation for VIIRS when using Cloudtype.
  Needs to be generalised to all products! [Adam Dybbroe]
- Support for tiepoint grids and interpolation + masking out no-data
  geolocation (handling the VIIRS bow-tie deletion). [Adam Dybbroe]
- Merge branch 'unstable' of /data/proj/SAF/GIT/mpop into unstable. [Adam Dybbroe]
- Adding viirs composites and a pps_odim reader for avhrr and viirs
  channel data in satellite projection (swath). [Adam Dybbroe]
- Added a Geo Phys Product to modis level2. [Lars Orum Rasmussen]
- Merge branch 'pre-master' of github.com:mraspaud/mpop into pre-master. [Lars Orum Rasmussen]
- Merge branch 'unstable' of /data/proj/SAF/GIT/mpop into unstable. [Martin Raspaud]
- Adding support for the ob_tran projection even though it is not
  cf-compatible yet. [Adam Dybbroe]
- Merge branch 'unstable' of /data/proj/SAF/GIT/mpop into unstable. [Adam Dybbroe]
- Added the reading of geolocation data from the PPS-formatted level1
  file. [Adam Dybbroe]
- Added the Europe Mesan area to the template. [Adam Dybbroe]
- Feature: MSG hdf files are now used to determine the
  area. [Martin Raspaud]
- Fixed error message. [Martin Raspaud]
- Cleanup: clarified import error. [Martin Raspaud]
- Cleanup: more descriptive message when a plugin can't be
  loaded. [Martin Raspaud]
- Raised version number. [Martin Raspaud]
- More relevant messages in msg_hdf reading. [Martin Raspaud]
- Adding an RGB for night conditions. [Lars Orum Rasmussen]
- Merge branch 'pre-master' into unstable. [Martin Raspaud]
- Merge branch 'pre-master' into unstable. [Martin Raspaud]

v0.11.5 (2012-05-21)
--------------------

Fix
~~~
- Bugfix: importing geotiepoints. [Martin Raspaud]
- Bugfix: hdfeos was not eumetcast compliant :( [Martin Raspaud]
- Bugfix: do not raise an exception on loading failure
  (nwcsaf_pps). [Martin Raspaud]
- Bugfix: fixed misc bugs. [Martin Raspaud]
- Bugfix: comparing directories with samefile is better than
  ==. [Martin Raspaud]
- Bugfix: updating the old eps_l1b interface. [Martin Raspaud]
- Bugfix: fixed typo in gatherer. [Martin Raspaud]
- Bugfix: taking satscene.area into consideration for
  get_lonlat. [Martin Raspaud]
- Bugfix: mipp required version bumped to 0.6.0. [Martin Raspaud]
- Bugfix: updating unittest and setup for the new mipp
  release. [Martin Raspaud]
- Bugfix: for eps l1b, get_lonlat did not return coherent values since the
  introduction of pyresample. [Martin Raspaud]
- Bugfix: mipp to mipp_xrit name change. [Martin Raspaud]
- Bugfix: better detection of needed channels in
  aapp1b. [Martin Raspaud]
- Bugfix: support for other platforms. [Martin Raspaud]
- Bugfix: support python 2.4 in the mipp plugin. [Martin Raspaud]
- Bugfix: masked arrays should be conserved by
  scene.__setitem__. [Martin Raspaud]
- Bugfix: don't make area and time_slot static in the
  compositer. [Martin Raspaud]
- Bugfix: reinit channels_to_load and messages for no
  loading. [Martin Raspaud]

  - When the loading process is interrupted, the channels_to_load
    attribute was not reinitialized.
  - Added a message when loading for a given level did not load anything.
- Bugfix: give an informative message when the area is missing for msg's
  hdf reader. [Martin Raspaud]
- Bugfix: update satpos file retrieval for hrpt and
  eps1a. [Martin Raspaud]
- Bugfix: fixed unittests for the new plugin system. [Martin Raspaud]
- Bugfix: do not load plugins automatically... [Martin Raspaud]
- Bugfix: satellite vs satname again. [Martin Raspaud]
- Bugfix: don't crash if msg hdf can't be loaded. [Martin Raspaud]
- Bugfix: project now chooses the mode automatically by
  default. [Martin Raspaud]
- Bugfix: eps_avhrr adapted to the new plugin format. [Martin Raspaud]
- Bugfix: loading in msg_hdf adapted to the new plugin
  system. [Martin Raspaud]
- Bugfix: loading plugins should fail on any exception. [Martin Raspaud]
- Bugfix: stupid syntax error. [Martin Raspaud]
- Bugfix: mistook satname for satellite. [Martin Raspaud]
- Bugfix: move to jenkins. [Martin Raspaud]
- Bugfix: affecting area to channel_image. [Martin Raspaud]
- Bugfix: better handling of the alpha channel. [Martin Raspaud]
- Bugfix: the filewatcher would wait a long time if no new file had
  come. [Martin Raspaud]

Other
~~~~~
- Bumped up version number. [Martin Raspaud]
- The modis level-2 reader and netcdf writer can now handle scenes
  containing only a geo-physical product (and no
  channels). [Lars Orum Rasmussen]
- Feature: Pypi ready. [Martin Raspaud]
- Bugfix: updating to use python-geotiepoints. [Martin Raspaud]
- Bumping up the version number for the next release. [Martin Raspaud]
- Doc: updating the add_overlay documentation. [Martin Raspaud]
- Merge pull request #2 from cheeseblok/FixViirsRedSnow. [Martin Raspaud]

  Fix typo in red_snow check_channels method
- Fix typo in the red_snow check_channels method. [Scott Macfarlane]
- Feature: adding interpolation to the modis lon/lats. [Martin Raspaud]
- Use pynav to get lon/lats if no file can be read. [Martin Raspaud]
- Hack to handle both level2 and granules. [Martin Raspaud]
- Added the possibility to provide a filename to the eps_l1b
  loader. [Martin Raspaud]
- Merge branch 'feature_new_eps_reader' into unstable. [Martin Raspaud]
- Added the xml file to etc and setup.py. [Martin Raspaud]
- Bugfix in geolocation assignment. [Martin Raspaud]
- Allowing for both 3a and 3A. [Martin Raspaud]
- Put the xml file in etc. [Martin Raspaud]
- The new eps l1b reader is now feature complete. Comprehensive testing
  needed. [Martin Raspaud]
- Added a new eps l1b reader based on an xml description of the
  format. [Martin Raspaud]
- Corrected longitude interpolation to work around the datum shift
  line. [Martin Raspaud]
- The cloudtype channel is now called "CT". [Martin Raspaud]
- Merge branch 'pre-master' of git://github.com/mraspaud/mpop into pre-master. [Martin Raspaud]
- SetProjCS is now correctly called after
  ImportFromProj4. [Lars Orum Rasmussen]

  Added SetWellKnownGeogCS if available.
- Merge branch 'pre-master' into unstable. [Martin Raspaud]

  Conflicts: mpop/satin/mipp_xsar.py
- More correct 'new area'. [Lars Orum Rasmussen]
- Mipp restructure. [Lars Orum Rasmussen]
- Merge branch 'pre-master' into area-hash. [Lars Orum Rasmussen]
- Merge branch 'pre-master' into area-hash. [Lars Orum Rasmussen]
- Now more unique projection filenames (using a hash of the
  areas). [Lars Orum Rasmussen]
- Enhancements to the pps hdf format readers. [Martin Raspaud]
- Feature: added support for the geotiff float format in
  geo_image. [Martin Raspaud]
- Don't touch satscene.area if already present (mipp
  reading). [Martin Raspaud]
- Feature: get the best msg hdf file using
  area_extent. [Martin Raspaud]
- Duck typing for channel assignation. [Martin Raspaud]
- Fixed meteosat reading. [Martin Raspaud]

  - Do not change the scene metadata when no channel is loaded.
  - Do not crash if no PGE is present.
- Added shapes in mpop.cfg.template for pycoast. [Martin Raspaud]
- Cleanup. [Martin Raspaud]
- New add_overlay function, using pycoast (see the sketch
  below). [Martin Raspaud]
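The pycoast-backed add_overlay in the last entry is roughly equivalent to
drawing coastlines on the produced image yourself. A sketch using pycoast
directly, assuming a PIL image and a pyresample area definition are at
hand (the shapefile path is illustrative)::

    from pycoast import ContourWriterAGG

    # Draw coastlines on `pil_image`, which is assumed to be in the
    # projection described by `area_def`.
    cw = ContourWriterAGG("/path/to/gshhs")
    cw.add_coastlines(pil_image, area_def, resolution="i",
                      outline=(255, 255, 0))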
- Added test for __setitem__ (scene). [Martin Raspaud]
- Feature: add a global area if possible. [Martin Raspaud]
- Merge branch 'unstable' of /data/proj/SAF/GIT/mpop into unstable. [Martin Raspaud]
- Fixing it so that also other products (than Channel data) can be
  assembled. [Adam.Dybbroe]
- Adding a data member to CloudType. [Adam.Dybbroe]
- Added support for the truecolor image from modis. [Adam.Dybbroe]
- Cleaning up geo_image.py. [Martin Raspaud]
- Merge branch 'unstable' of /data/proj/SAF/GIT/mpop into unstable. [Martin Raspaud]

  Conflicts: mpop/satin/hdfeos_l1b.py
- Merge branch 'unstable' of /data/proj/SAF/GIT/mpop into unstable. [Adam.Dybbroe]
- Minor cosmetic/editorial stuff. [Adam.Dybbroe]
- Small bugfix - viirs interface. [Adam.Dybbroe]
- Feature: wrapping up the hdfeos upgrade. [Martin Raspaud]

  - migrated data to float32 instead of float64
  - support only geoloc at 1 km resolution at the moment
  - adjust channel resolution to match loaded data
  - added a template terra.cfg file
- Trimming out dead detectors. [Adam.Dybbroe]
- WIP: hdf eos now reads only the needed channels, and can have several
  resolutions. Geoloc is missing though. [Martin Raspaud]
- WIP: started working on supporting half/quarter files for
  modis. [Martin Raspaud]
- Merge branch 'unstable' of /data/proj/SAF/GIT/mpop into unstable. [Martin Raspaud]
- Changed the MODIS HDF-EOS level 1b reader to accommodate both the
  thinned EUMETCast data and direct readout data. Changed the name from
  thin_modis.py to hdfeos_l1b.py. Added a filename pattern to the
  config. [Adam.Dybbroe]
- Fixing indexing bug: missing last line in Metop AVHRR
  granule. [Adam.Dybbroe]
- Revert "Merge branch 'unstable' of /data/proj/SAF/GIT/mpop into
  unstable" [Martin Raspaud]

  This reverts commit 45809273f2f9670c8282c32197ef47071aecaa74, reversing
  changes made to 10ae6838131ae1b6e119e05e08496d1ec9018a4a.
- Revert "Reapplying thin_modis cleaning" [Martin Raspaud]

  This reverts commit 52c63d6fbc9f12c03b645f29dd58250da943d24a.
- Reapplying thin_modis cleaning. [Martin Raspaud]
- Merge branch 'unstable' of /data/proj/SAF/GIT/mpop into unstable. [Martin Raspaud]
- Merge branch 'unstable' of /data/proj/SAF/GIT/mpop into unstable. [Adam.Dybbroe]
- Merge branch 'unstable' of /data/proj/SAF/GIT/mpop into unstable. [Adam.Dybbroe]
- Merge branch 'pre-master' into unstable. [Adam.Dybbroe]

  Conflicts: mpop/satin/eps_avhrr.py
- Minor enhancements to nwcsaf pps cloud type reading: adding support for
  phase and quality flags. [Adam.Dybbroe]
- Fixing indexing bug: missing last line in Metop AVHRR
  granule. [Adam.Dybbroe]
- Merge branch 'unstable' of /data/proj/SAF/GIT/mpop into unstable. [Adam.Dybbroe]

  Conflicts: doc/source/conf.py, mpop/instruments/mviri.py,
  mpop/instruments/seviri.py, mpop/instruments/test_mviri.py,
  mpop/instruments/test_seviri.py, mpop/instruments/test_visir.py,
  mpop/instruments/visir.py, mpop/satin/test_mipp.py,
  mpop/satin/thin_modis.py, mpop/saturn/runner.py, mpop/scene.py,
  setup.py, version.py
- Merge branch 'unstable' of https://github.com/mraspaud/mpop into unstable. [Adam.Dybbroe]
- Thin_modis cleanup. [Martin Raspaud]
- Cleanup. [Martin Raspaud]
- Style: cleaning up. [Martin Raspaud]
- Doc: added screenshots. [Martin Raspaud]
- Cleanup, switch to compositer globally. [Martin Raspaud]
- Doc: added more documentation to polar_segments.py. [Martin Raspaud]
- Cleanup: remove old unit test for assemble_swath. [Martin Raspaud]
- Bugfix in assemble_segments. [Martin Raspaud]
- Cleanup: removed the old assemble_swath function. [Martin Raspaud]
- Doc: update docstring for project. [Martin Raspaud]
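The assemble_segments function touched by the nearby entries concatenates
consecutive granules into one scene; a hedged sketch (the import location
and the pre-built segment scenes are assumptions)::

    # Hedged sketch: scene_a and scene_b are consecutive granules of the
    # same pass, loaded beforehand.
    from mpop.scene import assemble_segments

    full_swath = assemble_segments([scene_a, scene_b])
    local_data = full_swath.project("euro4")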
- Upgrade: assemble_segments now uses the scene factory. [Martin Raspaud]
- DOC: examples are now functional. [Martin Raspaud]
- Cleanup: removed the old plugins directory. [Martin Raspaud]
- Merge branch 'new_plugins' into unstable. [Martin Raspaud]

  Conflicts: mpop/plugin_base.py
- Init file for plugins initialization. [Adam.Dybbroe]
- Merge branch 'new_plugins' of https://github.com/mraspaud/mpop into new_plugins. [Adam.Dybbroe]
- Removing an old deprecated and now buggy part - has been caught by the
  try-exception since long. Adding for the plugins
  directory. [Adam.Dybbroe]
- Corrected import bug. [Adam.Dybbroe]
- Merge branch 'unstable' into new_plugins. [Adam.Dybbroe]
- Bug correction - config file reading section 'format'. [Adam.Dybbroe]
- Removing an old deprecated and now buggy part - has been caught by the
  try-exception since long. Adding for the plugins
  directory. [Adam.Dybbroe]
- Merge branch 'unstable' of github.com:mraspaud/mpop into unstable. [Martin Raspaud]
- Merge branch 'unstable' of https://github.com/mraspaud/mpop into unstable. [Adam.Dybbroe]
- First time in git. [Adam.Dybbroe]
- Merge branch 'unstable' of https://github.com/mraspaud/mpop into unstable. [Adam.Dybbroe]
- Meris level-2 reader - first commit. [Adam.Dybbroe]
- Minor fixes. [Adam.Dybbroe]
- Fixed typo. [Adam.Dybbroe]
- Feature: updating the mipp test to use the factory. [Martin Raspaud]
- Cleaning up an old print. [Martin Raspaud]
- Merge branch 'v0.10.2-support' into unstable. [Martin Raspaud]
- Feature: added support for the new eumetsat names (modis) and
  terra. [Martin Raspaud]
- Merge branch 'new_plugins' into unstable. [Martin Raspaud]
- Moved the mipp plugin back to satin. [Martin Raspaud]
- Feature: all former plugins are adapted to the newer
  format. [Martin Raspaud]
- Style: finalizing the plugin system. Now plugin directories are loaded
  from mpop.cfg. [Martin Raspaud]
- Cleanup: removing old stuff. [Martin Raspaud]
- Feature: added reader plugins as attributes to the scene, called
  "_reader". [Martin Raspaud]
- Feature: new plugin format, added a few getters and made the scene
  reference weak. [Martin Raspaud]
- New plugin system. [Martin Raspaud]

  Transferred the mipp plugin.
- DOC: fixed path for examples. [Martin Raspaud]
- DOC: added documentation examples to the project. [Martin Raspaud]
- Merge branch 'unstable' of github.com:mraspaud/mpop into unstable. [Martin Raspaud]
- Using LOG calls instead of print. [Adam.Dybbroe]
- Fixed missing LOG import. [Adam.Dybbroe]
- Further improvements to the MODIS level2 reader and
  processor. [Adam.Dybbroe]
- Feature: added projection to the pps_hdf channels. [Martin Raspaud]
- DOC: added use examples in the documentation
  directory. [Martin Raspaud]
- Merge branch 'master' into unstable. [Martin Raspaud]
- Added the possibility to have instrument_name in the
  filenames. [Adam.Dybbroe]
- Making sure we pass on the orbit number when projecting the
  scene. [Adam.Dybbroe]
- Added a colour map for the Modis Chlorophyll-A
  product. [Adam.Dybbroe]
- Taking away the alpha parameters for RGB modes. [Martin Raspaud]
- Added areas in channels for test. [Martin Raspaud]
- Added the radius parameter to runner. [Martin Raspaud]
- Adding a preliminary NWCSAF pps product reader. [Adam.Dybbroe]
- Cleaning up. [Martin Raspaud]
- Updated satpos file directories. [Martin Raspaud]
- Cleaning up. [Martin Raspaud]
- Merge branch 'pre-master' into unstable. [Martin Raspaud]
- Updated copyright and version number. [Martin Raspaud]
- Updating setup stuff. [Martin Raspaud]
- Merge branch 'pre-master' into unstable. [Martin Raspaud]
[Martin Raspaud] - Adding Day/Night band support. [Adam.Dybbroe] - Adding area for mapping sample data i-bands. [Adam.Dybbroe] - Scaling reflectances to percent (%) as required in mpop. [Adam.Dybbroe] - Adding support for I-bands. [Adam.Dybbroe] - Merge branch 'pre-master' of https://github.com/mraspaud/mpop into pre-master. [Adam.Dybbroe] - Merge branch 'npp-support' into pre-master. [Adam.Dybbroe] - Renamed to npp1.cfg. [Adam.Dybbroe] - VIIRS composites - M-bands only so far. [Adam.Dybbroe] - Cleaning print statements. [Adam.Dybbroe] - NPP template. [Adam.Dybbroe] - Adding NPP/VIIRS test area for sample data: M-bands. [Adam.Dybbroe] - Adding I-band support. [Adam.Dybbroe] - Fixing for re-projection. [Adam.Dybbroe] - Various small corrections. [Adam.Dybbroe] - Corrected band widths - ned to be in microns not nm. [Adam.Dybbroe] - Support for NPP/JPSS VIIRS. [Adam.Dybbroe] - Updated copyright in sphinx doc. [Martin Raspaud] - Deprecating add_overlay in favor of pycoast. [Martin Raspaud] - Merge branch 'feature-new-nc-format' into unstable. [Martin Raspaud] - Added support for different ordering of dimensions in band data. [Martin Raspaud] Use the band_axis keyword argument. - NC reader support different dimension orderings for band-data. [Martin Raspaud] - NC: now band data is of shape (band, x, y). [Martin Raspaud] v0.11.0 (2011-09-20) -------------------- Fix ~~~ - Bugfix: netcdf saving didn't record lat and lon correctly. [Martin Raspaud] - Bugfix: netcdf saving didn't work if only one value was available. [Martin Raspaud] - Bugfix: test_mipp had invalid proj parameters. [Martin Raspaud] - Bugfix: satellite vs satname again. [Martin Raspaud] - Bugfix: project now chooses mode automatically by default. [Martin Raspaud] - Bugfix: move to jenkins. [Martin Raspaud] - Bugfix: fixed unit test for projector reflecting the new mode handling. [Martin Raspaud] - Bugfix: fixed None mode problem in projector. [Martin Raspaud] - Bugfix: The default projecting mode now take into account the types of the in and out areas. [Martin Raspaud] - Bugfix: forgot the argument to wait in filewatcher. [Martin Raspaud] - Bugfix: tags and gdal_options were class attributes, they should be instance attributes. [Martin Raspaud] - Bugfix: 0 reflectances were masked in aapp1b loader. [Martin Raspaud] - Bugfix: corrected parallax values as no_data in msg products reading. [Martin Raspaud] - Bugfix: tags and gdal_options were class attributes, they should be instance attributes. [Martin Raspaud] - Bugfix: Compatibility with nordrad was broken. [Martin Raspaud] - Bugfix: forgot the argument to wait in filewatcher. [Martin Raspaud] - Bugfix: forgot strptime = datetime.strptime when python > 2.5. [Martin Raspaud] - Bugfix: corrected parallax values as no_data in msg products reading. [Martin Raspaud] - Bugfix: individual channel areas are preserved when assembled together. [Martin Raspaud] - Bugfix: cleanup tmp directory when convertion to lvl 1b is done. [Martin Raspaud] - Bugfix: remove hardcoded pathes in hrpt and eps lvl 1a. [Martin Raspaud] - Bugfix: use mpop's main config path. [Martin Raspaud] - Bugfix: added python 2.4 compatibility. [Martin Raspaud] - Bugfix: allow all masked array as channel data. [Martin Raspaud] - Better support for channel-bound areas. [Martin Raspaud] - Bugfix: 0 reflectances were masked in aapp1b loader. [Martin Raspaud] - Bugfix: tags and gdal_options were class attributes, they should be instance attributes. [Martin Raspaud] - Bugfix: error checking on area_extent for loading. 
[Martin Raspaud] - Bugfix: non loaded channels should not induce computation of projection. [Martin Raspaud] - Bugfix: thin modis didn't like area extent and was locked in 2010... [Martin Raspaud] - Bugfix: Compatibility with nordrad was broken. [Martin Raspaud] Other ~~~~~ - Merge branch 'release-0.11' [Martin Raspaud] - Merge branch 'pre-master' into release-0.11. [Martin Raspaud] - Updated copyright dates in setup.py. [Martin Raspaud] - Bumped version number to 0.11.0. [Martin Raspaud] - Merge branch 'pre-master' into unstable. [Martin Raspaud] - Now a channel can be added to a scene dynamically using dict notation. [esn] - Added units to aapp1b reader. [Martin Raspaud] - Deactivating mipp loading test. [Martin Raspaud] - Adjusted tests for compositer. [Martin Raspaud] - Merge branch 'feature-cleaning' into unstable. [Martin Raspaud] - Merge branch 'unstable' into feature-cleaning. [Martin Raspaud] - Merge branch 'pre-master' into unstable. [Martin Raspaud] - Added append function to scene.py. [Esben S. Nielsen] - New error message when no instrument-levelN section is there in the satellite config file. [Martin Raspaud] - Merge branch 'feature-radius-of-influence' into unstable. [Martin Raspaud] - Syntax bug fixed. [Martin Raspaud] - Made orbit number default to None for PolarFactory's create_scene. [Martin Raspaud] - Merge branch 'feature-radius-of-influence' into unstable. [Martin Raspaud] - Radius of influence is now a keyword parameter to the scene.project method. [Martin Raspaud] - Merge branch 'pre-master' into unstable. [Martin Raspaud] - Can now get reader plugin from PYTHONPATH. [Esben S. Nielsen] - Renamed asimage to as_image. [Martin Raspaud] - Wavelength and resolution are not requirements in config files anymore. [Martin Raspaud] - Merge branch 'feature-channel-to-image' into unstable. [Martin Raspaud] - Feature: added the asimage method to channels, to retrieve a black and white image from the channel data. [Martin Raspaud] - Merge branch 'feature-doc-examples' into unstable. [Martin Raspaud] - Doc: added more documentation to polar_segments.py. [Martin Raspaud] - DOC: examples are now functional. [Martin Raspaud] - DOC: fixed path for examples. [Martin Raspaud] - DOC: Added documentation examples to the project. [Martin Raspaud] - DOC: added use examples in the documentation directory. [Martin Raspaud] - Merge branch 'feature-project-mode' into unstable. [Martin Raspaud] - Doc: update docstring for project. [Martin Raspaud] - Cleanup. [Martin Raspaud] - Switched seviri and mviri to compositer. [Martin Raspaud] - Cleanup. [Martin Raspaud] - Style: Cleaning up. [Martin Raspaud] - Doc: added screenshots. [Martin Raspaud] - Cleanup, switch to compositer globaly. [Martin Raspaud] Conflicts: mpop/instruments/visir.py mpop/satin/hrpt.py mpop/saturn/runner.py - Cleanup: remove old unit test for assemble_swath. [Martin Raspaud] - Bugfix in assemble_segments. [Martin Raspaud] - Cleanup: removed old assemble_swath function. [Martin Raspaud] Conflicts: mpop/scene.py - Upgrade: assemble_segments now uses scene factory. [Martin Raspaud] - Fixed typo. [Adam.Dybbroe] - Feature: updating mipp test to use factory. [Martin Raspaud] - Cleaning up an old print. [Martin Raspaud] Conflicts: mpop/satin/mipp.py - Cleanup: removing old stuff. [Martin Raspaud] - Cleaned up and updated meteosat 9 cfg template further. [Martin Raspaud] - Updated templates to match pytroll MSG tutorial. [Esben S. Nielsen] - Simplified reading of log-level. 
[Lars Orum Rasmussen] - Proposal for reading loglevel from config file. [Lars Orum Rasmussen] - Cfscene now handles channels with all masked data. [Esben S. Nielsen] - Netcdf area fix. [Martin Raspaud] - Syle: copyright updates. [Martin Raspaud] - Modified the modis-lvl2 loader and extended a bit the cf-io interfaces. [Adam.Dybbroe] - First time in GIT A new reader for EOS-HDF Modis level-2 files from NASA. See http://oceancolor.gsfc.nasa.gov/DOCS/ocformats.html#3 for format description. [Adam.Dybbroe] - Added license. [Martin Raspaud] - Merge branch 'unstable' of github.com:mraspaud/mpop into unstable. [Martin Raspaud] - Info needs to be an instance attribute. [Lars Orum Rasmussen] - Fix initialization of self.time_slot. [Lars Orum Rasmussen] - Merge branch 'v0.10.2-support' into unstable. [Martin Raspaud] - Added pyc and ~ files to gitignore. [Martin Raspaud] - Updated thin modis reader for new file name. [Martin Raspaud] - Merge branch 'v0.10.1-support' into unstable. [Martin Raspaud] - Compression and tiling as default for geotifs. [Martin Raspaud] - Merge branch 'v0.10.0-support' into unstable. [Martin Raspaud] - Feauture: support for qc_straylight. [Martin Raspaud] - Compression and tiling as default for geotifs. [Martin Raspaud] - WIP: attempting interrupt switch for sequential runner. [Martin Raspaud] - Feature: changing filewatcher from processes to threads. [Martin Raspaud] - Feauture: support for qc_straylight. [Martin Raspaud] - Compression and tiling as default for geotifs. [Martin Raspaud] - Update: modis enhancements. [Martin Raspaud] - Feature: filewatcher keeps arrival order. [Martin Raspaud] - Feature: concatenation loads channels. [Martin Raspaud] - Feature: use local tles instead of downloading systematically. [Martin Raspaud] - Feature: move pyaapp as single module. [Martin Raspaud] - Feature: added ana geoloc for hrpt and eps lvl 1a. [Martin Raspaud] - Cosmetics. [Martin Raspaud] - Added gatherer and two_line_elements. [Martin Raspaud] - Merge branch 'unstable' of github.com:mraspaud/mpop into unstable. [Martin Raspaud] - Moved a parenthesis six characters to the left. [Lars Orum Rasmussen] - Feature: assemble_segments function, more clever and should replace assemble_swaths. [Martin Raspaud] - Feature: thin modis reader upgrade, with lonlat estimator and channel trimmer for broken sensors. [Martin Raspaud] - Merge branch 'unstable' of github.com:mraspaud/mpop into unstable. [Martin Raspaud] - Netcdf bandname now only uses integer part of resolution. [Esben S. Nielsen] - Improvement: made resolution int in band names, for netcdf. [Martin Raspaud] - Cleaning. [Martin Raspaud] - WIP: ears. [Martin Raspaud] - Trying to revive the pynwclib module. [Martin Raspaud] - Cleaning. [Martin Raspaud] - Wip: polar hrpt 0 to 1b. [Martin Raspaud] - Feature: Added proj4 parameters for meteosat 7. [Martin Raspaud] - Merge branch 'unstable' of github.com:mraspaud/mpop into unstable. [Martin Raspaud] - Cosmetic. [Esben S. Nielsen] - Now channels are read and saved in order. Optimized scaling during CF save. [Esben S. Nielsen] - Feature: Adding more factories. [Martin Raspaud] - Documentation: adding something on factories and area_extent. [Martin Raspaud] - Documentation: added needed files in setup.py. [Martin Raspaud] v0.10.0 (2011-01-18) -------------------- Fix ~~~ - Bugfix: fixed matching in git command for version numbering. [Martin Raspaud] - Bugfix: Negative temperatures (in K) should not be valid data when reading aapp1b files. 
[Martin Raspaud] - Bugfix: remove hudson from tags when getting version. [Martin Raspaud] - Bugfix: fixed hdf inconstistencies with the old pyhl reading of msg ctype and ctth files. [Martin Raspaud] - Bugfix: Updated code and tests to validate unittests. [Martin Raspaud] - Bugfix: data reloaded even if the load_again flag was False. [Martin Raspaud] - Bugfix: updated tests for disapearance of avhrr.py. [Martin Raspaud] - Bugfix: access to CompositerClass would fail if using the old interface. [Martin Raspaud] - Bugfix: typesize for msg's ctth didn't please pps... [Martin Raspaud] - Bugfix: fixed data format (uint8) in msg_hdf. [Martin Raspaud] - Bugfix: wrong and forgotten instanciations. [Martin Raspaud] - Bugfix: crashing on missing channels in mipp loading. [Martin Raspaud] - Bugfix: forgot to pass along area_extent in mipp loader. [Martin Raspaud] - Bugfix: fixing integration test (duck typing). [Martin Raspaud] - Bugfix: pyresample.geometry is loaded lazily for area building. [Martin Raspaud] - Bugfix: Updated unit tests. [Martin Raspaud] - Bugfix: Last change introduced empty channel list for meteosat 09. [Martin Raspaud] - Bugfix: Last change introduced empty channel list for meteosat 09. [Martin Raspaud] - Bugfix: update unittests for new internal implementation. [Martin Raspaud] - Bugfix: compression argument was wrong in satelliteinstrumentscene.save. [Martin Raspaud] - Bugfix: adapted mpop to new equality operation in pyresample. [Martin Raspaud] - Bugfix: More robust config reading in projector and test_projector. [Martin Raspaud] - Bugfix: updated the msg_hrit (nwclib based) reader. [Martin Raspaud] - Bugfix: swath processing was broken, now fixed. [Martin Raspaud] - Bugfix: corrected the smaller msg globe area. [Martin Raspaud] - Bugfix: Erraneous assumption on the position of the 0,0 lon lat in the seviri frame led to many wrong things. [Martin Raspaud] - Bugfix: introduced bugs in with last changes. [Martin Raspaud] - Bugfix: new area extent for EuropeCanary. [Martin Raspaud] - Bugfix: Updated setup.py to new structure. [Martin Raspaud] - Bugfix: updated integration test to new structure. [Martin Raspaud] - Bugfix: more verbose crashing when building extensions. [Martin Raspaud] - Bugfix: corrected EuropeCanary region. [Martin Raspaud] - Bugfix: made missing areas message in projector more informative (includes missing area name). [Martin Raspaud] - Bugfix: Added missing import in test_pp_core. [Martin Raspaud] - Bugfix: fixing missing import in test_scene. [Martin Raspaud] Other ~~~~~ - Style: remove a print statement and an unused import. [Martin Raspaud] - Feature: Added natural composite to default composite list. [Martin Raspaud] - Feature: made compositer sensitive to custom composites. [Martin Raspaud] - Documentation: Upgraded documentation to 0.10.0. [Martin Raspaud] - Merge branch 'unstable' of github.com:mraspaud/mpop into unstable. [Martin Raspaud] - The RELEASE-VERSION file should not be checked into git. [Lars Orum Rasmussen] - Optimized parts of mpop. Fixed projector caching. [Esben S. Nielsen] - Optimized parts of mpop processing. Made projector caching functional. [Esben S. Nielsen] - Ignore build directory. [Lars Orum Rasmussen] - Check array in stretch_logarithmic. [Lars Orum Rasmussen] - Prevent adding unintended logging handlers. [Lars Orum Rasmussen] - Feature: Adding extra tags to the image allowed in local_runner. [Martin Raspaud] - Style: lines to 80 chars. 
[Martin Raspaud] - Merge branch 'unstable' [Martin Raspaud] - Feature: pps hdf loading and polar production update. [Martin Raspaud] - Style: cleanup. [Martin Raspaud] - Merge branch 'unstable' of github.com:mraspaud/mpop into unstable. [Martin Raspaud] Conflicts: mpop/satin/mipp.py - Fixed memory problems. Workaround for lazy import of pyresample. Now uses weakref for compositor. [Esben S. Nielsen] - Better logging in scene loading function. [Martin Raspaud] - Remove unneeded import. [Martin Raspaud] - New version. [Martin Raspaud] - Merge branch 'master' of github.com:mraspaud/mpop. [Lars Orum Rasmussen] - Feature: direct_readout chain in place. [Martin Raspaud] - Removing no longer needed avhrr.py. [Martin Raspaud] - Made scaling expression in cfscene.py nicer. [Esben S. Nielsen] - Corrected shallow copy problem with compositor. Simplyfied usage of GeostationaryFactory. [Esben S. Nielsen] - Feature: cleaner hdf reading for both pps and msg. [Martin Raspaud] - Stability: added failsafe in case no config file is there when loading. [Martin Raspaud] - Merge branch 'pps_hdf' into unstable. [Martin Raspaud] - Feature: Support area_extent in scene.load. [Martin Raspaud] - Feature: Cleaning and use the mipp area_extent and sublon. [Martin Raspaud] - Style: Allow to exclude all the *level? sections. [Martin Raspaud] - Redespached a few composites. [Martin Raspaud] - Style: cosmetics. [Martin Raspaud] - Feature: added the power operation to channels. [Martin Raspaud] - Removed the no longer needed meteosat09.py file. [Martin Raspaud] - Wip: iterative loading, untested. [Martin Raspaud] - More on versionning. [Martin Raspaud] - Merge branch 'unstable' into pps_hdf. [Martin Raspaud] - Feature: started working on the PPS support. [Martin Raspaud] - Spelling. [Martin Raspaud] - Added logarithmic enhancement. [Lars Orum Rasmussen] - Removed unneeded file. [Martin Raspaud] - Api: new version of mipp. [Martin Raspaud] - Added automatic version numbering. [Martin Raspaud] - Version update to 0.10.0alpha1. [Martin Raspaud] - Api: unload takes separate channels (not iterable) as input. [Martin Raspaud] - Doc: updated the meteosat 9 template config. [Martin Raspaud] - Merge branch 'unstable' of github.com:mraspaud/mpop into unstable. [Martin Raspaud] Conflicts: mpop/satellites/meteosat09.py - Feature: Introduced compound satscene objects. [Martin Raspaud] This is done through the use of an "image" attribute, created by the factory in the "satellites" package. The image attribute holds all the compositing functions, while the satscene object remains solely a container for satellite data and metadata. - Feature: added the get_custom_composites function and a composites section in mpop.cfg to load custom made composites on the fly. [Martin Raspaud] - Feature: make use of mipp's area_extent function. [Martin Raspaud] - Style: cleanup channels_to_load after loading. [Martin Raspaud] - Doc: introduce mpop.cfg. [Martin Raspaud] - Feature: make use of the new mpop.cfg file to find the area file. Added the get_area_def helper function in projector. [Martin Raspaud] - Feature: Added the new pge02f product for met09. [Martin Raspaud] - Feature: New format keyword for images. [Martin Raspaud] - Update: new version of mipp, putting the image upright when slicing. [Martin Raspaud] - Merge branch 'unstable' of github.com:mraspaud/mpop into unstable. [Martin Raspaud] Conflicts: mpop/satout/netcdf4.py mpop/scene.py - Corrected mipp slicing in mipp.py. Added keyword for selecting datatype in cfscene.py. 
Corrected transformation for netCDF data type in cfscene.py. [Esben S. Nielsen] - New add_history function, and some changes in the netcdf handling. [Martin Raspaud] - Upgrade: Upgraded the assemble_segments module to use only one coordinate class. [Martin Raspaud] - Cosmetics: Added log message when slicing in mipp. [Martin Raspaud] - Move everything to a mpop folder, so that import mpop should be used. [Martin Raspaud] - WIP: Completing the nc4 reader. [Martin Raspaud] - Doc: Added credits. [Martin Raspaud] - Doc: updated build for github. [Martin Raspaud] - Feature: Started to support arithmetic operations on channels. [Martin Raspaud] - Feature: support for calibration flag for met 9. [Martin Raspaud] - Cosmetics: Added names to copyrigths. [Martin Raspaud] - Changed default logging. [Esben S. Nielsen] - Merge branch 'dmi_fix' into unstable. [Martin Raspaud] Conflicts: pp/scene.py - Added fill_valued as a keyworded argument. [Lars Orum Rasmussen] - Fixed oversampling error when pyresample is not present. Added compression as default option when writing netCDF files. [Esben S. Nielsen] - Moved pyresample and osgeo dependency in geo_image.py. [Esben S. Nielsen] - Feature: support umarf files for eps avhrr. [Martin Raspaud] - Feature: support the load_again flag for meteosat 9. [Martin Raspaud] - Feature: Allows passing arguments to reader plugins in SatelliteScene.load, and in particular "calibrate" to mipp. [Martin Raspaud] - Feature: added the fill_value argument to channel_image function. [Martin Raspaud] - Cosmetics: reorganized imports. [Martin Raspaud] - Cosmetics: Updated some template files. [Martin Raspaud] - Feature: Added the resave argument for saving projector objects. [Martin Raspaud] - Installation: Updated version number, removed obsolete file to install, and made the package non zip-safe. [Martin Raspaud] - Testing: Added tests for pp.satellites, and some cosmetics. [Martin Raspaud] - Feature: Handled the case of several instruments for get_satellite_class. [Martin Raspaud] - Cosmetics: changed the name of the satellite classes generated on the fly. [Martin Raspaud] - Testing: more on scene unit tests. [Martin Raspaud] - Testing: started integration testing of pp core parts. [Martin Raspaud] - Testing: completed seviri tests. [Martin Raspaud] - Testing: completed avhrr test. [Martin Raspaud] - Testing: Added tests for instruments : seviri, mviri, avhrr. [Martin Raspaud] - Testing: took away prerequisites tests for python 2.4 compatibility. [Martin Raspaud] - Testing: final adjustments for visir. [Martin Raspaud] - Testing: visir tests complete. [Martin Raspaud] - Testing: fixed nosetest running in test_visir. [Martin Raspaud] - Testing: corrected scene patching for visir tests. [Martin Raspaud] - Tests: started testing the visir instrument. [Martin Raspaud] v0.9.0 (2010-10-04) ------------------- Fix ~~~ - Bugfix: geotiff images were all saved with the wgs84 ellipsoid even when another was specified... [Martin Raspaud] - Bugfix: Corrected the formulas for area_extend computation in geos view. [Martin Raspaud] - Bugfix: satellite number in cf proxy must be an int. Added also instrument_name. [Martin Raspaud] - Bugfix: Erraneous on the fly area building. [Martin Raspaud] - Bugfix: geo_image: gdal_options and tags where [] and {} by default, which is dangerous. [Martin Raspaud] - Bugfix: Support for new namespace for osr. [Martin Raspaud] - Bugfix: remove dubble test in test_channel. [Martin Raspaud] - Bugfix: showing channels couldn't handle masked arrays. 
[Martin Raspaud] - Bugfix: Scen tests where wrong in project. [Martin Raspaud] - Bugfix: when loading only CTTH or CloudType, the region name was not defined. [Martin Raspaud] - Bugfix: in test_channel, Channel constructor needs an argument. [Martin Raspaud] - Bugfix: in test_cmp, tested GenericChannel instead of Channel. [Martin Raspaud] - Bugfix: Test case for channel initialization expected the wrong error when wavelength argument was of the wrong size. [Martin Raspaud] - Bugfix: Added length check for "wavelength" channel init argument. [Martin Raspaud] - Bugfix: test case for channel resolution did not follow previous patch allowing real resolutions. [Martin Raspaud] - Bugfix: thin modis lon/lat are now masked arrays. [Martin Raspaud] - Bugfix: in channel constructor, wavelength triplet was not correctly checked for type. [Martin Raspaud] Just min wavelength was check three times. Other ~~~~~ - Cosmetics and documentation in the scene module. [Martin Raspaud] - Feature: better handling of tags and gdal options in geo_images. [Martin Raspaud] - Cleanup: removed uneeded hardcoded satellites and instruments. [Martin Raspaud] - Documentation: Updated readme, with link to the documentation. [Martin Raspaud] - Documentation: Added a paragraph on geolocalisation. [Martin Raspaud] - Refactoring: took away the precompute flag from the projector constructor, added the save method instead. [Martin Raspaud] - Cosmetics. [Martin Raspaud] - Cosmetics. [Martin Raspaud] - Feature: pyresample 0.7 for projector, and enhanced unittesting. [Martin Raspaud] - New template file for areas. [Martin Raspaud] - Feature: First draft for the hrpt reading (using aapp) and eps1a reading (using aapp and kai). [Martin Raspaud] - Cosmetics: cleaning up the etc directory. [Martin Raspaud] - Testing: Basic mipp testing. [Martin Raspaud] - Cosmetics: cfscene. [Martin Raspaud] - Feature: One mipp reader fits all :) [Martin Raspaud] - Feature: helper "debug_on" function. [Martin Raspaud] - Feature: save method for satscene. Supports only netcdf4 for now. [Martin Raspaud] - Feature: reload keyword for loading channels. [Martin Raspaud] - Documentation: better pp.satellites docstring. [Martin Raspaud] - Testing: updated the test_scene file to reflect scene changes. [Martin Raspaud] - Documentation: changed a couple of docstrings. [Martin Raspaud] - Feature: support pyresample areas in geo images. [Martin Raspaud] - Cosmetics: changing area_id to area. [Martin Raspaud] - Feature: adding metadata handling to channels. [Martin Raspaud] - Feature: now scene and channel accept a pyresample area as area attribute. [Martin Raspaud] - Enhancement: making a better mipp plugin. [Martin Raspaud] - Feature: Finished the netcdf writer. [Martin Raspaud] - Feature: updated the netcdf writer and added a proxy scene class for cf conventions. [Martin Raspaud] - Documentation: big update. [Martin Raspaud] - Documentation: quickstart now passes the doctest. [Martin Raspaud] - Documentation: reworking. [Martin Raspaud] - Feature: Moved get_satellite_class and build_satellite_class to pp.satellites. [Martin Raspaud] - Doc: starting documentation update. [Martin Raspaud] - Enhanced mipp reader. [Martin Raspaud] * Added metadata when loading scenes. * Added slicing when reading data from seviri * Added a draft generic reader - Cosmetics: enhanced error description and debug message in aapp1b, giving names to loaded/missing files. [Martin Raspaud] - Testing: updated test_scene. 
[Martin Raspaud] - Feature: Added automatic retreiving of product list for a given satellite. [Martin Raspaud] - Cleaning: remove class retrieving and building from runner.py. [Martin Raspaud] - Cosmetics: Better error message in scene when a reader is not found, plus some code enbelishment. [Martin Raspaud] - Feature: made scene object iteratable (channels are iterated). [Martin Raspaud] - Feature: Adding functions to retreive a satellite class from the satellites name and to build it on the fly from a configuration file. [Martin Raspaud] - Testing: more on channel. [Martin Raspaud] - Testing: added test for pp.scene.assemble_swaths. [Martin Raspaud] - Testing: scene loading tested. [Martin Raspaud] - Cleaning: test_scene is now more pylint friendly. [Martin Raspaud] - Feature: extended scene test. [Martin Raspaud] - Feature: more testing of scene.py. [Martin Raspaud] - Merge branch 'unstable' of github.com:mraspaud/mpop into unstable. [Martin Raspaud] Conflicts: pp/test_scene.py - Feature: Enhanced unitests for scene. [Martin Raspaud] - Feature: Enhanced unitests for scene. [Martin Raspaud] - Tests: Improving unittests for channel classes. [Martin Raspaud] - Feature: Project function won't crash if pyresample can't be loaded. Returns the untouched scene instead. [Martin Raspaud] - Rewrote Filewatcher code. [Martin Raspaud] - Feature: added the refresh option to filewatcher to call the processing function even if no new file has come. [Martin Raspaud] - Refactoring: satellite, number, variant arguments to runner __init__ are now a single list argument. [Martin Raspaud] - Cleaning: Removing pylint errors from runner.py code. [Martin Raspaud] - Resolution can now be a floating point number. [Martin Raspaud] - Added the osgeo namespace when importing gdal. [Martin Raspaud] - Warning: Eps spline interpolation does not work around poles. [Martin Raspaud] - Added the "info" attribute to channel and scene as metadata holder. [Martin Raspaud] - Functionality: Automatically build satellite classes from config files. [Martin Raspaud] - Added copyright notices and updated version. [Martin Raspaud] - Changed channel names for seviri. [Martin Raspaud] - Added info stuff in mipp reader. [Martin Raspaud] - Added info.area_name update on projection. [Martin Raspaud] - Added quick mode for projecting fast and dirty. [Martin Raspaud] - Added single channel image building. [Martin Raspaud] - Added support for gdal_options when saving a geo_image. [Martin Raspaud] - Made satout a package. [Martin Raspaud] - Added a few information tags. [Martin Raspaud] - Added support for mipp reading of met 09. [Martin Raspaud] - Added reader and writer to netcdf format. [Martin Raspaud] - Added info object to the scene object in preparation for the netCDF/CF writer. [Adam Dybbroe] - Added support for FY3 satellite and MERSI instrument. [Adam Dybbroe] - Merge branch 'unstable' of git@github.com:mraspaud/mpop into unstable. [Martin Raspaud] Conflicts: imageo/test_image.py Conflicts: imageo/test_image.py - Bugfix in image unit test: testing "almost equal" instead of "equal" for image inversion (floating point errors). [Martin Raspaud] - Bugfix in image unit test: testing "almost equal" instead of "equal" for image inversion (floating point errors). [Martin Raspaud] - Modified image inversion unit test to reflect new behaviour. [Martin Raspaud] - New rebase. 
[Martin Raspaud] mpop-1.5.0/doc/000077500000000000000000000000001317160620000132315ustar00rootroot00000000000000mpop-1.5.0/doc/Makefile000066400000000000000000000061041317160620000146720ustar00rootroot00000000000000# Makefile for Sphinx documentation # # You can set these variables from the command line. SPHINXOPTS = SPHINXBUILD = sphinx-build PAPER = BUILDDIR = build # Internal variables. PAPEROPT_a4 = -D latex_paper_size=a4 PAPEROPT_letter = -D latex_paper_size=letter ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source .PHONY: help clean html dirhtml pickle json htmlhelp qthelp latex changes linkcheck doctest help: @echo "Please use \`make ' where is one of" @echo " html to make standalone HTML files" @echo " dirhtml to make HTML files named index.html in directories" @echo " pickle to make pickle files" @echo " json to make JSON files" @echo " htmlhelp to make HTML files and a HTML help project" @echo " qthelp to make HTML files and a qthelp project" @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" @echo " changes to make an overview of all changed/added/deprecated items" @echo " linkcheck to check all external links for integrity" @echo " doctest to run all doctests embedded in the documentation (if enabled)" clean: -rm -rf $(BUILDDIR)/* html: $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." dirhtml: $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." pickle: $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle @echo @echo "Build finished; now you can process the pickle files." json: $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json @echo @echo "Build finished; now you can process the JSON files." htmlhelp: $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp @echo @echo "Build finished; now you can run HTML Help Workshop with the" \ ".hhp project file in $(BUILDDIR)/htmlhelp." qthelp: $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp @echo @echo "Build finished; now you can run "qcollectiongenerator" with the" \ ".qhcp project file in $(BUILDDIR)/qthelp, like this:" @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/NWCSAFMSGPP.qhcp" @echo "To view the help file:" @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/NWCSAFMSGPP.qhc" latex: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." @echo "Run \`make all-pdf' or \`make all-ps' in that directory to" \ "run these through (pdf)latex." changes: $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes @echo @echo "The overview file is in $(BUILDDIR)/changes." linkcheck: $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck @echo @echo "Link check complete; look for any errors in the above output " \ "or in $(BUILDDIR)/linkcheck/output.txt." doctest: $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest @echo "Testing of doctests in the sources finished, look at the " \ "results in $(BUILDDIR)/doctest/output.txt." mpop-1.5.0/doc/README000066400000000000000000000002441317160620000141110ustar00rootroot00000000000000In order to read the documentation, sphinx needs to be installed. Then run make. 
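For example, to build the HTML version of the documentation:

  cd doc
  make html

The generated pages end up in doc/build/html; running make without arguments lists the other available targets.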
The examples directory shows different ways of using mpop for production purposes.mpop-1.5.0/doc/examples/000077500000000000000000000000001317160620000150475ustar00rootroot00000000000000mpop-1.5.0/doc/examples/geo_hrit.py000066400000000000000000000054401317160620000172240ustar00rootroot00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright (c) 2011, 2014 SMHI # Author(s): # Martin Raspaud # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # You should have received a copy of the GNU General Public License # along with this program. If not, see . """Minimal script for geostationary production. We take the case of HRIT data from meteosat 9, as send through eumetcast. - Install mipp, mpop, and pyresample - Don't forget to set up the PPP_CONFIG_DIR variable to point to your configuration files. - Edit the meteosat09.cfg configuration file (a template is provided in case you don't have one) with your HRIT directory: .. code-block:: ini [seviri-level1] format = 'xrit/MSG' dir='/data/hrit_in' filename='H-000-MSG?__-MSG?________-%(channel)s-%(segment)s-%Y%m%d%H%M-__' filename_pro='H-000-MSG?__-MSG?________-_________-%(segment)s-%Y%m%d%H%M-__' filename_epi='H-000-MSG?__-MSG?________-_________-%(segment)s-%Y%m%d%H%M-__' where `/data/hrit_in` has to be changed to anything that suits your environment. - Here is an example of a minimal script that has to be called as soon as an MSG slot has arrived (usually, watching the arrival of the epilogue file suffices) """ from mpop.utils import debug_on debug_on() from mpop.satellites import GeostationaryFactory import sys from datetime import datetime if sys.version_info < (2, 5): import time def strptime(string, fmt=None): """This function is available in the datetime module only from Python >= 2.5. """ return datetime(*time.strptime(string, fmt)[:6]) else: strptime = datetime.strptime if __name__ == '__main__': if len(sys.argv) < 2: print "Usage: " + sys.argv[0] + " time_string" sys.exit() time_string = sys.argv[1] time_slot = strptime(time_string, "%Y%m%d%H%M") global_data = GeostationaryFactory.create_scene("meteosat", "09", "seviri", time_slot) global_data.load() areas = ["euro4", "scan2"] for area in areas: local_data = global_data.project(area) img = local_data.image.overview() img.save("overview_" + area + "_" + time_string + ".png") img = local_data.image.fog() img.save("fog_" + area + "_" + time_string + ".png") mpop-1.5.0/doc/examples/polar_aapp1b.py000066400000000000000000000047041317160620000177670ustar00rootroot00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright (c) 2011 SMHI # Author(s): # Martin Raspaud # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the # GNU General Public License for more details. # You should have received a copy of the GNU General Public License # along with this program. If not, see . """Minimal script for polar data (from aapp level 1b). We take the case of level 1b data (calibrated and geolocalized) from noaa 19, as output from AAPP. - Install mpop and pyresample - Don't forget to set up the PPP_CONFIG_DIR variable to point to your configuration files. - Edit the noaa19.cfg configuration file (a template is provided in case you don't have one) with your data directory: .. code-block:: ini [avhrr-level2] filename = hrpt_%(satellite)s_%Y%m%d_%H%M_%(orbit)s.l1b dir = /data/polar/ format = aapp1b - Here is an example of a minimal script that has to be called as soon as a new swath has arrived """ from mpop.satellites import PolarFactory import sys from datetime import datetime if sys.version_info < (2, 5): import time def strptime(string, fmt=None): """This function is available in the datetime module only from Python >= 2.5. """ return datetime(*time.strptime(string, fmt)[:6]) else: strptime = datetime.strptime if __name__ == '__main__': if len(sys.argv) < 3: print "Usage: " + sys.argv[0] + " time_string orbit" sys.exit() time_string = sys.argv[1] orbit = sys.argv[2] time_slot = strptime(time_string, "%Y%m%d%H%M") global_data = PolarFactory.create_scene("noaa", "19", "avhrr", time_slot, orbit) global_data.load() areas = ["euro4", "scan2"] for area in areas: local_data = global_data.project(area) img = local_data.image.overview() img.save("overview_" + area + "_" + time_string + ".png") img = local_data.image.cloudtop() img.save("cloudtop_" + area + "_" + time_string + ".png") mpop-1.5.0/doc/examples/polar_segments.py000066400000000000000000000072031317160620000204450ustar00rootroot00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright (c) 2011 SMHI # Author(s): # Martin Raspaud # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # You should have received a copy of the GNU General Public License # along with this program. If not, see . """Minimal script for assembling and processing segmented polar data. We take the case of level 1b data (calibrated and geolocalized) from noaa 19, as output from AAPP. - Install mpop and pyresample - Don't forget to set up the PPP_CONFIG_DIR variable to point to your configuration files. - Edit the gdsmetop02.cfg configuration file (a template is provided in case you don't have one) with your data directory: .. code-block:: ini [avhrr-granules] type=eps_avhrr granularity=60 full_scan_period=0.1667 scan_width=2048 dir=/data/prod/satellit/ears/avhrr filename=AVHR_xxx_1B_M02_%Y%m%d%H%M* - Here is a minimal script that monitors a directory and builds composites: """ import sys from datetime import timedelta, datetime import glob import os import time from mpop.saturn.gatherer import Granule, Gatherer def get_files_newer_than(directory, time_stamp): """Get the list of files from the *directory* which are newer than a given *time_stamp*. 
""" filelist = glob.glob(os.path.join(directory, "*")) return [filename for filename in filelist if datetime.fromtimestamp(os.stat(filename)[8]) > time_stamp] if __name__ == '__main__': if len(sys.argv) < 3: print "Usage: " + sys.argv[0] + " directory wait_for_more" sys.exit() directory = sys.argv[1] # if we wait for files in the directory forever or not wait_for_more = eval(sys.argv[2]) areas = ["euro4", "scan2"] gatherer = None time_stamp = datetime(1970, 1, 1) while True: # Scanning directory new_time_stamp = datetime.now() filenames = get_files_newer_than(directory, time_stamp) time_stamp = new_time_stamp # Adding files to the gatherer for filename in filenames: granule = Granule(filename) if gatherer is None: gatherer = Gatherer(areas_of_interest=areas, timeliness=timedelta(minutes=150), satname=granule.satname, number=granule.number, variant=granule.variant) gatherer.add(granule) # Build finished swath and process them. for swath in gatherer.finished_swaths: global_data = swath.concatenate() local_data = global_data.project(swath.area) time_string = global_data.time_slot.strftime("%Y%m%d%H%M") area_id = swath.area.area_id img = local_data.image.overview() img.save("overview_" + area_id + "_" + time_string + ".png") img = local_data.image.natural() img.save("natural_" + area_id + "_" + time_string + ".png") if not wait_for_more: break # wait 60 seconds before restarting time.sleep(60) mpop-1.5.0/doc/source/000077500000000000000000000000001317160620000145315ustar00rootroot00000000000000mpop-1.5.0/doc/source/conf.py000066400000000000000000000163201317160620000160320ustar00rootroot00000000000000# -*- coding: utf-8 -*- # # NWCSAF/MSG PP documentation build configuration file, created by # sphinx-quickstart on Fri Sep 25 16:58:28 2009. # # This file is execfile()d with the current directory set to its containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys, os # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. sys.path.append(os.path.abspath('../../')) class Mock(object): def __init__(self, *args, **kwargs): pass def __call__(self, *args, **kwargs): return Mock() @classmethod def __getattr__(cls, name): if name in ('__file__', '__path__'): return '/dev/null' elif name[0] == name[0].upper(): mockType = type(name, (), {}) mockType.__module__ = __name__ return mockType elif name == "inf": return 0 else: return Mock() MOCK_MODULES = ['Image', 'pyhdf.SD', 'pyhdf.error', 'numpy', 'numpy.core', 'numpy.core.multiarray', 'pyresample', 'pyresample.utils', 'pyresample.geometry', 'h5py'] for mod_name in MOCK_MODULES: sys.modules[mod_name] = Mock() # -- General configuration ----------------------------------------------------- # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'sphinx.ext.todo', 'sphinx.ext.coverage'] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8' # The master toctree document. 
master_doc = 'index' # General information about the project. project = u'mpop' copyright = u'2009-2013, The pytroll crew' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # import mpop.version as current_version # The full version, including alpha/beta/rc tags. release = current_version.__version__ # The short X.Y version. version = ".".join(release.split(".")[:2]) # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of documents that shouldn't be included in the build. #unused_docs = [] # List of directories, relative to source directory, that shouldn't be searched # for source files. exclude_trees = [] # The reST default role (used for this markup: `text`) to use for all documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # -- Options for HTML output --------------------------------------------------- # The theme to use for HTML and HTML Help pages. Major themes that come with # Sphinx are currently 'default' and 'sphinxdoc'. html_theme = 'default' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. #html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. 
#html_use_modindex = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = '' # Output file base name for HTML help builder. htmlhelp_basename = 'NWCSAFMSGPPdoc' # -- Options for LaTeX output -------------------------------------------------- # The paper size ('letter' or 'a4'). #latex_paper_size = 'letter' # The font size ('10pt', '11pt' or '12pt'). #latex_font_size = '10pt' # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). latex_documents = [ ('index', 'mpop.tex', u'mpop documentation', u'SMHI', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # Additional stuff for the LaTeX preamble. #latex_preamble = '' # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_use_modindex = True # Example configuration for intersphinx: refer to the Python standard library. intersphinx_mapping = {'http://docs.python.org/': None} mpop-1.5.0/doc/source/image.rst000066400000000000000000000012221317160620000163420ustar00rootroot00000000000000.. _geographic-images: =================== Geographic images =================== In order to build satellite composites, mpop has to handle images. We could have used PIL, but we felt the need to use numpy masked arrays as base for our image channels, and we had to handle geographically enriched images. Hence the two following modules: :mod:`mpop.imageo.image` to handle simple images, and :mod:`mpop.imageo.geo_image`. Simple images ============= .. automodule:: mpop.imageo.image :members: :undoc-members: Geographically enriched images ============================== .. automodule:: mpop.imageo.geo_image :members: :undoc-members: mpop-1.5.0/doc/source/index.rst000066400000000000000000000025231317160620000163740ustar00rootroot00000000000000.. NWCSAF/MPoP documentation master file, created by sphinx-quickstart on Fri Sep 25 16:58:28 2009. You can adapt this file completely to your liking, but it should at least contain the root `toctree` directive. ================================== Welcome to MPoP's documentation! ================================== The Meteorological Post-Processing package is a python library for generating RGB products for meteorological remote sensing. As such it can create RGB composites directly from satellite instrument channels, or take advantage of precomputed PGEs. Get to the project_ page, with source and downloads. It is designed to be easily extendable to support any meteorological satellite by the creation of plugins. In the base distribution, we provide support for Meteosat-7, -8, -9, -10, Himawari-6 (MTSAT-1R), Himawari-7 (MTSAT-2), GOES-11, GOES-12, GOES-13 through the use of mipp_, and NOAA-15, -16, -17, -18, -19, Metop-A and -B through the use of AAPP. 
Reprojection of data is also available through the use of pyresample_. .. _project: http://github.com/mraspaud/mpop .. _mipp: http://github.com/loerum/mipp .. _pyresample: http://pyresample.googlecode.com .. toctree:: :maxdepth: 2 install quickstart pp input image Indices and tables ================== * :ref:`genindex` * :ref:`modindex` * :ref:`search` mpop-1.5.0/doc/source/input.rst000066400000000000000000000047131317160620000164270ustar00rootroot00000000000000============================================== Input plugins: the :mod:`mpop.satin` package ============================================== Available plugins and their requirements ======================================== mipp_xrit --------- Reader for the hrit/lrit formats. Recommends numexpr and pyresample. .. automodule:: mpop.satin.mipp_xrit :members: :undoc-members: aapp1b ------ Reader for the AAPP level 1b format. Requires numpy, recommends pyresample. .. automodule:: mpop.satin.aapp1b :members: :undoc-members: eps_l1b ------- Reader for the EPS level 1b format. Recommends pyresample. .. automodule:: mpop.satin.eps_l1b :members: :undoc-members: viirs_sdr --------- Reader for the VIIRS SDR format. Requires h5py. .. automodule:: mpop.satin.viirs_sdr :members: :undoc-members: viirs_compact ------------- Reader for the VIIRS compact format from EUMETSAT. Requires h5py. .. automodule:: mpop.satin.viirs_compact :members: :undoc-members: hdfeos_l1b ---------- Reader for the Modis data format. Requires pyhdf. .. automodule:: mpop.satin.hdfeos_l1b :members: :undoc-members: msg_hdf ------- Reader for MSG cloud products. Requires h5py, recommends acpg. .. automodule:: mpop.satin.msg_hdf :members: :undoc-members: pps_hdf ------- Reader for PPS cloud products. Requires acpg. .. automodule:: mpop.satin.pps_hdf :members: :undoc-members: hrpt ---- Reader for the level 0 hrpt format. Requires AAPP and pynav. .. automodule:: mpop.satin.hrpt :members: :undoc-members: eps1a ----- Reader for level 1a Metop segments. Requires AAPP, kai and eugene. .. automodule:: mpop.satin.eps1a :members: :undoc-members: Interaction with reader plugins =============================== The reader plugin instance used for a specific scene is accessible through a scene attribute named after the plugin format. For example, the attribute for the *foo* format would be called *foo_reader*. This way, the other methods present in the plugins are available through the scene object. The plugin API ============== .. versionchanged:: 0.13.0 New plugin API .. automodule:: mpop.plugin_base :members: :undoc-members: Adding a new plugin =================== For now, only reader and writer plugin base classes are defined. To add one of those, just create a new class that subclasses the plugin. The interface of any reader plugin must include the :meth:`load` method. Take a look at the existing readers for more insight, and at the sketch below.
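As an illustration, here is a minimal sketch of what a reader plugin for a hypothetical *foo* format could look like. It only shows the general shape: the `pformat` attribute and the subclassing of :class:`mpop.plugin_base.Reader` follow the pattern of the existing readers, while the helper `read_foo_channel` and the way the data is stored on the scene are illustrative assumptions to be adapted to the real format:

.. code-block:: python

    from mpop.plugin_base import Reader


    class FooReader(Reader):
        """Illustrative reader plugin for a hypothetical *foo* format."""

        # The format name: with this, the plugin instance is reachable on
        # the scene through the *foo_reader* attribute described above.
        pformat = "foo"

        def load(self, channels_to_load):
            """Load *channels_to_load* from disk into the scene."""
            for chn_name in channels_to_load:
                # read_foo_channel is a hypothetical helper returning a
                # masked array with the data of the given channel.
                self._scene[chn_name] = read_foo_channel(chn_name)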
mpop-1.5.0/doc/source/install.rst000066400000000000000000000047261317160620000167400ustar00rootroot00000000000000=========================== Installation instructions =========================== Getting the files and installing them ===================================== First you need to get the files from github:: cd /path/to/my/source/directory/ git clone git://github.com/mraspaud/mpop.git You can also retrieve a tarball from there if you prefer, then run:: tar zxvf tarball.tar.gz Then you need to install mpop on your computer:: cd mpop python setup.py install [--prefix=/my/custom/installation/directory] You can also install it in develop mode to make it easier to hack:: python setup.py develop [--prefix=/my/custom/installation/directory] Configuration ============= Environment variables --------------------- Environment variables which are needed for mpop are the `PYTHONPATH` of course, and the `PPP_CONFIG_DIR`, which is the directory where the configuration files are to be found. If the latter is not defined, the `etc` directory of the mpop installation is used. Input data directories ---------------------- The input data directories are set up in the satellite configuration files, which can be found in the `PPP_CONFIG_DIR` directory (some template files are provided with mpop in the `etc` directory): .. code-block:: ini [seviri-level1] format = 'xrit/MSG' dir='/data/geo_in' filename='H-000-MSG?__-MSG?________-%(channel)s-%(segment)s-%Y%m%d%H%M-__' filename_pro='H-000-MSG?__-MSG?________-_________-%(segment)s-%Y%m%d%H%M-__' filename_epi='H-000-MSG?__-MSG?________-_________-%(segment)s-%Y%m%d%H%M-__' [seviri-level2] format='mipp_xrit' The different levels indicate different steps of the reading. The `level2` section gives at least the plugin to read the data with. In some cases, the data is first read from another level, as is the case with HRIT/LRIT data when we use mipp_: there we use the `level1` section. The data location is generally split into two parts: the directory and the filename. There can also be additional filenames depending on the reader plugin: here, mipp also needs the filenames for the prologue and epilogue files. Note that the section starts with the name of the instrument. This is important in the case where several instruments are available for the same satellite. Note also that the filename can contain wildcards (`*` and `?`) and optional values (here channel, segment, and time markers). It is up to the input plugin to handle these constructs if needed. .. _mipp: http://www.github.com/loerum/mipp mpop-1.5.0/doc/source/pp.rst000066400000000000000000000123611317160620000157050ustar00rootroot00000000000000======================================= Making use of the :mod:`mpop` package ======================================= The :mod:`mpop` package is the heart of mpop: here are defined the core classes which the user will then need to build satellite composites. Conventions about satellite names ================================= Throughout the document, we will use the following conventions: - *platform name* is the name of an individual satellite following the OSCAR_ naming scheme, e.g. "NOAA-19". - *variant* will be used to differentiate the same data (from the same satellite and instrument) coming in different flavours. For example, we use variant to distinguish data coming from the satellite Metop-B from direct readout (no variant), regional coverage (EARS) or global coverage (GDS). All the satellite configuration files in `PPP_CONFIG_DIR` should be named `<variant><platform name>.cfg`, e.g.
`NOAA-19.cfg` or `GDSMetop-B.cfg`. .. _OSCAR: http://www.wmo-sat.info/oscar/satellites/ Creating a scene object ======================= Creating a scene object can be done by calling the `create_scene` function of a factory (for example :meth:`mpop.satellites.GenericFactory.create_scene`). The reader is referred to the documentation of :class:`mpop.scene.SatelliteInstrumentScene` for a description of the input arguments. Such a scene object is roughly a container for :class:`mpop.channel.Channel` objects, which hold the actual data and information for each band. Loading the data ================ Loading the data is done through the :meth:`mpop.scene.SatelliteInstrumentScene.load` method. Calling it effectively loads the data from disk into memory, so it can take a while depending on the volume of data to load and the performance of the host computer. The channels listed as arguments are then loaded, and cannot be reloaded: a subsequent call to the method will not reload the data from disk. Re-projecting data ================== Once the data is loaded, one might need to re-project the data. The scene objects can be projected onto other areas if the pyresample_ software is installed, thanks to the :meth:`mpop.scene.SatelliteInstrumentScene.project` method. As input, this method takes either a Definition object (see pyresample's documentation) or a string identifier for the area. In the latter case, the referenced region has to be defined in the area file. The name and location of this file is defined in the `mpop.cfg` configuration file, itself located in the directory pointed to by the `PPP_CONFIG_DIR` environment variable. For more information about the internals of the projection process, take a look at the :mod:`mpop.projector` module. .. _pyresample: http://googlecode.com/p/pyresample Geo-localisation of the data ============================ Once the data is loaded, each channel should have an `area` attribute containing a pyresample_ area object, if the pyresample_ package is available. These area objects should implement the :meth:`get_lonlats` method, returning the longitudes and latitudes of the channel data. For more information on this matter, the reader is then referred to the documentation_ of the aforementioned package. .. _documentation: http://pyresample.googlecode.com/svn/trunk/docs/build/html/index.html Image composites ================ Methods building image composites are distributed in different modules, taking advantage of the hierarchical structure offered by OOP. The image composites common to all visir instruments are defined in the :mod:`mpop.instruments.visir` module. Some instrument modules, like :mod:`mpop.instruments.avhrr` or :mod:`mpop.instruments.seviri`, overload these methods to adapt them better to the instrument at hand. For instructions on how to write new composites, see :ref:`geographic-images`. Adding a new satellite: configuration file ========================================== A satellite configuration file looks like the following (here Meteosat-7, mviri instrument): .. literalinclude:: ../../../satprod/etc/meteosat07.cfg :language: ini :linenos: The configuration file must hold a `satellite` section, the list of channels for the needed instruments (here `mviri-n` sections), and how to read the data in mipp (`mviri-level1`) and how to read it in mpop (`mviri-level2`). Using this template we can define new satellites and instruments.
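Once such a configuration file is present in `PPP_CONFIG_DIR`, the new satellite can be used like any built-in one. As a minimal sketch (the platform, instrument and wavelength below are assumptions that have to match what the configuration file actually declares, and the time slot is arbitrary):

.. code-block:: python

    from datetime import datetime

    from mpop.satellites import GeostationaryFactory

    # The platform name has to match the name of the configuration file.
    time_slot = datetime(2009, 10, 8, 14, 30)
    scene = GeostationaryFactory.create_scene("Meteosat-7", "", "mviri",
                                              time_slot)
    # Load the visible channel; the valid wavelengths are the ones listed
    # in the channel sections of the configuration file.
    scene.load([0.7])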
Re-projecting data
==================

Once the data is loaded, one might need to re-project the data. The scene
objects can be projected onto other areas if the pyresample_ software is
installed, thanks to the :meth:`mpop.scene.SatelliteInstrumentScene.project`
method. As input, this method takes either a Definition object (see
pyresample's documentation) or a string identifier for the area. In the
latter case, the referenced region has to be defined in the area file. The
name and location of this file is defined in the `mpop.cfg` configuration
file, itself located in the directory pointed to by the `PPP_CONFIG_DIR`
environment variable.

For more information about the internals of the projection process, take a
look at the :mod:`mpop.projector` module.

.. _pyresample: http://googlecode.com/p/pyresample

Geo-localisation of the data
============================

Once the data is loaded, each channel should have an `area` attribute
containing a pyresample_ area object, if the pyresample_ package is
available. These area objects should implement the :meth:`get_lonlats`
method, returning the longitudes and latitudes of the channel data. For more
information on this matter, the reader is then referred to the documentation_
of the aforementioned package.

.. _documentation: http://pyresample.googlecode.com/svn/trunk/docs/build/html/index.html
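Continuing the sketch above, the pixel coordinates of a loaded channel can
thus be retrieved like this (assuming pyresample is installed)::

    >>> area = global_data[10.8].area
    >>> lons, lats = area.get_lonlats()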
Image composites
================

Methods building image composites are distributed in different modules,
taking advantage of the hierarchical structure offered by OOP. The image
composites common to all visir instruments are defined in the
:mod:`mpop.instruments.visir` module. Some instrument modules, like
:mod:`mpop.instruments.avhrr` or :mod:`mpop.instruments.seviri`, overload
these methods to adapt them better to the instrument at hand. For
instructions on how to write new composites, see :ref:`geographic-images`.

Adding a new satellite: configuration file
==========================================

A satellite configuration file looks like the following (here Meteosat-7,
mviri instrument):

.. literalinclude:: ../../../satprod/etc/meteosat07.cfg
   :language: ini
   :linenos:

The configuration file must hold a `satellite` section, the list of channels
for the needed instruments (here the `mviri-n` sections), and how to read the
data in mipp (`mviri-level1`) and how to read it in mpop (`mviri-level2`).
Using this template we can define new satellites and instruments.

Adding a new satellite: python code
===================================

Another way of adding satellites and instruments to mpop is to write the
corresponding python code. Here is an example of such code:

.. literalinclude:: ../../mpop/instruments/mviri.py
   :language: python
   :linenos:

The :mod:`mpop` API
===================

Satellite scenes
----------------

.. automodule:: mpop.scene
   :members:
   :undoc-members:

Instrument channels
-------------------

.. automodule:: mpop.channel
   :members:
   :undoc-members:

The VisIr instrument class
--------------------------

.. automodule:: mpop.instruments.visir
   :members:
   :undoc-members:

Projection facility
-------------------

.. automodule:: mpop.projector
   :members:
   :undoc-members:

Satellite class loader
----------------------

.. automodule:: mpop.satellites
   :members:
   :undoc-members:

Miscellaneous tools
-------------------

.. automodule:: mpop.tools
   :members:
   :undoc-members:
mpop-1.5.0/doc/source/quickstart.rst000066400000000000000000000231571317160620000174650ustar00rootroot00000000000000
============
 Quickstart
============

The software uses OOP extensively, to allow higher-level metaobject handling.

For this tutorial, we will use the Meteosat plugin and data.

Don’t forget to first source the `profile` file of interest located in the
source `etc` directory.

First example
=============

.. versionchanged:: 0.10.0
   The factory-based loading was added in 0.10.0

Ok, let's get it on::

    >>> from mpop.satellites import GeostationaryFactory
    >>> from mpop.projector import get_area_def
    >>> import datetime
    >>> time_slot = datetime.datetime(2009, 10, 8, 14, 30)
    >>> global_data = GeostationaryFactory.create_scene("Meteosat-9", "", "seviri", time_slot)
    >>> europe = get_area_def("EuropeCanary")
    >>> global_data.load([0.6, 0.8, 10.8], area_extent=europe.area_extent)
    >>> print global_data
    'IR_097: (9.380,9.660,9.940)μm, resolution 3000.40316582m, not loaded'
    'IR_016: (1.500,1.640,1.780)μm, resolution 3000.40316582m, not loaded'
    'VIS008: (0.740,0.810,0.880)μm, shape (1200, 3000), resolution 3000.40316582m'
    'VIS006: (0.560,0.635,0.710)μm, shape (1200, 3000), resolution 3000.40316582m'
    'WV_062: (5.350,6.250,7.150)μm, resolution 3000.40316582m, not loaded'
    'IR_120: (11.000,12.000,13.000)μm, resolution 3000.40316582m, not loaded'
    'WV_073: (6.850,7.350,7.850)μm, resolution 3000.40316582m, not loaded'
    'IR_087: (8.300,8.700,9.100)μm, resolution 3000.40316582m, not loaded'
    'IR_039: (3.480,3.920,4.360)μm, resolution 3000.40316582m, not loaded'
    'HRV: (0.500,0.700,0.900)μm, resolution 1000.13434887m, not loaded'
    'IR_134: (12.400,13.400,14.400)μm, resolution 3000.40316582m, not loaded'
    'IR_108: (9.800,10.800,11.800)μm, shape (1200, 3000), resolution 3000.40316582m'

In this example, we create a scene object for the seviri instrument onboard
Meteosat-9, specifying the time of the snapshot of interest. The time is
defined as a datetime object.

The next step is loading the data. This is done using mipp, which takes care
of reading the HRIT data, and slicing the data so that we read just what is
needed. Calibration is also done with mipp. In order to slice the data, we
retrieve the area we will work on, here set to variable *europe*.

Here we call the :meth:`load` function with a list of the wavelengths of the
channels we are interested in, and the area extent in satellite projection of
the area of interest. Each retrieved channel is the closest in terms of
central wavelength, provided that the required wavelength is within the
bounds of the channel.

The wavelengths are given in micrometers and have to be given as a floating
point number (*i.e.*, don't type '1', but '1.0'). Using an integer number
instead returns a channel based on resolution, while using a string retrieves
a channel based on its name.

::

    >>> img = global_data.image.overview()
    >>> img.save("./myoverview.png")
    >>>

Once the channels are loaded, we generate an overview RGB composite image,
and save it as a png image. Instead of :meth:`save`, one could also use
:meth:`show` if the only purpose is to display the image on screen.

Available composites are listed in the :mod:`mpop.satellites.visir` module in
the mpop documentation.

We want more!
==============

In the last example, the composite generation worked because the channels
needed for the overview (0.6, 0.8, 10.8 μm) were loaded. If we try to
generate a day natural color composite, which also requires the 1.6 μm
channel, it will result in an error::

    >>> img = global_data.image.natural()
    Traceback (most recent call last):
      ...
    NotLoadedError: Required channel 1.63 not loaded, aborting.

So it means that we have to load the missing channel first. To do this we
could enter the channels list to load manually, as we did for the overview,
but we provide a way to get the list of channels needed by a given method
using the `prerequisites` method attribute::

    >>> global_data.load(global_data.image.natural.prerequisites,
    ...                  area_extent=europe.area_extent)
    >>> img = global_data.image.natural()
    >>>

Now you can save the image::

    >>> img.save("./mynaturalcolors.png")
    >>>

If you want to combine several prerequisites for channel loading, since
prerequisites are python sets, you can do::

    >>> global_data.load(global_data.image.overview.prerequisites |
    ...                  global_data.image.natural.prerequisites,
    ...                  area_extent=europe.area_extent)
    >>>

and add as many `| global_data.image.mymethod.prerequisites` as needed.

Retrieving channels
===================

Retrieving channels is dead easy. From the center wavelength::

    >>> print global_data[0.6]
    'VIS006: (0.560,0.635,0.710)μm, shape (1200, 3000), resolution 3000.40316582m'

or from the channel name::

    >>> print global_data["VIS006"]
    'VIS006: (0.560,0.635,0.710)μm, shape (1200, 3000), resolution 3000.40316582m'

or from the resolution::

    >>> print global_data[3000]
    'VIS006: (0.560,0.635,0.710)μm, shape (1200, 3000), resolution 3000.40316582m'

or more than one at a time::

    >>> print global_data[3000, 0.8]
    'VIS008: (0.740,0.810,0.880)μm, shape (1200, 3000), resolution 3000.40316582m'

The printed lines consist of the following values:

* First the name is displayed,
* then the triplet gives the min-, center-, and max-wavelength of the
  channel,
* then follows the shape of the loaded data, or `None` if the data is not
  loaded,
* and finally the theoretical resolution of the channel is shown.

The data of the channel can be retrieved as a numpy (masked) array using the
data property::

    >>> print global_data[0.6].data
    [[-- -- -- ..., -- -- --]
     [-- -- -- ..., -- -- --]
     [-- -- -- ..., -- -- --]
     ...,
     [7.37684259374 8.65549530999 6.58997938374 ..., 0.29507370375
      0.1967158025 0.1967158025]
     [7.18012679124 7.86863209999 6.19654777874 ..., 0.29507370375
      0.29507370375 0.29507370375]
     [5.80311617374 7.57355839624 6.88505308749 ..., 0.29507370375
      0.29507370375 0.29507370375]]

Channel arithmetics
===================

.. versionadded:: 0.10.0
   Channel arithmetics added.

The common arithmetical operators are supported on channels, so that one can
run for example::

    >>> cool_channel = (global_data[0.6] - global_data[0.8]) * global_data[10.8]

PGEs
====

From the satellite data, PGEs [#f1]_ are generated by the accompanying
program. The loading procedure for PGEs is exactly the same as with regular
channels::

    >>> global_data.area = "EuropeCanary"
    >>> global_data.load(["CTTH"])
    >>>

and they can be retrieved as simply as before::

    >>> print global_data["CTTH"]
    'CTTH: shape (1200, 3000), resolution 3000.40316582m'

Making custom composites
========================

Building custom composites makes use of the :mod:`imageo` module. For
example, building an overview composite can be done manually with::

    >>> from mpop.imageo.geo_image import GeoImage
    >>> img = GeoImage((global_data[0.6].data,
    ...                 global_data[0.8].data,
    ...                 -global_data[10.8].data),
    ...                "EuropeCanary",
    ...                time_slot,
    ...                mode="RGB")
    >>> img.enhance(stretch="crude")
    >>> img.enhance(gamma=1.7)

.. versionadded:: 0.10.0
   Custom composites module added.

In order to have mpop automatically use the composites you create, it is
possible to write them in a python module whose name has to be specified in
the `mpop.cfg` configuration file, under the *composites* section::

  [composites]
  module=mpop.smhi_composites

The module has to be importable (i.e. it has to be in the `PYTHONPATH`).
Here is an example of such a module::

  from mpop.imageo import geo_image

  def overview(self):
      """Make an overview RGB image composite.
      """
      self.check_channels(0.635, 0.85, 10.8)

      ch1 = self[0.635].check_range()
      ch2 = self[0.85].check_range()
      ch3 = -self[10.8].data

      img = geo_image.GeoImage((ch1, ch2, ch3),
                               self.area,
                               self.time_slot,
                               fill_value=(0, 0, 0),
                               mode="RGB")

      img.enhance(stretch=(0.005, 0.005))

      return img
  overview.prerequisites = set([0.6, 0.8, 10.8])

  def hr_visual(self):
      """Make a High Resolution visual BW image composite from Seviri
      channel.
      """
      self.check_channels("HRV")

      img = geo_image.GeoImage(self["HRV"].data,
                               self.area,
                               self.time_slot,
                               fill_value=0,
                               mode="L")
      img.enhance(stretch="crude")
      return img
  hr_visual.prerequisites = set(["HRV"])

  seviri = [overview,
            hr_visual]
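Once such a module is declared in `mpop.cfg` and importable, the composites
it defines can be used like the built-in ones. A hypothetical session with
the module above (the output file name is just an example) would be::

    >>> global_data.load(global_data.image.hr_visual.prerequisites)
    >>> img = global_data.image.hr_visual()
    >>> img.save("./myhrvisual.png")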
Projections
===========

Until now, we have used the channels directly as provided by the satellite,
that is in satellite projection. Generating composites thus produces views in
satellite projection, *i.e.* as viewed by the satellite.

Most often however, we will want to project the data onto a specific area so
that only the area of interest is depicted in the RGB composites. Here is how
we do that::

    >>> local_data = global_data.project("eurol")
    >>>

Now we have projected data onto the "eurol" area in the `local_data` variable
and we can operate as before to generate and play with RGB composites::

    >>> img = local_data.image.overview()
    >>> img.save("./local_overview.tif")
    >>>

The image is saved here in GeoTiff_ format. On projected images, one can also
add contour overlays with the :meth:`imageo.geo_image.add_overlay` method.

.. _GeoTiff: http://trac.osgeo.org/geotiff/

.. rubric:: Footnotes

.. [#f1] PGEs in Meteosat : CloudType and CTTH
mpop-1.5.0/doc/source/saturn.rst000066400000000000000000000010411317160620000165730ustar00rootroot00000000000000
============================
 Tools for batch production
============================

Running stuff
-------------

.. automodule:: mpop.saturn.runner
   :members:
   :undoc-members:

Handling tasks
--------------

.. automodule:: mpop.saturn.tasklist
   :members:
   :undoc-members:

Monitoring and processing incoming files
----------------------------------------

..
automodule:: mpop.saturn.filewatcher :members: :undoc-members: Earth geometry -------------- .. automodule:: mpop.saturn.assemble_segments :members: :undoc-members: mpop-1.5.0/etc/000077500000000000000000000000001317160620000132375ustar00rootroot00000000000000mpop-1.5.0/etc/DREOS-Aqua.cfg.template000066400000000000000000000067231317160620000173030ustar00rootroot00000000000000[satellite] satname='aqua' number='' instruments=('modis',) [modis-level2] format=hdfeos_l1b.ModisReader instruments = ('modis',) [modis-level2] format=hdfeos_l1b dir=/data/prod/satellit/modis/lvl1 filename = MYD02{resolution:1s}km_A{start_time:%y%j_%H%M%S}_{processing_time:%Y%j%H%M%S}.hdf geofile=MYD03_A{start_time:%y%j_%H%M%S}_{processing_time:%Y%j%H%M%S}.hdf [modis-level3] format = nc_pps_l2.PPSReader cloud_product_filename = S_NWC_%(product)s_eos2_%(orbit)s_*.nc cloud_product_dir = /path/to/pps/output cloud_product_geofilename = S_NWC_CMA_eos2_%(orbit)s_*.nc geolocation_product_name = CMA [modis-1] name = '1' frequency = (0.620, 0.6450, 0.670) resolution = 250 [modis-2] name = '2' frequency = (0.841, 0.8585, 0.876) resolution = 250 [modis-3] name = '3' frequency = (0.459, 0.4690, 0.479) resolution = 500 [modis-4] name = '4' frequency = (0.545, 0.5550, 0.565) resolution = 500 [modis-5] name = '5' frequency = (1.230, 1.2400, 1.250) resolution = 500 [modis-6] name = '6' frequency = (1.628, 1.6400, 1.652) resolution = 500 [modis-7] name = '7' frequency = (2.105, 2.1300, 2.155) resolution = 500 [modis-8] name = '8' frequency = (0.405, 0.4125, 0.420) resolution = 1000 [modis-9] name = '9' frequency = (0.438, 0.4430, 0.448) resolution = 1000 [modis-10] name = '10' frequency = (0.483, 0.4880, 0.493) resolution = 1000 [modis-11] name = '11' frequency = (0.526, 0.5310, 0.536) resolution = 1000 [modis-12] name = '12' frequency = (0.546, 0.5510, 0.556) resolution = 1000 [modis-13lo] name = '13lo' frequency = (0.662, 0.6670, 0.672) resolution = 1000 [modis-13hi] name = '13hi' frequency = (0.662, 0.6670, 0.672) resolution = 1000 [modis-14lo] name = '14lo' frequency = (0.673, 0.6780, 0.683) resolution = 1000 [modis-14hi] name = '14hi' frequency = (0.673, 0.6780, 0.683) resolution = 1000 [modis-15] name = '15' frequency = (0.743, 0.7480, 0.753) resolution = 1000 [modis-16] name = '16' frequency = (0.862, 0.8695, 0.877) resolution = 1000 [modis-17] name = '17' frequency = (0.890, 0.9050, 0.920) resolution = 1000 [modis-18] name = '18' frequency = (0.931, 0.9360, 0.941) resolution = 1000 [modis-19] name = '19' frequency = (0.915, 0.9400, 0.965) resolution = 1000 [modis-20] name = '20' frequency = (3.660, 3.7500, 3.840) resolution = 1000 [modis-21] name = '21' frequency = (3.929, 3.9590, 3.989) resolution = 1000 [modis-22] name = '22' frequency = (3.929, 3.9590, 3.989) resolution = 1000 [modis-23] name = '23' frequency = (4.020, 4.0500, 4.080) resolution = 1000 [modis-24] name = '24' frequency = (4.433, 4.4655, 4.498) resolution = 1000 [modis-25] name = '25' frequency = (4.482, 4.5155, 4.549) resolution = 1000 [modis-26] name = '26' frequency = (1.360, 1.3750, 1.390) resolution = 1000 [modis-27] name = '27' frequency = (6.535, 6.7150, 6.895) resolution = 1000 [modis-28] name = '28' frequency = (7.175, 7.3250, 7.475) resolution = 1000 [modis-29] name = '29' frequency = (8.400, 8.5500, 8.700) resolution = 1000 [modis-30] name = '30' frequency = (9.580, 9.7300, 9.880) resolution = 1000 [modis-31] name = '31' frequency = (10.780, 11.0300, 11.280) resolution = 1000 [modis-32] name = '32' frequency = (11.770, 12.0200, 12.270) resolution = 
1000 [modis-33] name = '33' frequency = (13.185, 13.3350, 13.485) resolution = 1000 [modis-34] name = '34' frequency = (13.485, 13.6350, 13.785) resolution = 1000 [modis-35] name = '35' frequency = (13.785, 13.9350, 14.085) resolution = 1000 [modis-36] name = '36' frequency = (14.085, 14.2350, 14.385) resolution = 1000 mpop-1.5.0/etc/DREOS-Terra.cfg.template000066400000000000000000000066311317160620000174670ustar00rootroot00000000000000[satellite] satname='terra' number='' instruments=('modis',) [modis-level2] format=hdfeos_l1b.ModisReader dir=/data/prod/satellit/modis/lvl1 filename = MOD02{resolution:1s}km_A{start_time:%y%j_%H%M%S}_{processing_time:%Y%j%H%M%S}.hdf geofile=MOD03_A{start_time:%y%j_%H%M%S}_{processing_time:%Y%j%H%M%S}.hdf [modis-level3] format = nc_pps_l2.PPSReader cloud_product_filename = S_NWC_%(product)s_eos1_%(orbit)s_*.nc cloud_product_dir = /path/to/pps/output cloud_product_geofilename = S_NWC_CMA_eos1_%(orbit)s_*.nc geolocation_product_name = CMA [modis-1] name = '1' frequency = (0.620, 0.6450, 0.670) resolution = 250 [modis-2] name = '2' frequency = (0.841, 0.8585, 0.876) resolution = 250 [modis-3] name = '3' frequency = (0.459, 0.4690, 0.479) resolution = 500 [modis-4] name = '4' frequency = (0.545, 0.5550, 0.565) resolution = 500 [modis-5] name = '5' frequency = (1.230, 1.2400, 1.250) resolution = 500 [modis-6] name = '6' frequency = (1.628, 1.6400, 1.652) resolution = 500 [modis-7] name = '7' frequency = (2.105, 2.1300, 2.155) resolution = 500 [modis-8] name = '8' frequency = (0.405, 0.4125, 0.420) resolution = 1000 [modis-9] name = '9' frequency = (0.438, 0.4430, 0.448) resolution = 1000 [modis-10] name = '10' frequency = (0.483, 0.4880, 0.493) resolution = 1000 [modis-11] name = '11' frequency = (0.526, 0.5310, 0.536) resolution = 1000 [modis-12] name = '12' frequency = (0.546, 0.5510, 0.556) resolution = 1000 [modis-13lo] name = '13lo' frequency = (0.662, 0.6670, 0.672) resolution = 1000 [modis-13hi] name = '13hi' frequency = (0.662, 0.6670, 0.672) resolution = 1000 [modis-14lo] name = '14lo' frequency = (0.673, 0.6780, 0.683) resolution = 1000 [modis-14hi] name = '14hi' frequency = (0.673, 0.6780, 0.683) resolution = 1000 [modis-15] name = '15' frequency = (0.743, 0.7480, 0.753) resolution = 1000 [modis-16] name = '16' frequency = (0.862, 0.8695, 0.877) resolution = 1000 [modis-17] name = '17' frequency = (0.890, 0.9050, 0.920) resolution = 1000 [modis-18] name = '18' frequency = (0.931, 0.9360, 0.941) resolution = 1000 [modis-19] name = '19' frequency = (0.915, 0.9400, 0.965) resolution = 1000 [modis-20] name = '20' frequency = (3.660, 3.7500, 3.840) resolution = 1000 [modis-21] name = '21' frequency = (3.929, 3.9590, 3.989) resolution = 1000 [modis-22] name = '22' frequency = (3.929, 3.9590, 3.989) resolution = 1000 [modis-23] name = '23' frequency = (4.020, 4.0500, 4.080) resolution = 1000 [modis-24] name = '24' frequency = (4.433, 4.4655, 4.498) resolution = 1000 [modis-25] name = '25' frequency = (4.482, 4.5155, 4.549) resolution = 1000 [modis-26] name = '26' frequency = (1.360, 1.3750, 1.390) resolution = 1000 [modis-27] name = '27' frequency = (6.535, 6.7150, 6.895) resolution = 1000 [modis-28] name = '28' frequency = (7.175, 7.3250, 7.475) resolution = 1000 [modis-29] name = '29' frequency = (8.400, 8.5500, 8.700) resolution = 1000 [modis-30] name = '30' frequency = (9.580, 9.7300, 9.880) resolution = 1000 [modis-31] name = '31' frequency = (10.780, 11.0300, 11.280) resolution = 1000 [modis-32] name = '32' frequency = (11.770, 12.0200, 12.270) 
resolution = 1000 [modis-33] name = '33' frequency = (13.185, 13.3350, 13.485) resolution = 1000 [modis-34] name = '34' frequency = (13.485, 13.6350, 13.785) resolution = 1000 [modis-35] name = '35' frequency = (13.785, 13.9350, 14.085) resolution = 1000 [modis-36] name = '36' frequency = (14.085, 14.2350, 14.385) resolution = 1000 mpop-1.5.0/etc/EARSMetop-A.cfg.template000066400000000000000000000021571317160620000174540ustar00rootroot00000000000000[satellite] variant = EARS instruments = ('avhrr/3',) [avhrr/3-level3] format = nc_pps_l2.PPSReader cloud_product_filename = W_XX-EUMETSAT-Darmstadt,SING+LEV+SAT,METOPA+%(product)s_C_EUMS_%Y%m%d%H%M00_%(orbit)s.nc* cloud_product_geofilename = W_XX-EUMETSAT-Darmstadt,SING+LEV+SAT,METOPA+CMA_C_EUMS_%Y%m%d%H%M00_%(orbit)s.nc* [avhrr/3-level1] format=eps1a shortname=M02 dir=/path/to/my/ears/avhrr filename=AVHR_HRP_00_M02_%Y%m%d%H%M* [avhrr/3-granules] type=bzipped eps1a granularity=60 full_scan_period=0.1667 scan_width=2048 dir=/path/to/my/ears/avhrr filename=AVHR_HRP_00_M02_%Y%m%d%H%M* [avhrr/3-1] name = '1' frequency = (0.58, 0.63, 0.68) resolution = 1090 size = (2048,) [avhrr/3-2] name = '2' frequency = (0.725, 0.8625, 1.0) resolution = 1090 size = (2048,) [avhrr/3-3] name = '3A' frequency = (1.58, 1.61, 1.64) resolution = 1090 size = (2048,) [avhrr/3-4] name = '3B' frequency = (3.55, 3.74, 3.93) resolution = 1090 size = (2048,) [avhrr/3-5] name = '4' frequency = (10.3, 10.8, 11.3) resolution = 1090 size = (2500, 2500) [avhrr/3-6] name = '5' frequency = (11.5, 12.0, 12.5) resolution = 1090 size = (2500, 2500) mpop-1.5.0/etc/EARSMetop-B.cfg.template000066400000000000000000000023001317160620000174430ustar00rootroot00000000000000[satellite] satname = Metop-B number = variant = EARS instruments = ('avhrr/3',) [avhrr/3-level3] format = nc_pps_l2.PPSReader cloud_product_dir = /path/to/ears/nwc cloud_product_filename = W_XX-EUMETSAT-Darmstadt,SING+LEV+SAT,METOPB+%(product)s_C_EUMS_%Y%m%d%H%M00_*.nc* cloud_product_geofilename = W_XX-EUMETSAT-Darmstadt,SING+LEV+SAT,METOPB+CMA_C_EUMS_%Y%m%d%H%M00_*.nc* geolocation_product_name = CMA [avhrr/3-level1] format=eps1a shortname=M01 dir=/path/to/my/ears/avhrr filename=AVHR_HRP_00_M01_%Y%m%d%H%M* [avhrr/3-granules] type=bzipped eps1a granularity=60 full_scan_period=0.1667 scan_width=2048 dir=/path/to/my/ears/avhrr filename=AVHR_HRP_00_M02_%Y%m%d%H%M* [avhrr/3-1] name = '1' frequency = (0.58, 0.63, 0.68) resolution = 1090 size = (2048,) [avhrr/3-2] name = '2' frequency = (0.725, 0.8625, 1.0) resolution = 1090 size = (2048,) [avhrr/3-3] name = '3A' frequency = (1.58, 1.61, 1.64) resolution = 1090 size = (2048,) [avhrr/3-4] name = '3B' frequency = (3.55, 3.74, 3.93) resolution = 1090 size = (2048,) [avhrr/3-5] name = '4' frequency = (10.3, 10.8, 11.3) resolution = 1090 size = (2500, 2500) [avhrr/3-6] name = '5' frequency = (11.5, 12.0, 12.5) resolution = 1090 size = (2500, 2500) mpop-1.5.0/etc/EARSNOAA-19.cfg.template000066400000000000000000000024101317160620000171470ustar00rootroot00000000000000[satellite] variant = EARS instruments = ('avhrr/3',) [avhrr/3-level3] format = nc_pps_l2.PPSReader # W_XX-EUMETSAT-Darmstadt,SING+LEV+SAT,NOAA19+CTTH_C_EUMS_20150820141700_33658.nc.bz2 cloud_product_dir = /path/to/ears/nwc cloud_product_filename = W_XX-EUMETSAT-Darmstadt,SING+LEV+SAT,NOAA19+%(product)s_C_EUMS_%Y%m%d%H%M00_*.nc* cloud_product_geofilename = W_XX-EUMETSAT-Darmstadt,SING+LEV+SAT,NOAA19+CMA_C_EUMS_%Y%m%d%H%M00_*.nc* geolocation_product_name = CMA [avhrr/3-level1] format=hrpt shortname=noaa19 
dir=/path/to/my/ears/avhrr filename=avhrr_%Y%m%d%_H%M%S_noaa19.hrp*bz2 [avhrr/3-granules] type=bzipped hrpt granularity=60 full_scan_period=0.1667 scan_width=2048 dir=/path/to/my/ears/avhrr filename=avhrr_%Y%m%d_%H%M%S_noaa19.hrp.bz2 [avhrr/3-1] name = '1' frequency = (0.58, 0.63, 0.68) resolution = 1090 size = (2048,) [avhrr/3-2] name = '2' frequency = (0.725, 0.8625, 1.0) resolution = 1090 size = (2048,) [avhrr/3-3] name = '3A' frequency = (1.58, 1.61, 1.64) resolution = 1090 size = (2048,) [avhrr/3-4] name = '3B' frequency = (3.55, 3.74, 3.93) resolution = 1090 size = (2048,) [avhrr/3-5] name = '4' frequency = (10.3, 10.8, 11.3) resolution = 1090 size = (2500, 2500) [avhrr/3-6] name = '5' frequency = (11.5, 12.0, 12.5) resolution = 1090 size = (2500, 2500)mpop-1.5.0/etc/EARSSuomi-NPPs.cfg.template000066400000000000000000000046701317160620000201260ustar00rootroot00000000000000[satellite] variant = EARS instruments = ('viirs',) [viirs-level2] filename = SVMC_npp_d%Y%m%d_t%H%M%S?_e*_eum_ops.h5 dir = /local_disk/data/satellite/polar/compact_viirs format = viirs_compact [viirs-level3] format = nwcsaf_pps geofilename = %(satellite)s_%Y%m%d_%H%M_%(orbit)s_satproj_*_*_viirs.h5 filename = %(satellite)s_%Y%m%d_%H%M_%(orbit)s_satproj_*_*_%(product)s.h5 geodir = /san1/pps/import/PPS_data/remapped dir = /local_disk/data/pps/export [viirs-m01] frequency = (0.402, 0.412, 0.422) resolution = 742 name = 'M01' size = (3200,) [viirs-m02] frequency = (0.436, 0.445, 0.454) resolution = 742 name = 'M02' size = (3200,) [viirs-m03] frequency = (0.478, 0.488, 0.498) resolution = 742 name = 'M03' size = (3200,) [viirs-m04] frequency = (0.545, 0.555, 0.565) resolution = 742 name = 'M04' size = (3200,) [viirs-m05] frequency = (0.662, 0.672, 0.682) resolution = 742 name = 'M05' size = (3200,) [viirs-m06] frequency = (0.739, 0.746, 0.754) resolution = 742 name = 'M06' size = (3200,) [viirs-m07] frequency = (0.846, 0.865, 0.885) resolution = 742 name = 'M07' size = (3200,) [viirs-m08] frequency = (1.230, 1.240, 1.250) resolution = 742 name = 'M08' size = (3200,) [viirs-m09] frequency = (1.371, 1.378, 1.386) resolution = 742 name = 'M09' size = (3200,) [viirs-m10] frequency = (1.580, 1.610, 1.640) resolution = 742 name = 'M10' size = (3200,) [viirs-m11] frequency = (2.225, 2.250, 2.275) resolution = 742 name = 'M11' size = (3200,) [viirs-m12] frequency = (3.610, 3.700, 3.790) resolution = 742 name = 'M12' size = (3200,) [viirs-m13] frequency = (3.973, 4.050, 4.128) resolution = 742 name = 'M13' size = (3200,) [viirs-m14] frequency = (8.400, 8.550, 8.700) resolution = 742 name = 'M14' size = (3200,) [viirs-m15] frequency = (10.263, 10.763, 11.263) resolution = 742 name = 'M15' size = (3200,) [viirs-m16] frequency = (11.538, 12.013, 12.489) resolution = 742 name = 'M16' size = (3200,) [viirs-i01] frequency = (0.600, 0.640, 0.680) resolution = 371 name = 'I01' size = (6400,) [viirs-i02] frequency = (0.845, 0.865, 0.884) resolution = 371 name = 'I02' size = (6400,) [viirs-i03] frequency = (1.580, 1.610, 1.640) resolution = 371 name = 'I03' size = (6400,) [viirs-i04] frequency = (3.580, 3.740, 3.900) resolution = 371 name = 'I04' size = (6400,) [viirs-i05] frequency = (10.500, 11.450, 12.300) resolution = 371 name = 'I05' size = (6400,) [viirs-dnb] frequency = (0.500, 0.700, 0.900) resolution = 743 name = 'DNB' size = (4064,) mpop-1.5.0/etc/Electro-L_N1.cfg.template000066400000000000000000000031071317160620000176570ustar00rootroot00000000000000# # Level 1.5 configuration file for Electro L N1 # # An item like: # name = 
value # is read in python like: # try: # name = eval(value) # except NameError: # name = str(value) # [satellite] satname = 'electro-l' number = 'n1' instruments = ('msu-gs',) projection = 'geos(+76.0)' [msu-gs-level2] format = 'mipp_xrit.XritReader' [msu-gs-level1] format = 'mipp/xrit/GOMS' dir = '/data/electro' filename = 'H-000-GOMS1_-GOMS1_4_____-%(channel)s_076E-%(segment)s-%Y%m%d%H%M-__' filename_pro = 'H-000-GOMS1_-GOMS1_4_____-_________-%(segment)s-%Y%m%d%H%M-__' filename_epi = 'H-000-GOMS1_-GOMS1_4_____-_________-%(segment)s-%Y%m%d%H%M-__' [msu-gs-1] name = '00_6' frequency = (0.5, 0.57, 0.65) resolution = 4000.0 size = (2784, 2784) [msu-gs-2] name = '00_7' frequency = (0.65, 0.72, 0.8) resolution = 4000.0 size = (2784, 2784) [msu-gs-3] name = '00_9' frequency = (0.8, 0.86, 0.9) resolution = 4000.0 size = (2784, 2784) [msu-gs-4] name = '03_8' frequency = (3.5, 3.75, 4.0) resolution = 4000.0 size = (2784, 2784) [msu-gs-5] name = '06_4' frequency = (5.7, 6.35, 7.0) resolution = 4000.0 size = (2784, 2784) [msu-gs-6] name = '08_0' frequency = (7.5, 8.0, 8.5) resolution = 4000.0 size = (2784, 2784) [msu-gs-7] name = '08_7' frequency = (8.2, 8.7, 9.2) resolution = 4000.0 size = (2784, 2784) [msu-gs-8] name = '09_7' frequency = (9.2, 9.7, 10.2) resolution = 4000.0 size = (2784, 2784) [msu-gs-9] name = '10_7' frequency = (10.2, 10.7, 11.2) resolution = 4000.0 size = (2784, 2784) [msu-gs-10] name = '11_9' frequency = (11.2, 11.7, 12.5) resolution = 4000.0 size = (2784, 2784)mpop-1.5.0/etc/FY-3A.cfg.template000066400000000000000000000031371317160620000163150ustar00rootroot00000000000000[satellite] instruments = ('mersi',) [modis-level2] format=fy3_mersi_aggr1km dir=/data/prod/satellit/fy3a [mersi-1] name = '1' frequency = (0.420, 0.4700, 0.520) resolution = 1000 [mersi-2] name = '2' frequency = (0.500, 0.5500, 0.600) resolution = 1000 [mersi-3] name = '3' frequency = (0.600, 0.6500, 0.700) resolution = 1000 [mersi-4] name = '4' frequency = (0.815, 0.8650, 0.915) resolution = 1000 [mersi-5] name = '5' frequency = (8.750, 11.2500, 13.750) resolution = 1000 [mersi-6] name = '6' frequency = (0.392, 0.4120, 0.432) resolution = 1000 [mersi-7] name = '7' frequency = (0.423, 0.4430, 0.463) resolution = 1000 [mersi-8] name = '8' frequency = (0.470, 0.4900, 0.510) resolution = 1000 [mersi-9] name = '9' frequency = (0.500, 0.5200, 0.540) resolution = 1000 [mersi-10] name = '10' frequency = (0.545, 0.5650, 0.585) resolution = 1000 [mersi-11] name = '11' frequency = (0.630, 0.6500, 0.670) resolution = 1000 [mersi-12] name = '12' frequency = (0.665, 0.6850, 0.705) resolution = 1000 [mersi-13] name = '13' frequency = (0.745, 0.7650, 0.785) resolution = 1000 [mersi-14] name = '14' frequency = (0.845, 0.8650, 0.885) resolution = 1000 [mersi-15] name = '15' frequency = (0.885, 0.9050, 0.925) resolution = 1000 [mersi-16] name = '16' frequency = (0.920, 0.9400, 0.960) resolution = 1000 [mersi-17] name = '17' frequency = (0.960, 0.9800, 1.000) resolution = 1000 [mersi-18] name = '18' frequency = (0.983, 1.0300, 1.050) resolution = 1000 [mersi-19] name = '19' frequency = (1.590, 1.6400, 1.690) resolution = 1000 [mersi-20] name = '20' frequency = (2.080, 2.1300, 2.180) resolution = 1000 mpop-1.5.0/etc/FY-3B.cfg.template000066400000000000000000000016571317160620000163230ustar00rootroot00000000000000[satellite] instruments = ('virr',) satname = fy3 [virr-level2] format=fy3_virr # clear_FY3B_26293_02-DEC-2015_14-02-13_VIRRX_L1B.HDF dir=/data/to/my/fengyun3/virr/file/ filename = 
clear_FY3B_%(orbit)s_%d-%b-%Y_%H-%M-%S_VIRRX_L1B.HDF [virr-1] name = '1' frequency = (0.58, 0.63, 0.68) resolution = 1000 [virr-2] name = '2' frequency = (0.84, 0.865, 0.89) resolution = 1000 [virr-3] name = '3' frequency = (3.55, 3.74, 3.93) resolution = 1000 [virr-4] name = '4' frequency = (10.3, 10.8, 11.3) resolution = 1000 [virr-5] name = '5' frequency = (11.5, 12.0, 12.5) resolution = 1000 [virr-6] name = '6' frequency = (1.55, 1.6, 1.64) resolution = 1000 [virr-7] name = '7' frequency = (0.43, 0.455, 0.48) resolution = 1000 [virr-8] name = '8' frequency = (0.48, 0.505, 0.53) resolution = 1000 [virr-9] name = '9' frequency = (0.53, 0.555, 0.58) resolution = 1000 [virr-10] name = '10' frequency = (1.325, 1.36, 1.395) resolution = 1000 mpop-1.5.0/etc/GDSMetop-B.cfg.template000066400000000000000000000017221317160620000173350ustar00rootroot00000000000000[satellite] variant = GDS instruments = ('avhrr/3',) [avhrr/3-1] frequency = (0.58, 0.63, 0.68) resolution = 1090 name = '1' size = (2048,) [avhrr/3-2] frequency = (0.725, 0.8625, 1.0) resolution = 1090 name = '2' size = (2048,) [avhrr/3-3] frequency = (1.58, 1.61, 1.64) resolution = 1090 name = '3A' size = (2048,) [avhrr/3-4] frequency = (3.55, 3.74, 3.93) resolution = 1090 name = '3B' size = (2048,) [avhrr/3-5] frequency = (10.3, 10.8, 11.3) resolution = 1090 name = '4' size = (2048,) [avhrr/3-6] frequency = (11.5, 12.0, 12.5) resolution = 1090 name = '5' size = (2048,) [avhrr/3-level2] format=eps_l1b filename=AVHR_xxx_1B_M01_%Y%m%d%H%M%SZ* dir=/path/to/satellite/data/ [avhrr/3-level3] format=h5_pps_l2.PPSReader cloud_product_filename=S_NWC_%(product)s_metopb_%(orbit)s_*.h5 cloud_product_dir=/data/proj/safutv/data/polar_out/direct_readout/ cloud_product_geofilename=S_NWC_avhrr_metopb_%(orbit)s_*.h5 cloud_product_geodir=/san1/pps/import/PPS_data/remapped mpop-1.5.0/etc/GOES-13.cfg.template000066400000000000000000000015701317160620000165130ustar00rootroot00000000000000# # Level 1.5 configuration file for GOES13 # # An item like: # name = value # is read in python like: # try: # name = eval(value) # except NameError: # name = str(value) # [satellite] satname = 'goes' number = '13' instruments = ('imager13',) projection = 'geos(-75.0)' [imager13-level2] format = 'mipp_xrit.XritReader' [imager13-level1] format = 'mipp/xrit/SGS' dir = '/data/xrit/out' filename = 'L-000-MSG?__-GOES13______-%(channel)s_075W-%(segment)s-%Y%m%d%H%M-__' [imager13-1] name = '00_7' frequency = (0.5, 0.7, 0.9) resolution = 4006.5756 size = (2816, 2816) [imager13-2] name = '03_9' frequency = (2.9, 3.9, 4.9) resolution = 4006.5756 size = (2816, 2816) [imager13-3] name = '06_6' frequency = (5.9, 6.6, 7.3) resolution = 4006.5756 size = (2816, 2816) [imager13-4] name = '10_7' frequency = (9.7, 10.7, 11.7) resolution = 4006.5756 size = (2816, 2816) mpop-1.5.0/etc/GOES-15.cfg.template000066400000000000000000000015711317160620000165160ustar00rootroot00000000000000# # Level 1.5 configuration file for GOES15 # # An item like: # name = value # is read in python like: # try: # name = eval(value) # except NameError: # name = str(value) # [satellite] satname = 'goes' number = '15' instruments = ('imager15',) projection = 'geos(-135.0)' [imager15-level2] format = 'mipp_xrit.XritReader' [imager15-level1] format = 'mipp/xrit/SGS' dir = '/data/xrit/out' filename = 'L-000-MSG?__-GOES15______-%(channel)s_135W-%(segment)s-%Y%m%d%H%M-__' [imager15-1] name = '00_7' frequency = (0.5, 0.7, 0.9) resolution = 4006.5756 size = (2816, 2816) [imager15-2] name = '03_9' frequency = (2.9, 3.9, 4.9) 
resolution = 4006.5756 size = (2816, 2816) [imager15-3] name = '06_6' frequency = (5.9, 6.6, 7.3) resolution = 4006.5756 size = (2816, 2816) [imager15-4] name = '10_7' frequency = (9.7, 10.7, 11.7) resolution = 4006.5756 size = (2816, 2816) mpop-1.5.0/etc/Himawari-6.cfg.template000066400000000000000000000015171317160620000174420ustar00rootroot00000000000000# # Level 1.5 configuration file for MTSAT1R # # An item like: # name = value # is read in python like: # try: # name = eval(value) # except: # name = str(value) # [satellite] satname = 'mtsat' number = '1r' instruments = ('mviri',) projection = 'geos(140.0)' [mviri-level2] format = 'mipp_xrit.XritReader' [mviri-level1] format = 'mipp/xrit/SGS' dir = '/data/xrit/out' filename = 'L-000-MSG?__-MTSAT1R_____-%(channel)s_140E-%(segment)s-%Y%m%d%H%M-__' [mviri-1] name = '00_7' frequency = (0.5, 0.7, 0.9) resolution = 4000.0 size = (2752, 2752) [mviri-2] name = '03_8' frequency = (2.8, 3.8, 4.8) resolution = 4000.0 size = (2752, 2752) [mviri-3] name = '06_8' frequency = (6.1, 6.8, 7.5) resolution = 4000.0 size = (2752, 2752) [mviri-4] name = '10_8' frequency = (9.8, 10.8, 11.8) resolution = 4000.0 size = (2752, 2752) mpop-1.5.0/etc/Himawari-7.cfg.template000066400000000000000000000015151317160620000174410ustar00rootroot00000000000000# # Level 1.5 configuration file for MTSAT2 # # An item like: # name = value # is read in python like: # try: # name = eval(value) # except: # name = str(value) # [satellite] satname = 'mtsat' number = '2' instruments = ('mviri',) projection = 'geos(145.0)' [mviri-level2] format = 'mipp_xrit.XritReader' [mviri-level1] format = 'mipp/xrit/SGS' dir = '/data/xrit/out' filename = 'L-000-MSG?__-MTSAT2______-%(channel)s_145E-%(segment)s-%Y%m%d%H%M-__' [mviri-1] name = '00_7' frequency = (0.5, 0.7, 0.9) resolution = 4000.0 size = (2752, 2752) [mviri-2] name = '03_8' frequency = (2.8, 3.8, 4.8) resolution = 4000.0 size = (2752, 2752) [mviri-3] name = '06_8' frequency = (6.1, 6.8, 7.5) resolution = 4000.0 size = (2752, 2752) [mviri-4] name = '10_8' frequency = (9.8, 10.8, 11.8) resolution = 4000.0 size = (2752, 2752) mpop-1.5.0/etc/Himawari-8.cfg.template000066400000000000000000000037101317160620000174410ustar00rootroot00000000000000# # Level 1.5 configuration file for MTSAT2 # # An item like: # name = value # is read in python like: # try: # name = eval(value) # except: # name = str(value) # [satellite] satname = 'himawari' number = '8' instruments = ('ahi',) projection = 'geos(140.7)' [ahi-level2] format = 'mipp_xrit' [ahi-level1] format = 'mipp/xrit/H8' dir = '/home/a001673/data/satellite/Himawari-8/ahi/lvl1.5/2015/08/04/JMA-HRIT' filename = 'IMG_DK01%(channel)s_%Y%m%d%H%M_%(segment)s' [ahi-1] name = 'B01' frequency = (0.430, 0.455, 0.480) resolution = 2000.0 size = (5500, 5500) [ahi-2] name = 'B02' frequency = (0.50, 0.51, 0.52) resolution = 2000.0 size = (5500, 5500) [ahi-3] name = 'VIS' frequency = (0.630, 0.645, 0.660) resolution = 2000.0 size = (5500, 5500) [ahi-4] name = 'B04' frequency = (0.850, 0.860, 0.870) resolution = 2000.0 size = (5500, 5500) [ahi-5] name = 'B05' frequency = (1.6, 1.61, 1.62) resolution = 2000.0 size = (5500, 5500) [ahi-6] name = 'B06' frequency = (2.16, 2.26, 2.36) resolution = 2000.0 size = (5500, 5500) [ahi-7] name = 'IR4' frequency = (3.74, 3.85, 3.96) resolution = 2000.0 size = (5500, 5500) [ahi-8] name = 'IR3' frequency = (6.06, 6.25, 6.43) resolution = 2000.0 size = (5500, 5500) [ahi-9] name = 'B09' frequency = (6.89, 6.95, 7.01) resolution = 2000.0 size = (5500, 5500) [ahi-10] name = 
'B10' frequency = (7.23, 7.35, 7.49) resolution = 2000.0 size = (5500, 5500) [ahi-11] name = 'B11' frequency = (8.44, 8.6, 8.74) resolution = 2000.0 size = (5500, 5500) [ahi-12] name = 'B12' frequency = (9.54, 9.63, 9.72) resolution = 2000.0 size = (5500, 5500) [ahi-13] name = 'IR1' frequency = (10.3, 10.45, 10.6) resolution = 2000.0 size = (5500, 5500) [ahi-14] name = 'B14' frequency = (11.1, 11.2, 11.3) resolution = 2000.0 size = (5500, 5500) [ahi-15] name = 'IR2' frequency = (12.2, 12.35, 12.5) resolution = 2000.0 size = (5500, 5500) [ahi-16] name = 'B16' frequency = (13.2, 13.3, 13.4) resolution = 2000.0 size = (5500, 5500) mpop-1.5.0/etc/Meteosat-10.cfg.template000066400000000000000000000036731317160620000175420ustar00rootroot00000000000000[satellite] satname = 'meteosat' number = '10' projection = 'geos(0.0)' instruments = ('seviri',) proj4_params = 'proj=geos lon_0=0.00 lat_0=0.00 a=6378169.00 b=6356583.80 h=35785831.00' [seviri-level1] filename_epi = 'H-000-MSG?__-MSG?________-_________-%(segment)s-%Y%m%d%H%M-__' format = 'mipp/xrit/MSG' filename_pro = 'H-000-MSG?__-MSG?________-_________-%(segment)s-%Y%m%d%H%M-__' filename = 'H-000-MSG?__-MSG?________-%(channel)s-%(segment)s-%Y%m%d%H%M-__' dir = '/local_disk/data/satellite/met10' [seviri-level2] format = 'mipp_xrit.XritReader' [seviri-level3] filename = SAFNWC_MSG?_%(product)s%Y%m%d%H%M_%(area)s dir = /home/a001673/usr/src/msg/export/PGE%(number)s format = 'msg_hdf' [seviri-1] frequency = (0.56, 0.635, 0.71) resolution = 3000.403165817 name = 'VIS006' size = (3712, 3712) [seviri-2] frequency = (0.74, 0.81, 0.88) resolution = 3000.403165817 name = 'VIS008' size = (3712, 3712) [seviri-3] frequency = (1.5, 1.64, 1.78) resolution = 3000.403165817 name = 'IR_016' size = (3712, 3712) [seviri-4] frequency = (3.48, 3.92, 4.36) resolution = 3000.403165817 name = 'IR_039' size = (3712, 3712) [seviri-5] frequency = (5.35, 6.25, 7.15) resolution = 3000.403165817 name = 'WV_062' size = (3712, 3712) [seviri-6] frequency = (6.85, 7.35, 7.85) resolution = 3000.403165817 name = 'WV_073' size = (3712, 3712) [seviri-7] frequency = (8.3, 8.7, 9.1) resolution = 3000.403165817 name = 'IR_087' size = (3712, 3712) [seviri-8] frequency = (9.38, 9.66, 9.94) resolution = 3000.403165817 name = 'IR_097' size = (3712, 3712) [seviri-9] frequency = (9.8, 10.8, 11.8) resolution = 3000.403165817 name = 'IR_108' size = (3712, 3712) [seviri-10] frequency = (11.0, 12.0, 13.0) resolution = 3000.403165817 name = 'IR_120' size = (3712, 3712) [seviri-11] frequency = (12.4, 13.4, 14.4) resolution = 3000.403165817 name = 'IR_134' size = (3712, 3712) [seviri-12] frequency = (0.5, 0.7, 0.9) resolution = 1000.134348869 name = 'HRV' size = (11136, 11136) mpop-1.5.0/etc/Meteosat-11.cfg.template000066400000000000000000000036731317160620000175430ustar00rootroot00000000000000[satellite] satname = 'meteosat' number = '11' projection = 'geos(0.0)' instruments = ('seviri',) proj4_params = 'proj=geos lon_0=0.00 lat_0=0.00 a=6378169.00 b=6356583.80 h=35785831.00' [seviri-level1] filename_epi = 'H-000-MSG?__-MSG?________-_________-%(segment)s-%Y%m%d%H%M-__' format = 'mipp/xrit/MSG' filename_pro = 'H-000-MSG?__-MSG?________-_________-%(segment)s-%Y%m%d%H%M-__' filename = 'H-000-MSG?__-MSG?________-%(channel)s-%(segment)s-%Y%m%d%H%M-__' dir = '/local_disk/data/satellite/met10' [seviri-level2] format = 'mipp_xrit.XritReader' [seviri-level3] filename = SAFNWC_MSG?_%(product)s%Y%m%d%H%M_%(area)s dir = /home/a001673/usr/src/msg/export/PGE%(number)s format = 'msg_hdf' [seviri-1] frequency = 
(0.56, 0.635, 0.71) resolution = 3000.403165817 name = 'VIS006' size = (3712, 3712) [seviri-2] frequency = (0.74, 0.81, 0.88) resolution = 3000.403165817 name = 'VIS008' size = (3712, 3712) [seviri-3] frequency = (1.5, 1.64, 1.78) resolution = 3000.403165817 name = 'IR_016' size = (3712, 3712) [seviri-4] frequency = (3.48, 3.92, 4.36) resolution = 3000.403165817 name = 'IR_039' size = (3712, 3712) [seviri-5] frequency = (5.35, 6.25, 7.15) resolution = 3000.403165817 name = 'WV_062' size = (3712, 3712) [seviri-6] frequency = (6.85, 7.35, 7.85) resolution = 3000.403165817 name = 'WV_073' size = (3712, 3712) [seviri-7] frequency = (8.3, 8.7, 9.1) resolution = 3000.403165817 name = 'IR_087' size = (3712, 3712) [seviri-8] frequency = (9.38, 9.66, 9.94) resolution = 3000.403165817 name = 'IR_097' size = (3712, 3712) [seviri-9] frequency = (9.8, 10.8, 11.8) resolution = 3000.403165817 name = 'IR_108' size = (3712, 3712) [seviri-10] frequency = (11.0, 12.0, 13.0) resolution = 3000.403165817 name = 'IR_120' size = (3712, 3712) [seviri-11] frequency = (12.4, 13.4, 14.4) resolution = 3000.403165817 name = 'IR_134' size = (3712, 3712) [seviri-12] frequency = (0.5, 0.7, 0.9) resolution = 1000.134348869 name = 'HRV' size = (11136, 11136) mpop-1.5.0/etc/Meteosat-7.cfg.template000066400000000000000000000016301317160620000174570ustar00rootroot00000000000000# # Level 1.5 configuration file for Meteosat-7 # # An item like: # name = value # is read in python like: # try: # name = eval(value) # except NameError: # name = str(value) # [satellite] satname = 'meteosat' number = '07' instruments = ('mviri',) projection = 'geos(57.0)' proj4_params = 'proj=geos lon_0=57.00 lat_0=0.00 a=6378140.00 b=6356755.00 h=35785831.00' [mviri-level2] format = 'mipp_xrit.XritReader.' [mviri-level1] format = 'mipp/xrit/MTP' #dir = '/data/prod/satellit/meteosat7' dir = '/local_disk/data/satellite/met7' filename = 'L-000-MTP___-MET7________-%(channel)s_057E-%(segment)s-%Y%m%d%H%M-__' [mviri-1] name = '00_7' frequency = (0.5, 0.7, 0.9) resolution = 2248.49 size = (5000, 5000) [mviri-2] name = '06_4' frequency = (5.7, 6.4, 7.1) resolution = 4496.98 size = (2500, 2500) [mviri-3] name = '11_5' frequency = (10.5, 11.5, 12.5) resolution = 4496.98 size = (2500, 2500) mpop-1.5.0/etc/Meteosat-9.cfg.template000066400000000000000000000037161317160620000174700ustar00rootroot00000000000000[satellite] satname = 'meteosat' number = '09' projection = 'geos(0.0)' instruments = ('seviri',) proj4_params = 'proj=geos lon_0=0.00 lat_0=0.00 a=6378169.00 b=6356583.80 h=35785831.00' [seviri-level1] filename_epi = 'H-000-MSG?__-MSG?________-_________-%(segment)s-%Y%m%d%H%M-__' format = 'mipp/xrit/MSG' filename_pro = 'H-000-MSG?__-MSG?________-_________-%(segment)s-%Y%m%d%H%M-__' filename = 'H-000-MSG?__-MSG?________-%(channel)s-%(segment)s-%Y%m%d%H%M-__' dir = '/local_disk/usr/src/msg/import/SEVIRI_data.20091008' [seviri-level2] format = 'mipp_xrit.XritReader' [seviri-level3] filename = SAFNWC_MSG?_%(product)s%Y%m%d%H%M_%(area)s dir = /home/a001673/usr/src/msg/export/PGE%(number)s format = 'msg_hdf' [seviri-1] frequency = (0.56, 0.635, 0.71) resolution = 3000.403165817 name = 'VIS006' size = (3712, 3712) [seviri-2] frequency = (0.74, 0.81, 0.88) resolution = 3000.403165817 name = 'VIS008' size = (3712, 3712) [seviri-3] frequency = (1.5, 1.64, 1.78) resolution = 3000.403165817 name = 'IR_016' size = (3712, 3712) [seviri-4] frequency = (3.48, 3.92, 4.36) resolution = 3000.403165817 name = 'IR_039' size = (3712, 3712) [seviri-5] frequency = (5.35, 6.25, 7.15) 
resolution = 3000.403165817 name = 'WV_062' size = (3712, 3712) [seviri-6] frequency = (6.85, 7.35, 7.85) resolution = 3000.403165817 name = 'WV_073' size = (3712, 3712) [seviri-7] frequency = (8.3, 8.7, 9.1) resolution = 3000.403165817 name = 'IR_087' size = (3712, 3712) [seviri-8] frequency = (9.38, 9.66, 9.94) resolution = 3000.403165817 name = 'IR_097' size = (3712, 3712) [seviri-9] frequency = (9.8, 10.8, 11.8) resolution = 3000.403165817 name = 'IR_108' size = (3712, 3712) [seviri-10] frequency = (11.0, 12.0, 13.0) resolution = 3000.403165817 name = 'IR_120' size = (3712, 3712) [seviri-11] frequency = (12.4, 13.4, 14.4) resolution = 3000.403165817 name = 'IR_134' size = (3712, 3712) [seviri-12] frequency = (0.5, 0.7, 0.9) resolution = 1000.134348869 name = 'HRV' size = (11136, 11136) mpop-1.5.0/etc/Metop-B.cfg.template000066400000000000000000000017071317160620000170020ustar00rootroot00000000000000[satellite] variant = instruments = ('avhrr/3',) [avhrr/3-level2] filename = hrpt_metop01_%Y%m%d_%H%M_%(orbit)s.l1b dir = /path/to/aapp/output/metop01_%Y%m%d_%H%M_%(orbit)s format = aapp1b [avhrr/3-level3] format = nc_pps_l2.PPSReader cloud_product_filename = S_NWC_%(product)s_metopb_%(orbit)s_*.nc cloud_product_dir = /path/to/pps/output cloud_product_geofilename = S_NWC_CMA_metopb_%(orbit)s_*.nc geolocation_product_name = CMA [avhrr/3-1] frequency = (0.58, 0.63, 0.68) resolution = 1090 name = '1' size = (2048,) [avhrr/3-2] frequency = (0.725, 0.8625, 1.0) resolution = 1090 name = '2' size = (2048,) [avhrr/3-3] frequency = (1.58, 1.61, 1.64) resolution = 1090 name = '3A' size = (2048,) [avhrr/3-4] frequency = (3.55, 3.74, 3.93) resolution = 1090 name = '3B' size = (2048,) [avhrr/3-5] frequency = (10.3, 10.8, 11.3) resolution = 1090 name = '4' size = (2048,) [avhrr/3-6] frequency = (11.5, 12.0, 12.5) resolution = 1090 name = '5' size = (2048,) mpop-1.5.0/etc/NOAA-15.cfg.template000066400000000000000000000017251317160620000165000ustar00rootroot00000000000000[satellite] variant = instruments = ('avhrr/3',) [avhrr/3-level2] filename = hrpt_%(satellite)s_%Y%m%d_%H%M_%(orbit)s.l1b dir = /path/to/aapp/output/%(satellite)s_%Y%m%d_%H%M_%(orbit)s format = aapp1b [avhrr/3-level3] format = nc_pps_l2.PPSReader cloud_product_filename = S_NWC_%(product)s_noaa15_%(orbit)s_*.nc cloud_product_dir = /path/to/pps/output cloud_product_geofilename = S_NWC_CMA_noaa15_%(orbit)s_*.nc geolocation_product_name = CMA [avhrr/3-1] frequency = (0.58, 0.63, 0.68) resolution = 1090 name = '1' size = (2048,) [avhrr/3-2] frequency = (0.725, 0.8625, 1.0) resolution = 1090 name = '2' size = (2048,) [avhrr/3-3] frequency = (1.58, 1.61, 1.64) resolution = 1090 name = '3A' size = (2048,) [avhrr/3-4] frequency = (3.55, 3.74, 3.93) resolution = 1090 name = '3B' size = (2048,) [avhrr/3-5] frequency = (10.3, 10.8, 11.3) resolution = 1090 name = '4' size = (2048,) [avhrr/3-6] frequency = (11.5, 12.0, 12.5) resolution = 1090 name = '5' size = (2048,) mpop-1.5.0/etc/NOAA-18.cfg.template000066400000000000000000000017251317160620000165030ustar00rootroot00000000000000[satellite] variant = instruments = ('avhrr/3',) [avhrr/3-level2] filename = hrpt_%(satellite)s_%Y%m%d_%H%M_%(orbit)s.l1b dir = /path/to/aapp/output/%(satellite)s_%Y%m%d_%H%M_%(orbit)s format = aapp1b [avhrr/3-level3] format = nc_pps_l2.PPSReader cloud_product_filename = S_NWC_%(product)s_noaa18_%(orbit)s_*.nc cloud_product_dir = /path/to/pps/output cloud_product_geofilename = S_NWC_CMA_noaa18_%(orbit)s_*.nc geolocation_product_name = CMA [avhrr/3-1] frequency = (0.58, 0.63, 
0.68) resolution = 1090 name = '1' size = (2048,) [avhrr/3-2] frequency = (0.725, 0.8625, 1.0) resolution = 1090 name = '2' size = (2048,) [avhrr/3-3] frequency = (1.58, 1.61, 1.64) resolution = 1090 name = '3A' size = (2048,) [avhrr/3-4] frequency = (3.55, 3.74, 3.93) resolution = 1090 name = '3B' size = (2048,) [avhrr/3-5] frequency = (10.3, 10.8, 11.3) resolution = 1090 name = '4' size = (2048,) [avhrr/3-6] frequency = (11.5, 12.0, 12.5) resolution = 1090 name = '5' size = (2048,) mpop-1.5.0/etc/NOAA-19.cfg.template000066400000000000000000000017251317160620000165040ustar00rootroot00000000000000[satellite] variant = instruments = ('avhrr/3',) [avhrr/3-level2] filename = hrpt_%(satellite)s_%Y%m%d_%H%M_%(orbit)s.l1b dir = /path/to/aapp/output/%(satellite)s_%Y%m%d_%H%M_%(orbit)s format = aapp1b [avhrr/3-level3] format = nc_pps_l2.PPSReader cloud_product_filename = S_NWC_%(product)s_noaa19_%(orbit)s_*.nc cloud_product_dir = /path/to/pps/output cloud_product_geofilename = S_NWC_CMA_noaa19_%(orbit)s_*.nc geolocation_product_name = CMA [avhrr/3-1] frequency = (0.58, 0.63, 0.68) resolution = 1090 name = '1' size = (2048,) [avhrr/3-2] frequency = (0.725, 0.8625, 1.0) resolution = 1090 name = '2' size = (2048,) [avhrr/3-3] frequency = (1.58, 1.61, 1.64) resolution = 1090 name = '3A' size = (2048,) [avhrr/3-4] frequency = (3.55, 3.74, 3.93) resolution = 1090 name = '3B' size = (2048,) [avhrr/3-5] frequency = (10.3, 10.8, 11.3) resolution = 1090 name = '4' size = (2048,) [avhrr/3-6] frequency = (11.5, 12.0, 12.5) resolution = 1090 name = '5' size = (2048,) mpop-1.5.0/etc/Suomi-NPP.cfg.template000066400000000000000000000050301317160620000172570ustar00rootroot00000000000000[satellite] instruments = ('viirs',) [viirs-level2] filename = SV???_%(satellite)s_d*b%(orbit)s_c*.h5 geo_filenames = G????_%(satellite)s_d*b%(orbit)s_c*.h5 dir = /data/viirs/sdr geo_dir = /data/viirs/sdr format = viirs_sdr.ViirsSDRReader [viirs-level3] format = nc_pps_l2.PPSReader cloud_product_filename = S_NWC_%(product)s_npp_%(orbit)s_*.nc cloud_product_geofilename = S_NWC_CMA_npp_%(orbit)s_*.nc cloud_product_dir=/path/to/pps/cloud/products/ cloud_product_geodir=/path/to/geolocation/files/for/pps/ geolocation_product_name = CMA [viirs-m01] frequency = (0.402, 0.412, 0.422) resolution = 742 name = 'M01' size = (3200,) [viirs-m02] frequency = (0.436, 0.445, 0.454) resolution = 742 name = 'M02' size = (3200,) [viirs-m03] frequency = (0.478, 0.488, 0.498) resolution = 742 name = 'M03' size = (3200,) [viirs-m04] frequency = (0.545, 0.555, 0.565) resolution = 742 name = 'M04' size = (3200,) [viirs-m05] frequency = (0.662, 0.672, 0.682) resolution = 742 name = 'M05' size = (3200,) [viirs-m06] frequency = (0.739, 0.746, 0.754) resolution = 742 name = 'M06' size = (3200,) [viirs-m07] frequency = (0.846, 0.865, 0.885) resolution = 742 name = 'M07' size = (3200,) [viirs-m08] frequency = (1.230, 1.240, 1.250) resolution = 742 name = 'M08' size = (3200,) [viirs-m09] frequency = (1.371, 1.378, 1.386) resolution = 742 name = 'M09' size = (3200,) [viirs-m10] frequency = (1.580, 1.610, 1.640) resolution = 742 name = 'M10' size = (3200,) [viirs-m11] frequency = (2.225, 2.250, 2.275) resolution = 742 name = 'M11' size = (3200,) [viirs-m12] frequency = (3.610, 3.700, 3.790) resolution = 742 name = 'M12' size = (3200,) [viirs-m13] frequency = (3.973, 4.050, 4.128) resolution = 742 name = 'M13' size = (3200,) [viirs-m14] frequency = (8.400, 8.550, 8.700) resolution = 742 name = 'M14' size = (3200,) [viirs-m15] frequency = (10.263, 10.763, 11.263) 
resolution = 742 name = 'M15' size = (3200,) [viirs-m16] frequency = (11.538, 12.013, 12.489) resolution = 742 name = 'M16' size = (3200,) # I-bands: [viirs-i01] frequency = (0.600, 0.640, 0.680) resolution = 371 name = 'I01' size = (6400,) [viirs-i02] frequency = (0.845, 0.865, 0.884) resolution = 371 name = 'I02' size = (6400,) [viirs-i03] frequency = (1.580, 1.610, 1.640) resolution = 371 name = 'I03' size = (6400,) [viirs-i04] frequency = (3.580, 3.740, 3.900) resolution = 371 name = 'I04' size = (6400,) [viirs-i05] frequency = (10.500, 11.450, 12.300) resolution = 371 name = 'I05' size = (6400,) [viirs-dnb] frequency = (0.500, 0.700, 0.900) resolution = 742 name = 'DNB' size = (4064,) mpop-1.5.0/etc/TerraSAR-X.cfg.template000066400000000000000000000004601317160620000173620ustar00rootroot00000000000000[satellite] instruments = ('sarx',) [sarx-level1] format=mipp/xsar/TSX dir = '/data/xsar/in' filename_archive = 'TX01_SAR*_%Y%m%dT%H%M%S_*.tar' filename_metadata = 'TSX1_SAR*.xml' [sarx-level2] format='mipp_xsar' [sarx-1] name = 'sarx' frequency = (9.50, 9.65, 9.80) # GHz resolution = 8.25 size = () mpop-1.5.0/etc/areas.def.template000066400000000000000000000220741317160620000166310ustar00rootroot00000000000000REGION: afghanistan { NAME: Afghanistan PCS_ID: merc PCS_DEF: proj=merc,lat_ts=35,a=6370997.0,lon_0=67.5,lat_0=35 XSIZE: 1600 YSIZE: 1600 AREA_EXTENT: (-1600000.0, 1600000.0, 1600000.0, 4800000.0) }; REGION: EuropeCanary { NAME: Northern globe MSG image 0 degrees PCS_ID: geos0 PCS_DEF: proj=geos, lon_0=0.0, a=6378169.00, b=6356583.80, h=35785831.0 XSIZE: 3000 YSIZE: 1200 #AREA_EXTENT: (-4820647.75308, 1967264.34239, 4180561.74437, 5567748.14137) #AREA_EXTENT: (-4820147.6858850112, 1966764.2751930435, 4181061.8115659896, 5567248.074173444) AREA_EXTENT: (-4823148.0890508275, 1969764.6783588605, 4178061.4084001728, 5570248.4773392612) }; REGION: AfHorn { NAME: Eastern globe MSG image 0 degrees PCS_ID: geos0 PCS_DEF: proj=geos, lon_0=0.0, a=6378169.00, b=6356583.80, h=35785831.0 XSIZE: 1100 YSIZE: 1600 #AREA_EXTENT: (2266804.5917747435, -1330678.8040398397, 5567248.074173444, 3469966.2612673608) AREA_EXTENT: (2263804.1886089267, -1327678.4008740226, 5564247.6710076267, 3472966.6644331776) }; REGION: afhorn { NAME: Africa horn 3km resolution PCS_ID: merc50 PCS_DEF: proj=merc,ellps=bessel,lon_0=50.0 XSIZE: 1622 YSIZE: 1622 AREA_EXTENT: (-2432000.0,-1130348.139543,2432000.0,3733651.860457) }; REGION: met09globe { NAME: Cropped globe MSG image 0 degrees PCS_ID: geos0 PCS_DEF: proj=geos, lon_0=0.0, a=6378169.00, b=6356583.80, h=35785831.0 #PCS_DEF: proj=geos, lon_0=0.0, a=6378144.0, b=6356759.0, h=35785831.0, rf=295.49 XSIZE: 3620 YSIZE: 3620 #AREA_EXTENT: (-5430729.7301287707, -5430729.7301287707, 5430729.7301287707, 5430729.7301287707) AREA_EXTENT: (-5432229.9317116784, -5429229.5285458621, 5429229.5285458621, 5432229.9317116784) }; REGION: met09globeFull { NAME: Full globe MSG image 0 degrees PCS_ID: geos0 PCS_DEF: proj=geos, lon_0=0.0, a=6378169.00, b=6356583.80, h=35785831.0 #PCS_DEF: proj=geos, lon_0=0.0, a=6378144.0, b=6356759.0, h=35785831.0, rf=295.49 XSIZE: 3712 YSIZE: 3712 AREA_EXTENT: (-5570248.4773392612, -5567248.074173444, 5567248.074173444, 5570248.4773392612) }; REGION: met07globe { NAME: Full globe IODC image 57 degrees PCS_ID: geos57 PCS_DEF: proj=geos, lon_0=57.0, a=6378140.00, b=6356755.0, h=35785831.0 # PCS_DEF: proj=geos, lon_0=57.0, a=6378169.0, b=6356584.0, h=35785831.0, rf=295.49 XSIZE: 2500 YSIZE: 2500 AREA_EXTENT: (-5621225.237846375, -5621225.237846375, 
5621225.237846375, 5621225.237846375) }; REGION: germ { NAME: Germany PCS_ID: ps50n1 PCS_DEF: proj=stere,ellps=bessel,lat_0=90,lon_0=5,lat_ts=50,a=6378144.0,b=6356759.0 XSIZE: 1024 YSIZE: 1024 AREA_EXTENT: (-155100.436345,-4441495.37946,868899.563655,-3417495.37946) }; REGION: euro4 { NAME: Euro 4km area - Europe PCS_ID: ps60n PCS_DEF: proj=stere,ellps=bessel,lat_0=90,lon_0=14,lat_ts=60 XSIZE: 1024 YSIZE: 1024 AREA_EXTENT: (-2717181.7304994687,-5571048.1403121399,1378818.2695005313,-1475048.1403121399) }; REGION: MSGHRVN { NAME: High resolution northern quarter globe MSG image 0 degrees PCS_ID: geos0 PCS_DEF: proj=geos, lon_0=0.0, a=6378144.0, b=6356759.0, h=35785831.0, rf=295.49 XSIZE: 11136 YSIZE: 2784 AREA_EXTENT: (-5568742.4, 2655354.0, 5568742.4, 5439725.2) }; REGION: scan { NAME: Scandinavia PCS_ID: ps60n PCS_DEF: proj=stere,ellps=bessel,lat_0=90,lon_0=14,lat_ts=60 XSIZE: 512 YSIZE: 512 AREA_EXTENT: (-1268854.1266382949, -4150234.8425892727, 779145.8733617051, -2102234.8425892727) }; REGION: scan2 { NAME: Scandinavia - 2km area PCS_ID: ps60n PCS_DEF: proj=stere,ellps=bessel,lat_0=90,lon_0=14,lat_ts=60 XSIZE: 1024 YSIZE: 1024 AREA_EXTENT: (-1268854.1266382949, -4150234.8425892727, 779145.8733617051, -2102234.8425892727) }; REGION: scan1 { NAME: Scandinavia - 1km area PCS_ID: ps60n PCS_DEF: proj=stere,ellps=bessel,lat_0=90,lon_0=14,lat_ts=60 XSIZE: 2048 YSIZE: 2088 AREA_EXTENT: (-1268854.1266382949, -4150234.8425892727, 779145.8733617051, -2062234.8425892727) }; REGION: mesanX { NAME: Mesan-X rotated lon/lat 1.8km PCS_ID: mesan PCS_DEF: proj=ob_tran,o_proj=eqc,o_lat_p=30,o_lon_p=10,lon_0=-10,a=6371000.0 XSIZE: 1476 YSIZE: 1608 AREA_EXTENT: (1067435.7598983962, -1278764.8903419089, 3791765.9965939857, 1690140.6680267097) }; REGION: mesanE { NAME: Europe Mesan rotated lon/lat 1.8km PCS_ID: mesan PCS_DEF: proj=ob_tran,o_proj=eqc,o_lat_p=30,o_lon_p=10,lon_0=-10,a=6371000.0 XSIZE: 5093 YSIZE: 6294 AREA_EXTENT: (289083.00056196708, -2957836.6467769896, 5381881.121371055, 3335826.6850212598) }; REGION: baws { NAME: BAWS PCS_ID: baws PCS_DEF: proj=aea,ellps=bessel,lon_0=14,lat_1=60,lat_2=60 XSIZE: 1400 YSIZE: 1400 AREA_EXTENT: (-475617, 5324430, 924383, 6724430) }; REGION: eurotv { NAME: Europe TV - 6.2x5.0km PCS_ID: eurotv PCS_DEF: proj=stere,ellps=bessel,lat_0=90,lon_0=14,lat_ts=60,a=6378144.0,b=6356759.0 XSIZE: 1024 YSIZE: 1024 AREA_EXTENT: (-3503748.8201907813, -6589593.1340587894, 2842567.6359087573, -1499856.5846593212) }; REGION: eurotv4n { NAME: Europe TV4 - 4.1x4.1km PCS_ID: eurotv4n PCS_DEF: proj=stere,ellps=bessel,lat_0=90,lon_0=14,lat_ts=60,a=6378144.0,b=6356759.0 XSIZE: 2048 YSIZE: 1152 AREA_EXTENT: (-5103428.6786669521, -6772478.6005340703, 3293371.3213330479, -2049278.6005340703) }; REGION: eurol { NAME: Euro 3.0km area - Europe PCS_ID: ps60wgs84 PCS_DEF: proj=stere,ellps=WGS84,lat_0=90,lon_0=0,lat_ts=60 XSIZE: 2560 YSIZE: 2048 AREA_EXTENT: (-3780000.0, -7644000.0, 3900000.0, -1500000.0) }; REGION: scanl { NAME: Scandinavia - Large PCS_ID: ps60wgs84 PCS_DEF: proj=stere,ellps=WGS84,lat_0=90,lon_0=0,lat_ts=60 XSIZE: 2900 YSIZE: 2900 AREA_EXTENT: (-900000.0, -4500000.0,2000000.0, -1600000.0) }; REGION: euron1 { NAME: Northern Europe - 1km PCS_ID: ps60wgs84 PCS_DEF: proj=stere,ellps=WGS84,lat_0=90,lon_0=0,lat_ts=60 XSIZE: 3072 YSIZE: 3072 AREA_EXTENT: (-1000000.0, -4500000.0, 2072000.0, -1428000.0) }; REGION: nsea { NAME: North Baltic Sea PCS_ID: merc PCS_DEF: proj=merc,ellps=WGS84,lat_ts=0,lon_0=15 XSIZE: 1024 YSIZE: 1024 AREA_EXTENT: (-322789.07638000086, 7784901.986829306, 
1725210.9236199991, 9832901.9868293069) }; REGION: ssea { NAME: South Baltic Sea PCS_ID: merc PCS_DEF: proj=merc,ellps=WGS84,lat_ts=0,lon_0=15 XSIZE: 1024 YSIZE: 1024 AREA_EXTENT: (-801407.36204689811, 7003690.6636438016, 1246592.6379531019, 9051690.6636438016) }; REGION: euro { NAME: Euro area - Europe PCS_ID: ps60n PCS_DEF: proj=stere,ellps=bessel,lat_0=90,lon_0=14,lat_ts=60 XSIZE: 512 YSIZE: 512 AREA_EXTENT: (-2717181.7304994687,-5571048.1403121399,1378818.2695005313,-1475048.1403121399) }; REGION: euro_north { NAME: Europe 3km/pixel PCS_ID: euro_north PCS_DEF: proj=stere,lon_0=8.00,lat_0=50.00,lat_ts=50.00,ellps=WGS84 XSIZE: 800 YSIZE: 870 AREA_EXTENT: (-1370912.16,-909970.17,1029087.84,1700029.83) }; REGION: ease_sh { NAME: Antarctic EASE grid PCS_ID: ease_sh PCS_DEF: proj=laea, lat_0=-90, lon_0=0, a=6371228.0, units=m XSIZE: 425 YSIZE: 425 AREA_EXTENT: (-5326849.0625,-5326849.0625,5326849.0625,5326849.0625) }; REGION: ease_nh { NAME: Arctic EASE grid PCS_ID: ease_nh PCS_DEF: proj=laea, lat_0=90, lon_0=0, a=6371228.0, units=m XSIZE: 425 YSIZE: 425 AREA_EXTENT: (-5326849.0625,-5326849.0625,5326849.0625,5326849.0625) }; # VIIRS test areas: REGION: npp_sample_m { NAME: North America - NPP sample data - M-bands PCS_ID: laeaw12060 PCS_DEF: proj=laea,a=6370997.0,lat_0=60.0,lon_0=-120.0 XSIZE: 1500 YSIZE: 1500 AREA_EXTENT: (-1700000, -1400000, 1100000, 1400000) }; REGION: npp_sample_i { NAME: North America - NPP sample data - I-bands PCS_ID: laeaw12060 PCS_DEF: proj=laea,a=6370997.0,lat_0=60.0,lon_0=-120.0 XSIZE: 1200 YSIZE: 800 AREA_EXTENT: (-200000, 100000, 400000, 500000) }; REGION: scan500m { NAME: Scandinavia - 500m area PCS_ID: ps60n PCS_DEF: proj=stere,ellps=bessel,lat_0=90,lon_0=14,lat_ts=60 XSIZE: 4096 YSIZE: 4176 AREA_EXTENT: (-1268854.1266382949, -4150234.8425892727, 779145.8733617051, -2062234.8425892727) }; mpop-1.5.0/etc/eps_avhrrl1b_6.5.xml000066400000000000000000002352341317160620000167520ustar00rootroot00000000000000 40 april04 50 nov05 100 launch current 65 EPS AVHRR/3 Level 1B Format This AVHRR/3 1B description was generated using the AVHRR/3 PFS Excel document Issue 6 Revision 5 (eps_avhrrl1_6.5_names_masks.xls) and pfs2xml version 3.3 AVHR_*1B_*Z* Geolocation AVHRR Geolocation Coverage (Latitude, Longitude) mdr-1b[].EARTH_LOCATIONS[][0] mdr-1b[].EARTH_LOCATIONS[][1] Channel 1 AVHRR Scene Radiance for Channel 1 mdr-1b[].SCENE_RADIANCES[0][] Geolocation Channel 2 AVHRR Scene Radiance for Channel 2 mdr-1b[].SCENE_RADIANCES[1][] Geolocation Channel 3a/b AVHRR Scene Radiance for Channel 3a/b mdr-1b[].SCENE_RADIANCES[2][] Geolocation Channel 4 AVHRR Scene Radiance for Channel 4 mdr-1b[].SCENE_RADIANCES[3][] Geolocation Channel 5 AVHRR Scene Radiance for Channel 5 mdr-1b[].SCENE_RADIANCES[4][] Geolocation Uniformity Test A AVHRR Cloud Information Uniformity Test A (0=test failed or clear, 1=cloudy) mdr-1b[].CLOUD_INFORMATION[].#Uniformity_test_a Geolocation Uniformity Test B AVHRR Cloud Information Uniformity Test A (0 =test failed or cloudy, 1=clear) mdr-1b[].CLOUD_INFORMATION[].#Uniformity_test_b Geolocation T3-T5 Test A AVHRR Cloud Information T3-T5 Test A (0=test failed or clear, 1=cloudy) mdr-1b[].CLOUD_INFORMATION[].#T3_T5_test_a Geolocation T3-T5 Test B AVHRR Cloud Information T3-T5 Test B (0 =test failed or cloudy, 1=clear) mdr-1b[].CLOUD_INFORMATION[].#T3_T5_test_b Geolocation T4-T3 Test A AVHRR Cloud Information T4-T3 Test A (0=test failed or clear, 1=cloudy) mdr-1b[].CLOUD_INFORMATION[].#T4_T3_test_a Geolocation T4-T3 Test B AVHRR Cloud Information T4-T3 Test B (0 
=test failed or cloudy, 1=clear) mdr-1b[].CLOUD_INFORMATION[].#T4_T3_test_b Geolocation T4-T5 Test A AVHRR Cloud Information T4-T5 Test A (0=test failed or clear, 1=cloudy) mdr-1b[].CLOUD_INFORMATION[].#T4_T5_test_a Geolocation T4-T5 Test B AVHRR Cloud Information T4-T5 Test B (0 =test failed or cloudy, 1=clear) mdr-1b[].CLOUD_INFORMATION[].#T4_T5_test_b Geolocation Albedo Test A AVHRR Cloud Information Albedo Test A (0=test failed or clear, 1=cloudy or snow/ice covered) mdr-1b[].CLOUD_INFORMATION[].#Albedo_test_a Geolocation Albedo Test B AVHRR Cloud Information Albedo Test B (0 =test failed or cloudy, 1=clear or snow/ice covered) mdr-1b[].CLOUD_INFORMATION[].#Albedo_test_b Geolocation T4 Test A AVHRR Cloud Information T4 Test A (0=test failed or clear, 1=cloudy or snow/ice covered) mdr-1b[].CLOUD_INFORMATION[].#T4_test_a Geolocation T4 Test B AVHRR Cloud Information T4 Test B (0 =test failed or cloudy, 1=clear or snow/ice covered) mdr-1b[].CLOUD_INFORMATION[].#T4_test_b Geolocation Test Situations AVHRR Cloud Information - number of the test situations (11 different test situations) mdr-1b[].CLOUD_INFORMATION[].#Number_of_the_test_situation Geolocation eps-product mpop-1.5.0/etc/geo_image.cfg000066400000000000000000000000431317160620000156310ustar00rootroot00000000000000[coasts] coast_file=world_map.asciimpop-1.5.0/etc/hsaf10.cfg.template000077500000000000000000000012461317160620000166220ustar00rootroot00000000000000[satellite] satname = 'hsaf' projection = 'geos(0.0)' number = '10' instruments = ('seviri',) proj4_params = 'proj=geos lon_0=0.0 lat_0=0.00 a=6378169.00 b=6356583.80 h=35785831.00' #[seviri-level1] ##filename = h03_%Y%m%d_%H%M_rom.grb.nc4 #filename = h03_%Y%m%d_%H%M_rom.* #format = 'read_h03' ##dir = /data/COALITION2/database/meteosat/HSAF/%Y/%m/%d #dir = /data/cinesat/in/hsaf [seviri-level2] #filename = h03_%Y%m%d_%H%M_rom.grb.nc4 filename = h03_%Y%m%d_%H%M_rom.* format = 'hsaf_h03' #dir = /data/COALITION2/database/meteosat/HSAF/%Y/%m/%d dir = /data/cinesat/in/hsaf [seviri-1] frequency = (0.00, 0.00, 0.00) resolution = 3000.403165817 name = 'h03' size = (1900, 900)mpop-1.5.0/etc/metop.ASCAT.cfg.template000066400000000000000000000016361317160620000175160ustar00rootroot00000000000000##Rename this file to metop.cfg to read ASCAT data [satellite] satname = metop variant = number = '' instruments = ('ascat',) [ascat-level2] filename = W_XX-EUMETSAT-Darmstadt,SURFACE+SATELLITE,METOPA+ASCAT_C_EUMP_%Y%m%d%H%M%S_%(orbit)s_eps_o_125_ssm_l2.nc dir = ~/eumetsat/data format = ascat_nc [ascat-1] name = 'soil_moisture' [ascat-2] name = 'soil_moisture_error' [ascat-3] name = 'soil_moisture_sensitivity' [ascat-4] name = 'mean_soil_moisture' [ascat-5] name = 'sigma40' [ascat-6] name = 'slope40_error' [ascat-7] name = 'dry_backscatter' [ascat-8] name = 'rainfall_flag' [ascat-9] name = 'wet_backscatter' [ascat-10] name = 'corr_flags' [ascat-11] name = 'proc_flag1' [ascat-12] name = 'proc_flag2' [ascat-13] name = 'aggregated_quality_flag' [ascat-14] name = 'snow_cover_probability' [ascat-15] name = 'frozen_soil_probability' [ascat-16] name = 'wetland_flag' [ascat-17] name = 'topography_flag' mpop-1.5.0/etc/mpop.cfg.template000066400000000000000000000002701317160620000165040ustar00rootroot00000000000000[projector] area_file=areas.def area_directory= projections_directory=/var/tmp [composites] module=mpop.smhi_composites #shape files for pycoast [shapes] 
dir=/local_disk/data/shapes/mpop-1.5.0/etc/ninjotiff_products.cfg.template000066400000000000000000000105011317160620000214400ustar00rootroot00000000000000# # Definition of Ninjo products. # #------------------------------------------------------------------------------ # # MSG Single channels # #------------------------------------------------------------------------------ [VIS006] description = MSG Channel 1 sat_id = 6200014 chan_id = 100015 data_cat = GORN data_source = EUMETCAST [VIS008] description = MSG Channel 2 sat_id = 6200014 chan_id = 200015 data_cat = GORN data_source = EUMETCAST [IR_016] description = MSG Channel 3 sat_id = 6200014 chan_id = 300015 data_cat = GORN data_source = EUMETCAST [IR_039] description = MSG Channel 4 sat_id = 6200014 chan_id = 400015 data_cat = GORN data_source = EUMETCAST [WV_062] description = MSG Channel 5 sat_id = 6200014 chan_id = 500015 data_cat = GORN data_source = EUMETCAST [WV_073] description = MSG Channel 6 sat_id = 6200014 chan_id = 600015 data_cat = GORN data_source = EUMETCAST [IR_087] description = MSG Channel 7 sat_id = 6200014 chan_id = 700015 data_cat = GORN data_source = EUMETCAST [IR_097] description = MSG Channel 8 sat_id = 6200014 chan_id = 800015 data_cat = GORN data_source = EUMETCAST [IR_108] description = MSG Channel 9 sat_id = 6200014 chan_id = 900015 data_cat = GORN data_source = EUMETCAST [IR_120] description = MSG Channel 10 sat_id = 6200014 chan_id = 1000015 data_cat = GORN data_source = EUMETCAST [IR_134] description = MSG Channel 11 sat_id = 6200014 chan_id = 1100015 data_cat = GORN data_source = EUMETCAST [HRV] description = MSG Channel 12 sat_id = 6200014 chan_id = 1200015 data_cat = GORN data_source = EUMETCAST #------------------------------------------------------------------------------ # # MSG RGB products # #------------------------------------------------------------------------------ [msg_dust] description = MSG RGB dust sat_id = 900014 chan_id = 6600015 data_cat = GPRN physic_value = NONE physic_unit = NONE gradient = 1.0 axis_intercept = 0.0 data_source = EUMETCAST [msg_convection] description = MSG RGB Convection sat_id = 900014 chan_id = 6700015 data_cat = GPRN physic_value = NONE physic_unit = NONE gradient = 1.0 axis_intercept = 0.0 data_source = EUMETCAST [msg_night_microphysics] description = MSG RGB Night Microphysics sat_id = 900014 chan_id = 8000015 data_cat = GPRN physic_value = NONE physic_unit = NONE gradient = 1.0 axis_intercept = 0.0 data_source = EUMETCAST [msg_airmass] description = MSG RGB Airmass sat_id = 900014 chan_id = 6500015 data_cat = GPRN physic_value = NONE physic_unit = NONE gradient = 1.0 axis_intercept = 0.0 data_source = EUMETCAST #------------------------------------------------------------------------------ # # NWCSAF products # #------------------------------------------------------------------------------ [msg_cloudtype] description = NWCSAF Cloudtype sat_id = 900014 chan_id = 1700015 data_cat = GPRN physic_value = CLOUDTYPE physic_unit = NONE gradient = 1.0 axis_intercept = 0.0 data_source = EUMETCAST [msg_cloudtop_temp] description = NWCSAF Cloud Top Temperature sat_id = 900014 chan_id = 2000015 data_cat = GPRN physic_value = NONE physic_unit = NONE gradient = 1.0 axis_intercept = 0.0 data_source = EUMETCAST [msg_cloudtop_height] description = NWCSAF Cloud Top Height sat_id = 900014 chan_id = 1900015 data_cat = GPRN physic_value = NONE physic_unit = NONE gradient = 1.0 axis_intercept = 0.0 data_source = EUMETCAST [msg_precipitating_clouds] description = NWCSAF Precipitating 
Clouds sat_id = 900014 chan_id = 2100015 data_cat = GPRN physic_value = NONE physic_unit = NONE gradient = 1.0 axis_intercept = 0.0 data_source = EUMETCAST #------------------------------------------------------------------------------ # # Polar Single channels # #------------------------------------------------------------------------------ [VIS0.6] description = Polar Channel 1 sat_id = 7800014 chan_id = 100015 data_cat = PORN data_source = DMI [VIS0.8] description = Polar Channel 2 sat_id = 7800014 chan_id = 200015 data_cat = PORN data_source = DMI [IR1.6] description = Polar Channel 3a sat_id = 7800014 chan_id = 300015 data_cat = PORN data_source = DMI [IR3.7] description = Polar Channel 3b sat_id = 7800014 chan_id = 1600015 data_cat = PORN data_source = DMI [IR10.8] description = Polar Channel 4 sat_id = 7800014 chan_id = 900015 data_cat = PORN data_source = DMI [IR12.0] description = Polar Channel 5 sat_id = 7800014 chan_id = 1000015 data_cat = PORN data_source = DMI mpop-1.5.0/etc/noaa11.cfg.template000066400000000000000000000014501317160620000166120ustar00rootroot00000000000000[satellite] satname = noaa variant = number = 11 instruments = ('avhrr',) [avhrr-level2] #NSS.LHRR.NH.D92168.S1419.E1430.B1920505.GC #NSS.LHRR.NH.D92324.S1313.E1324.B2140707.GC #NSS.LHRR.NH.D92264.S1331.E1343.B2056060.GC #NSS.LHRR.NH.D92258.S1303.E1314.B2047575.GC filename = NSS.LHRR.NH.D92258.S1303*.GC dir = /home/sajid/data/avhrr/noheader_10bit format = lac_pod_l1b [avhrr-1] frequency = (0.58, 0.63, 0.68) resolution = 1090 name = '1' size = (2048,) [avhrr-2] frequency = (0.725, 0.9125, 1.10) resolution = 1090 name = '2' size = (2048,) [avhrr-3] frequency = (3.55, 3.74, 3.93) resolution = 1090 name = '3' size = (2048,) [avhrr-4] frequency = (10.3, 10.8, 11.3) resolution = 1090 name = '4' size = (2048,) [avhrr-5] frequency = (11.5, 12.0, 12.5) resolution = 1090 name = '5' size = (2048,) mpop-1.5.0/etc/noaa12.cfg.template000066400000000000000000000012541317160620000166150ustar00rootroot00000000000000[satellite] satname = noaa variant = number = 12 instruments = ('avhrr',) [avhrr-level2] filename = NSS.LHRR.ND.D92131.S0623.E0630.B0513838.WI #filename = NSS.LHRR.ND.D92131.S0804*.WI dir = /home/sajid/data/avhrr/noheader_10bit format = lac_pod_l1b [avhrr-1] frequency = (0.58, 0.63, 0.68) resolution = 1090 name = '1' size = (2048,) [avhrr-2] frequency = (0.725, 0.9125, 1.10) resolution = 1090 name = '2' size = (2048,) [avhrr-3] frequency = (3.55, 3.74, 3.93) resolution = 1090 name = '3' size = (2048,) [avhrr-4] frequency = (10.3, 10.8, 11.3) resolution = 1090 name = '4' size = (2048,) [avhrr-5] frequency = (11.5, 12.0, 12.5) resolution = 1090 name = '5' size = (2048,) mpop-1.5.0/etc/noaa18-gac.cfg.template000066400000000000000000000012401317160620000173460ustar00rootroot00000000000000[satellite] satname = noaa variant = number = 18 instruments = ('avhrr',) [avhrr-level2] #filename = NSS.LHRR.NN.D%Y%j.S%H%M.E1143.B2634545.GC filename = NSS.GHRR.NN.D%Y%j.S%H%M*.L1B dir = /home/sajid/data/avhrr format = gac_klm_l1b [avhrr-1] frequency = (0.58, 0.63, 0.68) resolution = 1090 name = '1' size = (2048,) [avhrr-2] frequency = (0.725, 0.9125, 1.10) resolution = 1090 name = '2' size = (2048,) [avhrr-3] frequency = (3.55, 3.74, 3.93) resolution = 1090 name = '3B' size = (2048,) [avhrr-4] frequency = (10.3, 10.8, 11.3) resolution = 1090 name = '4' size = (2048,) [avhrr-5] frequency = (11.5, 12.0, 12.5) resolution = 1090 name = '5' size = (2048,) 
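# A minimal usage sketch (not part of the original template) of how a renamed
# copy of the file above (e.g. noaa18.cfg placed in PPP_CONFIG_DIR) would
# typically be consumed; the time_slot and orbit values are illustrative:
#
#   from datetime import datetime
#   from mpop.satellites import PolarFactory
#   time_slot = datetime(2010, 7, 1, 11, 35)
#   glbl = PolarFactory.create_scene("noaa", "18", "avhrr", time_slot, orbit="12345")
#   glbl.load([10.8])   # selects channel '4' via its (10.3, 10.8, 11.3) triplet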
mpop-1.5.0/etc/noaa18-lac.cfg.template000066400000000000000000000014071317160620000173600ustar00rootroot00000000000000[satellite] satname = noaa variant = number = 18 instruments = ('avhrr',) [avhrr-level2] filename = NSS.LHRR.NN.D10182.S1135*.GC #filename = NSS.LHRR.NN.D10182.S1318*.GC #filename = NSS.LHRR.NN.D%Y%j.S%H%M.E1143*.L1B #filename = NSS.GHRR.NN.D%Y%j.S%H%M.E1209.B2645758.GC dir = /home/sajid/data/avhrr/noheader_10bit format = lac_klm_l1b [avhrr-1] frequency = (0.58, 0.63, 0.68) resolution = 1090 name = '1' size = (2048,) [avhrr-2] frequency = (0.725, 0.9125, 1.10) resolution = 1090 name = '2' size = (2048,) [avhrr-3] frequency = (3.55, 3.74, 3.93) resolution = 1090 name = '3B' size = (2048,) [avhrr-4] frequency = (10.3, 10.8, 11.3) resolution = 1090 name = '4' size = (2048,) [avhrr-5] frequency = (11.5, 12.0, 12.5) resolution = 1090 name = '5' size = (2048,) mpop-1.5.0/etc/odyssey.cfg.template000077500000000000000000000011111317160620000172260ustar00rootroot00000000000000[satellite] satname = 'odyssey' variant = number = instruments = ('radar',) [radar-level2] dir = /data/cinesat/in/radar format = odyssey_radar projection = odyssey [RATE] #filename = T_PAAH21_C_EUOC_%Y%m%d%H%M??.hdf filename = meteoswiss.radar.euoc_rain_rate.%Y%m%d%H%M.hdf5 name = 'RATE' [DBZH] #filename = T_PABH21_C_EUOC_%Y%m%d%H%M??.hdf filename = meteoswiss.radar.euoc_maximum_reflectivit.%Y%m%d%H%M.hdf5 name = 'DBZH' [ACRR] #filename = T_PASH21_C_EUOC_%Y%m%d%H%M??.hdf filename = meteoswiss.radar.euoc_1h_accumulation.%Y%m%d%H%M.hdf5 name = 'ACRR' mpop-1.5.0/etc/s2a.cfg.template000066400000000000000000000026321317160620000162220ustar00rootroot00000000000000[satellite] satname = s2a variant = number = instruments = ('msi',) [msi-level2] filename=%(satellite)s_OPER_%(instrument)s_L1C_??_????_%Y%m%dT%H%M%S_A%(orbit)s_T?????_%(band)s.jp2 dir = /s2a_datadir format = s2_msi [msi-b1] frequency = (0.433, 0.443, 0.453) resolution = 60 name = 'B01' size = (1830,1830) [msi-b2] frequency = (0.4575, 0.490, 0.5225) resolution = 10 name = 'B02' size = (10980,10980) [msi-b3] frequency = (0.5225, 0.560, 0.5775) resolution = 10 name = 'B03' size = (10980,10980) [msi-b4] frequency = (0.650, 0.665, 0.680) resolution = 10 name = 'B04' size = (10980,10980) [msi-b5] frequency = (0.6975, 0.705, 0.7125) resolution = 20 name = 'B05' size = (5490,5490) [msi-b6] frequency = (0.7325, 0.740, 0.7475) resolution = 20 name = 'B06' size = (5490,5490) [msi-b7] frequency = (0.773, 0.783, 0.793) resolution = 20 name = 'B07' size = (5490,5490) [msi-b8] frequency = (0.7845, 0.842, 0.8995) resolution = 10 name = 'B08' size = (10980,10980) [msi-b8a] frequency = (0.855,0.865,0.875) resolution = 20 name = 'B8A' size = (5490,5490) [msi-b9] frequency = (0.935,0.945,0.955) resolution = 60 name = 'B09' size = (1830,1830) [msi-b10] frequency = (1.360,1.375,1.390) resolution = 60 name = 'B10' size = (1830,1830) [msi-b11] frequency = (1.565,1.610,1.655) resolution = 20 name = 'B11' size = (5490,5490) [msi-b12] frequency = (2.100,2.190,2.280) resolution = 20 name = 'B12' size = (5490,5490) mpop-1.5.0/etc/sentinel1a.cfg.template000066400000000000000000000005511317160620000175760ustar00rootroot00000000000000[satellite] satname = 'sentinel' number = '1a' instruments = ('s1a', ) [s1a-level2] format = 'mipp_sentinel.GeoTiffReader' # S1A_IW_GRDM_1SSH_20120109T054406_20120109T054424_001889_000001_ACD9.SAFE/ # s1a-iw-grd-hh-20120109t054406-20120109t054424-001889-000001-001.tiff dir = /data/ras/sentinel-1/???_*_%Y%m%dT%H%M%S_*_%(orbit)s_*SAFE filename = manifest.safe 
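# A rough sketch (assumed, not mpop's exact parsing order) of how the dir
# pattern above expands: shell globs (???, *) pass through, the strftime codes
# are filled from the scene's time_slot, and %(orbit)s from its orbit number;
# the %% escaping below is only needed for this two-step illustration:
#
#   >>> from datetime import datetime
#   >>> raw = "/data/ras/sentinel-1/???_*_%Y%m%dT%H%M%S_*_%%(orbit)s_*SAFE"
#   >>> datetime(2012, 1, 9, 5, 44, 6).strftime(raw) % {"orbit": "001889"}
#   '/data/ras/sentinel-1/???_*_20120109T054406_*_001889_*SAFE'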
mpop-1.5.0/etc/sentinel1b.cfg.template000066400000000000000000000005511317160620000175770ustar00rootroot00000000000000[satellite] satname = 'sentinel' number = '1b' instruments = ('s1b', ) [s1b-level2] format = 'mipp_sentinel.GeoTiffReader' # S1B_IW_GRDM_1SSH_20120109T054406_20120109T054424_001889_000001_ACD9.SAFE/ # s1B-iw-grd-hh-20120109t054406-20120109t054424-001889-000001-001.tiff dir = /data/ras/sentinel-1/???_*_%Y%m%dT%H%M%S_*_%(orbit)s_*SAFE filename = manifest.safe mpop-1.5.0/mpop/000077500000000000000000000000001317160620000134375ustar00rootroot00000000000000mpop-1.5.0/mpop/__init__.py000066400000000000000000000022121317160620000155450ustar00rootroot00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright (c) 2009, 2013. # SMHI, # Folkborgsvägen 1, # Norrköping, # Sweden # Author(s): # Martin Raspaud # Adam Dybbroe # This file is part of the mpop. # mpop is free software: you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # mpop is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # You should have received a copy of the GNU General Public License # along with mpop. If not, see . """PP Package initializer. """ import os BASE_PATH = os.path.sep.join(os.path.dirname( os.path.realpath(__file__)).split(os.path.sep)[:-1]) CONFIG_PATH = (os.environ.get('PPP_CONFIG_DIR', '') or os.path.join(BASE_PATH, 'etc')) from mpop.version import __version__ mpop-1.5.0/mpop/afgl.dat000066400000000000000000000116121317160620000150430ustar00rootroot00000000000000# different temperature profiles # (1) AFGL atmospheric constituent profile. U.S. standard atmosphere 1976. ( AFGL-TR-86-0110) # (2) AFGL atmospheric constituent profile. tropical. ( AFGL-TR-86-0110) # (3) AFGL atmospheric constituent profile. midlatitude summer. ( AFGL-TR-86-0110) # (4) AFGL atmospheric constituent profile. midlatitude winter. ( AFGL-TR-86-0110) # (5) AFGL atmospheric constituent profile. subarctic summer. ( AFGL-TR-86-0110) # (6) AFGL atmospheric constituent profile. subarctic winter. 
( AFGL-TR-86-0110) # # (1) us stand (2) tropic (3) MS (4) MW (5) SS (6) SW # z(km) T(K) T(K) T(K) T(K) T(K) T(K) 120.000 360.000 380.000 380.000 333.000 380.000 333.000 115.000 300.000 299.700 316.800 293.000 322.700 288.500 110.000 240.000 241.600 262.400 259.500 270.100 252.600 105.000 208.800 212.000 222.200 237.100 226.000 234.000 100.000 195.100 190.700 190.500 218.600 190.400 218.500 95.000 188.400 184.300 178.300 208.300 176.800 211.000 90.000 186.900 177.000 165.000 199.500 161.600 202.300 85.000 188.900 177.100 165.100 199.800 161.700 213.100 80.000 198.600 184.800 174.100 210.100 170.600 223.900 75.000 208.400 201.800 196.100 220.400 193.600 234.700 70.000 219.600 218.900 218.100 230.700 216.600 245.400 65.000 233.300 236.000 240.100 240.900 239.700 248.400 60.000 247.000 253.100 257.100 250.800 262.700 250.900 55.000 260.800 263.400 269.300 260.600 274.000 259.100 50.000 270.700 270.200 275.700 265.700 277.200 259.300 47.500 270.600 269.600 275.200 265.100 276.200 253.200 45.000 264.200 264.800 269.900 258.500 273.600 247.000 42.500 257.300 259.400 263.700 250.800 269.500 240.800 40.000 250.400 254.000 257.500 243.200 262.100 234.700 37.500 243.435 248.500 251.300 235.500 254.600 228.500 35.000 236.500 243.100 245.200 227.900 247.200 222.300 32.500 229.588 237.700 239.000 220.400 240.000 218.500 30.000 226.500 232.300 233.700 217.400 235.100 216.000 27.500 224.000 227.000 228.450 215.500 231.000 213.600 25.000 221.600 221.400 225.100 215.200 228.100 211.200 24.000 220.600 219.200 223.900 215.200 226.600 211.800 23.000 219.600 217.000 222.800 215.200 225.200 212.400 22.000 218.600 214.600 221.600 215.200 225.200 213.000 21.000 217.600 210.700 220.400 215.200 225.200 213.600 20.000 216.700 206.700 219.200 215.200 225.200 214.200 19.000 216.700 202.700 217.900 215.200 225.200 214.800 18.000 216.700 198.800 216.800 215.700 225.200 215.400 17.000 216.700 194.800 215.700 216.200 225.200 216.000 16.000 216.700 197.000 215.700 216.700 225.200 216.600 15.000 216.700 203.700 215.700 217.200 225.200 217.200 14.000 216.700 210.300 215.700 217.700 225.200 217.200 13.000 216.700 217.000 215.800 218.200 225.200 217.200 12.000 216.700 223.600 222.300 218.700 225.200 217.200 11.000 216.800 230.100 228.800 219.200 225.200 217.200 10.000 223.300 237.000 235.300 219.700 225.200 217.200 9.000 229.700 243.600 241.700 225.700 232.200 217.200 8.000 236.200 250.300 248.200 231.700 239.200 220.600 7.000 242.700 257.000 254.700 237.700 246.100 227.300 6.000 249.200 263.600 261.200 243.700 253.100 234.100 5.000 255.700 270.300 267.200 249.700 260.100 240.900 4.000 262.200 277.000 273.200 255.700 265.500 247.700 3.000 268.700 283.700 279.200 261.700 270.900 252.700 2.000 275.200 287.700 285.200 265.200 276.300 255.900 1.000 281.700 293.700 289.700 268.700 281.700 259.100 0.000 288.200 299.700 294.200 272.200 287.200 257.200 mpop-1.5.0/mpop/channel.py000066400000000000000000000757361317160620000154430ustar00rootroot00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright (c) 2010, 2011, 2012, 2013, 2014, 2015, 2017. # SMHI, # Folkborgsvägen 1, # Norrköping, # Sweden # Author(s): # Martin Raspaud # Adam Dybbroe # This file is part of mpop. # mpop is free software: you can redistribute it and/or modify it under the # terms of the GNU General Public License as published by the Free Software # Foundation, either version 3 of the License, or (at your option) any later # version. 
# mpop is distributed in the hope that it will be useful, but WITHOUT ANY # WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR # A PARTICULAR PURPOSE. See the GNU General Public License for more details. # You should have received a copy of the GNU General Public License along with # mpop. If not, see . """This module defines satellite instrument channels as a generic class, to be inherited when needed. """ import copy import numpy as np import logging LOG = logging.getLogger(__name__) try: from pyorbital.astronomy import sun_zenith_angle as sza except ImportError: sza = None from mpop.tools import viewzen_corr as vz_corr class GeolocationIncompleteError(Exception): """Exception raised when the original data have not been read or expanded properly, so that not every pixel has a geo-location""" pass class NotLoadedError(Exception): """Exception to be raised when attempting to use a non-loaded channel. """ pass class GenericChannel(object): """This is an abstract channel class. It can be a super class for calibrated channel data or more elaborate channels such as cloudtype or CTTH. """ def __init__(self, name=None): object.__init__(self) # Channel name if name is not None and not isinstance(name, str): raise TypeError("Channel name must be a string, or None") self.name = name # Channel resolution, in meters. self.resolution = None # ID of the area on which the channel is defined. self.area_id = None # Area on which the channel is defined. self.area_def = None self.info = {} def __cmp__(self, ch2): if(isinstance(ch2, str)): return cmp(self.name, ch2) elif(ch2.name is not None and self.name is not None and ch2.name[0] == "_" and self.name[0] != "_"): return -1 elif(ch2.name is not None and self.name is not None and ch2.name[0] != "_" and self.name[0] == "_"): return 1 else: return cmp(self.name, ch2.name) def _get_area(self): """Getter for area. """ return self.area_def or self.area_id def _set_area(self, area): """Setter for area. """ if (area is None): self.area_def = None self.area_id = None elif(isinstance(area, str)): self.area_id = area else: try: dummy = area.area_extent dummy = area.x_size dummy = area.y_size dummy = area.proj_id dummy = area.proj_dict self.area_def = area except AttributeError: try: dummy = area.lons dummy = area.lats self.area_def = area self.area_id = None except AttributeError: raise TypeError("Malformed area argument. " "Should be a string or an area object.") area = property(_get_area, _set_area) class Channel(GenericChannel): """This is the satellite channel class. It defines satellite channels as a container for calibrated channel data. The *resolution* sets the resolution of the channel, in meters. The *wavelength_range* is a triplet, containing the lowest-, center-, and highest-wavelength values of the channel. *name* is simply the given name of the channel, and *data* is the data it should hold.
""" def __init__(self, name=None, resolution=0, wavelength_range=[-np.inf, -np.inf, -np.inf], data=None, calibration_unit=None): GenericChannel.__init__(self, name) self._data = None self.wavelength_range = None if(name is None and wavelength_range == [-np.inf, -np.inf, -np.inf]): raise ValueError("Cannot define a channel with neither name " "nor wavelength range.") if not isinstance(resolution, (int, float)): raise TypeError("Resolution must be an integer number of meters.") self.resolution = resolution if(not isinstance(wavelength_range, (tuple, list, set)) or len(wavelength_range) != 3 or not isinstance(wavelength_range[0], float) or not isinstance(wavelength_range[1], float) or not isinstance(wavelength_range[2], float)): raise TypeError("Wavelength_range should be a triplet of floats.") elif(not (wavelength_range[0] <= wavelength_range[1]) or not (wavelength_range[1] <= wavelength_range[2])): raise ValueError("Wavelength_range should be a sorted triplet.") self.wavelength_range = list(wavelength_range) self.unit = calibration_unit self.data = data def get_reflectance(self, tb11, sun_zenith=None, tb13_4=None): """Get the reflectance part of an NIR channel""" try: from pyspectral.near_infrared_reflectance import Calculator except ImportError: LOG.info("Couldn't load pyspectral") # Check the wavelength, and if outside 3-4 microns this functionality # doesn't give any meaning and should not be supported if (self.wavelength_range[1] < 3.0 or self.wavelength_range[1] > 4.0): LOG.warning("Deriving the near infrared reflectance" + " of a band that is outside the 3-4 micron range" + " is not supported!\n\tWill do nothing...") return # Check if the sun-zenith angle was provided: if sun_zenith is None: lonlats = self.area.get_lonlats() sun_zenith = sza(self.info['time'], lonlats[0], lonlats[1]) try: refl39 = Calculator(self.info['satname'] + self.info['satnumber'], self.info['instrument_name'], self.name) except NameError: LOG.warning("pyspectral missing!") return return refl39.reflectance_from_tbs(sun_zenith, self.data, tb11, tb13_4) def __cmp__(self, ch2, key=0): if(isinstance(ch2, str)): return cmp(self.name, ch2) elif(ch2.name is not None and self.name is not None and ch2.name[0] == "_" and self.name[0] != "_"): return -1 elif(ch2.name is not None and self.name is not None and ch2.name[0] != "_" and self.name[0] == "_"): return 1 else: res = cmp(abs(self.wavelength_range[1] - key), abs(ch2.wavelength_range[1] - key)) if res == 0: return cmp(self.name, ch2.name) else: return res def __str__(self): if self.shape is not None: return ("'%s: (%.3f,%.3f,%.3f)μm, shape %s, resolution %sm'" % (self.name, self.wavelength_range[0], self.wavelength_range[1], self.wavelength_range[2], self.shape, self.resolution)) else: return ("'%s: (%.3f,%.3f,%.3f)μm, resolution %sm, not loaded'" % (self.name, self.wavelength_range[0], self.wavelength_range[1], self.wavelength_range[2], self.resolution)) def is_loaded(self): """Tells if the channel contains loaded data. """ return self._data is not None def check_range(self, min_range=1.0): """Check that the data of the channels has a definition domain broader than *min_range* and return the data, otherwise return zeros. 
""" if not self.is_loaded(): raise ValueError("Cannot check range of an non-loaded channel") if not isinstance(min_range, (float, int)): raise TypeError("Min_range must be a single number.") if isinstance(self._data, np.ma.core.MaskedArray): if self._data.mask.all(): return self._data if((self._data.max() - self._data.min()) < min_range): return np.ma.zeros(self.shape) else: return self._data def show(self): """Display the channel as an image. """ if not self.is_loaded(): raise ValueError("Channel not loaded, cannot display.") from PIL import Image as pil data = ((self._data - self._data.min()) * 255.0 / (self._data.max() - self._data.min())) if isinstance(data, np.ma.core.MaskedArray): img = pil.fromarray(np.array(data.filled(0), np.uint8)) else: img = pil.fromarray(np.array(data, np.uint8)) img.show() def as_image(self, stretched=True): """Return the channel as a :class:`mpop.imageo.geo_image.GeoImage` object. The *stretched* argument set to False allows the data to remain untouched (as opposed to crude stretched by default to obtain the same output as :meth:`show`). """ from mpop.imageo.geo_image import GeoImage img = GeoImage(self._data, self.area, None) if stretched: img.stretch("crude") return img def project(self, coverage_instance): """Make a projected copy of the current channel using the given *coverage_instance*. See also the :mod:`mpop.projector` module. """ res = Channel(name=self.name, resolution=self.resolution, wavelength_range=self.wavelength_range, data=None, calibration_unit=self.unit) res.area = coverage_instance.out_area res.info = self.info if hasattr(self, 'palette'): # UH, new res.palette = self.palette # UH, new if self.is_loaded(): LOG.info("Projecting channel %s (%fμm)..." % (self.name, self.wavelength_range[1])) import pyresample if (hasattr(coverage_instance, 'in_area') and isinstance(coverage_instance.in_area, pyresample.geometry.SwathDefinition) and hasattr(coverage_instance.in_area.lats, 'shape') and coverage_instance.in_area.lats.shape != self._data.shape): raise GeolocationIncompleteError("Lons and lats doesn't match data! " + "Data can't be re-projected unless " + "each pixel of the swath has a " + "geo-location atached to it.") data = coverage_instance.project_array(self._data) res.data = data return res else: raise NotLoadedError("Can't project, channel %s (%fμm) not loaded." % (self.name, self.wavelength_range[1])) def get_data(self): """Getter for channel data. """ return self._data def set_data(self, data): """Setter for channel data. """ if data is None: del self._data self._data = None elif isinstance(data, (np.ndarray, np.ma.core.MaskedArray)): self._data = data else: raise TypeError("Data must be a numpy (masked) array.") data = property(get_data, set_data) @property def shape(self): """Shape of the channel. """ if self.data is None: return None else: return self.data.shape def sunzen_corr(self, time_slot, lonlats=None, limit=80., mode='cos', sunmask=False): '''Perform Sun zenith angle correction for the channel at *time_slot* (datetime.datetime() object) and return the corrected channel. The parameter *limit* can be used to set the maximum zenith angle for which the correction is calculated. For larger angles, the correction is the same as at the *limit* (default: 80.0 degrees). Coordinate values can be given as a 2-tuple or a two-element list *lonlats* of numpy arrays; if None, the coordinates will be read from the channel data. Parameter *mode* is a placeholder for other possible illumination corrections. 
The name of the new channel will be *original_chan.name+'_SZC'*, e.g. "VIS006_SZC". This name is also stored to the info dictionary of the originating channel. ''' if self.info.get('sun_zen_correction_applied'): LOG.debug("Sun zenith correction already applied, skipping") return self import mpop.tools try: from pyorbital import astronomy except ImportError: LOG.warning("Could not load pyorbital.astronomy") return None if lonlats is None or len(lonlats) != 2: # Read coordinates LOG.debug("No valid coordinates given, reading from the " "channel data") lons, lats = self.area.get_lonlats() else: lons, lats = lonlats # Calculate Sun zenith angles and the cosine cos_zen = astronomy.cos_zen(time_slot, lons, lats) # Copy the channel new_ch = copy.deepcopy(self) # Set the name new_ch.name += '_SZC' if mode == 'cos': new_ch.data = mpop.tools.sunzen_corr_cos(new_ch.data, cos_zen, limit=limit) else: # Placeholder for other correction methods pass # Add information about the corrected version to original # channel self.info["sun_zen_corrected"] = self.name + '_SZC' if sunmask: if not isinstance(sunmask, (float, int)): sunmask = 90. cos_limit = np.cos(np.radians(sunmask)) LOG.debug("Masking out data where sun-zenith " + "is greater than %f deg", sunmask) LOG.debug("cos_limit = %f", cos_limit) # Mask out data where the sun elevation is below a threshold: new_ch.data = np.ma.masked_where( cos_zen < cos_limit, new_ch.data, copy=False) new_ch.info["sun_zen_correction_applied"] = True return new_ch def get_viewing_geometry(self, orbital, time_slot, altitude=None): '''Calculates the azimuth and elevation angle as seen by the observer at the position of the current area pixel. inputs: orbital an orbital object defined by the tle file (see pyorbital.orbital import Orbital or mpop/scene.py get_oribtal) time_slot time object specifying the observation time altitude optional: altitude of the observer above the earth ellipsoid outputs: azi azimuth viewing angle in degree (south is 0, counting clockwise) ele elevation viewing angle in degree (zenith is 90, horizon is 0) ''' try: from pyorbital.orbital import Orbital except ImportError: LOG.warning("Could not load pyorbital.orbital.Orbital") return None try: from pyorbital import tlefile except ImportError: LOG.warning("Could not load pyorbital.tlefile") return None (lons, lats) = self.area.get_lonlats() # Calculate observer azimuth and elevation if altitude == None: altitude = np.zeros(lons.shape) azi, ele = orbital.get_observer_look(time_slot, lons, lats, altitude) return (azi, ele) def vinc_vect(phi, lembda, alpha, s, f=None, a=None, degree=True): """ Vincenty's Direct formula Returns the lat and long of projected point and reverse azimuth given a reference point and a distance and azimuth to project. lats, longs and azimuths are passed in degrees or radians, depending on *degree*. Keyword arguments: phi Latitude in degree/radians lembda Longitude in degree/radians alpha Geodetic azimuth in degree/radians s Ellipsoidal distance in meters f WGS84 parameter a WGS84 parameter degree Boolean if in/out values are in degree or radians.
Default is in degree Returns: (phiout, lembdaout, alphaout ) as a tuple """ if degree: phi = np.deg2rad(phi) lembda = np.deg2rad(lembda) alpha = np.deg2rad(alpha) if f is None: f = 1 / 298.257223563 if a is None: a = 6378137 two_pi = 2.0 * np.pi if isinstance(alpha, np.ndarray): alpha[alpha < 0.0] += two_pi alpha[alpha > two_pi] -= two_pi else: if alpha < 0.0: alpha = alpha + two_pi if (alpha > two_pi): alpha = alpha - two_pi """ alphama = np.ma.masked_less_equal(alphama, two_pi) alpha = alphama - two_pi alpha.mask = np.ma.nomask logger.debug(alpha) """ b = a * (1.0 - f) tan_u1 = (1 - f) * np.tan(phi) u_1 = np.arctan(tan_u1) sigma1 = np.arctan2(tan_u1, np.cos(alpha)) sinalpha = np.cos(u_1) * np.sin(alpha) cosalpha_sq = 1.0 - sinalpha * sinalpha u_2 = cosalpha_sq * (a * a - b * b) / (b * b) aa_ = 1.0 + (u_2 / 16384) * (4096 + u_2 * (-768 + u_2 * (320 - 175 * u_2))) bb_ = (u_2 / 1024) * (256 + u_2 * (-128 + u_2 * (74 - 47 * u_2))) # Starting with the approximation sigma = (s / (b * aa_)) last_sigma = 2.0 * sigma + 2.0 # something impossible # Iterate the following three equations # until there is no significant change in sigma # two_sigma_m , delta_sigma def iter_sigma(sigma, last_sigma, sigma1, s, b, aa_, bb_): while (abs((last_sigma - sigma) / sigma) > 1.0e-9): two_sigma_m = 2 * sigma1 + sigma delta_sigma = (bb_ * np.sin(sigma) * (np.cos(two_sigma_m) + (bb_ / 4) * (np.cos(sigma) * (-1 + 2 * np.power(np.cos(two_sigma_m), 2) - (bb_ / 6) * np.cos(two_sigma_m) * (-3 + 4 * np.power(np.sin(sigma), 2)) * (-3 + 4 * np.power(np.cos(two_sigma_m), 2)))))) last_sigma = sigma sigma = (s / (b * aa_)) + delta_sigma return(sigma, two_sigma_m) # Check for array inputs arraybool = [isinstance(ele, np.ndarray) for ele in (sigma, last_sigma, sigma1)] LOG.debug("Sigma Arrays?: " + str(arraybool)) if all(arraybool): viter_sigma = np.vectorize(iter_sigma) sigma, two_sigma_m = viter_sigma(sigma, last_sigma, sigma1, s, b, aa_, bb_) else: sigma, two_sigma_m = iter_sigma(sigma, last_sigma, sigma1, s, b, aa_, bb_) phiout = np.arctan2((np.sin(u_1) * np.cos(sigma) + np.cos(u_1) * np.sin(sigma) * np.cos(alpha)), ((1 - f) * np.sqrt(np.power(sinalpha, 2) + pow(np.sin(u_1) * np.sin(sigma) - np.cos(u_1) * np.cos(sigma) * np.cos(alpha), 2)))) deltalembda = np.arctan2((np.sin(sigma) * np.sin(alpha)), (np.cos(u_1) * np.cos(sigma) - np.sin(u_1) * np.sin(sigma) * np.cos(alpha))) cc_ = (f / 16) * cosalpha_sq * (4 + f * (4 - 3 * cosalpha_sq)) omega = (deltalembda - (1 - cc_) * f * sinalpha * (sigma + cc_ * np.sin(sigma) * (np.cos(two_sigma_m) + cc_ * np.cos(sigma) * (-1 + 2 * np.power(np.cos(two_sigma_m), 2))))) lembdaout = lembda + omega alphaout = np.arctan2(sinalpha, (-np.sin(u_1) * np.sin(sigma) + np.cos(u_1) * np.cos(sigma) * np.cos(alpha))) alphaout = alphaout + two_pi / 2.0 if isinstance(alphaout, np.ndarray): alphaout[alphaout < 0.0] += two_pi alphaout[alphaout > two_pi] -= two_pi else: if alphaout < 0.0: alphaout = alphaout + two_pi if (alphaout > two_pi): alphaout = alphaout - two_pi if degree: phiout = np.rad2deg(phiout) lembdaout = np.rad2deg(lembdaout) alphaout = np.rad2deg(alphaout) return(phiout, lembdaout, alphaout) def parallax_corr(self, cth=None, time_slot=None, orbital=None, azi=None, ele=None, fill="False"): '''Perform the parallax correction for the channel at *time_slot* (datetime.datetime() object), assuming the cloud top height cth and the viewing geometry given by the satellite orbital "orbital" and return the corrected channel.
Authors: Ulrich Hamann (MeteoSwiss), Thomas Leppelt (DWD) Example calls: * calling this function (using orbital and time_slot) orbital = data.get_oribtal() data["VIS006"].parallax_corr(cth=data["CTTH"].height, time_slot=data.time_slot, orbital=orbital) * calling this function (using viewing geometry) orbital = data.get_oribtal() (azi, ele) = get_viewing_geometry(self, orbital, time_slot) data["VIS006"].parallax_corr(cth=data["CTTH"].height, azi=azi, ele=ele) Optional input: cth The parameter cth is the cloud top height (or the altitude of the object that should be shifted). cth must have the same size and projection as the channel orbital an orbital object defined by the tle file (see pyorbital.orbital import Orbital or mpop/scene.py get_oribtal) azi azimuth viewing angle in degree (south is 0, counting clockwise) e.g. as given by self.get_viewing_geometry ele elevation viewing angle in degree (zenith is 90, horizon is 0) e.g. as given by self.get_viewing_geometry fill specifies the interpolation method to fill the gaps (basically areas behind the cloud that can't be observed by the satellite instrument) "False" (default): no interpolation, gaps are np.nan values and mask is set accordingly "nearest": fill gaps with nearest neighbour "bilinear": use scipy.interpolate.griddata with linear interpolation to fill the gaps output: parallax corrected channel the content of the channel will be parallax corrected. The name of the new channel will be *original_chan.name+'_PC'*, e.g. "IR_108_PC". This name is also stored to the info dictionary of the originating channel. ''' # get time_slot from info, if present if time_slot == None: if "time" in self.info.keys(): time_slot = self.info["time"] if azi == None or ele == None: if time_slot == None or orbital == None: print "*** Error in parallax_corr (mpop/channel.py)" print " parallax_corr needs either time_slot and orbital" print " data[\"IR_108\"].parallax_corr(data[\"CTTH\"].height, time_slot=data.time_slot, orbital=orbital)" print " or the azimuth and elevation angle" print " data[\"IR_108\"].parallax_corr(data[\"CTTH\"].height, azi=azi, ele=ele)" quit() else: print ( "... calculate viewing geometry (orbit and time are given)") (azi, ele) = self.get_viewing_geometry(orbital, time_slot) else: print ("... azimuth and elevation angle given") # mask the cloud top height cth_ = np.ma.masked_where(cth < 0, cth, copy=False) # Elevation displacement dz = cth_ / np.tan(np.deg2rad(ele)) # Create the new channel (by copying) and initialize the data with None # values new_ch = copy.deepcopy(self) new_ch.data[:, :] = np.nan # Set the name new_ch.name += '_PC' # Add information about the corrected version to original channel self.info["parallax_corrected"] = self.name + '_PC' # get projection coordinates in meter (proj_x, proj_y) = self.area.get_proj_coords() print "... calculate parallax shift" # shifting pixels according to parallax correction # shift West-East in m # ??? sign correct ???
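# (clarifying sketch of the geometry, not from the original authors: with
#  azi counted clockwise from south as documented in get_viewing_geometry,
#  a cloud top at height cth seen under elevation ele appears displaced by
#  dz = cth / tan(ele) along the viewing azimuth, so the corrected position
#  shifts the pixel back by the sin/cos components of dz computed below)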
proj_x_pc = proj_x - np.sin(np.deg2rad(azi)) * dz # shift North-South in m proj_y_pc = proj_y + np.cos(np.deg2rad(azi)) * dz # get indices for the pixels for the original position (y, x) = self.area.get_xy_from_proj_coords(proj_x, proj_y) # comment: might be done more efficiently with meshgrid # >>> x = np.arange(-5.01, 5.01, 0.25) # >>> y = np.arange(-5.01, 5.01, 0.25) # >>> xx, yy = np.meshgrid(x, y) # get indices for the pixels at the parallax corrected position (y_pc, x_pc) = self.area.get_xy_from_proj_coords(proj_x_pc, proj_y_pc) # copy cloud free satellite pixels (surface observations) ind = np.where(cth_.mask == True) new_ch.data[x[ind], y[ind]] = self.data[x[ind], y[ind]] print "... copy data to parallax corrected position" # copy cloudy pixels to the new position modified by the parallax shift ind = np.where(x_pc.mask == False) new_ch.data[x_pc[ind], y_pc[ind]] = self.data[x[ind], y[ind]] # Mask out data gaps (areas behind the clouds) new_ch.data = np.ma.masked_where( np.isnan(new_ch.data), new_ch.data, copy=False) if fill.lower() == "false": return new_ch elif fill == "nearest": print "*** fill missing values with nearest neighbour" from scipy.ndimage import distance_transform_edt invalid = np.isnan(new_ch.data) ind = distance_transform_edt( invalid, return_distances=False, return_indices=True) new_ch.data = new_ch.data[tuple(ind)] elif fill == "bilinear": # this function does not interpolate at the outer boundaries from scipy.interpolate import griddata ind = np.where(new_ch.data.mask == False) points = np.transpose(np.append([y[ind]], [x[ind]], axis=0)) values = new_ch.data[ind] new_ch.data = griddata(points, values, (y, x), method='linear') # fill the remaining pixels with nearest neighbour from scipy.ndimage import distance_transform_edt invalid = np.isnan(new_ch.data) ind = distance_transform_edt( invalid, return_distances=False, return_indices=True) new_ch.data = new_ch.data[tuple(ind)] else: print "*** Error in parallax_corr (channel.py)" print " unknown gap fill method ", fill quit() return new_ch def viewzen_corr(self, view_zen_angle_data): """Apply atmospheric correction on a copy of this channel data using the given satellite zenith angle data of the same shape. Returns a new channel containing the corrected data. The name of the new channel will be *original_chan.name+'_VZC'*, e.g. "IR108_VZC". This name is also stored to the info dictionary of the originating channel. """ # copy channel data which will be corrected in place chn_data = self.data.copy() CHUNK_SZ = 500 for start in xrange(0, chn_data.shape[1], CHUNK_SZ): # apply correction on channel data vz_corr(chn_data[:, start:start + CHUNK_SZ], view_zen_angle_data[:, start:start + CHUNK_SZ]) new_ch = Channel(name=self.name + "_VZC", resolution=self.resolution, wavelength_range=self.wavelength_range, data=chn_data, calibration_unit=self.unit) # Add information about the corrected version to original channel self.info["view_zen_corrected"] = self.name + '_VZC' return new_ch # Arithmetic operations on channels.
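# A minimal sketch of how these operators behave (each returns a fresh
# Channel named "new" wrapping the combined numpy array; metadata such as
# wavelength_range or the area is not propagated):
#
#   >>> import numpy as np
#   >>> ch = Channel(name="IR_108", data=np.ma.array([270.0, 280.0]))
#   >>> (ch - 273.15).data   # -> masked array [-3.15, 6.85], i.e. degrees Celsius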
def __pow__(self, other): return Channel(name="new", data=self.data ** other) def __rpow__(self, other): return Channel(name="new", data=other ** self.data) def __mul__(self, other): return Channel(name="new", data=self.data * other) def __rmul__(self, other): return Channel(name="new", data=self.data * other) def __add__(self, other): return Channel(name="new", data=self.data + other) def __radd__(self, other): return Channel(name="new", data=self.data + other) def __sub__(self, other): return Channel(name="new", data=self.data - other) def __rsub__(self, other): return Channel(name="new", data=other - self.data) def __div__(self, other): return Channel(name="new", data=self.data / other) def __rdiv__(self, other): return Channel(name="new", data=other / self.data) def __neg__(self): return Channel(name="new", data=-self.data) def __abs__(self): return Channel(name="new", data=abs(self.data)) mpop-1.5.0/mpop/compositer.py000066400000000000000000000042301317160620000161740ustar00rootroot00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright (c) 2010, 2011, 2012, 2014. # SMHI, # Folkborgsvägen 1, # Norrköping, # Sweden # Author(s): # Martin Raspaud # Lars Ørum Rasmussen # This file is part of mpop. # mpop is free software: you can redistribute it and/or modify it under the # terms of the GNU General Public License as published by the Free Software # Foundation, either version 3 of the License, or (at your option) any later # version. # mpop is distributed in the hope that it will be useful, but WITHOUT ANY # WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR # A PARTICULAR PURPOSE. See the GNU General Public License for more details. # You should have received a copy of the GNU General Public License along with # mpop. If not, see . """This module defines the generic Compositer class that builds rgb composites from satellite channels. """ import types class Compositer(object): def __init__(self, scene): self._data_holder = scene @property def area(self): """Get the current area. """ return self._data_holder.area @property def time_slot(self): """Get the current time slot. """ return self._data_holder.time_slot @property def fullname(self): """Get the full satellite name (ex 'meteosat10') of the scene. """ return self._data_holder.fullname def __getitem__(self, *args, **kwargs): return self._data_holder.__getitem__(*args, **kwargs) def check_channels(self, *args): self._data_holder.check_channels(*args) @classmethod def remove_attribute(cls, name): """Remove an attribute from the class. """ return delattr(cls, name) @classmethod def add_method(cls, func): """Add a method to the class. """ return setattr(cls, func.__name__, func) def add_method_to_instance(self, func): """Add a method to the instance.
""" return setattr(self, func.__name__, types.MethodType(func, self.__class__)) mpop-1.5.0/mpop/imageo/000077500000000000000000000000001317160620000147005ustar00rootroot00000000000000mpop-1.5.0/mpop/imageo/HRWimage.py000066400000000000000000000723471317160620000167320ustar00rootroot00000000000000import matplotlib as mpl # this HAS TO BE the very first lines (before any other matplotlib functions are imported) mpl.use('Agg') # this HAS TO BE the very first lines (before any other matplotlib functions are imported) from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas from matplotlib.figure import Figure import matplotlib.pyplot as plt from matplotlib import rcParams from PIL import Image as PIL_Image from TRTimage import fig2data, fig2img from pylab import text as pylab_text from numpy import sin, cos, radians, where, nonzero, transpose, arange, append, meshgrid, mgrid, empty, isnan, nan, percentile from numpy import sum as np_sum from scipy.interpolate import griddata # interp2d from matplotlib.patches import Rectangle from matplotlib.colors import Normalize def prepare_figure(obj_area): # create new figure #fig = Figure() # old version, does not work for the stream plot ## Turn interactive plotting off #plt.ioff() fig = plt.figure() # needs a DISPLAY environment variable (simulated here with mpl.use('Agg')) # define size of image nx = obj_area.x_size ny = obj_area.y_size # canvas figure canvas = FigureCanvas(fig) # get dots per inch of the screen DPI = fig.get_dpi() # print "DPI", DPI fig.set_size_inches(nx/float(DPI),ny/float(DPI)) # set fonts to bold plt.rc('font', weight='bold') # get axis object ax = fig.add_subplot(111, aspect='equal') ## eliminates margins totally fig.subplots_adjust(left=0.0,right=1.0,bottom=0.0,top=1.0, wspace=0, hspace=0) # set limits of the axis ax.set_xlim(0, nx) ax.set_ylim(0, ny) # set transparent backgroud fig.patch.set_alpha(0.0) # transparent outside of diagram ax.set_axis_bgcolor([1,0,0,0]) # transparent color inside diagram return fig, ax def HRWimage( HRW_data, obj_area, hrw_channels=None, min_correlation=None, cloud_type=None, style='barbs', \ barb_length=None, color_mode='channel', pivot='middle', legend=True, legend_loc=3): """Create a PIL image from high resolution wind data required input: HRW data [HRW_class]: HRW_class instant containing the data, see mpop/satin/nwcsaf_hrw_hdf.py obj_area [area_class]: instant of area class, returned by area_def optional input hrw_channels [string array]: giving the channels that are used derive the HRW vectors e.g. hrw_channels['HRV','WV_073'] min_correlation [int]: minimum correlation of tracking, if below arrow is not shown cloud_type [int array]: cloud types of the wind vectors, e.g. cloud_type=[8,10,11] style [string]: different styles of plotting style='barbs' or style='5min_displacement' or style='15min_displacement' color_mode [string]: choose color of the wind symbols, possible choises: color_mode='channel' -> one color per SEVIRI channel used to derive HRW color_mode='pressure' -> colorcoded cloud top pressure color_mode='temperature' -> colorcoded cloud top temperature color_mode='cloud_type' -> NWC-SAF cloud types color_mode='correlation' 80 ... 100 color_mode='conf_nwp' 70 ... 100 color_mode='conf_no_nwp' 70 ... 100 pivot [string]: position of the barb, e.g. 
pivot='middle' == center of barb at origin legend [True or False] : show legend or not legend_loc [string or int]: location of the legend upper right 1 upper left 2 lower left 3 lower right 4 right 5 center left 6 center right 7 lower center 8 upper center 9 center 10 best """ #if min_correlation != None: # print " filter for min_correlation = ", min_correlation # inds = where(HRW_data.correlation > min_correlation) # HRW_data = HRW_data.subset(inds) print "... create HRWimage, color_mode = ", color_mode # get an empty figure with transparent background, no axis and no margins outside the diagram fig, ax = prepare_figure(obj_area) # define arrow properties head_width = 0.006 * min(obj_area.x_size,obj_area.y_size) head_length = 2 * head_width m_per_s_to_knots = 1.944 #barb_length = 0.008 * min(obj_area.x_size,obj_area.y_size) if barb_length == None: n_winds = len(HRW_data.wind_id) if n_winds < 300: barb_length = 5.68 elif n_winds < 500: barb_length = 5.43 elif n_winds < 700: barb_length = 5.18 elif n_winds < 900: barb_length = 4.68 else: barb_length = 4.00 print "barb_length", barb_length if color_mode == 'channel': classes = ('HRV', 'VIS008 ', 'WV_062 ', 'WV_073 ', 'IR_120 ') colors = ['mediumorchid', 'red', 'limegreen', 'darkgreen', 'darkturquoise'] elif color_mode == 'pressure': classes = ['<200hPa', '200-300hPa','300-400hPa','400-500hPa','500-600hPa','600-700hPa','700-800hPa', '800-900hPa','>900hPa'] colors = ['darksalmon', 'red' ,'darkorange','yellow' ,'lime' ,'seagreen', 'deepskyblue','blue', 'mediumorchid'] classes = tuple(['CTP '+cl for cl in classes]) elif color_mode == 'cloud_type' or color_mode == 'cloudtype': classes=['non-processed','cloud free land', 'cloud free sea', 'land snow', 'sea ice',\ 'very low cum.', 'very low', 'low cum.', 'low', 'med cum.', 'med', 'high cum.', 'high', 'very high cum.', 'very high', \ 'sem. thin', 'sem. med.', 'sem. thick', 'sem. above', 'broken', 'undefined'] colors = empty( (len(classes),3), dtype=int ) colors[ 0,:] = [100, 100, 100] colors[ 1,:] = [ 0, 120, 0] colors[ 2,:] = [ 0, 0, 0] colors[ 3,:] = [250, 190, 250] colors[ 4,:] = [220, 160, 220] colors[ 5,:] = [255, 150, 0] colors[ 6,:] = [255, 100, 0] colors[ 7,:] = [255, 220, 0] colors[ 8,:] = [255, 180, 0] colors[ 9,:] = [255, 255, 140] colors[10,:] = [240, 240, 0] colors[11,:] = [250, 240, 200] colors[12,:] = [215, 215, 150] colors[13,:] = [255, 255, 255] colors[14,:] = [230, 230, 230] colors[15,:] = [ 0, 80, 215] colors[16,:] = [ 0, 180, 230] colors[17,:] = [ 0, 240, 240] colors[18,:] = [ 90, 200, 160] colors[19,:] = [200, 0, 200] colors[20,:] = [ 95, 60, 30] colors = colors/255.
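# matplotlib expects RGB components as floats in [0, 1], hence the
# rescaling of the 0-255 integer triplets defined above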
elif color_mode in ['correlation','conf_nwp','conf_no_nwp']: classes = ['<70', '<75', '<80', '<85', '<90', '<95', '>95' ] colors = ['indigo', 'darkred', 'red','darkorange','gold', 'lime', 'green'] classes = tuple([color_mode+' '+cl for cl in classes]) else: print "*** Error in HRWimage (mpop/imageo/HRWimage.py)" print " unknown color_mode" quit() for wid in range(len(HRW_data.wind_id)): if color_mode == 'channel': if HRW_data.channel[wid].find('HRV') != -1: # HRV barbcolor = colors[0] elif HRW_data.channel[wid].find('VIS008') != -1: # 0.8 micro m barbcolor = colors[1] elif HRW_data.channel[wid].find('WV_062') != -1: # 6.2 micro m barbcolor = colors[2] elif HRW_data.channel[wid].find('WV_073') != -1: # 7.3 micro m barbcolor = colors[3] elif HRW_data.channel[wid].find('IR_120') != -1: # 12.0 micro m barbcolor = colors[4] elif color_mode == 'pressure': if HRW_data.pressure[wid] < 20000: barbcolor = colors[0] elif HRW_data.pressure[wid] < 30000: barbcolor = colors[1] elif HRW_data.pressure[wid] < 40000: barbcolor = colors[2] elif HRW_data.pressure[wid] < 50000: barbcolor = colors[3] elif HRW_data.pressure[wid] < 60000: barbcolor = colors[4] elif HRW_data.pressure[wid] < 70000: barbcolor = colors[5] elif HRW_data.pressure[wid] < 80000: barbcolor = colors[6] elif HRW_data.pressure[wid] < 90000: barbcolor = colors[7] else: barbcolor = colors[8] elif color_mode == 'cloud_type' or color_mode == 'cloudtype': barbcolor = list(colors[HRW_data.cloud_type[wid], :]) elif color_mode in ['correlation','conf_nwp','conf_no_nwp']: if color_mode == 'correlation': cdata = HRW_data.correlation elif color_mode == 'conf_nwp': cdata = HRW_data.conf_nwp elif color_mode == 'conf_no_nwp': cdata = HRW_data.conf_no_nwp if cdata[wid] < 70: barbcolor = colors[0] elif cdata[wid] < 75: barbcolor = colors[1] elif cdata[wid] < 80: barbcolor = colors[2] elif cdata[wid] < 85: barbcolor = colors[3] elif cdata[wid] < 90: barbcolor = colors[4] elif cdata[wid] < 95: barbcolor = colors[5] else: barbcolor = colors[6] else: print "*** Error in HRWimage (mpop/imageo/HRWimage.py)" print " unknown color_mode" quit() x0, y0 = obj_area.get_xy_from_lonlat( HRW_data.lon[wid], HRW_data.lat[wid], outside_error=False) #, return_int=True u = HRW_data.wind_speed[wid] * -1 * sin(radians(HRW_data.wind_direction[wid])) v = HRW_data.wind_speed[wid] * -1 * cos(radians(HRW_data.wind_direction[wid])) #print '%6s %3d %10.7f %10.7f %7.2f %7.1f %8.1f %10s' % (HRW_data.channel[wid], HRW_data.wind_id[wid], \ # HRW_data.lon[wid], HRW_data.lat[wid], \ # HRW_data.wind_speed[wid]*m_per_s_to_knots, \ # HRW_data.wind_direction[wid], HRW_data.pressure[wid], barbcolor) if style == 'barbs': u = HRW_data.wind_speed[wid] * -1 * sin(radians(HRW_data.wind_direction[wid])) * m_per_s_to_knots v = HRW_data.wind_speed[wid] * -1 * cos(radians(HRW_data.wind_direction[wid])) * m_per_s_to_knots ax.barbs(x0, obj_area.y_size - y0, u, v, length = barb_length, pivot='middle', barbcolor=barbcolor) elif style == '5min_displacement' or style == '15min_displacement': if style == '5min_displacement': t_in_s = 5*60 elif style == '15min_displacement': t_in_s = 15*60 dx = u * t_in_s / obj_area.pixel_size_x dy = v * t_in_s / obj_area.pixel_size_y ax.arrow(x0, y0, dx, dy, head_width = head_width, head_length = head_length, fc=barbcolor, ec=barbcolor) if legend: rcParams['legend.handlelength'] = 0 rcParams['legend.numpoints'] = 1 # create blank rectangle rec = Rectangle((0, 0), 0, 0, fc="w", fill=False, edgecolor='none', linewidth=0) ##
*fontsize*: [size in points | 'xx-small' | 'x-small' | 'small' | ## 'medium' | 'large' | 'x-large' | 'xx-large'] alpha=1.0 bbox={'facecolor':'white', 'alpha':alpha, 'pad':10} print "... add legend: color is a function of ", color_mode recs = empty( len(classes), dtype=object) recs[:] = rec #if color_mode == 'pressure': # recs = [rec, rec, rec, rec, rec, rec, rec, rec, rec] #if color_mode == 'channel': # recs = [rec, rec, rec, rec, rec] #if color_mode in ['correlation','conf_nwp','conf_no_nwp']: # recs = [rec, rec, rec, rec, rec, rec, rec] size=12 if color_mode=='cloud_type': size=10 leg = ax.legend(recs, classes, loc=legend_loc, prop={'size':size}) for color,text in zip(colors,leg.get_texts()): text.set_color(color) return fig2img ( fig ) # ---------------------------------------------------------------------------------------------------------- # ---------------------------------------------------------------------------------------------------------- # ---------------------------------------------------------------------------------------------------------- def HRWstreamplot( u2d, v2d, obj_area, interpol_method, color_mode='speed', density=(3,3), linewidth_mode="scaled", linewidth_max=2.5, \ min_correlation=None, legend=True, legend_loc=3, vmax=None, colorbar=True, fontcolor='w'): """ Create a streamplot image in PIL format of a 2d wind field color_mode [string]: choose color of the stream lines, color_mode='speed' -> wind speed color_mode='u' -> u-wind component color_mode='v' -> v-wind component density [2 int tuple] density of stream lines, default density = (3,3) linewidth_mode [string] "scaled": scaled by the color_mode data "const": always linewidth_max """ ## get an empty figure with transparent background, no axis and no margins outside the diagram fig, ax = prepare_figure(obj_area) #print dir(ax) # check whether there have been enough observations (or whether the data is all NaN) if isnan(np_sum(u2d)): print "... there are not enough observations" ax.text(0.95, 0.01, 'currently not enough observations', verticalalignment='bottom', horizontalalignment='right', transform=ax.transAxes, color='red', fontsize=15) else: print "there is enough data, interpolation method: ", interpol_method # create grid for the wind data [nx, ny] = u2d.shape # for ccs4 this is (640, 710) Y, X = mgrid[nx-1:-1:-1, 0:ny] # watch out for Y->nx and X->ny #print "X.shape ", Y.shape #print Y[:,0] print " calculate color data ", color_mode if color_mode == 'speed': from numpy import sqrt cdata = sqrt(u2d*u2d + v2d*v2d) elif color_mode == 'u': cdata = u2d elif color_mode == 'v': cdata = v2d else: print "*** Error in HRWstreamplot (mpop/imageo/HRWimage.py)" print " unknown color_mode" quit() print " calculate linewidth ", linewidth_mode if linewidth_mode == "const": linewidth = linewidth_max elif linewidth_mode == "scaled": if vmax != None: linewidth = 1 + linewidth_max*(cdata) / vmax else: linewidth = 1 + linewidth_max*(cdata) / cdata.max() else: print "*** Error in HRWstreamplot (mpop/imageo/HRWimage.py)" print " unknown linewidth_mode" quit() print "...
data_max =", cdata.max() ,", vmax=", vmax if vmax != None: norm = Normalize(vmin=0, vmax=vmax) else: norm = Normalize(vmin=0, vmax=cdata.max()) #optional arguments of streamplot # density=1, linewidth=None, color=None, # cmap=None, norm=None, arrowsize=1, arrowstyle='-|>', # minlength=0.1, transform=None, zorder=1, start_points=None, INTEGRATOR='RK4',density=(10,10) plt.streamplot(X, Y, u2d, v2d, color=cdata, linewidth=linewidth, cmap=plt.cm.rainbow, density=density, norm=norm) if colorbar: colorbar_ax = fig.add_axes([0.9, 0.1, 0.05, 0.8]) cbar = plt.colorbar(cax=colorbar_ax) plt.setp(plt.getp(cbar.ax.axes, 'yticklabels'), color=fontcolor) xlabel = cbar.ax.set_xlabel('m/s', weight='bold') #get the title property handler plt.setp(xlabel, color=fontcolor) #plt.savefig("test_streamplot.png") # add information about interpolation method ax.text(0.95, 0.01, interpol_method, verticalalignment='bottom', horizontalalignment='right', transform=ax.transAxes, color='green', fontsize=15) return fig2img ( fig ) # ---------------------------------------------------------------------------------------------------------- # ---------------------------------------------------------------------------------------------------------- # ---------------------------------------------------------------------------------------------------------- def fill_with_closest_pixel(data, invalid=None): """ Replace the value of invalid 'data' cells (indicated by 'invalid') by the value of the nearest valid data cell Input: data: numpy array of any dimension invalid: a binary array of same shape as 'data'. True cells set where data value should be replaced. If None (default), use: invalid = np.isnan(data) Output: Return a filled array. """ from numpy import isnan from scipy.ndimage import distance_transform_edt if invalid is None: invalid = isnan(data) ind = distance_transform_edt(invalid, return_distances=False, return_indices=True) return data[tuple(ind)] # ---------------------------------------------------------------------------------------------------------- # ---------------------------------------------------------------------------------------------------------- # ---------------------------------------------------------------------------------------------------------- def HRWscatterplot( HRW_data, title='', hrw_channels=None, min_correlation=None, cloud_type=None, color_mode='direction'): ## get a empty figure with transparent background, no axis and no margins outside the diagram # fig = plt.figure() import pylab fig = pylab.figure() ax = plt.subplot(111) ax.set_yscale("log", nonposx='clip') plt.scatter(HRW_data.wind_speed, HRW_data.pressure/100, s=5, c=HRW_data.wind_direction, alpha=0.5, edgecolor='none') pylab.title(title) pylab.ylim([1000,100]) plt.yticks([1000,900,800,700,600,500,400,300,200,100], ['1000','900','800','700','600','500','400','300','200','100'], rotation='horizontal') p = percentile(HRW_data.wind_speed, 95) vmax = (round(p/10)+1)*10 print "... 
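# --- illustrative sketch (not part of the original module) -----------------
# The x-axis limit above is the 95th percentile of the wind speeds, rounded
# up to the next full 10 m/s, so a few outliers do not stretch the plot:
from numpy import percentile
demo_speeds = [3., 8., 12., 37.3]      # hypothetical wind speeds
p_demo = percentile(demo_speeds, 95)   # -> 33.5
vmax_demo = (round(p_demo / 10) + 1) * 10   # -> 40.0
# ----------------------------------------------------------------------------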
vmax:", vmax plt.plot([0,vmax], [680,680], color='g') plt.plot([0,vmax], [440,440], color='b') pylab.xlim([0,vmax]) ax.set_xlabel('HRW [m/s]') ax.set_ylabel('p [hPa]') cbar = plt.colorbar() cbar.ax.set_ylabel('wind direction') return fig2img ( fig ) # ---------------------------------------------------------------------------------------------------------- # ---------------------------------------------------------------------------------------------------------- # ---------------------------------------------------------------------------------------------------------- def HRW_2dfield( HRW_data, obj_area, interpol_method=None, hrw_channels=None, min_correlation=None, level=''): print "... calculate 2d wind field (HRW_2dfield)" if min_correlation != None: print " filter for min_correlation = ", min_correlation inds = where(HRW_data.correlation > min_correlation) HRW_data.subset(inds) xx, yy = obj_area.get_xy_from_lonlat( HRW_data.lon, HRW_data.lat, outside_error=False, return_int=False) #, return_int=True yy = obj_area.y_size - yy uu = - HRW_data.wind_speed * sin(radians(HRW_data.wind_direction)) vv = - HRW_data.wind_speed * cos(radians(HRW_data.wind_direction)) # get rid of all vectors outside of the field index = nonzero(xx) xx = xx[index] yy = yy[index] uu = uu[index] vv = vv[index] points = transpose(append([xx], [yy], axis=0)) #print type(uu), uu.shape #print type(points), points.shape #print points[0], yy[0], xx[0] #print uu[0] nx = obj_area.x_size ny = obj_area.y_size x2 = arange(nx) y2 = (ny-1) - arange(ny) grid_x, grid_y = meshgrid(x2, y2) if interpol_method == None: # we need at least 2 winds to interpolate if uu.size < 4: print "*** Warning, not wnough wind data available, n_winds = ", uu.size fake = empty(grid_x.shape) fake[:,:] = nan HRW_data.interpol_method = None return fake, fake elif uu.size < 50: interpol_method = "RBF" else: interpol_method = "linear + nearest" #interpol_method = "nearest" #interpol_method = "cubic + nearest" # might cause unrealistic overshoots #interpol_method = "kriging" #interpol_method = "..." print "... min windspeed (org data): ", HRW_data.wind_speed.min() print "... max windspeed (org data): ", HRW_data.wind_speed.max() for i_iteration in [0,1]: if interpol_method == "nearest": print '... fill with nearest neighbour' # griddata, see http://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.interpolate.griddata.html grid_u1x = griddata(points, uu, (grid_x, grid_y), method='nearest') grid_v1x = griddata(points, vv, (grid_x, grid_y), method='nearest') elif interpol_method == "RBF": print '... inter- and extrapolation using radial basis functions' # https://www.youtube.com/watch?v=_cJLVhdj0j4 print "... start Rbf" from scipy.interpolate import Rbf # rbfu = Rbf(xx, yy, uu, epsilon=0.1) # rbfu = Rbf(xx, yy, uu, epsilon=0.2) grid_u1x = rbfu(grid_x, grid_y) rbfv = Rbf(xx, yy, vv, epsilon=0.1) # grid_v1x = rbfv(grid_x, grid_y) print "... finish Rbf" # !very! slow for a large number of observations elif interpol_method == "linear + nearest" or interpol_method == "cubic + nearest": if interpol_method == "linear + nearest": print '... calculate linear interpolation' # griddata, see http://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.interpolate.griddata.html grid_u1 = griddata(points, uu, (grid_x, grid_y), method='linear') grid_v1 = griddata(points, vv, (grid_x, grid_y), method='linear') elif interpol_method == "cubic + nearest": # smoother, but can cause unrealistic overshoots print '... 
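# --- illustrative sketch (not part of the original module) -----------------
# The uu/vv components above follow the meteorological convention:
# wind_direction is the direction the wind comes FROM, hence both minus
# signs. `wind_uv` is a hypothetical helper:
from math import sin, cos, radians

def wind_uv(speed, direction_deg):
    u = -speed * sin(radians(direction_deg))   # eastward component
    v = -speed * cos(radians(direction_deg))   # northward component
    return u, v

# wind_uv(10., 270.) -> (10.0, ~0.0): wind from the west blows eastward
# ----------------------------------------------------------------------------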
calculate cubic interpolation' grid_u1 = griddata(points, uu, (grid_x, grid_y), method='cubic') grid_v1 = griddata(points, vv, (grid_x, grid_y), method='cubic') else: print "*** Error in mpop/imageo/HRWimage.py" print " unknown interpolation method: ", interpol_method quit() if 1==1: # use faster function to extrapolate with closest neighbour print "... fill outside area with closest value" grid_u1x = fill_with_closest_pixel(grid_u1, invalid=None) grid_v1x = fill_with_closest_pixel(grid_v1, invalid=None) else: # use griddata to extrapolate with closest neighbour points2 = transpose(append([grid_x.flatten()], [grid_y.flatten()], axis=0)) print type(grid_x.flatten()), grid_x.flatten().shape print type(points2), points2.shape mask = ~isnan(grid_v1.flatten()) inds = where(mask)[0] grid_u1x = griddata(points2[inds], grid_u1.flatten()[inds], (grid_x, grid_y), method='nearest') grid_v1x = griddata(points2[inds], grid_v1.flatten()[inds], (grid_x, grid_y), method='nearest') if 1==0: # add othermost points as additional data y_add = [0, 0, ny-1, ny-1] x_add = [0, nx-1, 0, nx-1] for (i,j) in zip(x_add,y_add): uu = append(uu, grid_u0[i,j]) vv = append(vv, grid_v0[i,j]) xx = append(xx, x_add) yy = append(yy, y_add) points = transpose(append([yy], [xx], axis=0)) print 'calc extent1' grid_u1e = griddata(points, uu, (grid_x, grid_y), method='linear') grid_v1e = griddata(points, vv, (grid_x, grid_y), method='linear') else: print "*** Error in mpop/imageo/HRWimage.py" print " unknown interpol_method", interpol_method quit() ##http://docs.scipy.org/doc/scipy/reference/tutorial/interpolate.html ##http://stackoverflow.com/questions/3526514/problem-with-2d-interpolation-in-scipy-non-rectangular-grid #print "SmoothBivariateSpline:" #from scipy.interpolate import SmoothBivariateSpline #fitu = SmoothBivariateSpline( xx, yy, uu, s=1000) # , kx=3, ky=3, s = smooth * z2sum m #from numpy import empty #grid_u_SBP = empty(grid_x.shape) #for i in range(0,nx-1): # starting upper right going down # for j in range(0,ny-1): # starting lower right going right # #print i,j # grid_u_SBP[j,i] = fitu(j,i) #grid_u_SBP = np.array([k.predict([x,y]) for x,y in zip(np.ravel(grid_x), np.ravel(grid_y))]) #grid_u_SBP = grid_u_SBP.reshape(grid_x.shape) ##print x2 ##print y2 #grid_u_SBP = fitu(x2,y2) ##print "grid_u_SBP.shape", grid_u_SBP.shape ###print grid_u_SBP #print "End SmoothBivariateSpline:" #print "bisplrep:" #from scipy import interpolate #tck = interpolate.bisplrep(xx, yy, uu) #grid_u_BSR = interpolate.bisplev(grid_x[:,0], grid_y[0,:], tck) #print grid_u_BSR.shape #print "bisplrep" #print "grid_v1x.shape", grid_v1x.shape extent=(0,nx,0,ny) origin='lower' origin='upper' origin=None # show different stages of 2d inter- and extra-polation if 1==0: print 'make matplotlib.pyplot' import matplotlib.pyplot as plt vmin=-10 vmax=10 fig = plt.figure() plt.subplot(221) plt.title('u '+interpol_method) plt.plot(points[:,0], ny-1-points[:,1], 'k.', ms=1) plt.imshow(grid_u1x, vmin=vmin, vmax=vmax) #, extent=extent #plt.colorbar() plt.subplot(222) plt.title('v '+interpol_method) plt.plot(points[:,0], ny-1-points[:,1], 'k.', ms=1) plt.imshow(grid_v1x, origin=origin, vmin=vmin, vmax=vmax) #, extent=extent #plt.colorbar() # standard calculation for comparison print '... 
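# --- illustrative sketch (not part of the original module) -----------------
# "linear + nearest" above: linear griddata is undefined (NaN) outside the
# convex hull of the observations, so the field is back-filled with the
# closest valid pixel, as fill_with_closest_pixel() does. Stand-alone, with
# hypothetical observation points:
import numpy as np
from scipy.interpolate import griddata
from scipy.ndimage import distance_transform_edt

pts = np.array([[2., 2.], [2., 7.], [7., 2.], [7., 7.]])
vals = np.array([1., 2., 3., 4.])
gx, gy = np.meshgrid(np.arange(10.), np.arange(10.))
lin = griddata(pts, vals, (gx, gy), method='linear')    # NaN outside the hull
ind = distance_transform_edt(np.isnan(lin),
                             return_distances=False, return_indices=True)
filled = lin[tuple(ind)]                                # no NaNs left
# ----------------------------------------------------------------------------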
calculate linear interpolation' grid_u1 = griddata(points, uu, (grid_x, grid_y), method='linear') grid_v1 = griddata(points, vv, (grid_x, grid_y), method='linear') grid_u1xx = fill_with_closest_pixel(grid_u1, invalid=None) grid_v1xx = fill_with_closest_pixel(grid_v1, invalid=None) plt.subplot(223) plt.title('U Linear+Nearest') plt.plot(points[:,0], ny-1-points[:,1], 'k.', ms=1) plt.imshow(grid_u1xx, origin=origin, vmin=vmin, vmax=vmax) #, extent=extent #plt.colorbar() plt.subplot(224) plt.title('V Linear+Nearest') plt.plot(points[:,0], ny-1-points[:,1], 'k.', ms=1) #plt.title('Cubic') plt.imshow(grid_v1xx, origin=origin, vmin=vmin, vmax=vmax) #, extent=extent #plt.colorbar() plt.gcf().set_size_inches(6, 6) #plt.show() # does not work with AGG tmpfile="test_hrw"+level+".png" fig.savefig(tmpfile) print "display "+tmpfile+" &" if grid_u1x.min() < -150 or grid_v1x.min() < -150 or grid_u1x.max() > 150 or grid_v1x.max() > 150: print "*** Warning, numerical instability detected, interpolation method: ", interpol_method print " min u windspeed (u 2dimensional): ", grid_u1x.min() print " min v windspeed (v 2dimensional): ", grid_v1x.min() print " max u windspeed (u 2dimensional): ", grid_u1x.max() print " max v windspeed (v 2dimensional): ", grid_v1x.max() interpol_method = "glinear + nearest" print "... try another interpolation method: ", interpol_method else: # (hopefully) numerical stable interpolation, exit the interpolation loop break HRW_data.interpol_method = interpol_method return grid_u1x, grid_v1x mpop-1.5.0/mpop/imageo/TRTimage.py000066400000000000000000000115341317160620000167320ustar00rootroot00000000000000from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas from matplotlib.figure import Figure #import matplotlib as mpl #mpl.use('Agg') #from pylab import figure from pylab import rand import matplotlib.pyplot as plt from matplotlib.patches import Ellipse from uuid import uuid4 import subprocess from PIL import Image as PIL_Image import numpy def fig2data ( fig ): """ @brief Convert a Matplotlib figure to a 4D numpy array with RGBA channels and return it @param fig a matplotlib figure @return a numpy 3D array of RGBA values """ # draw the renderer fig.canvas.draw ( ) # Get the RGBA buffer from the figure w,h = fig.canvas.get_width_height() buf = numpy.fromstring ( fig.canvas.tostring_argb(), dtype=numpy.uint8 ) buf.shape = ( w, h, 4 ) # canvas.tostring_argb give pixmap in ARGB mode. 
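# --- illustrative check (not part of the original module) ------------------
# numpy.roll(buf, 3, axis=2), used just below, moves each pixel's A,R,G,B
# channels to R,G,B,A:
import numpy as np
argb = np.array([[[0, 1, 2, 3]]], dtype=np.uint8)   # A=0, R=1, G=2, B=3
rgba = np.roll(argb, 3, axis=2)
# rgba[0, 0] -> array([1, 2, 3, 0], dtype=uint8), i.e. R,G,B,A
# ----------------------------------------------------------------------------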
Roll the ALPHA channel to have it in RGBA mode buf = numpy.roll ( buf, 3, axis = 2 ) return buf def fig2img ( fig ): """ @brief Convert a Matplotlib figure to a PIL Image in RGBA format and return it @param fig a matplotlib figure @return a Python Imaging Library ( PIL ) image """ # put the figure pixmap into a numpy array buf = fig2data ( fig ) w, h, d = buf.shape return PIL_Image.frombytes( "RGBA", ( w ,h ), buf.tostring( ) ) def TRTimage( TRTcell_IDs, TRTcells, obj_area, minRank=8, alpha_max=1.0, plot_vel=True): # define size of image nx = obj_area.x_size ny = obj_area.y_size # create new figure fig = Figure() # canvas figure canvas = FigureCanvas(fig) # get dots per inch of the screen DPI = fig.get_dpi() # print "DPI", DPI fig.set_size_inches(nx/float(DPI),ny/float(DPI)) # get axis object ax = fig.add_subplot(111, aspect='equal') ## eliminates margins totally fig.subplots_adjust(left=0.0,right=1.0,bottom=0.0,top=1.0, wspace=0, hspace=0) #plt.subplots_adjust(left=0, right=1, bottom=0, top=1, wspace=0, hspace=0) # does only work with x11 display # set limits of the axis ax.set_xlim(0, nx) ax.set_ylim(0, ny) # set transparent backgroud fig.patch.set_alpha(0.0) # transparent outside of diagram ax.set_axis_bgcolor([1,0,0,0]) # transparent color inside diagram # define arrow properties head_width = 0.006 * min(obj_area.x_size,obj_area.x_size) head_length = 2 * head_width pixel_size_x_km = 0.001 * obj_area.pixel_size_x pixel_size_y_km = 0.001 * obj_area.pixel_size_y for cell in TRTcell_IDs: if TRTcells[cell].RANKr > minRank: (x0,y0) = obj_area.get_xy_from_lonlat(TRTcells[cell].lon, TRTcells[cell].lat, outside_error=False, return_int=False) y0 = (obj_area.y_size-1)-y0 # print (x0,y0) vx = TRTcells[cell].vel_x vy = TRTcells[cell].vel_y # !!!scaling of width and height is not correct, that is on map projection, but not on the ground!!! e = Ellipse( xy = (x0, y0), \ width = 2*TRTcells[cell].ell_S / pixel_size_x_km, \ height = 2*TRTcells[cell].ell_L / pixel_size_y_km, \ angle = -TRTcells[cell].angle ) ax.add_artist(e) e.set_clip_box(ax.bbox) if TRTcells[cell].RANKr <= 12: cell_color="white" alpha = (alpha_max-0.2) / 12. * TRTcells[cell].RANKr elif TRTcells[cell].RANKr <= 15: cell_color="white" alpha = alpha_max elif TRTcells[cell].RANKr <= 25: cell_color="green" alpha = alpha_max elif TRTcells[cell].RANKr <= 35: cell_color="yellow" alpha = alpha_max else: cell_color="red" alpha = alpha_max # print "cell ID: %s, cell rank: %2d, cell_color:%7s, alpha = %4.1f" % (cell, TRTcells[cell].RANKr, cell_color, alpha) e.set_alpha(alpha) # transparency: 0.0 transparent, 1 total visible e.set_facecolor(cell_color) # "white" or [1,1,1] if plot_vel: ax.arrow(x0, y0, vx, vy, head_width = head_width, head_length = head_length, fc=cell_color, ec=cell_color) if 1==1: # print " !!! convert fig to image by function fig2img !!!" ### this would avoid saving into a file, but it fills the transparent areas with "white" PIL_image = fig2img ( fig ) else: tmp_file = '/tmp/TRT_'+str(uuid4())+'.png' # print tmp_file plt.savefig(tmp_file, dpi=DPI, transparent=True) #, bbox_inches='tight' # subprocess.call("display "+tmp_file+" &", shell=True) PIL_image = PIL_Image.open(tmp_file) subprocess.call("rm "+tmp_file+" &", shell=True) return PIL_image mpop-1.5.0/mpop/imageo/__init__.py000066400000000000000000000016121317160620000170110ustar00rootroot00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright (c) 2009. 
# SMHI,
# Folkborgsvägen 1,
# Norrköping,
# Sweden

# Author(s):

#   Martin Raspaud
#   Adam Dybbroe

# This file is part of the mpop.

# mpop is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.

# mpop is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.

# You should have received a copy of the GNU General Public License
# along with mpop. If not, see <http://www.gnu.org/licenses/>.

"""Initialization for geo_image.
"""
mpop-1.5.0/mpop/imageo/formats/000077500000000000000000000000001317160620000163535ustar00rootroot00000000000000mpop-1.5.0/mpop/imageo/formats/__init__.py000066400000000000000000000000261317160620000204620ustar00rootroot00000000000000
# Empty module holder
mpop-1.5.0/mpop/imageo/formats/ninjotiff.py000066400000000000000000001170011317160620000207130ustar00rootroot00000000000000# -*- coding: utf-8 -*-
"""
ninjotiff.py

Created on Mon Apr 15 13:41:55 2013

A large part of the tiff writer is (PFE) from
https://github.com/davidh-ssec/polar2grid by David Hoese

License:
Copyright (C) 2013 Space Science and Engineering Center (SSEC),
University of Wisconsin-Madison.
Lars Ørum Rasmussen, DMI.

This program is free software: you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation, either version 3 of the License, or (at your option) any later
version.

This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
details.

You should have received a copy of the GNU General Public License along with
this program. If not, see <http://www.gnu.org/licenses/>.

Original scripts and automation included as part of this package are
distributed under the GNU GENERAL PUBLIC LICENSE agreement version 3.
Binary executable files included as part of this software package are
copyrighted and licensed by their respective organizations, and distributed
consistent with their licensing terms.

Edited by Christian Kliche (Ernst Basler + Partner) to replace pylibtiff
with a modified version of tifffile.py (created by Christoph Gohlke)
"""
import calendar
import logging
import os
from copy import deepcopy
from datetime import datetime

import numpy as np

import mpop.imageo.formats.writer_options as write_opts
from mpop.imageo.formats import tifffile

log = logging.getLogger(__name__)

#-------------------------------------------------------------------------
#
# Ninjo tiff tags from DWD
#
#-------------------------------------------------------------------------
# Geotiff tags.
GTF_ModelPixelScale = 33550 GTF_ModelTiepoint = 33922 # Ninjo tiff tags NINJO_TAGS = { "NTD_Magic": 40000, "NTD_SatelliteNameID": 40001, "NTD_DateID": 40002, "NTD_CreationDateID": 40003, "NTD_ChannelID": 40004, "NTD_HeaderVersion": 40005, "NTD_FileName": 40006, "NTD_DataType": 40007, "NTD_SatelliteNumber": 40008, "NTD_ColorDepth": 40009, "NTD_DataSource": 40010, "NTD_XMinimum": 40011, "NTD_XMaximum": 40012, "NTD_YMinimum": 40013, "NTD_YMaximum": 40014, "NTD_Projection": 40015, "NTD_MeridianWest": 40016, "NTD_MeridianEast": 40017, "NTD_EarthRadiusLarge": 40018, "NTD_EarthRadiusSmall": 40019, "NTD_GeodeticDate": 40020, "NTD_ReferenceLatitude1": 40021, "NTD_ReferenceLatitude2": 40022, "NTD_CentralMeridian": 40023, "NTD_PhysicValue": 40024, "NTD_PhysicUnit": 40025, "NTD_MinGrayValue": 40026, "NTD_MaxGrayValue": 40027, "NTD_Gradient": 40028, "NTD_AxisIntercept": 40029, "NTD_ColorTable": 40030, "NTD_Description": 40031, "NTD_OverflightDirection": 40032, "NTD_GeoLatitude": 40033, "NTD_GeoLongitude": 40034, "NTD_Altitude": 40035, "NTD_AOSAsimuth": 40036, "NTD_LOSAsimuth": 40037, "NTD_MaxElevation": 40038, "NTD_OverflightTime": 40039, "NTD_IsBlackLineCorrection": 40040, "NTD_IsAtmosphereCorrected": 40041, "NTD_IsCalibrated": 40042, "NTD_IsNormalized": 40043, "NTD_OriginalHeader": 40044, "NTD_IsValueTableAvailable": 40045, "NTD_ValueTableStringField": 40046, "NTD_ValueTableFloatField": 40047, "NTD_TransparentPixel": 50000 } NINJO_TAGS_INV = dict((v, k) for k, v in NINJO_TAGS.items()) # # model_pixel_scale_tag_count ? ... # Sometimes DWD product defines an array of length 2 (instead of 3 (as in geotiff)). # MODEL_PIXEL_SCALE_COUNT = int(os.environ.get( "GEOTIFF_MODEL_PIXEL_SCALE_COUNT", 3)) #------------------------------------------------------------------------- # # Read Ninjo products config file. # #------------------------------------------------------------------------- def get_product_config(product_name, force_read=False): """Read Ninjo configuration entry for a given product name. :Parameters: product_name : str Name of Ninjo product. :Arguments: force_read : Boolean Force re-reading config file. **Notes**: * It will look for a *ninjotiff_products.cfg* in MPOP's configuration directory defined by *PPP_CONFIG_DIR*. * As an example, see *ninjotiff_products.cfg.template* in MPOP's *etc* directory. 
""" return ProductConfigs()(product_name, force_read) class _Singleton(type): def __init__(cls, name_, bases_, dict_): super(_Singleton, cls).__init__(name_, bases_, dict_) cls.instance = None def __call__(cls, *args, **kwargs): if cls.instance is None: cls.instance = super(_Singleton, cls).__call__(*args, **kwargs) return cls.instance class ProductConfigs(object): __metaclass__ = _Singleton def __init__(self): self.read_config() def __call__(self, product_name, force_read=False): if force_read: self.read_config() return self._products[product_name] @property def product_names(self): return sorted(self._products.keys()) def read_config(self): from ConfigParser import ConfigParser def _eval(val): try: return eval(val) except: return str(val) filename = self._find_a_config_file() log.info("Reading Ninjo config file: '%s'" % filename) cfg = ConfigParser() cfg.read(filename) products = {} for sec in cfg.sections(): prd = {} for key, val in cfg.items(sec): prd[key] = _eval(val) products[sec] = prd self._products = products @staticmethod def _find_a_config_file(): name_ = 'ninjotiff_products.cfg' home_ = os.path.dirname(os.path.abspath(__file__)) penv_ = os.environ.get('PPP_CONFIG_DIR', '') for fname_ in [os.path.join(x, name_) for x in (home_, penv_)]: if os.path.isfile(fname_): return fname_ raise ValueError("Could not find a Ninjo tiff config file") #------------------------------------------------------------------------- # # Write Ninjo Products # #------------------------------------------------------------------------- def _get_physic_value(physic_unit): # return Ninjo's physics unit and value. if physic_unit.upper() in ('K', 'KELVIN'): return 'Kelvin', 'T' elif physic_unit.upper() in ('C', 'CELSIUS'): return 'Celsius', 'T' elif physic_unit == '%': return physic_unit, 'Reflectance' elif physic_unit.upper() in ('MW M-2 SR-1 (CM-1)-1',): return physic_unit, 'Radiance' else: return physic_unit, 'Unknown' def _get_projection_name(area_def): # return Ninjo's projection name. proj_name = area_def.proj_dict['proj'] if proj_name in ('eqc',): return 'PLAT' elif proj_name in ('merc',): return 'MERC' elif proj_name in ('stere',): lat_0 = area_def.proj_dict['lat_0'] if lat_0 < 0: return 'SPOL' else: return 'NPOL' return None def _get_pixel_size(projection_name, area_def): if projection_name in ['PLAT', 'MERC']: upper_left = area_def.get_lonlat(0, 0) lower_right = area_def.get_lonlat( area_def.shape[0] - 1, area_def.shape[1] - 1) pixel_size = abs(lower_right[0] - upper_left[0]) / (area_def.shape[1] - 1),\ abs(upper_left[1] - lower_right[1]) / (area_def.shape[0] - 1) elif projection_name in ('NPOL', 'SPOL'): pixel_size = (np.rad2deg(area_def.pixel_size_x / float(area_def.proj_dict['a'])), np.rad2deg(area_def.pixel_size_y / float(area_def.proj_dict['b']))) else: raise ValueError("Could determine pixel size from projection name '%s'" % projection_name + " (Unknown)") return pixel_size def _get_satellite_altitude(filename): # Guess altitude (probably no big deal if we fail). sat_altitudes = {'MSG': 36000.0, 'METOP': 817.0, 'NOAA': 870.0} filename = os.path.basename(filename).upper() for nam_, alt_ in sat_altitudes.items(): if nam_ in filename: return alt_ return None def _finalize(geo_image, dtype=np.uint8, value_range_measurement_unit=None, data_is_scaled_01=True): """Finalize a mpop GeoImage for Ninjo. Specialy take care of phycical scale and offset. :Parameters: geo_image : mpop.imageo.geo_image.GeoImage See MPOP's documentation. 
dtype : bits per sample np.uint8 or np.uint16 (default: np.uint8) value_range_measurement_unit: list or tuple Defining minimum and maximum value range. Data will be clipped into that range. Default is no clipping and auto scale. data_is_scaled_01: boolean If true (default), input data is assumed to be in the [0.0, 1.0] range. :Returns: image : numpy.array Final image. scale : float Scale for transform pixel value to physical value. offset : float Offset for transform pixel value to physical value. fill_value : int Value for masked out pixels. **Notes**: physic_val = image*scale + offset Example values for value_range_measurement_unit are (0, 125) or (40.0, -87.5) """ if geo_image.mode == 'L': # PFE: mpop.satout.cfscene data = geo_image.channels[0] fill_value = np.iinfo(dtype).min log.debug("Transparent pixel are forced to be %d" % fill_value) log.debug("Before scaling: %.2f, %.2f, %.2f" % (data.min(), data.mean(), data.max())) if np.ma.count_masked(data) == data.size: # All data is masked data = np.ones(data.shape, dtype=dtype) * fill_value scale = 1 offset = 0 else: if value_range_measurement_unit and data_is_scaled_01: # No additional scaling of the input data - assume that data is # within [0.0, 1.0] and interpret 0.0 as # value_range_measurement_unit[0] and 1.0 as # value_range_measurement_unit[1] # Make room for transparent pixel. scale_fill_value = ( (np.iinfo(dtype).max) / (np.iinfo(dtype).max + 1.0)) geo_image = deepcopy(geo_image) geo_image.channels[0] *= scale_fill_value geo_image.channels[0] += 1 / (np.iinfo(dtype).max + 1.0) channels, fill_value = geo_image._finalize(dtype) data = channels[0] scale = ((value_range_measurement_unit[1] - value_range_measurement_unit[0]) / (np.iinfo(dtype).max)) # Handle the case where all data has the same value. scale = scale or 1 offset = value_range_measurement_unit[0] mask = data.mask offset -= scale if fill_value is None: fill_value = 0 else: if value_range_measurement_unit: data.clip(value_range_measurement_unit[0], value_range_measurement_unit[1], data) chn_min = value_range_measurement_unit[0] chn_max = value_range_measurement_unit[1] log.debug("Scaling, using value range %.2f - %.2f" % (value_range_measurement_unit[0], value_range_measurement_unit[1])) else: chn_max = data.max() chn_min = data.min() log.debug("Doing auto scaling") # Make room for transparent pixel. scale = ((chn_max - chn_min) / (np.iinfo(dtype).max - 1.0)) # Handle the case where all data has the same value. scale = scale or 1 offset = chn_min # Scale data to dtype, and adjust for transparent pixel forced # to be minimum. 
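# --- illustrative numbers (not part of the original module) ----------------
# With dtype uint8 and value_range_measurement_unit (0., 125.), graylevel 0
# stays transparent and graylevel 1 maps to the range minimum:
scale_demo = (125. - 0.) / 255          # hypothetical value range (0, 125)
offset_demo = 0. - scale_demo           # mirrors the 'offset -= scale' shift
# physic_val = image * scale + offset, so graylevel 1 gives the range minimum:
assert abs(1 * scale_demo + offset_demo) < 1e-12
# ----------------------------------------------------------------------------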
mask = data.mask data = 1 + ((data.data - offset) / scale).astype(dtype) offset -= scale data[mask] = fill_value if log.getEffectiveLevel() == logging.DEBUG: d__ = np.ma.array(data, mask=(data == fill_value)) log.debug("After scaling: %.2f, %.2f, %.2f" % (d__.min(), d__.mean(), d__.max())) d__ = data * scale + offset d__ = np.ma.array(d__, mask=(data == fill_value)) log.debug("Rescaling: %.2f, %.2f, %.2f" % (d__.min(), d__.mean(), d__.max())) del d__ return data, scale, offset, fill_value elif geo_image.mode == 'RGB': channels, fill_value = geo_image._finalize(dtype) if fill_value is None: mask = (np.ma.getmaskarray(channels[0]) & np.ma.getmaskarray(channels[1]) & np.ma.getmaskarray(channels[2])) channels.append((np.ma.logical_not(mask) * np.iinfo(channels[0].dtype).max).astype(channels[0].dtype)) fill_value = (0, 0, 0, 0) data = np.dstack([channel.filled(fill_v) for channel, fill_v in zip(channels, fill_value)]) return data, 1.0, 0.0, fill_value[0] elif geo_image.mode == 'RGBA': channels, fill_value = geo_image._finalize(dtype) fill_value = fill_value or (0, 0, 0, 0) data = np.dstack((channels[0].filled(fill_value[0]), channels[1].filled(fill_value[1]), channels[2].filled(fill_value[2]), channels[3].filled(fill_value[3]))) return data, 1.0, 0.0, fill_value[0] elif geo_image.mode == 'P': fill_value = 0 data = geo_image.channels[0] if isinstance(data, np.ma.core.MaskedArray): data = data.filled(fill_value) data = data.astype(dtype) log.debug("Value range: %.2f, %.2f, %.2f" % (data.min(), data.mean(), data.max())) return data, 1.0, 0.0, fill_value else: raise ValueError("Don't known how til handle image mode '%s'" % str(geo_image.mode)) def save(geo_image, filename, ninjo_product_name=None, writer_options=None, **kwargs): """MPOP's interface to Ninjo TIFF writer. :Parameters: geo_image : mpop.imageo.geo_image.GeoImage See MPOP's documentation. filename : str The name of the TIFF file to be created :Keywords: ninjo_product_name : str Optional index to Ninjo configuration file. writer_options : dict options dictionary as defined in MPOP interface See _write kwargs : dict See _write **Note**: * Some arguments are type casted, since they could come from a config file, read as strings. * 8 bits grayscale with a colormap (if specified, inverted for IR channels). * 16 bits grayscale with no colormap (if specified, MinIsWhite is set for IR). * min value will be reserved for transparent color. * If possible mpop.imageo.image's standard finalize will be used. 
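    **Example** (keyword values below are taken from this package's
    ninjotiff_example script and are only illustrative; adjust them, and the
    file name, to your own satellite/channel setup)::

        img.save('msg-ir108.tif',
                 fformat='mpop.imageo.formats.ninjotiff',
                 sat_id=6200014, chan_id=900015,
                 data_cat='GORN', data_source='EUMETCAST',
                 physic_unit='C',
                 ch_min_measurement_unit=-88.5,
                 ch_max_measurement_unit=40.0,
                 nbits=8)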
""" if writer_options: # add writer_options kwargs.update(writer_options) if 'ninjo_product_name' in writer_options: ninjo_product_name = writer_options['ninjo_product_name'] dtype = np.uint8 if 'nbits' in kwargs: nbits = int(kwargs['nbits']) if nbits == 16: dtype = np.uint16 try: value_range_measurement_unit = (float(kwargs["ch_min_measurement_unit"]), float(kwargs["ch_max_measurement_unit"])) except KeyError: value_range_measurement_unit = None data_is_scaled_01 = bool(kwargs.get("data_is_scaled_01", True)) data, scale, offset, fill_value = _finalize(geo_image, dtype=dtype, data_is_scaled_01=data_is_scaled_01, value_range_measurement_unit=value_range_measurement_unit,) area_def = geo_image.area time_slot = geo_image.time_slot # Some Ninjo tiff names kwargs['gradient'] = scale kwargs['axis_intercept'] = offset kwargs['transparent_pix'] = fill_value kwargs['image_dt'] = time_slot kwargs['is_calibrated'] = True if geo_image.mode == 'P' and 'cmap' not in kwargs: r, g, b = zip(*geo_image.palette) r = list((np.array(r) * 255).astype(np.uint8)) g = list((np.array(g) * 255).astype(np.uint8)) b = list((np.array(b) * 255).astype(np.uint8)) if len(r) < 256: r += [0] * (256 - len(r)) g += [0] * (256 - len(g)) b += [0] * (256 - len(b)) kwargs['cmap'] = r, g, b write(data, filename, area_def, ninjo_product_name, **kwargs) def write(image_data, output_fn, area_def, product_name=None, **kwargs): """Generic Ninjo TIFF writer. If 'prodcut_name' is given, it will load corresponding Ninjo tiff metadata from '${PPP_CONFIG_DIR}/ninjotiff.cfg'. Else, all Ninjo tiff metadata should be passed by '**kwargs'. A mixture is allowed, where passed arguments overwrite config file. :Parameters: image_data : 2D numpy array Satellite image data to be put into the NinJo compatible tiff output_fn : str The name of the TIFF file to be created area_def: pyresample.geometry.AreaDefinition Defintion of area product_name : str Optional index to Ninjo configuration file. :Keywords: kwargs : dict See _write """ upper_left = area_def.get_lonlat(0, 0) lower_right = area_def.get_lonlat( area_def.shape[0] - 1, area_def.shape[1] - 1) if len(image_data.shape) == 3: if image_data.shape[2] == 4: shape = (area_def.y_size, area_def.x_size, 4) log.info("Will generate RGBA product") else: shape = (area_def.y_size, area_def.x_size, 3) log.info("Will generate RGB product") write_rgb = True else: shape = (area_def.y_size, area_def.x_size) write_rgb = False log.info("Will generate single band product") if image_data.shape != shape: raise ValueError("Raster shape %s does not correspond to expected shape %s" % ( str(image_data.shape), str(shape))) # Ninjo's physical units and value. # If just a physical unit (e.g. 'C') is passed, it will then be # translated into Ninjo's unit and value (e.q 'CELCIUS' and 'T'). physic_unit = kwargs.get('physic_unit', None) if physic_unit and not kwargs.get('physic_value', None): kwargs['physic_unit'], kwargs['physic_value'] = \ _get_physic_value(physic_unit) # Ninjo's projection name. kwargs['projection'] = kwargs.pop('projection', None) or \ _get_projection_name(area_def) or \ area_def.proj_id.split('_')[-1] # Get pixel size if 'pixel_xres' not in kwargs or 'pixel_yres' not in kwargs: kwargs['pixel_xres'], kwargs['pixel_yres'] = \ _get_pixel_size(kwargs['projection'], area_def) # Get altitude. 
altitude = kwargs.pop('altitude', None) or \ _get_satellite_altitude(output_fn) if altitude is not None: kwargs['altitude'] = altitude if product_name: options = deepcopy(get_product_config(product_name)) else: options = {} options['meridian_west'] = upper_left[0] options['meridian_east'] = lower_right[0] if kwargs['projection'].endswith("POL"): if 'lat_ts' in area_def.proj_dict: options['ref_lat1'] = area_def.proj_dict['lat_ts'] options['ref_lat2'] = 0 else: if 'lat_0' in area_def.proj_dict: options['ref_lat1'] = area_def.proj_dict['lat_0'] options['ref_lat2'] = 0 if 'lon_0' in area_def.proj_dict: options['central_meridian'] = area_def.proj_dict['lon_0'] if 'a' in area_def.proj_dict: options['radius_a'] = area_def.proj_dict['a'] if 'b' in area_def.proj_dict: options['radius_b'] = area_def.proj_dict['b'] options['origin_lon'] = upper_left[0] options['origin_lat'] = upper_left[1] options['min_gray_val'] = image_data.min() options['max_gray_val'] = image_data.max() options.update(kwargs) # Update/overwrite with passed arguments _write(image_data, output_fn, write_rgb=write_rgb, **options) # ----------------------------------------------------------------------------- # # Write tiff file. # # ----------------------------------------------------------------------------- def _write(image_data, output_fn, write_rgb=False, **kwargs): """Proudly Found Elsewhere (PFE) https://github.com/davidh-ssec/polar2grid by David Hoese. Create a NinJo compatible TIFF file with the tags used by the DWD's version of NinJo. Also stores the image as tiles on disk and creates a multi-resolution/pyramid/overview set of images (deresolution: 2,4,8,16). :Parameters: image_data : 2D or 3D numpy array Satellite image data to be put into the NinJo compatible tiff An 3D array (HxWx3) is expected for a RGB image. filename : str The name of the TIFF file to be created :Keywords: cmap : tuple/list of 3 lists of uint16's Individual RGB arrays describing the color value for the corresponding data value. For example, image data with a data type of unsigned 8-bit integers have 256 possible values (0-255). So each list in cmap will have 256 values ranging from 0 to 65535 (2**16 - 1). (default linear B&W colormap) sat_id : int DWD NinJo Satellite ID number chan_id : int DWD NinJo Satellite Channel ID number data_source : str String describing where the data came from (SSEC, EUMCAST) tile_width : int Width of tiles on disk (default 512) tile_length : int Length of tiles on disk (default 512) data_cat : str NinJo specific data category - data_cat[0] = P (polar) or G (geostat) - data_cat[1] = O (original) or P (product) - data_cat[2:4] = RN or RB or RA or RN or AN (Raster, Bufr, ASCII, NIL) Example: 'PORN' or 'GORN' or 'GPRN' or 'PPRN' pixel_xres : float Nadir view pixel resolution in degrees longitude pixel_yres : float Nadir view pixel resolution in degrees latitude origin_lat : float Top left corner latitude origin_lon : float Top left corner longitude image_dt : datetime object Python datetime object describing the date and time of the image data provided in UTC projection : str NinJo compatible projection name (NPOL,PLAT,etc.) 
meridian_west : float Western image border (default 0.0) meridian_east : float Eastern image border (default 0.0) radius_a : float Large/equatorial radius of the earth (default ) radius_b : float Small/polar radius of the earth (default ) ref_lat1 : float Reference latitude 1 (default ) ref_lat2 : float Reference latitude 2 (default ) central_meridian : float Central Meridian (default ) physic_value : str Physical value type. Examples: - Temperature = 'T' - Albedo = 'ALBEDO' physic_unit : str Physical value units. Examples: - 'CELSIUS' - '%' min_gray_val : int Minimum gray value (default 0) max_gray_val : int Maximum gray value (default 255) gradient : float Gradient/Slope axis_intercept : float Axis Intercept altitude : float Altitude of the data provided (default 0.0) is_atmo_corrected : bool Is the data atmosphere corrected? (True/1 for yes) (default False/0) is_calibrated : bool Is the data calibrated? (True/1 for yes) (default False/0) is_normalized : bool Is the data normalized (True/1 for yes) (default False/0) description : str Description string to be placed in the output TIFF (optional) transparent_pix : int Transparent pixel value (default -1) compression : int zlib compression level (default 6) inv_def_temperature_cmap : bool (default True) Invert the default colormap if physical value type is 'T' omit_filename_path : bool (default False) Do not store path in NTD_FileName tag :Raises: KeyError : if required keyword is not provided """ def _raise_value_error(text): log.error(text) raise ValueError(text) def _default_colormap(reverse=False, nbits16=False): # Basic B&W colormap if nbits16: if reverse: return [[x for x in range(65535, -1, -1)]] * 3 return [[x for x in range(65536)]] * 3 else: if reverse: return [[x * 256 for x in range(255, -1, -1)]] * 3 return [[x * 256 for x in range(256)]] * 3 def _eval_or_none(key, eval_func): try: return eval_func(kwargs[key]) except KeyError: return None def _eval_or_default(key, eval_func, default): try: return eval_func(kwargs[key]) except KeyError: return default log.info("Creating output file '%s'" % (output_fn,)) # Extract keyword arguments cmap = kwargs.pop("cmap", None) sat_id = int(kwargs.pop("sat_id")) chan_id = int(kwargs.pop("chan_id")) data_source = str(kwargs.pop("data_source")) tile_width = int(kwargs.pop("tile_width", 512)) tile_length = int(kwargs.pop("tile_length", 512)) data_cat = str(kwargs.pop("data_cat")) pixel_xres = float(kwargs.pop("pixel_xres")) pixel_yres = float(kwargs.pop("pixel_yres")) origin_lat = float(kwargs.pop("origin_lat")) origin_lon = float(kwargs.pop("origin_lon")) image_dt = kwargs.pop("image_dt") zero_seconds = kwargs.pop("zero_seconds", False) projection = str(kwargs.pop("projection")) meridian_west = float(kwargs.pop("meridian_west", 0.0)) meridian_east = float(kwargs.pop("meridian_east", 0.0)) radius_a = _eval_or_none("radius_a", float) radius_b = _eval_or_none("radius_b", float) ref_lat1 = _eval_or_none("ref_lat1", float) ref_lat2 = _eval_or_none("ref_lat2", float) central_meridian = _eval_or_none("central_meridian", float) min_gray_val = int(kwargs.pop("min_gray_val", 0)) max_gray_val = int(kwargs.pop("max_gray_val", 255)) altitude = _eval_or_none("altitude", float) is_blac_corrected = int(bool(kwargs.pop("is_blac_corrected", 0))) is_atmo_corrected = int(bool(kwargs.pop("is_atmo_corrected", 0))) is_calibrated = int(bool(kwargs.pop("is_calibrated", 0))) is_normalized = int(bool(kwargs.pop("is_normalized", 0))) inv_def_temperature_cmap = bool(kwargs.pop("inv_def_temperature_cmap", 1)) 
omit_filename_path = bool(kwargs.pop("omit_filename_path", 0)) description = _eval_or_none("description", str) physic_value = str(kwargs.pop("physic_value", 'None')) physic_unit = str(kwargs.pop("physic_unit", 'None')) gradient = float(kwargs.pop("gradient", 1.0)) axis_intercept = float(kwargs.pop("axis_intercept", 0.0)) try: transparent_pix = int(kwargs.get("transparent_pix", -1)) except Exception: transparent_pix = kwargs.get("transparent_pix")[0] finally: kwargs.pop("transparent_pix") # Keyword checks / verification # Handle colormap or not. min_is_white = False if not write_rgb and not cmap: if physic_value == 'T' and inv_def_temperature_cmap: reverse = True else: reverse = False if np.iinfo(image_data.dtype).bits == 8: # Always generate colormap for 8 bit gray scale. cmap = _default_colormap(reverse) elif reverse: # No colormap for 16 bit gray scale, but for IR, specify white is # minimum. min_is_white = True if cmap and len(cmap) != 3: _raise_value_error( "Colormap (cmap) must be a list of 3 lists (RGB), not %d" % len(cmap)) if len(data_cat) != 4: _raise_value_error("NinJo data type must be 4 characters") if data_cat[0] not in ["P", "G"]: _raise_value_error( "NinJo data type's first character must be 'P' or 'G' not '%s'" % data_cat[0]) if data_cat[1] not in ["O", "P"]: _raise_value_error( "NinJo data type's second character must be 'O' or 'P' not '%s'" % data_cat[1]) if data_cat[2:4] not in ["RN", "RB", "RA", "BN", "AN"]: _raise_value_error( "NinJo data type's last 2 characters must be one of %s not '%s'" % ("['RN','RB','RA','BN','AN']", data_cat[2:4])) if description is not None and len(description) >= 1000: log.error("NinJo description must be less than 1000 characters") raise ValueError("NinJo description must be less than 1000 characters") file_dt = datetime.utcnow() file_epoch = calendar.timegm(file_dt.timetuple()) if zero_seconds: log.debug("Applying zero seconds correction") image_dt_corr = datetime(image_dt.year, image_dt.month, image_dt.day, image_dt.hour, image_dt.minute) else: image_dt_corr = image_dt image_epoch = calendar.timegm(image_dt_corr.timetuple()) compression = _eval_or_default("compression", int, 6) def _create_args(image_data, pixel_xres, pixel_yres): log.info("creating tags and data for a resolution %dx%d" % image_data.shape[:2]) args = dict() extra_tags = [] args['extratags'] = extra_tags args['software'] = 'tifffile/pytroll' args['compress'] = compression args['extrasamples_type'] = 2 # Built ins if write_rgb: args["photometric"] = 'rgb' else: if cmap: args["photometric"] = 'palette' args["colormap"] = [item for sublist in cmap for item in sublist] elif min_is_white: args["photometric"] = 'miniswhite' else: args["photometric"] = 'minisblack' # planarconfig, samples_per_pixel, orientation, sample_format set by # tifffile.py args["tile_width"] = tile_width args["tile_length"] = tile_length # not necessary to set value SMinSampleValue and SMaxsampleValue # TIFF6 Spec: The default for SMinSampleValue and SMaxSampleValue is # the full range of the data type # args["SMinSampleValue"] = 0 # args["SMaxsampleValue"] = 255 # NinJo specific tags if description is not None: extra_tags.append( (NINJO_TAGS["NTD_Description"], 's', 0, description, True)) # Geo tiff tags if MODEL_PIXEL_SCALE_COUNT == 3: extra_tags.append((GTF_ModelPixelScale, 'd', 3, [pixel_xres, pixel_yres, 0.0], True)) else: extra_tags.append((GTF_ModelPixelScale, 'd', 2, [pixel_xres, pixel_yres], True)) extra_tags.append((GTF_ModelTiepoint, 'd', 6, [0.0, 0.0, 0.0, origin_lon, origin_lat, 0.0], True)) 
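# --- illustrative sketch (not part of the original module) -----------------
# Each entry appended to extra_tags is a 5-tuple (code, dtype, count, value,
# writeonce) as understood by the bundled tifffile writer. A stand-alone
# example writing one custom ASCII tag to a hypothetical file; tag code
# 40000 is NTD_Magic in the table above, and count 0 lets the writer derive
# the count from the string length:
import numpy
from mpop.imageo.formats import tifffile

demo = numpy.zeros((8, 8), dtype=numpy.uint8)
with tifffile.TiffWriter('demo_tag.tif') as tif:
    tif.save(demo, extratags=[(40000, 's', 0, 'NINJO', True)])
# ----------------------------------------------------------------------------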
extra_tags.append((NINJO_TAGS["NTD_Magic"], 's', 0, "NINJO", True)) extra_tags.append( (NINJO_TAGS["NTD_SatelliteNameID"], 'I', 1, sat_id, True)) extra_tags.append( (NINJO_TAGS["NTD_DateID"], 'I', 1, image_epoch, True)) extra_tags.append( (NINJO_TAGS["NTD_CreationDateID"], 'I', 1, file_epoch, True)) extra_tags.append((NINJO_TAGS["NTD_ChannelID"], 'I', 1, chan_id, True)) extra_tags.append((NINJO_TAGS["NTD_HeaderVersion"], 'i', 1, 2, True)) if omit_filename_path: extra_tags.append((NINJO_TAGS["NTD_FileName"], 's', 0, os.path.basename(output_fn), True)) else: extra_tags.append( (NINJO_TAGS["NTD_FileName"], 's', 0, output_fn, True)) extra_tags.append((NINJO_TAGS["NTD_DataType"], 's', 0, data_cat, True)) # Hardcoded to 0 extra_tags.append( (NINJO_TAGS["NTD_SatelliteNumber"], 's', 0, "\x00", True)) if write_rgb: extra_tags.append((NINJO_TAGS["NTD_ColorDepth"], 'i', 1, 24, True)) elif cmap: extra_tags.append((NINJO_TAGS["NTD_ColorDepth"], 'i', 1, 16, True)) else: extra_tags.append((NINJO_TAGS["NTD_ColorDepth"], 'i', 1, 8, True)) extra_tags.append( (NINJO_TAGS["NTD_DataSource"], 's', 0, data_source, True)) extra_tags.append((NINJO_TAGS["NTD_XMinimum"], 'i', 1, 1, True)) extra_tags.append( (NINJO_TAGS["NTD_XMaximum"], 'i', 1, image_data.shape[1], True)) extra_tags.append((NINJO_TAGS["NTD_YMinimum"], 'i', 1, 1, True)) extra_tags.append( (NINJO_TAGS["NTD_YMaximum"], 'i', 1, image_data.shape[0], True)) extra_tags.append( (NINJO_TAGS["NTD_Projection"], 's', 0, projection, True)) extra_tags.append( (NINJO_TAGS["NTD_MeridianWest"], 'f', 1, meridian_west, True)) extra_tags.append( (NINJO_TAGS["NTD_MeridianEast"], 'f', 1, meridian_east, True)) if radius_a is not None: extra_tags.append((NINJO_TAGS["NTD_EarthRadiusLarge"], 'f', 1, float(radius_a), True)) if radius_b is not None: extra_tags.append((NINJO_TAGS["NTD_EarthRadiusSmall"], 'f', 1, float(radius_b), True)) # extra_tags.append((NINJO_TAGS["NTD_GeodeticDate"], 's', 0, "\x00", # True)) # ---? 
if ref_lat1 is not None: extra_tags.append( (NINJO_TAGS["NTD_ReferenceLatitude1"], 'f', 1, ref_lat1, True)) if ref_lat2 is not None: extra_tags.append( (NINJO_TAGS["NTD_ReferenceLatitude2"], 'f', 1, ref_lat2, True)) if central_meridian is not None: extra_tags.append((NINJO_TAGS["NTD_CentralMeridian"], 'f', 1, central_meridian, True)) extra_tags.append( (NINJO_TAGS["NTD_PhysicValue"], 's', 0, physic_value, True)) extra_tags.append( (NINJO_TAGS["NTD_PhysicUnit"], 's', 0, physic_unit, True)) extra_tags.append( (NINJO_TAGS["NTD_MinGrayValue"], 'i', 1, min_gray_val, True)) extra_tags.append( (NINJO_TAGS["NTD_MaxGrayValue"], 'i', 1, max_gray_val, True)) extra_tags.append((NINJO_TAGS["NTD_Gradient"], 'f', 1, gradient, True)) extra_tags.append( (NINJO_TAGS["NTD_AxisIntercept"], 'f', 1, axis_intercept, True)) if altitude is not None: extra_tags.append( (NINJO_TAGS["NTD_Altitude"], 'f', 1, altitude, True)) extra_tags.append((NINJO_TAGS["NTD_IsBlackLineCorrection"], 'i', 1, is_blac_corrected, True)) extra_tags.append((NINJO_TAGS["NTD_IsAtmosphereCorrected"], 'i', 1, is_atmo_corrected, True)) extra_tags.append( (NINJO_TAGS["NTD_IsCalibrated"], 'i', 1, is_calibrated, True)) extra_tags.append( (NINJO_TAGS["NTD_IsNormalized"], 'i', 1, is_normalized, True)) extra_tags.append((NINJO_TAGS["NTD_TransparentPixel"], 'i', 1, transparent_pix, True)) return args args = _create_args(image_data, pixel_xres, pixel_yres) tifargs = {} header_only_keys = ('byteorder', 'bigtiff', 'software', 'writeshape') for key in header_only_keys: if key in args: tifargs[key] = args[key] del args[key] if 'writeshape' not in args: args['writeshape'] = True if 'bigtiff' not in tifargs and \ image_data.size * image_data.dtype.itemsize > 2000 * 2 ** 20: tifargs['bigtiff'] = True with tifffile.TiffWriter(output_fn, **tifargs) as tif: tif.save(image_data, **args) for _, scale in enumerate((2, 4, 8, 16)): shape = (image_data.shape[0] / scale, image_data.shape[1] / scale) if shape[0] > tile_width and shape[1] > tile_length: args = _create_args(image_data[::scale, ::scale], pixel_xres * scale, pixel_yres * scale) for key in header_only_keys: if key in args: del args[key] tif.save(image_data[::scale, ::scale], **args) log.info("Successfully created a NinJo tiff file: '%s'" % (output_fn,)) # ----------------------------------------------------------------------------- # # Read tags. # # ----------------------------------------------------------------------------- def read_tags(filename): """Will read tag, value pairs from Ninjo tiff file. :Parameters: filename : string Ninjo tiff file. :Return: A list tags, one tag dictionary per page. """ pages = [] with tifffile.TiffFile(filename) as tif: for page in tif: tags = {} for tag in page.tags.values(): name, value = tag.name, tag.value try: # Is it one of ours ? name = int(name) name = NINJO_TAGS_INV[name] except ValueError: pass except KeyError: name = tag.name tags[name] = value pages.append(tags) return pages if __name__ == '__main__': import sys import getopt page_no = None print_color_maps = False opts, args = getopt.getopt(sys.argv[1:], "p:c") for key, val in opts: if key == "-p": page_no = int(val) if key == "-c": print_color_maps = True try: filename = args[0] except IndexError: print >> sys.stderr, """usage: python ninjotiff.py [<-p page-number>] [-c] -p : print page number (default are all pages). 
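# --- illustrative sketch (not part of the original module) -----------------
# The overview loop above derives each pyramid level (deresolution 2, 4, 8,
# 16) by striding, i.e. keeping every scale-th pixel in both directions:
import numpy
full = numpy.arange(16).reshape(4, 4)
half = full[::2, ::2]   # shape (2, 2): rows 0 and 2, columns 0 and 2
# ----------------------------------------------------------------------------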
-c: print color maps (default is not to print color maps).""" sys.exit(2) pages = read_tags(filename) if page_no is not None: try: pages = [pages[page_no]] except IndexError: print >>sys.stderr, "Invalid page number '%d'" % page_no sys.exit(2) for page in pages: names = sorted(page.keys()) print "" for name in names: if not print_color_maps and name == "color_map": continue print name, page[name] mpop-1.5.0/mpop/imageo/formats/ninjotiff_example000077500000000000000000000137771317160620000220210ustar00rootroot00000000000000#!/usr/bin/python # -*- coding: utf-8 -*- """ ninjotiff_example Created on Wed Dec 2 10:00:18 2015 @author: ras This example is using a ninjotiff.cfg file. If you prefer to pass all meta-data by arguments, they can be defined like: ninjotiff_config = { 0.6: {'description': 'MSG Channel 1', 'sat_id': 6200014, 'chan_id': 100015, 'data_cat': 'GORN', 'data_source': 'EUMETCAST'}, 10.8: {'description': 'MSG Channel 9', 'sat_id': 6200014, 'chan_id': 900015, 'data_cat': 'GORN', 'data_source': 'EUMETCAST'}, 'HRV': {'description': 'MSG Channel 12', 'sat_id': 6200014, 'chan_id': 1200015, 'data_cat': 'GORN', 'data_source': 'EUMETCAST'}, } Saving an image for 'chn' will then go like: image.save(filename, fformat='mpop.imageo.formats.ninjotiff', physic_unit=physic_unit, ch_min_measurement_unit=min_value, ch_max_measurement_unit=max_value, **ninjotiff_config[chn]) """ import sys import os from datetime import datetime, timedelta import logging logging.basicConfig(level=logging.DEBUG) # Basic check. try: os.environ['PPP_CONFIG_DIR'] except KeyError: print >>sys.stderr, "PPP_CONFIG_DIR is not defined" sys.exit(2) from mpop.satellites import GeostationaryFactory import mpop.utils LOG = mpop.utils.get_logger(__name__) if "DEBUG" in os.environ: mpop.utils.debug_on() # Handle argument. try: filename = sys.argv[1] except IndexError: print >> sys.stderr, "usage: ninjotiff_example " exit(2) i__ = os.path.basename(filename).split('-') TIMESLOT = datetime.strptime(i__[6], '%Y%m%d%H%M') SATNO = "%02d" % (7 + int(i__[2][3])) # Areas to be loaded into and to be projected onto. AREAS = ( ('visir_full', 'MSGF', 'msg_pc'), #('hrv_north', 'MSGHRVN', 'msg_hrvn_pc') ) CHANNEL_DICT = { #'visir_full': (0.6, 0.8, 1.6, 3.9, 6.2, 7.3, 8.7, 9.7, 10.8, 12.0, 13.4), 'visir_full': (0.6, 10.8,), 'hrv_north': ('HRV',) } BITS_PER_SAMPLE = 8 DO_CONVECTION = False for area_name, area_in, area_out in AREAS: global_data = GeostationaryFactory.create_scene("meteosat", SATNO, "seviri", area=area_in, time_slot=TIMESLOT) # Load channel by channel (to save memory). for chn in CHANNEL_DICT[area_name]: global_data.load([chn]) chn_name = global_data[chn].name # Save 'unit' ... it seems to be lost somewhere. global_data[chn].unit = global_data[chn].info.get('units', 'None') # Resample to Plate Caree. scene = global_data.project(area_out, mode='quick', precompute=True) # Kelvin -> Celsius. physic_unit = scene[chn].unit.upper() if physic_unit in ('K', 'KELVIN'): scene[chn].data -= 273.15 physic_unit = scene[chn].unit = 'C' # Value range as DWD if physic_unit in ('C', 'CELSIUS'): # IR value_range = (-88.5, 40.) else: # VIS value_range = (0., 125.) # # A GeoImage specifying a color range. # # If no color_range specified, MPOP will not scaled the data into the [0., 1.] range. 
# In that case set the data_is_scaled_01 option to False in img.save # img = scene.image(chn, mode="L", crange=[value_range]) LOG.info("%s (%s, %s) %.2f %.2f %.2f" % (chn_name, physic_unit, img.channels[0].dtype, img.channels[0].min(), img.channels[0].mean(), img.channels[0].max())) # # Save it to Ninjo tif format, pass physics unit and Ninjo product name # (by "coincidence" product name correspond to MPOP's channel name :). # # If physics unit is not passed, we will expect to find it in ninjotiff's config file. # filename = ('MSG-' + TIMESLOT.strftime("%Y%m%d_%H%M") + '-' + area_name.split('_')[-1] + '-' + chn_name + '-%db.tif' % BITS_PER_SAMPLE) LOG.info("Saving to Ninjo tif %s" % filename) img.save(filename, fformat='mpop.imageo.formats.ninjotiff', physic_unit=physic_unit, ninjo_product_name=chn_name, ch_min_measurement_unit=value_range[0], ch_max_measurement_unit=value_range[1], nbits=BITS_PER_SAMPLE) # Cleanup. scene.unload([chn]) global_data.unload([chn]) if DO_CONVECTION: # # RGB example, convection # # NOT ready for 16 bit BITS_PER_SAMPLE = 8 product_name = 'convection' area_name, area_in, area_out = ('visir_europe', 'MSGNH', 'msg_ninjo_europe_big') channels = [0.635, 1.63, 3.75, 6.7, 7.3, 10.8] global_data = GeostationaryFactory.create_scene("meteosat", SATNO, "seviri", area=area_in, time_slot=TIMESLOT) # Load channels. global_data.load(channels) # Resample to Plate Caree. scene = global_data.project(area_out, mode='quick', precompute=True) img = scene.image.convection() filename = ('MSG-' + TIMESLOT.strftime("%Y%m%d_%H%M") + '-' + area_name.split('_')[-1] + '-' + product_name + '-%db.tif' % BITS_PER_SAMPLE) LOG.info("Saving to Ninjo tif %s" % filename) img.save(filename, fformat='mpop.imageo.formats.ninjotiff', ninjo_product_name='msg_' + product_name, nbits=BITS_PER_SAMPLE) mpop-1.5.0/mpop/imageo/formats/ninjotiff_example.areas000066400000000000000000000023551317160620000230760ustar00rootroot00000000000000VERSION: 1.1 REGION: MSGF { NAME: Full globe MSG image 0 degrees PCS_ID: geos0 PCS_DEF: proj=geos,lon_0=0.0,a=6378144.0,b=6356759.0,h=35785831.0,rf=295.49 XSIZE: 3712 YSIZE: 3712 AREA_EXTENT: (-5568742.4, -5568742.4, 5568742.4, 5568742.4) }; REGION: MSGHRVN { NAME: High resolution northern quarter globe MSG image 0 degrees PCS_ID: geos0 PCS_DEF: proj=geos,lon_0=0.0,a=6378144.0,b=6356759.0,h=35785831.0,rf=295.49 XSIZE: 11136 YSIZE: 2784 AREA_EXTENT: (-5568742.4, 2655354.0, 5568742.4, 5439725.2) }; REGION: msg_pc { NAME: Plate Caree map, 0 degrees PCS_ID: msg_pc PCS_DEF: proj=eqc,lon_0=0.0,lat_0=0.0 XSIZE: 6666 YSIZE: 6666 AREA_EXTENT: (-10007255.76, -10007255.76, 10007255.76, 10007255.76 ) }; REGION: msg_hrvn_pc { NAME: Plate Caree map, 0 degrees PCS_ID: msg_hrvn_pc PCS_DEF: proj=eqc,lon_0=0.0,lat_0=0.0 XSIZE: 10000 YSIZE: 5000 AREA_EXTENT: (-5005380.72, 3061575.43, 5007807.48, 8068169.53) }; mpop-1.5.0/mpop/imageo/formats/tifffile.py000066400000000000000000005366651317160620000205420ustar00rootroot00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- # tifffile.py # Copyright (c) 2008-2014, Christoph Gohlke # Copyright (c) 2008-2014, The Regents of the University of California # Produced at the Laboratory for Fluorescence Dynamics # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. 
# * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # * Neither the name of the copyright holders nor the names of any # contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. """Read and write image data from and to TIFF files. Image and metadata can be read from TIFF, BigTIFF, OME-TIFF, STK, LSM, NIH, SGI, ImageJ, MicroManager, FluoView, SEQ and GEL files. Only a subset of the TIFF specification is supported, mainly uncompressed and losslessly compressed 2**(0 to 6) bit integer, 16, 32 and 64-bit float, grayscale and RGB(A) images, which are commonly used in bio-scientific imaging. Specifically, reading JPEG and CCITT compressed image data or EXIF, IPTC, GPS, and XMP metadata is not implemented. Only primary info records are read for STK, FluoView, MicroManager, and NIH image formats. TIFF, the Tagged Image File Format, is under the control of Adobe Systems. BigTIFF allows for files greater than 4 GB. STK, LSM, FluoView, SGI, SEQ, GEL, and OME-TIFF, are custom extensions defined by Molecular Devices (Universal Imaging Corporation), Carl Zeiss MicroImaging, Olympus, Silicon Graphics International, Media Cybernetics, Molecular Dynamics, and the Open Microscopy Environment consortium respectively. For command line usage run ``python tifffile.py --help`` :Author: `Christoph Gohlke `_ :Organization: Laboratory for Fluorescence Dynamics, University of California, Irvine :Version: 2014.08.24 Requirements ------------ * `CPython 2.7 or 3.4 `_ * `Numpy 1.8.2 `_ * `Matplotlib 1.4 `_ (optional for plotting) * `Tifffile.c 2013.11.05 `_ (recommended for faster decoding of PackBits and LZW encoded strings) Notes ----- The API is not stable yet and might change between revisions. Tested on little-endian platforms only. Other Python packages and modules for reading bio-scientific TIFF files: * `Imread `_ * `PyLibTiff `_ * `SimpleITK `_ * `PyLSM `_ * `PyMca.TiffIO.py `_ (same as fabio.TiffIO) * `BioImageXD.Readers `_ * `Cellcognition.io `_ * `CellProfiler.bioformats `_ Acknowledgements ---------------- * Egor Zindy, University of Manchester, for cz_lsm_scan_info specifics. * Wim Lewis for a bug fix and some read_cz_lsm functions. * Hadrien Mary for help on reading MicroManager files. References ---------- (1) TIFF 6.0 Specification and Supplements. Adobe Systems Incorporated. http://partners.adobe.com/public/developer/tiff/ (2) TIFF File Format FAQ. http://www.awaresystems.be/imaging/tiff/faq.html (3) MetaMorph Stack (STK) Image File Format. 
http://support.meta.moleculardevices.com/docs/t10243.pdf (4) Image File Format Description LSM 5/7 Release 6.0 (ZEN 2010). Carl Zeiss MicroImaging GmbH. BioSciences. May 10, 2011 (5) File Format Description - LSM 5xx Release 2.0. http://ibb.gsf.de/homepage/karsten.rodenacker/IDL/Lsmfile.doc (6) The OME-TIFF format. http://www.openmicroscopy.org/site/support/file-formats/ome-tiff (7) UltraQuant(r) Version 6.0 for Windows Start-Up Guide. http://www.ultralum.com/images%20ultralum/pdf/UQStart%20Up%20Guide.pdf (8) Micro-Manager File Formats. http://www.micro-manager.org/wiki/Micro-Manager_File_Formats (9) Tags for TIFF and Related Specifications. Digital Preservation. http://www.digitalpreservation.gov/formats/content/tiff_tags.shtml Examples -------- >>> data = numpy.random.rand(5, 301, 219) >>> imsave('temp.tif', data) >>> image = imread('temp.tif') >>> numpy.testing.assert_array_equal(image, data) >>> with TiffFile('temp.tif') as tif: ... images = tif.asarray() ... for page in tif: ... for tag in page.tags.values(): ... t = tag.name, tag.value ... image = page.asarray() """ from __future__ import division, print_function import sys import os import re import glob import math import zlib import time import json import struct import warnings import tempfile import datetime import collections from fractions import Fraction from xml.etree import cElementTree as etree import numpy try: import _tifffile except ImportError: warnings.warn( "failed to import the optional _tifffile C extension module.\n" "Loading of some compressed images will be slow.\n" "Tifffile.c can be obtained at http://www.lfd.uci.edu/~gohlke/") __version__ = '2014.08.24' __docformat__ = 'restructuredtext en' __all__ = ('imsave', 'imread', 'imshow', 'TiffFile', 'TiffWriter', 'TiffSequence') def imsave(filename, data, **kwargs): """Write image data to TIFF file. Refer to the TiffWriter class and member functions for documentation. Parameters ---------- filename : str Name of file to write. data : array_like Input image. The last dimensions are assumed to be image depth, height, width, and samples. kwargs : dict Parameters 'byteorder', 'bigtiff', and 'software' are passed to the TiffWriter class. Parameters 'photometric', 'planarconfig', 'resolution', 'description', 'compress', 'volume', and 'extratags' are passed to the TiffWriter.save function. Examples -------- >>> data = numpy.random.rand(2, 5, 3, 301, 219) >>> description = u'{"shape": %s}' % str(list(data.shape)) >>> imsave('temp.tif', data, compress=6, ... extratags=[(270, 's', 0, description, True)]) Save tiles with compression enabled >>> data = numpy.random.rand(400, 300) >>> imsave('temp.tif', data, compress=6, tile_width=150, tile_length=100) >>> with TiffFile('temp.tif') as tif: ... image = tif.asarray() ... page = tif[0] >>> numpy.testing.assert_array_equal(image, data) >>> page.tags['tile_width'].value 150 >>> page.tags['tile_length'].value 100 Save tiles with compression disabled >>> data = numpy.random.rand(400, 300) >>> imsave('temp.tif', data, compress=0, tile_width=150, tile_length=100) >>> with TiffFile('temp.tif') as tif: ... image = tif.asarray() ... page = tif[0] >>> numpy.testing.assert_array_equal(image, data) >>> page.tags['tile_width'].value 150 >>> page.tags['tile_length'].value 100 Save tiles with compression enabled, 3 samples per pixel >>> data = numpy.random.rand(3, 400, 300) >>> imsave('temp.tif', data, compress=6, tile_width=150, tile_length=100) >>> with TiffFile('temp.tif') as tif: ... image = tif.asarray() ... 
page = tif[0] >>> numpy.testing.assert_array_equal(image, data) >>> page.tags['tile_width'].value 150 >>> page.tags['tile_length'].value 100 Save colormap >>> data = (numpy.random.rand(400, 300)*250).astype(numpy.uint8) >>> cmap1ch = [x*256 for x in range(256)] >>> cmap = cmap1ch + cmap1ch + cmap1ch >>> data_colored = numpy.take(cmap1ch, data) >>> data_colored = numpy.dstack((data_colored, data_colored, data_colored)) >>> data_colored = numpy.swapaxes(numpy.swapaxes(data_colored,0,2),1,2) >>> imsave('temp.tif', data, photometric='palette', colormap = cmap) >>> with TiffFile('temp.tif') as tif: ... image = tif.asarray() ... page = tif[0] >>> numpy.testing.assert_array_equal(image, data_colored) >>> numpy.testing.assert_array_equal(page.tags['color_map'].value, cmap) """ tifargs = {} for key in ('byteorder', 'bigtiff', 'software', 'writeshape'): if key in kwargs: tifargs[key] = kwargs[key] del kwargs[key] if 'writeshape' not in kwargs: kwargs['writeshape'] = True if 'bigtiff' not in tifargs and data.size*data.dtype.itemsize > 2000*2**20: tifargs['bigtiff'] = True with TiffWriter(filename, **tifargs) as tif: tif.save(data, **kwargs) class TiffWriter(object): """Write image data to TIFF file. TiffWriter instances must be closed using the close method, which is automatically called when using the 'with' statement. Examples -------- >>> data = numpy.random.rand(2, 5, 3, 301, 219) >>> with TiffWriter('temp.tif', bigtiff=True) as tif: ... for i in range(data.shape[0]): ... tif.save(data[i], compress=6) """ TYPES = {'B': 1, 's': 2, 'H': 3, 'I': 4, '2I': 5, 'b': 6, 'h': 8, 'i': 9, 'f': 11, 'd': 12, 'Q': 16, 'q': 17} TAGS = { 'new_subfile_type': 254, 'subfile_type': 255, 'image_width': 256, 'image_length': 257, 'bits_per_sample': 258, 'compression': 259, 'photometric': 262, 'fill_order': 266, 'document_name': 269, 'image_description': 270, 'strip_offsets': 273, 'orientation': 274, 'samples_per_pixel': 277, 'rows_per_strip': 278, 'strip_byte_counts': 279, 'x_resolution': 282, 'y_resolution': 283, 'planar_configuration': 284, 'page_name': 285, 'resolution_unit': 296, 'software': 305, 'datetime': 306, 'predictor': 317, 'color_map': 320, 'tile_width': 322, 'tile_length': 323, 'tile_offsets': 324, 'tile_byte_counts': 325, 'extra_samples': 338, 'sample_format': 339, 'image_depth': 32997, 'tile_depth': 32998} def __init__(self, filename, bigtiff=False, byteorder=None, software='tifffile.py'): """Create a new TIFF file for writing. Use bigtiff=True when creating files greater than 2 GB. Parameters ---------- filename : str Name of file to write. bigtiff : bool If True, the BigTIFF format is used. byteorder : {'<', '>'} The endianness of the data in the file. By default this is the system's native byte order. software : str Name of the software used to create the image. Saved with the first page only. 
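        Examples
        --------
        A minimal sketch of constructor usage; the file name 'temp_big.tif'
        and the array contents are illustrative only:

        >>> data = numpy.random.rand(64, 64)
        >>> with TiffWriter('temp_big.tif', bigtiff=True, byteorder='<') as tif:
        ...     tif.save(data)
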
""" if byteorder not in (None, '<', '>'): raise ValueError("invalid byteorder %s" % byteorder) if byteorder is None: byteorder = '<' if sys.byteorder == 'little' else '>' self._byteorder = byteorder self._software = software self._fh = open(filename, 'wb') self._fh.write({'<': b'II', '>': b'MM'}[byteorder]) if bigtiff: self._bigtiff = True self._offset_size = 8 self._tag_size = 20 self._numtag_format = 'Q' self._offset_format = 'Q' self._val_format = '8s' self._fh.write(struct.pack(byteorder+'HHH', 43, 8, 0)) else: self._bigtiff = False self._offset_size = 4 self._tag_size = 12 self._numtag_format = 'H' self._offset_format = 'I' self._val_format = '4s' self._fh.write(struct.pack(byteorder+'H', 42)) # first IFD self._ifd_offset = self._fh.tell() self._fh.write(struct.pack(byteorder+self._offset_format, 0)) def save(self, data, photometric=None, planarconfig=None, resolution=None, description=None, volume=False, writeshape=False, compress=0, colormap=None, extrasamples_type=1, tile_width=None, tile_length=None, extratags=()): """Write image data to TIFF file. Image data are written in one stripe per plane. Dimensions larger than 2 to 4 (depending on photometric mode, planar configuration, and SGI mode) are flattened and saved as separate pages. The 'sample_format' and 'bits_per_sample' TIFF tags are derived from the data type. Parameters ---------- data : array_like Input image. The last dimensions are assumed to be image depth, height, width, and samples. photometric : {'minisblack', 'miniswhite', 'rgb', 'palette'} The color space of the image data. By default this setting is inferred from the data shape. planarconfig : {'contig', 'planar'} Specifies if samples are stored contiguous or in separate planes. By default this setting is inferred from the data shape. 'contig': last dimension contains samples. 'planar': third last dimension contains samples. resolution : (float, float) or ((int, int), (int, int)) X and Y resolution in dots per inch as float or rational numbers. description : str The subject of the image. Saved with the first page only. compress : int Values from 0 to 9 controlling the level of zlib compression. If 0, data are written uncompressed (default). volume : bool If True, volume data are stored in one tile (if applicable) using the SGI image_depth and tile_depth tags. Image width and depth must be multiple of 16. Few software can read this format, e.g. MeVisLab. writeshape : bool If True, write the data shape to the image_description tag if necessary and no other description is given. colormap : list of uint16's (3 concatenated lists for RGB) Individual RGB arrays describing the color value for the corresponding data value. For example, image data with a data type of unsigned 8-bit integers have 256 possible values (0-255). So the colormap will have 3*256 values ranging from 0 to 65535 (2**16 - 1). tile_width : int If not none, data is stored in tiles of size (tile_length, tile_width). Only in conjunction with defined tile_length (default : None) tile_length : int If not none, data is stored in tiles of size (tile_length, tile_width). Only in conjunction with defined tile_width (default : None) extratags: sequence of tuples Additional tags as [(code, dtype, count, value, writeonce)]. code : int The TIFF tag Id. dtype : str Data type of items in 'value' in Python struct format. One of B, s, H, I, 2I, b, h, i, f, d, Q, or q. count : int Number of data values. Not used for string values. value : sequence 'Count' values compatible with 'dtype'. 
writeonce : bool If True, the tag is written to the first page only. """ if photometric not in (None, 'minisblack', 'miniswhite', 'rgb', 'palette'): raise ValueError("invalid photometric %s" % photometric) if planarconfig not in (None, 'contig', 'planar'): raise ValueError("invalid planarconfig %s" % planarconfig) if not 0 <= compress <= 9: raise ValueError("invalid compression level %s" % compress) fh = self._fh byteorder = self._byteorder numtag_format = self._numtag_format val_format = self._val_format offset_format = self._offset_format offset_size = self._offset_size tag_size = self._tag_size data = numpy.asarray(data, dtype=byteorder+data.dtype.char, order='C') data_shape = shape = data.shape data = numpy.atleast_2d(data) # enable tile writing if tile width and length specified if tile_length is not None and tile_width is not None: write_tiles = 1 else: write_tiles = 0 # normalize shape of data samplesperpixel = 1 extrasamples = 0 if volume and data.ndim < 3: volume = False if photometric is None: if planarconfig: photometric = 'rgb' elif data.ndim > 2 and shape[-1] in (3, 4): photometric = 'rgb' elif volume and data.ndim > 3 and shape[-4] in (3, 4): photometric = 'rgb' elif data.ndim > 2 and shape[-3] in (3, 4): photometric = 'rgb' else: photometric = 'minisblack' if planarconfig and len(shape) <= (3 if volume else 2) and ( photometric != 'palette'): planarconfig = None photometric = 'minisblack' if photometric == 'rgb': if len(shape) < 3: raise ValueError("not a RGB(A) image") if len(shape) < 4: volume = False if planarconfig is None: if shape[-1] in (3, 4): planarconfig = 'contig' elif shape[-4 if volume else -3] in (3, 4): planarconfig = 'planar' elif shape[-1] > shape[-4 if volume else -3]: planarconfig = 'planar' else: planarconfig = 'contig' if planarconfig == 'contig': data = data.reshape((-1, 1) + shape[(-4 if volume else -3):]) samplesperpixel = data.shape[-1] else: data = data.reshape( (-1,) + shape[(-4 if volume else -3):] + (1,)) samplesperpixel = data.shape[1] if samplesperpixel > 3: extrasamples = samplesperpixel - 3 elif photometric == 'palette': if len(shape) > 2: raise ValueError("not a 1-channel image") samplesperpixel = 1 planarconfig = None # remove trailing 1s while len(shape) > 2 and shape[-1] == 1: shape = shape[:-1] if len(shape) < 3: volume = False data = data.reshape( (-1, 1) + shape[(-3 if volume else -2):] + (1,)) elif planarconfig and len(shape) > (3 if volume else 2): if planarconfig == 'contig': data = data.reshape((-1, 1) + shape[(-4 if volume else -3):]) samplesperpixel = data.shape[-1] else: data = data.reshape( (-1,) + shape[(-4 if volume else -3):] + (1,)) samplesperpixel = data.shape[1] extrasamples = samplesperpixel - 1 else: planarconfig = None # remove trailing 1s while len(shape) > 2 and shape[-1] == 1: shape = shape[:-1] if len(shape) < 3: volume = False if False and ( len(shape) > (3 if volume else 2) and shape[-1] < 5 and all(shape[-1] < i for i in shape[(-4 if volume else -3):-1])): # DISABLED: non-standard TIFF, e.g. 
(220, 320, 2) planarconfig = 'contig' samplesperpixel = shape[-1] data = data.reshape((-1, 1) + shape[(-4 if volume else -3):]) else: data = data.reshape( (-1, 1) + shape[(-3 if volume else -2):] + (1,)) if samplesperpixel == 2: warnings.warn("writing non-standard TIFF (samplesperpixel 2)") if volume and (data.shape[-2] % 16 or data.shape[-3] % 16): warnings.warn("volume width or length are not multiple of 16") volume = False data = numpy.swapaxes(data, 1, 2) data = data.reshape( (data.shape[0] * data.shape[1],) + data.shape[2:]) # data.shape is now normalized 5D or 6D, depending on volume # (pages, planar_samples, (depth,) height, width, contig_samples) assert len(data.shape) in (5, 6) shape = data.shape bytestr = bytes if sys.version[0] == '2' else ( lambda x: bytes(x, 'utf-8') if isinstance(x, str) else x) tags = [] # list of (code, ifdentry, ifdvalue, writeonce) if volume or write_tiles: # use tiles to save volume data or explicitly requests tag_byte_counts = TiffWriter.TAGS['tile_byte_counts'] tag_offsets = TiffWriter.TAGS['tile_offsets'] else: # else use strips tag_byte_counts = TiffWriter.TAGS['strip_byte_counts'] tag_offsets = TiffWriter.TAGS['strip_offsets'] def pack(fmt, *val): return struct.pack(byteorder+fmt, *val) def addtag(code, dtype, count, value, writeonce=False): # Compute ifdentry & ifdvalue bytes from code, dtype, count, value. # Append (code, ifdentry, ifdvalue, writeonce) to tags list. code = int(TiffWriter.TAGS.get(code, code)) try: tifftype = TiffWriter.TYPES[dtype] except KeyError: raise ValueError("unknown dtype %s" % dtype) rawcount = count if dtype == 's': value = bytestr(value) + b'\0' count = rawcount = len(value) value = (value, ) if len(dtype) > 1: count *= int(dtype[:-1]) dtype = dtype[-1] ifdentry = [pack('HH', code, tifftype), pack(offset_format, rawcount)] ifdvalue = None if count == 1: if isinstance(value, (tuple, list)): value = value[0] ifdentry.append(pack(val_format, pack(dtype, value))) elif struct.calcsize(dtype) * count <= offset_size: ifdentry.append(pack(val_format, pack(str(count)+dtype, *value))) else: ifdentry.append(pack(offset_format, 0)) ifdvalue = pack(str(count)+dtype, *value) tags.append((code, b''.join(ifdentry), ifdvalue, writeonce)) def rational(arg, max_denominator=1000000): # return nominator and denominator from float or two integers try: f = Fraction.from_float(arg) except TypeError: f = Fraction(arg[0], arg[1]) f = f.limit_denominator(max_denominator) return f.numerator, f.denominator if self._software: addtag('software', 's', 0, self._software, writeonce=True) self._software = None # only save to first page if description: addtag('image_description', 's', 0, description, writeonce=True) elif writeshape and shape[0] > 1 and shape != data_shape: addtag('image_description', 's', 0, "shape=(%s)" % (",".join('%i' % i for i in data_shape)), writeonce=True) addtag('datetime', 's', 0, datetime.datetime.now().strftime("%Y:%m:%d %H:%M:%S"), writeonce=True) addtag('compression', 'H', 1, 32946 if compress else 1) addtag('orientation', 'H', 1, 1) addtag('image_width', 'I', 1, shape[-2]) addtag('image_length', 'I', 1, shape[-3]) if volume: addtag('image_depth', 'I', 1, shape[-4]) addtag('tile_depth', 'I', 1, shape[-4]) addtag('tile_width', 'I', 1, shape[-2]) addtag('tile_length', 'I', 1, shape[-3]) elif write_tiles: addtag('tile_width', 'I', 1, tile_width) addtag('tile_length', 'I', 1, tile_length) addtag('new_subfile_type', 'I', 1, 0 if shape[0] == 1 else 2) # addtag('sample_format', 'H', 1, # {'u': 1, 'i': 2, 'f': 3, 'c': 
6}[data.dtype.kind]) addtag('sample_format', 'H', samplesperpixel, ({'u': 1, 'i': 2, 'f': 3, 'c': 6}[data.dtype.kind],) * samplesperpixel) addtag('photometric', 'H', 1, {'miniswhite': 0, 'minisblack': 1, 'rgb': 2, 'palette': 3}[photometric]) if photometric == 'palette': if colormap == None: raise ValueError( "photometric 'palette' specified but colormap missing") else: addtag('color_map', 'H', 3 * (2 ** (data.dtype.itemsize * 8 * samplesperpixel)), colormap) addtag('samples_per_pixel', 'H', 1, samplesperpixel) if planarconfig and samplesperpixel > 1: addtag('planar_configuration', 'H', 1, 1 if planarconfig == 'contig' else 2) addtag('bits_per_sample', 'H', samplesperpixel, (data.dtype.itemsize * 8, ) * samplesperpixel) else: addtag('bits_per_sample', 'H', 1, data.dtype.itemsize * 8) if extrasamples: if photometric == 'rgb' and extrasamples == 1: addtag('extra_samples', 'H', 1, extrasamples_type) # associated alpha channel else: addtag('extra_samples', 'H', extrasamples, (0,) * extrasamples) if resolution: addtag('x_resolution', '2I', 1, rational(resolution[0])) addtag('y_resolution', '2I', 1, rational(resolution[1])) addtag('resolution_unit', 'H', 1, 2) if not write_tiles: addtag('rows_per_strip', 'I', 1, shape[-3] * (shape[-4] if volume else 1)) if write_tiles: # use multiple tiles per plane tiles_x = (shape[3] + tile_width - 1) // tile_width tiles_y = (shape[2] + tile_length - 1) // tile_length strip_byte_counts = \ (tile_width * tile_length * shape[-1] * data.dtype.itemsize,) \ * shape[1] * tiles_x * tiles_y else: # use one strip or tile per plane tiles_x = tiles_y = 1 strip_byte_counts = \ (data[0, 0].size * data.dtype.itemsize,) * shape[1] addtag(tag_byte_counts, offset_format, shape[1] * tiles_x * tiles_y, strip_byte_counts) addtag(tag_offsets, offset_format, shape[1] * tiles_x * tiles_y, (0, ) * shape[1] * tiles_x * tiles_y) # add extra tags from users for t in extratags: addtag(*t) # the entries in an IFD must be sorted in ascending order by tag code tags = sorted(tags, key=lambda x: x[0]) if not self._bigtiff and (fh.tell() + data.size*data.dtype.itemsize > 2**31-1): raise ValueError("data too large for non-bigtiff file") for pageindex in range(shape[0]): # update pointer at ifd_offset pos = fh.tell() fh.seek(self._ifd_offset) fh.write(pack(offset_format, pos)) fh.seek(pos) # write ifdentries fh.write(pack(numtag_format, len(tags))) tag_offset = fh.tell() fh.write(b''.join(t[1] for t in tags)) self._ifd_offset = fh.tell() fh.write(pack(offset_format, 0)) # offset to next IFD # write tag values and patch offsets in ifdentries, if necessary for tagindex, tag in enumerate(tags): if tag[2]: pos = fh.tell() fh.seek(tag_offset + tagindex*tag_size + offset_size + 4) fh.write(pack(offset_format, pos)) fh.seek(pos) if tag[0] == tag_offsets: strip_offsets_offset = pos elif tag[0] == tag_byte_counts: strip_byte_counts_offset = pos fh.write(tag[2]) # write image data data_offset = fh.tell() if write_tiles: # multiple tiles per page if compress: # reset and use compress sizes strip_byte_counts = [] for plane in data[pageindex]: for ty in xrange(0, tiles_y): for tx in xrange(0, tiles_x): # allocate fixed size tile filled with zeros tile = numpy.zeros((tile_width * tile_length, shape[-1]), data.dtype) # clipping right and bottom if necessary # tile length filled with image data itl = min(tile_length, shape[2] - ty*tile_length) # tile width filled with image data itw = min(tile_width, shape[3] - tx*tile_width) ioffs = tx*tile_width for tl in xrange(0, itl): # copy data to tile line ir = 
ty*tile_length+tl tile[tl*tile_width:tl*tile_width+itw] \ = plane[ir, ioffs:ioffs+itw] if compress: tile = zlib.compress(tile, compress) strip_byte_counts.append(len(tile)) fh.write(tile) else: tile.tofile(fh) fh.flush() else: # one strip/tile per page if compress: strip_byte_counts = [] for plane in data[pageindex]: plane = zlib.compress(plane, compress) strip_byte_counts.append(len(plane)) fh.write(plane) else: # if this fails try update Python/numpy data[pageindex].tofile(fh) fh.flush() # update strip and tile offsets and byte_counts if necessary pos = fh.tell() for tagindex, tag in enumerate(tags): if tag[0] == tag_offsets: # strip or tile offsets if tag[2]: fh.seek(strip_offsets_offset) strip_offset = data_offset for size in strip_byte_counts: fh.write(pack(offset_format, strip_offset)) strip_offset += size else: fh.seek(tag_offset + tagindex*tag_size + offset_size + 4) fh.write(pack(offset_format, data_offset)) elif tag[0] == tag_byte_counts: # strip or tile byte_counts if compress: if tag[2]: fh.seek(strip_byte_counts_offset) for size in strip_byte_counts: fh.write(pack(offset_format, size)) else: fh.seek(tag_offset + tagindex*tag_size + offset_size + 4) fh.write(pack(offset_format, strip_byte_counts[0])) break fh.seek(pos) fh.flush() # remove tags that should be written only once if pageindex == 0: tags = [t for t in tags if not t[-1]] def close(self): self._fh.close() def __enter__(self): return self def __exit__(self, exc_type, exc_value, traceback): self.close() def imread(files, **kwargs): """Return image data from TIFF file(s) as numpy array. The first image series is returned if no arguments are provided. Parameters ---------- files : str or list File name, glob pattern, or list of file names. key : int, slice, or sequence of page indices Defines which pages to return as array. series : int Defines which series of pages in file to return as array. multifile : bool If True (default), OME-TIFF data may include pages from multiple files. pattern : str Regular expression pattern that matches axes names and indices in file names. kwargs : dict Additional parameters passed to the TiffFile or TiffSequence asarray function. Examples -------- >>> im = imread('test.tif', key=0) >>> im.shape (256, 256, 4) >>> ims = imread(['test.tif', 'test.tif']) >>> ims.shape (2, 256, 256, 4) """ kwargs_file = {} if 'multifile' in kwargs: kwargs_file['multifile'] = kwargs['multifile'] del kwargs['multifile'] else: kwargs_file['multifile'] = True kwargs_seq = {} if 'pattern' in kwargs: kwargs_seq['pattern'] = kwargs['pattern'] del kwargs['pattern'] if isinstance(files, basestring) and any(i in files for i in '?*'): files = glob.glob(files) if not files: raise ValueError('no files found') if len(files) == 1: files = files[0] if isinstance(files, basestring): with TiffFile(files, **kwargs_file) as tif: return tif.asarray(**kwargs) else: with TiffSequence(files, **kwargs_seq) as imseq: return imseq.asarray(**kwargs) class lazyattr(object): """Lazy object attribute whose value is computed on first access.""" __slots__ = ('func', ) def __init__(self, func): self.func = func def __get__(self, instance, owner): if instance is None: return self value = self.func(instance) if value is NotImplemented: return getattr(super(owner, instance), self.func.__name__) setattr(instance, self.func.__name__, value) return value class TiffFile(object): """Read image and metadata from TIFF, STK, LSM, and FluoView files. 
TiffFile instances must be closed using the close method, which is automatically called when using the 'with' statement. Attributes ---------- pages : list All TIFF pages in file. series : list of Records(shape, dtype, axes, TiffPages) TIFF pages with compatible shapes and types. micromanager_metadata: dict Extra MicroManager non-TIFF metadata in the file, if exists. All attributes are read-only. Examples -------- >>> with TiffFile('test.tif') as tif: ... data = tif.asarray() ... data.shape (256, 256, 4) """ def __init__(self, arg, name=None, offset=None, size=None, multifile=True, multifile_close=True): """Initialize instance from file. Parameters ---------- arg : str or open file Name of file or open file object. The file objects are closed in TiffFile.close(). name : str Optional name of file in case 'arg' is a file handle. offset : int Optional start position of embedded file. By default this is the current file position. size : int Optional size of embedded file. By default this is the number of bytes from the 'offset' to the end of the file. multifile : bool If True (default), series may include pages from multiple files. Currently applies to OME-TIFF only. multifile_close : bool If True (default), keep the handles of other files in multifile series closed. This is inefficient when few files refer to many pages. If False, the C runtime may run out of resources. """ self._fh = FileHandle(arg, name=name, offset=offset, size=size) self.offset_size = None self.pages = [] self._multifile = bool(multifile) self._multifile_close = bool(multifile_close) self._files = {self._fh.name: self} # cache of TiffFiles try: self._fromfile() except Exception: self._fh.close() raise @property def filehandle(self): """Return file handle.""" return self._fh @property def filename(self): """Return name of file handle.""" return self._fh.name def close(self): """Close open file handle(s).""" for tif in self._files.values(): tif._fh.close() self._files = {} def _fromfile(self): """Read TIFF header and all page records from file.""" self._fh.seek(0) try: self.byteorder = {b'II': '<', b'MM': '>'}[self._fh.read(2)] except KeyError: raise ValueError("not a valid TIFF file") version = struct.unpack(self.byteorder+'H', self._fh.read(2))[0] if version == 43: # BigTiff self.offset_size, zero = struct.unpack(self.byteorder+'HH', self._fh.read(4)) if zero or self.offset_size != 8: raise ValueError("not a valid BigTIFF file") elif version == 42: self.offset_size = 4 else: raise ValueError("not a TIFF file") self.pages = [] while True: try: page = TiffPage(self) self.pages.append(page) except StopIteration: break if not self.pages: raise ValueError("empty TIFF file") if self.is_micromanager: # MicroManager files contain metadata not stored in TIFF tags. self.micromanager_metadata = read_micromanager_metadata(self._fh) if self.is_lsm: self._fix_lsm_strip_offsets() self._fix_lsm_strip_byte_counts() def _fix_lsm_strip_offsets(self): """Unwrap strip offsets for LSM files greater than 4 GB.""" for series in self.series: wrap = 0 previous_offset = 0 for page in series.pages: strip_offsets = [] for current_offset in page.strip_offsets: if current_offset < previous_offset: wrap += 2**32 strip_offsets.append(current_offset + wrap) previous_offset = current_offset page.strip_offsets = tuple(strip_offsets) def _fix_lsm_strip_byte_counts(self): """Set strip_byte_counts to size of compressed data. The strip_byte_counts tag in LSM files contains the number of bytes for the uncompressed data. 
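        A sketch of the clamping idea on made-up offsets and byte counts,
        not data from a real LSM file:

        >>> strips = {0: 100, 80: 50}
        >>> offsets = sorted(strips) + [140]
        >>> [min(strips[o], offsets[i+1] - o) for i, o in enumerate(offsets[:-1])]
        [80, 50]
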
""" if not self.pages: return strips = {} for page in self.pages: assert len(page.strip_offsets) == len(page.strip_byte_counts) for offset, bytecount in zip(page.strip_offsets, page.strip_byte_counts): strips[offset] = bytecount offsets = sorted(strips.keys()) offsets.append(min(offsets[-1] + strips[offsets[-1]], self._fh.size)) for i, offset in enumerate(offsets[:-1]): strips[offset] = min(strips[offset], offsets[i+1] - offset) for page in self.pages: if page.compression: page.strip_byte_counts = tuple( strips[offset] for offset in page.strip_offsets) @lazyattr def series(self): """Return series of TiffPage with compatible shape and properties.""" if not self.pages: return [] series = [] page0 = self.pages[0] if self.is_ome: series = self._omeseries() elif self.is_fluoview: dims = {b'X': 'X', b'Y': 'Y', b'Z': 'Z', b'T': 'T', b'WAVELENGTH': 'C', b'TIME': 'T', b'XY': 'R', b'EVENT': 'V', b'EXPOSURE': 'L'} mmhd = list(reversed(page0.mm_header.dimensions)) series = [Record( axes=''.join(dims.get(i[0].strip().upper(), 'Q') for i in mmhd if i[1] > 1), shape=tuple(int(i[1]) for i in mmhd if i[1] > 1), pages=self.pages, dtype=numpy.dtype(page0.dtype))] elif self.is_lsm: lsmi = page0.cz_lsm_info axes = CZ_SCAN_TYPES[lsmi.scan_type] if page0.is_rgb: axes = axes.replace('C', '').replace('XY', 'XYC') axes = axes[::-1] shape = tuple(getattr(lsmi, CZ_DIMENSIONS[i]) for i in axes) pages = [p for p in self.pages if not p.is_reduced] series = [Record(axes=axes, shape=shape, pages=pages, dtype=numpy.dtype(pages[0].dtype))] if len(pages) != len(self.pages): # reduced RGB pages pages = [p for p in self.pages if p.is_reduced] cp = 1 i = 0 while cp < len(pages) and i < len(shape)-2: cp *= shape[i] i += 1 shape = shape[:i] + pages[0].shape axes = axes[:i] + 'CYX' series.append(Record(axes=axes, shape=shape, pages=pages, dtype=numpy.dtype(pages[0].dtype))) elif self.is_imagej: shape = [] axes = [] ij = page0.imagej_tags if 'frames' in ij: shape.append(ij['frames']) axes.append('T') if 'slices' in ij: shape.append(ij['slices']) axes.append('Z') if 'channels' in ij and not self.is_rgb: shape.append(ij['channels']) axes.append('C') remain = len(self.pages) // (product(shape) if shape else 1) if remain > 1: shape.append(remain) axes.append('I') shape.extend(page0.shape) axes.extend(page0.axes) axes = ''.join(axes) series = [Record(pages=self.pages, shape=tuple(shape), axes=axes, dtype=numpy.dtype(page0.dtype))] elif self.is_nih: if len(self.pages) == 1: shape = page0.shape axes = page0.axes else: shape = (len(self.pages),) + page0.shape axes = 'I' + page0.axes series = [Record(pages=self.pages, shape=shape, axes=axes, dtype=numpy.dtype(page0.dtype))] elif page0.is_shaped: # TODO: shaped files can contain multiple series shape = page0.tags['image_description'].value[7:-1] shape = tuple(int(i) for i in shape.split(b',')) series = [Record(pages=self.pages, shape=shape, axes='Q' * len(shape), dtype=numpy.dtype(page0.dtype))] # generic detection of series if not series: shapes = [] pages = {} for page in self.pages: if not page.shape: continue shape = page.shape + (page.axes, page.compression in TIFF_DECOMPESSORS) if shape not in pages: shapes.append(shape) pages[shape] = [page] else: pages[shape].append(page) series = [Record(pages=pages[s], axes=(('I' + s[-2]) if len(pages[s]) > 1 else s[-2]), dtype=numpy.dtype(pages[s][0].dtype), shape=((len(pages[s]), ) + s[:-2] if len(pages[s]) > 1 else s[:-2])) for s in shapes] # remove empty series, e.g. 
in MD Gel files series = [s for s in series if sum(s.shape) > 0] return series def asarray(self, key=None, series=None, memmap=False): """Return image data from multiple TIFF pages as numpy array. By default the first image series is returned. Parameters ---------- key : int, slice, or sequence of page indices Defines which pages to return as array. series : int Defines which series of pages to return as array. memmap : bool If True, return an array stored in a binary file on disk if possible. """ if key is None and series is None: series = 0 if series is not None: pages = self.series[series].pages else: pages = self.pages if key is None: pass elif isinstance(key, int): pages = [pages[key]] elif isinstance(key, slice): pages = pages[key] elif isinstance(key, collections.Iterable): pages = [pages[k] for k in key] else: raise TypeError("key must be an int, slice, or sequence") if not len(pages): raise ValueError("no pages selected") if self.is_nih: if pages[0].is_palette: result = stack_pages(pages, colormapped=False, squeeze=False) result = numpy.take(pages[0].color_map, result, axis=1) result = numpy.swapaxes(result, 0, 1) else: result = stack_pages(pages, memmap=memmap, colormapped=False, squeeze=False) elif len(pages) == 1: return pages[0].asarray(memmap=memmap) elif self.is_ome: assert not self.is_palette, "color mapping disabled for ome-tiff" if any(p is None for p in pages): # zero out missing pages firstpage = next(p for p in pages if p) nopage = numpy.zeros_like( firstpage.asarray(memmap=False)) s = self.series[series] if memmap: with tempfile.NamedTemporaryFile() as fh: result = numpy.memmap(fh, dtype=s.dtype, shape=s.shape) result = result.reshape(-1) else: result = numpy.empty(s.shape, s.dtype).reshape(-1) index = 0 class KeepOpen: # keep Tiff files open between consecutive pages def __init__(self, parent, close): self.master = parent self.parent = parent self._close = close def open(self, page): if self._close and page and page.parent != self.parent: if self.parent != self.master: self.parent.filehandle.close() self.parent = page.parent self.parent.filehandle.open() def close(self): if self._close and self.parent != self.master: self.parent.filehandle.close() keep = KeepOpen(self, self._multifile_close) for page in pages: keep.open(page) if page: a = page.asarray(memmap=False, colormapped=False, reopen=False) else: a = nopage try: result[index:index + a.size] = a.reshape(-1) except ValueError as e: warnings.warn("ome-tiff: %s" % e) break index += a.size keep.close() else: result = stack_pages(pages, memmap=memmap) if key is None: try: result.shape = self.series[series].shape except ValueError: try: warnings.warn("failed to reshape %s to %s" % ( result.shape, self.series[series].shape)) # try series of expected shapes result.shape = (-1,) + self.series[series].shape except ValueError: # revert to generic shape result.shape = (-1,) + pages[0].shape else: result.shape = (-1,) + pages[0].shape return result def _omeseries(self): """Return image series in OME-TIFF file(s).""" root = etree.fromstring(self.pages[0].tags['image_description'].value) uuid = root.attrib.get('UUID', None) self._files = {uuid: self} dirname = self._fh.dirname modulo = {} result = [] for element in root: if element.tag.endswith('BinaryOnly'): warnings.warn("ome-xml: not an ome-tiff master file") break if element.tag.endswith('StructuredAnnotations'): for annot in element: if not annot.attrib.get('Namespace', '').endswith('modulo'): continue for value in annot: for modul in value: for along in modul: if not 
along.tag[:-1].endswith('Along'): continue axis = along.tag[-1] newaxis = along.attrib.get('Type', 'other') newaxis = AXES_LABELS[newaxis] if 'Start' in along.attrib: labels = range( int(along.attrib['Start']), int(along.attrib['End']) + 1, int(along.attrib.get('Step', 1))) else: labels = [label.text for label in along if label.tag.endswith('Label')] modulo[axis] = (newaxis, labels) if not element.tag.endswith('Image'): continue for pixels in element: if not pixels.tag.endswith('Pixels'): continue atr = pixels.attrib dtype = atr.get('Type', None) axes = ''.join(reversed(atr['DimensionOrder'])) shape = list(int(atr['Size'+ax]) for ax in axes) size = product(shape[:-2]) ifds = [None] * size for data in pixels: if not data.tag.endswith('TiffData'): continue atr = data.attrib ifd = int(atr.get('IFD', 0)) num = int(atr.get('NumPlanes', 1 if 'IFD' in atr else 0)) num = int(atr.get('PlaneCount', num)) idx = [int(atr.get('First'+ax, 0)) for ax in axes[:-2]] try: idx = numpy.ravel_multi_index(idx, shape[:-2]) except ValueError: # ImageJ produces invalid ome-xml when cropping warnings.warn("ome-xml: invalid TiffData index") continue for uuid in data: if not uuid.tag.endswith('UUID'): continue if uuid.text not in self._files: if not self._multifile: # abort reading multifile OME series # and fall back to generic series return [] fname = uuid.attrib['FileName'] try: tif = TiffFile(os.path.join(dirname, fname)) except (IOError, ValueError): tif.close() warnings.warn( "ome-xml: failed to read '%s'" % fname) break self._files[uuid.text] = tif if self._multifile_close: tif.close() pages = self._files[uuid.text].pages try: for i in range(num if num else len(pages)): ifds[idx + i] = pages[ifd + i] except IndexError: warnings.warn("ome-xml: index out of range") # only process first uuid break else: pages = self.pages try: for i in range(num if num else len(pages)): ifds[idx + i] = pages[ifd + i] except IndexError: warnings.warn("ome-xml: index out of range") if all(i is None for i in ifds): # skip images without data continue dtype = next(i for i in ifds if i).dtype result.append(Record(axes=axes, shape=shape, pages=ifds, dtype=numpy.dtype(dtype))) for record in result: for axis, (newaxis, labels) in modulo.items(): i = record.axes.index(axis) size = len(labels) if record.shape[i] == size: record.axes = record.axes.replace(axis, newaxis, 1) else: record.shape[i] //= size record.shape.insert(i+1, size) record.axes = record.axes.replace(axis, axis+newaxis, 1) record.shape = tuple(record.shape) # squeeze dimensions for record in result: record.shape, record.axes = squeeze_axes(record.shape, record.axes) return result def __len__(self): """Return number of image pages in file.""" return len(self.pages) def __getitem__(self, key): """Return specified page.""" return self.pages[key] def __iter__(self): """Return iterator over pages.""" return iter(self.pages) def __str__(self): """Return string containing information about file.""" result = [ self._fh.name.capitalize(), format_size(self._fh.size), {'<': 'little endian', '>': 'big endian'}[self.byteorder]] if self.is_bigtiff: result.append("bigtiff") if len(self.pages) > 1: result.append("%i pages" % len(self.pages)) if len(self.series) > 1: result.append("%i series" % len(self.series)) if len(self._files) > 1: result.append("%i files" % (len(self._files))) return ", ".join(result) def __enter__(self): return self def __exit__(self, exc_type, exc_value, traceback): self.close() @lazyattr def fstat(self): try: return os.fstat(self._fh.fileno()) except Exception: # 
io.UnsupportedOperation return None @lazyattr def is_bigtiff(self): return self.offset_size != 4 @lazyattr def is_rgb(self): return all(p.is_rgb for p in self.pages) @lazyattr def is_palette(self): return all(p.is_palette for p in self.pages) @lazyattr def is_mdgel(self): return any(p.is_mdgel for p in self.pages) @lazyattr def is_mediacy(self): return any(p.is_mediacy for p in self.pages) @lazyattr def is_stk(self): return all(p.is_stk for p in self.pages) @lazyattr def is_lsm(self): return self.pages[0].is_lsm @lazyattr def is_imagej(self): return self.pages[0].is_imagej @lazyattr def is_micromanager(self): return self.pages[0].is_micromanager @lazyattr def is_nih(self): return self.pages[0].is_nih @lazyattr def is_fluoview(self): return self.pages[0].is_fluoview @lazyattr def is_ome(self): return self.pages[0].is_ome class TiffPage(object): """A TIFF image file directory (IFD). Attributes ---------- index : int Index of page in file. dtype : str {TIFF_SAMPLE_DTYPES} Data type of image, colormapped if applicable. shape : tuple Dimensions of the image array in TIFF page, colormapped and with one alpha channel if applicable. axes : str Axes label codes: 'X' width, 'Y' height, 'S' sample, 'I' image series|page|plane, 'Z' depth, 'C' color|em-wavelength|channel, 'E' ex-wavelength|lambda, 'T' time, 'R' region|tile, 'A' angle, 'P' phase, 'H' lifetime, 'L' exposure, 'V' event, 'Q' unknown, '_' missing tags : TiffTags Dictionary of tags in page. Tag values are also directly accessible as attributes. color_map : numpy array Color look up table, if exists. cz_lsm_scan_info: Record(dict) LSM scan info attributes, if exists. imagej_tags: Record(dict) Consolidated ImageJ description and metadata tags, if exists. uic_tags: Record(dict) Consolidated MetaMorph STK/UIC tags, if exists. All attributes are read-only. Notes ----- The internal, normalized '_shape' attribute is 6 dimensional: 0. number planes (stk) 1. planar samples_per_pixel 2. image_depth Z (sgi) 3. image_length Y 4. image_width X 5. contig samples_per_pixel """ def __init__(self, parent): """Initialize instance from file.""" self.parent = parent self.index = len(parent.pages) self.shape = self._shape = () self.dtype = self._dtype = None self.axes = "" self.tags = TiffTags() self._fromfile() self._process_tags() def _fromfile(self): """Read TIFF IFD structure and its tags from file. File cursor must be at storage position of IFD offset and is left at offset to next IFD. Raises StopIteration if offset (first bytes read) is 0. """ fh = self.parent.filehandle byteorder = self.parent.byteorder offset_size = self.parent.offset_size fmt = {4: 'I', 8: 'Q'}[offset_size] offset = struct.unpack(byteorder + fmt, fh.read(offset_size))[0] if not offset: raise StopIteration() # read standard tags tags = self.tags fh.seek(offset) fmt, size = {4: ('H', 2), 8: ('Q', 8)}[offset_size] try: numtags = struct.unpack(byteorder + fmt, fh.read(size))[0] except Exception: warnings.warn("corrupted page list") raise StopIteration() tagcode = 0 for _ in range(numtags): try: tag = TiffTag(self.parent) # print(tag) except TiffTag.Error as e: warnings.warn(str(e)) continue if tagcode > tag.code: # expected for early LSM and tifffile versions warnings.warn("tags are not ordered by code") tagcode = tag.code if tag.name not in tags: tags[tag.name] = tag else: # some files contain multiple IFD with same code # e.g. 
MicroManager files contain two image_description i = 1 while True: name = "%s_%i" % (tag.name, i) if name not in tags: tags[name] = tag break pos = fh.tell() if self.is_lsm or (self.index and self.parent.is_lsm): # correct non standard LSM bitspersample tags self.tags['bits_per_sample']._correct_lsm_bitspersample(self) if self.is_lsm: # read LSM info subrecords for name, reader in CZ_LSM_INFO_READERS.items(): try: offset = self.cz_lsm_info['offset_'+name] except KeyError: continue if offset < 8: # older LSM revision continue fh.seek(offset) try: setattr(self, 'cz_lsm_'+name, reader(fh)) except ValueError: pass elif self.is_stk and 'uic1tag' in tags and not tags['uic1tag'].value: # read uic1tag now that plane count is known uic1tag = tags['uic1tag'] fh.seek(uic1tag.value_offset) tags['uic1tag'].value = Record( read_uic1tag(fh, byteorder, uic1tag.dtype, uic1tag.count, tags['uic2tag'].count)) fh.seek(pos) def _process_tags(self): """Validate standard tags and initialize attributes. Raise ValueError if tag values are not supported. """ tags = self.tags for code, (name, default, dtype, count, validate) in TIFF_TAGS.items(): if not (name in tags or default is None): tags[name] = TiffTag(code, dtype=dtype, count=count, value=default, name=name) if name in tags and validate: try: if tags[name].count == 1: setattr(self, name, validate[tags[name].value]) else: setattr(self, name, tuple( validate[value] for value in tags[name].value)) except KeyError: raise ValueError("%s.value (%s) not supported" % (name, tags[name].value)) tag = tags['bits_per_sample'] if tag.count == 1: self.bits_per_sample = tag.value else: # LSM might list more items than samples_per_pixel value = tag.value[:self.samples_per_pixel] if any((v-value[0] for v in value)): self.bits_per_sample = value else: self.bits_per_sample = value[0] tag = tags['sample_format'] if tag.count == 1: self.sample_format = TIFF_SAMPLE_FORMATS[tag.value] else: value = tag.value[:self.samples_per_pixel] if any((v-value[0] for v in value)): self.sample_format = [TIFF_SAMPLE_FORMATS[v] for v in value] else: self.sample_format = TIFF_SAMPLE_FORMATS[value[0]] if 'photometric' not in tags: self.photometric = None if 'image_depth' not in tags: self.image_depth = 1 if 'image_length' in tags: self.strips_per_image = int(math.floor( float(self.image_length + self.rows_per_strip - 1) / self.rows_per_strip)) else: self.strips_per_image = 0 key = (self.sample_format, self.bits_per_sample) self.dtype = self._dtype = TIFF_SAMPLE_DTYPES.get(key, None) if 'image_length' not in self.tags or 'image_width' not in self.tags: # some GEL file pages are missing image data self.image_length = 0 self.image_width = 0 self.image_depth = 0 self.strip_offsets = 0 self._shape = () self.shape = () self.axes = '' if self.is_palette: self.dtype = self.tags['color_map'].dtype[1] self.color_map = numpy.array(self.color_map, self.dtype) dmax = self.color_map.max() if dmax < 256: self.dtype = numpy.uint8 self.color_map = self.color_map.astype(self.dtype) #else: # self.dtype = numpy.uint8 # self.color_map >>= 8 # self.color_map = self.color_map.astype(self.dtype) self.color_map.shape = (3, -1) # determine shape of data image_length = self.image_length image_width = self.image_width image_depth = self.image_depth samples_per_pixel = self.samples_per_pixel if self.is_stk: assert self.image_depth == 1 planes = self.tags['uic2tag'].count if self.is_contig: self._shape = (planes, 1, 1, image_length, image_width, samples_per_pixel) if samples_per_pixel == 1: self.shape = (planes, image_length, 
image_width) self.axes = 'YX' else: self.shape = (planes, image_length, image_width, samples_per_pixel) self.axes = 'YXS' else: self._shape = (planes, samples_per_pixel, 1, image_length, image_width, 1) if samples_per_pixel == 1: self.shape = (planes, image_length, image_width) self.axes = 'YX' else: self.shape = (planes, samples_per_pixel, image_length, image_width) self.axes = 'SYX' # detect type of series if planes == 1: self.shape = self.shape[1:] elif numpy.all(self.uic2tag.z_distance != 0): self.axes = 'Z' + self.axes elif numpy.all(numpy.diff(self.uic2tag.time_created) != 0): self.axes = 'T' + self.axes else: self.axes = 'I' + self.axes # DISABLED if self.is_palette: assert False, "color mapping disabled for stk" if self.color_map.shape[1] >= 2**self.bits_per_sample: if image_depth == 1: self.shape = (3, planes, image_length, image_width) else: self.shape = (3, planes, image_depth, image_length, image_width) self.axes = 'C' + self.axes else: warnings.warn("palette cannot be applied") self.is_palette = False elif self.is_palette: samples = 1 if 'extra_samples' in self.tags: samples += len(self.extra_samples) if self.is_contig: self._shape = (1, 1, image_depth, image_length, image_width, samples) else: self._shape = (1, samples, image_depth, image_length, image_width, 1) if self.color_map.shape[1] >= 2**self.bits_per_sample: if image_depth == 1: self.shape = (3, image_length, image_width) self.axes = 'CYX' else: self.shape = (3, image_depth, image_length, image_width) self.axes = 'CZYX' else: warnings.warn("palette cannot be applied") self.is_palette = False if image_depth == 1: self.shape = (image_length, image_width) self.axes = 'YX' else: self.shape = (image_depth, image_length, image_width) self.axes = 'ZYX' elif self.is_rgb or samples_per_pixel > 1: if self.is_contig: self._shape = (1, 1, image_depth, image_length, image_width, samples_per_pixel) if image_depth == 1: self.shape = (image_length, image_width, samples_per_pixel) self.axes = 'YXS' else: self.shape = (image_depth, image_length, image_width, samples_per_pixel) self.axes = 'ZYXS' else: self._shape = (1, samples_per_pixel, image_depth, image_length, image_width, 1) if image_depth == 1: self.shape = (samples_per_pixel, image_length, image_width) self.axes = 'SYX' else: self.shape = (samples_per_pixel, image_depth, image_length, image_width) self.axes = 'SZYX' if False and self.is_rgb and 'extra_samples' in self.tags: # DISABLED: only use RGB and first alpha channel if exists extra_samples = self.extra_samples if self.tags['extra_samples'].count == 1: extra_samples = (extra_samples, ) for exs in extra_samples: if exs in ('unassalpha', 'assocalpha', 'unspecified'): if self.is_contig: self.shape = self.shape[:-1] + (4,) else: self.shape = (4,) + self.shape[1:] break else: self._shape = (1, 1, image_depth, image_length, image_width, 1) if image_depth == 1: self.shape = (image_length, image_width) self.axes = 'YX' else: self.shape = (image_depth, image_length, image_width) self.axes = 'ZYX' if not self.compression and 'strip_byte_counts' not in tags: self.strip_byte_counts = ( product(self.shape) * (self.bits_per_sample // 8), ) assert len(self.shape) == len(self.axes) def asarray(self, squeeze=True, colormapped=True, rgbonly=False, scale_mdgel=False, memmap=False, reopen=True): """Read image data from file and return as numpy array. Raise ValueError if format is unsupported. If any of 'squeeze', 'colormapped', or 'rgbonly' are not the default, the shape of the returned array might be different from the page shape. 
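        A minimal usage sketch; 'temp.tif' is illustrative, and memmap is
        expected to fall back silently to an in-memory array when the data
        cannot be memory mapped:

        >>> data = numpy.random.rand(301, 219)
        >>> imsave('temp.tif', data)
        >>> with TiffFile('temp.tif') as tif:
        ...     image = tif[0].asarray(memmap=True)
        ...     numpy.testing.assert_array_equal(image, data)
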
Parameters ---------- squeeze : bool If True, all length-1 dimensions (except X and Y) are squeezed out from result. colormapped : bool If True, color mapping is applied for palette-indexed images. rgbonly : bool If True, return RGB(A) image without additional extra samples. memmap : bool If True, use numpy.memmap to read arrays from file if possible. For use on 64 bit systems and files with few huge contiguous data. reopen : bool If True and the parent file handle is closed, the file is temporarily re-opened (and closed if no exception occurs). scale_mdgel : bool If True, MD Gel data will be scaled according to the private metadata in the second TIFF page. The dtype will be float32. """ if not self._shape: return if self.dtype is None: raise ValueError("data type not supported: %s%i" % ( self.sample_format, self.bits_per_sample)) if self.compression not in TIFF_DECOMPESSORS: raise ValueError("cannot decompress %s" % self.compression) tag = self.tags['sample_format'] if tag.count != 1 and any((i-tag.value[0] for i in tag.value)): raise ValueError("sample formats don't match %s" % str(tag.value)) fh = self.parent.filehandle closed = fh.closed if closed: if reopen: fh.open() else: raise IOError("file handle is closed") dtype = self._dtype shape = self._shape image_width = self.image_width image_length = self.image_length image_depth = self.image_depth typecode = self.parent.byteorder + dtype bits_per_sample = self.bits_per_sample if self.is_tiled: if 'tile_offsets' in self.tags: byte_counts = self.tile_byte_counts offsets = self.tile_offsets else: byte_counts = self.strip_byte_counts offsets = self.strip_offsets tile_width = self.tile_width tile_length = self.tile_length tile_depth = self.tile_depth if 'tile_depth' in self.tags else 1 tw = (image_width + tile_width - 1) // tile_width tl = (image_length + tile_length - 1) // tile_length td = (image_depth + tile_depth - 1) // tile_depth shape = (shape[0], shape[1], td*tile_depth, tl*tile_length, tw*tile_width, shape[-1]) tile_shape = (tile_depth, tile_length, tile_width, shape[-1]) runlen = tile_width else: byte_counts = self.strip_byte_counts offsets = self.strip_offsets runlen = image_width if any(o < 2 for o in offsets): raise ValueError("corrupted page") if memmap and self._is_memmappable(rgbonly, colormapped): result = fh.memmap_array(typecode, shape, offset=offsets[0]) elif self.is_contiguous: fh.seek(offsets[0]) result = fh.read_array(typecode, product(shape)) result = result.astype('=' + dtype) else: if self.is_contig: runlen *= self.samples_per_pixel if bits_per_sample in (8, 16, 32, 64, 128): if (bits_per_sample * runlen) % 8: raise ValueError("data and sample size mismatch") def unpack(x): try: return numpy.fromstring(x, typecode) except ValueError as e: # strips may be missing EOI warnings.warn("unpack: %s" % e) xlen = ((len(x) // (bits_per_sample // 8)) * (bits_per_sample // 8)) return numpy.fromstring(x[:xlen], typecode) elif isinstance(bits_per_sample, tuple): def unpack(x): return unpackrgb(x, typecode, bits_per_sample) else: def unpack(x): return unpackints(x, typecode, bits_per_sample, runlen) decompress = TIFF_DECOMPESSORS[self.compression] if self.compression == 'jpeg': table = self.jpeg_tables if 'jpeg_tables' in self.tags else b'' decompress = lambda x: decodejpg(x, table, self.photometric) if self.is_tiled: result = numpy.empty(shape, dtype) tw, tl, td, pl = 0, 0, 0, 0 for offset, bytecount in zip(offsets, byte_counts): fh.seek(offset) tile = unpack(decompress(fh.read(bytecount))) tile.shape = tile_shape if 
self.predictor == 'horizontal': numpy.cumsum(tile, axis=-2, dtype=dtype, out=tile) result[0, pl, td:td+tile_depth, tl:tl+tile_length, tw:tw+tile_width, :] = tile del tile tw += tile_width if tw >= shape[4]: tw, tl = 0, tl + tile_length if tl >= shape[3]: tl, td = 0, td + tile_depth if td >= shape[2]: td, pl = 0, pl + 1 result = result[..., :image_depth, :image_length, :image_width, :] else: strip_size = (self.rows_per_strip * self.image_width * self.samples_per_pixel) result = numpy.empty(shape, dtype).reshape(-1) index = 0 for offset, bytecount in zip(offsets, byte_counts): fh.seek(offset) strip = fh.read(bytecount) strip = decompress(strip) strip = unpack(strip) size = min(result.size, strip.size, strip_size, result.size - index) result[index:index+size] = strip[:size] del strip index += size result.shape = self._shape if self.predictor == 'horizontal' and not (self.is_tiled and not self.is_contiguous): # work around bug in LSM510 software if not (self.parent.is_lsm and not self.compression): numpy.cumsum(result, axis=-2, dtype=dtype, out=result) if colormapped and self.is_palette: if self.color_map.shape[1] >= 2**bits_per_sample: # FluoView and LSM might fail here result = numpy.take(self.color_map, result[:, 0, :, :, :, 0], axis=1) elif rgbonly and self.is_rgb and 'extra_samples' in self.tags: # return only RGB and first alpha channel if exists extra_samples = self.extra_samples if self.tags['extra_samples'].count == 1: extra_samples = (extra_samples, ) for i, exs in enumerate(extra_samples): if exs in ('unassalpha', 'assocalpha', 'unspecified'): if self.is_contig: result = result[..., [0, 1, 2, 3+i]] else: result = result[:, [0, 1, 2, 3+i]] break else: if self.is_contig: result = result[..., :3] else: result = result[:, :3] if squeeze: try: result.shape = self.shape except ValueError: warnings.warn("failed to reshape from %s to %s" % ( str(result.shape), str(self.shape))) if scale_mdgel and self.parent.is_mdgel: # MD Gel stores private metadata in the second page tags = self.parent.pages[1] if tags.md_file_tag in (2, 128): scale = tags.md_scale_pixel scale = scale[0] / scale[1] # rational result = result.astype('float32') if tags.md_file_tag == 2: result **= 2 # squary root data format result *= scale if closed: # TODO: file remains open if an exception occurred above fh.close() return result def _is_memmappable(self, rgbonly, colormapped): """Return if image data in file can be memory mapped.""" if not self.parent.filehandle.is_file or not self.is_contiguous: return False return not (self.predictor or (rgbonly and 'extra_samples' in self.tags) or (colormapped and self.is_palette) or ({'big': '>', 'little': '<'}[sys.byteorder] != self.parent.byteorder)) @lazyattr def is_contiguous(self): """Return offset and size of contiguous data, else None. Excludes prediction and colormapping. 
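        A sketch for an uncompressed single-strip file written by imsave
        ('temp.tif' is illustrative); a truthy (offset, size) tuple is
        returned:

        >>> imsave('temp.tif', numpy.zeros((16, 16), 'uint8'))
        >>> with TiffFile('temp.tif') as tif:
        ...     offset, size = tif[0].is_contiguous
        >>> size
        256
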
""" if self.compression or self.bits_per_sample not in (8, 16, 32, 64): return if self.is_tiled: if (self.image_width != self.tile_width or self.image_length % self.tile_length or self.tile_width % 16 or self.tile_length % 16): return if ('image_depth' in self.tags and 'tile_depth' in self.tags and (self.image_length != self.tile_length or self.image_depth % self.tile_depth)): return offsets = self.tile_offsets byte_counts = self.tile_byte_counts else: offsets = self.strip_offsets byte_counts = self.strip_byte_counts if len(offsets) == 1: return offsets[0], byte_counts[0] if self.is_stk or all(offsets[i] + byte_counts[i] == offsets[i+1] or byte_counts[i+1] == 0 # no data/ignore offset for i in range(len(offsets)-1)): return offsets[0], sum(byte_counts) def __str__(self): """Return string containing information about page.""" s = ', '.join(s for s in ( ' x '.join(str(i) for i in self.shape), str(numpy.dtype(self.dtype)), '%s bit' % str(self.bits_per_sample), self.photometric if 'photometric' in self.tags else '', self.compression if self.compression else 'raw', '|'.join(t[3:] for t in ( 'is_stk', 'is_lsm', 'is_nih', 'is_ome', 'is_imagej', 'is_micromanager', 'is_fluoview', 'is_mdgel', 'is_mediacy', 'is_sgi', 'is_reduced', 'is_tiled', 'is_contiguous') if getattr(self, t))) if s) return "Page %i: %s" % (self.index, s) def __getattr__(self, name): """Return tag value.""" if name in self.tags: value = self.tags[name].value setattr(self, name, value) return value raise AttributeError(name) @lazyattr def uic_tags(self): """Consolidate UIC tags.""" if not self.is_stk: raise AttributeError("uic_tags") tags = self.tags result = Record() result.number_planes = tags['uic2tag'].count if 'image_description' in tags: result.plane_descriptions = self.image_description.split(b'\x00') if 'uic1tag' in tags: result.update(tags['uic1tag'].value) if 'uic3tag' in tags: result.update(tags['uic3tag'].value) # wavelengths if 'uic4tag' in tags: result.update(tags['uic4tag'].value) # override uic1 tags uic2tag = tags['uic2tag'].value result.z_distance = uic2tag.z_distance result.time_created = uic2tag.time_created result.time_modified = uic2tag.time_modified try: result.datetime_created = [ julian_datetime(*dt) for dt in zip(uic2tag.date_created, uic2tag.time_created)] result.datetime_modified = [ julian_datetime(*dt) for dt in zip(uic2tag.date_modified, uic2tag.time_modified)] except ValueError as e: warnings.warn("uic_tags: %s" % e) return result @lazyattr def imagej_tags(self): """Consolidate ImageJ metadata.""" if not self.is_imagej: raise AttributeError("imagej_tags") tags = self.tags if 'image_description_1' in tags: # MicroManager result = imagej_description(tags['image_description_1'].value) else: result = imagej_description(tags['image_description'].value) if 'imagej_metadata' in tags: try: result.update(imagej_metadata( tags['imagej_metadata'].value, tags['imagej_byte_counts'].value, self.parent.byteorder)) except Exception as e: warnings.warn(str(e)) return Record(result) @lazyattr def is_rgb(self): """True if page contains a RGB image.""" return ('photometric' in self.tags and self.tags['photometric'].value == 2) @lazyattr def is_contig(self): """True if page contains a contiguous image.""" return ('planar_configuration' in self.tags and self.tags['planar_configuration'].value == 1) @lazyattr def is_palette(self): """True if page contains a palette-colored image and not OME or STK.""" try: # turn off color mapping for OME-TIFF and STK if self.is_stk or self.is_ome or self.parent.is_ome: return False except 
IndexError: pass # OME-XML not found in first page return ('photometric' in self.tags and self.tags['photometric'].value == 3) @lazyattr def is_tiled(self): """True if page contains tiled image.""" return 'tile_width' in self.tags @lazyattr def is_reduced(self): """True if page is a reduced image of another image.""" return bool(self.tags['new_subfile_type'].value & 1) @lazyattr def is_mdgel(self): """True if page contains md_file_tag tag.""" return 'md_file_tag' in self.tags @lazyattr def is_mediacy(self): """True if page contains Media Cybernetics Id tag.""" return ('mc_id' in self.tags and self.tags['mc_id'].value.startswith(b'MC TIFF')) @lazyattr def is_stk(self): """True if page contains UIC2Tag tag.""" return 'uic2tag' in self.tags @lazyattr def is_lsm(self): """True if page contains LSM CZ_LSM_INFO tag.""" return 'cz_lsm_info' in self.tags @lazyattr def is_fluoview(self): """True if page contains FluoView MM_STAMP tag.""" return 'mm_stamp' in self.tags @lazyattr def is_nih(self): """True if page contains NIH image header.""" return 'nih_image_header' in self.tags @lazyattr def is_sgi(self): """True if page contains SGI image and tile depth tags.""" return 'image_depth' in self.tags and 'tile_depth' in self.tags @lazyattr def is_ome(self): """True if page contains OME-XML in image_description tag.""" return ('image_description' in self.tags and self.tags[ 'image_description'].value.startswith(b' parent.offset_size or code in CUSTOM_TAGS: pos = fh.tell() tof = {4: 'I', 8: 'Q'}[parent.offset_size] self.value_offset = offset = struct.unpack(byteorder+tof, value)[0] if offset < 0 or offset > parent.filehandle.size: raise TiffTag.Error("corrupt file - invalid tag value offset") elif offset < 4: raise TiffTag.Error("corrupt value offset for tag %i" % code) fh.seek(offset) if code in CUSTOM_TAGS: readfunc = CUSTOM_TAGS[code][1] value = readfunc(fh, byteorder, dtype, count) if isinstance(value, dict): # numpy.core.records.record value = Record(value) elif code in TIFF_TAGS or dtype[-1] == 's': value = struct.unpack(fmt, fh.read(size)) else: value = read_numpy(fh, byteorder, dtype, count) fh.seek(pos) else: value = struct.unpack(fmt, value[:size]) if code not in CUSTOM_TAGS and code not in (273, 279, 324, 325): # scalar value if not strip/tile offsets/byte_counts if len(value) == 1: value = value[0] if (dtype.endswith('s') and isinstance(value, bytes) and self._type != 7): # TIFF ASCII fields can contain multiple strings, # each terminated with a NUL value = stripascii(value) self.code = code self.name = name self.dtype = dtype self.count = count self.value = value def _correct_lsm_bitspersample(self, parent): """Correct LSM bitspersample tag. Old LSM writers may use a separate region for two 16-bit values, although they fit into the tag value element of the tag. """ if self.code == 258 and self.count == 2: # TODO: test this. Need example file. warnings.warn("correcting LSM bitspersample tag") fh = parent.filehandle tof = {4: '') def __str__(self): """Return string containing information about tag.""" return ' '.join(str(getattr(self, s)) for s in self.__slots__) class TiffSequence(object): """Sequence of image files. The data shape and dtype of all files must match. Properties ---------- files : list List of file names. shape : tuple Shape of image sequence. axes : str Labels of axes in shape. 
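    File names are parsed with a regular expression that captures axis
    letters and their numeric indices; for illustration, a hypothetical
    file name 'image_C001_T0002.tif' matches axes 'CT' with indices
    (1, 2) under the default 'axes' pattern.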
Examples -------- >>> tifs = TiffSequence("test.oif.files/*.tif") >>> tifs.shape, tifs.axes ((2, 100), 'CT') >>> data = tifs.asarray() >>> data.shape (2, 100, 256, 256) """ _patterns = { 'axes': r""" # matches Olympus OIF and Leica TIFF series _?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4})) _?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4}))? _?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4}))? _?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4}))? _?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4}))? _?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4}))? _?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4}))? """} class ParseError(Exception): pass def __init__(self, files, imread=TiffFile, pattern='axes', *args, **kwargs): """Initialize instance from multiple files. Parameters ---------- files : str, or sequence of str Glob pattern or sequence of file names. imread : function or class Image read function or class with asarray function returning numpy array from single file. pattern : str Regular expression pattern that matches axes names and sequence indices in file names. By default this matches Olympus OIF and Leica TIFF series. """ if isinstance(files, basestring): files = natural_sorted(glob.glob(files)) files = list(files) if not files: raise ValueError("no files found") #if not os.path.isfile(files[0]): # raise ValueError("file not found") self.files = files if hasattr(imread, 'asarray'): # redefine imread _imread = imread def imread(fname, *args, **kwargs): with _imread(fname) as im: return im.asarray(*args, **kwargs) self.imread = imread self.pattern = self._patterns.get(pattern, pattern) try: self._parse() if not self.axes: self.axes = 'I' except self.ParseError: self.axes = 'I' self.shape = (len(files),) self._start_index = (0,) self._indices = tuple((i,) for i in range(len(files))) def __str__(self): """Return string with information about image sequence.""" return "\n".join([ self.files[0], '* files: %i' % len(self.files), '* axes: %s' % self.axes, '* shape: %s' % str(self.shape)]) def __len__(self): return len(self.files) def __enter__(self): return self def __exit__(self, exc_type, exc_value, traceback): self.close() def close(self): pass def asarray(self, memmap=False, *args, **kwargs): """Read image data from all files and return as single numpy array. If memmap is True, return an array stored in a binary file on disk. The args and kwargs parameters are passed to the imread function. Raise IndexError or ValueError if image shapes don't match. 
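        For illustration, continuing the hypothetical class example:

        >>> tifs = TiffSequence("test.oif.files/*.tif")  # doctest: +SKIP
        >>> data = tifs.asarray(memmap=True)  # doctest: +SKIP
        >>> data.shape  # doctest: +SKIP
        (2, 100, 256, 256)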
""" im = self.imread(self.files[0], *args, **kwargs) shape = self.shape + im.shape if memmap: with tempfile.NamedTemporaryFile() as fh: result = numpy.memmap(fh, dtype=im.dtype, shape=shape) else: result = numpy.zeros(shape, dtype=im.dtype) result = result.reshape(-1, *im.shape) for index, fname in zip(self._indices, self.files): index = [i-j for i, j in zip(index, self._start_index)] index = numpy.ravel_multi_index(index, self.shape) im = self.imread(fname, *args, **kwargs) result[index] = im result.shape = shape return result def _parse(self): """Get axes and shape from file names.""" if not self.pattern: raise self.ParseError("invalid pattern") pattern = re.compile(self.pattern, re.IGNORECASE | re.VERBOSE) matches = pattern.findall(self.files[0]) if not matches: raise self.ParseError("pattern doesn't match file names") matches = matches[-1] if len(matches) % 2: raise self.ParseError("pattern doesn't match axis name and index") axes = ''.join(m for m in matches[::2] if m) if not axes: raise self.ParseError("pattern doesn't match file names") indices = [] for fname in self.files: matches = pattern.findall(fname)[-1] if axes != ''.join(m for m in matches[::2] if m): raise ValueError("axes don't match within the image sequence") indices.append([int(m) for m in matches[1::2] if m]) shape = tuple(numpy.max(indices, axis=0)) start_index = tuple(numpy.min(indices, axis=0)) shape = tuple(i-j+1 for i, j in zip(shape, start_index)) if product(shape) != len(self.files): warnings.warn("files are missing. Missing data are zeroed") self.axes = axes.upper() self.shape = shape self._indices = indices self._start_index = start_index class Record(dict): """Dictionary with attribute access. Can also be initialized with numpy.core.records.record. """ __slots__ = () def __init__(self, arg=None, **kwargs): if kwargs: arg = kwargs elif arg is None: arg = {} try: dict.__init__(self, arg) except (TypeError, ValueError): for i, name in enumerate(arg.dtype.names): v = arg[i] self[name] = v if v.dtype.char != 'S' else stripnull(v) def __getattr__(self, name): return self[name] def __setattr__(self, name, value): self.__setitem__(name, value) def __str__(self): """Pretty print Record.""" s = [] lists = [] for k in sorted(self): try: if k.startswith('_'): # does not work with byte continue except AttributeError: pass v = self[k] if isinstance(v, (list, tuple)) and len(v): if isinstance(v[0], Record): lists.append((k, v)) continue elif isinstance(v[0], TiffPage): v = [i.index for i in v if i] s.append( ("* %s: %s" % (k, str(v))).split("\n", 1)[0] [:PRINT_LINE_LEN].rstrip()) for k, v in lists: l = [] for i, w in enumerate(v): l.append("* %s[%i]\n %s" % (k, i, str(w).replace("\n", "\n "))) s.append('\n'.join(l)) return '\n'.join(s) class TiffTags(Record): """Dictionary of TiffTag with attribute access.""" def __str__(self): """Return string with information about all tags.""" s = [] for tag in sorted(self.values(), key=lambda x: x.code): typecode = "%i%s" % (tag.count * int(tag.dtype[0]), tag.dtype[1]) line = "* %i %s (%s) %s" % ( tag.code, tag.name, typecode, tag.as_str()) s.append(line[:PRINT_LINE_LEN].lstrip()) return '\n'.join(s) class FileHandle(object): """Binary file handle. * Handle embedded files (for CZI within CZI files). * Allow to re-open closed files (for multi file formats such as OME-TIFF). * Read numpy arrays and records from file like objects. Only binary read, seek, tell, and close are supported on embedded files. 
When initialized from another file handle, do not use it unless this FileHandle is closed. Attributes ---------- name : str Name of the file. path : str Absolute path to file. size : int Size of file in bytes. is_file : bool If True, file has a filno and can be memory mapped. All attributes are read-only. """ __slots__ = ('_fh', '_arg', '_mode', '_name', '_dir', '_offset', '_size', '_close', 'is_file') def __init__(self, arg, mode='rb', name=None, offset=None, size=None): """Initialize file handle from file name or another file handle. Parameters ---------- arg : str, File, or FileHandle File name or open file handle. mode : str File open mode in case 'arg' is a file name. name : str Optional name of file in case 'arg' is a file handle. offset : int Optional start position of embedded file. By default this is the current file position. size : int Optional size of embedded file. By default this is the number of bytes from the 'offset' to the end of the file. """ self._fh = None self._arg = arg self._mode = mode self._name = name self._dir = '' self._offset = offset self._size = size self._close = True self.is_file = False self.open() def open(self): """Open or re-open file.""" if self._fh: return # file is open if isinstance(self._arg, basestring): # file name self._arg = os.path.abspath(self._arg) self._dir, self._name = os.path.split(self._arg) self._fh = open(self._arg, self._mode) self._close = True if self._offset is None: self._offset = 0 elif isinstance(self._arg, FileHandle): # FileHandle self._fh = self._arg._fh if self._offset is None: self._offset = 0 self._offset += self._arg._offset self._close = False if not self._name: if self._offset: name, ext = os.path.splitext(self._arg._name) self._name = "%s@%i%s" % (name, self._offset, ext) else: self._name = self._arg._name self._dir = self._arg._dir else: # open file object self._fh = self._arg if self._offset is None: self._offset = self._arg.tell() self._close = False if not self._name: try: self._dir, self._name = os.path.split(self._fh.name) except AttributeError: self._name = "Unnamed stream" if self._offset: self._fh.seek(self._offset) if self._size is None: pos = self._fh.tell() self._fh.seek(self._offset, 2) self._size = self._fh.tell() self._fh.seek(pos) try: self._fh.fileno() self.is_file = True except Exception: self.is_file = False def read(self, size=-1): """Read 'size' bytes from file, or until EOF is reached.""" if size < 0 and self._offset: size = self._size return self._fh.read(size) def memmap_array(self, dtype, shape, offset=0, mode='r', order='C'): """Return numpy.memmap of data stored in file.""" if not self.is_file: raise ValueError("Can not memory map file without fileno.") return numpy.memmap(self._fh, dtype=dtype, mode=mode, offset=self._offset + offset, shape=shape, order=order) def read_array(self, dtype, count=-1, sep=""): """Return numpy array from file. Work around numpy issue #2230, "numpy.fromfile does not accept StringIO object" https://github.com/numpy/numpy/issues/2230. 
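        For illustration, reading from a file-like object without fileno:

        >>> from io import BytesIO
        >>> fh = FileHandle(BytesIO(b'\\x01\\x00\\x02\\x00'))
        >>> fh.read_array('<u2', 2)
        array([1, 2], dtype=uint16)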
""" try: return numpy.fromfile(self._fh, dtype, count, sep) except IOError: if count < 0: size = self._size else: size = count * numpy.dtype(dtype).itemsize data = self._fh.read(size) return numpy.fromstring(data, dtype, count, sep) def read_record(self, dtype, shape=1, byteorder=None): """Return numpy record from file.""" try: rec = numpy.rec.fromfile(self._fh, dtype, shape, byteorder=byteorder) except Exception: dtype = numpy.dtype(dtype) if shape is None: shape = self._size // dtype.itemsize size = product(sequence(shape)) * dtype.itemsize data = self._fh.read(size) return numpy.rec.fromstring(data, dtype, shape, byteorder=byteorder) return rec[0] if shape == 1 else rec def tell(self): """Return file's current position.""" return self._fh.tell() - self._offset def seek(self, offset, whence=0): """Set file's current position.""" if self._offset: if whence == 0: self._fh.seek(self._offset + offset, whence) return elif whence == 2: self._fh.seek(self._offset + self._size + offset, 0) return self._fh.seek(offset, whence) def close(self): """Close file.""" if self._close and self._fh: self._fh.close() self._fh = None self.is_file = False def __enter__(self): return self def __exit__(self, exc_type, exc_value, traceback): self.close() def __getattr__(self, name): """Return attribute from underlying file object.""" if self._offset: warnings.warn( "FileHandle: '%s' not implemented for embedded files" % name) return getattr(self._fh, name) @property def name(self): return self._name @property def dirname(self): return self._dir @property def path(self): return os.path.join(self._dir, self._name) @property def size(self): return self._size @property def closed(self): return self._fh is None def read_bytes(fh, byteorder, dtype, count): """Read tag data from file and return as byte string.""" dtype = 'b' if dtype[-1] == 's' else byteorder+dtype[-1] return fh.read_array(dtype, count).tostring() def read_numpy(fh, byteorder, dtype, count): """Read tag data from file and return as numpy array.""" dtype = 'b' if dtype[-1] == 's' else byteorder+dtype[-1] return fh.read_array(dtype, count) def read_json(fh, byteorder, dtype, count): """Read JSON tag data from file and return as object.""" data = fh.read(count) try: return json.loads(unicode(stripnull(data), 'utf-8')) except ValueError: warnings.warn("invalid JSON `%s`" % data) def read_mm_header(fh, byteorder, dtype, count): """Read MM_HEADER tag from file and return as numpy.rec.array.""" return fh.read_record(MM_HEADER, byteorder=byteorder) def read_mm_stamp(fh, byteorder, dtype, count): """Read MM_STAMP tag from file and return as numpy.array.""" return fh.read_array(byteorder+'f8', 8) def read_uic1tag(fh, byteorder, dtype, count, plane_count=None): """Read MetaMorph STK UIC1Tag from file and return as dictionary. Return empty dictionary if plane_count is unknown. 
""" assert dtype in ('2I', '1I') and byteorder == '<' result = {} if dtype == '2I': # pre MetaMorph 2.5 (not tested) values = fh.read_array(' structure_size: break cz_lsm_info.append((name, dtype)) else: cz_lsm_info = CZ_LSM_INFO return fh.read_record(cz_lsm_info, byteorder=byteorder) def read_cz_lsm_floatpairs(fh): """Read LSM sequence of float pairs from file and return as list.""" size = struct.unpack(' 0: esize, etime, etype = struct.unpack(''}[fh.read(2)] except IndexError: raise ValueError("not a MicroManager TIFF file") results = {} fh.seek(8) (index_header, index_offset, display_header, display_offset, comments_header, comments_offset, summary_header, summary_length ) = struct.unpack(byteorder + "IIIIIIII", fh.read(32)) if summary_header != 2355492: raise ValueError("invalid MicroManager summary_header") results['summary'] = read_json(fh, byteorder, None, summary_length) if index_header != 54773648: raise ValueError("invalid MicroManager index_header") fh.seek(index_offset) header, count = struct.unpack(byteorder + "II", fh.read(8)) if header != 3453623: raise ValueError("invalid MicroManager index_header") data = struct.unpack(byteorder + "IIIII"*count, fh.read(20*count)) results['index_map'] = { 'channel': data[::5], 'slice': data[1::5], 'frame': data[2::5], 'position': data[3::5], 'offset': data[4::5]} if display_header != 483765892: raise ValueError("invalid MicroManager display_header") fh.seek(display_offset) header, count = struct.unpack(byteorder + "II", fh.read(8)) if header != 347834724: raise ValueError("invalid MicroManager display_header") results['display_settings'] = read_json(fh, byteorder, None, count) if comments_header != 99384722: raise ValueError("invalid MicroManager comments_header") fh.seek(comments_offset) header, count = struct.unpack(byteorder + "II", fh.read(8)) if header != 84720485: raise ValueError("invalid MicroManager comments_header") results['comments'] = read_json(fh, byteorder, None, count) return results def imagej_metadata(data, bytecounts, byteorder): """Return dict from ImageJ metadata tag value.""" _str = str if sys.version_info[0] < 3 else lambda x: str(x, 'cp1252') def read_string(data, byteorder): return _str(stripnull(data[0 if byteorder == '<' else 1::2])) def read_double(data, byteorder): return struct.unpack(byteorder+('d' * (len(data) // 8)), data) def read_bytes(data, byteorder): #return struct.unpack('b' * len(data), data) return numpy.fromstring(data, 'uint8') metadata_types = { # big endian b'info': ('info', read_string), b'labl': ('labels', read_string), b'rang': ('ranges', read_double), b'luts': ('luts', read_bytes), b'roi ': ('roi', read_bytes), b'over': ('overlays', read_bytes)} metadata_types.update( # little endian dict((k[::-1], v) for k, v in metadata_types.items())) if not bytecounts: raise ValueError("no ImageJ metadata") if not data[:4] in (b'IJIJ', b'JIJI'): raise ValueError("invalid ImageJ metadata") header_size = bytecounts[0] if header_size < 12 or header_size > 804: raise ValueError("invalid ImageJ metadata header size") ntypes = (header_size - 4) // 8 header = struct.unpack(byteorder+'4sI'*ntypes, data[4:4+ntypes*8]) pos = 4 + ntypes * 8 counter = 0 result = {} for mtype, count in zip(header[::2], header[1::2]): values = [] name, func = metadata_types.get(mtype, (_str(mtype), read_bytes)) for _ in range(count): counter += 1 pos1 = pos + bytecounts[counter] values.append(func(data[pos:pos1], byteorder)) pos = pos1 result[name.strip()] = values[0] if count == 1 else values return result def 
imagej_description(description): """Return dict from ImageJ image_description tag.""" def _bool(val): return {b'true': True, b'false': False}[val.lower()] _str = str if sys.version_info[0] < 3 else lambda x: str(x, 'cp1252') result = {} for line in description.splitlines(): try: key, val = line.split(b'=') except Exception: continue key = key.strip() val = val.strip() for dtype in (int, float, _bool, _str): try: val = dtype(val) break except Exception: pass result[_str(key)] = val return result def _replace_by(module_function, package=None, warn=False): """Try replace decorated function by module.function.""" try: from importlib import import_module except ImportError: warnings.warn('could not import module importlib') return lambda func: func def decorate(func, module_function=module_function, warn=warn): try: module, function = module_function.split('.') if not package: module = import_module(module) else: module = import_module('.' + module, package=package) func, oldfunc = getattr(module, function), func globals()['__old_' + func.__name__] = oldfunc except Exception: if warn: warnings.warn("failed to import %s" % module_function) return func return decorate def decodejpg(encoded, tables=b'', photometric=None, ycbcr_subsampling=None, ycbcr_positioning=None): """Decode JPEG encoded byte string (using _czifile extension module).""" import _czifile image = _czifile.decodejpg(encoded, tables) if photometric == 'rgb' and ycbcr_subsampling and ycbcr_positioning: # TODO: convert YCbCr to RGB pass return image.tostring() @_replace_by('_tifffile.decodepackbits') def decodepackbits(encoded): """Decompress PackBits encoded byte string. PackBits is a simple byte-oriented run-length compression scheme. """ func = ord if sys.version[0] == '2' else lambda x: x result = [] result_extend = result.extend i = 0 try: while True: n = func(encoded[i]) + 1 i += 1 if n < 129: result_extend(encoded[i:i+n]) i += n elif n > 129: result_extend(encoded[i:i+1] * (258-n)) i += 1 except IndexError: pass return b''.join(result) if sys.version[0] == '2' else bytes(result) @_replace_by('_tifffile.decodelzw') def decodelzw(encoded): """Decompress LZW (Lempel-Ziv-Welch) encoded TIFF strip (byte string). The strip must begin with a CLEAR code and end with an EOI code. This is an implementation of the LZW decoding algorithm described in (1). It is not compatible with old style LZW compressed files like quad-lzw.tif. 
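    For illustration, a minimal hand-packed strip (9-bit codes, MSB
    first) containing CLEAR (256), the literal byte 72 ('H'), and
    EOI (257):

    >>> decodelzw(b'\\x80\\x12\\x20\\x20')
    b'H'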
""" len_encoded = len(encoded) bitcount_max = len_encoded * 8 unpack = struct.unpack if sys.version[0] == '2': newtable = [chr(i) for i in range(256)] else: newtable = [bytes([i]) for i in range(256)] newtable.extend((0, 0)) def next_code(): """Return integer of `bitw` bits at `bitcount` position in encoded.""" start = bitcount // 8 s = encoded[start:start+4] try: code = unpack('>I', s)[0] except Exception: code = unpack('>I', s + b'\x00'*(4-len(s)))[0] code <<= bitcount % 8 code &= mask return code >> shr switchbitch = { # code: bit-width, shr-bits, bit-mask 255: (9, 23, int(9*'1'+'0'*23, 2)), 511: (10, 22, int(10*'1'+'0'*22, 2)), 1023: (11, 21, int(11*'1'+'0'*21, 2)), 2047: (12, 20, int(12*'1'+'0'*20, 2)), } bitw, shr, mask = switchbitch[255] bitcount = 0 if len_encoded < 4: raise ValueError("strip must be at least 4 characters long") if next_code() != 256: raise ValueError("strip must begin with CLEAR code") code = 0 oldcode = 0 result = [] result_append = result.append while True: code = next_code() # ~5% faster when inlining this function bitcount += bitw if code == 257 or bitcount >= bitcount_max: # EOI break if code == 256: # CLEAR table = newtable[:] table_append = table.append lentable = 258 bitw, shr, mask = switchbitch[255] code = next_code() bitcount += bitw if code == 257: # EOI break result_append(table[code]) else: if code < lentable: decoded = table[code] newcode = table[oldcode] + decoded[:1] else: newcode = table[oldcode] newcode += newcode[:1] decoded = newcode result_append(decoded) table_append(newcode) lentable += 1 oldcode = code if lentable in switchbitch: bitw, shr, mask = switchbitch[lentable] if code != 257: warnings.warn("unexpected end of lzw stream (code %i)" % code) return b''.join(result) @_replace_by('_tifffile.unpackints') def unpackints(data, dtype, itemsize, runlen=0): """Decompress byte string to array of integers of any bit size <= 32. Parameters ---------- data : byte str Data to decompress. dtype : numpy.dtype or str A numpy boolean or integer type. itemsize : int Number of bits per integer. runlen : int Number of consecutive integers, after which to start at next byte. """ if itemsize == 1: # bitarray data = numpy.fromstring(data, '|B') data = numpy.unpackbits(data) if runlen % 8: data = data.reshape(-1, runlen + (8 - runlen % 8)) data = data[:, :runlen].reshape(-1) return data.astype(dtype) dtype = numpy.dtype(dtype) if itemsize in (8, 16, 32, 64): return numpy.fromstring(data, dtype) if itemsize < 1 or itemsize > 32: raise ValueError("itemsize out of range: %i" % itemsize) if dtype.kind not in "biu": raise ValueError("invalid dtype") itembytes = next(i for i in (1, 2, 4, 8) if 8 * i >= itemsize) if itembytes != dtype.itemsize: raise ValueError("dtype.itemsize too small") if runlen == 0: runlen = len(data) // itembytes skipbits = runlen*itemsize % 8 if skipbits: skipbits = 8 - skipbits shrbits = itembytes*8 - itemsize bitmask = int(itemsize*'1'+'0'*shrbits, 2) dtypestr = '>' + dtype.char # dtype always big endian? 
unpack = struct.unpack l = runlen * (len(data)*8 // (runlen*itemsize + skipbits)) result = numpy.empty((l, ), dtype) bitcount = 0 for i in range(len(result)): start = bitcount // 8 s = data[start:start+itembytes] try: code = unpack(dtypestr, s)[0] except Exception: code = unpack(dtypestr, s + b'\x00'*(itembytes-len(s)))[0] code <<= bitcount % 8 code &= bitmask result[i] = code >> shrbits bitcount += itemsize if (i+1) % runlen == 0: bitcount += skipbits return result def unpackrgb(data, dtype='>> data = struct.pack('BBBB', 0x21, 0x08, 0xff, 0xff) >>> print(unpackrgb(data, '>> print(unpackrgb(data, '>> print(unpackrgb(data, '= bits) data = numpy.fromstring(data, dtype.byteorder+dt) result = numpy.empty((data.size, len(bitspersample)), dtype.char) for i, bps in enumerate(bitspersample): t = data >> int(numpy.sum(bitspersample[i+1:])) t &= int('0b'+'1'*bps, 2) if rescale: o = ((dtype.itemsize * 8) // bps + 1) * bps if o > data.dtype.itemsize * 8: t = t.astype('I') t *= (2**o - 1) // (2**bps - 1) t //= 2**(o - (dtype.itemsize * 8)) result[:, i] = t return result.reshape(-1) def reorient(image, orientation): """Return reoriented view of image array. Parameters ---------- image : numpy array Non-squeezed output of asarray() functions. Axes -3 and -2 must be image length and width respectively. orientation : int or str One of TIFF_ORIENTATIONS keys or values. """ o = TIFF_ORIENTATIONS.get(orientation, orientation) if o == 'top_left': return image elif o == 'top_right': return image[..., ::-1, :] elif o == 'bottom_left': return image[..., ::-1, :, :] elif o == 'bottom_right': return image[..., ::-1, ::-1, :] elif o == 'left_top': return numpy.swapaxes(image, -3, -2) elif o == 'right_top': return numpy.swapaxes(image, -3, -2)[..., ::-1, :] elif o == 'left_bottom': return numpy.swapaxes(image, -3, -2)[..., ::-1, :, :] elif o == 'right_bottom': return numpy.swapaxes(image, -3, -2)[..., ::-1, ::-1, :] def squeeze_axes(shape, axes, skip='XY'): """Return shape and axes with single-dimensional entries removed. Remove unused dimensions unless their axes are listed in 'skip'. >>> squeeze_axes((5, 1, 2, 1, 1), 'TZYXC') ((5, 2, 1), 'TYX') """ if len(shape) != len(axes): raise ValueError("dimensions of axes and shape don't match") shape, axes = zip(*(i for i in zip(shape, axes) if i[0] > 1 or i[1] in skip)) return shape, ''.join(axes) def transpose_axes(data, axes, asaxes='CTZYX'): """Return data with its axes permuted to match specified axes. A view is returned if possible. >>> transpose_axes(numpy.zeros((2, 3, 4, 5)), 'TYXC', asaxes='CTZYX').shape (5, 2, 1, 3, 4) """ for ax in axes: if ax not in asaxes: raise ValueError("unknown axis %s" % ax) # add missing axes to data shape = data.shape for ax in reversed(asaxes): if ax not in axes: axes = ax + axes shape = (1,) + shape data = data.reshape(shape) # transpose axes data = data.transpose([axes.index(ax) for ax in asaxes]) return data def stack_pages(pages, memmap=False, *args, **kwargs): """Read data from sequence of TiffPage and stack them vertically. If memmap is True, return an array stored in a binary file on disk. Additional parameters are passsed to the page asarray function. 
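    For illustration (hypothetical file name):

    >>> with TiffFile('stack.tif') as tif:  # doctest: +SKIP
    ...     data = stack_pages(tif.pages)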
""" if len(pages) == 0: raise ValueError("no pages") if len(pages) == 1: return pages[0].asarray(memmap=memmap, *args, **kwargs) result = pages[0].asarray(*args, **kwargs) shape = (len(pages),) + result.shape if memmap: with tempfile.NamedTemporaryFile() as fh: result = numpy.memmap(fh, dtype=result.dtype, shape=shape) else: result = numpy.empty(shape, dtype=result.dtype) for i, page in enumerate(pages): result[i] = page.asarray(*args, **kwargs) return result def stripnull(string): """Return string truncated at first null character. Clean NULL terminated C strings. >>> stripnull(b'string\\x00') b'string' """ i = string.find(b'\x00') return string if (i < 0) else string[:i] def stripascii(string): """Return string truncated at last byte that is 7bit ASCII. Clean NULL separated and terminated TIFF strings. >>> stripascii(b'string\\x00string\\n\\x01\\x00') b'string\\x00string\\n' >>> stripascii(b'\\x00') b'' """ # TODO: pythonize this ord_ = ord if sys.version_info[0] < 3 else lambda x: x i = len(string) while i: i -= 1 if 8 < ord_(string[i]) < 127: break else: i = -1 return string[:i+1] def format_size(size): """Return file size as string from byte size.""" for unit in ('B', 'KB', 'MB', 'GB', 'TB'): if size < 2048: return "%.f %s" % (size, unit) size /= 1024.0 def sequence(value): """Return tuple containing value if value is not a sequence. >>> sequence(1) (1,) >>> sequence([1]) [1] """ try: len(value) return value except TypeError: return (value, ) def product(iterable): """Return product of sequence of numbers. Equivalent of functools.reduce(operator.mul, iterable, 1). >>> product([2**8, 2**30]) 274877906944 >>> product([]) 1 """ prod = 1 for i in iterable: prod *= i return prod def natural_sorted(iterable): """Return human sorted list of strings. E.g. for sorting file names. >>> natural_sorted(['f1', 'f2', 'f10']) ['f1', 'f2', 'f10'] """ def sortkey(x): return [(int(c) if c.isdigit() else c) for c in re.split(numbers, x)] numbers = re.compile(r'(\d+)') return sorted(iterable, key=sortkey) def excel_datetime(timestamp, epoch=datetime.datetime.fromordinal(693594)): """Return datetime object from timestamp in Excel serial format. Convert LSM time stamps. >>> excel_datetime(40237.029999999795) datetime.datetime(2010, 2, 28, 0, 43, 11, 999982) """ return epoch + datetime.timedelta(timestamp) def julian_datetime(julianday, milisecond=0): """Return datetime from days since 1/1/4713 BC and ms since midnight. Convert Julian dates according to MetaMorph. >>> julian_datetime(2451576, 54362783) datetime.datetime(2000, 2, 2, 15, 6, 2, 783) """ if julianday <= 1721423: # no datetime before year 1 return None a = julianday + 1 if a > 2299160: alpha = math.trunc((a - 1867216.25) / 36524.25) a += 1 + alpha - alpha // 4 b = a + (1524 if a > 1721423 else 1158) c = math.trunc((b - 122.1) / 365.25) d = math.trunc(365.25 * c) e = math.trunc((b - d) / 30.6001) day = b - d - math.trunc(30.6001 * e) month = e - (1 if e < 13.5 else 13) year = c - (4716 if month > 2.5 else 4715) hour, milisecond = divmod(milisecond, 1000 * 60 * 60) minute, milisecond = divmod(milisecond, 1000 * 60) second, milisecond = divmod(milisecond, 1000) return datetime.datetime(year, month, day, hour, minute, second, milisecond) def test_tifffile(directory='testimages', verbose=True): """Read all images in directory. Print error message on failure. 
>>> test_tifffile(verbose=False) """ successful = 0 failed = 0 start = time.time() for f in glob.glob(os.path.join(directory, '*.*')): if verbose: print("\n%s>\n" % f.lower(), end='') t0 = time.time() try: tif = TiffFile(f, multifile=True) except Exception as e: if not verbose: print(f, end=' ') print("ERROR:", e) failed += 1 continue try: img = tif.asarray() except ValueError: try: img = tif[0].asarray() except Exception as e: if not verbose: print(f, end=' ') print("ERROR:", e) failed += 1 continue finally: tif.close() successful += 1 if verbose: print("%s, %s %s, %s, %.0f ms" % ( str(tif), str(img.shape), img.dtype, tif[0].compression, (time.time()-t0) * 1e3)) if verbose: print("\nSuccessfully read %i of %i files in %.3f s\n" % ( successful, successful+failed, time.time()-start)) class TIFF_SUBFILE_TYPES(object): def __getitem__(self, key): result = [] if key & 1: result.append('reduced_image') if key & 2: result.append('page') if key & 4: result.append('mask') return tuple(result) TIFF_PHOTOMETRICS = { 0: 'miniswhite', 1: 'minisblack', 2: 'rgb', 3: 'palette', 4: 'mask', 5: 'separated', # CMYK 6: 'ycbcr', 8: 'cielab', 9: 'icclab', 10: 'itulab', 32803: 'cfa', # Color Filter Array 32844: 'logl', 32845: 'logluv', 34892: 'linear_raw' } TIFF_COMPESSIONS = { 1: None, 2: 'ccittrle', 3: 'ccittfax3', 4: 'ccittfax4', 5: 'lzw', 6: 'ojpeg', 7: 'jpeg', 8: 'adobe_deflate', 9: 't85', 10: 't43', 32766: 'next', 32771: 'ccittrlew', 32773: 'packbits', 32809: 'thunderscan', 32895: 'it8ctpad', 32896: 'it8lw', 32897: 'it8mp', 32898: 'it8bl', 32908: 'pixarfilm', 32909: 'pixarlog', 32946: 'deflate', 32947: 'dcs', 34661: 'jbig', 34676: 'sgilog', 34677: 'sgilog24', 34712: 'jp2000', 34713: 'nef', } TIFF_DECOMPESSORS = { None: lambda x: x, 'adobe_deflate': zlib.decompress, 'deflate': zlib.decompress, 'packbits': decodepackbits, 'lzw': decodelzw, # 'jpeg': decodejpg } TIFF_DATA_TYPES = { 1: '1B', # BYTE 8-bit unsigned integer. 2: '1s', # ASCII 8-bit byte that contains a 7-bit ASCII code; # the last byte must be NULL (binary zero). 3: '1H', # SHORT 16-bit (2-byte) unsigned integer 4: '1I', # LONG 32-bit (4-byte) unsigned integer. 5: '2I', # RATIONAL Two LONGs: the first represents the numerator of # a fraction; the second, the denominator. 6: '1b', # SBYTE An 8-bit signed (twos-complement) integer. 7: '1s', # UNDEFINED An 8-bit byte that may contain anything, # depending on the definition of the field. 8: '1h', # SSHORT A 16-bit (2-byte) signed (twos-complement) integer. 9: '1i', # SLONG A 32-bit (4-byte) signed (twos-complement) integer. 10: '2i', # SRATIONAL Two SLONGs: the first represents the numerator # of a fraction, the second the denominator. 11: '1f', # FLOAT Single precision (4-byte) IEEE format. 12: '1d', # DOUBLE Double precision (8-byte) IEEE format. 13: '1I', # IFD unsigned 4 byte IFD offset. 
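    # Format of the strings above: the leading digit is the number of
    # struct values per count, the trailing character the struct format
    # code; e.g. RATIONAL (5) unpacks as two unsigned 32-bit integers
    # ('2I') per value.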
#14: '', # UNICODE #15: '', # COMPLEX 16: '1Q', # LONG8 unsigned 8 byte integer (BigTiff) 17: '1q', # SLONG8 signed 8 byte integer (BigTiff) 18: '1Q', # IFD8 unsigned 8 byte IFD offset (BigTiff) } TIFF_SAMPLE_FORMATS = { 1: 'uint', 2: 'int', 3: 'float', #4: 'void', #5: 'complex_int', 6: 'complex', } TIFF_SAMPLE_DTYPES = { ('uint', 1): '?', # bitmap ('uint', 2): 'B', ('uint', 3): 'B', ('uint', 4): 'B', ('uint', 5): 'B', ('uint', 6): 'B', ('uint', 7): 'B', ('uint', 8): 'B', ('uint', 9): 'H', ('uint', 10): 'H', ('uint', 11): 'H', ('uint', 12): 'H', ('uint', 13): 'H', ('uint', 14): 'H', ('uint', 15): 'H', ('uint', 16): 'H', ('uint', 17): 'I', ('uint', 18): 'I', ('uint', 19): 'I', ('uint', 20): 'I', ('uint', 21): 'I', ('uint', 22): 'I', ('uint', 23): 'I', ('uint', 24): 'I', ('uint', 25): 'I', ('uint', 26): 'I', ('uint', 27): 'I', ('uint', 28): 'I', ('uint', 29): 'I', ('uint', 30): 'I', ('uint', 31): 'I', ('uint', 32): 'I', ('uint', 64): 'Q', ('int', 8): 'b', ('int', 16): 'h', ('int', 32): 'i', ('int', 64): 'q', ('float', 16): 'e', ('float', 32): 'f', ('float', 64): 'd', ('complex', 64): 'F', ('complex', 128): 'D', ('uint', (5, 6, 5)): 'B', } TIFF_ORIENTATIONS = { 1: 'top_left', 2: 'top_right', 3: 'bottom_right', 4: 'bottom_left', 5: 'left_top', 6: 'right_top', 7: 'right_bottom', 8: 'left_bottom', } # TODO: is there a standard for character axes labels? AXES_LABELS = { 'X': 'width', 'Y': 'height', 'Z': 'depth', 'S': 'sample', # rgb(a) 'I': 'series', # general sequence, plane, page, IFD 'T': 'time', 'C': 'channel', # color, emission wavelength 'A': 'angle', 'P': 'phase', # formerly F # P is Position in LSM! 'R': 'tile', # region, point, mosaic 'H': 'lifetime', # histogram 'E': 'lambda', # excitation wavelength 'L': 'exposure', # lux 'V': 'event', 'Q': 'other', #'M': 'mosaic', # LSM 6 } AXES_LABELS.update(dict((v, k) for k, v in AXES_LABELS.items())) # Map OME pixel types to numpy dtype OME_PIXEL_TYPES = { 'int8': 'i1', 'int16': 'i2', 'int32': 'i4', 'uint8': 'u1', 'uint16': 'u2', 'uint32': 'u4', 'float': 'f4', # 'bit': 'bit', 'double': 'f8', 'complex': 'c8', 'double-complex': 'c16', } # NIH Image PicHeader v1.63 NIH_IMAGE_HEADER = [ ('fileid', 'a8'), ('nlines', 'i2'), ('pixelsperline', 'i2'), ('version', 'i2'), ('oldlutmode', 'i2'), ('oldncolors', 'i2'), ('colors', 'u1', (3, 32)), ('oldcolorstart', 'i2'), ('colorwidth', 'i2'), ('extracolors', 'u2', (6, 3)), ('nextracolors', 'i2'), ('foregroundindex', 'i2'), ('backgroundindex', 'i2'), ('xscale', 'f8'), ('_x0', 'i2'), ('_x1', 'i2'), ('units_t', 'i2'), # NIH_UNITS_TYPE ('p1', [('x', 'i2'), ('y', 'i2')]), ('p2', [('x', 'i2'), ('y', 'i2')]), ('curvefit_t', 'i2'), # NIH_CURVEFIT_TYPE ('ncoefficients', 'i2'), ('coeff', 'f8', 6), ('_um_len', 'u1'), ('um', 'a15'), ('_x2', 'u1'), ('binarypic', 'b1'), ('slicestart', 'i2'), ('sliceend', 'i2'), ('scalemagnification', 'f4'), ('nslices', 'i2'), ('slicespacing', 'f4'), ('currentslice', 'i2'), ('frameinterval', 'f4'), ('pixelaspectratio', 'f4'), ('colorstart', 'i2'), ('colorend', 'i2'), ('ncolors', 'i2'), ('fill1', '3u2'), ('fill2', '3u2'), ('colortable_t', 'u1'), # NIH_COLORTABLE_TYPE ('lutmode_t', 'u1'), # NIH_LUTMODE_TYPE ('invertedtable', 'b1'), ('zeroclip', 'b1'), ('_xunit_len', 'u1'), ('xunit', 'a11'), ('stacktype_t', 'i2'), # NIH_STACKTYPE_TYPE ] NIH_COLORTABLE_TYPE = ( 'CustomTable', 'AppleDefault', 'Pseudo20', 'Pseudo32', 'Rainbow', 'Fire1', 'Fire2', 'Ice', 'Grays', 'Spectrum') NIH_LUTMODE_TYPE = ( 'PseudoColor', 'OldAppleDefault', 'OldSpectrum', 'GrayScale', 'ColorLut', 'CustomGrayscale') 
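# Illustrative sketch, not part of the original module: the lookup tables
# above combine to resolve a TIFF sample description to a numpy dtype
# string. The helper name is hypothetical.
def _example_sample_dtype(sample_format, bits_per_sample, byteorder='<'):
    """Return numpy dtype string for a TIFF sample description.

    >>> _example_sample_dtype(1, 16)
    '<H'
    >>> _example_sample_dtype(3, 32)
    '<f'

    """
    key = (TIFF_SAMPLE_FORMATS[sample_format], bits_per_sample)
    return byteorder + TIFF_SAMPLE_DTYPES[key]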
NIH_CURVEFIT_TYPE = ( 'StraightLine', 'Poly2', 'Poly3', 'Poly4', 'Poly5', 'ExpoFit', 'PowerFit', 'LogFit', 'RodbardFit', 'SpareFit1', 'Uncalibrated', 'UncalibratedOD') NIH_UNITS_TYPE = ( 'Nanometers', 'Micrometers', 'Millimeters', 'Centimeters', 'Meters', 'Kilometers', 'Inches', 'Feet', 'Miles', 'Pixels', 'OtherUnits') NIH_STACKTYPE_TYPE = ( 'VolumeStack', 'RGBStack', 'MovieStack', 'HSVStack') # Map Universal Imaging Corporation MetaMorph internal tag ids to name and type UIC_TAGS = { 0: ('auto_scale', int), 1: ('min_scale', int), 2: ('max_scale', int), 3: ('spatial_calibration', int), 4: ('x_calibration', Fraction), 5: ('y_calibration', Fraction), 6: ('calibration_units', str), 7: ('name', str), 8: ('thresh_state', int), 9: ('thresh_state_red', int), 10: ('tagid_10', None), # undefined 11: ('thresh_state_green', int), 12: ('thresh_state_blue', int), 13: ('thresh_state_lo', int), 14: ('thresh_state_hi', int), 15: ('zoom', int), 16: ('create_time', julian_datetime), 17: ('last_saved_time', julian_datetime), 18: ('current_buffer', int), 19: ('gray_fit', None), 20: ('gray_point_count', None), 21: ('gray_x', Fraction), 22: ('gray_y', Fraction), 23: ('gray_min', Fraction), 24: ('gray_max', Fraction), 25: ('gray_unit_name', str), 26: ('standard_lut', int), 27: ('wavelength', int), 28: ('stage_position', '(%i,2,2)u4'), # N xy positions as fractions 29: ('camera_chip_offset', '(%i,2,2)u4'), # N xy offsets as fractions 30: ('overlay_mask', None), 31: ('overlay_compress', None), 32: ('overlay', None), 33: ('special_overlay_mask', None), 34: ('special_overlay_compress', None), 35: ('special_overlay', None), 36: ('image_property', read_uic_image_property), 37: ('stage_label', '%ip'), # N str 38: ('autoscale_lo_info', Fraction), 39: ('autoscale_hi_info', Fraction), 40: ('absolute_z', '(%i,2)u4'), # N fractions 41: ('absolute_z_valid', '(%i,)u4'), # N long 42: ('gamma', int), 43: ('gamma_red', int), 44: ('gamma_green', int), 45: ('gamma_blue', int), 46: ('camera_bin', int), 47: ('new_lut', int), 48: ('image_property_ex', None), 49: ('plane_property', int), 50: ('user_lut_table', '(256,3)u1'), 51: ('red_autoscale_info', int), 52: ('red_autoscale_lo_info', Fraction), 53: ('red_autoscale_hi_info', Fraction), 54: ('red_minscale_info', int), 55: ('red_maxscale_info', int), 56: ('green_autoscale_info', int), 57: ('green_autoscale_lo_info', Fraction), 58: ('green_autoscale_hi_info', Fraction), 59: ('green_minscale_info', int), 60: ('green_maxscale_info', int), 61: ('blue_autoscale_info', int), 62: ('blue_autoscale_lo_info', Fraction), 63: ('blue_autoscale_hi_info', Fraction), 64: ('blue_min_scale_info', int), 65: ('blue_max_scale_info', int), #66: ('overlay_plane_color', read_uic_overlay_plane_color), } # Olympus FluoView MM_DIMENSION = [ ('name', 'a16'), ('size', 'i4'), ('origin', 'f8'), ('resolution', 'f8'), ('unit', 'a64'), ] MM_HEADER = [ ('header_flag', 'i2'), ('image_type', 'u1'), ('image_name', 'a257'), ('offset_data', 'u4'), ('palette_size', 'i4'), ('offset_palette0', 'u4'), ('offset_palette1', 'u4'), ('comment_size', 'i4'), ('offset_comment', 'u4'), ('dimensions', MM_DIMENSION, 10), ('offset_position', 'u4'), ('map_type', 'i2'), ('map_min', 'f8'), ('map_max', 'f8'), ('min_value', 'f8'), ('max_value', 'f8'), ('offset_map', 'u4'), ('gamma', 'f8'), ('offset', 'f8'), ('gray_channel', MM_DIMENSION), ('offset_thumbnail', 'u4'), ('voice_field', 'i4'), ('offset_voice_field', 'u4'), ] # Carl Zeiss LSM CZ_LSM_INFO = [ ('magic_number', 'u4'), ('structure_size', 'i4'), ('dimension_x', 'i4'), ('dimension_y', 
'i4'), ('dimension_z', 'i4'), ('dimension_channels', 'i4'), ('dimension_time', 'i4'), ('data_type', 'i4'), # CZ_DATA_TYPES ('thumbnail_x', 'i4'), ('thumbnail_y', 'i4'), ('voxel_size_x', 'f8'), ('voxel_size_y', 'f8'), ('voxel_size_z', 'f8'), ('origin_x', 'f8'), ('origin_y', 'f8'), ('origin_z', 'f8'), ('scan_type', 'u2'), ('spectral_scan', 'u2'), ('type_of_data', 'u4'), # CZ_TYPE_OF_DATA ('offset_vector_overlay', 'u4'), ('offset_input_lut', 'u4'), ('offset_output_lut', 'u4'), ('offset_channel_colors', 'u4'), ('time_interval', 'f8'), ('offset_channel_data_types', 'u4'), ('offset_scan_info', 'u4'), # CZ_LSM_SCAN_INFO ('offset_ks_data', 'u4'), ('offset_time_stamps', 'u4'), ('offset_event_list', 'u4'), ('offset_roi', 'u4'), ('offset_bleach_roi', 'u4'), ('offset_next_recording', 'u4'), # LSM 2.0 ends here ('display_aspect_x', 'f8'), ('display_aspect_y', 'f8'), ('display_aspect_z', 'f8'), ('display_aspect_time', 'f8'), ('offset_mean_of_roi_overlay', 'u4'), ('offset_topo_isoline_overlay', 'u4'), ('offset_topo_profile_overlay', 'u4'), ('offset_linescan_overlay', 'u4'), ('offset_toolbar_flags', 'u4'), ('offset_channel_wavelength', 'u4'), ('offset_channel_factors', 'u4'), ('objective_sphere_correction', 'f8'), ('offset_unmix_parameters', 'u4'), # LSM 3.2, 4.0 end here ('offset_acquisition_parameters', 'u4'), ('offset_characteristics', 'u4'), ('offset_palette', 'u4'), ('time_difference_x', 'f8'), ('time_difference_y', 'f8'), ('time_difference_z', 'f8'), ('internal_use_1', 'u4'), ('dimension_p', 'i4'), ('dimension_m', 'i4'), ('dimensions_reserved', '16i4'), ('offset_tile_positions', 'u4'), ('reserved_1', '9u4'), ('offset_positions', 'u4'), ('reserved_2', '21u4'), # must be 0 ] # Import functions for LSM_INFO sub-records CZ_LSM_INFO_READERS = { 'scan_info': read_cz_lsm_scan_info, 'time_stamps': read_cz_lsm_time_stamps, 'event_list': read_cz_lsm_event_list, 'channel_colors': read_cz_lsm_floatpairs, 'positions': read_cz_lsm_floatpairs, 'tile_positions': read_cz_lsm_floatpairs, } # Map cz_lsm_info.scan_type to dimension order CZ_SCAN_TYPES = { 0: 'XYZCT', # x-y-z scan 1: 'XYZCT', # z scan (x-z plane) 2: 'XYZCT', # line scan 3: 'XYTCZ', # time series x-y 4: 'XYZTC', # time series x-z 5: 'XYTCZ', # time series 'Mean of ROIs' 6: 'XYZTC', # time series x-y-z 7: 'XYCTZ', # spline scan 8: 'XYCZT', # spline scan x-z 9: 'XYTCZ', # time series spline plane x-z 10: 'XYZCT', # point mode } # Map dimension codes to cz_lsm_info attribute CZ_DIMENSIONS = { 'X': 'dimension_x', 'Y': 'dimension_y', 'Z': 'dimension_z', 'C': 'dimension_channels', 'T': 'dimension_time', } # Description of cz_lsm_info.data_type CZ_DATA_TYPES = { 0: 'varying data types', 1: '8 bit unsigned integer', 2: '12 bit unsigned integer', 5: '32 bit float', } # Description of cz_lsm_info.type_of_data CZ_TYPE_OF_DATA = { 0: 'Original scan data', 1: 'Calculated data', 2: '3D reconstruction', 3: 'Topography height map', } CZ_LSM_SCAN_INFO_ARRAYS = { 0x20000000: "tracks", 0x30000000: "lasers", 0x60000000: "detection_channels", 0x80000000: "illumination_channels", 0xa0000000: "beam_splitters", 0xc0000000: "data_channels", 0x11000000: "timers", 0x13000000: "markers", } CZ_LSM_SCAN_INFO_STRUCTS = { # 0x10000000: "recording", 0x40000000: "track", 0x50000000: "laser", 0x70000000: "detection_channel", 0x90000000: "illumination_channel", 0xb0000000: "beam_splitter", 0xd0000000: "data_channel", 0x12000000: "timer", 0x14000000: "marker", } CZ_LSM_SCAN_INFO_ATTRIBUTES = { # recording 0x10000001: "name", 0x10000002: "description", 0x10000003: "notes", 0x10000004: 
"objective", 0x10000005: "processing_summary", 0x10000006: "special_scan_mode", 0x10000007: "scan_type", 0x10000008: "scan_mode", 0x10000009: "number_of_stacks", 0x1000000a: "lines_per_plane", 0x1000000b: "samples_per_line", 0x1000000c: "planes_per_volume", 0x1000000d: "images_width", 0x1000000e: "images_height", 0x1000000f: "images_number_planes", 0x10000010: "images_number_stacks", 0x10000011: "images_number_channels", 0x10000012: "linscan_xy_size", 0x10000013: "scan_direction", 0x10000014: "time_series", 0x10000015: "original_scan_data", 0x10000016: "zoom_x", 0x10000017: "zoom_y", 0x10000018: "zoom_z", 0x10000019: "sample_0x", 0x1000001a: "sample_0y", 0x1000001b: "sample_0z", 0x1000001c: "sample_spacing", 0x1000001d: "line_spacing", 0x1000001e: "plane_spacing", 0x1000001f: "plane_width", 0x10000020: "plane_height", 0x10000021: "volume_depth", 0x10000023: "nutation", 0x10000034: "rotation", 0x10000035: "precession", 0x10000036: "sample_0time", 0x10000037: "start_scan_trigger_in", 0x10000038: "start_scan_trigger_out", 0x10000039: "start_scan_event", 0x10000040: "start_scan_time", 0x10000041: "stop_scan_trigger_in", 0x10000042: "stop_scan_trigger_out", 0x10000043: "stop_scan_event", 0x10000044: "stop_scan_time", 0x10000045: "use_rois", 0x10000046: "use_reduced_memory_rois", 0x10000047: "user", 0x10000048: "use_bc_correction", 0x10000049: "position_bc_correction1", 0x10000050: "position_bc_correction2", 0x10000051: "interpolation_y", 0x10000052: "camera_binning", 0x10000053: "camera_supersampling", 0x10000054: "camera_frame_width", 0x10000055: "camera_frame_height", 0x10000056: "camera_offset_x", 0x10000057: "camera_offset_y", 0x10000059: "rt_binning", 0x1000005a: "rt_frame_width", 0x1000005b: "rt_frame_height", 0x1000005c: "rt_region_width", 0x1000005d: "rt_region_height", 0x1000005e: "rt_offset_x", 0x1000005f: "rt_offset_y", 0x10000060: "rt_zoom", 0x10000061: "rt_line_period", 0x10000062: "prescan", 0x10000063: "scan_direction_z", # track 0x40000001: "multiplex_type", # 0 after line; 1 after frame 0x40000002: "multiplex_order", 0x40000003: "sampling_mode", # 0 sample; 1 line average; 2 frame average 0x40000004: "sampling_method", # 1 mean; 2 sum 0x40000005: "sampling_number", 0x40000006: "acquire", 0x40000007: "sample_observation_time", 0x4000000b: "time_between_stacks", 0x4000000c: "name", 0x4000000d: "collimator1_name", 0x4000000e: "collimator1_position", 0x4000000f: "collimator2_name", 0x40000010: "collimator2_position", 0x40000011: "is_bleach_track", 0x40000012: "is_bleach_after_scan_number", 0x40000013: "bleach_scan_number", 0x40000014: "trigger_in", 0x40000015: "trigger_out", 0x40000016: "is_ratio_track", 0x40000017: "bleach_count", 0x40000018: "spi_center_wavelength", 0x40000019: "pixel_time", 0x40000021: "condensor_frontlens", 0x40000023: "field_stop_value", 0x40000024: "id_condensor_aperture", 0x40000025: "condensor_aperture", 0x40000026: "id_condensor_revolver", 0x40000027: "condensor_filter", 0x40000028: "id_transmission_filter1", 0x40000029: "id_transmission1", 0x40000030: "id_transmission_filter2", 0x40000031: "id_transmission2", 0x40000032: "repeat_bleach", 0x40000033: "enable_spot_bleach_pos", 0x40000034: "spot_bleach_posx", 0x40000035: "spot_bleach_posy", 0x40000036: "spot_bleach_posz", 0x40000037: "id_tubelens", 0x40000038: "id_tubelens_position", 0x40000039: "transmitted_light", 0x4000003a: "reflected_light", 0x4000003b: "simultan_grab_and_bleach", 0x4000003c: "bleach_pixel_time", # laser 0x50000001: "name", 0x50000002: "acquire", 0x50000003: "power", # detection_channel 
0x70000001: "integration_mode", 0x70000002: "special_mode", 0x70000003: "detector_gain_first", 0x70000004: "detector_gain_last", 0x70000005: "amplifier_gain_first", 0x70000006: "amplifier_gain_last", 0x70000007: "amplifier_offs_first", 0x70000008: "amplifier_offs_last", 0x70000009: "pinhole_diameter", 0x7000000a: "counting_trigger", 0x7000000b: "acquire", 0x7000000c: "point_detector_name", 0x7000000d: "amplifier_name", 0x7000000e: "pinhole_name", 0x7000000f: "filter_set_name", 0x70000010: "filter_name", 0x70000013: "integrator_name", 0x70000014: "channel_name", 0x70000015: "detector_gain_bc1", 0x70000016: "detector_gain_bc2", 0x70000017: "amplifier_gain_bc1", 0x70000018: "amplifier_gain_bc2", 0x70000019: "amplifier_offset_bc1", 0x70000020: "amplifier_offset_bc2", 0x70000021: "spectral_scan_channels", 0x70000022: "spi_wavelength_start", 0x70000023: "spi_wavelength_stop", 0x70000026: "dye_name", 0x70000027: "dye_folder", # illumination_channel 0x90000001: "name", 0x90000002: "power", 0x90000003: "wavelength", 0x90000004: "aquire", 0x90000005: "detchannel_name", 0x90000006: "power_bc1", 0x90000007: "power_bc2", # beam_splitter 0xb0000001: "filter_set", 0xb0000002: "filter", 0xb0000003: "name", # data_channel 0xd0000001: "name", 0xd0000003: "acquire", 0xd0000004: "color", 0xd0000005: "sample_type", 0xd0000006: "bits_per_sample", 0xd0000007: "ratio_type", 0xd0000008: "ratio_track1", 0xd0000009: "ratio_track2", 0xd000000a: "ratio_channel1", 0xd000000b: "ratio_channel2", 0xd000000c: "ratio_const1", 0xd000000d: "ratio_const2", 0xd000000e: "ratio_const3", 0xd000000f: "ratio_const4", 0xd0000010: "ratio_const5", 0xd0000011: "ratio_const6", 0xd0000012: "ratio_first_images1", 0xd0000013: "ratio_first_images2", 0xd0000014: "dye_name", 0xd0000015: "dye_folder", 0xd0000016: "spectrum", 0xd0000017: "acquire", # timer 0x12000001: "name", 0x12000002: "description", 0x12000003: "interval", 0x12000004: "trigger_in", 0x12000005: "trigger_out", 0x12000006: "activation_time", 0x12000007: "activation_number", # marker 0x14000001: "name", 0x14000002: "description", 0x14000003: "trigger_in", 0x14000004: "trigger_out", } # Map TIFF tag code to attribute name, default value, type, count, validator TIFF_TAGS = { 254: ('new_subfile_type', 0, 4, 1, TIFF_SUBFILE_TYPES()), 255: ('subfile_type', None, 3, 1, {0: 'undefined', 1: 'image', 2: 'reduced_image', 3: 'page'}), 256: ('image_width', None, 4, 1, None), 257: ('image_length', None, 4, 1, None), 258: ('bits_per_sample', 1, 3, 1, None), 259: ('compression', 1, 3, 1, TIFF_COMPESSIONS), 262: ('photometric', None, 3, 1, TIFF_PHOTOMETRICS), 266: ('fill_order', 1, 3, 1, {1: 'msb2lsb', 2: 'lsb2msb'}), 269: ('document_name', None, 2, None, None), 270: ('image_description', None, 2, None, None), 271: ('make', None, 2, None, None), 272: ('model', None, 2, None, None), 273: ('strip_offsets', None, 4, None, None), 274: ('orientation', 1, 3, 1, TIFF_ORIENTATIONS), 277: ('samples_per_pixel', 1, 3, 1, None), 278: ('rows_per_strip', 2**32-1, 4, 1, None), 279: ('strip_byte_counts', None, 4, None, None), 280: ('min_sample_value', None, 3, None, None), 281: ('max_sample_value', None, 3, None, None), # 2**bits_per_sample 282: ('x_resolution', None, 5, 1, None), 283: ('y_resolution', None, 5, 1, None), 284: ('planar_configuration', 1, 3, 1, {1: 'contig', 2: 'separate'}), 285: ('page_name', None, 2, None, None), 286: ('x_position', None, 5, 1, None), 287: ('y_position', None, 5, 1, None), 296: ('resolution_unit', 2, 4, 1, {1: 'none', 2: 'inch', 3: 'centimeter'}), 297: ('page_number', None, 3, 
2, None), 305: ('software', None, 2, None, None), 306: ('datetime', None, 2, None, None), 315: ('artist', None, 2, None, None), 316: ('host_computer', None, 2, None, None), 317: ('predictor', 1, 3, 1, {1: None, 2: 'horizontal'}), 318: ('white_point', None, 5, 2, None), 319: ('primary_chromaticities', None, 5, 6, None), 320: ('color_map', None, 3, None, None), 322: ('tile_width', None, 4, 1, None), 323: ('tile_length', None, 4, 1, None), 324: ('tile_offsets', None, 4, None, None), 325: ('tile_byte_counts', None, 4, None, None), 338: ('extra_samples', None, 3, None, {0: 'unspecified', 1: 'assocalpha', 2: 'unassalpha'}), 339: ('sample_format', 1, 3, 1, TIFF_SAMPLE_FORMATS), 340: ('smin_sample_value', None, None, None, None), 341: ('smax_sample_value', None, None, None, None), 347: ('jpeg_tables', None, 7, None, None), 530: ('ycbcr_subsampling', 1, 3, 2, None), 531: ('ycbcr_positioning', 1, 3, 1, None), 32996: ('sgi_matteing', None, None, 1, None), # use extra_samples 32996: ('sgi_datatype', None, None, 1, None), # use sample_format 32997: ('image_depth', None, 4, 1, None), 32998: ('tile_depth', None, 4, 1, None), 33432: ('copyright', None, 1, None, None), 33445: ('md_file_tag', None, 4, 1, None), 33446: ('md_scale_pixel', None, 5, 1, None), 33447: ('md_color_table', None, 3, None, None), 33448: ('md_lab_name', None, 2, None, None), 33449: ('md_sample_info', None, 2, None, None), 33450: ('md_prep_date', None, 2, None, None), 33451: ('md_prep_time', None, 2, None, None), 33452: ('md_file_units', None, 2, None, None), 33550: ('model_pixel_scale', None, 12, 3, None), 33922: ('model_tie_point', None, 12, None, None), 34665: ('exif_ifd', None, None, 1, None), 34735: ('geo_key_directory', None, 3, None, None), 34736: ('geo_double_params', None, 12, None, None), 34737: ('geo_ascii_params', None, 2, None, None), 34853: ('gps_ifd', None, None, 1, None), 37510: ('user_comment', None, None, None, None), 42112: ('gdal_metadata', None, 2, None, None), 42113: ('gdal_nodata', None, 2, None, None), 50289: ('mc_xy_position', None, 12, 2, None), 50290: ('mc_z_position', None, 12, 1, None), 50291: ('mc_xy_calibration', None, 12, 3, None), 50292: ('mc_lens_lem_na_n', None, 12, 3, None), 50293: ('mc_channel_name', None, 1, None, None), 50294: ('mc_ex_wavelength', None, 12, 1, None), 50295: ('mc_time_stamp', None, 12, 1, None), 50838: ('imagej_byte_counts', None, None, None, None), 65200: ('flex_xml', None, 2, None, None), # code: (attribute name, default value, type, count, validator) } # Map custom TIFF tag codes to attribute names and import functions CUSTOM_TAGS = { 700: ('xmp', read_bytes), 34377: ('photoshop', read_numpy), 33723: ('iptc', read_bytes), 34675: ('icc_profile', read_bytes), 33628: ('uic1tag', read_uic1tag), # Universal Imaging Corporation STK 33629: ('uic2tag', read_uic2tag), 33630: ('uic3tag', read_uic3tag), 33631: ('uic4tag', read_uic4tag), 34361: ('mm_header', read_mm_header), # Olympus FluoView 34362: ('mm_stamp', read_mm_stamp), 34386: ('mm_user_block', read_bytes), 34412: ('cz_lsm_info', read_cz_lsm_info), # Carl Zeiss LSM 43314: ('nih_image_header', read_nih_image_header), # 40001: ('mc_ipwinscal', read_bytes), 40100: ('mc_id_old', read_bytes), 50288: ('mc_id', read_bytes), 50296: ('mc_frame_properties', read_bytes), 50839: ('imagej_metadata', read_bytes), 51123: ('micromanager_metadata', read_json), } # Max line length of printed output PRINT_LINE_LEN = 79 def imshow(data, title=None, vmin=0, vmax=None, cmap=None, bitspersample=None, photometric='rgb', interpolation='nearest', dpi=96, 
figure=None, subplot=111, maxdim=8192, **kwargs): """Plot n-dimensional images using matplotlib.pyplot. Return figure, subplot and plot axis. Requires pyplot already imported ``from matplotlib import pyplot``. Parameters ---------- bitspersample : int or None Number of bits per channel in integer RGB images. photometric : {'miniswhite', 'minisblack', 'rgb', or 'palette'} The color space of the image data. title : str Window and subplot title. figure : matplotlib.figure.Figure (optional). Matplotlib to use for plotting. subplot : int A matplotlib.pyplot.subplot axis. maxdim : int maximum image size in any dimension. kwargs : optional Arguments for matplotlib.pyplot.imshow. """ #if photometric not in ('miniswhite', 'minisblack', 'rgb', 'palette'): # raise ValueError("Can't handle %s photometrics" % photometric) # TODO: handle photometric == 'separated' (CMYK) isrgb = photometric in ('rgb', 'palette') data = numpy.atleast_2d(data.squeeze()) data = data[(slice(0, maxdim), ) * len(data.shape)] dims = data.ndim if dims < 2: raise ValueError("not an image") elif dims == 2: dims = 0 isrgb = False else: if isrgb and data.shape[-3] in (3, 4): data = numpy.swapaxes(data, -3, -2) data = numpy.swapaxes(data, -2, -1) elif not isrgb and (data.shape[-1] < data.shape[-2] // 16 and data.shape[-1] < data.shape[-3] // 16 and data.shape[-1] < 5): data = numpy.swapaxes(data, -3, -1) data = numpy.swapaxes(data, -2, -1) isrgb = isrgb and data.shape[-1] in (3, 4) dims -= 3 if isrgb else 2 if photometric == 'palette' and isrgb: datamax = data.max() if datamax > 255: data >>= 8 # possible precision loss data = data.astype('B') elif data.dtype.kind in 'ui': if not (isrgb and data.dtype.itemsize <= 1) or bitspersample is None: try: bitspersample = int(math.ceil(math.log(data.max(), 2))) except Exception: bitspersample = data.dtype.itemsize * 8 elif not isinstance(bitspersample, int): # bitspersample can be tuple, e.g. 
(5, 6, 5) bitspersample = data.dtype.itemsize * 8 datamax = 2**bitspersample if isrgb: if bitspersample < 8: data <<= 8 - bitspersample elif bitspersample > 8: data >>= bitspersample - 8 # precision loss data = data.astype('B') elif data.dtype.kind == 'f': datamax = data.max() if isrgb and datamax > 1.0: if data.dtype.char == 'd': data = data.astype('f') data /= datamax elif data.dtype.kind == 'b': datamax = 1 elif data.dtype.kind == 'c': raise NotImplementedError("complex type") # TODO: handle complex types if not isrgb: if vmax is None: vmax = datamax if vmin is None: if data.dtype.kind == 'i': dtmin = numpy.iinfo(data.dtype).min vmin = numpy.min(data) if vmin == dtmin: vmin = numpy.min(data > dtmin) if data.dtype.kind == 'f': dtmin = numpy.finfo(data.dtype).min vmin = numpy.min(data) if vmin == dtmin: vmin = numpy.min(data > dtmin) else: vmin = 0 pyplot = sys.modules['matplotlib.pyplot'] if figure is None: pyplot.rc('font', family='sans-serif', weight='normal', size=8) figure = pyplot.figure(dpi=dpi, figsize=(10.3, 6.3), frameon=True, facecolor='1.0', edgecolor='w') try: figure.canvas.manager.window.title(title) except Exception: pass pyplot.subplots_adjust(bottom=0.03*(dims+2), top=0.9, left=0.1, right=0.95, hspace=0.05, wspace=0.0) subplot = pyplot.subplot(subplot) if title: try: title = unicode(title, 'Windows-1252') except TypeError: pass pyplot.title(title, size=11) if cmap is None: if data.dtype.kind in 'ubf' or vmin == 0: cmap = 'cubehelix' else: cmap = 'coolwarm' if photometric == 'miniswhite': cmap += '_r' image = pyplot.imshow(data[(0, ) * dims].squeeze(), vmin=vmin, vmax=vmax, cmap=cmap, interpolation=interpolation, **kwargs) if not isrgb: pyplot.colorbar() # panchor=(0.55, 0.5), fraction=0.05 def format_coord(x, y): # callback function to format coordinate display in toolbar x = int(x + 0.5) y = int(y + 0.5) try: if dims: return "%s @ %s [%4i, %4i]" % (cur_ax_dat[1][y, x], current, x, y) else: return "%s @ [%4i, %4i]" % (data[y, x], x, y) except IndexError: return "" pyplot.gca().format_coord = format_coord if dims: current = list((0, ) * dims) cur_ax_dat = [0, data[tuple(current)].squeeze()] sliders = [pyplot.Slider( pyplot.axes([0.125, 0.03*(axis+1), 0.725, 0.025]), 'Dimension %i' % axis, 0, data.shape[axis]-1, 0, facecolor='0.5', valfmt='%%.0f [%i]' % data.shape[axis]) for axis in range(dims)] for slider in sliders: slider.drawon = False def set_image(current, sliders=sliders, data=data): # change image and redraw canvas cur_ax_dat[1] = data[tuple(current)].squeeze() image.set_data(cur_ax_dat[1]) for ctrl, index in zip(sliders, current): ctrl.eventson = False ctrl.set_val(index) ctrl.eventson = True figure.canvas.draw() def on_changed(index, axis, data=data, current=current): # callback function for slider change event index = int(round(index)) cur_ax_dat[0] = axis if index == current[axis]: return if index >= data.shape[axis]: index = 0 elif index < 0: index = data.shape[axis] - 1 current[axis] = index set_image(current) def on_keypressed(event, data=data, current=current): # callback function for key press event key = event.key axis = cur_ax_dat[0] if str(key) in '0123456789': on_changed(key, axis) elif key == 'right': on_changed(current[axis] + 1, axis) elif key == 'left': on_changed(current[axis] - 1, axis) elif key == 'up': cur_ax_dat[0] = 0 if axis == len(data.shape)-1 else axis + 1 elif key == 'down': cur_ax_dat[0] = len(data.shape)-1 if axis == 0 else axis - 1 elif key == 'end': on_changed(data.shape[axis] - 1, axis) elif key == 'home': on_changed(0, axis) 
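        # Wire up interactivity: digit and arrow keys go through
        # on_keypressed, while each slider drives on_changed for its own
        # axis (the lambda's default argument binds the axis index per
        # slider).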
figure.canvas.mpl_connect('key_press_event', on_keypressed) for axis, ctrl in enumerate(sliders): ctrl.on_changed(lambda k, a=axis: on_changed(k, a)) return figure, subplot, image def _app_show(): """Block the GUI. For use as skimage plugin.""" pyplot = sys.modules['matplotlib.pyplot'] pyplot.show() def main(argv=None): """Command line usage main function.""" if float(sys.version[0:3]) < 2.6: print("This script requires Python version 2.6 or better.") print("This is Python version %s" % sys.version) return 0 if argv is None: argv = sys.argv import optparse parser = optparse.OptionParser( usage="usage: %prog [options] path", description="Display image data in TIFF files.", version="%%prog %s" % __version__) opt = parser.add_option opt('-p', '--page', dest='page', type='int', default=-1, help="display single page") opt('-s', '--series', dest='series', type='int', default=-1, help="display series of pages of same shape") opt('--nomultifile', dest='nomultifile', action='store_true', default=False, help="don't read OME series from multiple files") opt('--noplot', dest='noplot', action='store_true', default=False, help="don't display images") opt('--interpol', dest='interpol', metavar='INTERPOL', default='bilinear', help="image interpolation method") opt('--dpi', dest='dpi', type='int', default=96, help="set plot resolution") opt('--debug', dest='debug', action='store_true', default=False, help="raise exception on failures") opt('--test', dest='test', action='store_true', default=False, help="try read all images in path") opt('--doctest', dest='doctest', action='store_true', default=False, help="runs the docstring examples") opt('-v', '--verbose', dest='verbose', action='store_true', default=True) opt('-q', '--quiet', dest='verbose', action='store_false') settings, path = parser.parse_args() path = ' '.join(path) if settings.doctest: import doctest doctest.testmod() return 0 if not path: parser.error("No file specified") if settings.test: test_tifffile(path, settings.verbose) return 0 if any(i in path for i in '?*'): path = glob.glob(path) if not path: print('no files match the pattern') return 0 # TODO: handle image sequences #if len(path) == 1: path = path[0] print("Reading file structure...", end=' ') start = time.time() try: tif = TiffFile(path, multifile=not settings.nomultifile) except Exception as e: if settings.debug: raise else: print("\n", e) sys.exit(0) print("%.3f ms" % ((time.time()-start) * 1e3)) if tif.is_ome: settings.norgb = True images = [(None, tif[0 if settings.page < 0 else settings.page])] if not settings.noplot: print("Reading image data... ", end=' ') def notnone(x): return next(i for i in x if i is not None) start = time.time() try: if settings.page >= 0: images = [(tif.asarray(key=settings.page), tif[settings.page])] elif settings.series >= 0: images = [(tif.asarray(series=settings.series), notnone(tif.series[settings.series].pages))] else: images = [] for i, s in enumerate(tif.series): try: images.append( (tif.asarray(series=i), notnone(s.pages))) except ValueError as e: images.append((None, notnone(s.pages))) if settings.debug: raise else: print("\n* series %i failed: %s... 
" % (i, e), end='') print("%.3f ms" % ((time.time()-start) * 1e3)) except Exception as e: if settings.debug: raise else: print(e) tif.close() print("\nTIFF file:", tif) print() for i, s in enumerate(tif.series): print ("Series %i" % i) print(s) print() for i, page in images: print(page) print(page.tags) if page.is_palette: print("\nColor Map:", page.color_map.shape, page.color_map.dtype) for attr in ('cz_lsm_info', 'cz_lsm_scan_info', 'uic_tags', 'mm_header', 'imagej_tags', 'micromanager_metadata', 'nih_image_header'): if hasattr(page, attr): print("", attr.upper(), Record(getattr(page, attr)), sep="\n") print() if page.is_micromanager: print('MICROMANAGER_FILE_METADATA') print(Record(tif.micromanager_metadata)) if images and not settings.noplot: try: import matplotlib matplotlib.use('TkAgg') from matplotlib import pyplot except ImportError as e: warnings.warn("failed to import matplotlib.\n%s" % e) else: for img, page in images: if img is None: continue vmin, vmax = None, None if 'gdal_nodata' in page.tags: try: vmin = numpy.min(img[img > float(page.gdal_nodata)]) except ValueError: pass if page.is_stk: try: vmin = page.uic_tags['min_scale'] vmax = page.uic_tags['max_scale'] except KeyError: pass else: if vmax <= vmin: vmin, vmax = None, None title = "%s\n %s" % (str(tif), str(page)) imshow(img, title=title, vmin=vmin, vmax=vmax, bitspersample=page.bits_per_sample, photometric=page.photometric, interpolation=settings.interpol, dpi=settings.dpi) pyplot.show() TIFFfile = TiffFile # backwards compatibility if sys.version_info[0] > 2: basestring = str, bytes unicode = str if __name__ == "__main__": sys.exit(main()) mpop-1.5.0/mpop/imageo/formats/writer_options.py000066400000000000000000000016661317160620000220250ustar00rootroot00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright (c) 2016. # Author(s): # Christian Kliche # This file is part of the mpop. # mpop is free software: you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # mpop is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # You should have received a copy of the GNU General Public License # along with mpop. If not, see . ''' Module for writer option constants ''' WR_OPT_NBITS = 'nbits' WR_OPT_COMPRESSION = 'compression' WR_OPT_BLOCKSIZE = 'blocksize' WR_OPT_FILL_VALUE_SUBST = 'fill_value_subst' mpop-1.5.0/mpop/imageo/geo_image.py000066400000000000000000000447621317160620000172030ustar00rootroot00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright (c) 2009-2014. # SMHI, # Folkborgsvägen 1, # Norrköping, # Sweden # Author(s): # Martin Raspaud # Adam Dybbroe # Esben S. Nielsen # Stefano Cerino # Katja Hungershofer # This file is part of mpop. # mpop is free software: you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # mpop is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. 
# You should have received a copy of the GNU General Public License
# along with mpop.  If not, see <http://www.gnu.org/licenses/>.
"""Module for geographic images.
"""
import logging
import os

import numpy as np

from mpop import CONFIG_PATH
from mpop.utils import ensure_dir

try:
    from trollimage.image import Image, UnknownImageFormat
except ImportError:
    from mpop.imageo.image import Image, UnknownImageFormat

import mpop.imageo.formats.writer_options as write_opts

logger = logging.getLogger(__name__)


class GeoImage(Image):
    """This class defines geographic images. As such, it contains not only
    data of the different *channels* of the image, but also the area on
    which it is defined (*area* parameter) and the *time_slot* of the
    snapshot.

    The channels are considered to contain floating point values in the
    range [0.0,1.0]. In order to normalize the input data, the *crange*
    parameter defines the original range of the data. The conversion to
    the classical [0,255] range and byte type is done automagically when
    saving the image to file.

    See also :class:`image.Image` for more information.
    """

    def __init__(self, channels, area, time_slot,
                 mode="L", crange=None, fill_value=None, palette=None):
        self.area = area
        self.time_slot = time_slot
        self.tags = {}
        self.gdal_options = {}
        Image.__init__(self, channels, mode, crange, fill_value, palette)

    def save(self, filename, compression=6, tags=None, gdal_options=None,
             fformat=None, blocksize=256, writer_options=None, **kwargs):
        """Save the image to the given *filename*. If the extension is
        "tif", the image is saved to geotiff_ format, in which case the
        *compression* level can be given ([0, 9], 0 meaning off). See also
        :meth:`image.Image.save`, :meth:`image.Image.double_save`, and
        :meth:`image.Image.secure_save`. The *tags* argument is a dict of
        tags to include in the image (as metadata), and the *gdal_options*
        holds options for the gdal saving driver. A *blocksize* other than
        0 will result in a tiled image (if possible), with tiles of size
        equal to *blocksize*.

        If the specified format *fformat* is not known to MPOP (and PIL),
        we will try to import module *fformat* and call the method
        `fformat.save`.

        Use *writer_options* to define parameters that should be forwarded
        to custom writers. Dictionary keys listed in
        mpop.imageo.formats.writer_options will be interpreted by this
        function instead of *compression*, *blocksize* and nbits in the
        *tags* dict.

        .. _geotiff: http://trac.osgeo.org/geotiff/
        """
        fformat = fformat or os.path.splitext(filename)[1][1:]

        # prefer parameters in the writer_options dict,
        # and fill the dict where parameters are missing
        writer_options = writer_options or {}
        tags = tags or {}
        if writer_options.get(write_opts.WR_OPT_COMPRESSION, None):
            compression = writer_options[write_opts.WR_OPT_COMPRESSION]
        elif compression is not None:
            writer_options[write_opts.WR_OPT_COMPRESSION] = compression

        if writer_options.get(write_opts.WR_OPT_BLOCKSIZE, None):
            blocksize = writer_options[write_opts.WR_OPT_BLOCKSIZE]
        elif blocksize is not None:
            writer_options[write_opts.WR_OPT_BLOCKSIZE] = blocksize

        if writer_options.get(write_opts.WR_OPT_NBITS, None):
            tags['NBITS'] = writer_options[write_opts.WR_OPT_NBITS]
        elif tags.get('NBITS') is not None:
            writer_options[write_opts.WR_OPT_NBITS] = tags.get('NBITS')

        if fformat.lower() in ('tif', 'tiff'):
            kwargs = kwargs or {}
            kwargs['writer_options'] = writer_options
            return self.geotiff_save(filename, compression, tags,
                                     gdal_options, blocksize, **kwargs)
        try:
            # Let image.pil_save handle it?
            Image.save(self, filename, compression, fformat=fformat)
        except UnknownImageFormat:
            # No: as a last resort, try to import an external writer module.
            logger.info("Importing image writer module '%s'" % fformat)
            try:
                saver = __import__(fformat, globals(), locals(), ['save'])
            except ImportError:
                raise UnknownImageFormat(
                    "Unknown image format '%s'" % fformat)
            kwargs = kwargs or {}
            kwargs['writer_options'] = writer_options
            saver.save(self, filename, **kwargs)

    def _gdal_write_channels(self, dst_ds, channels, opacity, fill_value):
        """Write *channels* in a gdal raster structure *dst_ds*, using
        *opacity* as alpha value for valid data, and *fill_value*.
        """
        if fill_value is not None:
            for i, chan in enumerate(channels):
                chn = chan.filled(fill_value[i])
                bnd = dst_ds.GetRasterBand(i + 1)
                bnd.SetNoDataValue(fill_value[i])
                bnd.WriteArray(chn)
        else:
            mask = np.ones(channels[0].shape, dtype=np.bool)
            i = 0
            for i, chan in enumerate(channels):
                dst_ds.GetRasterBand(i + 1).WriteArray(chan.filled(0))
                mask &= np.ma.getmaskarray(chan)
            try:
                mask |= np.ma.getmaskarray(opacity)
            except AttributeError:
                pass
            alpha = np.where(mask, 0, opacity).astype(chan.dtype)
            dst_ds.GetRasterBand(i + 2).WriteArray(alpha)

    def geotiff_save(self, filename, compression=6,
                     tags=None, gdal_options=None,
                     blocksize=0, geotransform=None,
                     spatialref=None, floating_point=False,
                     writer_options=None):
        """Save the image to the given *filename* in geotiff_ format, with
        the *compression* level in [0, 9]; 0 means not compressed. The
        *tags* argument is a dict of tags to include in the image (as
        metadata).  By default the 'area' instance is used to generate the
        geotransform and spatialref information; this can be overridden by
        the arguments *geotransform* and *spatialref*. *floating_point*
        allows the saving of 'L' mode images in floating point format if
        set to True.
        When the argument *writer_options* is not None and the entry
        'fill_value_subst' is included, its numeric value will be used to
        substitute image data that would be equal to the fill_value (used
        to replace masked data).

        ..
_geotiff: http://trac.osgeo.org/geotiff/ """ from osgeo import gdal, osr raster = gdal.GetDriverByName("GTiff") tags = tags or {} writer_options = writer_options or {} if floating_point: if self.mode != "L": raise ValueError("Image must be in 'L' mode for floating point" " geotif saving") if self.fill_value is None: logger.warning("Image with floats cannot be transparent, " "so setting fill_value to 0") self.fill_value = 0 channels = [self.channels[0].astype(np.float64)] fill_value = self.fill_value or [0] gformat = gdal.GDT_Float64 opacity = 0 else: nbits = int(tags.get("NBITS", "8")) if nbits > 16: dtype = np.uint32 gformat = gdal.GDT_UInt32 elif nbits > 8: dtype = np.uint16 gformat = gdal.GDT_UInt16 else: dtype = np.uint8 gformat = gdal.GDT_Byte opacity = np.iinfo(dtype).max channels, fill_value = self._finalize(dtype) fill_value_subst = writer_options.get( write_opts.WR_OPT_FILL_VALUE_SUBST, None) if fill_value is not None and fill_value_subst is not None: for i, chan in enumerate(channels): np.place(chan, chan == fill_value[i], int(fill_value_subst)) logger.debug("Saving to GeoTiff.") if tags is not None: self.tags.update(tags) if gdal_options is not None: self.gdal_options.update(gdal_options) g_opts = ["=".join(i) for i in self.gdal_options.items()] if compression != 0: g_opts.append("COMPRESS=DEFLATE") g_opts.append("ZLEVEL=" + str(compression)) if blocksize != 0: g_opts.append("TILED=YES") g_opts.append("BLOCKXSIZE=" + str(blocksize)) g_opts.append("BLOCKYSIZE=" + str(blocksize)) if(self.mode == "L"): ensure_dir(filename) if fill_value is not None: dst_ds = raster.Create(filename, self.width, self.height, 1, gformat, g_opts) else: g_opts.append("ALPHA=YES") dst_ds = raster.Create(filename, self.width, self.height, 2, gformat, g_opts) self._gdal_write_channels(dst_ds, channels, opacity, fill_value) elif(self.mode == "LA"): ensure_dir(filename) g_opts.append("ALPHA=YES") dst_ds = raster.Create(filename, self.width, self.height, 2, gformat, g_opts) self._gdal_write_channels(dst_ds, channels[:-1], channels[1], fill_value) elif(self.mode == "RGB"): ensure_dir(filename) if fill_value is not None: dst_ds = raster.Create(filename, self.width, self.height, 3, gformat, g_opts) else: g_opts.append("ALPHA=YES") dst_ds = raster.Create(filename, self.width, self.height, 4, gformat, g_opts) self._gdal_write_channels(dst_ds, channels, opacity, fill_value) elif(self.mode == "RGBA"): ensure_dir(filename) g_opts.append("ALPHA=YES") dst_ds = raster.Create(filename, self.width, self.height, 4, gformat, g_opts) self._gdal_write_channels(dst_ds, channels[:-1], channels[3], fill_value) else: raise NotImplementedError("Saving to GeoTIFF using image mode" " %s is not implemented." % self.mode) # Create raster GeoTransform based on upper left corner and pixel # resolution ... if not overwritten by argument geotransform. if geotransform: dst_ds.SetGeoTransform(geotransform) if spatialref: if not isinstance(spatialref, str): spatialref = spatialref.ExportToWkt() dst_ds.SetProjection(spatialref) else: from pyresample import utils from mpop.projector import get_area_def try: area = get_area_def(self.area) except (utils.AreaNotFound, AttributeError): area = self.area try: adfgeotransform = [area.area_extent[0], area.pixel_size_x, 0, area.area_extent[3], 0, -area.pixel_size_y] dst_ds.SetGeoTransform(adfgeotransform) srs = osr.SpatialReference() srs.ImportFromProj4(area.proj4_string) srs.SetProjCS(area.proj_id) try: srs.SetWellKnownGeogCS(area.proj_dict['ellps']) except KeyError: pass try: # Check for epsg code. 
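                # (Added note: an 'init' entry typically looks like
                # "EPSG:4326"; lower-casing and splitting on 'epsg:' leaves
                # the numeric code, e.g.
                # int("epsg:4326".split('epsg:')[1]) == 4326.)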
srs.ImportFromEPSG(int(area.proj_dict['init']. lower().split('epsg:')[1])) except (KeyError, IndexError): pass srs = srs.ExportToWkt() dst_ds.SetProjection(srs) except AttributeError: logger.exception("Could not load geographic data, invalid area") self.tags.update({'TIFFTAG_DATETIME': self.time_slot.strftime("%Y:%m:%d %H:%M:%S")}) dst_ds.SetMetadata(self.tags, '') # Close the dataset dst_ds = None def add_overlay(self, color=(0, 0, 0), width=0.5, resolution=None): """Add coastline and political borders to image, using *color* (tuple of integers between 0 and 255). Warning: Loses the masks ! *resolution* is chosen automatically if None (default), otherwise it should be one of: +-----+-------------------------+---------+ | 'f' | Full resolution | 0.04 km | | 'h' | High resolution | 0.2 km | | 'i' | Intermediate resolution | 1.0 km | | 'l' | Low resolution | 5.0 km | | 'c' | Crude resolution | 25 km | +-----+-------------------------+---------+ """ img = self.pil_image() import ConfigParser conf = ConfigParser.ConfigParser() conf.read(os.path.join(CONFIG_PATH, "mpop.cfg")) coast_dir = conf.get('shapes', 'dir') logger.debug("Getting area for overlay: " + str(self.area)) if self.area is None: raise ValueError("Area of image is None, can't add overlay.") from mpop.projector import get_area_def if isinstance(self.area, str): self.area = get_area_def(self.area) logger.info("Add coastlines and political borders to image.") logger.debug("Area = " + str(self.area)) if resolution is None: x_resolution = ((self.area.area_extent[2] - self.area.area_extent[0]) / self.area.x_size) y_resolution = ((self.area.area_extent[3] - self.area.area_extent[1]) / self.area.y_size) res = min(x_resolution, y_resolution) if res > 25000: resolution = "c" elif res > 5000: resolution = "l" elif res > 1000: resolution = "i" elif res > 200: resolution = "h" else: resolution = "f" logger.debug("Automagically choose resolution " + resolution) from pycoast import ContourWriterAGG cw_ = ContourWriterAGG(coast_dir) cw_.add_coastlines(img, self.area, outline=color, resolution=resolution, width=width) cw_.add_borders(img, self.area, outline=color, resolution=resolution, width=width) arr = np.array(img) if len(self.channels) == 1: self.channels[0] = np.ma.array(arr[:, :] / 255.0) else: for idx in range(len(self.channels)): self.channels[idx] = np.ma.array(arr[:, :, idx] / 255.0) def add_overlay_config(self, config_file): """Add overlay to image parsing a configuration file. 
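        Usage sketch (the path and inputs are hypothetical; requires
        pycoast and the shapes directory configured in mpop.cfg)::

            img = GeoImage(channels, area, time_slot)
            img.add_overlay_config('/path/to/overlay_config.ini')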
""" import ConfigParser conf = ConfigParser.ConfigParser() conf.read(os.path.join(CONFIG_PATH, "mpop.cfg")) coast_dir = conf.get('shapes', 'dir') logger.debug("Getting area for overlay: " + str(self.area.area_id)) try: import aggdraw from pycoast import ContourWriterAGG cw_ = ContourWriterAGG(coast_dir) except ImportError: logger.warning("AGGdraw lib not installed...width and opacity properties are not available for overlays.") from pycoast import ContourWriter cw_ = ContourWriter(coast_dir) logger.debug("Getting area for overlay: " + str(self.area)) if self.area is None: raise ValueError("Area of image is None, can't add overlay.") if self.mode != "RGB": self.convert("RGB") img = self.pil_image() from mpop.projector import get_area_def if isinstance(self.area, str): self.area = get_area_def(self.area) logger.info("Add overlays to image.") logger.debug("Area = " + str(self.area.area_id)) foreground = cw_.add_overlay_from_config(config_file, self.area) img.paste(foreground, mask=foreground.split()[-1]) arr = np.array(img) if len(self.channels) == 1: self.channels[0] = np.ma.array(arr[:, :] / 255.0) else: for idx in range(len(self.channels)): self.channels[idx] = np.ma.array(arr[:, :, idx] / 255.0) mpop-1.5.0/mpop/imageo/image.py000066400000000000000000001255151317160620000163450ustar00rootroot00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright (c) 2009, 2012, 2013, 2014. # SMHI, # Folkborgsvägen 1, # Norrköping, # Sweden # Author(s): # Martin Raspaud # Adam Dybbroe # Esben S. Nielsen # This file is part of the mpop. # mpop is free software: you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # mpop is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # You should have received a copy of the GNU General Public License # along with mpop. If not, see . """This module defines the image class. It overlaps largely the PIL library, but has the advandage of using masked arrays as pixel arrays, so that data arrays containing invalid values may be properly handled. """ import warnings warnings.warn(__name__ + " is deprecated, please use trollimage instead.", DeprecationWarning) import os import re from PIL import Image as Pil import numpy as np try: import numexpr as ne except ImportError: ne = None from mpop.utils import ensure_dir import logging LOG = logging.getLogger(__name__) class UnknownImageFormat(Exception): """Exception to be raised when image format is unknown to MPOP""" pass def check_image_format(fformat): cases = {"jpg": "jpeg", "jpeg": "jpeg", "tif": "tiff", "tiff": "tif", "pgm": "ppm", "pbm": "ppm", "ppm": "ppm", "bmp": "bmp", "dib": "bmp", "gif": "gif", "im": "im", "pcx": "pcx", "png": "png", "xbm": "xbm", "xpm": "xpm", } fformat = fformat.lower() try: fformat = cases[fformat] except KeyError: raise UnknownImageFormat("Unknown image format '%s'." % fformat) return fformat try: from numpy import percentile except ImportError: # Stolen from numpy 1.7.0 for backward compatibility def percentile(a, q, axis=None, out=None, overwrite_input=False): """ Compute the qth percentile of the data along the specified axis. Returns the qth percentile of the array elements. 
Parameters ---------- a : array_like Input array or object that can be converted to an array. q : float in range of [0,100] (or sequence of floats) Percentile to compute which must be between 0 and 100 inclusive. axis : int, optional Axis along which the percentiles are computed. The default (None) is to compute the median along a flattened version of the array. out : ndarray, optional Alternative output array in which to place the result. It must have the same shape and buffer length as the expected output, but the type (of the output) will be cast if necessary. overwrite_input : bool, optional If True, then allow use of memory of input array `a` for calculations. The input array will be modified by the call to median. This will save memory when you do not need to preserve the contents of the input array. Treat the input as undefined, but it will probably be fully or partially sorted. Default is False. Note that, if `overwrite_input` is True and the input is not already an array, an error will be raised. Returns ------- pcntile : ndarray A new array holding the result (unless `out` is specified, in which case that array is returned instead). If the input contains integers, or floats of smaller precision than 64, then the output data-type is float64. Otherwise, the output data-type is the same as that of the input. See Also -------- mean, median Notes ----- Given a vector V of length N, the qth percentile of V is the qth ranked value in a sorted copy of V. A weighted average of the two nearest neighbors is used if the normalized ranking does not match q exactly. The same as the median if ``q=50``, the same as the minimum if ``q=0`` and the same as the maximum if ``q=100``. Examples -------- >>> a = np.array([[10, 7, 4], [3, 2, 1]]) >>> a array([[10, 7, 4], [ 3, 2, 1]]) >>> np.percentile(a, 50) 3.5 >>> np.percentile(a, 0.5, axis=0) array([ 6.5, 4.5, 2.5]) >>> np.percentile(a, 50, axis=1) array([ 7., 2.]) >>> m = np.percentile(a, 50, axis=0) >>> out = np.zeros_like(m) >>> np.percentile(a, 50, axis=0, out=m) array([ 6.5, 4.5, 2.5]) >>> m array([ 6.5, 4.5, 2.5]) >>> b = a.copy() >>> np.percentile(b, 50, axis=1, overwrite_input=True) array([ 7., 2.]) >>> assert not np.all(a==b) >>> b = a.copy() >>> np.percentile(b, 50, axis=None, overwrite_input=True) 3.5 """ a = np.asarray(a) if q == 0: return a.min(axis=axis, out=out) elif q == 100: return a.max(axis=axis, out=out) if overwrite_input: if axis is None: sorted = a.ravel() sorted.sort() else: a.sort(axis=axis) sorted = a else: sorted = np.sort(a, axis=axis) if axis is None: axis = 0 return _compute_qth_percentile(sorted, q, axis, out) # handle sequence of q's without calling sort multiple times def _compute_qth_percentile(sorted, q, axis, out): if not np.isscalar(q): p = [_compute_qth_percentile(sorted, qi, axis, None) for qi in q] if out is not None: out.flat = p return p q = q / 100.0 if (q < 0) or (q > 1): raise ValueError("percentile must be either in the range [0,100]") indexer = [slice(None)] * sorted.ndim Nx = sorted.shape[axis] index = q * (Nx - 1) i = int(index) if i == index: indexer[axis] = slice(i, i + 1) weights = np.array(1) sumval = 1.0 else: indexer[axis] = slice(i, i + 2) j = i + 1 weights = np.array([(j - index), (index - i)], float) wshape = [1] * sorted.ndim wshape[axis] = 2 weights.shape = wshape sumval = weights.sum() # Use add.reduce in both cases to coerce data type as well as # check and use out array. 
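    # (Added worked example: for q=50 on the sorted data [1, 2, 3, 4],
    # index = 0.5 * 3 = 1.5, so the slice picks the values 2 and 3 with
    # weights 0.5 each, giving (2*0.5 + 3*0.5) / 1.0 = 2.5.)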
return np.add.reduce(sorted[indexer] * weights, axis=axis, out=out) / sumval class Image(object): """This class defines images. As such, it contains data of the different *channels* of the image (red, green, and blue for example). The *mode* tells if the channels define a black and white image ("L"), an rgb image ("RGB"), an YCbCr image ("YCbCr"), or an indexed image ("P"), in which case a *palette* is needed. Each mode has also a corresponding alpha mode, which is the mode with an "A" in the end: for example "RGBA" is rgb with an alpha channel. *fill_value* sets how the image is filled where data is missing, since channels are numpy masked arrays. Setting it to (0,0,0) in RGB mode for example will produce black where data is missing."None" (default) will produce transparency (thus adding an alpha channel) if the file format allows it, black otherwise. The channels are considered to contain floating point values in the range [0.0,1.0]. In order to normalize the input data, the *color_range* parameter defines the original range of the data. The conversion to the classical [0,255] range and byte type is done automagically when saving the image to file. """ modes = ["L", "LA", "RGB", "RGBA", "YCbCr", "YCbCrA", "P", "PA"] def __init__(self, channels=None, mode="L", color_range=None, fill_value=None, palette=None): self.channels = None self.mode = None self.width = 0 self.height = 0 self.fill_value = None self.palette = None self.shape = None self.info = {} self._secondary_mode = "RGB" if(channels is not None and not isinstance(channels, (tuple, set, list, np.ndarray, np.ma.core.MaskedArray))): raise TypeError("Channels should a tuple, set, list, numpy array, " "or masked array.") if(isinstance(channels, (tuple, list)) and len(channels) != len(re.findall("[A-Z]", mode))): raise ValueError("Number of channels does not match mode.") if mode not in self.modes: raise ValueError("Unknown mode.") if(color_range is not None and not _is_pair(color_range) and not _is_list_of_pairs(color_range)): raise ValueError("Color_range should be a pair" " or a list/tuple/set of pairs.") if(color_range is not None and _is_list_of_pairs(color_range) and (channels is None or len(color_range) != len(channels))): raise ValueError("Color_range length does not match number of " "channels.") if(color_range is not None and (((mode == "L" or mode == "P") and not _is_pair(color_range)) and (len(color_range) != len(re.findall("[A-Z]", mode))))): raise ValueError("Color_range does not match mode") self.mode = mode if isinstance(fill_value, (tuple, list, set)): self.fill_value = list(fill_value) elif fill_value is not None: self.fill_value = [fill_value] else: self.fill_value = None self.channels = [] self.palette = palette if isinstance(channels, (tuple, list)): if _areinstances(channels, (np.ma.core.MaskedArray, np.ndarray, list, tuple)): for i, chn in enumerate(channels): if color_range is not None: color_min = color_range[i][0] color_max = color_range[i][1] else: color_min = 0.0 color_max = 1.0 # Add data to image object as a channel self._add_channel(chn, color_min, color_max) self.shape = self.channels[-1].shape if self.shape != self.channels[0].shape: raise ValueError("Channels must have the same shape.") self.height = self.shape[0] self.width = self.shape[1] else: raise ValueError("Channels must all be arrays, lists or " "tuples.") elif channels is not None: self.height = channels.shape[0] self.width = channels.shape[1] self.shape = channels.shape if color_range is not None: color_min = color_range[0] color_max = 
color_range[1] else: color_min = 0.0 color_max = 1.0 # Add data to image object as a channel self._add_channel(channels, color_min, color_max) else: self.shape = (0, 0) self.width = 0 self.height = 0 def _add_channel(self, chn, color_min, color_max): """Adds a channel to the image object """ if isinstance(chn, np.ma.core.MaskedArray): chn_data = chn.data chn_mask = chn.mask else: chn_data = np.array(chn) chn_mask = False scaled = ((chn_data - color_min) * 1.0 / (color_max - color_min)) self.channels.append(np.ma.array(scaled, mask=chn_mask)) def _finalize(self, dtype=np.uint8): """Finalize the image, that is put it in RGB mode, and set the channels in 8bit format ([0,255] range). """ channels = [] if self.mode == "P": self.convert("RGB") if self.mode == "PA": self.convert("RGBA") for chn in self.channels: if isinstance(chn, np.ma.core.MaskedArray): final_data = chn.data.clip(0, 1) * np.iinfo(dtype).max else: final_data = chn.clip(0, 1) * np.iinfo(dtype).max channels.append(np.ma.array(final_data, dtype, mask=chn.mask)) if self.fill_value is not None: fill_value = [int(col * np.iinfo(dtype).max) for col in self.fill_value] else: fill_value = None return channels, fill_value def is_empty(self): """Checks for an empty image. """ if(((self.channels == []) and (not self.shape == (0, 0))) or ((not self.channels == []) and (self.shape == (0, 0)))): raise RuntimeError("Channels-shape mismatch.") return self.channels == [] and self.shape == (0, 0) def show(self): """Display the image on screen. """ self.pil_image().show() def pil_image(self): """Return a PIL image from the current image. """ channels, fill_value = self._finalize() if self.is_empty(): return Pil.new(self.mode, (0, 0)) if(self.mode == "L"): if fill_value is not None: img = Pil.fromarray(channels[0].filled(fill_value)) else: img = Pil.fromarray(channels[0].filled(0)) alpha = np.zeros(channels[0].shape, np.uint8) mask = np.ma.getmaskarray(channels[0]) alpha = np.where(mask, alpha, 255) pil_alpha = Pil.fromarray(alpha) img = Pil.merge("LA", (img, pil_alpha)) elif(self.mode == "LA"): if fill_value is not None: img = Pil.fromarray(channels[0].filled(fill_value)) pil_alpha = Pil.fromarray(channels[1]) else: img = Pil.fromarray(channels[0].filled(0)) alpha = np.zeros(channels[0].shape, np.uint8) mask = np.ma.getmaskarray(channels[0]) alpha = np.where(mask, alpha, channels[1]) pil_alpha = Pil.fromarray(alpha) img = Pil.merge("LA", (img, pil_alpha)) elif(self.mode == "RGB"): # Mask where all channels have missing data (incomplete data will # be shown). mask = (np.ma.getmaskarray(channels[0]) & np.ma.getmaskarray(channels[1]) & np.ma.getmaskarray(channels[2])) if fill_value is not None: pil_r = Pil.fromarray(channels[0].filled(fill_value[0])) pil_g = Pil.fromarray(channels[1].filled(fill_value[1])) pil_b = Pil.fromarray(channels[2].filled(fill_value[2])) img = Pil.merge("RGB", (pil_r, pil_g, pil_b)) else: pil_r = Pil.fromarray(channels[0].filled(0)) pil_g = Pil.fromarray(channels[1].filled(0)) pil_b = Pil.fromarray(channels[2].filled(0)) alpha = np.zeros(channels[0].shape, np.uint8) alpha = np.where(mask, alpha, 255) pil_a = Pil.fromarray(alpha) img = Pil.merge("RGBA", (pil_r, pil_g, pil_b, pil_a)) elif(self.mode == "RGBA"): # Mask where all channels have missing data (incomplete data will # be shown). 
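            # (Added note: AND-ing the channel masks makes a pixel fully
            # transparent only where *all* channels are masked; a pixel
            # masked in just one channel is still rendered.)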
mask = (np.ma.getmaskarray(channels[0]) & np.ma.getmaskarray(channels[1]) & np.ma.getmaskarray(channels[2]) & np.ma.getmaskarray(channels[3])) if fill_value is not None: pil_r = Pil.fromarray(channels[0].filled(fill_value[0])) pil_g = Pil.fromarray(channels[1].filled(fill_value[1])) pil_b = Pil.fromarray(channels[2].filled(fill_value[2])) pil_a = Pil.fromarray(channels[3].filled(fill_value[3])) img = Pil.merge("RGBA", (pil_r, pil_g, pil_b, pil_a)) else: pil_r = Pil.fromarray(channels[0].filled(0)) pil_g = Pil.fromarray(channels[1].filled(0)) pil_b = Pil.fromarray(channels[2].filled(0)) alpha = np.where(mask, 0, channels[3]) pil_a = Pil.fromarray(alpha) img = Pil.merge("RGBA", (pil_r, pil_g, pil_b, pil_a)) else: raise TypeError("Does not know how to use mode %s." % (self.mode)) return img def save(self, filename, compression=6, fformat=None): """Save the image to the given *filename*. For some formats like jpg and png, the work is delegated to :meth:`pil_save`, which doesn't support the *compression* option. """ self.pil_save(filename, compression, fformat) def pil_save(self, filename, compression=6, fformat=None): """Save the image to the given *filename* using PIL. For now, the compression level [0-9] is ignored, due to PIL's lack of support. See also :meth:`save`. """ # PIL does not support compression option. del compression if self.is_empty(): raise IOError("Cannot save an empty image") ensure_dir(filename) fformat = fformat or os.path.splitext(filename)[1][1:4] fformat = check_image_format(fformat) params = {} if fformat == 'png': # Take care of GeoImage.tags. params['pnginfo'] = self._pngmeta() self.pil_image().save(filename, fformat, **params) def _pngmeta(self): """It will return GeoImage.tags as a PNG metadata object. Inspired by: public domain, Nick Galbreath http://blog.modp.com/2007/08/python-pil-and-png-metadata-take-2.html """ reserved = ('interlace', 'gamma', 'dpi', 'transparency', 'aspect') try: tags = self.tags except AttributeError: tags = {} # Undocumented class from PIL import PngImagePlugin meta = PngImagePlugin.PngInfo() # Copy from tags to new dict for k__, v__ in tags.items(): if k__ not in reserved: meta.add_text(k__, v__, 0) return meta def putalpha(self, alpha): """Adds an *alpha* channel to the current image, or replaces it with *alpha* if it already exists. """ alpha = np.ma.array(alpha) if(not (alpha.shape[0] == 0 and self.shape[0] == 0) and alpha.shape != self.shape): raise ValueError("Alpha channel shape should match image shape") if(not self.mode.endswith("A")): self.convert(self.mode + "A") if not self.is_empty(): self.channels[-1] = alpha def _rgb2ycbcr(self, mode): """Convert the image from RGB mode to YCbCr.""" self._check_modes(("RGB", "RGBA")) (self.channels[0], self.channels[1], self.channels[2]) = \ rgb2ycbcr(self.channels[0], self.channels[1], self.channels[2]) if self.fill_value is not None: self.fill_value[0:3] = rgb2ycbcr(self.fill_value[0], self.fill_value[1], self.fill_value[2]) self.mode = mode def _ycbcr2rgb(self, mode): """Convert the image from YCbCr mode to RGB. """ self._check_modes(("YCbCr", "YCbCrA")) (self.channels[0], self.channels[1], self.channels[2]) = \ ycbcr2rgb(self.channels[0], self.channels[1], self.channels[2]) if self.fill_value is not None: self.fill_value[0:3] = ycbcr2rgb(self.fill_value[0], self.fill_value[1], self.fill_value[2]) self.mode = mode def _to_p(self, mode): """Convert the image to P or PA mode. 
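        The conversion scans the pixels, collects each previously unseen
        color tuple into the palette, and stores that palette index in the
        single remaining channel.  A rough standalone sketch of the idea on
        a plain RGB array (hypothetical names, numpy only)::

            palette, indexed = [], np.zeros(rgb.shape[:2], int)
            for i, j in np.ndindex(*rgb.shape[:2]):
                color = tuple(rgb[i, j])
                if color not in palette:
                    palette.append(color)
                indexed[i, j] = palette.index(color)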
""" if self.mode.endswith("A"): chans = self.channels[:-1] alpha = self.channels[-1] self._secondary_mode = self.mode[:-1] else: chans = self.channels alpha = None self._secondary_mode = self.mode palette = [] selfmask = reduce(np.ma.mask_or, [chn.mask for chn in chans]) new_chn = np.ma.zeros(self.shape, dtype=int) color_nb = 0 for i in range(self.height): for j in range(self.width): current_col = tuple([chn[i, j] for chn in chans]) try: (idx for idx in range(len(palette)) if palette[idx] == current_col).next() except StopIteration: idx = color_nb palette.append(current_col) color_nb = color_nb + 1 new_chn[i, j] = idx if self.fill_value is not None: if self.mode.endswith("A"): current_col = tuple(self.fill_value[:-1]) fill_alpha = [self.fill_value[-1]] else: current_col = tuple(self.fill_value) fill_alpha = [] try: (idx for idx in range(len(palette)) if palette[idx] == current_col).next() except StopIteration: idx = color_nb palette.append(current_col) color_nb = color_nb + 1 self.fill_value = [idx] + fill_alpha new_chn.mask = selfmask self.palette = palette if alpha is None: self.channels = [new_chn] else: self.channels = [new_chn, alpha] self.mode = mode def _from_p(self, mode): """Convert the image from P or PA mode. """ self._check_modes(("P", "PA")) if self.mode.endswith("A"): alpha = self.channels[-1] else: alpha = None chans = [] cdfs = [] color_chan = self.channels[0] for i in range(len(self.palette[0])): cdfs.append(np.zeros(len(self.palette))) for j in range(len(self.palette)): cdfs[i][j] = self.palette[j][i] new_chn = np.ma.array(np.interp(color_chan, np.arange(len(self.palette)), cdfs[i]), mask=color_chan.mask) chans.append(new_chn) if self.fill_value is not None: if alpha is not None: fill_alpha = self.fill_value[-1] self.fill_value = list(self.palette[int(self.fill_value[0])]) self.fill_value += [fill_alpha] else: self.fill_value = list(self.palette[int(self.fill_value[0])]) self.mode = self._secondary_mode self.channels = chans if alpha is not None: self.channels.append(alpha) self.mode = self.mode + "A" self.convert(mode) def _check_modes(self, modes): """Check that the image is in on of the given *modes*, raise an exception otherwise. """ if not isinstance(modes, (tuple, list, set)): modes = [modes] if self.mode not in modes: raise ValueError("Image not in suitable mode: %s" % modes) def _l2rgb(self, mode): """Convert from L (black and white) to RGB. """ self._check_modes(("L", "LA")) self.channels.append(self.channels[0].copy()) self.channels.append(self.channels[0].copy()) if self.fill_value is not None: self.fill_value = self.fill_value[:1] * 3 + self.fill_value[1:] if self.mode == "LA": self.channels[1], self.channels[3] = \ self.channels[3], self.channels[1] self.mode = mode def _rgb2l(self, mode): """Convert from RGB to monochrome L. """ self._check_modes(("RGB", "RGBA")) kb_ = 0.114 kr_ = 0.299 r__ = self.channels[0] g__ = self.channels[1] b__ = self.channels[2] y__ = kr_ * r__ + (1 - kr_ - kb_) * g__ + kb_ * b__ if self.fill_value is not None: self.fill_value = ([rgb2ycbcr(self.fill_value[0], self.fill_value[1], self.fill_value[2])[0]] + self.fill_value[3:]) self.channels = [y__] + self.channels[3:] self.mode = mode def _ycbcr2l(self, mode): """Convert from YCbCr to L. """ self._check_modes(("YCbCr", "YCbCrA")) self.channels = [self.channels[0]] + self.channels[3:] if self.fill_value is not None: self.fill_value = [self.fill_value[0]] + self.fill_value[3:] self.mode = mode def _l2ycbcr(self, mode): """Convert from L to YCbCr. 
""" self._check_modes(("L", "LA")) luma = self.channels[0] zeros = np.ma.zeros(luma.shape) zeros.mask = luma.mask self.channels = [luma, zeros, zeros] + self.channels[1:] if self.fill_value is not None: self.fill_value = [self.fill_value[0], 0, 0] + self.fill_value[1:] self.mode = mode def convert(self, mode): """Convert the current image to the given *mode*. See :class:`Image` for a list of available modes. """ if mode == self.mode: return if mode not in ["L", "LA", "RGB", "RGBA", "YCbCr", "YCbCrA", "P", "PA"]: raise ValueError("Mode %s not recognized." % (mode)) if self.is_empty(): self.mode = mode return if(mode == self.mode + "A"): self.channels.append(np.ma.ones(self.channels[0].shape)) if self.fill_value is not None: self.fill_value += [1] self.mode = mode elif(mode + "A" == self.mode): self.channels = self.channels[:-1] if self.fill_value is not None: self.fill_value = self.fill_value[:-1] self.mode = mode elif(mode.endswith("A") and not self.mode.endswith("A")): self.convert(self.mode + "A") self.convert(mode) elif(self.mode.endswith("A") and not mode.endswith("A")): self.convert(self.mode[:-1]) self.convert(mode) else: cases = { "RGB": {"YCbCr": self._rgb2ycbcr, "L": self._rgb2l, "P": self._to_p}, "RGBA": {"YCbCrA": self._rgb2ycbcr, "LA": self._rgb2l, "PA": self._to_p}, "YCbCr": {"RGB": self._ycbcr2rgb, "L": self._ycbcr2l, "P": self._to_p}, "YCbCrA": {"RGBA": self._ycbcr2rgb, "LA": self._ycbcr2l, "PA": self._to_p}, "L": {"RGB": self._l2rgb, "YCbCr": self._l2ycbcr, "P": self._to_p}, "LA": {"RGBA": self._l2rgb, "YCbCrA": self._l2ycbcr, "PA": self._to_p}, "P": {"RGB": self._from_p, "YCbCr": self._from_p, "L": self._from_p}, "PA": {"RGBA": self._from_p, "YCbCrA": self._from_p, "LA": self._from_p}} try: cases[self.mode][mode](mode) except KeyError: raise ValueError("Conversion from %s to %s not implemented !" % (self.mode, mode)) def clip(self, channels=True): """Limit the values of the array to the default [0,1] range. *channels* says which channels should be clipped.""" if not (isinstance(channels, (tuple, list))): channels = [channels] * len(self.channels) for i in range(len(self.channels)): if channels[i]: self.channels[i] = np.ma.clip(self.channels[i], 0.0, 1.0) def resize(self, shape): """Resize the image to the given *shape* tuple, in place. For zooming, nearest neighbour method is used, while for shrinking, decimation is used. Therefore, *shape* must be a multiple or a divisor of the image shape. 
""" if self.is_empty(): raise ValueError("Cannot resize an empty image") factor = [1, 1] zoom = [True, True] zoom[0] = shape[0] >= self.height zoom[1] = shape[1] >= self.width if zoom[0]: factor[0] = shape[0] * 1.0 / self.height else: factor[0] = self.height * 1.0 / shape[0] if zoom[1]: factor[1] = shape[1] * 1.0 / self.width else: factor[1] = self.width * 1.0 / shape[1] if(int(factor[0]) != factor[0] or int(factor[1]) != factor[1]): raise ValueError("Resize not of integer factor!") factor[0] = int(factor[0]) factor[1] = int(factor[1]) i = 0 for chn in self.channels: if zoom[0]: chn = chn.repeat([factor[0]] * chn.shape[0], axis=0) else: chn = chn[[idx * factor[0] for idx in range(self.height / factor[0])], :] if zoom[1]: self.channels[i] = chn.repeat([factor[1]] * chn.shape[1], axis=1) else: self.channels[i] = chn[:, [idx * factor[1] for idx in range(self.width / factor[1])]] i = i + 1 self.height = self.channels[0].shape[0] self.width = self.channels[0].shape[1] self.shape = self.channels[0].shape def replace_luminance(self, luminance): """Replace the Y channel of the image by the array *luminance*. If the image is not in YCbCr mode, it is converted automatically to and from that mode. """ if self.is_empty(): return if (luminance.shape != self.channels[0].shape): if ((luminance.shape[0] * 1.0 / luminance.shape[1]) == (self.channels[0].shape[0] * 1.0 / self.channels[0].shape[1])): if luminance.shape[0] > self.channels[0].shape[0]: self.resize(luminance.shape) else: raise NameError("Luminance smaller than the image !") else: raise NameError("Not the good shape !") mode = self.mode if mode.endswith("A"): self.convert("YCbCrA") self.channels[0] = luminance self.convert(mode) else: self.convert("YCbCr") self.channels[0] = luminance self.convert(mode) def enhance(self, inverse=False, gamma=1.0, stretch="no"): """Image enhancement function. It applies **in this order** inversion, gamma correction, and stretching to the current image, with parameters *inverse* (see :meth:`Image.invert`), *gamma* (see :meth:`Image.gamma`), and *stretch* (see :meth:`Image.stretch`). """ self.invert(inverse) self.gamma(gamma) self.stretch(stretch) def gamma(self, gamma=1.0): """Apply gamma correction to the channels of the image. If *gamma* is a tuple, then it should have as many elements as the channels of the image, and the gamma correction is applied elementwise. If *gamma* is a number, the same gamma correction is applied on every channel, if there are several channels in the image. The behaviour of :func:`gamma` is undefined outside the normal [0,1] range of the channels. 
""" if not isinstance(gamma, (int, long, float)): if(not isinstance(gamma, (tuple, list, set)) or not _areinstances(gamma, (int, long, float))): raise TypeError("Gamma should be a real number, or an iterable " "of real numbers.") if(isinstance(gamma, (list, tuple, set)) and len(gamma) != len(self.channels)): raise ValueError("Number of channels and gamma components differ.") if gamma < 0: raise ValueError("Gamma correction must be a positive number.") if gamma == 1.0: return if (isinstance(gamma, (tuple, list))): gamma_list = list(gamma) else: gamma_list = [gamma] * len(self.channels) for i in range(len(self.channels)): if(isinstance(self.channels[i], np.ma.core.MaskedArray)): if ne: self.channels[i] = np.ma.array( ne.evaluate("data ** (1.0 / gamma)", local_dict={"data": self.channels[i].data, 'gamma': gamma_list[i]}), mask=self.channels[i].mask, copy=False) else: self.channels[i] = np.ma.array(self.channels[i].data ** (1.0 / gamma_list[i]), mask=self.channels[i].mask, copy=False) else: self.channels[i] = np.where(self.channels[i] >= 0, self.channels[i] ** (1.0 / gamma_list[i]), self.channels[i]) def stretch(self, stretch="no", **kwarg): """Apply stretching to the current image. The value of *stretch* sets the type of stretching applied. The values "histogram", "linear", "crude" (or "crude-stretch") perform respectively histogram equalization, contrast stretching (with 5% cutoff on both sides), and contrast stretching without cutoff. The value "logarithmic" or "log" will do a logarithmic enhancement towards white. If a tuple or a list of two values is given as input, then a contrast stretching is performed with the values as cutoff. These values should be normalized in the range [0.0,1.0]. """ if((isinstance(stretch, tuple) or isinstance(stretch, list))): if len(stretch) == 2: for i in range(len(self.channels)): self.stretch_linear(i, cutoffs=stretch, **kwarg) else: raise ValueError( "Stretch tuple must have exactly two elements") elif stretch == "linear": for i in range(len(self.channels)): self.stretch_linear(i, **kwarg) elif stretch == "histogram": for i in range(len(self.channels)): self.stretch_hist_equalize(i, **kwarg) elif(stretch in ["crude", "crude-stretch"]): for i in range(len(self.channels)): self.crude_stretch(i, **kwarg) elif(stretch in ["log", "logarithmic"]): for i in range(len(self.channels)): self.stretch_logarithmic(i, **kwarg) elif(stretch == "no"): return elif isinstance(stretch, str): raise ValueError("Stretching method %s not recognized." % stretch) else: raise TypeError("Stretch parameter must be a string or a tuple.") def invert(self, invert=True): """Inverts all the channels of a image according to *invert*. If invert is a tuple or a list, elementwise invertion is performed, otherwise all channels are inverted if *invert* is true (default). """ if(isinstance(invert, (tuple, list, set)) and len(self.channels) != len(invert)): raise ValueError( "Number of channels and invert components differ.") if isinstance(invert, (tuple, list, set)): i = 0 for chn in self.channels: if(invert[i]): self.channels[i] = 1.0 - chn i = i + 1 elif(invert): i = 0 for chn in self.channels: self.channels[i] = 1.0 - chn i = i + 1 def stretch_hist_equalize(self, ch_nb): """Stretch the current image's colors by performing histogram equalization on channel *ch_nb*. 
""" LOG.info("Perform a histogram equalized contrast stretch.") if(self.channels[ch_nb].size == np.ma.count_masked(self.channels[ch_nb])): LOG.warning("Nothing to stretch !") return arr = self.channels[ch_nb] nwidth = 2048.0 carr = arr.compressed() cdf = np.arange(0.0, 1.0, 1 / nwidth) LOG.debug("Make histogram bins having equal amount of data, " + "using numpy percentile function:") bins = percentile(carr, list(cdf * 100)) res = np.ma.empty_like(arr) res.mask = np.ma.getmaskarray(arr) res[~res.mask] = np.interp(carr, bins, cdf) self.channels[ch_nb] = res def stretch_logarithmic(self, ch_nb, factor=100.): """Move data into range [1:factor] and do a normalized logarithmic enhancement. """ LOG.debug("Perform a logarithmic contrast stretch.") if ((self.channels[ch_nb].size == np.ma.count_masked(self.channels[ch_nb])) or (self.channels[ch_nb].min() == self.channels[ch_nb].max())): LOG.warning("Nothing to stretch !") return crange = (0., 1.0) arr = self.channels[ch_nb] b = float(crange[1] - crange[0]) / np.log(factor) c = float(crange[0]) slope = (factor - 1.) / float(arr.max() - arr.min()) arr = 1. + (arr - arr.min()) * slope arr = c + b * np.log(arr) self.channels[ch_nb] = arr def stretch_linear(self, ch_nb, cutoffs=(0.005, 0.005)): """Stretch linearly the contrast of the current image on channel *ch_nb*, using *cutoffs* for left and right trimming. """ LOG.debug("Perform a linear contrast stretch.") if((self.channels[ch_nb].size == np.ma.count_masked(self.channels[ch_nb])) or self.channels[ch_nb].min() == self.channels[ch_nb].max()): LOG.warning("Nothing to stretch !") return arr = self.channels[ch_nb] carr = arr.compressed() LOG.debug("Calculate the histogram percentiles: ") LOG.debug("Left and right percentiles: " + str(cutoffs[0] * 100) + " " + str(cutoffs[1] * 100)) left, right = percentile(carr, [cutoffs[0] * 100, 100. - cutoffs[1] * 100]) delta_x = (right - left) LOG.debug("Interval: left=%f, right=%f width=%f" % (left, right, delta_x)) if delta_x > 0.0: self.channels[ch_nb] = np.ma.array((arr - left) / delta_x, mask=arr.mask) else: self.channels[ch_nb] = np.ma.zeros(arr.shape) LOG.warning("Unable to make a contrast stretch!") def crude_stretch(self, ch_nb, min_stretch=None, max_stretch=None): """Perform simple linear stretching (without any cutoff) on the channel *ch_nb* of the current image and normalize to the [0,1] range.""" if(min_stretch is None): min_stretch = self.channels[ch_nb].min() if(max_stretch is None): max_stretch = self.channels[ch_nb].max() if((not self.channels[ch_nb].mask.all()) and max_stretch - min_stretch > 0): stretched = self.channels[ch_nb].data.astype(np.float) stretched -= min_stretch stretched /= max_stretch - min_stretch self.channels[ch_nb] = np.ma.array(stretched, mask=self.channels[ch_nb].mask, copy=False) else: LOG.warning("Nothing to stretch !") def merge(self, img): """Use the provided image as background for the current *img* image, that is if the current image has missing data. 
""" if self.is_empty(): raise ValueError("Cannot merge an empty image.") if(self.mode != img.mode): raise ValueError("Cannot merge image of different modes.") selfmask = reduce(np.ma.mask_or, [chn.mask for chn in self.channels]) for i in range(len(self.channels)): self.channels[i] = np.ma.where(selfmask, img.channels[i], self.channels[i]) self.channels[i].mask = np.logical_and(selfmask, img.channels[i].mask) def all(iterable): for element in iterable: if not element: return False return True def _areinstances(the_list, types): """Check if all the elements of the list are of given type. """ return all([isinstance(item, types) for item in the_list]) def _is_pair(item): """Check if an item is a pair (tuple of size 2). """ return (isinstance(item, (list, tuple, set)) and len(item) == 2 and not isinstance(item[0], (list, tuple, set)) and not isinstance(item[1], (list, tuple, set))) def _is_list_of_pairs(the_list): """Check if a list contains only pairs. """ return all([_is_pair(item) for item in the_list]) def ycbcr2rgb(y__, cb_, cr_): """Convert the three YCbCr channels to RGB channels. """ kb_ = 0.114 kr_ = 0.299 r__ = 2 * cr_ / (1 - kr_) + y__ b__ = 2 * cb_ / (1 - kb_) + y__ g__ = (y__ - kr_ * r__ - kb_ * b__) / (1 - kr_ - kb_) return r__, g__, b__ def rgb2ycbcr(r__, g__, b__): """Convert the three RGB channels to YCbCr.""" kb_ = 0.114 kr_ = 0.299 y__ = kr_ * r__ + (1 - kr_ - kb_) * g__ + kb_ * b__ cb_ = 1. / (2 * (1 - kb_)) * (b__ - y__) cr_ = 1. / (2 * (1 - kr_)) * (r__ - y__) return y__, cb_, cr_ mpop-1.5.0/mpop/imageo/image_processing.py000066400000000000000000000030131317160620000205650ustar00rootroot00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright (c) 2009. # SMHI, # Folkborgsvägen 1, # Norrköping, # Sweden # Author(s): # Martin Raspaud # Adam Dybbroe # This file is part of the mpop. # mpop is free software: you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # mpop is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # You should have received a copy of the GNU General Public License # along with mpop. If not, see . """Miscellaneous image processing tools. """ import numpy def gamma_correction(arr, gamma): """Perform gamma correction *g* to an array *arr*, which is assumed to be in the range [0.0,1.0], and return the resulting array (same range). """ return arr ** (1.0 / gamma) def crude_stretch(arr, norm = 1, amin = None, amax = None): """Perform simple linear stretching (without any cutoff) and normalize.""" if(amin is None): amin = arr.min() if(amax is None): amax = arr.max() res = (arr - amin) * (norm * 1.0) / (amax - amin) res = numpy.where(res > norm, norm, res) res = numpy.where(res < 0, 0, res) return res mpop-1.5.0/mpop/imageo/palettes.py000066400000000000000000000342131317160620000170760ustar00rootroot00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright (c) 2009, 2013, 2015, 2016. # SMHI, # Folkborgsvägen 1, # Norrköping, # Sweden # Author(s): # Martin Raspaud # Adam Dybbroe # This file is part of the mpop. 
# mpop is free software: you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # mpop is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # You should have received a copy of the GNU General Public License # along with mpop. If not, see . """Palette holder module. """ import numpy as np def tv_legend(): """Palette for TV. """ legend = [] legend.append((0, 0, 0)) # Unprocessed: Black legend.append((0, 120, 0)) # Land legend.append((0, 0, 215)) # Sea: Blue legend.append((0, 120, 0)) # Land (Snow on land) legend.append((0, 0, 215)) # Sea: Blue (Snow/Ice on sea) for i in range(5, 256): # All other pixel values are grey according to IR temp. legend.append((i, i, i)) return convert_palette(legend) def vv_legend(): """Palette for Swedish road authorities (Vägverket). """ legend = [] legend.append((0, 0, 0)) # Unprocessed: Black legend.append((0, 120, 0)) # Land legend.append((0, 0, 215)) # Sea: Blue # Cloud type values 5 to 8: legend.append((255, 150, 0)) # Very low cumuliform legend.append((255, 100, 0)) # Very low legend.append((255, 220, 0)) # Low cumuliform legend.append((255, 180, 0)) # Low for i in range(7, 256): # All other pixel values are grey according to IR temp. legend.append((i, i, i)) return convert_palette(legend) def cloud_phase(): """Palette for cloud phase. """ legend = [] legend.append((0, 0, 0)) # Unprocessed: Black legend.append((0, 0, 215)) # Water Clouds: Blue legend.append((240, 240, 240)) # Ice Clouds: Almost White legend.append((120, 120, 0)) # Uncertain Phase: ? return convert_palette(legend) def cms_modified(): """Palette for regular cloud classification. """ return nwcsaf_cloudtype() def nwcsaf_cloudtype(): """Palette for regular cloud classification. """ legend = [] legend.append((100, 100, 100)) # Unprocessed: Grey legend.append((0, 120, 0)) legend.append((0, 0, 0)) # Sea: Black legend.append((250, 190, 250)) # Snow legend.append((220, 160, 220)) # Sea-ice legend.append((255, 150, 0)) # Very low cumuliform legend.append((255, 100, 0)) # Very low legend.append((255, 220, 0)) # Low cumuliform legend.append((255, 180, 0)) # Low legend.append((255, 255, 140)) # Medium cumuliform legend.append((240, 240, 0)) # Medium legend.append((250, 240, 200)) # High cumiliform legend.append((215, 215, 150)) # High legend.append((255, 255, 255)) # Very high cumuliform legend.append((230, 230, 230)) # Very high legend.append((0, 80, 215)) # Semi-transparent thin legend.append((0, 180, 230)) # Semi-transparent medium legend.append((0, 240, 240)) # Semi-transparent thick legend.append((90, 200, 160)) # Semi-transparent above legend.append((200, 0, 200)) # Broken legend.append((95, 60, 30)) # Undefined: Brown return convert_palette(legend) def ctth_height(): """CTTH height palette. 
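        Judging from the inline comments below, index 1 (0 meters) is pink
        and the ramp runs through red, yellow, green and cyan up to white
        at index 19 (10,000 meters); the remaining entries are padded with
        white and a final grey.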
""" legend = [] legend.append((0, 0, 0)) legend.append((255, 0, 216)) # 0 meters legend.append((126, 0, 43)) legend.append((153, 20, 47)) legend.append((178, 51, 0)) legend.append((255, 76, 0)) legend.append((255, 102, 0)) legend.append((255, 164, 0)) legend.append((255, 216, 0)) legend.append((216, 255, 0)) legend.append((178, 255, 0)) legend.append((153, 255, 0)) legend.append((0, 255, 0)) legend.append((0, 140, 48)) legend.append((0, 178, 255)) legend.append((0, 216, 255)) legend.append((0, 255, 255)) legend.append((238, 214, 210)) legend.append((239, 239, 223)) legend.append((255, 255, 255)) # 10,000 meters for i in range(79): legend.append((255, 255, 255)) legend.append((224, 224, 224)) return convert_palette(legend) def ctth_height_pps(): """CTTH height palette for NWCSAF/PPS. Identical to the one found in the hdf5 files. """ legend = [] legend.append((255, 0, 216)) # 0 meters legend.append((255, 0, 216)) # 0 meters legend.append((255, 0, 216)) # 0 meters legend.append((126, 0, 43)) legend.append((126, 0, 43)) legend.append((153, 20, 47)) legend.append((153, 20, 47)) legend.append((153, 20, 47)) legend.append((178, 51, 0)) legend.append((178, 51, 0)) legend.append((255, 76, 0)) legend.append((255, 76, 0)) legend.append((255, 76, 0)) legend.append((255, 102, 0)) legend.append((255, 102, 0)) legend.append((255, 164, 0)) legend.append((255, 164, 0)) legend.append((255, 164, 0)) legend.append((255, 216, 0)) legend.append((255, 216, 0)) legend.append((216, 255, 0)) legend.append((216, 255, 0)) legend.append((178, 255, 0)) legend.append((178, 255, 0)) legend.append((178, 255, 0)) legend.append((153, 255, 0)) legend.append((153, 255, 0)) legend.append((0, 255, 0)) legend.append((0, 255, 0)) legend.append((0, 255, 0)) legend.append((0, 140, 48)) legend.append((0, 140, 48)) legend.append((0, 178, 255)) legend.append((0, 178, 255)) legend.append((0, 178, 255)) legend.append((0, 216, 255)) legend.append((0, 216, 255)) legend.append((0, 255, 255)) legend.append((0, 255, 255)) legend.append((0, 255, 255)) legend.append((238, 214, 210)) legend.append((238, 214, 210)) legend.append((239, 239, 223)) legend.append((239, 239, 223)) for idx in range(47, 150): legend.append((255, 255, 255)) # 10,000 meters for idx in range(150, 256): legend.append((0, 0, 0)) return convert_palette(legend) def chlorophyll_a(): """Chlorophyll-A legend for MERIS""" raise NotImplementedError("This palette is not implemented - " + "it was earlier though...") # -------------------------------------------- # Define colour palette LUT for SST palette image # colour shading; blue(cold) -> green -> yellow -> red(warm) # def sstlut_osisaf_metno(): legend = [] legend.append((0, 0, 0)) # /* Black (out of image) */ legend.append((82, 82, 82)) # /* Dark grey (land) */ legend.append((187, 187, 187)) # /* Light grey (cloud contaminated) */ legend.append((255, 255, 255)) # /* White (sea ice and snow) */ legend.append((255, 0, 255)) # /* Starting at 4 = pink */ legend.append((195, 0, 129)) # /* Dark pink */ legend.append((129, 0, 47)) # /* Dark red */ legend.append((195, 0, 0)) # /* Medium dark red */ r = 255 # /* Red */ g = 0 b = 0 for i in range(8, 11, 1): legend.append((r, g, b)) r = r - 19 g = g + 43 r = 200 # /* Brown */ g = 128 b = 0 for i in range(11, 16, 1): legend.append((r, g, b)) r = r + 11 g = g + 26 b = b + 13 r = 65535 / 256 # /* Yellow */ g = 65535 / 256 b = 16185 / 256 legend.append((r, g, b)) legend.append((52000 / 256, 65535 / 256, 13500 / 256)) legend.append((35000 / 256, 65535 / 256, 7000 / 256)) r = 0 # /* Green */ g 
= 65535 / 256 b = 0 for i in range(19, 22, 1): legend.append((r, g, b)) g = g - 12422 / 256 r = 0 # /* Dark Green */ g = 28269 / 256 b = 0 for i in range(22, 26, 1): legend.append((r, g, b)) g = g - 7067 / 256 b = b + 16383 / 256 r = 0 g = 0 b = 65535 / 256 legend.append((r, g, b)) # Blue legend.append((25700 / 256, 25700 / 256, 65535 / 256)) # Dark purple legend.append((38550 / 256, 38550 / 256, 65535 / 256)) # Light purple for i in range(29, 256): legend.append((0, 0, 0)) return convert_palette(legend) def convert_palette(palette): """Convert palette from [0,255] range to [0,1]. """ new_palette = [] for i in palette: new_palette.append((i[0] / 255.0, i[1] / 255.0, i[2] / 255.0)) return new_palette def convert_palette2colormap(palette): """Convert palette from [0,255] range to [0,1]. """ from trollimage.colormap import Colormap j = 0 n_pal = len(palette) - 1 values = [] colors = [] red = [r for (r, g, b) in palette] green = [g for (r, g, b) in palette] blue = [b for (r, g, b) in palette] max_pal = max(max(red), max(blue), max(green)) if max_pal <= 1.0: # print "palette already normalized" denom = 1.0 else: # print "palette normalized to 255" denom = 255.0 for i in palette: values.append((n_pal - j) / float(n_pal)) colors.append((i[0] / denom, i[1] / denom, i[2] / denom)) j = j + 1 # reverse order to the entries values = values[::-1] colors = colors[::-1] # for i in palette: # values.append( j / float(n_pal)) # colors.append((i[0] / 255.0, i[1] / 255.0, i[2] / 255.0)) # j=j+1 # attention: # Colormap(values, colors) uses the second input option of Colormap # values has to be a list (not a tuple) and # colors has to be the corresponding list of color tuples return Colormap(values, colors) class LogColors(object): """ Defines colors to use with `logdata2image` """ def __init__(self, nodata, zeros, over, breaks): self.nodata = nodata self.zeros = zeros self.over = over self.breaks = breaks def palette(self, N=256): """ Build a palette for logarithmic data images. """ max_value = self.breaks[-1][0] pal = np.zeros((N, 3), dtype=np.uint8) b_last, rgb_last = self.breaks[0] for b, rgb in self.breaks[1:]: # Get a slice of the palette array for the current interval p = pal[ np.log(b_last + 1) * N / np.log(max_value):np.log(b + 1) * N / np.log(max_value)] for i in range(3): # red, green, blue p[:, i] = np.linspace(rgb_last[i], rgb[i], p.shape[0]) b_last = b rgb_last = rgb pal[0] = self.nodata pal[1] = self.zeros pal[-1] = self.over return pal class TriColors(LogColors): """ Use three color tones in the intervals between the elements of *breaks*. 
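    The *breaks* argument is a sequence of four data values delimiting the
    three intervals.  A minimal usage sketch (using the nodata/zeros/over
    defaults defined below)::

        colors = TriColors([0, 3.6, 23, 700])  # ISCCP-style COT intervals
        pal = colors.palette(256)              # (256, 3) uint8 palette
        # pal[0] is the nodata colour, pal[1] the colour for zeros,
        # and pal[-1] the 'over' colour for saturated values.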
""" color_tones = [((0, 0, 200), (150, 150, 255)), # dark to light blue ((150, 150, 0), (255, 255, 8)), # greyish to bright yellow ((230, 150, 100), (230, 0, 0))] # green to red nodata = (0, 0, 0) # black # zeros = (20, 0, 20) # dark purple # black #There is no need to mark zeros with another col zeros = (0, 0, 0) over = (255, 0, 0) # bright red def __init__(self, breaks): breaks = [(breaks[0], TriColors.color_tones[0][0]), (breaks[1], TriColors.color_tones[0][1]), (breaks[1], TriColors.color_tones[1][0]), (breaks[2], TriColors.color_tones[1][1]), (breaks[2], TriColors.color_tones[2][0]), (breaks[3], TriColors.color_tones[2][1])] LogColors.__init__(self, TriColors.nodata, TriColors.zeros, TriColors.over, breaks) CPP_COLORS = {'cpp_cot': TriColors([0, 3.6, 23, 700]), # ISCCP intervals 'cpp_reff': TriColors([0, 10, 20, 1000])} CPP_COLORS['cot'] = CPP_COLORS['cpp_cot'] CPP_COLORS['reff'] = CPP_COLORS['cpp_reff'] def get_ctp_legend(): """ Get the Cloud Top Pressure color palette """ legend = [] legend.append((0, 0, 0)) # No data legend.append((255, 0, 216)) # 0: 1000-1050 hPa (=100000-105000 Pa) legend.append((126, 0, 43)) # 1: 950-1000 hPa legend.append((153, 20, 47)) # 2: 900-950 hPa legend.append((178, 51, 0)) # 3: 850-900 hPa legend.append((255, 76, 0)) # 4: 800-850 hPa legend.append((255, 102, 0)) # 5: 750-800 hPa legend.append((255, 164, 0)) # 6: 700-750 hPa legend.append((255, 216, 0)) # 7: 650-700 hPa legend.append((216, 255, 0)) # 8: 600-650 hPa legend.append((178, 255, 0)) # 9: 550-600 hPa legend.append((153, 255, 0)) # 10: 500-550 hPa legend.append((0, 255, 0)) # 11: 450-500 hPa legend.append((0, 140, 48)) # 12: 400-450 hPa legend.append((0, 178, 255)) # 13: 350-400 hPa legend.append((0, 216, 255)) # 14: 300-350 hPa legend.append((0, 255, 255)) # 15: 250-300 hPa legend.append((238, 214, 210)) # 16: 200-250 hPa legend.append((239, 239, 223)) # 17: 150-200 hPa legend.append((255, 255, 255)) # 18: 100-150 hPa legend.append((255, 255, 255)) # 19: 50-100 hPa legend.append((255, 255, 255)) # 20: 0-50 hPa (=0-5000 Pa) palette = convert_palette(legend) return palette def get_reff_legend(): return get_log_legend('reff') def get_cot_legend(): return get_log_legend('cot') def get_log_legend(product_name): # This is the same data as is used in logdata2image (when indata as for # the calls from cppimage) legend = CPP_COLORS[product_name].palette() palette = convert_palette(legend) return palette def oca_get_scenetype_legend(): # Colorize using PPS/CPP palette legend = np.array([[170, 130, 255], # purple/blue for liquid (cph == 1) [220, 200, 255], # almost white for ice (cph == 2) [255, 200, 200] # Redish for multi layer clouds ]) legend = np.vstack([np.zeros((111, 3)), legend]) palette = convert_palette(legend) return palette mpop-1.5.0/mpop/instruments/000077500000000000000000000000001317160620000160325ustar00rootroot00000000000000mpop-1.5.0/mpop/instruments/__init__.py000066400000000000000000000000001317160620000201310ustar00rootroot00000000000000mpop-1.5.0/mpop/instruments/modis.py000066400000000000000000000045301317160620000175210ustar00rootroot00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright (c) 2010, 2014, 2015. # SMHI, # Folkborgsvägen 1, # Norrköping, # Sweden # Author(s): # Martin Raspaud # Adam Dybbroe # This file is part of mpop. 
# mpop is free software: you can redistribute it and/or modify it under the # terms of the GNU General Public License as published by the Free Software # Foundation, either version 3 of the License, or (at your option) any later # version. # mpop is distributed in the hope that it will be useful, but WITHOUT ANY # WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR # A PARTICULAR PURPOSE. See the GNU General Public License for more details. # You should have received a copy of the GNU General Public License along with # mpop. If not, see . """This modules describes the modis instrument. It provides MODIS specific methods for RGB-compositing. """ import mpop.imageo.geo_image as geo_image from mpop.instruments.visir import VisirCompositer import logging LOG = logging.getLogger(__name__) MODIS = [["8", (0.405, 0.4125, 0.420), 1000], ["9", (0.438, 0.443, 0.488), 1000], ["3", (0.459, 0.469, 0.479), 1000], ["10", (0.483, 0.488, 0.493), 1000], ["11", (0.526, 0.531, 0.536), 1000], ["12", (0.546, 0.551, 0.556), 1000], ["4", (0.545, 0.555, 0.565), 1000], ["1", (0.620, 0.645, 0.670), 1000], ["13", (0.662, 0.667, 0.672), 1000] ] # MODIS # class ModisCompositer(VisirCompositer): """This class sets up the Modis channel list and instrument specific composite methods. """ channel_list = MODIS instrument_name = "modis" def truecolor(self): """Make a daytime true color RGB composite from Modis channels. """ self.check_channels(0.645, 0.555, 0.469) ch1 = self[0.645].data / 100. ch2 = self[0.555].data / 100. ch3 = self[0.469].data / 100. img = geo_image.GeoImage((ch1, ch2, ch3), self.area, self.time_slot, fill_value=None, mode="RGB") img.enhance(stretch="histogram") return img truecolor.prerequisites = set([0.645, 0.555, 0.469]) mpop-1.5.0/mpop/instruments/s2_composites.py000066400000000000000000000010631317160620000211750ustar00rootroot00000000000000from mpop.imageo.geo_image import GeoImage def s2_truecolor(self): self.check_channels('B02','B03','B04') ch1 = self['B04'].data ch2 = self['B03'].data ch3 = self['B02'].data img = GeoImage((ch1, ch2, ch3), self.area, self.time_slot, fill_value=None, mode="RGB") img.enhance(stretch="linear") #img.enhance(stretch="histogram") img.enhance(gamma=2.0) return img s2_truecolor.prerequisites = set(['B02', 'B03','B04']) msi=[s2_truecolor] mpop-1.5.0/mpop/instruments/sarx.py000066400000000000000000000072271317160620000173710ustar00rootroot00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright (c) 2013, 2014. # Author(s): # Lars Ørum Rasmussen # This file is part of mpop. # mpop is free software: you can redistribute it and/or modify it under the # terms of the GNU General Public License as published by the Free Software # Foundation, either version 3 of the License, or (at your option) any later # version. # mpop is distributed in the hope that it will be useful, but WITHOUT ANY # WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR # A PARTICULAR PURPOSE. See the GNU General Public License for more details. # You should have received a copy of the GNU General Public License along with # mpop. If not, see . # """This modules describes the sarx instrument from the TerraSAR-X satellite. """ import numpy as np from mpop.compositer import Compositer import logging LOG = logging.getLogger(__name__) class SarxCompositer(Compositer): """This class sets up the SAR-X instrument channel list. 
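    A typical use is speckle reduction by block averaging through the
    :meth:`average` method below, e.g. (a sketch, assuming *scn* is a
    loaded scene holding the 9.65 channel)::

        img = scn.image.average(downscaling_factor=2, average_window=4)
        img.save("sarx_average.png")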
""" instrument_name = "sarx" def average(self, downscaling_factor=2, average_window=None): """ Makes a mean convolution of an image. :Parameters: `downscaling_factor` : int image downscaling factor, default is a factor 2. `average_window` : int window size for calculating mean values, default is the same as downscaling_factor. :Returns: `image` : GeoImage mean convoluted image. """ from mpop.imageo.geo_image import GeoImage from pyresample import geometry import scipy.ndimage as ndi self.check_channels(9.65) if average_window == None: average_window = downscaling_factor LOG.info("Downsampling a factor %d and averaging " % downscaling_factor + "in a window of %dx%d" % (average_window, average_window)) ch = self[9.65] # If average window and downscale factor is the same # the following could be used: # # data = data.reshape([shight, hight/shight, # swidth, width/swidth]).mean(3).mean(1) # avg kernel kernel = (np.ones((average_window, average_window), dtype=np.float) / (average_window * average_window)) # do convolution data = ndi.filters.correlate(ch.data.astype(np.float), kernel, mode='nearest') # downscale data = data[1::downscaling_factor, 1::downscaling_factor] # New area, and correct for integer truncation. p_size_x, p_size_y = (ch.area.pixel_size_x * downscaling_factor, ch.area.pixel_size_y * downscaling_factor) area_extent = (ch.area.area_extent[0], ch.area.area_extent[1], ch.area.area_extent[0] + data.shape[1] * p_size_x, ch.area.area_extent[1] + data.shape[0] * p_size_y) area = geometry.AreaDefinition(self._data_holder.satname + self._data_holder.instrument_name + str(area_extent) + str(data.shape), "On-the-fly area", ch.area.proj_id, ch.area.proj_dict, data.shape[1], data.shape[0], area_extent) return GeoImage(data, area, self.time_slot, fill_value=(0,), mode='L') average.prerequisites = set([9.65, ]) mpop-1.5.0/mpop/instruments/seviri.py000066400000000000000000000341651317160620000177160ustar00rootroot00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright (c) 2010, 2011, 2013, 2014, 2016. # Author(s): # Martin Raspaud # Adam Dybbroe # This file is part of mpop. # mpop is free software: you can redistribute it and/or modify it under the # terms of the GNU General Public License as published by the Free Software # Foundation, either version 3 of the License, or (at your option) any later # version. # mpop is distributed in the hope that it will be useful, but WITHOUT ANY # WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR # A PARTICULAR PURPOSE. See the GNU General Public License for more details. # You should have received a copy of the GNU General Public License along with # mpop. If not, see . """This modules describes the seviri instrument. """ import numpy as np import mpop.imageo.geo_image as geo_image from mpop.instruments.visir import VisirCompositer import logging LOG = logging.getLogger(__name__) try: from pyorbital.astronomy import sun_zenith_angle as sza except ImportError: sza = None from mpop.imageo import palettes oca_palette_func = {'ll_ctp': palettes.get_ctp_legend, 'ul_ctp': palettes.get_ctp_legend, 'ul_cot': palettes.get_cot_legend, 'll_cot': palettes.get_cot_legend, 'reff': palettes.get_reff_legend, 'scenetype': palettes.oca_get_scenetype_legend} def _arrange_log_data(arr, max_value, no_data_value): """ Prepare logarithmic data for creating an image. 
""" MAX_IM_VAL = 2**8 - 1 # Logarithmic data should never be negative assert ((arr >= 0) + (arr == no_data_value)).all(), \ "Negative values encountered in cloud optical thickness" # Confine image data values to the range [2, MAX_IM_VAL] arr_log = np.log(arr + 1.) # arr == 0 -> arr_log = 0 cot_im_data = arr_log * (MAX_IM_VAL - 3) / np.log(max_value + 1.) + 2. cot_im_data[cot_im_data > MAX_IM_VAL] = MAX_IM_VAL # Now that the data is adjusted, cast it to uint8 ([0, 2**8 - 1]) cot_im_data = cot_im_data.astype(np.uint8) # Give no-data values a special color cot_im_data[arr == no_data_value] = 0 # Give arr == 0 a special color cot_im_data[arr == 0] = 1 return cot_im_data class SeviriCompositer(VisirCompositer): """This class sets up the Seviri instrument channel list. """ instrument_name = "seviri" def co2corr(self): """CO2 correction of the brightness temperature of the MSG 3.9um channel:: .. math:: T4_CO2corr = (BT(IR3.9)^4 + Rcorr)^0.25 Rcorr = BT(IR10.8)^4 - (BT(IR10.8)-dt_CO2)^4 dt_CO2 = (BT(IR10.8)-BT(IR13.4))/4.0 """ try: self.check_channels(3.75, 10.8, 13.4) except RuntimeError: LOG.warning("CO2 correction not performed, channel data missing.") return bt039 = self[3.9].data bt108 = self[10.8].data bt134 = self[13.4].data dt_co2 = (bt108 - bt134) / 4.0 rcorr = bt108 ** 4 - (bt108 - dt_co2) ** 4 t4_co2corr = bt039 ** 4 + rcorr t4_co2corr = np.ma.where(t4_co2corr > 0.0, t4_co2corr, 0) t4_co2corr = t4_co2corr ** 0.25 return t4_co2corr co2corr.prerequisites = set([3.75, 10.8, 13.4]) def co2corr_chan(self): """CO2 correction of the brightness temperature of the MSG 3.9um channel, adding it as a channel:: .. math:: T4_CO2corr = (BT(IR3.9)^4 + Rcorr)^0.25 Rcorr = BT(IR10.8)^4 - (BT(IR10.8)-dt_CO2)^4 dt_CO2 = (BT(IR10.8)-BT(IR13.4))/4.0 """ if "_IR39Corr" in [chn.name for chn in self._data_holder.channels]: return self.check_channels(3.75, 10.8, 13.4) dt_co2 = (self[10.8] - self[13.4]) / 4.0 rcorr = self[10.8] ** 4 - (self[10.8] - dt_co2) ** 4 t4_co2corr = self[3.9] ** 4 + rcorr t4_co2corr.data = np.ma.where( t4_co2corr.data > 0.0, t4_co2corr.data, 0) t4_co2corr = t4_co2corr ** 0.25 t4_co2corr.name = "_IR39Corr" t4_co2corr.area = self[3.9].area t4_co2corr.wavelength_range = self[3.9].wavelength_range t4_co2corr.resolution = self[3.9].resolution self._data_holder.channels.append(t4_co2corr) co2corr_chan.prerequisites = set([3.75, 10.8, 13.4]) def refl39_chan(self): """Derive the solar (reflectance) part of the 3.9um channel including a correction of the limb cooling (co2 correction), adding it as a channel. """ if "_IR39Refl" in [chn.name for chn in self._data_holder.channels]: return if not sza: LOG.warning("3.9 reflectance derivation is not possible..." + "\nCheck that pyspectral and pyorbital are " + "installed and available!") return self.check_channels(3.75, 10.8, 13.4) platform_name = self.fullname if platform_name == 'unknown': LOG.error("Failed setting correct platform name for pyspectral! " + "Satellite = " + str(self.fullname)) LOG.debug("Satellite = " + str(self.fullname)) r39 = self[3.9].get_reflectance(self[10.8].data, sun_zenith=None, tb13_4=self[13.4].data) if r39 is None: raise RuntimeError("Couldn't derive 3.x reflectance. 
" + "Check if pyspectral is installed!") r39channel = self[3.9] * 1.0 r39channel.data = np.ma.where(r39channel.data > 0.0, r39 * 100, 0) r39channel.name = "_IR39Refl" r39channel.area = self[3.9].area r39channel.wavelength_range = self[3.9].wavelength_range r39channel.resolution = self[3.9].resolution self._data_holder.channels.append(r39channel) refl39_chan.prerequisites = set([3.75, 10.8, 13.4]) def convection_co2(self): """Make a Severe Convection RGB image composite on SEVIRI compensating for the CO2 absorption in the 3.9 micron channel. """ self.co2corr_chan() self.check_channels("_IR39Corr", 0.635, 1.63, 6.7, 7.3, 10.8) ch1 = self[6.7].data - self[7.3].data ch2 = self["_IR39Corr"].data - self[10.8].data ch3 = self[1.63].check_range() - self[0.635].check_range() img = geo_image.GeoImage((ch1, ch2, ch3), self.area, self.time_slot, fill_value=(0, 0, 0), mode="RGB", crange=((-30, 0), (0, 55), (-70, 20))) img.enhance(gamma=(1.0, 0.5, 1.0)) return img convection_co2.prerequisites = (co2corr_chan.prerequisites | set([0.635, 1.63, 6.7, 7.3, 10.8])) def cloudtop(self, stretch=(0.005, 0.005), gamma=None): """Make a Cloudtop RGB image composite from Seviri channels. """ self.co2corr_chan() self.check_channels("_IR39Corr", 10.8, 12.0) ch1 = -self["_IR39Corr"].data ch2 = -self[10.8].data ch3 = -self[12.0].data img = geo_image.GeoImage((ch1, ch2, ch3), self.area, self.time_slot, fill_value=(0, 0, 0), mode="RGB") if stretch: img.enhance(stretch=stretch) if gamma: img.enhance(gamma=gamma) return img cloudtop.prerequisites = co2corr_chan.prerequisites | set([10.8, 12.0]) def night_overview(self, stretch='histogram', gamma=None): """See cloudtop. """ return self.cloudtop(stretch=stretch, gamma=gamma) night_overview.prerequisites = cloudtop.prerequisites def night_fog(self): """Make a Night Fog RGB image composite from Seviri channels. """ self.co2corr_chan() self.check_channels("_IR39Corr", 10.8, 12.0) ch1 = self[12.0].data - self[10.8].data ch2 = self[10.8].data - self["_IR39Corr"].data ch3 = self[10.8].data img = geo_image.GeoImage((ch1, ch2, ch3), self.area, self.time_slot, fill_value=(0, 0, 0), mode="RGB", crange=((-4, 2), (0, 6), (243, 293))) img.enhance(gamma=(1.0, 2.0, 1.0)) return img night_fog.prerequisites = co2corr_chan.prerequisites | set([10.8, 12.0]) def night_microphysics(self): """Make a Night Microphysics RGB image composite from Seviri channels. This is a Eumetsat variant of night_fog. See e.g http://oiswww.eumetsat.int/~idds/html/doc/best_practices.pdf """ self.check_channels(3.9, 10.8, 12.0) ch1 = self[12.0].data - self[10.8].data ch2 = self[10.8].data - self[3.9].data ch3 = self[10.8].data img = geo_image.GeoImage((ch1, ch2, ch3), self.area, self.time_slot, fill_value=(0, 0, 0), mode="RGB", crange=((-4, 2), (0, 10), (243, 293))) return img night_microphysics.prerequisites = set([3.9, 10.8, 12.0]) def snow(self): """Make a 'Snow' RGB as suggested in the MSG interpretation guide (rgbpart04.ppt). It is kind of special as it requires the derivation of the daytime component of the mixed Terrestrial/Solar 3.9 micron channel. Furthermore the sun zenith angle is used. """ self.refl39_chan() self.check_channels("_IR39Refl", 0.8, 1.63, 3.75) # We calculate the sun zenith angle again. Should be reused if already # calculated/available... # FIXME! lonlats = self[3.9].area.get_lonlats() sunz = sza(self.time_slot, lonlats[0], lonlats[1]) sunz = np.ma.masked_outside(sunz, 0.0, 88.0) sunzmask = sunz.mask sunz = sunz.filled(88.) 
costheta = np.cos(np.deg2rad(sunz)) red = np.ma.masked_where(sunzmask, self[0.8].data / costheta) green = np.ma.masked_where(sunzmask, self[1.6].data / costheta) img = geo_image.GeoImage((red, green, self['_IR39Refl'].data), self.area, self.time_slot, crange=((0, 100), (0, 70), (0, 30)), fill_value=None, mode="RGB") img.gamma((1.7, 1.7, 1.7)) return img snow.prerequisites = refl39_chan.prerequisites | set( [0.8, 1.63, 3.75]) def day_microphysics(self, wintertime=False, fill_value=None): """Make a 'Day Microphysics' RGB as suggested in the MSG interpretation guide (rgbpart04.ppt). It is kind of special as it requires the derivation of the daytime component of the mixed Terrestrial/Solar 3.9 micron channel. Furthermore the sun zenith angle is used. for black backgroup specify: fill_value=(0,0,0) """ self.refl39_chan() self.check_channels(0.8, "_IR39Refl", 10.8) # We calculate the sun zenith angle again. Should be reused if already # calculated/available... # FIXME! lonlats = self[3.9].area.get_lonlats() sunz = sza(self.time_slot, lonlats[0], lonlats[1]) sunz = np.ma.masked_outside(sunz, 0.0, 88.0) sunzmask = sunz.mask sunz = sunz.filled(88.) costheta = np.cos(np.deg2rad(sunz)) if wintertime: crange = ((0, 100), (0, 25), (203, 323)) else: crange = ((0, 100), (0, 60), (203, 323)) red = np.ma.masked_where(sunzmask, self[0.8].data / costheta) green = np.ma.masked_where(sunzmask, self['_IR39Refl'].data) blue = np.ma.masked_where(sunzmask, self[10.8].data) img = geo_image.GeoImage((red, green, blue), self.area, self.time_slot, crange=crange, fill_value=fill_value, mode="RGB") if wintertime: img.gamma((1.0, 1.5, 1.0)) else: img.gamma((1.0, 2.5, 1.0)) # Summertime settings.... return img day_microphysics.prerequisites = refl39_chan.prerequisites | set( [0.8, 10.8]) def oca(self, fieldname): """Make an OCA cloud parameter image""" palette = oca_palette_func[fieldname]() data = getattr(getattr(self['OCA'], fieldname), 'data') if fieldname in ['scenetype']: data = data.astype('uint8') elif fieldname in ['ul_ctp', 'll_ctp']: data = (22. - data / 5000.).astype('Int16') elif fieldname in ['reff']: data = (data * 1000000. + 0.5).astype('uint8') data.fill_value = 255 elif fieldname in ['ul_cot', 'll_cot']: data = np.ma.exp(data * np.ma.log(10)) max_value = palettes.CPP_COLORS['cot'].breaks[-1][0] data.fill_value = 255 no_data = 255 # FIXME! data = _arrange_log_data(data.filled(), max_value, no_data) else: raise NotImplementedError( "No imagery for parameter %s implemented yet...", fieldname) img = geo_image.GeoImage(data, self.area, self.time_slot, fill_value=(0), mode="P", palette=palette) return img oca.prerequisites = set(['OCA']) mpop-1.5.0/mpop/instruments/viirs.py000066400000000000000000000710561317160620000175510ustar00rootroot00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright (c) 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017. # Author(s): # Adam Dybbroe # This file is part of mpop. # mpop is free software: you can redistribute it and/or modify it under the # terms of the GNU General Public License as published by the Free Software # Foundation, either version 3 of the License, or (at your option) any later # version. # mpop is distributed in the hope that it will be useful, but WITHOUT ANY # WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR # A PARTICULAR PURPOSE. See the GNU General Public License for more details. # You should have received a copy of the GNU General Public License along with # mpop. If not, see . 
"""This modules describes the viirs instrument. It provides VIIRS specific methods for RGB-compositing. """ import logging import numpy as np import mpop.imageo.geo_image as geo_image from mpop.instruments.visir import VisirCompositer LOG = logging.getLogger(__name__) try: from pyorbital.astronomy import sun_zenith_angle as sza except ImportError: LOG.warning("Sun zenith angle correction not possible! " + "Check the availability of the pyorbital module in your environment") sza = None # VIIRS # Since there is overlap between I-bands and M-bands we need to # specifically re-define some of the RGB composites already defined # in the standard visir.py module. So, the same names, like "overview" # can be invoked and based on M-bands only. # In addition we define new composite names for the I-bands, # like e.g. hr_overview, hr_night_fog, etc # class ViirsCompositer(VisirCompositer): """This class sets up the VIIRS instrument channel list. """ instrument_name = "viirs" def overview(self, stretch='linear', gamma=1.6): """Make an Overview RGB image composite from VIIRS channels. """ self.check_channels('M05', 'M07', 'M15') ch1 = self['M05'].check_range() ch2 = self['M07'].check_range() ch3 = -self['M15'].data img = geo_image.GeoImage((ch1, ch2, ch3), self.area, self.time_slot, fill_value=(0, 0, 0), mode="RGB") if stretch: img.enhance(stretch=stretch) if gamma: img.enhance(gamma=gamma) return img overview.prerequisites = set(['M05', 'M07', 'M15']) def overview_sun(self, stretch='linear', gamma=1.6, fill_value=(0, 0, 0)): """Make an overview RGB image composite normalising with cosine to the sun zenith angle. """ self.check_channels('M05', 'M07', 'M15') lonlats = self['M15'].area.get_lonlats() red = self['M05'].sunzen_corr(self.time_slot, lonlats, limit=88., sunmask=95).data green = self['M07'].sunzen_corr(self.time_slot, lonlats, limit=88., sunmask=95).data blue = -self['M15'].data img = geo_image.GeoImage((red, green, blue), self.area, self.time_slot, fill_value=fill_value, mode="RGB") if stretch: img.enhance(stretch=stretch) if gamma: img.enhance(gamma=gamma) return img overview_sun.prerequisites = set(['M05', 'M07', 'M15']) def hr_overview(self): """Make a high resolution Overview RGB image composite from the VIIRS I-bands only - 375 meter resolution. """ self.check_channels('I01', 'I02', 'I05') ch1 = self['I01'].check_range() ch2 = self['I02'].check_range() ch3 = -self['I05'].data img = geo_image.GeoImage((ch1, ch2, ch3), self.area, self.time_slot, fill_value=(0, 0, 0), mode="RGB") img.enhance(stretch="crude") img.enhance(gamma=1.6) return img hr_overview.prerequisites = set(['I01', 'I02', 'I05']) def truecolor(self, stretch='linear', gamma=2.0): """Make a True Color RGB image composite from M-bands only. """ #elf.check_channels('M02', 'M04', 'M05') self.check_channels('M03', 'M04', 'M05') ch1 = self['M05'].check_range() ch2 = self['M04'].check_range() #h3 = self['M02'].check_range() ch3 = self['M03'].check_range() img = geo_image.GeoImage((ch1, ch2, ch3), self.area, self.time_slot, fill_value=None, mode="RGB") if stretch: img.enhance(stretch=stretch) if gamma: img.enhance(gamma=gamma) return img truecolor.prerequisites = set(['M03', 'M04', 'M05']) # truecolor.prerequisites = set(['M02', 'M04', 'M05']) def natural(self): """Make a Natural Colors RGB image composite from M-bands only. 
""" self.check_channels('M05', 'M06', 'M07', 'M10') ch1 = self['M10'].check_range() ch2 = self['M07'].check_range() ch3 = self['M05'].check_range() ch2b = self['M06'].check_range() ch2 = np.ma.where(ch2.mask, ch2b, ch2) common_mask = np.logical_or(ch1.mask, ch2.mask) common_mask = np.logical_or(common_mask, ch3.mask) ch1.mask = common_mask ch2.mask = common_mask ch3.mask = common_mask img = geo_image.GeoImage((ch1, ch2, ch3), self.area, self.time_slot, fill_value=(0, 0, 0), mode="RGB", crange=((0, 90), (0, 90), (0, 90))) img.enhance(gamma=1.8) return img natural.prerequisites = set(['M05', 'M06', 'M07', 'M10']) def hr_natural(self): """Make a high resolution Day Natural Colors RGB image composite from I-bands only - 375 meter resolution. """ self.check_channels('I01', 'I02', 'I03') ch1 = self['I03'].check_range() ch2 = self['I02'].check_range() ch3 = self['I01'].check_range() img = geo_image.GeoImage((ch1, ch2, ch3), self.area, self.time_slot, fill_value=(0, 0, 0), mode="RGB", crange=((0, 90), (0, 90), (0, 90))) img.enhance(gamma=1.8) return img hr_natural.prerequisites = set(['I01', 'I02', 'I03']) def vis06(self): """Make a black and white image of the VIS 0.635um channel. """ return self.channel_image("M05") vis06.prerequisites = set(['M05']) def hr_vis06(self): """Make a high res black and white image of the 'visible' (VIS) I-band at 0.640um. """ return self.channel_image('I01') hr_vis06.prerequisites = set(['I01']) def green_snow(self): """Make a Green Snow RGB image composite. """ self.check_channels('M05', 'M10', 'M15') ch1 = self['M10'].check_range() ch2 = self['M05'].check_range() ch3 = -self['M15'].data img = geo_image.GeoImage((ch1, ch2, ch3), self.area, self.time_slot, fill_value=(0, 0, 0), mode="RGB") img.enhance(stretch="crude") img.enhance(gamma=1.6) return img green_snow.prerequisites = set(['M05', 'M10', 'M15']) def hr_green_snow(self): """Make a Green Snow RGB image composite. """ self.check_channels('I01', 'I03', 'I05') ch1 = self['I03'].check_range() ch2 = self['I01'].check_range() ch3 = -self['I05'].data img = geo_image.GeoImage((ch1, ch2, ch3), self.area, self.time_slot, fill_value=(0, 0, 0), mode="RGB") img.enhance(stretch="crude") img.enhance(gamma=1.6) return img hr_green_snow.prerequisites = set(['I01', 'I03', 'I05']) def red_snow(self): """Make a Red Snow RGB image composite. """ self.check_channels('M05', 'M10', 'M15') ch1 = self['M05'].check_range() ch2 = self['M10'].check_range() ch3 = -self['M15'].data img = geo_image.GeoImage((ch1, ch2, ch3), self.area, self.time_slot, fill_value=(0, 0, 0), mode="RGB") img.enhance(stretch="crude") return img red_snow.prerequisites = set(['M05', 'M10', 'M15']) def hr_red_snow(self): """Make a high resolution Red Snow RGB image composite from the I-bands only. """ self.check_channels('I01', 'I03', 'I05') ch1 = self['I01'].check_range() ch2 = self['I03'].check_range() ch3 = -self['I05'].data img = geo_image.GeoImage((ch1, ch2, ch3), self.area, self.time_slot, fill_value=(0, 0, 0), mode="RGB") img.enhance(stretch="crude") return img hr_red_snow.prerequisites = set(['I01', 'I03', 'I05']) def dnb_overview(self, stretch='linear'): """Make a nighttime overview RGB image composite from VIIRS DNB and M bands. 
""" self.check_channels('DNB', 'M15') lonlats = self['M15'].area.get_lonlats() if sza: sunz = sza(self.time_slot, lonlats[0], lonlats[1]) sunz = np.ma.masked_outside(sunz, 103, 180) sunzmask = sunz.mask red = np.ma.masked_where(sunzmask, self['DNB'].data) green = np.ma.masked_where(sunzmask, self['DNB'].data) blue = np.ma.masked_where(sunzmask, -self['M15'].data) else: LOG.warning("No masking of solar contaminated pixels performed!") red = self['DNB'].data green = self['DNB'].data blue = -self['M15'].data img = geo_image.GeoImage((red, green, blue), self.area, self.time_slot, fill_value=None, mode="RGB") img.enhance(stretch=stretch) return img dnb_overview.prerequisites = set(['DNB', 'M15']) # def dnb_overview(self): # """Make an Overview RGB image composite from VIIRS # channels. # """ # self.check_channels('DNB', 'M15') # ch1 = self['DNB'].data # ch2 = self['DNB'].data # ch3 = -self['M15'].data # img = geo_image.GeoImage((ch1, ch2, ch3), # self.area, # self.time_slot, # fill_value=None, # mode="RGB") # img.enhance(stretch="linear") # return img # dnb_overview.prerequisites = set(['DNB', 'M15']) def night_color(self): """Make a Night Overview RGB image composite. Same as cloudtop ... just different. """ return self.cloudtop(stretch="histogram") night_color.prerequisites = set(['M12', 'M15', 'M16']) def night_fog(self): """Make a Night Fog RGB image composite. """ self.check_channels('M12', 'M15', 'M16') ch1 = self['M16'].data - self['M15'].data ch2 = self['M15'].data - self['M12'].data ch3 = self['M15'].data img = geo_image.GeoImage((ch1, ch2, ch3), self.area, self.time_slot, fill_value=(0, 0, 0), mode="RGB", crange=((-4, 2), (0, 6), (243, 293))) img.enhance(gamma=(1.0, 2.0, 1.0)) return img night_fog.prerequisites = set(['M12', 'M15', 'M16']) def dust(self): """Make a Dust RGB image composite. """ self.check_channels('M14', 'M15', 'M16') ch1 = self['M16'].data - self['M15'].data ch2 = self['M15'].data - self['M14'].data ch3 = self['M15'].data img = geo_image.GeoImage((ch1, ch2, ch3), self.area, self.time_slot, fill_value=(0, 0, 0), mode="RGB", crange=((-4, 2), (0, 15), (261, 289))) img.enhance(gamma=(1.0, 2.5, 1.0)) return img dust.prerequisites = set(['M14', 'M15', 'M16']) def ash(self): """Make a Ash RGB image composite. """ self.check_channels('M14', 'M15', 'M16') ch1 = self['M16'].data - self['M15'].data ch2 = self['M15'].data - self['M14'].data ch3 = self['M15'].data img = geo_image.GeoImage((ch1, ch2, ch3), self.area, self.time_slot, fill_value=(0, 0, 0), mode="RGB", crange=((-4, 2), (-4, 5), (243, 303))) return img ash.prerequisites = set(['M14', 'M15', 'M16']) def fog(self): """Make a Fog RGB image composite. """ self.check_channels('M14', 'M15', 'M16') ch1 = self['M16'].data - self['M15'].data ch2 = self['M15'].data - self['M14'].data ch3 = self['M15'].data img = geo_image.GeoImage((ch1, ch2, ch3), self.area, self.time_slot, fill_value=(0, 0, 0), mode="RGB", crange=((-4, 2), (0, 6), (243, 283))) img.enhance(gamma=(1.0, 2.0, 1.0)) return img fog.prerequisites = set(['M14', 'M15', 'M16']) def cloudtop(self, stretch=None): """Make a Cloudtop RGB image composite. 
""" self.check_channels('M12', 'M15', 'M16') ch1 = -self['M12'].data ch2 = -self['M15'].data ch3 = -self['M16'].data img = geo_image.GeoImage((ch1, ch2, ch3), self.area, self.time_slot, fill_value=(0, 0, 0), mode="RGB") if stretch: img.enhance(stretch=stretch) else: img.enhance(stretch=(0.005, 0.005)) return img cloudtop.prerequisites = set(['M12', 'M15', 'M16']) def dnb(self, stretch="histogram"): """Make a black and white image of the Day-Night band.""" self.check_channels('DNB') img = geo_image.GeoImage(self['DNB'].data, self.area, self.time_slot, fill_value=0, mode="L") if stretch: img.enhance(stretch=stretch) return img dnb.prerequisites = set(['DNB']) def dnb_rgb(self, stretch="linear"): """Make a RGB Day-Night band using M15 as blue.""" self.check_channels('DNB', 'M15') ch1 = self['DNB'].data ch2 = self['DNB'].data ch3 = -self['M15'].data img = geo_image.GeoImage((ch1, ch2, ch3), self.area, self.time_slot, fill_value=(0, 0, 0), mode="RGB") if stretch: img.enhance(stretch=stretch) return img dnb_rgb.prerequisites = set(['DNB', 'M15']) def ir108(self): """Make a black and white image of the IR 10.8um channel. """ self.check_channels("M15") img = geo_image.GeoImage(self["M15"].data, self.area, self.time_slot, fill_value=0, mode="L", crange=(-70 + 273.15, 57.5 + 273.15)) img.enhance(inverse=True) return img ir108.prerequisites = set(["M15"]) def hr_ir108(self): """Make a black and white image of the IR 10.8um channel (320m). """ self.check_channels("I05") img = geo_image.GeoImage(self["I05"].data, self.area, self.time_slot, fill_value=0, mode="L", crange=(-70 + 273.15, 57.5 + 273.15)) img.enhance(inverse=True) return img hr_ir108.prerequisites = set(["I05"]) def chlorophyll(self, stretch=None): """ From http://oceancolor.gsfc.nasa.gov/REPROCESSING/R2009/ocv6/ * Rrs1 = blue wavelength Rrs (e.g., 443, 490, or 510-nm) * Rrs2 = green wavelength Rrs (e.g., 547, 555, or 565-nm) * X = log10(Rrs1 / Rrs2) * chlor_a = 10^(a0 + a1*X + a2*X^2 + a3*X^3 + a4*X^4) sensor default * blue green a0 a1 a2 a3 a4 OC3V VIIRS Y 443>486 550 0.2228 -2.4683 1.5867 -0.4275 -0.7768 blue: M02(445)>M03(488) green: M04(555) * X = log10(max(M2, M3)/M4) """ self.check_channels("M02", "M03", "M04") a0, a1, a2, a3, a4 = (0.2228, -2.4683, 1.5867, -0.4275, -0.7768) # X = np.maximum(self["M02"].data, self["M03"].data)/self["M04"].data X = self["M02"].data / self["M04"].data X = np.log10(X) chlor_a = 10 ** (a0 + a1 * X + a2 * (X ** 2) + a3 * (X ** 3) + a4 * (X ** 4)) print 'chlor_a:', chlor_a.min(), chlor_a.mean(), chlor_a.max() img = geo_image.GeoImage(chlor_a, self.area, self.time_slot, fill_value=0, mode="L") if stretch: img.enhance(stretch=stretch) return img chlorophyll.prerequisites = set(["M02", "M03", "M04"]) def hr_cloudtop(self): """Make a Night Fog RGB image composite. """ self.check_channels('I04', 'I05') ch1 = -self['I04'].data ch2 = self['I05'].data ch3 = self['I05'].data img = geo_image.GeoImage((ch1, ch2, ch3), self.area, self.time_slot, fill_value=(0, 0, 0), mode="RGB") img.enhance(stretch=(0.005, 0.005)) return img hr_cloudtop.prerequisites = set(['I04', 'I05']) def snow_age(self): """Make a Snow age RGB image composite. """ self.check_channels('M07', 'M08', 'M09', 'M10', 'M11') coeff = 255. / 160. 
lonlats = self['M11'].area.get_lonlats() m07 = self['M07'].sunzen_corr( self.time_slot, lonlats, limit=88., sunmask=95).data * coeff m08 = self['M08'].sunzen_corr( self.time_slot, lonlats, limit=88., sunmask=95).data * coeff m09 = self['M09'].sunzen_corr( self.time_slot, lonlats, limit=88., sunmask=95).data * coeff m10 = self['M10'].sunzen_corr( self.time_slot, lonlats, limit=88., sunmask=95).data * coeff m11 = self['M11'].sunzen_corr( self.time_slot, lonlats, limit=88., sunmask=95).data * coeff refcu = m11 - m10 refcu[refcu < 0] = 0 ch1 = m07 - refcu / 2. - m09 / 4. ch2 = m08 + refcu / 4. + m09 / 4. ch3 = m11 + m09 # Bernard Bellec snow Look-Up Tables V 1.0 (c) Meteo-France # These Look-up Tables allow you to create the RGB snow product # for SUOMI-NPP VIIRS Imager according to the algorithm # presented at the second CSPP/IMAPP users' meeting at Eumetsat # in Darmstadt on 14-16 April 2015 # The algorithm and the product are described in this # presentation : # http://www.ssec.wisc.edu/meetings/cspp/2015/Agenda%20PDF/Wednesday/Roquet_snow_product_cspp2015.pdf # # For further information you may contact # Bernard Bellec at Bernard.Bellec@meteo.fr # or # Pascale Roquet at Pascale.Roquet@meteo.fr luts = np.array([[0, 0, 0], [1, 2, 2], [3, 8, 5], [4, 12, 8], [6, 15, 10], [8, 18, 13], [9, 21, 16], [11, 24, 19], [13, 26, 21], [14, 28, 24], [ 16, 30, 27], [18, 32, 30], [19, 34, 32], [21, 36, 35], [22, 38, 38], [24, 40, 40], [ 26, 42, 43], [27, 43, 46], [29, 45, 49], [31, 47, 51], [32, 49, 54], [34, 50, 57], [ 36, 52, 60], [37, 54, 62], [39, 55, 65], [40, 57, 68], [42, 59, 70], [44, 60, 73], [ 45, 62, 76], [47, 64, 79], [49, 66, 81], [50, 67, 84], [52, 69, 87], [53, 71, 90], [ 55, 73, 92], [56, 75, 95], [58, 77, 98], [59, 79, 100], [61, 81, 103], [62, 83, 106], [ 64, 85, 109], [65, 86, 111], [67, 88, 114], [68, 90, 117], [70, 92, 119], [71, 94, 121], [ 73, 96, 124], [74, 98, 126], [76, 100, 129], [77, 102, 131], [79, 104, 134], [80, 106, 136], [ 82, 107, 139], [83, 109, 141], [85, 111, 144], [86, 113, 146], [88, 115, 149], [89, 117, 151], [ 91, 118, 154], [92, 120, 156], [94, 122, 159], [95, 124, 161], [97, 126, 162], [98, 128, 164], [ 100, 129, 166], [101, 131, 168], [103, 133, 170], [104, 135, 172], [106, 137, 173], [ 107, 138, 175], [109, 140, 177], [110, 142, 179], [112, 144, 181], [113, 145, 183], [ 114, 147, 184], [116, 149, 186], [117, 151, 188], [118, 152, 190], [120, 154, 192], [ 121, 156, 193], [123, 158, 194], [124, 159, 196], [125, 161, 197], [127, 163, 199], [ 128, 165, 200], [130, 166, 202], [131, 168, 203], [132, 170, 205], [134, 172, 206], [ 135, 173, 206], [136, 175, 207], [138, 177, 208], [139, 178, 209], [141, 180, 210], [ 142, 182, 211], [143, 184, 212], [145, 185, 213], [146, 187, 214], [148, 189, 215], [ 149, 191, 216], [150, 192, 217], [152, 194, 218], [153, 196, 219], [154, 198, 220], [ 156, 200, 220], [157, 201, 221], [159, 203, 221], [160, 205, 222], [161, 207, 223], [ 162, 209, 223], [163, 210, 224], [164, 212, 225], [166, 213, 225], [167, 214, 226], [ 168, 216, 227], [169, 217, 227], [171, 218, 228], [173, 220, 228], [174, 221, 228], [ 175, 222, 229], [176, 224, 229], [177, 225, 229], [178, 226, 230], [179, 227, 230], [ 181, 228, 230], [182, 229, 231], [183, 230, 231], [184, 231, 232], [185, 232, 232], [ 186, 233, 232], [187, 234, 233], [188, 235, 233], [190, 236, 233], [191, 237, 234], [ 192, 237, 234], [193, 238, 234], [194, 239, 235], [195, 240, 235], [196, 240, 236], [ 196, 241, 236], [197, 242, 236], [198, 243, 237], [199, 243, 237], [200, 244, 237], [ 201, 245, 
238], [202, 245, 238], [203, 245, 238], [204, 246, 239], [205, 246, 239], [ 206, 246, 239], [207, 247, 239], [208, 247, 239], [209, 247, 239], [209, 248, 240], [ 210, 248, 240], [210, 248, 240], [211, 248, 240], [212, 248, 240], [212, 248, 241], [ 213, 248, 241], [214, 248, 241], [215, 248, 241], [216, 248, 241], [217, 248, 242], [ 217, 248, 242], [218, 248, 242], [219, 248, 242], [219, 248, 242], [220, 248, 243], [ 221, 248, 243], [221, 249, 243], [222, 249, 243], [223, 249, 243], [223, 249, 244], [ 223, 249, 244], [224, 249, 244], [224, 249, 244], [225, 249, 245], [225, 249, 245], [ 226, 249, 245], [226, 249, 245], [227, 249, 245], [227, 249, 246], [228, 249, 246], [ 228, 250, 246], [229, 250, 246], [229, 250, 246], [230, 250, 247], [230, 250, 247], [ 231, 250, 247], [231, 250, 247], [232, 250, 247], [233, 250, 248], [233, 250, 248], [ 233, 250, 248], [234, 250, 248], [234, 250, 248], [234, 250, 249], [235, 251, 249], [ 235, 251, 249], [235, 251, 249], [236, 251, 249], [236, 251, 250], [237, 251, 250], [ 237, 251, 250], [237, 251, 250], [238, 251, 250], [238, 251, 250], [238, 251, 250], [ 239, 251, 250], [239, 251, 250], [240, 251, 250], [240, 251, 250], [240, 252, 250], [ 241, 252, 250], [241, 252, 251], [241, 252, 251], [242, 252, 251], [242, 252, 251], [ 242, 252, 251], [243, 252, 251], [243, 252, 251], [244, 252, 251], [244, 252, 251], [ 244, 252, 251], [245, 252, 252], [245, 252, 252], [245, 253, 252], [246, 253, 252], [ 246, 253, 252], [247, 253, 252], [248, 253, 252], [248, 253, 252], [248, 253, 252], [ 249, 253, 252], [249, 253, 253], [249, 253, 253], [250, 253, 253], [250, 253, 253], [ 250, 253, 253], [250, 253, 253], [251, 254, 253], [251, 254, 253], [251, 254, 253], [ 252, 254, 253], [252, 254, 254], [252, 254, 254], [253, 254, 254], [253, 254, 254], [ 253, 254, 254], [253, 254, 254], [254, 254, 254], [254, 254, 254], [254, 254, 254], [254, 254, 254], [255, 255, 255]]) / 255.0 np.ma.clip(ch1, 0, 255, ch1) np.ma.clip(ch2, 0, 255, ch2) np.ma.clip(ch3, 0, 255, ch3) ch1 = np.ma.array( luts[:, 0][ch1.astype(np.uint8)], copy=False, mask=ch1.mask) ch2 = np.ma.array( luts[:, 1][ch2.astype(np.uint8)], copy=False, mask=ch2.mask) ch3 = np.ma.array( luts[:, 2][ch3.astype(np.uint8)], copy=False, mask=ch3.mask) img = geo_image.GeoImage( (ch1, ch2, ch3), self.area, self.time_slot, mode="RGB") return img snow_age.prerequisites = set(['M07', 'M08', 'M09', 'M10', 'M11']) mpop-1.5.0/mpop/instruments/visir.py000066400000000000000000000531111317160620000175410ustar00rootroot00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright (c) 2010, 2011, 2012, 2013, 2014, 2015, 2017. # Author(s): # Martin Raspaud # Lars Ørum Rasmussen # This file is part of mpop. # mpop is free software: you can redistribute it and/or modify it under the # terms of the GNU General Public License as published by the Free Software # Foundation, either version 3 of the License, or (at your option) any later # version. # mpop is distributed in the hope that it will be useful, but WITHOUT ANY # WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR # A PARTICULAR PURPOSE. See the GNU General Public License for more details. # You should have received a copy of the GNU General Public License along with # mpop. If not, see . """This module defines the generic VISIR instrument class. 
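The compositer is normally reached through a scene object's ``image``
attribute. A minimal sketch, assuming *scn* is a scene holding the
required calibrated channels::

    scn.load([0.635, 0.85, 10.8])
    img = scn.image.overview()
    img.save("overview.png")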
""" from mpop.imageo import geo_image from mpop.compositer import Compositer # pylint: disable=W0612 # remove warnings for unused prerequisites class VisirCompositer(Compositer): """Compositer for Visual-IR instruments """ def __call__(self, *channels, **keys): """Build a geoimage. e.g.: img = l.image(0.6, 0.8, -10.8, mode="RGB") """ data = [] area = None inv = [] new_channels = [] for channel in channels: if isinstance(channel, str): if channel.startswith("-"): inv.append(True) channel = channel[1:] else: inv.append(False) else: if channel < 0: inv.append(True) channel = -channel else: inv.append(False) new_channels.append(channel) data.append(self[channel].data) new_area = self[channel].area if area and (new_area != area): raise ValueError("Channels should have the same area") else: area = new_area self.check_channels(*new_channels) img = geo_image.GeoImage(data, area=area, time_slot=self.time_slot, fill_value=keys.get("fill_value", None), crange=keys.get("crange", None), mode=keys.get("mode", None)) img.enhance(inverse=inv, gamma=keys.get("gamma", 1.0), stretch=keys.get("stretch", "no")) return img def channel_image(self, channel, fill_value=0): """Make a black and white image of the *channel*. Linear stretch without clipping is applied by default. """ self.check_channels(channel) img = geo_image.GeoImage(self[channel].data, self[channel].area, self.time_slot, fill_value=fill_value, mode="L") img.enhance(stretch="crude") return img def overview(self, stretch='crude', gamma=1.6, fill_value=(0, 0, 0)): """Make an overview RGB image composite. +--------------------+--------------------+ | Channels | Gamma (default) | +====================+====================+ | VIS0.6 | gamma 1.6 | +--------------------+--------------------+ | VIS0.8 | gamma 1.6 | +--------------------+--------------------+ | IR10.8 (inverted) | gamma 1.6 | +--------------------+--------------------+ Linear stretch without clipping is applied. """ self.check_channels(0.635, 0.85, 10.8) ch1 = self[0.635].check_range() ch2 = self[0.85].check_range() ch3 = -self[10.8].data img = geo_image.GeoImage((ch1, ch2, ch3), self.area, self.time_slot, fill_value=fill_value, mode="RGB") if stretch: img.enhance(stretch=stretch) if gamma: img.enhance(gamma=gamma) return img overview.prerequisites = set([0.635, 0.85, 10.8]) # def overview_sun(self, stretch='crude', gamma=1.6): def overview_sun(self, stretch='linear', gamma=1.6, fill_value=(0, 0, 0)): """Make an overview RGB image composite normalising with cosine to the sun zenith angle. """ self.check_channels(0.635, 0.85, 10.8) lonlats = self[10.8].area.get_lonlats() red = self[0.635].sunzen_corr(self.time_slot, lonlats, limit=88., sunmask=95).data green = self[0.85].sunzen_corr(self.time_slot, lonlats, limit=88., sunmask=95).data blue = -self[10.8].data img = geo_image.GeoImage((red, green, blue), self.area, self.time_slot, fill_value=fill_value, mode="RGB") if stretch: img.enhance(stretch=stretch) if gamma: img.enhance(gamma=gamma) return img overview_sun.prerequisites = set([0.635, 0.85, 10.8]) def night_overview(self, stretch='histogram', gamma=None): """Make an overview RGB image composite using IR channels. 
+--------------------+--------------------+ | Channels | Gamma | +====================+====================+ | IR3.9 (inverted) | gamma 1 | +--------------------+--------------------+ | IR10.8 (inverted) | gamma 1 | +--------------------+--------------------+ | IR12.0 (inverted) | gamma 1 | +--------------------+--------------------+ Histogram equalization is applied for each channel. """ return self.cloudtop(stretch, gamma) night_overview.prerequisites = set([3.75, 10.8, 12.0]) def natural(self, stretch=None, gamma=1.8, fill_value=(0, 0, 0)): """Make a Natural Colors RGB image composite. +--------------------+--------------------+--------------------+ | Channels | Range (reflectance)| Gamma (default) | +====================+====================+====================+ | IR1.6 | 0 - 90 | gamma 1.8 | +--------------------+--------------------+--------------------+ | VIS0.8 | 0 - 90 | gamma 1.8 | +--------------------+--------------------+--------------------+ | VIS0.6 | 0 - 90 | gamma 1.8 | +--------------------+--------------------+--------------------+ """ self.check_channels(0.635, 0.85, 1.63) ch1 = self[1.63].check_range() ch2 = self[0.85].check_range() ch3 = self[0.635].check_range() img = geo_image.GeoImage((ch1, ch2, ch3), self.area, self.time_slot, fill_value=fill_value, mode="RGB", crange=((0, 90), (0, 90), (0, 90))) if stretch: img.enhance(stretch=stretch) if gamma: img.enhance(gamma=gamma) return img natural.prerequisites = set([0.635, 0.85, 1.63]) def airmass(self, fill_value=(0, 0, 0)): """Make an airmass RGB image composite. +--------------------+--------------------+--------------------+ | Channels | Temp | Gamma | +====================+====================+====================+ | WV6.2 - WV7.3 | -25 to 0 K | gamma 1 | +--------------------+--------------------+--------------------+ | IR9.7 - IR10.8 | -40 to 5 K | gamma 1 | +--------------------+--------------------+--------------------+ | WV6.2 | 243 to 208 K | gamma 1 | +--------------------+--------------------+--------------------+ """ self.check_channels(6.7, 7.3, 9.7, 10.8) ch1 = self[6.7].data - self[7.3].data ch2 = self[9.7].data - self[10.8].data ch3 = self[6.7].data img = geo_image.GeoImage((ch1, ch2, ch3), self.area, self.time_slot, fill_value=fill_value, mode="RGB", crange=((-25, 0), (-40, 5), (243, 208))) return img airmass.prerequisites = set([6.7, 7.3, 9.7, 10.8]) def vis06(self): """Make a black and white image of the VIS 0.635um channel. Linear stretch without clipping is applied. """ return self.channel_image(0.6) vis06.prerequisites = set([0.635]) def ir108(self): """Make a black and white image of the IR 10.8um channel. Channel is inverted. Temperature range from -70 °C (white) to +57.5 °C (black) is shown. """ self.check_channels(10.8) img = geo_image.GeoImage(self[10.8].data, self.area, self.time_slot, fill_value=0, mode="L", crange=(-70 + 273.15, 57.5 + 273.15)) img.enhance(inverse=True) return img ir108.prerequisites = set([10.8]) def wv_high(self): """Make a black and white image of the IR 6.7um channel. Channel inverted and a linear stretch is applied with 0.5 % clipping at both ends. """ self.check_channels(6.7) img = geo_image.GeoImage(self[6.7].data, self.area, self.time_slot, fill_value=0, mode="L") img.enhance(inverse=True, stretch="linear") return img wv_high.prerequisites = set([6.7]) def wv_low(self): """Make a black and white image of the IR 7.3um channel. Channel data inverted and a linear stretch is applied with 0.5 % clipping at both ends. 
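        The inversion makes moist, cold upper-tropospheric air appear
        bright and dry, warm air appear dark, e.g.::

            img = scn.image.wv_low()
            img.save("wv073.png")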
""" self.check_channels(7.3) img = geo_image.GeoImage(self[7.3].data, self.area, self.time_slot, fill_value=0, mode="L") img.enhance(inverse=True, stretch="linear") return img wv_low.prerequisites = set([7.3]) def green_snow(self, fill_value=(0, 0, 0)): """Make a Green Snow RGB image composite. +--------------------+--------------------+ | Channels | Gamma | +====================+====================+ | IR1.6 | gamma 1.6 | +--------------------+--------------------+ | VIS0.6 | gamma 1.6 | +--------------------+--------------------+ | IR10.8 (inverted) | gamma 1.6 | +--------------------+--------------------+ Linear stretch without clipping. """ self.check_channels(0.635, 1.63, 10.8) ch1 = self[1.63].check_range() ch2 = self[0.635].check_range() ch3 = -self[10.8].data img = geo_image.GeoImage((ch1, ch2, ch3), self.area, self.time_slot, fill_value=fill_value, mode="RGB") img.enhance(stretch="crude") img.enhance(gamma=1.6) return img green_snow.prerequisites = set([0.635, 1.63, 10.8]) def red_snow(self, fill_value=(0, 0, 0)): """Make a Red Snow RGB image composite. +--------------------+--------------------+ | Channels | Gamma | +====================+====================+ | VIS0.6 | gamma 1.6 | +--------------------+--------------------+ | IR1.6 | gamma 1.6 | +--------------------+--------------------+ | IR10.8 (inverted) | gamma 1.6 | +--------------------+--------------------+ Linear stretch without clipping. """ self.check_channels(0.635, 1.63, 10.8) ch1 = self[0.635].check_range() ch2 = self[1.63].check_range() ch3 = -self[10.8].data img = geo_image.GeoImage((ch1, ch2, ch3), self.area, self.time_slot, fill_value=fill_value, mode="RGB") img.enhance(stretch="crude") return img red_snow.prerequisites = set([0.635, 1.63, 10.8]) def convection(self, fill_value=(0, 0, 0)): """Make a Severe Convection RGB image composite. +--------------------+--------------------+--------------------+ | Channels | Span | Gamma | +====================+====================+====================+ | WV6.2 - WV7.3 | -30 to 0 K | gamma 1 | +--------------------+--------------------+--------------------+ | IR3.9 - IR10.8 | 0 to 55 K | gamma 1 | +--------------------+--------------------+--------------------+ | IR1.6 - VIS0.6 | -70 to 20 % | gamma 1 | +--------------------+--------------------+--------------------+ """ self.check_channels(0.635, 1.63, 3.75, 6.7, 7.3, 10.8) ch1 = self[6.7].data - self[7.3].data ch2 = self[3.75].data - self[10.8].data ch3 = self[1.63].check_range() - self[0.635].check_range() img = geo_image.GeoImage((ch1, ch2, ch3), self.area, self.time_slot, fill_value=fill_value, mode="RGB", crange=((-30, 0), (0, 55), (-70, 20))) return img convection.prerequisites = set([0.635, 1.63, 3.75, 6.7, 7.3, 10.8]) def dust(self, fill_value=(0, 0, 0)): """Make a Dust RGB image composite. 
+--------------------+--------------------+--------------------+ | Channels | Temp | Gamma | +====================+====================+====================+ | IR12.0 - IR10.8 | -4 to 2 K | gamma 1 | +--------------------+--------------------+--------------------+ | IR10.8 - IR8.7 | 0 to 15 K | gamma 2.5 | +--------------------+--------------------+--------------------+ | IR10.8 | 261 to 289 K | gamma 1 | +--------------------+--------------------+--------------------+ """ self.check_channels(8.7, 10.8, 12.0) ch1 = self[12.0].data - self[10.8].data ch2 = self[10.8].data - self[8.7].data ch3 = self[10.8].data img = geo_image.GeoImage((ch1, ch2, ch3), self.area, self.time_slot, fill_value=fill_value, mode="RGB", crange=((-4, 2), (0, 15), (261, 289))) img.enhance(gamma=(1.0, 2.5, 1.0)) return img dust.prerequisites = set([8.7, 10.8, 12.0]) def ash(self, fill_value=(0, 0, 0)): """Make a Ash RGB image composite. +--------------------+--------------------+--------------------+ | Channels | Temp | Gamma | +====================+====================+====================+ | IR12.0 - IR10.8 | -4 to 2 K | gamma 1 | +--------------------+--------------------+--------------------+ | IR10.8 - IR8.7 | -4 to 5 K | gamma 1 | +--------------------+--------------------+--------------------+ | IR10.8 | 243 to 303 K | gamma 1 | +--------------------+--------------------+--------------------+ """ self.check_channels(8.7, 10.8, 12.0) ch1 = self[12.0].data - self[10.8].data ch2 = self[10.8].data - self[8.7].data ch3 = self[10.8].data img = geo_image.GeoImage((ch1, ch2, ch3), self.area, self.time_slot, fill_value=fill_value, mode="RGB", crange=((-4, 2), (-4, 5), (243, 303))) return img ash.prerequisites = set([8.7, 10.8, 12.0]) def fog(self, fill_value=(0, 0, 0)): """Make a Fog RGB image composite. +--------------------+--------------------+--------------------+ | Channels | Temp | Gamma | +====================+====================+====================+ | IR12.0 - IR10.8 | -4 to 2 K | gamma 1 | +--------------------+--------------------+--------------------+ | IR10.8 - IR8.7 | 0 to 6 K | gamma 2.0 | +--------------------+--------------------+--------------------+ | IR10.8 | 243 to 283 K | gamma 1 | +--------------------+--------------------+--------------------+ """ self.check_channels(8.7, 10.8, 12.0) ch1 = self[12.0].data - self[10.8].data ch2 = self[10.8].data - self[8.7].data ch3 = self[10.8].data img = geo_image.GeoImage((ch1, ch2, ch3), self.area, self.time_slot, fill_value=fill_value, mode="RGB", crange=((-4, 2), (0, 6), (243, 283))) img.enhance(gamma=(1.0, 2.0, 1.0)) return img fog.prerequisites = set([8.7, 10.8, 12.0]) def night_fog(self, fill_value=(0, 0, 0)): """Make a Night Fog RGB image composite. 
+--------------------+--------------------+--------------------+ | Channels | Temp | Gamma | +====================+====================+====================+ | IR12.0 - IR10.8 | -4 to 2 K | gamma 1 | +--------------------+--------------------+--------------------+ | IR10.8 - IR3.9 | 0 to 6 K | gamma 2.0 | +--------------------+--------------------+--------------------+ | IR10.8 | 243 to 293 K | gamma 1 | +--------------------+--------------------+--------------------+ """ self.check_channels(3.75, 10.8, 12.0) ch1 = self[12.0].data - self[10.8].data ch2 = self[10.8].data - self[3.75].data ch3 = self[10.8].data img = geo_image.GeoImage((ch1, ch2, ch3), self.area, self.time_slot, fill_value=fill_value, mode="RGB", crange=((-4, 2), (0, 6), (243, 293))) img.enhance(gamma=(1.0, 2.0, 1.0)) return img night_fog.prerequisites = set([3.75, 10.8, 12.0]) def cloudtop(self, stretch=(0.005, 0.005), gamma=None, fill_value=(0, 0, 0)): """Make a Cloudtop RGB image composite. +--------------------+--------------------+ | Channels | Gamma | +====================+====================+ | IR3.9 (inverted) | gamma 1 | +--------------------+--------------------+ | IR10.8 (inverted) | gamma 1 | +--------------------+--------------------+ | IR12.0 (inverted) | gamma 1 | +--------------------+--------------------+ Linear stretch with 0.5 % clipping at both ends. """ self.check_channels(3.75, 10.8, 12.0) ch1 = -self[3.75].data ch2 = -self[10.8].data ch3 = -self[12.0].data img = geo_image.GeoImage((ch1, ch2, ch3), self.area, self.time_slot, fill_value=fill_value, mode="RGB") if stretch: img.enhance(stretch=stretch) if gamma: img.enhance(gamma=gamma) return img cloudtop.prerequisites = set([3.75, 10.8, 12.0]) # pylint: enable=W0612 mpop-1.5.0/mpop/plugin_base.py000066400000000000000000000041641317160620000163060ustar00rootroot00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright (c) 2011 SMHI # Author(s): # Martin Raspaud # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # You should have received a copy of the GNU General Public License # along with this program. If not, see . """The :mod:`mpop.plugin_base` module defines the plugin API. """ import weakref class Plugin(object): """The base plugin class. It is not to be used as is, it has to be inherited by other classes. """ pass class Reader(Plugin): """Reader plugins. They should have a *pformat* attribute, and implement the *load* method. This is an abstract class to be inherited. """ ptype = "reader" def __init__(self, scene): """The reader plugin takes as input a satellite scene to fill in. Arguments: - `scene`: the scene to fill. """ Plugin.__init__(self) self._scene = weakref.proxy(scene) def load(self, channels_to_load): """Loads the *channels_to_load* into the scene object. """ raise NotImplementedError class Writer(Plugin): """Writer plugins. They must implement the *save* method. This is an abstract class to be inherited. """ ptype = "writer" def __init__(self, scene): """The writer saves the *scene* to *filename*. Arguments: - `scene`: the scene to save. - `filename`: the place to save it. 
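        Note that only *scene* is handed to the constructor; the
        *filename* goes to :meth:`save`. A minimal, purely illustrative
        plugin sketch::

            class DummyWriter(Writer):
                pformat = "dummy"  # hypothetical format name

                def save(self, filename):
                    with open(filename, "w") as fobj:
                        fobj.write(str(self._scene))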
""" Plugin.__init__(self) self._scene = weakref.proxy(scene) def save(self, filename): """Saves the scene to a given *filename*. """ raise NotImplementedError mpop-1.5.0/mpop/projector.py000066400000000000000000000342601317160620000160250ustar00rootroot00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright (c) 2009, 2010, 2011, 2012, 2013, 2014. # SMHI, # Folkborgsvägen 1, # Norrköping, # Sweden # Author(s): # Martin Raspaud # Adam Dybbroe # This file is part of mpop. # mpop is free software: you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # mpop is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # You should have received a copy of the GNU General Public License # along with mpop. If not, see . """This module handles coverage objects. Such objects are used to transform area projected data by changing either the area or the projection or both. A typical usage is to transform one large area in satellite projection to an area of interest in polar projection for example. """ import os import ConfigParser import logging import numpy as np from pyresample import image, utils, geometry, kd_tree from pyresample.bilinear import get_sample_from_bil_info, get_bil_info try: from pyresample.ewa import ll2cr, fornav except ImportError: ll2cr, fornav = None, None from mpop import CONFIG_PATH logger = logging.getLogger(__name__) area_file = None def get_area_file(): global area_file if area_file: return area_file conf = ConfigParser.ConfigParser() conf.read(os.path.join(CONFIG_PATH, "mpop.cfg")) try: area_file = os.path.join(conf.get("projector", "area_directory") or CONFIG_PATH, conf.get("projector", "area_file")) except ConfigParser.NoSectionError: area_file = "" logger.warning("Couldn't find the mpop.cfg file. " "Do you have one ? is it in $PPP_CONFIG_DIR ?") return area_file def get_area_def(area_name): """Get the definition of *area_name* from file. The file is defined to use is to be placed in the $PPP_CONFIG_DIR directory, and its name is defined in mpop's configuration file. """ return utils.parse_area_file(get_area_file(), area_name)[0] def _get_area_hash(area): """Calculate a (close to) unique hash value for a given area. """ try: return hash(area.lons.tostring() + area.lats.tostring()) except AttributeError: try: return hash(area.tostring()) except AttributeError: return hash(str(area)) class Projector(object): """This class define projector objects. They contain the mapping information necessary for projection purposes. For efficiency reasons, generated projectors can be saved to disk for later reuse. Use the :meth:`save` method for this. To define a projector object, on has to specify *in_area* and *out_area*, and can also input the *in_lonlats* or the *mode*. Available modes area: - 'quick' (works only if both in- and out-areas are AreaDefinitions) - 'bilinear' (out-area needs to be AreaDefinition with proj4_string) - 'ewa' - 'nearest'. *radius* defines the radius of influence for nearest neighbour search in 'nearest' and 'bilinear' modes. 
""" def __init__(self, in_area, out_area, in_latlons=None, mode=None, radius=10000, nprocs=1): if (mode is not None and mode not in ["quick", "nearest", "ewa", "bilinear"]): raise ValueError("Projector mode must be one of 'nearest', " "'quick', 'ewa', 'bilinear'") self.area_file = get_area_file() self.in_area = None self.out_area = None self._cache = None self._filename = None self.mode = "quick" self.radius = radius self.conf = ConfigParser.ConfigParser() self.conf.read(os.path.join(CONFIG_PATH, "mpop.cfg")) # TODO: # - Rework so that in_area and out_area can be lonlats. # - Add a recompute flag ? # Setting up the input area in_id = self._setup_input_area(in_area, latlons=in_latlons) # Setting up the output area out_id = self._setup_output_area(out_area, latlons=None) # if self.in_area == self.out_area: # return # choosing the right mode if necessary if mode is None: try: dicts = in_area.proj_dict, out_area.proj_dict del dicts self.mode = "quick" except AttributeError: self.mode = "nearest" else: self.mode = mode projections_directory = "/var/tmp" try: projections_directory = self.conf.get("projector", "projections_directory") except ConfigParser.NoSectionError: pass self._filename = get_precompute_cache_fname(in_id, out_id, in_area, out_area, self.mode, projections_directory) try: self._cache = {} self._file_cache = np.load(self._filename) except: logger.info("Computing projection from %s to %s...", in_id, out_id) if self.mode == "nearest": self._cache = calc_nearest_params(in_area, out_area, radius, nprocs=nprocs) elif self.mode == "quick": self._cache = calc_quick_params(in_area, out_area) elif self.mode == "ewa": if ll2cr is not None: self._cache = calc_ewa_params(in_area, out_area) else: raise ImportError("Can't import pyresample.ewa") elif self.mode == "bilinear": self._cache = calc_bilinear_params(in_area, out_area, radius, nprocs=nprocs) def _setup_input_area(self, area, latlons=None): """Setup self.in_area and return area id""" try: self.in_area, in_id = get_area_and_id(area, latlons=latlons) except TypeError: raise utils.AreaNotFound("Input area " + str(area) + " must be defined in " + self.area_file + ", be an area object" " or longitudes/latitudes must be " "provided.") return in_id def _setup_output_area(self, area, latlons=None): """Setup output area""" try: self.out_area, out_id = get_area_and_id(area, latlons=latlons) except AttributeError: raise utils.AreaNotFound("Output area " + str(area) + " must be defined in " + self.area_file + " or " "be an area object.") return out_id def save(self, resave=False): """Save the precomputation to disk, and overwrite existing file in case *resave* is true. 
""" if (not os.path.exists(self._filename)) or resave: logger.info("Saving projection to " + self._filename) np.savez(self._filename, **self._cache) def _project_array_nearest(self, data): """Project array *data* using nearest neighbour resampling""" if 'valid_index' not in self._cache: self._cache['valid_index'] = self._file_cache['valid_index'] self._cache['valid_output_index'] = \ self._file_cache['valid_output_index'] self._cache['index_array'] = self._file_cache['index_array'] valid_index, valid_output_index, index_array = \ (self._cache['valid_index'], self._cache['valid_output_index'], self._cache['index_array']) res = kd_tree.get_sample_from_neighbour_info('nn', self.out_area.shape, data, valid_index, valid_output_index, index_array, fill_value=None) return res def _project_array_quick(self, data): """Project array *data* using quick interpolation""" if 'row_idx' not in self._cache: self._cache['row_idx'] = self._file_cache['row_idx'] self._cache['col_idx'] = self._file_cache['col_idx'] row_idx, col_idx = self._cache['row_idx'], self._cache['col_idx'] img = image.ImageContainer(data, self.in_area, fill_value=None) res = np.ma.array(img.get_array_from_linesample(row_idx, col_idx), dtype=data.dtype) return res def _project_array_ewa(self, data): """Project array *data* using EWA interpolation""" # TODO: should be user configurable? rows_per_scan = None if 'ewa_cols' in self. not_cache: self._cache['ewa_cols'] = self._file_cache['ewa_cols'] self._cache['ewa_rows'] = self._file_cache['ewa_rows'] num_valid_points, res = fornav(self._cache['ewa_cols'], self._cache['ewa_rows'], self.out_area, data, rows_per_scan=rows_per_scan) del num_valid_points return res def _project_array_bilinear(self, data): """Project array *data* using bilinear interpolation""" if 'bilinear_t' not in self._cache: self._cache['bilinear_t'] = self._file_cache['bilinear_t'] self._cache['bilinear_s'] = self._file_cache['bilinear_s'] self._cache['input_idxs'] = self._file_cache['input_idxs'] self._cache['idx_arr'] = self._file_cache['idx_arr'] res = get_sample_from_bil_info(data.ravel(), self._cache['bilinear_t'], self._cache['bilinear_s'], self._cache['input_idxs'], self._cache['idx_arr'], output_shape=self.out_area.shape) res = np.ma.masked_invalid(res) return res def project_array(self, data): """Project an array *data* along the given Projector object. 
""" if self.mode == "nearest": res = self._project_array_nearest(data) elif self.mode == "quick": res = self._project_array_quick(data) elif self.mode == "ewa": if fornav is not None: res = self._project_array_ewa(data) else: raise ImportError("Can't import pyresample.ewa") elif self.mode == "bilinear": res = self._project_array_bilinear(data) return res def calc_nearest_params(in_area, out_area, radius, nprocs=1): """Calculate projection parameters for nearest neighbour interpolation""" valid_index, valid_output_index, index_array, distance_array = \ kd_tree.get_neighbour_info(in_area, out_area, radius, neighbours=1, nprocs=nprocs) del distance_array cache = {} cache['valid_index'] = valid_index cache['valid_output_index'] = valid_output_index cache['index_array'] = index_array return cache def calc_quick_params(in_area, out_area): """Calculate projection parameters for quick interpolation mode""" ridx, cidx = utils.generate_quick_linesample_arrays(in_area, out_area) cache = {} cache['row_idx'] = ridx cache['col_idx'] = cidx return cache def calc_bilinear_params(in_area, out_area, radius, nprocs=1): """Calculate projection parameters for bilinear interpolation""" bilinear_t, bilinear_s, input_idxs, idx_arr = \ get_bil_info(in_area, out_area, radius, neighbours=32, nprocs=nprocs, masked=False) cache = {} cache['bilinear_s'] = bilinear_s cache['bilinear_t'] = bilinear_t cache['input_idxs'] = input_idxs cache['idx_arr'] = idx_arr return cache def calc_ewa_params(in_area, out_area): """Calculate projection parameters for EWA interpolation""" swath_points_in_grid, cols, rows = ll2cr(in_area, out_area) del swath_points_in_grid cache = {} # cache['ewa_swath_points_in_grid'] = \ # swath_points_in_grid cache['ewa_cols'] = cols cache['ewa_rows'] = rows return cache def get_precompute_cache_fname(in_id, out_id, in_area, out_area, mode, proj_dir): """Form filename for precompute cache""" filename = (in_id + "2" + out_id + "_" + str(_get_area_hash(in_area)) + "to" + str(_get_area_hash(out_area)) + "_" + mode + ".npz") return os.path.join(proj_dir, filename) def get_area_and_id(area, latlons=None): try: area_def = get_area_def(area) area_id = area except (utils.AreaNotFound, AttributeError): try: area_id = area.area_id area_def = area except AttributeError: if latlons is None: raise else: # TODO: Note that latlons are in order (lons, lats) area_def = geometry.SwathDefinition(lons=latlons[0], lats=latlons[1]) area_id = area return area_def, area_id mpop-1.5.0/mpop/satellites/000077500000000000000000000000001317160620000156105ustar00rootroot00000000000000mpop-1.5.0/mpop/satellites/__init__.py000066400000000000000000000170641317160620000177310ustar00rootroot00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright (c) 2010, 2011, 2014, 2015, 2016. # SMHI, # Folkborgsvägen 1, # Norrköping, # Sweden # Author(s): # Martin Raspaud # This file is part of mpop. # mpop is free software: you can redistribute it and/or modify it under the # terms of the GNU General Public License as published by the Free Software # Foundation, either version 3 of the License, or (at your option) any later # version. # mpop is distributed in the hope that it will be useful, but WITHOUT ANY # WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR # A PARTICULAR PURPOSE. See the GNU General Public License for more details. # You should have received a copy of the GNU General Public License along with # mpop. If not, see . """mpop.satellites is the module englobes all satellite specific modules. 
In itself, it holds the mighty :func:`mpop.satellites.get_sat_instr_compositer` method. """ import os.path import weakref from ConfigParser import ConfigParser, NoSectionError, NoOptionError import logging from mpop import CONFIG_PATH from mpop.scene import SatelliteInstrumentScene LOG = logging.getLogger(__name__) def get_custom_composites(name): """Get the home-made methods for building composites for a given satellite or instrument *name*. """ conf = ConfigParser() conf.read(os.path.join(CONFIG_PATH, "mpop.cfg")) try: module_name = conf.get("composites", "module") except (NoSectionError, NoOptionError): LOG.debug("No custom composites provided in config file...") return [] LOG.debug("module_name = %s", str(module_name)) try: name = name.replace("/", "") module = __import__(module_name, globals(), locals(), [name]) except ImportError: LOG.debug("Failed to import custom compositer for %s", str(name)) return [] try: return getattr(module, name) except AttributeError: LOG.debug("Could not get attribute %s from %s", str(name), str(module)) return [] def get_sat_instr_compositer((satellite, number, variant), instrument): """Get the compositer class for a given satellite, defined by the three strings *satellite*, *number*, and *variant*, and *instrument*. The class is then filled with custom composites if there are any (see :func:`get_custom_composites`). If no class is found, an attempt is made to build the class from a corresponding configuration file, see :func:`build_sat_instr_compositer`. """ module_name = variant + satellite + number class_name = (variant.capitalize() + satellite.capitalize() + number.capitalize() + instrument.capitalize()) try: module = __import__(module_name, globals(), locals(), [class_name]) klass = getattr(module, class_name) for k in get_custom_composites(instrument): klass.add_method(k) return klass except (ImportError, AttributeError): return build_sat_instr_compositer((satellite, number, variant), instrument) def build_instrument_compositer(instrument_name): """Automatically generate an instrument compositer class from its *instrument_name*. The class is then filled with custom composites if there are any (see :func:`get_custom_composites`). """ from mpop.instruments.visir import VisirCompositer instrument_class = type(instrument_name.capitalize() + "Compositer", (VisirCompositer,), {"instrument_name": instrument_name}) for i in get_custom_composites(instrument_name): instrument_class.add_method(i) return instrument_class def build_sat_instr_compositer((satellite, number, variant), instrument): """Build a compositer class for the given satellite (defined by the three strings *satellite*, *number*, and *variant*) and *instrument* on the fly, using data from a corresponding config file. They inherit from the corresponding instrument class, which is also created on the fly if no predefined module (containing a compositer) for this instrument is available (see :func:`build_instrument_compositer`). """ fullname = variant + satellite + number conf = ConfigParser() config_file = os.path.join(CONFIG_PATH, fullname + ".cfg") LOG.debug("Looking for config file %s", config_file) if not os.path.exists(config_file): LOG.error("Can't find config file %s, is PPP_CONFIG_DIR set?", config_file) conf.read(config_file) try: LOG.debug('Instrument: ' + str(instrument)) mod = __import__("mpop.instruments."
+ instrument, globals(), locals(), [instrument.capitalize() + 'Compositer']) instrument_class = getattr(mod, instrument.capitalize() + 'Compositer') for i in get_custom_composites(instrument): # LOG.debug("Custom composite = " + str(i)) instrument_class.add_method(i) except (ImportError, AttributeError): LOG.debug("Build instrument compositer: " + str(instrument)) instrument_class = build_instrument_compositer(instrument) sat_class = type(str(variant.capitalize() + satellite.capitalize() + number.capitalize() + instrument.capitalize() + "Compositer"), (instrument_class,), {}) for i in get_custom_composites(instrument): LOG.debug("Add method to sat_class: " + str(i)) sat_class.add_method(i) return sat_class class GeostationaryFactory(object): """Factory for geostationary satellite scenes. """ @staticmethod def create_scene(satname, satnumber, instrument, time_slot, area=None, variant=''): """Create a compound satellite scene. """ return GenericFactory.create_scene(satname, satnumber, instrument, time_slot, None, area, variant) class PolarFactory(object): """Factory for polar satellite scenes. """ @staticmethod def create_scene(satname, satnumber, instrument, time_slot, orbit=None, area=None, variant=''): """Create a compound satellite scene. """ return GenericFactory.create_scene(satname, satnumber, instrument, time_slot, orbit, area, variant) class GenericFactory(object): """Factory for generic satellite scenes. """ @staticmethod def create_scene(satname, satnumber, instrument, time_slot, orbit, area=None, variant=''): """Create a compound satellite scene. """ satellite = (satname, satnumber, variant) instrument_scene = SatelliteInstrumentScene(satellite=satellite, instrument=instrument, area=area, orbit=orbit, time_slot=time_slot) compositer = get_sat_instr_compositer(satellite, instrument) instrument_scene._CompositerClass = compositer if compositer is not None: # Pass weak ref to compositor to allow garbage collection instrument_scene.image = compositer( weakref.proxy(instrument_scene)) return instrument_scene mpop-1.5.0/mpop/satin/000077500000000000000000000000001317160620000145555ustar00rootroot00000000000000mpop-1.5.0/mpop/satin/__init__.py000066400000000000000000000015351317160620000166720ustar00rootroot00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright (c) 2009-2010. # SMHI, # Folkborgsvägen 1, # Norrköping, # Sweden # Author(s): # Martin Raspaud # This file is part of mpop. # mpop is free software: you can redistribute it and/or modify it under the # terms of the GNU General Public License as published by the Free Software # Foundation, either version 3 of the License, or (at your option) any later # version. # mpop is distributed in the hope that it will be useful, but WITHOUT ANY # WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR # A PARTICULAR PURPOSE. See the GNU General Public License for more details. # You should have received a copy of the GNU General Public License along with # mpop. If not, see . """Satin Package initializer. 
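It gathers the format specific reader plugins (aapp1b, eps1a, eps_avhrr, ascat_nc, ...) that the scene objects use to load data from the various supported file formats.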
""" mpop-1.5.0/mpop/satin/aapp1b.py000066400000000000000000000730331317160620000163010ustar00rootroot00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright (c) 2012, 2013, 2014, 2015, 2016, 2017 SMHI # Author(s): # Martin Raspaud # Adam Dybbroe # Nina Håkansson # Oana Nicola # Lars Ørum Rasmussen # Panu Lahtinen # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # You should have received a copy of the GNU General Public License # along with this program. If not, see . """Reader for aapp level 1b data. Options for loading: - pre_launch_coeffs (False): use pre-launch coefficients if True, operational otherwise (if available). http://research.metoffice.gov.uk/research/interproj/nwpsaf/aapp/ NWPSAF-MF-UD-003_Formats.pdf """ import numpy as np import os import logging import datetime import glob from ConfigParser import ConfigParser from mpop import CONFIG_PATH LOGGER = logging.getLogger('aapp1b') def load(satscene, *args, **kwargs): """Read data from file and load it into *satscene*. A possible *calibrate* keyword argument is passed to the AAPP reader. Should be 0 for off (counts), 1 for default (brightness temperatures and reflectances), and 2 for radiances only. If *use_extern_calib* keyword argument is set True, use external calibration data. """ del args conf = ConfigParser() conf.read(os.path.join(CONFIG_PATH, satscene.fullname + ".cfg")) options = {} for option, value in conf.items(satscene.instrument_name + "-level2", raw=True): options[option] = value if kwargs.get("filename") is not None: options["full_filename"] = kwargs["filename"] options["calibrate"] = kwargs.get("calibrate", True) options["pre_launch_coeffs"] = kwargs.get("pre_launch_coeffs", False) options["use_extern_calib"] = kwargs.get("use_extern_calib", False) LOGGER.info("Loading instrument '%s'", satscene.instrument_name) try: CASES[satscene.instrument_name](satscene, options) except KeyError: raise KeyError("Unknown instrument '%s'" % satscene.instrument_name) def load_avhrr(satscene, options): """Read avhrr data from file and load it into *satscene*. 
""" if "filename" not in options: raise IOError("No filename given, cannot load.") loaded = set([chn.name for chn in satscene.loaded_channels()]) chns = (satscene.channels_to_load & (set(AVHRR_CHANNEL_NAMES) - loaded)) LOGGER.info("Loading channels %s", str(sorted(list(chns)))) if len(chns) == 0: return values = {"orbit": '%05d' % int(satscene.orbit), "satname": satscene.satname, "number": satscene.number, "instrument": satscene.instrument_name, "satellite": satscene.fullname } done_reading = False if "full_filename" in options: filename = options["full_filename"] LOGGER.debug("Loading from %s", filename) scene = AAPP1b(filename) try: scene.read() done_reading = True except ValueError: LOGGER.info("Can't read %s", filename) if not done_reading: filename = \ os.path.join(satscene.time_slot.strftime(options["dir"]) % values, satscene.time_slot.strftime( options["filename"]) % values) file_list = glob.glob(filename) if len(file_list) > 1: LOGGER.info("More than one l1b file found: %s", str(file_list)) # hrpt_noaa18_20150110_1658_49685.l1b candidate = ('hrpt_' + str(satscene.satname) + str(satscene.number) + satscene.time_slot.strftime('_%Y%m%d_%H%M_') + '%05d' % int(satscene.orbit) + '.l1b') LOGGER.debug("Suggested filename = %s", str(candidate)) candidate_found = False for fname in file_list: l1bname = os.path.basename(fname) if l1bname == candidate: filename = fname candidate_found = True LOGGER.info('The l1b file chosen is this: %s', str(filename)) break if not candidate_found: LOGGER.info("More than one l1b file found: %s", str(file_list)) LOGGER.warning("Couldn't decide which one to take. " "Try take the first one: %s", str(filename)) filename = file_list[0] elif len(file_list) == 0: raise IOError("No l1b file matching!: %s", filename) else: filename = file_list[0] LOGGER.debug("Loading from %s", filename) scene = AAPP1b(filename) try: scene.read() except ValueError: LOGGER.info("Can't read %s, exiting.", filename) return calib_coeffs = None if options["use_extern_calib"]: import h5py LOGGER.info("Reading external calibration coefficients.") try: fid = h5py.File(os.path.join(CONFIG_PATH, satscene.satname + '_calibration_data.h5'), 'r') calib_coeffs = {} for key in fid.keys(): date_diffs = [] for dat in fid[key]['datetime']: date_diffs.append(np.abs(satscene.time_slot - datetime.datetime(dat[0], dat[1], dat[2]))) idx = date_diffs.index(min(date_diffs)) date_diff = satscene.time_slot - \ datetime.datetime(fid[key]['datetime'][idx][0], fid[key]['datetime'][idx][1], fid[key]['datetime'][idx][2]) if date_diff.days < 0: older_or_newer = "newer" else: older_or_newer = "older" LOGGER.info("External calibration for %s is %d " "days %s than data.", key, date_diffs[idx].days, older_or_newer) calib_coeffs[key] = (fid[key]['slope1'][idx], fid[key]['intercept1'][idx], fid[key]['slope2'][idx], fid[key]['intercept2'][idx]) fid.close() if 'ch1' not in calib_coeffs: calib_coeffs['ch1'] = None if 'ch2' not in calib_coeffs: calib_coeffs['ch2'] = None if 'ch3a' not in calib_coeffs: calib_coeffs['ch3a'] = None except IOError: LOGGER.info("No external calibration data found.") scene.calibrate(chns, calibrate=options.get('calibrate', 1), pre_launch_coeffs=options["pre_launch_coeffs"], calib_coeffs=calib_coeffs) if satscene.area is None: scene.navigate() try: from pyresample import geometry except ImportError as ex_: LOGGER.debug("Could not load pyresample: %s", str(ex_)) satscene.lat = scene.lats satscene.lon = scene.lons else: satscene.area = geometry.SwathDefinition(lons=scene.lons, lats=scene.lats) area_name 
= ("swath_" + satscene.fullname + "_" + str(satscene.time_slot) + "_" + str(scene.lats.shape)) satscene.area.area_id = area_name satscene.area.name = "Satellite projection" satscene.area_id = area_name for chn in chns: if (chn in scene.channels) and np.ma.count(scene.channels[chn]) > 0: satscene[chn].data = scene.channels[chn] satscene[chn].info['units'] = scene.units[chn] satscene[chn].area = satscene.area else: del satscene[chn] AVHRR_CHANNEL_NAMES = ("1", "2", "3A", "3B", "4", "5") # AAPP 1b header _HEADERTYPE = np.dtype([("siteid", "S3"), ("blank", "S1"), ("l1bversnb", " np.expand_dims(intersection, 1) channel[mask1] = (channel * slope1 + intercept1)[mask1] channel[mask2] = (channel * slope2 + intercept2)[mask2] channel[channel < 0] = np.nan return np.ma.masked_invalid(channel) def _ir_calibrate(header, data, irchn, calib_type): """IR calibration *calib_type* = 0: Counts *calib_type* = 1: BT *calib_type* = 2: Radiances """ count = data['hrpt'][:, :, irchn + 2].astype(np.float) if calib_type == 0: return count k1_ = np.expand_dims(data['calir'][:, irchn, 0, 0] / 1.0e9, 1) k2_ = np.expand_dims(data['calir'][:, irchn, 0, 1] / 1.0e6, 1) k3_ = np.expand_dims(data['calir'][:, irchn, 0, 2] / 1.0e6, 1) # Count to radiance conversion: rad = k1_ * count * count + k2_ * count + k3_ all_zero = np.logical_and(np.logical_and(np.equal(k1_, 0), np.equal(k2_, 0)), np.equal(k3_, 0)) idx = np.indices((all_zero.shape[0],)) suspect_line_nums = np.repeat(idx[0], all_zero[:, 0]) if suspect_line_nums.any(): LOGGER.info("Suspicious scan lines: %s", str(suspect_line_nums)) if calib_type == 2: return rad # Central wavenumber: cwnum = header['radtempcnv'][0, irchn, 0] if irchn == 0: cwnum = cwnum / 1.0e2 else: cwnum = cwnum / 1.0e3 bandcor_2 = header['radtempcnv'][0, irchn, 1] / 1e5 bandcor_3 = header['radtempcnv'][0, irchn, 2] / 1e6 # Count to radiance conversion: rad = k1_ * count * count + k2_ * count + k3_ if calib_type == 2: return rad all_zero = np.logical_and(np.logical_and(np.equal(k1_, 0), np.equal(k2_, 0)), np.equal(k3_, 0)) idx = np.indices((all_zero.shape[0],)) suspect_line_nums = np.repeat(idx[0], all_zero[:, 0]) if suspect_line_nums.any(): LOGGER.info("Suspect scan lines: %s", str(suspect_line_nums)) ir_const_1 = 1.1910659e-5 ir_const_2 = 1.438833 t_planck = (ir_const_2 * cwnum) / \ np.log(1 + ir_const_1 * cwnum * cwnum * cwnum / rad) # Band corrections applied to t_planck to get correct # brightness temperature for channel: if bandcor_2 < 0: # Post AAPP-v4 tb_ = bandcor_2 + bandcor_3 * t_planck else: # AAPP 1 to 4 tb_ = (t_planck - bandcor_2) / bandcor_3 # tb_[tb_ <= 0] = np.nan # Data with count=0 are often related to erroneous (bad) lines, but in case # of saturation (channel 3b) count=0 can be observed and associated to a # real measurement. So we leave out this filtering to the user! # tb_[count == 0] = np.nan # tb_[rad == 0] = np.nan return np.ma.masked_array(tb_, np.isnan(tb_)) def show(data, negate=False): """Show the stetched data. 
""" from PIL import Image data = np.array((data - data.min()) * 255.0 / (data.max() - data.min()), np.uint8) if negate: data = 255 - data img = Image.fromarray(data) img.show() CASES = { "avhrr": load_avhrr, "avhrr/3": load_avhrr, } def main(): import sys from mpop.utils import debug_on debug_on() scene = AAPP1b(sys.argv[1]) scene.read() for name, val in zip(scene._header[0].dtype.names, scene._header[0][0]): print(name + " " + str(val)) starttime = datetime.datetime(scene._header[0][0]["startdatayr"], 1, 1, 0, 0) starttime += \ datetime.timedelta(days=int(scene._header[0][0]["startdatady"]) - 1, seconds=scene._header[0][0]["startdatatime"] / 1000.0) print("starttime: " + str(starttime)) endtime = datetime.datetime(scene._header[-1][0]["enddatayr"], 1, 1, 0, 0) endtime += \ datetime.timedelta(days=int(scene._header[-1][0]["enddatady"]) - 1, seconds=scene._header[-1][0]["enddatatime"] / 1000.0) print("endtime: " + str(endtime)) # print scene._data['hrpt'].shape # show(scene._data['hrpt'][:, :, 4].astype(np.float)) # raw_input() scene.calibrate() scene.navigate() for i__ in AVHRR_CHANNEL_NAMES: data_ = scene.channels[i__] print >> sys.stderr, "%-3s" % i__, \ "%6.2f%%" % (100. * (float(np.ma.count(data_)) / data_.size)), \ "%6.2f, %6.2f, %6.2f" % (data_.min(), data_.mean(), data_.max()) show(scene.channels['2'], negate=False) if __name__ == "__main__": main() mpop-1.5.0/mpop/satin/ahamap_aapp1b.py000066400000000000000000000124671317160620000176140ustar00rootroot00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright (c) 2010, 2011, 2012, 2014. # SMHI, # Folkborgsvägen 1, # Norrköping, # Sweden # Author(s): # Martin Raspaud # This file is part of mpop. # mpop is free software: you can redistribute it and/or modify it under the # terms of the GNU General Public License as published by the Free Software # Foundation, either version 3 of the License, or (at your option) any later # version. # mpop is distributed in the hope that it will be useful, but WITHOUT ANY # WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR # A PARTICULAR PURPOSE. See the GNU General Public License for more details. # You should have received a copy of the GNU General Public License along with # mpop. If not, see . """Interface to AAPP level 1b format. Uses the AHAMAP reader. """ import warnings warnings.warn(__name__ + " is deprecated, please use aapp1b instead.", DeprecationWarning) import glob import os.path from ConfigParser import ConfigParser import math import numpy as np import logging from mpop import CONFIG_PATH LOG = logging.getLogger(__name__) # Using ahamap: FIXME! EPSILON = 0.001 def load(satscene, *args, **kwargs): """Read data from file and load it into *satscene*. """ del args, kwargs conf = ConfigParser() conf.read(os.path.join(CONFIG_PATH, satscene.fullname + ".cfg")) options = {} for option, value in conf.items(satscene.instrument_name + "-level2", raw=True): options[option] = value CASES[satscene.instrument_name](satscene, options) def load_avhrr(satscene, options): """Read avhrr data from file and load it into *satscene*. 
""" if "filename" not in options: raise IOError("No filename given, cannot load.") chns = satscene.channels_to_load & set(["1", "2", "3A", "3B", "4", "5"]) if len(chns) == 0: return values = {"orbit": satscene.orbit, "satname": satscene.satname, "number": satscene.number, "instrument": satscene.instrument_name, "satellite": satscene.fullname } filename = os.path.join(satscene.time_slot.strftime(options["dir"]) % values, satscene.time_slot.strftime(options["filename"]) % values) file_list = glob.glob(filename) if len(file_list) > 1: raise IOError("More than one l1b file matching!") elif len(file_list) == 0: raise IOError("No l1b file matching!: " + filename) filename = file_list[0] LOG.debug("Loading from " + filename) import avhrr # AHAMAP module avh = avhrr.avhrr(filename) avh.get_unprojected() instrument_data = avh.build_raw() available_channels = set([]) data_channels = {} for chn in instrument_data.data: channel_name = chn.info.info["channel_id"][3:].upper() available_channels |= set([channel_name]) data_channels[channel_name] = chn.data for chn in satscene.channels_to_load: if chn in available_channels: if chn in ["1", "2", "3A"]: gain = instrument_data.info["vis_gain"] intercept = instrument_data.info["vis_intercept"] units = "%" else: gain = instrument_data.info["ir_gain"] intercept = instrument_data.info["ir_intercept"] units = "K" chn_array = np.ma.array(data_channels[chn]) missing_data = instrument_data.info["missing_data"] chn_array = np.ma.masked_inside(chn_array, missing_data - EPSILON, missing_data + EPSILON) no_data = instrument_data.info["nodata"] chn_array = np.ma.masked_inside(chn_array, no_data - EPSILON, no_data + EPSILON) satscene[chn] = chn_array satscene[chn].data = np.ma.masked_less(satscene[chn].data * gain + intercept, 0) satscene[chn].info['units'] = units else: LOG.warning("Channel " + str(chn) + " not available, not loaded.") # Compulsory global attribudes satscene.info["title"] = (satscene.satname.capitalize() + satscene.number + " satellite, " + satscene.instrument_name.capitalize() + " instrument.") satscene.info["institution"] = "Original data disseminated by EumetCast." satscene.add_to_history("HRIT/LRIT data read by mipp/mpop.") satscene.info["references"] = "No reference." satscene.info["comments"] = "No comment." lons = instrument_data.londata / math.pi * 180 lats = instrument_data.latdata / math.pi * 180 try: from pyresample import geometry satscene.area = geometry.SwathDefinition(lons=lons, lats=lats) except ImportError: satscene.area = None satscene.lat = lats satscene.lon = lons CASES = { "avhrr": load_avhrr } mpop-1.5.0/mpop/satin/ascat_nc.py000066400000000000000000000054771317160620000167170ustar00rootroot00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright (c) 2014, 2015 Abhay Devasthale and Martin Raspaud # Author(s): # Martin Raspaud # Adam Dybbroe # Sajid Pareeth # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # You should have received a copy of the GNU General Public License # along with this program. If not, see . """Loader for ascat, netcdf format. 
The driver works for netcdf format of ASCAT soil moisture swath data downloaded from here: http://navigator.eumetsat.int/discovery/Start/DirectSearch/DetailResult.do?f%28r0%29=EO:EUM:DAT:METOP:SOMO12 rename the CONFIG file mpop/mpop/etc/metop.ascat.cfg.template to metop.cfg to read the ASCAT data """ import numpy as np from ConfigParser import ConfigParser from mpop import CONFIG_PATH import os from netCDF4 import Dataset def load(satscene): """Load ascat data. """ # Read config file content conf = ConfigParser() conf.read(os.path.join(CONFIG_PATH, satscene.fullname + ".cfg")) values = {"orbit": satscene.orbit, "satname": satscene.satname, "number": satscene.number, "instrument": satscene.instrument_name, "satellite": satscene.fullname, "time_slot": satscene.time_slot, "time": satscene.time_slot.strftime('%Y%m%d%H%M%S') } filename = os.path.join( conf.get("ascat-level2", "dir"), satscene.time_slot.strftime(conf.get("ascat-level2", "filename", raw=True)) % values) # Load data from netCDF file ds = Dataset(filename, 'r') for chn_name in satscene.channels_to_load: # Read variable corresponding to channel name data = np.ma.masked_array( ds.variables[chn_name][:], np.isnan(ds.variables[chn_name][:])) satscene[chn_name] = data lons = ds.variables['longitude'][:] lats = ds.variables['latitude'][:] # Set scene area as pyresample geometry object try: from pyresample import geometry satscene.area = geometry.SwathDefinition(lons=lons, lats=lats) except ImportError: # pyresample not available. Set lon and lats directly satscene.area = None satscene.lat = lats satscene.lon = lons mpop-1.5.0/mpop/satin/eps1a.py000066400000000000000000000265211317160620000161460ustar00rootroot00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright (c) 2010, 2011. # SMHI, # Folkborgsvägen 1, # Norrköping, # Sweden # Author(s): # Martin Raspaud # This file is part of mpop. # mpop is free software: you can redistribute it and/or modify it under the # terms of the GNU General Public License as published by the Free Software # Foundation, either version 3 of the License, or (at your option) any later # version. # mpop is distributed in the hope that it will be useful, but WITHOUT ANY # WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR # A PARTICULAR PURPOSE. See the GNU General Public License for more details. # You should have received a copy of the GNU General Public License along with # mpop. If not, see . """Interface to EPS level 1a format. Uses AAPP and the aapp1b reader. """ import glob import logging import os.path import shutil import subprocess import tempfile from ConfigParser import ConfigParser from mpop.satellites import PolarFactory import mpop.satin.aapp1b import datetime from mpop import CONFIG_PATH WORKING_DIR = "/tmp" SATPOS_DIR = os.path.sep.join(os.environ["AAPP_PREFIX"].split(os.path.sep)[:-1]) SATPOS_DIR = os.path.join(SATPOS_DIR, "data", "satpos") LOG = logging.getLogger("eps1a loader") def get_satpos_file(satpos_time, satname): """Return the current satpos file """ satpos_file = os.path.join(SATPOS_DIR, "satpos_"+ satname+"_"+ satpos_time.strftime("%Y%m%d")+".txt") if os.path.exists(satpos_file): return satpos_file elif satpos_time.hour < 2: satpos_time -= datetime.timedelta(days=1) satpos_file = os.path.join(SATPOS_DIR, "satpos_"+ satname+"_"+ satpos_time.strftime("%Y%m%d")+".txt") return satpos_file else: raise IOError("Missing satpos file:" + satpos_file) def load(satscene): """Read data from file and load it into *satscene*. 
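    The reader is driven by the ``avhrr-level1`` section of the satellite
    configuration file; a hypothetical minimal section (the values are
    assumptions)::

        [avhrr-level1]
        dir = /data/eps1a
        filename = AVHR_xxx_1A_M02_%Y%m%d%H%M*
        shortname = M02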
""" conf = ConfigParser() conf.read(os.path.join(CONFIG_PATH, satscene.fullname + ".cfg")) options = {} for option, value in conf.items(satscene.instrument_name + "-level1", raw = True): options[option] = value CASES[satscene.instrument_name](satscene, options) def load_avhrr(satscene, options): """Read avhrr data from file and load it into *satscene*. """ if "filename" not in options: raise IOError("No filename given, cannot load.") filename = os.path.join( options["dir"], (satscene.time_slot.strftime(options["filename"]))) file_list = glob.glob(satscene.time_slot.strftime(filename)) if len(file_list) > 1: raise IOError("More than one l1a file matching!") elif len(file_list) == 0: raise IOError("No l1a file matching!: "+ satscene.time_slot.strftime(filename)) filename = file_list[0] conf = ConfigParser() conf.read(os.path.join(CONFIG_PATH, satscene.fullname + ".cfg")) new_dir = conf.get(satscene.instrument_name + "-level2", "dir") new_name = conf.get(satscene.instrument_name + "-level2", "filename") pathname = os.path.join(new_dir, satscene.time_slot.strftime(new_name)) convert_to_1b(filename, pathname, satscene.time_slot, options["shortname"]) mpop.satin.aapp1b.load(satscene) os.remove(pathname) def convert_to_1b(in_file, out_file, start_time, shortname): """Convert concatenated file to level 1b. """ (handle, tempname) = tempfile.mkstemp(prefix="eps1a_decommuted", dir=WORKING_DIR) os.close(handle) del handle decommutation(in_file, tempname) calibration_navigation(tempname, start_time, shortname) LOG.debug("Saving to "+out_file) shutil.move(tempname, out_file) def calibration_navigation(filename, start_time, shortname): """Perform calibration on *filename* """ import pysdh2orbnum formated_date = start_time.strftime("%d/%m/%y %H:%M:%S.000") satpos_file = get_satpos_file(start_time, shortname) LOG.debug(formated_date) LOG.debug(satpos_file) orbit_number = str(pysdh2orbnum.sdh2orbnum(shortname, formated_date, satpos_file)) avhrcl = ("cd /tmp;" + "$AAPP_PREFIX/AAPP/bin/avhrcl -c -l -s " + shortname + " -d " + start_time.strftime("%Y%m%d") + " -h " + start_time.strftime("%H%M") + " -n " + orbit_number + " " + filename) LOG.debug("Running " + avhrcl) proc = subprocess.Popen(avhrcl, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) (out, err) = proc.communicate() if out: LOG.debug(out) if err: LOG.error(err) anacl1 = ("cd /tmp;" + "$ANA_PATH/bin/ana_lmk_loc -D " + filename) LOG.debug("Running " + anacl1) proc = subprocess.Popen(anacl1, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) (out, err) = proc.communicate() if out: LOG.debug(out) if err: LOG.error(err) anacl2 = ("cd /tmp;" + "$ANA_PATH/bin/ana_estatt -s " + shortname + " -d " + start_time.strftime("%Y%m%d") + " -h " + start_time.strftime("%H%M") + " -n " + orbit_number) LOG.debug("Running " + anacl2) proc = subprocess.Popen(anacl2, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) (out, err) = proc.communicate() if out: LOG.debug(out) if err: LOG.error(err) avhrcl2 = ("cd /tmp;" + "$AAPP_PREFIX/AAPP/bin/avhrcl -l -s " + shortname + " -d " + start_time.strftime("%Y%m%d") + " -h " + start_time.strftime("%H%M") + " -n " + orbit_number + " " + filename) LOG.debug("Running " + avhrcl2) proc = subprocess.Popen(avhrcl2, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) (out, err) = proc.communicate() if out: LOG.debug(out) if err: LOG.error(err) def decommutation(filename_from, filename_to): """Perform decommutation on *filename_from* and save the result in *filename_to*. 
""" decom = "$AAPP_PREFIX/metop-tools/bin/decom-avhrr-metop" flags = "-ignore_degraded_inst_mdr -ignore_degraded_proc_mdr" cmd = (decom+" "+ flags+" "+ filename_from+" "+ filename_to) LOG.debug("Running " + cmd) proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) (out, err) = proc.communicate() if out: LOG.debug(out) if err: LOG.error(err) LOG.debug("Decommutation done") def get_orbit(time_slot, shortname): import pysdh2orbnum formated_date = time_slot.strftime("%d/%m/%y %H:%M:%S.000") satpos_file = get_satpos_file(time_slot, shortname) return str(pysdh2orbnum.sdh2orbnum(shortname, formated_date, satpos_file)) def concatenate(granules, channels=None): """Concatenate eps1a granules. """ if granules[0].file_type.startswith("bzipped"): cat_cmd = "bzcat" else: cat_cmd = "cat" new_names = [] filenames = [os.path.join(granule.directory, granule.file_name) for granule in granules] for filename in filenames: new_name, ext = os.path.splitext(os.path.basename(filename)) del ext new_name = os.path.join(WORKING_DIR, new_name) cmd = (cat_cmd + " " + filename + " > " + new_name) LOG.debug("running " + cmd) proc = subprocess.Popen(cmd, shell=True) proc.communicate() new_names.append(new_name) conffile = os.path.join(CONFIG_PATH, granules[0].fullname + ".cfg") conf = ConfigParser() conf.read(os.path.join(CONFIG_PATH, conffile)) directory = conf.get('avhrr-level1','dir') filename = conf.get('avhrr-level1','filename') filename = granules[0].time_slot.strftime(filename) output_name = os.path.join(directory, filename) arg_string = " ".join(new_names) cmd = "$KAI/kai -i " + arg_string + " -o " + output_name proc = subprocess.Popen(cmd, shell=True) (out, err) = proc.communicate() if out: LOG.debug(out) if err: LOG.error(err) #clean up for new_name in new_names: os.remove(new_name) new_dir = conf.get(granules[0].instrument_name + "-level2", "dir") new_name = conf.get(granules[0].instrument_name + "-level2", "filename") pathname = os.path.join(new_dir, granules[0].time_slot.strftime(new_name)) shortname = conf.get('avhrr-level1','shortname') orbit = get_orbit(granules[0].time_slot, shortname) convert_to_1b(output_name, pathname, granules[0].time_slot, shortname) os.remove(output_name) scene = PolarFactory.create_scene(granules[0].satname, granules[0].number, granules[0].instrument_name, granules[0].time_slot, orbit, variant=granules[0].variant) scene.load(channels) os.remove(pathname) return scene def get_lat_lon(satscene, resolution): """Read lat and lon. """ del resolution return LL_CASES[satscene.instrument_name](satscene, None) def get_lat_lon_avhrr(satscene, options): """Read lat and lon. """ del options return satscene.lat, satscene.lon def get_lonlat(satscene, row, col): """Read lat and lon. """ return LONLAT_CASES[satscene.instrument_name](satscene, row, col) def get_lonlat_avhrr(satscene, row, col): """Read longitude and latitude for a given pixel. """ # Needs the SATID AAPP env variable to be set to find satid.txt... 
import pyaapp import math t_start = satscene.time_slot epoch = datetime.datetime(1950, 1, 1) t50_start = (t_start - epoch) jday_start = t50_start.seconds / (3600.0 *24) + t50_start.days jday_end = jday_start if(satscene.satname == "metop"): satname = "M02" else: satname = satscene.satname + satscene.number satpos_file = get_satpos_file(satscene.time_slot, satname) pyaapp.read_satpos_file(jday_start, jday_end, satscene.satname+" "+str(int(satscene.number)), satpos_file) att = pyaapp.prepare_attitude(int(satscene.number), 0, 0, 0) lonlat = pyaapp.linepixel2lonlat(int(satscene.number), row, col, att, jday_start, jday_end)[1:3] return (lonlat[0] * 180.0 / math.pi, lonlat[1] * 180.0 / math.pi) LONLAT_CASES = { "avhrr": get_lonlat_avhrr } LL_CASES = { "avhrr": get_lat_lon_avhrr } CASES = { "avhrr": load_avhrr } mpop-1.5.0/mpop/satin/eps_avhrr.py000066400000000000000000001027521317160620000171270ustar00rootroot00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright (c) 2010, 2012, 2014. # SMHI, # Folkborgsvägen 1, # Norrköping, # Sweden # Author(s): # Martin Raspaud # This file is part of mpop. # mpop is free software: you can redistribute it and/or modify it under the # terms of the GNU General Public License as published by the Free Software # Foundation, either version 3 of the License, or (at your option) any later # version. # mpop is distributed in the hope that it will be useful, but WITHOUT ANY # WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR # A PARTICULAR PURPOSE. See the GNU General Public License for more details. # You should have received a copy of the GNU General Public License along with # mpop. If not, see . """Interface to EPS Avhrr/3 level 1b format. http://oiswww.eumetsat.org/WEBOPS/eps-pg/AVHRR/AVHRR-PG-11L1bFormat.htm http://www.eumetsat.int/idcplg?IdcService=GET_FILE&\ dDocName=PDF_TEN_96167-EPS-GPFS&RevisionSelectionMethod=LatestReleased http://www.eumetsat.int/idcplg?IdcService=GET_FILE&\ dDocName=PDF_TEN_97231-EPS-AVHRR&RevisionSelectionMethod=LatestReleased """ import struct import datetime import numpy as np import math from scipy import interpolate import os.path import glob from ConfigParser import ConfigParser from mpop import CONFIG_PATH import logging LOG = logging.getLogger(__name__) RECORD_CLASS = ["Reserved", "MPHR", "SPHR", "IPR", "GEADR", "GIADR", "VEADR", "VIADR", "MDR"] INSTRUMENT_GROUP = ["GENERIC", "AMSU-A", "ASCAT", "ATOVS", "AVHRR/3", "GOME", "GRAS", "HIRS/4", "IASA", "MHS", "SEM", "ADCS", "SBUV", "DUMMY", "ARCHIVE", "IASI_L2"] MPHR = [100, 100, 100, 100, 100, 37, 36, 36, 35, 36, 48, 48, 48, 48, 37, 38, 38, 38, 38, 48, 48, 34, 34, 36, 48, 48, 38, 38, 44, 51, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 35, 48, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 41, 41, 41, 34] SPHR = [38, 36] GIADR_SUB = [None, "GIADR-RADIANCE", "GIADR-ANALOG"] MAX_SCAN_LINES = 2000 def get_bit(bitstring, bit): """Get a given *bit* from *bitstring*. """ return bitstring & (2 ** bit) def read_u_bytes(fdes, size): """Read unsigned bytes, and scale it by 10 ** *sf_*. """ cases = { 1: ">B", 2: ">H", 4: ">I", 8: ">Q" } return struct.unpack(cases[size], fdes.read(size))[0] def read_bytes(fdes, size, sf_=0): """Read signed bytes, and scale it by 10 ** *sf_*. 
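    For instance, ``read_bytes(fdes, 2, -2)`` consumes one big-endian signed
    short and returns its value scaled by ``10 ** -2``.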
""" cases = { 1: ">b", 2: ">h", 4: ">i", 8: ">q" } if sf_ != 0: return struct.unpack(cases[size], fdes.read(size))[0] * 10.0 ** sf_ else: return struct.unpack(cases[size], fdes.read(size))[0] def read_short_cds(fdes): """Read a short cds date. """ difference = datetime.timedelta(days=read_u_bytes(fdes, 2), milliseconds=read_u_bytes(fdes, 4)) epoch = datetime.datetime(2000, 1, 1) return epoch + difference def read_long_cds(fdes): """Read a long cds date. """ difference = datetime.timedelta(days=read_u_bytes(fdes, 2), milliseconds=read_u_bytes(fdes, 4), microseconds=read_u_bytes(fdes, 2)) epoch = datetime.datetime(2000, 1, 1) return epoch + difference def read_ascii_field(fdes, size): """Read an ascii field. """ field_name = fdes.read(30).strip() fdes.read(2) field_value = fdes.read(size - 33).strip() fdes.read(1) return (field_name, field_value) def read_bitstring(fdes, size): """Read a bit string. """ cases = { 1: ">B", 2: ">H", 4: ">I", 8: ">Q" } return struct.unpack(cases[size], fdes.read(size))[0] def print_bitstring(s__): """Print a bitstring. """ res = "" ts_ = s__ i = 0 for i in range(16): res = str(ts_ & 1) + res ts_ = ts_ >> 1 del i print res def read_grh(fdes): """Read GRH. """ grh = {} record_class = fdes.read(1) if record_class == "": return "EOF" grh["RECORD_CLASS"] = RECORD_CLASS[ord(record_class)] grh["INSTRUMENT_GROUP"] = INSTRUMENT_GROUP[ord(fdes.read(1))] grh["RECORD_SUBCLASS"] = ord(fdes.read(1)) grh["RECORD_SUBCLASS_VERSION"] = ord(fdes.read(1)) grh["RECORD_SIZE"] = read_u_bytes(fdes, 4) grh["RECORD_START_TIME"] = read_short_cds(fdes) grh["RECORD_STOP_TIME"] = read_short_cds(fdes) return grh def read_mphr(fdes, grh, metadata): """Read MPHR. """ del grh for i in MPHR: field_name, field_value = read_ascii_field(fdes, i) try: metadata[field_name] = eval(field_value) except: metadata[field_name] = str(field_value) return metadata def read_sphr(fdes, grh, metadata): """Read SPHR. """ del grh if(metadata["INSTRUMENT_ID"] != "AVHR"): raise NotImplementedError("Only Avhrr for now...") fdes.read(49) for i in SPHR: field_name, field_value = read_ascii_field(fdes, i) try: metadata[field_name] = eval(field_value) except: metadata[field_name] = str(field_value) def read_ipr(fdes, grh, metadata): """Read IPR. """ del grh, metadata ipr = {} ipr["TARGET_RECORD_CLASS"] = read_u_bytes(fdes, 1) ipr["TARGET_INSTRUMENT_GROUP"] = read_u_bytes(fdes, 1) ipr["TARGET_RECORD_SUBCLASS"] = read_u_bytes(fdes, 1) ipr["TARGET_RECORD_OFFSET"] = read_u_bytes(fdes, 4) return ipr def read_geadr(fdes, grh, metadata): """Read GEADR. """ del metadata geadr = {} field_name, field_val = read_ascii_field(fdes, grh["RECORD_SIZE"] - 20) geadr[field_name] = field_val return geadr def read_giadr(fdes, grh, metadata): """Read GIADR. """ if(metadata["INSTRUMENT_ID"] != "AVHR"): raise NotImplementedError("Only Avhrr for now...") if(metadata["PROCESSING_LEVEL"] != "1B"): raise NotImplementedError("Only level 1B for now...") if grh["RECORD_SUBCLASS"] == 1: return read_giadr_radiance(fdes, grh, metadata) elif grh["RECORD_SUBCLASS"] == 2: return read_giadr_analog(fdes, grh, metadata) elif grh["RECORD_SUBCLASS"] == 99: fdes.read(grh["RECORD_SIZE"] - 20) return else: raise ValueError("Undefined subclass " + str(grh["RECORD_SUBCLASS"]) + ", version " + str(grh["RECORD_SUBCLASS_VERSION"]) + "...") def read_giadr_radiance(fdes, grh, metadata): """Read GIADR. 
""" del grh del metadata giadr = {} giadr["RAMP_CALIBRATION_COEFFICIENT"] = read_bitstring(fdes, 2) giadr["YEAR_RECENT_CALIBRATION"] = read_u_bytes(fdes, 2) giadr["DAY_RECENT_CALIBRATION"] = read_u_bytes(fdes, 2) giadr["PRIMARY_CALIBRATION_ALGORITHM_ID"] = read_u_bytes(fdes, 2) giadr["PRIMARY_CALIBRATION_ALGORITHM_OPTION"] = read_bitstring(fdes, 2) giadr["SECONDARY_CALIBRATION_ALGORITHM_ID"] = read_u_bytes(fdes, 2) giadr["SECONDARY_CALIBRATION_ALGORITHM_OPTION"] = read_bitstring(fdes, 2) giadr["IR_TEMPERATURE1_COEFFICIENT1"] = read_bytes(fdes, 2) giadr["IR_TEMPERATURE1_COEFFICIENT2"] = read_bytes(fdes, 2) giadr["IR_TEMPERATURE1_COEFFICIENT3"] = read_bytes(fdes, 2) giadr["IR_TEMPERATURE1_COEFFICIENT4"] = read_bytes(fdes, 2) giadr["IR_TEMPERATURE1_COEFFICIENT5"] = read_bytes(fdes, 2) giadr["IR_TEMPERATURE1_COEFFICIENT6"] = read_bytes(fdes, 2) giadr["IR_TEMPERATURE2_COEFFICIENT1"] = read_bytes(fdes, 2) giadr["IR_TEMPERATURE2_COEFFICIENT2"] = read_bytes(fdes, 2) giadr["IR_TEMPERATURE2_COEFFICIENT3"] = read_bytes(fdes, 2) giadr["IR_TEMPERATURE2_COEFFICIENT4"] = read_bytes(fdes, 2) giadr["IR_TEMPERATURE2_COEFFICIENT5"] = read_bytes(fdes, 2) giadr["IR_TEMPERATURE2_COEFFICIENT6"] = read_bytes(fdes, 2) giadr["IR_TEMPERATURE3_COEFFICIENT1"] = read_bytes(fdes, 2) giadr["IR_TEMPERATURE3_COEFFICIENT2"] = read_bytes(fdes, 2) giadr["IR_TEMPERATURE3_COEFFICIENT3"] = read_bytes(fdes, 2) giadr["IR_TEMPERATURE3_COEFFICIENT4"] = read_bytes(fdes, 2) giadr["IR_TEMPERATURE3_COEFFICIENT5"] = read_bytes(fdes, 2) giadr["IR_TEMPERATURE3_COEFFICIENT6"] = read_bytes(fdes, 2) giadr["IR_TEMPERATURE4_COEFFICIENT1"] = read_bytes(fdes, 2) giadr["IR_TEMPERATURE4_COEFFICIENT2"] = read_bytes(fdes, 2) giadr["IR_TEMPERATURE4_COEFFICIENT3"] = read_bytes(fdes, 2) giadr["IR_TEMPERATURE4_COEFFICIENT4"] = read_bytes(fdes, 2) giadr["IR_TEMPERATURE4_COEFFICIENT5"] = read_bytes(fdes, 2) giadr["IR_TEMPERATURE4_COEFFICIENT6"] = read_bytes(fdes, 2) giadr["CH1_SOLAR_FILTERED_IRRADIANCE"] = read_bytes(fdes, 2, -1) giadr["CH1_EQUIVALENT FILTER_WIDTH"] = read_bytes(fdes, 2, -3) giadr["CH2_SOLAR_FILTERED_IRRADIANCE"] = read_bytes(fdes, 2, -1) giadr["CH2_EQUIVALENT FILTER_WIDTH"] = read_bytes(fdes, 2, -3) giadr["CH3A_SOLAR_FILTERED_IRRADIANCE"] = read_bytes(fdes, 2, -1) giadr["CH3A_EQUIVALENT FILTER_WIDTH"] = read_bytes(fdes, 2, -3) giadr["CH3B_CENTRAL_WAVENUMBER"] = read_bytes(fdes, 4, -2) giadr["CH3B_CONSTANT1"] = read_bytes(fdes, 4, -5) giadr["CH3B_CONSTANT2_SLOPE"] = read_bytes(fdes, 4, -6) giadr["CH4_CENTRAL_WAVENUMBER"] = read_bytes(fdes, 4, -3) giadr["CH4_CONSTANT1"] = read_bytes(fdes, 4, -5) giadr["CH4_CONSTANT2_SLOPE"] = read_bytes(fdes, 4, -6) giadr["CH5_CENTRAL_WAVENUMBER"] = read_bytes(fdes, 4, -3) giadr["CH5_CONSTANT1"] = read_bytes(fdes, 4, -5) giadr["CH5_CONSTANT2_SLOPE"] = read_bytes(fdes, 4, -6) return giadr def read_giadr_analog(fdes, grh, metadata): """Read GIADR. 
""" giadr = {} giadr["PATCH_TEMPERATURE_COEFFICIENT1"] = read_bytes(fdes, 2) giadr["PATCH_TEMPERATURE_COEFFICIENT2"] = read_bytes(fdes, 2) giadr["PATCH_TEMPERATURE_COEFFICIENT3"] = read_bytes(fdes, 2) giadr["PATCH_TEMPERATURE_COEFFICIENT4"] = read_bytes(fdes, 2) giadr["PATCH_TEMPERATURE_COEFFICIENT5"] = read_bytes(fdes, 2) giadr["PATCH_TEMPERATURE_EXTENDED_COEFFICIENT1"] = read_bytes(fdes, 2) giadr["PATCH_TEMPERATURE_EXTENDED_COEFFICIENT2"] = read_bytes(fdes, 2) giadr["PATCH_TEMPERATURE_EXTENDED_COEFFICIENT3"] = read_bytes(fdes, 2) giadr["PATCH_TEMPERATURE_EXTENDED_COEFFICIENT4"] = read_bytes(fdes, 2) giadr["PATCH_TEMPERATURE_EXTENDED_COEFFICIENT5"] = read_bytes(fdes, 2) giadr["PATCH_POWER_COEFFICIENT1"] = read_bytes(fdes, 2) giadr["PATCH_POWER_COEFFICIENT2"] = read_bytes(fdes, 2) giadr["PATCH_POWER_COEFFICIENT3"] = read_bytes(fdes, 2) giadr["PATCH_POWER_COEFFICIENT4"] = read_bytes(fdes, 2) giadr["PATCH_POWER_COEFFICIENT5"] = read_bytes(fdes, 2) giadr["RADIATOR_TEMPERATURE_COEFFICIENT1"] = read_bytes(fdes, 2) giadr["RADIATOR_TEMPERATURE_COEFFICIENT2"] = read_bytes(fdes, 2) giadr["RADIATOR_TEMPERATURE_COEFFICIENT3"] = read_bytes(fdes, 2) giadr["RADIATOR_TEMPERATURE_COEFFICIENT4"] = read_bytes(fdes, 2) giadr["RADIATOR_TEMPERATURE_COEFFICIENT5"] = read_bytes(fdes, 2) giadr["BLACKBODY_TEMPERATURE1_COEFFICIENT1"] = read_bytes(fdes, 2) giadr["BLACKBODY_TEMPERATURE1_COEFFICIENT2"] = read_bytes(fdes, 2) giadr["BLACKBODY_TEMPERATURE1_COEFFICIENT3"] = read_bytes(fdes, 2) giadr["BLACKBODY_TEMPERATURE1_COEFFICIENT4"] = read_bytes(fdes, 2) giadr["BLACKBODY_TEMPERATURE1_COEFFICIENT5"] = read_bytes(fdes, 2) giadr["BLACKBODY_TEMPERATURE2_COEFFICIENT1"] = read_bytes(fdes, 2) giadr["BLACKBODY_TEMPERATURE2_COEFFICIENT2"] = read_bytes(fdes, 2) giadr["BLACKBODY_TEMPERATURE2_COEFFICIENT3"] = read_bytes(fdes, 2) giadr["BLACKBODY_TEMPERATURE2_COEFFICIENT4"] = read_bytes(fdes, 2) giadr["BLACKBODY_TEMPERATURE2_COEFFICIENT5"] = read_bytes(fdes, 2) giadr["BLACKBODY_TEMPERATURE3_COEFFICIENT1"] = read_bytes(fdes, 2) giadr["BLACKBODY_TEMPERATURE3_COEFFICIENT2"] = read_bytes(fdes, 2) giadr["BLACKBODY_TEMPERATURE3_COEFFICIENT3"] = read_bytes(fdes, 2) giadr["BLACKBODY_TEMPERATURE3_COEFFICIENT4"] = read_bytes(fdes, 2) giadr["BLACKBODY_TEMPERATURE3_COEFFICIENT5"] = read_bytes(fdes, 2) giadr["BLACKBODY_TEMPERATURE4_COEFFICIENT1"] = read_bytes(fdes, 2) giadr["BLACKBODY_TEMPERATURE4_COEFFICIENT2"] = read_bytes(fdes, 2) giadr["BLACKBODY_TEMPERATURE4_COEFFICIENT3"] = read_bytes(fdes, 2) giadr["BLACKBODY_TEMPERATURE4_COEFFICIENT4"] = read_bytes(fdes, 2) giadr["BLACKBODY_TEMPERATURE4_COEFFICIENT5"] = read_bytes(fdes, 2) giadr["ELECTRONIC_CURRENT_COEFFICIENT1"] = read_bytes(fdes, 2) giadr["ELECTRONIC_CURRENT_COEFFICIENT2"] = read_bytes(fdes, 2) giadr["ELECTRONIC_CURRENT_COEFFICIENT3"] = read_bytes(fdes, 2) giadr["ELECTRONIC_CURRENT_COEFFICIENT4"] = read_bytes(fdes, 2) giadr["ELECTRONIC_CURRENT_COEFFICIENT5"] = read_bytes(fdes, 2) giadr["MOTOR_CURRENT_COEFFICIENT1"] = read_bytes(fdes, 2) giadr["MOTOR_CURRENT_COEFFICIENT2"] = read_bytes(fdes, 2) giadr["MOTOR_CURRENT_COEFFICIENT3"] = read_bytes(fdes, 2) giadr["MOTOR_CURRENT_COEFFICIENT4"] = read_bytes(fdes, 2) giadr["MOTOR_CURRENT_COEFFICIENT5"] = read_bytes(fdes, 2) giadr["EARTH_SHIELD_POSITION_COEFFICIENT1"] = read_bytes(fdes, 2) giadr["EARTH_SHIELD_POSITION_COEFFICIENT2"] = read_bytes(fdes, 2) giadr["EARTH_SHIELD_POSITION_COEFFICIENT3"] = read_bytes(fdes, 2) giadr["EARTH_SHIELD_POSITION_COEFFICIENT4"] = read_bytes(fdes, 2) giadr["EARTH_SHIELD_POSITION_COEFFICIENT5"] = 
read_bytes(fdes, 2) giadr["ELECTRONIC_TEMPERATURE_COEFFICIENT1"] = read_bytes(fdes, 2) giadr["ELECTRONIC_TEMPERATURE_COEFFICIENT2"] = read_bytes(fdes, 2) giadr["ELECTRONIC_TEMPERATURE_COEFFICIENT3"] = read_bytes(fdes, 2) giadr["ELECTRONIC_TEMPERATURE_COEFFICIENT4"] = read_bytes(fdes, 2) giadr["ELECTRONIC_TEMPERATURE_COEFFICIENT5"] = read_bytes(fdes, 2) giadr["COOLER_HOUSING_TEMPERATURE_COEFFICIENT1"] = read_bytes(fdes, 2) giadr["COOLER_HOUSING_TEMPERATURE_COEFFICIENT2"] = read_bytes(fdes, 2) giadr["COOLER_HOUSING_TEMPERATURE_COEFFICIENT3"] = read_bytes(fdes, 2) giadr["COOLER_HOUSING_TEMPERATURE_COEFFICIENT4"] = read_bytes(fdes, 2) giadr["COOLER_HOUSING_TEMPERATURE_COEFFICIENT5"] = read_bytes(fdes, 2) giadr["BASEPLATE_TEMPERATURE_COEFFICIENT1"] = read_bytes(fdes, 2) giadr["BASEPLATE_TEMPERATURE_COEFFICIENT2"] = read_bytes(fdes, 2) giadr["BASEPLATE_TEMPERATURE_COEFFICIENT3"] = read_bytes(fdes, 2) giadr["BASEPLATE_TEMPERATURE_COEFFICIENT4"] = read_bytes(fdes, 2) giadr["BASEPLATE_TEMPERATURE_COEFFICIENT5"] = read_bytes(fdes, 2) giadr["MOTOR_HOUSING_TEMPERATURE_COEFFICIENT1"] = read_bytes(fdes, 2) giadr["MOTOR_HOUSING_TEMPERATURE_COEFFICIENT2"] = read_bytes(fdes, 2) giadr["MOTOR_HOUSING_TEMPERATURE_COEFFICIENT3"] = read_bytes(fdes, 2) giadr["MOTOR_HOUSING_TEMPERATURE_COEFFICIENT4"] = read_bytes(fdes, 2) giadr["MOTOR_HOUSING_TEMPERATURE_COEFFICIENT5"] = read_bytes(fdes, 2) giadr["AD_CONVERTER_TEMPERATURE_COEFFICIENT1"] = read_bytes(fdes, 2) giadr["AD_CONVERTER_TEMPERATURE_COEFFICIENT2"] = read_bytes(fdes, 2) giadr["AD_CONVERTER_TEMPERATURE_COEFFICIENT3"] = read_bytes(fdes, 2) giadr["AD_CONVERTER_TEMPERATURE_COEFFICIENT4"] = read_bytes(fdes, 2) giadr["AD_CONVERTER_TEMPERATURE_COEFFICIENT5"] = read_bytes(fdes, 2) giadr["DETECTOR4_BIAS_VOLTAGE_COEFFICIENT1"] = read_bytes(fdes, 2) giadr["DETECTOR4_BIAS_VOLTAGE_COEFFICIENT2"] = read_bytes(fdes, 2) giadr["DETECTOR4_BIAS_VOLTAGE_COEFFICIENT3"] = read_bytes(fdes, 2) giadr["DETECTOR4_BIAS_VOLTAGE_COEFFICIENT4"] = read_bytes(fdes, 2) giadr["DETECTOR4_BIAS_VOLTAGE_COEFFICIENT5"] = read_bytes(fdes, 2) giadr["DETECTOR5_BIAS_VOLTAGE_COEFFICIENT1"] = read_bytes(fdes, 2) giadr["DETECTOR5_BIAS_VOLTAGE_COEFFICIENT2"] = read_bytes(fdes, 2) giadr["DETECTOR5_BIAS_VOLTAGE_COEFFICIENT3"] = read_bytes(fdes, 2) giadr["DETECTOR5_BIAS_VOLTAGE_COEFFICIENT4"] = read_bytes(fdes, 2) giadr["DETECTOR5_BIAS_VOLTAGE_COEFFICIENT5"] = read_bytes(fdes, 2) giadr["CH3B_BLACKBODY_VIEW_COEFFICIENT1"] = read_bytes(fdes, 2) giadr["CH3B_BLACKBODY_VIEW_COEFFICIENT2"] = read_bytes(fdes, 2) giadr["CH3B_BLACKBODY_VIEW_COEFFICIENT3"] = read_bytes(fdes, 2) giadr["CH3B_BLACKBODY_VIEW_COEFFICIENT4"] = read_bytes(fdes, 2) giadr["CH3B_BLACKBODY_VIEW_COEFFICIENT5"] = read_bytes(fdes, 2) giadr["CH4_BLACKBODY_VIEW_COEFFICIENT1"] = read_bytes(fdes, 2) giadr["CH4_BLACKBODY_VIEW_COEFFICIENT2"] = read_bytes(fdes, 2) giadr["CH4_BLACKBODY_VIEW_COEFFICIENT3"] = read_bytes(fdes, 2) giadr["CH4_BLACKBODY_VIEW_COEFFICIENT4"] = read_bytes(fdes, 2) giadr["CH4_BLACKBODY_VIEW_COEFFICIENT5"] = read_bytes(fdes, 2) giadr["CH5_BLACKBODY_VIEW_COEFFICIENT1"] = read_bytes(fdes, 2) giadr["CH5_BLACKBODY_VIEW_COEFFICIENT2"] = read_bytes(fdes, 2) giadr["CH5_BLACKBODY_VIEW_COEFFICIENT3"] = read_bytes(fdes, 2) giadr["CH5_BLACKBODY_VIEW_COEFFICIENT4"] = read_bytes(fdes, 2) giadr["CH5_BLACKBODY_VIEW_COEFFICIENT5"] = read_bytes(fdes, 2) giadr["REFERENCE_VOLTAGE_COEFFICIENT1"] = read_bytes(fdes, 2) giadr["REFERENCE_VOLTAGE_COEFFICIENT2"] = read_bytes(fdes, 2) giadr["REFERENCE_VOLTAGE_COEFFICIENT3"] = read_bytes(fdes, 2) 
giadr["REFERENCE_VOLTAGE_COEFFICIENT4"] = read_bytes(fdes, 2) giadr["REFERENCE_VOLTAGE_COEFFICIENT5"] = read_bytes(fdes, 2) return giadr def read_veadr(fdes, grh, metadata): """Read VEADR. """ veadr = {} field_name, field_val = read_ascii_field(fdes, grh["RECORD_SIZE"] - 20) veadr[field_name] = field_val return veadr def read_mdr(fdes, grh, metadata): """Read MDR. """ if grh["RECORD_SUBCLASS"] != 2: raise ValueError("Only l1b supported for now") return read_mdr_1b(fdes, grh, metadata) def read_mdr_1b(fdes, grh, metadata): """Read MDR section, 1B type. """ mdr = {} mdr["DEGRADED_INST_MDR"] = read_u_bytes(fdes, 1) mdr["DEGRADED_PROC_MDR"] = read_u_bytes(fdes, 1) mdr["EARTH_VIEWS_PER_SCANLINE"] = read_bytes(fdes, 2) scanlength = mdr["EARTH_VIEWS_PER_SCANLINE"] array = (np.fromfile(file=fdes, dtype=">i2", count=scanlength * 5) * 10 ** -2) array = array.reshape(5, scanlength) array[2, :] *= 10 ** -2 mdr["SCENE_RADIANCES"] = array # Channels 1, 2, 3a in units of W/(m^2.sr). # Channels 3b, 4, 5 in units of mW/(m^2.sr.cm^-1). # Channels 1, 2, 4 & 5 with scale factor = 2. # Channels 3a or 3b with scale factor = 4. mdr["TIME_ATTITUDE"] = read_u_bytes(fdes, 4) mdr["EULER_ANGLE"] = (read_bytes(fdes, 2), read_bytes(fdes, 2), read_bytes(fdes, 2)) mdr["NAVIGATION_STATUS"] = read_bitstring(fdes, 4) mdr["SPACECRAFT_ALTITUDE"] = read_u_bytes(fdes, 4) mdr["ANGULAR_RELATIONS_FIRST"] = (read_bytes(fdes, 2), read_bytes(fdes, 2), read_bytes(fdes, 2), read_bytes(fdes, 2)) mdr["ANGULAR_RELATIONS_LAST"] = (read_bytes(fdes, 2), read_bytes(fdes, 2), read_bytes(fdes, 2), read_bytes(fdes, 2)) mdr["EARTH_LOCATION_FIRST"] = np.array((read_bytes(fdes, 4, -4), read_bytes(fdes, 4, -4))) mdr["EARTH_LOCATION_LAST"] = np.array((read_bytes(fdes, 4, -4), read_bytes(fdes, 4, -4))) mdr["NUM_NAVIGATION_POINTS"] = read_bytes(fdes, 2) mdr["ANGULAR_RELATIONS"] = np.fromfile(file=fdes, dtype=">i2", count=412) * 10 ** -2 mdr["EARTH_LOCATIONS"] = np.fromfile(file=fdes, dtype=">i4", count=206) * 10 ** -4 mdr["QUALITY_INDICATOR"] = read_bitstring(fdes, 4) mdr["SCAN_LINE_QUALITY"] = read_bitstring(fdes, 4) mdr["CALIBRATION_QUALITY"] = (read_bitstring(fdes, 2), read_bitstring(fdes, 2), read_bitstring(fdes, 2)) mdr["COUNT_ERROR_FRAME"] = read_u_bytes(fdes, 2) mdr["CH123A_CURVE_SLOPE1"] = (read_bytes(fdes, 4), read_bytes(fdes, 4), read_bytes(fdes, 4)) mdr["CH123A_CURVE_INTERCEPT1"] = (read_bytes(fdes, 4), read_bytes(fdes, 4), read_bytes(fdes, 4)) mdr["CH123A_CURVE_SLOPE2"] = (read_bytes(fdes, 4), read_bytes(fdes, 4), read_bytes(fdes, 4)) mdr["CH123A_CURVE_INTERCEPT2"] = (read_bytes(fdes, 4), read_bytes(fdes, 4), read_bytes(fdes, 4)) mdr["CH123A_CURVE_INTERCEPTION"] = (read_bytes(fdes, 4), read_bytes(fdes, 4), read_bytes(fdes, 4)) mdr["CH123A_TEST_CURVE_SLOPE1"] = (read_bytes(fdes, 4), read_bytes(fdes, 4), read_bytes(fdes, 4)) mdr["CH123A_TEST_CURVE_INTERCEPT1"] = (read_bytes(fdes, 4), read_bytes(fdes, 4), read_bytes(fdes, 4)) mdr["CH123A_TEST_CURVE_SLOPE2"] = (read_bytes(fdes, 4), read_bytes(fdes, 4), read_bytes(fdes, 4)) mdr["CH123A_TEST_CURVE_INTERCEPT2"] = (read_bytes(fdes, 4), read_bytes(fdes, 4), read_bytes(fdes, 4)) mdr["CH123A_TEST_CURVE_INTERCEPTION"] = (read_bytes(fdes, 4), read_bytes(fdes, 4), read_bytes(fdes, 4)) mdr["CH123A_PRELAUNCH_CURVE_SLOPE"] = (read_bytes(fdes, 4), read_bytes(fdes, 4), read_bytes(fdes, 4)) mdr["CH123A_PRELAUNCH_CURVE_INTERCEPT1"] = (read_bytes(fdes, 4), read_bytes(fdes, 4), read_bytes(fdes, 4)) mdr["CH123A_PRELAUNCH_CURVE_SLOPE2"] = (read_bytes(fdes, 4), read_bytes(fdes, 4), read_bytes(fdes, 4)) 
mdr["CH123A_PRELAUNCH_CURVE_INTERCEPT2"] = (read_bytes(fdes, 4), read_bytes(fdes, 4), read_bytes(fdes, 4)) mdr["CH123A_PRELAUNCH_CURVE_INTERCEPTION"] = (read_bytes(fdes, 4), read_bytes(fdes, 4), read_bytes(fdes, 4)) mdr["CH3B45_SECOND_TERM"] = (read_bytes(fdes, 4), read_bytes(fdes, 4), read_bytes(fdes, 4)) mdr["CH3B45_FIRST_TERM"] = (read_bytes(fdes, 4), read_bytes(fdes, 4), read_bytes(fdes, 4)) mdr["CH3B45_ZEROTH_TERM"] = (read_bytes(fdes, 4), read_bytes(fdes, 4), read_bytes(fdes, 4)) mdr["CH3B45_TEST_SECOND_TERM"] = (read_bytes(fdes, 4), read_bytes(fdes, 4), read_bytes(fdes, 4)) mdr["CH3B45_TEST_FIRST_TERM"] = (read_bytes(fdes, 4), read_bytes(fdes, 4), read_bytes(fdes, 4)) mdr["CH3B45_TEST_ZEROTH_TERM"] = (read_bytes(fdes, 4), read_bytes(fdes, 4), read_bytes(fdes, 4)) mdr["CLOUD_INFORMATION"] = np.fromfile(file=fdes, dtype=" 180] -= 360 llons[llons < -180] += 360 calibrate(channels, info_giadr) return channels, llats, llons, g3a, g3b, metadata["ORBIT_START"] CASES = {"MPHR": read_mphr, "SPHR": read_sphr, "IPR": read_ipr, "GEADR": read_geadr, "GIADR": read_giadr, "VEADR": read_veadr, "MDR": read_mdr} EPSILON = 0.001 def load(satscene): """Read data from file and load it into *satscene*. """ conf = ConfigParser() conf.read(os.path.join(CONFIG_PATH, satscene.fullname + ".cfg")) options = {} for option, value in conf.items(satscene.instrument_name + "-level2", raw=True): options[option] = value LOAD_CASES[satscene.instrument_name](satscene, options) def load_avhrr(satscene, options): """Read avhrr data from file and load it into *satscene*. """ if "filename" not in options: raise IOError("No filename given, cannot load.") values = {"INSTRUMENT": satscene.instrument_name[:4].upper(), "FNAME": satscene.satname[0].upper() + satscene.number } filename = os.path.join( options["dir"], (satscene.time_slot.strftime(options["filename"]) % values)) LOG.debug("Looking for file %s" % satscene.time_slot.strftime(filename)) file_list = glob.glob(satscene.time_slot.strftime(filename)) if len(file_list) > 1: raise IOError("More than one l1b file matching!") elif len(file_list) == 0: raise IOError("No l1b file matching!") try: fdes = open(file_list[0]) channels, lats, lons, g3a, g3b, orbit = read(fdes) finally: fdes.close() channels = np.ma.masked_invalid(channels) satscene["1"] = channels[0, :, :] satscene["2"] = channels[1, :, :] satscene["4"] = channels[4, :, :] satscene["5"] = channels[5, :, :] if g3a: satscene["3A"] = channels[2, :, :] if g3b: satscene["3B"] = channels[3, :, :] print "Inside eps_avhrr.load_avhrr: orbit = ", orbit #satscene.orbit = str(int(orbit) + 1) satscene.orbit = str(int(orbit)) try: from pyresample import geometry satscene.area = geometry.SwathDefinition(lons=lons, lats=lats) except ImportError: satscene.area = None satscene.lat = lats satscene.lon = lons def get_lonlat(satscene, row, col): try: if (satscene.area is None and (satscene.lat is None or satscene.lon is None)): load(satscene) except AttributeError: load(satscene) try: return satscene.area.lons[row, col], satscene.area.lats[row, col] except AttributeError: return satscene.lon[row, col], satscene.lat[row, col] def get_lat_lon(satscene, resolution): """Read lat and lon. """ del resolution return LAT_LON_CASES[satscene.instrument_name](satscene, None) def get_lat_lon_avhrr(satscene, options): """Read lat and lon. 
""" del options return satscene.lat, satscene.lon LAT_LON_CASES = { "avhrr": get_lat_lon_avhrr } LOAD_CASES = { "avhrr": load_avhrr } if __name__ == "__main__": pass mpop-1.5.0/mpop/satin/eps_l1b.py000066400000000000000000000440731317160620000164640ustar00rootroot00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright (c) 2012-2017 Pytroll # Author(s): # Martin Raspaud # Adam Dybbroe # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # You should have received a copy of the GNU General Public License # along with this program. If not, see . """Reader for eps level 1b data. Uses xml files as a format description. See: http://www.eumetsat.int/website/wcm/idc/idcplg?IdcService=GET_FILE&dDocName=PDF_TEN_97231-EPS-AVHRR&RevisionSelectionMethod=LatestReleased&Rendition=Web and http://www.eumetsat.int/website/wcm/idc/idcplg?IdcService=GET_FILE&dDocName=PDF_TEN_990004-EPS-AVHRR1-PGS&RevisionSelectionMethod=LatestReleased&Rendition=Web """ import glob import os from ConfigParser import ConfigParser import numpy as np from mpop import CONFIG_PATH from mpop.satin.xmlformat import XMLFormat import logging LOG = logging.getLogger(__name__) try: from pyresample import geometry except ImportError: pass try: import numexpr as ne except ImportError: pass C1 = 1.191062e-05 # mW/(m2*sr*cm-4) C2 = 1.4387863 # K/cm-1 def to_bt(arr, wc_, a__, b__): """Convert to BT. """ try: return ne.evaluate("a__ + b__ * (C2 * wc_ / " "(log(1 + (C1 * (wc_ ** 3) / arr))))") except NameError: return a__ + b__ * (C2 * wc_ / np.log(1 + (C1 * (wc_ ** 3) / arr))) def to_refl(arr, solar_flux): """Convert to reflectances. """ return arr * np.pi * 100.0 / solar_flux def read_raw(filename): """Read *filename* without scaling it afterwards. """ form = XMLFormat(os.path.join(CONFIG_PATH, "eps_avhrrl1b_6.5.xml")) grh_dtype = np.dtype([("record_class", "|i1"), ("INSTRUMENT_GROUP", "|i1"), ("RECORD_SUBCLASS", "|i1"), ("RECORD_SUBCLASS_VERSION", "|i1"), ("RECORD_SIZE", ">u4"), ("RECORD_START_TIME", "S6"), ("RECORD_STOP_TIME", "S6")]) record_class = ["Reserved", "mphr", "sphr", "ipr", "geadr", "giadr", "veadr", "viadr", "mdr"] records = [] with open(filename, "rb") as fdes: while True: grh = np.fromfile(fdes, grh_dtype, 1) if not grh: break try: rec_class = record_class[grh["record_class"][0]] sub_class = grh["RECORD_SUBCLASS"][0] record = np.fromfile(fdes, form.dtype((rec_class, sub_class)), 1) records.append((rec_class, record, sub_class)) except KeyError: fdes.seek(grh["RECORD_SIZE"][0] - 20, 1) return records, form def get_filename(satscene, level): """Get the filename. 
""" conf = ConfigParser() conf.read(os.path.join(CONFIG_PATH, satscene.fullname + ".cfg")) options = {} for option, value in conf.items(satscene.instrument_name + "-" + level, raw=True): options[option] = value values = {"INSTRUMENT": satscene.instrument_name[:4].upper(), "FNAME": satscene.satname[0].upper() + satscene.number } filename = os.path.join( options["dir"], (satscene.time_slot.strftime(options["filename"]) % values)) LOG.debug("Looking for file %s" % satscene.time_slot.strftime(filename)) file_list = glob.glob(satscene.time_slot.strftime(filename)) if len(file_list) > 1: raise IOError("More than one l1b file matching!") elif len(file_list) == 0: raise IOError("No l1b file matching!") return file_list[0] class EpsAvhrrL1bReader(object): """Eps level 1b reader for AVHRR data. """ def __init__(self, filename): self.records, self.form = read_raw(filename) self.mdrs = [record[1] for record in self.records if record[0] == "mdr"] self.scanlines = len(self.mdrs) self.sections = {("mdr", 2): np.concatenate(self.mdrs)} for record in self.records: if record[0] == "mdr": continue if (record[0], record[2]) in self.sections: raise ValueError("Too many " + str((record[0], record[2]))) else: self.sections[(record[0], record[2])] = record[1] self.lons, self.lats = None, None def __getitem__(self, key): for altkey in self.form.scales.keys(): try: try: return (self.sections[altkey][key] * self.form.scales[altkey][key]) except TypeError: val = float(self.sections[altkey][key][0].split("=")[1]) return val * self.form.scales[altkey][key] except ValueError: continue raise KeyError("No matching value for " + str(key)) def keys(self): """List of reader's keys. """ keys = [] for val in self.form.scales.values(): keys += val.dtype.fields.keys() return keys def get_full_lonlats(self): """Get the interpolated lons/lats. """ lats = np.hstack((self["EARTH_LOCATION_FIRST"][:, [0]], self["EARTH_LOCATIONS"][:, :, 0], self["EARTH_LOCATION_LAST"][:, [0]])) lons = np.hstack((self["EARTH_LOCATION_FIRST"][:, [1]], self["EARTH_LOCATIONS"][:, :, 1], self["EARTH_LOCATION_LAST"][:, [1]])) nav_sample_rate = self["NAV_SAMPLE_RATE"] earth_views_per_scanline = self["EARTH_VIEWS_PER_SCANLINE"] if nav_sample_rate == 20 and earth_views_per_scanline == 2048: from geotiepoints import metop20kmto1km self.lons, self.lats = metop20kmto1km(lons, lats) else: raise NotImplementedError("Lon/lat expansion not implemented for " + "sample rate = " + str(nav_sample_rate) + " and earth views = " + str(earth_views_per_scanline)) return self.lons, self.lats def get_lonlat(self, row, col): """Get lons/lats for given indices. WARNING: if the lon/lats were not expanded, this will refer to the tiepoint data. """ if self.lons is None or self.lats is None: self.lats = np.hstack((self["EARTH_LOCATION_FIRST"][:, [0]], self["EARTH_LOCATIONS"][:, :, 0], self["EARTH_LOCATION_LAST"][:, [0]])) self.lons = np.hstack((self["EARTH_LOCATION_FIRST"][:, [1]], self["EARTH_LOCATIONS"][:, :, 1], self["EARTH_LOCATION_LAST"][:, [1]])) return self.lons[row, col], self.lats[row, col] def get_channels(self, channels, calib_type): """Get calibrated channel data. *calib_type* = 0: Counts *calib_type* = 1: Reflectances and brightness temperatures *calib_type* = 2: Radiances """ if calib_type == 0: raise ValueError('calibrate=0 is not supported! 
                             ' + 'This reader cannot return counts')
        elif calib_type != 1 and calib_type != 2:
            raise ValueError('calibrate=' + str(calib_type) +
                             ' is not supported!')
        if ("3a" in channels or "3A" in channels or
                "3b" in channels or "3B" in channels):
            three_a = ((self["FRAME_INDICATOR"] & 2 ** 16) == 2 ** 16)
            three_b = ((self["FRAME_INDICATOR"] & 2 ** 16) == 0)
        chans = {}
        for chan in channels:
            if chan not in ["1", "2", "3a", "3A", "3b", "3B", "4", "5"]:
                LOG.info("Can't load channel in eps_l1b: " + str(chan))
                continue
            if chan == "1":
                if calib_type == 1:
                    chans[chan] = np.ma.array(
                        to_refl(self["SCENE_RADIANCES"][:, 0, :],
                                self["CH1_SOLAR_FILTERED_IRRADIANCE"]))
                else:
                    chans[chan] = np.ma.array(
                        self["SCENE_RADIANCES"][:, 0, :])
            if chan == "2":
                if calib_type == 1:
                    chans[chan] = np.ma.array(
                        to_refl(self["SCENE_RADIANCES"][:, 1, :],
                                self["CH2_SOLAR_FILTERED_IRRADIANCE"]))
                else:
                    chans[chan] = np.ma.array(
                        self["SCENE_RADIANCES"][:, 1, :])
            if chan.lower() == "3a":
                if calib_type == 1:
                    chans[chan] = np.ma.array(
                        to_refl(self["SCENE_RADIANCES"][:, 2, :],
                                self["CH3A_SOLAR_FILTERED_IRRADIANCE"]))
                else:
                    chans[chan] = np.ma.array(self["SCENE_RADIANCES"][:, 2, :])
                chans[chan][three_b, :] = np.nan
                chans[chan] = np.ma.masked_invalid(chans[chan])
            if chan.lower() == "3b":
                if calib_type == 1:
                    chans[chan] = np.ma.array(
                        to_bt(self["SCENE_RADIANCES"][:, 2, :],
                              self["CH3B_CENTRAL_WAVENUMBER"],
                              self["CH3B_CONSTANT1"],
                              self["CH3B_CONSTANT2_SLOPE"]))
                else:
                    chans[chan] = self["SCENE_RADIANCES"][:, 2, :]
                chans[chan][three_a, :] = np.nan
                chans[chan] = np.ma.masked_invalid(chans[chan])
            if chan == "4":
                if calib_type == 1:
                    chans[chan] = np.ma.array(
                        to_bt(self["SCENE_RADIANCES"][:, 3, :],
                              self["CH4_CENTRAL_WAVENUMBER"],
                              self["CH4_CONSTANT1"],
                              self["CH4_CONSTANT2_SLOPE"]))
                else:
                    chans[chan] = np.ma.array(
                        self["SCENE_RADIANCES"][:, 3, :])
            if chan == "5":
                if calib_type == 1:
                    chans[chan] = np.ma.array(
                        to_bt(self["SCENE_RADIANCES"][:, 4, :],
                              self["CH5_CENTRAL_WAVENUMBER"],
                              self["CH5_CONSTANT1"],
                              self["CH5_CONSTANT2_SLOPE"]))
                else:
                    chans[chan] = np.ma.array(self["SCENE_RADIANCES"][:, 4, :])
        return chans


def get_lonlat(scene, row, col):
    """Get the longitudes and latitudes for the given *rows* and *cols*.
""" try: filename = get_filename(scene, "granules") except IOError: #from mpop.satin.eps1a import get_lonlat_avhrr # return get_lonlat_avhrr(scene, row, col) from pyorbital.orbital import Orbital import pyproj from datetime import timedelta start_time = scene.time_slot end_time = scene.time_slot + timedelta(minutes=3) orbital = Orbital("METOP-A") track_start = orbital.get_lonlatalt(start_time) track_end = orbital.get_lonlatalt(end_time) geod = pyproj.Geod(ellps='WGS84') az_fwd, az_back, dist = geod.inv(track_start[0], track_start[1], track_end[0], track_end[1]) del dist M02_WIDTH = 2821885.8962408099 pos = ((col - 1024) * M02_WIDTH) / 2048.0 if row > 520: lonlatdist = geod.fwd(track_end[0], track_end[1], az_back - 86.253533216206648, -pos) else: lonlatdist = geod.fwd(track_start[0], track_start[1], az_fwd - 86.253533216206648, pos) return lonlatdist[0], lonlatdist[1] try: if scene.lons is None or scene.lats is None: records, form = read_raw(filename) mdrs = [record[1] for record in records if record[0] == "mdr"] sphrs = [record for record in records if record[0] == "sphr"] sphr = sphrs[0][1] scene.lons, scene.lats = _get_lonlats(mdrs, sphr, form) return scene.lons[row, col], scene.lats[row, col] except AttributeError: records, form = read_raw(filename) mdrs = [record[1] for record in records if record[0] == "mdr"] sphrs = [record for record in records if record[0] == "sphr"] sphr = sphrs[0][1] scene.lons, scene.lats = _get_lonlats(mdrs, sphr, form) return scene.lons[row, col], scene.lats[row, col] def _get_lonlats(mdrs, sphr, form): """Get sparse arrays of lon/lats. """ scanlines = len(mdrs) mdrs = np.concatenate(mdrs) lats = np.hstack((mdrs["EARTH_LOCATION_FIRST"][:, [0]] * form.scales[("mdr", 2)]["EARTH_LOCATION_FIRST"][:, 0], mdrs["EARTH_LOCATIONS"][:, :, 0] * form.scales[("mdr", 2)]["EARTH_LOCATIONS"][:, :, 0], mdrs["EARTH_LOCATION_LAST"][:, [0]] * form.scales[("mdr", 2)]["EARTH_LOCATION_LAST"][:, 0])) lons = np.hstack((mdrs["EARTH_LOCATION_FIRST"][:, [1]] * form.scales[("mdr", 2)]["EARTH_LOCATION_FIRST"][:, 1], mdrs["EARTH_LOCATIONS"][:, :, 1] * form.scales[("mdr", 2)]["EARTH_LOCATIONS"][:, :, 1], mdrs["EARTH_LOCATION_LAST"][:, [1]] * form.scales[("mdr", 2)]["EARTH_LOCATION_LAST"][:, 1])) nav_sample_rate = int(sphr["NAV_SAMPLE_RATE"][0].split("=")[1]) earth_views_per_scanline = \ int(sphr["EARTH_VIEWS_PER_SCANLINE"][0].split("=")[1]) geo_samples = np.round(earth_views_per_scanline / nav_sample_rate) + 3 samples = np.zeros(geo_samples, dtype=np.intp) samples[1:-1] = np.arange(geo_samples - 2) * 20 + 5 - 1 samples[-1] = earth_views_per_scanline - 1 mask = np.ones((scanlines, earth_views_per_scanline)) mask[:, samples] = 0 geolats = np.ma.empty((scanlines, earth_views_per_scanline), dtype=lats.dtype) geolats.mask = mask geolats[:, samples] = lats geolons = np.ma.empty((scanlines, earth_views_per_scanline), dtype=lons.dtype) geolons.mask = mask geolons[:, samples] = lons return geolons, geolats def get_corners(filename): """Get the corner lon/lats of the file. 
""" records, form = read_raw(filename) mdrs = [record[1] for record in records if record[0] == "mdr"] ul_ = (mdrs[0]["EARTH_LOCATION_FIRST"] * form.scales[("mdr", 2)]["EARTH_LOCATION_FIRST"]) ur_ = (mdrs[0]["EARTH_LOCATION_LAST"] * form.scales[("mdr", 2)]["EARTH_LOCATION_LAST"]) ll_ = (mdrs[-1]["EARTH_LOCATION_FIRST"] * form.scales[("mdr", 2)]["EARTH_LOCATION_FIRST"]) lr_ = (mdrs[-1]["EARTH_LOCATION_LAST"] * form.scales[("mdr", 2)]["EARTH_LOCATION_LAST"]) return ul_, ur_, ll_, lr_ def load(scene, *args, **kwargs): """Loads the *channels* into the satellite *scene*. A possible *calibrate* keyword argument is passed to the AAPP reader Should be 0 for off, 1 for default, and 2 for radiances only. However, as the AAPP-lvl1b file contains radiances this reader cannot return counts, so calibrate=0 is not allowed/supported. The radiance to counts conversion is not possible. """ del args calibrate = kwargs.get("calibrate", True) if calibrate == 0: raise ValueError('calibrate=0 is not supported! ' + 'This reader cannot return counts') if kwargs.get("filename") is not None: filename = kwargs["filename"] else: filename = (kwargs.get("filename", None) or get_filename(scene, "level2")) if isinstance(filename, (list, tuple, set)): filenames = filename else: filenames = [filename] LOG.debug("Using file(s) %s", str(filename)) readers = [EpsAvhrrL1bReader(filename) for filename in filenames] arrs = {} llons = [] llats = [] loaded_channels = set() for reader in readers: for chname, arr in reader.get_channels(scene.channels_to_load, calibrate).items(): arrs.setdefault(chname, []).append(arr) loaded_channels.add(chname) if scene.orbit is None: scene.orbit = int(reader["ORBIT_START"][0]) scene.info["orbit_number"] = scene.orbit lons, lats = reader.get_full_lonlats() llons.append(lons) llats.append(lats) for chname in loaded_channels: scene[chname] = np.ma.vstack(arrs[chname]) if chname in ["1", "2", "3A"]: scene[chname].info["units"] = "%" elif chname in ["4", "5", "3B"]: scene[chname].info["units"] = "K" lons = np.vstack(llons) lats = np.vstack(llats) try: scene.area = geometry.SwathDefinition(lons, lats) except NameError: scene.lons, scene.lats = lons, lats def norm255(a__): """normalize array to uint8. """ arr = a__ * 1.0 arr = (arr - arr.min()) * 255.0 / (arr.max() - arr.min()) return arr.astype(np.uint8) def show(a__): """show array. """ from PIL import Image Image.fromarray(norm255(a__), "L").show() if __name__ == '__main__': pass mpop-1.5.0/mpop/satin/fy3_mersi.py000066400000000000000000000156261317160620000170410ustar00rootroot00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright (c) 2015 Adam.Dybbroe # Author(s): # Adam.Dybbroe # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # You should have received a copy of the GNU General Public License # along with this program. If not, see . 
"""A reader for the FY3 Mersi-1 """ import numpy as np import os import logging from datetime import datetime import glob from ConfigParser import ConfigParser from mpop import CONFIG_PATH import h5py import pdb LOGGER = logging.getLogger('mersi-1') def load(satscene, *args, **kwargs): """Read data from file and load it into *satscene*. A possible *calibrate* keyword argument is passed to the AAPP reader. Should be 0 for off (counts), 1 for default (brightness temperatures and reflectances), and 2 for radiances only. If *use_extern_calib* keyword argument is set True, use external calibration data. """ del args conf = ConfigParser() conf.read(os.path.join(CONFIG_PATH, satscene.fullname + ".cfg")) options = {} for option, value in conf.items(satscene.instrument_name + "-level2", raw=True): options[option] = value if kwargs.get("filename") is not None: options["full_filename"] = kwargs["filename"] if kwargs.get("calibrate") is not None: options["calibrate"] = kwargs["calibrate"] else: options["calibrate"] = True LOGGER.info("Loading instrument '%s'", satscene.instrument_name) try: CASES[satscene.instrument_name](satscene, options) except KeyError: raise KeyError("Unknown instrument '%s'" % satscene.instrument_name) def load_mersi(satscene, options): """Read the Mersi-1 hdf file""" if "filename_1000m" not in options: raise IOError("No 1km mersi-1 filename given, cannot load.") values = {"orbit": satscene.orbit, "satname": satscene.satname, "instrument": satscene.instrument_name, "satellite": satscene.fullname } filename_1000m = \ os.path.join(satscene.time_slot.strftime(options["dir"]) % values, satscene.time_slot.strftime( options["filename_1000m"]) % values) LOGGER.debug("Filename= %s", filename_1000m) datasets = ['EV_250_Aggr.1KM_RefSB', 'EV_250_Aggr.1KM_Emissive', 'EV_1KM_RefSB'] calibrate = options['calibrate'] # Get the calibration information: h5f = h5py.File(filename_1000m) # The K0, K1 and K2 coefficients: vis_cal_coeff = h5f['Calibration']['VIS_Cal_Coeff'][:] # See also "Update of Calibration for Reflective Solar Bands of MERSI / FY-3C" # http://satellite.cma.gov.cn/PortalSite/Download/FY3C/CalibrationCoefficient/Update%20of%20Calibration%20for%20Reflective%20Solar%20Bands%20of%20MERSI_20140618.doc sv_dn_average = h5f['Calibration']['SV_DN_average'][:] # Expand array over all lines (10 lines per scan): sv_dn_average = np.repeat(sv_dn_average, 10, axis=1) date_orig = h5f.attrs['DN_Normalized_LUT_UpdateDate'] dtobj_orig = datetime.strptime(date_orig, '%Y-%m-%d') obs_beg_date = h5f.attrs["Observing Beginning Date"] obs_beg_time = h5f.attrs["Observing Beginning Time"] dtobj_obs = datetime.strptime( obs_beg_date + obs_beg_time, '%Y-%m-%d%H:%M:%S.%f') h5f.close() # Get the days since 'launch' or since coefficients update: dsl = (dtobj_obs - dtobj_orig).days slopes = (vis_cal_coeff[:, 0] + vis_cal_coeff[:, 1] * dsl + vis_cal_coeff[:, 2] * dsl * dsl) # The slopes are available for band 1-4 and 6-20. # To keep consistency with the other cal-coefficients we add the IR band as # well, and set the slope to 1: slopes = np.concatenate((slopes[0:4], [1], slopes[4:])) mersi_band_index = 0 with h5py.File(filename_1000m) as h5f: for dset in datasets: band_data = h5f['Data'][dset] valid_range = band_data.attrs['valid_range'] LOGGER.debug("valid-range = " + str(valid_range)) # FIXME! There seem to be useful data outside the valid range! 
valid_range = (0, 65535) fillvalue = band_data.attrs['FillValue'] band_names = band_data.attrs['band_name'].split(',') slope = band_data.attrs['Slope'] intercept = band_data.attrs['Intercept'] LOGGER.debug('band names = ' + str(band_names)) for (i, band) in enumerate(band_names): if band not in satscene.channels_to_load: continue LOGGER.debug("Reading channel %s, i=%d", band, i) # Take care of the case when there is only one # single band (band 5: IR) in the dataset: if len(band_data.shape) == 2: data = band_data else: data = band_data[i] bandmask = np.logical_or(np.less(data, valid_range[0]), np.greater(data, valid_range[1])) if calibrate: data = slopes[mersi_band_index] * ( data - np.array([sv_dn_average[mersi_band_index]]).transpose()) satscene[band] = np.ma.masked_array(data, mask=bandmask, copy=False) satscene[band].info = { 'var_name': 'ch' + str(band), 'var_data': satscene[band].data, 'var_dim_names': ('x', 'y'), '_FillValue': fillvalue, 'standard_name': '', 'short_name': band, 'scale_factor': slope, 'add_offset': intercept, } mersi_band_index = mersi_band_index + 1 satscene.info = { 'Antenna': 'None', 'Receiver': 'Unknown', 'Time': satscene.time_slot.strftime("%Y-%m-%d %H:%M:%S UTC"), 'Area_Name': "swath", #'Projection': 'satproj', 'Platform Name': satscene.fullname, 'Service': '', #'Columns' : satscene.channels[0].shape[1], #'Lines' : satscene.channels[0].shape[0], 'SampleX': 1.0, 'SampleY': 1.0, 'title': 'MERSI Level 1', } # Get geolocation information CASES = { "mersi/1": load_mersi, } mpop-1.5.0/mpop/satin/fy3_virr.py000066400000000000000000000143521317160620000166770ustar00rootroot00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright (c) 2015, 2016 Adam.Dybbroe # Author(s): # Adam.Dybbroe # Katerina.Melnik # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # You should have received a copy of the GNU General Public License # along with this program. If not, see . """A VIRR reader for FY3-B and maybe A.... """ import numpy as np import os import logging from datetime import datetime from ConfigParser import ConfigParser from mpop import CONFIG_PATH import h5py from pyspectral.blackbody import blackbody_wn_rad2temp as rad2temp LOGGER = logging.getLogger('virr') def load(satscene, *args, **kwargs): """Read data from file and load it into *satscene*. A possible *calibrate* keyword argument is passed to the AAPP reader. Should be 0 for off (counts), 1 for default (brightness temperatures and reflectances), and 2 for radiances only. If *use_extern_calib* keyword argument is set True, use external calibration data. 
""" del args conf = ConfigParser() conf.read(os.path.join(CONFIG_PATH, satscene.fullname + ".cfg")) options = {} for option, value in conf.items(satscene.instrument_name + "-level2", raw=True): options[option] = value if kwargs.get("filename") is not None: options["full_filename"] = kwargs["filename"] if kwargs.get("calibrate") is not None: options["calibrate"] = kwargs["calibrate"] else: options["calibrate"] = True LOGGER.debug("Calibrate = " + str(options["calibrate"])) LOGGER.info("Loading instrument '%s'", satscene.instrument_name) try: CASES[satscene.instrument_name](satscene, options) except KeyError: raise KeyError("Unknown instrument '%s'" % satscene.instrument_name) def load_virr(satscene, options): """Read the VIRR hdf5 file""" if "filename" not in options: raise IOError("No 1km virr filename given, cannot load") values = {"orbit": satscene.orbit, "satname": satscene.satname, "instrument": satscene.instrument_name, "satellite": satscene.fullname } filename = \ os.path.join(satscene.time_slot.strftime(options["dir"]) % values, satscene.time_slot.strftime( options["filename"]) % values) LOGGER.debug("Filename= %s", filename) datasets = ['EV_Emissive', 'EV_RefSB'] calibrate = options['calibrate'] LOGGER.debug("Calibrate = " + str(calibrate)) h5f = h5py.File(filename, 'r') # Get geolocation information lons = h5f['Longitude'][:] lats = h5f['Latitude'][:] # Mask out unrealistic values: mask = np.logical_or(lats > 90., lons > 90.) lons = np.ma.masked_array(lons, mask=mask) lats = np.ma.masked_array(lats, mask=mask) sunz = h5f['SolarZenith'][:] slope = h5f['SolarZenith'].attrs['Slope'][0] intercept = h5f['SolarZenith'].attrs['Intercept'][0] sunz = sunz * slope + intercept sunz = np.where(np.greater(sunz, 85.0), 85.0, sunz) # Get the calibration information # Emissive radiance coefficients: emis_offs = h5f['Emissive_Radiance_Offsets'][:] emis_scales = h5f['Emissive_Radiance_Scales'][:] # Central wave number (unit = cm-1) for the three IR bands # It is ordered according to decreasing wave number (increasing wavelength): # 3.7 micron, 10.8 micron, 12 micron emiss_centroid_wn = h5f.attrs['Emmisive_Centroid_Wave_Number'] # VIS/NIR calibration stuff: refsb_cal_coeff = h5f.attrs['RefSB_Cal_Coefficients'] visnir_scales = refsb_cal_coeff[0::2] visnir_offs = refsb_cal_coeff[1::2] refsb_effective_wl = h5f.attrs['RefSB_Effective_Wavelength'] # Read the band data: for dset in datasets: band_data = h5f[dset] valid_range = band_data.attrs['valid_range'] LOGGER.debug("valid-range = " + str(valid_range)) fillvalue = band_data.attrs['_FillValue'] band_names = band_data.attrs['band_name'].split(',') slope = band_data.attrs['Slope'] intercept = band_data.attrs['Intercept'] units = band_data.attrs['units'] long_name = band_data.attrs['long_name'] LOGGER.debug('band names = ' + str(band_names)) for (i, band) in enumerate(band_names): if band not in satscene.channels_to_load: continue LOGGER.debug("Reading channel %s, i=%d", band, i) data = band_data[i] bandmask = np.logical_or(np.less(data, valid_range[0]), np.greater(data, valid_range[1])) if calibrate: if dset in ['EV_Emissive']: data = (np.array([emis_offs[:, i]]).transpose() + data * np.array([emis_scales[:, i]]).transpose()) # Radiance to Tb conversion. 
# Pyspectral wants SI units, # but radiance data are in mW/m^2/str/cm^-1 and wavenumbers are in cm^-1 # Therefore multply wavenumber by 100 and radiances by # 10^-5 data = rad2temp(emiss_centroid_wn[i] * 100., data * 1e-5) LOGGER.debug("IR data calibrated") if dset in ['EV_RefSB']: data = (visnir_offs[i] + data * visnir_scales[i]) / np.cos(np.deg2rad(sunz)) satscene[band] = np.ma.masked_array(data, mask=bandmask, copy=False) from pyresample import geometry satscene.area = geometry.SwathDefinition(lons=lons, lats=lats) h5f.close() CASES = { "virr": load_virr, } mpop-1.5.0/mpop/satin/gac_l1b.py000066400000000000000000000116511317160620000164230ustar00rootroot00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright (c) 2014 Abhay Devasthale and Martin Raspaud # Author(s): # Abhay Devasthale # Martin Raspaud # Adam Dybbroe # Sajid Pareeth # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # You should have received a copy of the GNU General Public License # along with this program. If not, see . """Read a gac file. Reads L1b GAC data from KLM series of satellites (NOAA-15 and later) and does most of the computations. Format specification can be found here: http://www.ncdc.noaa.gov/oa/pod-guide/ncdc/docs/klm/html/c8/sec83142-1.htm """ import glob import logging import os from ConfigParser import ConfigParser import numpy as np from mpop import CONFIG_PATH from pygac.gac_klm import KLMReader from pygac.gac_pod import PODReader LOGGER = logging.getLogger(__name__) def load(satscene, *args, **kwargs): """Read data from file and load it into *satscene*. A possible *calibrate* keyword argument is passed to the AAPP reader. Should be 0 for off (counts), 1 for default (brightness temperatures and reflectances), and 2 for radiances only. """ del args conf = ConfigParser() conf.read(os.path.join(CONFIG_PATH, satscene.fullname + ".cfg")) options = {} for option, value in conf.items(satscene.instrument_name + "-level2", raw=True): options[option] = value if kwargs.get("filename") is not None: options["filename"] = kwargs["filename"] options["dir"] = None options["calibrate"] = kwargs.get("calibrate", True) LOGGER.info("Loading instrument '%s'" % satscene.instrument_name) try: CASES[satscene.instrument_name](satscene, options) except KeyError: raise KeyError("Unknown instrument '%s'" % satscene.instrument_name) def load_avhrr(satscene, options): """Read avhrr data from file and load it into *satscene*. 
""" if "filename" not in options: raise IOError("No filename given, cannot load.") values = {"orbit": satscene.orbit, "satname": satscene.satname, "number": satscene.number, "instrument": satscene.instrument_name, "satellite": satscene.fullname} if options["dir"] is None: filename = options["filename"] else: filename = os.path.join( satscene.time_slot.strftime(options["dir"]) % values, satscene.time_slot.strftime(options["filename"]) % values) file_list = glob.glob(filename) if len(file_list) > 1: raise IOError("More than one l1b file matching!") elif len(file_list) == 0: raise IOError("No l1b file matching!: " + filename) filename = file_list[0] LOGGER.debug("Loading from " + filename) with open(filename) as fdes: data = fdes.read(3) if data in ["CMS", "NSS", "UKM", "DSS"]: reader = KLMReader chn_dict = AVHRR3_CHANNEL_NAMES else: reader = PODReader chn_dict = AVHRR_CHANNEL_NAMES chns = satscene.channels_to_load & set(chn_dict.keys()) LOGGER.info("Loading channels " + str(sorted(list(chns)))) if len(chns) == 0: return scene = reader() scene.read(filename) scene.get_lonlat() scene.adjust_clock_drift() channels = scene.get_calibrated_channels() # scene.navigate() try: from pyresample import geometry except ImportError as ex_: LOGGER.debug("Could not load pyresample: " + str(ex_)) satscene.lat = scene.lats satscene.lon = scene.lons else: satscene.area = geometry.SwathDefinition(lons=scene.lons, lats=scene.lats) area_name = ("swath_" + satscene.fullname + "_" + str(satscene.time_slot) + "_" + str(scene.lats.shape)) satscene.area.area_id = area_name satscene.area.name = "Satellite projection" satscene.area_id = area_name for chn in chns: data = channels[:, :, chn_dict[chn]] if np.ma.count(data) > 0: satscene[chn].data = np.ma.masked_invalid(data, copy=False) satscene[chn].area = satscene.area AVHRR3_CHANNEL_NAMES = {"1": 0, "2": 1, "3A": 2, "3B": 3, "4": 4, "5": 5} AVHRR_CHANNEL_NAMES = {"1": 0, "2": 1, "3": 2, "4": 3, "5": 4} CASES = {"avhrr/1": load_avhrr, "avhrr/2": load_avhrr, "avhrr/3": load_avhrr, } mpop-1.5.0/mpop/satin/gribformat.py000066400000000000000000000044031317160620000172640ustar00rootroot00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright (c) 2016 Adam.Dybbroe # Author(s): # Adam.Dybbroe # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # You should have received a copy of the GNU General Public License # along with this program. If not, see . """Utility functions to read Grib messages """ import os import pygrib import os.path class Grib(object): def __init__(self, fname): self._abspath = os.path.abspath(fname) @property def nmsgs(self): '''Number of GRIB messages in file. ''' prop = 'nmsgs' attr = '_{}'.format(prop) if not hasattr(self, attr): grbs = pygrib.open(self._abspath) nmsgs = grbs.messages grbs.close() setattr(self, attr, nmsgs) return getattr(self, attr) def get(self, gmessage, key='values'): ''' Returns the value for the 'key' for a given message number 'gmessage' or message field name 'gmessage'. 
''' grbs = pygrib.open(self._abspath) if type(gmessage) == int: mnbr = gmessage elif type(gmessage) == str: msg_found = False msgnum = 1 while msgnum < self.nmsgs + 1: if grbs[msgnum]['parameterName'] == gmessage: msg_found = True break msgnum = msgnum + 1 if msg_found: mnbr = msgnum else: print("No Grib message found with parameter name = %s" % gmessage) return None if grbs[mnbr].valid_key(key): arr = grbs[mnbr][key] grbs.close() return arr else: grbs.close() return mpop-1.5.0/mpop/satin/h5_pps_l2.py000066400000000000000000000441311317160620000167250ustar00rootroot00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright (c) 2014, 2015 Adam.Dybbroe # Author(s): # Adam.Dybbroe # Martin.Raspaud # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # You should have received a copy of the GNU General Public License # along with this program. If not, see . """PPS netcdf cloud product reader """ import os.path from ConfigParser import ConfigParser from ConfigParser import NoOptionError from datetime import datetime import glob import numpy as np import mpop.channel from mpop import CONFIG_PATH from mpop.plugin_base import Reader import logging LOG = logging.getLogger(__name__) NEW_PRODNAMES = {'cloudtype': 'CT', 'cloudmask': 'CMA', 'precipclouds': 'PC', 'cpp': 'CPP', 'ctth': 'CTTH'} PPS_DATASETS = ['Cloud Type', 'Multilayer Cloud Detection', "SAFNWC PPS PC likelihood of intense precipitation", "SAFNWC PPS PC likelihood of moderate precipitation", "SAFNWC PPS PC likelihood of light precipitation", ] class InfoObject(object): """Simple data and info container. """ def __init__(self): self.info = {} self.data = None class NwcSafPpsChannel(mpop.channel.GenericChannel): def __init__(self, filename=None): mpop.channel.GenericChannel.__init__(self) self.mda = {} self._projectables = [] self._keys = [] self._refs = {} self.shape = None if filename: self.read(filename) def read(self, filename, load_lonlat=True): """Read the PPS v2014 formatet data""" LOG.debug("New netCDF CF file format!") import h5py h5f = h5py.File(filename, 'r') self.mda.update(h5f.attrs.items()) self.mda["satellite"] = h5f.attrs['platform'] self.mda["orbit"] = h5f.attrs['orbit_number'] try: self.mda["time_slot"] = datetime.strptime(h5f.attrs['time_coverage_start'][:-2], "%Y%m%dT%H%M%S") except AttributeError: LOG.debug("No time information in product file!") variables = {} for key, item in h5f.items(): if item.attrs.get("CLASS") != 'DIMENSION_SCALE': variables[key] = item # processed variables processed = set() non_processed = set(variables.keys()) - processed for var_name in non_processed: if var_name in ['lon', 'lat']: continue var = variables[var_name] if ("standard_name" not in var.attrs.keys() and "long_name" not in var.attrs.keys() and var.attrs.get("CLASS") != "PALETTE"): LOG.info("Delayed processing of " + var_name) continue # Don't know how to unambiguously decide if the array is really a # data array or a palette or something else! # FIXME! 
if "standard_name" in var.attrs.keys(): self._projectables.append(var_name) elif "long_name" in var.attrs.keys(): dset_found = False for item in PPS_DATASETS: if var.attrs['long_name'].find(item) >= 0: self._projectables.append(var_name) dset_found = True break if not dset_found: self.mda[var_name] = var[:] # try: # self.mda[var_name] = var[:].filled(0) # except AttributeError: # self.mda[var_name] = var[:] continue elif var.attrs.get("CLASS") == "PALETTE": self.mda[var_name] = var[:] continue setattr(self, var_name, InfoObject()) for key, item in var.attrs.items(): if key != "DIMENSION_LIST": getattr(self, var_name).info[key] = item data = var[:] if 'valid_range' in var.attrs.keys(): data = np.ma.masked_outside(data, *var.attrs['valid_range']) elif '_FillValue' in var.attrs.keys(): data = np.ma.masked_where(data, var.attrs['_FillValue']) dataset = (data * var.attrs.get("scale_factor", 1) + var.attrs.get("add_offset", 0)) getattr(self, var_name).data = dataset LOG.debug("long_name: " + str(var.attrs['long_name'])) LOG.debug("Var=" + str(var_name) + " shape=" + str(dataset.shape)) if self.shape is None: self.shape = dataset.shape elif self.shape != dataset.shape: LOG.debug("Shape=" + str(dataset.shape) + " Not the same shape as previous field...") # raise ValueError("Different variable shapes !") # dims = var.dimensions # dim = dims[0] processed |= set([var_name]) non_processed = set(variables.keys()) - processed if len(non_processed) > 0: LOG.warning( "Remaining non-processed variables: " + str(non_processed)) # Get lon,lat: # from pyresample import geometry # area = geometry.SwathDefinition(lons=lon, lats=lat) # close the h5 file. h5f.close() del h5f return def project(self, coverage): """Project the data""" LOG.debug("Projecting channel %s..." % (self.name)) import copy res = copy.copy(self) # Project the data for var in self._projectables: LOG.info("Projecting " + str(var)) res.__dict__[var] = copy.copy(self.__dict__[var]) res.__dict__[var].data = coverage.project_array( self.__dict__[var].data) res.name = self.name res.resolution = self.resolution res.filled = True res.area = coverage.out_area return res def is_loaded(self): """Tells if the channel contains loaded data. """ return True # return len(self._projectables) > 0 def save(self, filename, old=True, **kwargs): del kwargs if old: from nwcsaf_formats.ppsv2014_to_oldformat import write_product write_product(self, filename) else: raise NotImplementedError("Can't save to new pps format yet.") class PPSReader(Reader): pformat = "h5_pps_l2" def load(self, satscene, *args, **kwargs): """Read data from file and load it into *satscene*. """ lonlat_is_loaded = False geofilename = kwargs.get('geofilename') prodfilename = kwargs.get('filename') products = [] if "CTTH" in satscene.channels_to_load: products.append("ctth") if "CT" in satscene.channels_to_load: products.append("cloudtype") if "CMA" in satscene.channels_to_load: products.append("cloudmask") if "PC" in satscene.channels_to_load: products.append("precipclouds") if "CPP" in satscene.channels_to_load: products.append("cpp") if len(products) == 0: return try: area_name = satscene.area_id or satscene.area.area_id except AttributeError: area_name = "satproj_?????_?????" 
# Looking for geolocation file conf = ConfigParser() conf.read(os.path.join(CONFIG_PATH, satscene.fullname + ".cfg")) try: geodir = conf.get(satscene.instrument_name + "-level3", "cloud_product_geodir", vars=os.environ) except NoOptionError: LOG.warning("No option 'geodir' in level3 section") geodir = None if not geofilename and geodir: # Load geo file from config file: try: if not satscene.orbit: orbit = "" else: orbit = satscene.orbit geoname_tmpl = conf.get(satscene.instrument_name + "-level3", "cloud_product_geofilename", raw=True, vars=os.environ) filename_tmpl = (satscene.time_slot.strftime(geoname_tmpl) % {"orbit": str(orbit).zfill(5) or "*", "area": area_name, "satellite": satscene.satname + satscene.number}) file_list = glob.glob(os.path.join(geodir, filename_tmpl)) if len(file_list) > 1: LOG.warning("More than 1 file matching for geoloaction: " + str(file_list)) elif len(file_list) == 0: LOG.warning( "No geolocation file matching!: " + os.path.join(geodir, filename_tmpl)) else: geofilename = file_list[0] except NoOptionError: geofilename = None # Reading the products classes = {"ctth": CloudTopTemperatureHeight, "cloudtype": CloudType, "cloudmask": CloudMask, "precipclouds": PrecipitationClouds, "cpp": CloudPhysicalProperties } nodata_mask = False area = None lons = None lats = None chn = None shape = None read_external_geo = {} for product in products: LOG.debug("Loading " + product) if isinstance(prodfilename, (list, tuple, set)): for fname in prodfilename: kwargs['filename'] = fname self.load(satscene, *args, **kwargs) return elif (prodfilename and os.path.basename(prodfilename).startswith('S_NWC')): if os.path.basename(prodfilename).split("_")[2] == NEW_PRODNAMES[product]: filename = prodfilename else: continue else: filename = conf.get(satscene.instrument_name + "-level3", "cloud_product_filename", raw=True, vars=os.environ) directory = conf.get(satscene.instrument_name + "-level3", "cloud_product_dir", vars=os.environ) pathname_tmpl = os.path.join(directory, filename) LOG.debug("Path = " + str(pathname_tmpl)) if not satscene.orbit: orbit = "" else: orbit = satscene.orbit filename_tmpl = (satscene.time_slot.strftime(pathname_tmpl) % {"orbit": str(orbit).zfill(5) or "*", "area": area_name, "satellite": satscene.satname + satscene.number, "product": product}) file_list = glob.glob(filename_tmpl) if len(file_list) == 0: product_name = NEW_PRODNAMES.get(product, product) LOG.info("No " + str(product) + " product in old format matching") filename_tmpl = (satscene.time_slot.strftime(pathname_tmpl) % {"orbit": str(orbit).zfill(5) or "*", "area": area_name, "satellite": satscene.satname + satscene.number, "product": product_name}) file_list = glob.glob(filename_tmpl) if len(file_list) > 1: LOG.warning("More than 1 file matching for " + product + "! " + str(file_list)) continue elif len(file_list) == 0: LOG.warning( "No " + product + " matching!: " + filename_tmpl) continue else: filename = file_list[0] chn = classes[product]() chn.read(filename, lonlat_is_loaded == False) satscene.channels.append(chn) # Check if geolocation is loaded: if not chn.area: read_external_geo[product] = chn shape = chn.shape # Check if some 'channel'/product needs geolocation. 
If some product does # not have geolocation, get it from the geofilename: if not read_external_geo: LOG.info("Loading PPS parameters done.") return # Load geolocation interpolate = False if geofilename: geodict = get_lonlat(geofilename) lons, lats = geodict['lon'], geodict['lat'] if lons.shape != shape or lats.shape != shape: interpolate = True row_indices = geodict['row_indices'] column_indices = geodict['col_indices'] lonlat_is_loaded = True else: LOG.warning("No Geo file specified: " + "Geolocation will be loaded from product") if lonlat_is_loaded: if interpolate: from geotiepoints import SatelliteInterpolator cols_full = np.arange(shape[1]) rows_full = np.arange(shape[0]) satint = SatelliteInterpolator((lons, lats), (row_indices, column_indices), (rows_full, cols_full)) # satint.fill_borders("y", "x") lons, lats = satint.interpolate() try: from pyresample import geometry lons = np.ma.masked_array(lons, nodata_mask) lats = np.ma.masked_array(lats, nodata_mask) area = geometry.SwathDefinition(lons=lons, lats=lats) except ImportError: area = None for chn in read_external_geo.values(): if area: chn.area = area else: chn.lat = lats chn.lon = lons LOG.info("Loading PPS parameters done.") return class CloudType(NwcSafPpsChannel): def __init__(self, filename=None): NwcSafPpsChannel.__init__(self, filename) self.name = "CT" class CloudTopTemperatureHeight(NwcSafPpsChannel): def __init__(self, filename=None): NwcSafPpsChannel.__init__(self, filename) self.name = "CTTH" class CloudMask(NwcSafPpsChannel): def __init__(self, filename=None): NwcSafPpsChannel.__init__(self, filename) self.name = "CMA" class PrecipitationClouds(NwcSafPpsChannel): def __init__(self, filename=None): NwcSafPpsChannel.__init__(self, filename) self.name = "PC" class CloudPhysicalProperties(NwcSafPpsChannel): def __init__(self, filename=None): NwcSafPpsChannel.__init__(self, filename) self.name = "CPP" def get_lonlat(filename): """Read lon,lat from netCDF4 CF file""" import h5py col_indices = None row_indices = None LOG.debug("Geo File = " + filename) h5f = h5py.File(filename, 'r') lon = h5f["where"]["lon"]["data"] no_data = h5f["where"]["lon"]["what"].attrs["nodata"] missing_data = h5f["where"]["lon"]["what"].attrs["missingdata"] lons = np.ma.masked_equal(lon[:], no_data) lons = np.ma.masked_equal(lons, missing_data) scale_factor = h5f["where"]["lon"]["what"].attrs["gain"] add_offset = h5f["where"]["lon"]["what"].attrs["offset"] lons = lons * scale_factor + add_offset lat = h5f["where"]["lat"]["data"] no_data = h5f["where"]["lat"]["what"].attrs["nodata"] missing_data = h5f["where"]["lat"]["what"].attrs["missingdata"] lats = np.ma.masked_equal(lat[:], no_data) lats = np.ma.masked_equal(lats, missing_data) scale_factor = h5f["where"]["lat"]["what"].attrs["gain"] add_offset = h5f["where"]["lat"]["what"].attrs["offset"] lats = lats * scale_factor + add_offset # FIXME: this is to mask out the npp bowtie deleted pixels... if h5f["how"].attrs['platform'] == "npp": new_mask = np.zeros((16, 3200), dtype=bool) new_mask[0, :1008] = True new_mask[1, :640] = True new_mask[14, :640] = True new_mask[15, :1008] = True new_mask[14, 2560:] = True new_mask[1, 2560:] = True new_mask[0, 2192:] = True new_mask[15, 2192:] = True new_mask = np.tile(new_mask, (lons.shape[0] / 16, 1)) lons = np.ma.masked_where(new_mask, lons) lats = np.ma.masked_where(new_mask, lats) # close the h5 file. 
h5f.close() del h5f return {'lon': lons, 'lat': lats, 'col_indices': col_indices, 'row_indices': row_indices} if __name__ == '__main__': from mpop.utils import debug_on debug_on() # cpp = CloudPhysicalProperties( # "/data/proj/safutv/data/polar_out/direct_readout/S_NWC_CPP_eos2_66743_20141120T1143140Z_20141120T1156542Z.nc") ct = CloudType( "S_NWC_CT_noaa19_30165_20141216T0135195Z_20141216T0151114Z.h5") res = get_lonlat( "S_NWC_avhrr_noaa19_30165_20141216T0135195Z_20141216T0151114Z.h5") mpop-1.5.0/mpop/satin/hdfeos_l1b.py000066400000000000000000001222361317160620000171430ustar00rootroot00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright (c) 2010-2014, 2016. # SMHI, # Folkborgsvägen 1, # Norrköping, # Sweden # Author(s): # Martin Raspaud # Ronald Scheirer # Adam Dybbroe # This file is part of mpop. # mpop is free software: you can redistribute it and/or modify it under the # terms of the GNU General Public License as published by the Free Software # Foundation, either version 3 of the License, or (at your option) any later # version. # mpop is distributed in the hope that it will be useful, but WITHOUT ANY # WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR # A PARTICULAR PURPOSE. See the GNU General Public License for more details. # You should have received a copy of the GNU General Public License along with # mpop. If not, see . """Interface to Modis level 1b format send through Eumetcast. http://www.icare.univ-lille1.fr/wiki/index.php/MODIS_geolocation http://www.sciencedirect.com/science?_ob=MiamiImageURL&_imagekey=B6V6V-4700BJP-\ 3-27&_cdi=5824&_user=671124&_check=y&_orig=search&_coverDate=11%2F30%2F2002&vie\ w=c&wchp=dGLzVlz-zSkWz&md5=bac5bc7a4f08007722ae793954f1dd63&ie=/sdarticle.pdf """ import glob from fnmatch import fnmatch import os.path from ConfigParser import ConfigParser import multiprocessing from trollsift.parser import Parser, globify import math import numpy as np from pyhdf.SD import SD from pyhdf.error import HDF4Error import hashlib from pyresample import geometry import copy from mpop import CONFIG_PATH from mpop.plugin_base import Reader from mpop.scene import assemble_segments import logging logger = logging.getLogger(__name__) def get_filename(template, time_slot): tmpl = time_slot.strftime(template) file_list = glob.glob(tmpl) if len(file_list) > 1: raise IOError("More than 1 file matching template %s", tmpl) elif len(file_list) == 0: raise IOError("No EOS MODIS file matching " + tmpl) return file_list[0] def check_filename(tmpl): file_list = glob.glob(tmpl) if len(file_list) > 1: raise IOError("More than 1 file matching template %s", tmpl) elif len(file_list) == 0: raise IOError("No EOS MODIS file matching " + tmpl) return file_list[0] class ModisReader(Reader): pformat = "hdfeos_l1b" res = {"1": 1000, "Q": 250, "H": 500} inv_res = {1000: "1", 250: "Q", 500: "H"} def __init__(self, *args, **kwargs): Reader.__init__(self, *args, **kwargs) self.datafiles = {} self.geofile = None self.filename = None self.data = None self.areas = {} def get_sunsat_angles(self, resolution=1000): """Get sun-satellite viewing geometry for the 1km resolution data Optional arguments: None Return sun-zenith, sun-azimuth, sat-zenith, sat-azimuth """ logger.debug("generating sun-sat viewing angles at %d", resolution) if self.geofile is not None: coarse_resolution = 1000 filename = self.geofile else: coarse_resolution = 5000 logger.info("Using 5km Sun-Sat viewing geometry and interpolating") filename = (self.datafiles.get(1000) or 
                        self.datafiles.get(500) or
                        self.datafiles.get(250))
            raise NotImplementedError("Not yet implemented...")

        logger.debug("Loading sun-sat angles from file: " + str(filename)
                     + " at resolution " + str(coarse_resolution))

        eosdata = SD(str(filename))
        hdf_names = ['SolarZenith', 'SolarAzimuth',
                     'SensorZenith', 'SensorAzimuth']
        local_names = ['sunz', 'sun_azi', 'satz', 'sat_azi']
        data = {}
        for lname, dname in zip(local_names, hdf_names):
            data[lname] = eosdata.select(dname)
            fill_value = data[lname].attributes()["_FillValue"]
            scale = data[lname].attributes()["scale_factor"]
            data[lname] = np.ma.masked_equal(data[lname].get(), fill_value)
            data[lname] = data[lname] * scale

        return (data['sunz'], data['sun_azi'], data['satz'], data['sat_azi'])

    def load(self, satscene, filename=None, *args, **kwargs):
        conf = ConfigParser()
        conf.read(os.path.join(CONFIG_PATH, satscene.fullname + ".cfg"))
        options = dict(conf.items(satscene.instrument_name + "-level2",
                                  raw=True))
        options["resolution"] = 1000
        options["geofile"] = os.path.join(options["dir"], options["geofile"])
        options.update(kwargs)

        fparser = Parser(options.get("filename"))
        gparser = Parser(options.get("geofile"))

        if filename is not None:
            datasets = {}
            if not isinstance(filename, (list, set, tuple)):
                filename = [filename]
            for fname in filename:
                if fnmatch(os.path.basename(fname), fparser.globify()):
                    metadata = fparser.parse(os.path.basename(fname))
                    datasets.setdefault(
                        metadata["start_time"], []).append(fname)
                elif fnmatch(os.path.basename(fname), gparser.globify()):
                    # parse geolocation files with their own pattern
                    metadata = gparser.parse(fname)
                    datasets.setdefault(
                        metadata["start_time"], []).append(fname)

            scenes = []
            for start_time, dataset in datasets.iteritems():
                newscn = copy.deepcopy(satscene)
                newscn.time_slot = start_time
                self.load_dataset(newscn, filename=dataset, *args, **kwargs)
                scenes.append(newscn)

            if not scenes:
                logger.debug("Looking for files")
                self.load_dataset(satscene, *args, **kwargs)
            else:
                entire_scene = assemble_segments(
                    sorted(scenes, key=lambda x: x.time_slot))
                satscene.channels = entire_scene.channels
                satscene.area = entire_scene.area
                satscene.orbit = int(entire_scene.orbit)
                satscene.info["orbit_number"] = int(entire_scene.orbit)
        else:
            self.load_dataset(satscene, *args, **kwargs)

    def load_dataset(self, satscene, filename=None, *args, **kwargs):
        """Read data from file and load it into *satscene*.
        """
        del args
        conf = ConfigParser()
        conf.read(os.path.join(CONFIG_PATH, satscene.fullname + ".cfg"))
        options = dict(conf.items(satscene.instrument_name + "-level2",
                                  raw=True))
        options["resolution"] = 1000
        options["geofile"] = os.path.join(options["dir"], options["geofile"])
        options.update(kwargs)

        fparser = Parser(options.get("filename"))
        gparser = Parser(options.get("geofile"))

        if isinstance(filename, (list, set, tuple)):
            # we got the entire dataset.
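            # A hedged sketch of the trollsift matching used in load()
            # above and in the loop below (the pattern here is hypothetical;
            # the real ones come from the configuration file):
            #
            #   p = Parser("MYD021KM.A{start_time:%Y%j.%H%M}.hdf")
            #   fnmatch("MYD021KM.A2015287.0255.hdf", p.globify())  # True
            #   p.parse("MYD021KM.A2015287.0255.hdf")["start_time"]
            #   # -> datetime.datetime(2015, 10, 14, 2, 55)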
for fname in filename: if fnmatch(os.path.basename(fname), fparser.globify()): metadata = fparser.parse(os.path.basename(fname)) resolution = self.res[metadata["resolution"]] self.datafiles[resolution] = fname elif fnmatch(os.path.basename(fname), gparser.globify()): self.geofile = fname elif ((filename is not None) and fnmatch(os.path.basename(options["filename"]), fparser.globify())): # read just one file logger.debug("Reading from file: " + str(options["filename"])) filename = options["filename"] resolution = self.res[os.path.basename(filename)[5]] self.datafiles[resolution] = filename if not self.datafiles: # find files according to config logger.debug( "Didn't get any valid file as input, looking in defined places") resolution = int(options["resolution"]) or 1000 for res in [250, 500, 1000]: datafile = globify(os.path.join(options['dir'], options["filename"]), {'resolution': self.inv_res[res], 'start_time': satscene.time_slot}) try: self.datafiles[res] = check_filename(datafile) except IOError: self.datafiles[res] = None logger.warning("Can't find file for resolution %s with template: %s", str(res), datafile) try: self.geofile = check_filename(globify(options["geofile"], {'start_time': satscene.time_slot})) except IOError: self.geofile = None logger.warning("Can't find geofile with template: %s", options['geofile']) resolution = options["resolution"] cores = options.get("cores", max(multiprocessing.cpu_count() / 4, 1)) datadict = { 1000: ['EV_250_Aggr1km_RefSB', 'EV_500_Aggr1km_RefSB', 'EV_1KM_RefSB', 'EV_1KM_Emissive'], 500: ['EV_250_Aggr500_RefSB', 'EV_500_RefSB'], 250: ['EV_250_RefSB']} loaded_bands = [] # process by dataset, reflective and emissive datasets separately resolutions = [250, 500, 1000] for res in resolutions: if res < resolution: continue logger.debug("Working on resolution %d", res) self.filename = self.datafiles[res] logger.debug("Using " + str(cores) + " cores for interpolation") try: self.data = SD(str(self.filename)) except HDF4Error as err: logger.warning("Could not load data from " + str(self.filename) + ": " + str(err)) continue datasets = datadict[res] for dataset in datasets: subdata = self.data.select(dataset) band_names = subdata.attributes()["band_names"].split(",") if len(satscene.channels_to_load & set(band_names)) > 0: # get the relative indices of the desired channels indices = [i for i, band in enumerate(band_names) if band in satscene.channels_to_load] uncertainty = self.data.select(dataset + "_Uncert_Indexes") if dataset.endswith('Emissive'): array = calibrate_tb( subdata, uncertainty, indices, band_names) else: array = calibrate_refl(subdata, uncertainty, indices) for (i, idx) in enumerate(indices): if band_names[idx] in loaded_bands: continue satscene[band_names[idx]] = array[i] # fix the resolution to match the loaded data. 
satscene[band_names[idx]].resolution = res loaded_bands.append(band_names[idx]) # Get the orbit number if not satscene.orbit: mda = self.data.attributes()["CoreMetadata.0"] orbit_idx = mda.index("ORBITNUMBER") satscene.orbit = int(mda[orbit_idx + 111:orbit_idx + 116]) # Get the geolocation # if resolution != 1000: # logger.warning("Cannot load geolocation at this resolution (yet).") # return for band_name in loaded_bands: lon, lat = self.get_lonlat( satscene[band_name].resolution, satscene.time_slot, cores) area = geometry.SwathDefinition(lons=lon, lats=lat) satscene[band_name].area = area # Trimming out dead sensor lines (detectors) on aqua: # (in addition channel 21 is noisy) if satscene.satname == "aqua": for band in ["6", "27", "36"]: if not satscene[band].is_loaded() or satscene[band].data.mask.all(): continue width = satscene[band].data.shape[1] height = satscene[band].data.shape[0] indices = satscene[band].data.mask.sum(1) < width if indices.sum() == height: continue satscene[band] = satscene[band].data[indices, :] satscene[band].area = geometry.SwathDefinition( lons=satscene[band].area.lons[indices, :], lats=satscene[band].area.lats[indices, :]) # Trimming out dead sensor lines (detectors) on terra: # (in addition channel 27, 30, 34, 35, and 36 are nosiy) if satscene.satname == "terra": for band in ["29"]: if not satscene[band].is_loaded() or satscene[band].data.mask.all(): continue width = satscene[band].data.shape[1] height = satscene[band].data.shape[0] indices = satscene[band].data.mask.sum(1) < width if indices.sum() == height: continue satscene[band] = satscene[band].data[indices, :] satscene[band].area = geometry.SwathDefinition( lons=satscene[band].area.lons[indices, :], lats=satscene[band].area.lats[indices, :]) for band_name in loaded_bands: band_uid = hashlib.sha1(satscene[band_name].data.mask).hexdigest() satscene[band_name].area.area_id = ("swath_" + satscene.fullname + "_" + str(satscene.time_slot) + "_" + str(satscene[ band_name].shape) + "_" + str(band_uid)) satscene[band_name].area_id = satscene[band_name].area.area_id def get_lonlat(self, resolution, time_slot, cores=1): """Read lat and lon. """ if (resolution, time_slot) in self.areas: return self.areas[resolution, time_slot] logger.debug("generating lon, lat at %d", resolution) if self.geofile is not None: coarse_resolution = 1000 filename = self.geofile else: coarse_resolution = 5000 logger.info("Using 5km geolocation and interpolating") filename = (self.datafiles.get(1000) or self.datafiles.get(500) or self.datafiles.get(250)) logger.debug("Loading geolocation from file: " + str(filename) + " at resolution " + str(coarse_resolution)) data = SD(str(filename)) lat = data.select("Latitude") fill_value = lat.attributes()["_FillValue"] lat = np.ma.masked_equal(lat.get(), fill_value) lon = data.select("Longitude") fill_value = lon.attributes()["_FillValue"] lon = np.ma.masked_equal(lon.get(), fill_value) if resolution == coarse_resolution: self.areas[resolution] = lon, lat return lon, lat from geotiepoints import modis5kmto1km, modis1kmto500m, modis1kmto250m logger.debug("Interpolating from " + str(coarse_resolution) + " to " + str(resolution)) if coarse_resolution == 5000: lon, lat = modis5kmto1km(lon, lat) if resolution == 500: lon, lat = modis1kmto500m(lon, lat, cores) if resolution == 250: lon, lat = modis1kmto250m(lon, lat, cores) self.areas[resolution, time_slot] = lon, lat return lon, lat # These have to be interpolated... 
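    # A minimal sketch of the geotiepoints calls used by get_lonlat()
    # above (the input arrays are hypothetical 5 km / 1 km tie points):
    #
    #   from geotiepoints import modis5kmto1km, modis1kmto250m
    #   lon1km, lat1km = modis5kmto1km(lon5km, lat5km)
    #   lon250, lat250 = modis1kmto250m(lon1km, lat1km, cores)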
def get_height(self): return self.data.select("Height") def get_sunz(self): return self.data.select("SolarZenith") def get_suna(self): return self.data.select("SolarAzimuth") def get_satz(self): return self.data.select("SensorZenith") def get_sata(self): return self.data.select("SensorAzimuth") def load(satscene, *args, **kwargs): """Read data from file and load it into *satscene*. """ del args conf = ConfigParser() conf.read(os.path.join(CONFIG_PATH, satscene.fullname + ".cfg")) options = kwargs for option, value in conf.items(satscene.instrument_name + "-level2", raw=True): options[option] = value options["resolution"] = kwargs.get("resolution", 1000) options["filename"] = kwargs.get("filename") CASES[satscene.instrument_name](satscene, options) def calibrate_refl(subdata, uncertainty, indices): """Calibration for reflective channels. """ del uncertainty #uncertainty_array = uncertainty.get() # array = np.ma.MaskedArray(subdata.get(), # mask=(uncertainty_array >= 15)) # FIXME: The loading should not be done here. array = np.vstack(np.expand_dims(subdata[idx, :, :], 0) for idx in indices) valid_range = subdata.attributes()["valid_range"] array = np.ma.masked_outside(array, valid_range[0], valid_range[1], copy=False) array = array * np.float32(1.0) offsets = np.array(subdata.attributes()["reflectance_offsets"], dtype=np.float32)[indices] scales = np.array(subdata.attributes()["reflectance_scales"], dtype=np.float32)[indices] dims = (len(indices), 1, 1) array = (array - offsets.reshape(dims)) * scales.reshape(dims) * 100 return array def calibrate_tb(subdata, uncertainty, indices, band_names): """Calibration for the emissive channels. """ del uncertainty #uncertainty_array = uncertainty.get() # array = np.ma.MaskedArray(subdata.get(), # mask=(uncertainty_array >= 15)) # FIXME: The loading should not be done here. array = np.vstack(np.expand_dims(subdata[idx, :, :], 0) for idx in indices) valid_range = subdata.attributes()["valid_range"] array = np.ma.masked_outside(array, valid_range[0], valid_range[1], copy=False) offsets = np.array(subdata.attributes()["radiance_offsets"], dtype=np.float32)[indices] scales = np.array(subdata.attributes()["radiance_scales"], dtype=np.float32)[indices] #- Planck constant (Joule second) h__ = np.float32(6.6260755e-34) #- Speed of light in vacuum (meters per second) c__ = np.float32(2.9979246e+8) #- Boltzmann constant (Joules per Kelvin) k__ = np.float32(1.380658e-23) #- Derived constants c_1 = 2 * h__ * c__ * c__ c_2 = (h__ * c__) / k__ #- Effective central wavenumber (inverse centimeters) cwn = np.array([ 2.641775E+3, 2.505277E+3, 2.518028E+3, 2.465428E+3, 2.235815E+3, 2.200346E+3, 1.477967E+3, 1.362737E+3, 1.173190E+3, 1.027715E+3, 9.080884E+2, 8.315399E+2, 7.483394E+2, 7.308963E+2, 7.188681E+2, 7.045367E+2], dtype=np.float32) #- Temperature correction slope (no units) tcs = np.array([ 9.993411E-1, 9.998646E-1, 9.998584E-1, 9.998682E-1, 9.998819E-1, 9.998845E-1, 9.994877E-1, 9.994918E-1, 9.995495E-1, 9.997398E-1, 9.995608E-1, 9.997256E-1, 9.999160E-1, 9.999167E-1, 9.999191E-1, 9.999281E-1], dtype=np.float32) #- Temperature correction intercept (Kelvin) tci = np.array([ 4.770532E-1, 9.262664E-2, 9.757996E-2, 8.929242E-2, 7.310901E-2, 7.060415E-2, 2.204921E-1, 2.046087E-1, 1.599191E-1, 8.253401E-2, 1.302699E-1, 7.181833E-2, 1.972608E-2, 1.913568E-2, 1.817817E-2, 1.583042E-2], dtype=np.float32) # Transfer wavenumber [cm^(-1)] to wavelength [m] cwn = 1 / (cwn * 100) # Some versions of the modis files do not contain all the bands. 
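    # The calibration below follows the MODIS L1B emissive scheme: counts
    # are scaled to radiances and inverted through Planck's law, with a
    # per-band linear temperature correction:
    #
    #   N  = (counts - radiance_offset) * radiance_scale
    #   T* = c_2 / (cwn * ln(c_1 / (1e6 * N * cwn**5) + 1))   # cwn in metres
    #   T  = (T* - tci) / tcs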
emmissive_channels = ["20", "21", "22", "23", "24", "25", "27", "28", "29", "30", "31", "32", "33", "34", "35", "36"] current_channels = [i for i, band in enumerate(emmissive_channels) if band in band_names] global_indices = list(np.array(current_channels)[indices]) dims = (len(indices), 1, 1) cwn = cwn[global_indices].reshape(dims) tcs = tcs[global_indices].reshape(dims) tci = tci[global_indices].reshape(dims) tmp = (array - offsets.reshape(dims)) * scales.reshape(dims) tmp = c_2 / (cwn * np.ma.log(c_1 / (1000000 * tmp * cwn ** 5) + 1)) array = (tmp - tci) / tcs return array def load_modis(satscene, options): """Read modis data from file and load it into *satscene*. *resolution* parameters specifies in which resolution to load the data. If the specified resolution is not available for the channel, it is NOT loaded. If no resolution is specified, the 1km resolution (aggregated) is used. """ if options["filename"] is not None: logger.debug("Reading from file: " + str(options["filename"])) filename = options["filename"] res = {"1": 1000, "Q": 250, "H": 500} resolution = res[os.path.split(filename)[1][5]] else: resolution = int(options["resolution"]) or 1000 filename_tmpl = satscene.time_slot.strftime(options["filename" + str(resolution)]) file_list = glob.glob(os.path.join(options["dir"], filename_tmpl)) if len(file_list) > 1: raise IOError("More than 1 file matching!") elif len(file_list) == 0: raise IOError("No EOS MODIS file matching " + filename_tmpl + " in " + options["dir"]) filename = file_list[0] cores = options.get("cores", 1) logger.debug("Using " + str(cores) + " cores for interpolation") load_generic(satscene, filename, resolution, cores) def load_generic(satscene, filename, resolution, cores): """Read modis data, generic part. """ try: data = SD(str(filename)) except HDF4Error as err: logger.warning("Could not load data from " + str(filename) + ": " + str(err)) return datadict = { 1000: ['EV_250_Aggr1km_RefSB', 'EV_500_Aggr1km_RefSB', 'EV_1KM_RefSB', 'EV_1KM_Emissive'], 500: ['EV_250_Aggr500_RefSB', 'EV_500_RefSB'], 250: ['EV_250_RefSB']} datasets = datadict[resolution] loaded_bands = [] # process by dataset, reflective and emissive datasets separately for dataset in datasets: subdata = data.select(dataset) band_names = subdata.attributes()["band_names"].split(",") if len(satscene.channels_to_load & set(band_names)) > 0: # get the relative indices of the desired channels indices = [i for i, band in enumerate(band_names) if band in satscene.channels_to_load] uncertainty = data.select(dataset + "_Uncert_Indexes") if dataset.endswith('Emissive'): array = calibrate_tb(subdata, uncertainty, indices, band_names) else: array = calibrate_refl(subdata, uncertainty, indices) for (i, idx) in enumerate(indices): satscene[band_names[idx]] = array[i] # fix the resolution to match the loaded data. 
satscene[band_names[idx]].resolution = resolution loaded_bands.append(band_names[idx]) # Get the orbit number if not satscene.orbit: mda = data.attributes()["CoreMetadata.0"] orbit_idx = mda.index("ORBITNUMBER") satscene.orbit = int(mda[orbit_idx + 111:orbit_idx + 116]) # Get the geolocation # if resolution != 1000: # logger.warning("Cannot load geolocation at this resolution (yet).") # return lat, lon = get_lat_lon(satscene, resolution, filename, cores) area = geometry.SwathDefinition(lons=lon, lats=lat) for band_name in loaded_bands: satscene[band_name].area = area # Trimming out dead sensor lines (detectors) on aqua: # (in addition channel 21 is noisy) if satscene.satname == "aqua": for band in ["6", "27", "36"]: if not satscene[band].is_loaded() or satscene[band].data.mask.all(): continue width = satscene[band].data.shape[1] height = satscene[band].data.shape[0] indices = satscene[band].data.mask.sum(1) < width if indices.sum() == height: continue satscene[band] = satscene[band].data[indices, :] satscene[band].area = geometry.SwathDefinition( lons=satscene[band].area.lons[indices, :], lats=satscene[band].area.lats[indices, :]) satscene[band].area.area_id = ("swath_" + satscene.fullname + "_" + str(satscene.time_slot) + "_" + str(satscene[band].shape) + "_" + str(band)) # Trimming out dead sensor lines (detectors) on terra: # (in addition channel 27, 30, 34, 35, and 36 are nosiy) if satscene.satname == "terra": for band in ["29"]: if not satscene[band].is_loaded() or satscene[band].data.mask.all(): continue width = satscene[band].data.shape[1] height = satscene[band].data.shape[0] indices = satscene[band].data.mask.sum(1) < width if indices.sum() == height: continue satscene[band] = satscene[band].data[indices, :] satscene[band].area = geometry.SwathDefinition( lons=satscene[band].area.lons[indices, :], lats=satscene[band].area.lats[indices, :]) satscene[band].area.area_id = ("swath_" + satscene.fullname + "_" + str(satscene.time_slot) + "_" + str(satscene[band].shape) + "_" + str(band)) def get_lat_lon(satscene, resolution, filename, cores=1): """Read lat and lon. """ conf = ConfigParser() conf.read(os.path.join(CONFIG_PATH, satscene.fullname + ".cfg")) options = {} for option, value in conf.items(satscene.instrument_name + "-level2", raw=True): options[option] = value options["filename"] = filename options["resolution"] = resolution options["cores"] = cores return LAT_LON_CASES[satscene.instrument_name](satscene, options) def get_lat_lon_modis(satscene, options): """Read lat and lon. 
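    Prefers the separate 1 km geolocation file; if only the 5 km tie
    points embedded in the data file are available, they are interpolated
    with python-geotiepoints.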
""" filename_tmpl = satscene.time_slot.strftime(options["geofile"]) file_list = glob.glob(os.path.join(options["dir"], filename_tmpl)) if len(file_list) == 0: # Try in the same directory as the data data_dir = os.path.split(options["filename"])[0] file_list = glob.glob(os.path.join(data_dir, filename_tmpl)) if len(file_list) > 1: logger.warning("More than 1 geolocation file matching!") filename = max(file_list, key=lambda x: os.stat(x).st_mtime) coarse_resolution = 1000 elif len(file_list) == 0: logger.warning("No geolocation file matching " + filename_tmpl + " in " + options["dir"]) logger.debug("Using 5km geolocation and interpolating") filename = options["filename"] coarse_resolution = 5000 else: filename = file_list[0] coarse_resolution = 1000 logger.debug("Loading geolocation file: " + str(filename) + " at resolution " + str(coarse_resolution)) resolution = options["resolution"] data = SD(str(filename)) lat = data.select("Latitude") fill_value = lat.attributes()["_FillValue"] lat = np.ma.masked_equal(lat.get(), fill_value) lon = data.select("Longitude") fill_value = lon.attributes()["_FillValue"] lon = np.ma.masked_equal(lon.get(), fill_value) if resolution == coarse_resolution: return lat, lon cores = options["cores"] from geotiepoints import modis5kmto1km, modis1kmto500m, modis1kmto250m logger.debug("Interpolating from " + str(coarse_resolution) + " to " + str(resolution)) if coarse_resolution == 5000: lon, lat = modis5kmto1km(lon, lat) if resolution == 500: lon, lat = modis1kmto500m(lon, lat, cores) if resolution == 250: lon, lat = modis1kmto250m(lon, lat, cores) return lat, lon def get_lonlat(satscene, row, col): """Estimate lon and lat. """ import glob conf = ConfigParser() conf.read(os.path.join(CONFIG_PATH, satscene.fullname + ".cfg")) path = conf.get("modis-level2", "dir") geofile_tmpl = conf.get("modis-level2", "geofile") filename_tmpl = satscene.time_slot.strftime(geofile_tmpl) file_list = glob.glob(os.path.join(path, filename_tmpl)) if len(file_list) > 1: raise IOError("More than 1 geolocation file matching!" 
+ filename_tmpl) elif len(file_list) == 0: logger.info("No MODIS geolocation file matching: " + filename_tmpl + ", estimating") filename = "" else: filename = file_list[0] logger.debug("Geolocation file = " + filename) if(os.path.exists(filename) and (satscene.lon is None or satscene.lat is None)): data = SD(filename) lat = data.select("Latitude") fill_value = lat.attributes()["_FillValue"] satscene.lat = np.ma.masked_equal(lat.get(), fill_value) lon = data.select("Longitude") fill_value = lon.attributes()["_FillValue"] satscene.lon = np.ma.masked_equal(lon.get(), fill_value) estimate = True try: lon = satscene.lon[row, col] lat = satscene.lat[row, col] if(satscene.lon.mask[row, col] == False and satscene.lat.mask[row, col] == False): estimate = False except TypeError: pass except IndexError: pass if not estimate: return lon, lat from mpop.saturn.two_line_elements import Tle tle = Tle(satellite=satscene.satname) track_start = tle.get_latlonalt(satscene.time_slot) track_end = tle.get_latlonalt(satscene.time_slot + satscene.granularity) # WGS84 # flattening f__ = 1 / 298.257223563 # semi_major_axis a__ = 6378137.0 s__, alpha12, alpha21 = vinc_dist(f__, a__, track_start[0], track_start[1], track_end[0], track_end[1]) scanlines = satscene.granularity.seconds / satscene.span if row < scanlines / 2: if row == 0: track_now = track_start else: track_now = vinc_pt(f__, a__, track_start[0], track_start[1], alpha12, (s__ * row) / scanlines) lat_now = track_now[0] lon_now = track_now[1] s__, alpha12, alpha21 = vinc_dist(f__, a__, lat_now, lon_now, track_end[0], track_end[1]) fac = 1 else: if scanlines - row - 1 == 0: track_now = track_end else: track_now = vinc_pt(f__, a__, track_end[0], track_end[1], alpha21, (s__ * (scanlines - row - 1)) / scanlines) lat_now = track_now[0] lon_now = track_now[1] s__, alpha12, alpha21 = vinc_dist(f__, a__, lat_now, lon_now, track_start[0], track_start[1]) fac = -1 if col < 1354 / 2: lat, lon, alp = vinc_pt(f__, a__, lat_now, lon_now, alpha12 + np.pi / 2 * fac, 2340000.0 / 2 - (2340000.0 / 1354) * col) else: lat, lon, alp = vinc_pt(f__, a__, lat_now, lon_now, alpha12 - np.pi / 2 * fac, (2340000.0 / 1354) * col - 2340000.0 / 2) del alp lon = np.rad2deg(lon) lat = np.rad2deg(lat) if lon > 180: lon -= 360 if lon <= -180: lon += 360 return lon, lat def vinc_dist(f__, a__, phi1, lembda1, phi2, lembda2): """ Returns the distance between two geographic points on the ellipsoid and the forward and reverse azimuths between these points. 
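    Implements Vincenty's inverse formulae for the ellipsoid given by the
    flattening *f__* and semi-major axis *a__*: *lembda* is iterated until
    its relative change drops below 1.0e-9, after which the distance
    follows as s__ = b__ * aa_ * (sigma - delta_sigma).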
lats, longs and azimuths are in radians, distance in metres Returns ( s__, alpha12, alpha21 ) as a tuple """ if (abs(phi2 - phi1) < 1e-8) and (abs(lembda2 - lembda1) < 1e-8): return 0.0, 0.0, 0.0 two_pi = 2.0 * math.pi b__ = a__ * (1.0 - f__) tan_u1 = (1 - f__) * math.tan(phi1) tan_u2 = (1 - f__) * math.tan(phi2) u_1 = math.atan(tan_u1) u_2 = math.atan(tan_u2) lembda = lembda2 - lembda1 last_lembda = -4000000.0 # an impossibe value omega = lembda # Iterate the following equations, # until there is no significant change in lembda while (last_lembda < -3000000.0 or lembda != 0 and abs((last_lembda - lembda) / lembda) > 1.0e-9): sqr_sin_sigma = (pow(math.cos(u_2) * math.sin(lembda), 2) + pow((math.cos(u_1) * math.sin(u_2) - math.sin(u_1) * math.cos(u_2) * math.cos(lembda)), 2)) sin_sigma = math.sqrt(sqr_sin_sigma) cos_sigma = (math.sin(u_1) * math.sin(u_2) + math.cos(u_1) * math.cos(u_2) * math.cos(lembda)) sigma = math.atan2(sin_sigma, cos_sigma) sin_alpha = (math.cos(u_1) * math.cos(u_2) * math.sin(lembda) / math.sin(sigma)) alpha = math.asin(sin_alpha) cos2sigma_m = (math.cos(sigma) - (2 * math.sin(u_1) * math.sin(u_2) / pow(math.cos(alpha), 2))) c__ = ((f__ / 16) * pow(math.cos(alpha), 2) * (4 + f__ * (4 - 3 * pow(math.cos(alpha), 2)))) last_lembda = lembda lembda = (omega + (1 - c__) * f__ * math.sin(alpha) * (sigma + c__ * math.sin(sigma) * (cos2sigma_m + c__ * math.cos(sigma) * (-1 + 2 * pow(cos2sigma_m, 2))))) u2_ = pow(math.cos(alpha), 2) * (a__ * a__ - b__ * b__) / (b__ * b__) aa_ = 1 + (u2_ / 16384) * (4096 + u2_ * (-768 + u2_ * (320 - 175 * u2_))) bb_ = (u2_ / 1024) * (256 + u2_ * (-128 + u2_ * (74 - 47 * u2_))) delta_sigma = bb_ * sin_sigma * (cos2sigma_m + (bb_ / 4) * (cos_sigma * (-1 + 2 * pow(cos2sigma_m, 2)) - (bb_ / 6) * cos2sigma_m * (-3 + 4 * sqr_sin_sigma) * (-3 + 4 * pow(cos2sigma_m, 2)))) s__ = b__ * aa_ * (sigma - delta_sigma) alpha12 = (math.atan2((math.cos(u_2) * math.sin(lembda)), (math.cos(u_1) * math.sin(u_2) - math.sin(u_1) * math.cos(u_2) * math.cos(lembda)))) alpha21 = (math.atan2((math.cos(u_1) * math.sin(lembda)), (-math.sin(u_1) * math.cos(u_2) + math.cos(u_1) * math.sin(u_2) * math.cos(lembda)))) if (alpha12 < 0.0): alpha12 = alpha12 + two_pi if (alpha12 > two_pi): alpha12 = alpha12 - two_pi alpha21 = alpha21 + two_pi / 2.0 if (alpha21 < 0.0): alpha21 = alpha21 + two_pi if (alpha21 > two_pi): alpha21 = alpha21 - two_pi return s__, alpha12, alpha21 # END of Vincenty's Inverse formulae #---------------------------------------------------------------------------- # Vincenty's Direct formulae | # Given: latitude and longitude of a point (phi1, lembda1) and | # the geodetic azimuth (alpha12) | # and ellipsoidal distance in metres (s) to a second point, | # | # Calculate: the latitude and longitude of the second point (phi2, lembda2) | # and the reverse azimuth (alpha21). | # | #---------------------------------------------------------------------------- def vinc_pt(f__, a__, phi1, lembda1, alpha12, s__): """ Returns the lat and long of projected point and reverse azimuth given a reference point and a distance and azimuth to project. 
lats, longs and azimuths are passed in decimal degrees Returns ( phi2, lambda2, alpha21 ) as a tuple """ two_pi = 2.0 * math.pi if (alpha12 < 0.0): alpha12 = alpha12 + two_pi if (alpha12 > two_pi): alpha12 = alpha12 - two_pi b__ = a__ * (1.0 - f__) tan_u1 = (1 - f__) * math.tan(phi1) u_1 = math.atan(tan_u1) sigma1 = math.atan2(tan_u1, math.cos(alpha12)) sinalpha = math.cos(u_1) * math.sin(alpha12) cosalpha_sq = 1.0 - sinalpha * sinalpha u_2 = cosalpha_sq * (a__ * a__ - b__ * b__) / (b__ * b__) aa_ = 1.0 + (u_2 / 16384) * (4096 + u_2 * (-768 + u_2 * (320 - 175 * u_2))) bb_ = (u_2 / 1024) * (256 + u_2 * (-128 + u_2 * (74 - 47 * u_2))) # Starting with the approximation sigma = (s__ / (b__ * aa_)) last_sigma = 2.0 * sigma + 2.0 # something impossible # Iterate the following three equations # until there is no significant change in sigma # two_sigma_m , delta_sigma while (abs((last_sigma - sigma) / sigma) > 1.0e-9): two_sigma_m = 2 * sigma1 + sigma delta_sigma = (bb_ * math.sin(sigma) * (math.cos(two_sigma_m) + (bb_ / 4) * (math.cos(sigma) * (-1 + 2 * math.pow(math.cos(two_sigma_m), 2) - (bb_ / 6) * math.cos(two_sigma_m) * (-3 + 4 * math.pow(math.sin(sigma), 2)) * (-3 + 4 * math.pow(math.cos(two_sigma_m), 2)))))) last_sigma = sigma sigma = (s__ / (b__ * aa_)) + delta_sigma phi2 = math.atan2((math.sin(u_1) * math.cos(sigma) + math.cos(u_1) * math.sin(sigma) * math.cos(alpha12)), ((1 - f__) * math.sqrt(math.pow(sinalpha, 2) + pow(math.sin(u_1) * math.sin(sigma) - math.cos(u_1) * math.cos(sigma) * math.cos(alpha12), 2)))) lembda = math.atan2((math.sin(sigma) * math.sin(alpha12)), (math.cos(u_1) * math.cos(sigma) - math.sin(u_1) * math.sin(sigma) * math.cos(alpha12))) cc_ = (f__ / 16) * cosalpha_sq * (4 + f__ * (4 - 3 * cosalpha_sq)) omega = lembda - (1 - cc_) * f__ * sinalpha * \ (sigma + cc_ * math.sin(sigma) * (math.cos(two_sigma_m) + cc_ * math.cos(sigma) * (-1 + 2 * math.pow(math.cos(two_sigma_m), 2)))) lembda2 = lembda1 + omega alpha21 = math.atan2(sinalpha, (-math.sin(u_1) * math.sin(sigma) + math.cos(u_1) * math.cos(sigma) * math.cos(alpha12))) alpha21 = alpha21 + two_pi / 2.0 if (alpha21 < 0.0): alpha21 = alpha21 + two_pi if (alpha21 > two_pi): alpha21 = alpha21 - two_pi return phi2, lembda2, alpha21 # END of Vincenty's Direct formulae CASES = { "modis": load_modis } LAT_LON_CASES = { "modis": get_lat_lon_modis } if __name__ == "__main__": filenames = [u'/data/prod/satellit/modis/lvl1/thin_MYD021KM.A2015287.0255.005.2015287051016.NRT.hdf', u'/data/prod/satellit/modis/lvl1/thin_MYD021KM.A2015287.0300.005.2015287050819.NRT.hdf', u'/data/prod/satellit/modis/lvl1/thin_MYD021KM.A2015287.0305.005.2015287050825.NRT.hdf'] from mpop.utils import debug_on debug_on() from mpop.satellites import PolarFactory from datetime import datetime time_slot = datetime(2015, 10, 14, 2, 55) orbit = "18181" global_data = PolarFactory.create_scene( "EARSEOS-Aqua", "", "modis", time_slot, orbit) global_data.load([3.75, 0.555, 0.551, 7.3, 1.63, 10.8, 0.488, 12.0, 0.85, 0.469, 0.748, 0.443, 0.645, 6.7, 0.635, 8.7, 0.412], filename=filenames) #global_data.channels_to_load = set(['31']) #reader = ModisReader(global_data) #reader.load(global_data, filename=filenames) print global_data # global_data[10.8].show() mpop-1.5.0/mpop/satin/helper_functions.py000066400000000000000000000122051317160620000204760ustar00rootroot00000000000000 #!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright (c) 2014, 2015. # # Author(s): # # Panu Lahtinen # Martin Raspaud # # This file is part of mpop. 
#
# mpop is free software: you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# mpop is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE.  See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# mpop.  If not, see <http://www.gnu.org/licenses/>.

'''Helper functions for area extent calculations.
'''

import numpy as np

from mpop.projector import get_area_def
# from pyresample.utils import AreaNotFound
import pyresample
import logging
from pyproj import Proj

LOGGER = logging.getLogger(__name__)


def area_def_names_to_extent(area_def_names, proj4_str,
                             default_extent=(-5567248.07, -5570248.48,
                                             5570248.48, 5567248.07)):
    '''Convert a list of *area_def_names* to maximal area extent in
    destination projection defined by *proj4_str*.  *default_extent*
    gives the extreme values.  Default value is MSG3 extents at
    lat0=0.0.  If a boundary of one of the area_defs is entirely
    invalid, the *default_extent* is taken.
    '''

    if type(area_def_names) is not list:
        area_def_names = [area_def_names]

    maximum_extent = None

    for name in area_def_names:
        try:
            adef = get_area_def(name)
            if "proj=geos" in adef.proj4_string:
                maximum_extent = update_max_extent(maximum_extent,
                                                   adef.area_extent)
                continue
            boundaries = adef.get_boundary_lonlats()
        except pyresample.utils.AreaNotFound:
            LOGGER.warning('Area definition not found ' + name)
            continue
        except AttributeError:
            boundaries = name.get_boundary_lonlats()

        if (any(boundaries[0].side1 > 1e20) or
                any(boundaries[0].side2 > 1e20) or
                any(boundaries[0].side3 > 1e20) or
                any(boundaries[0].side4 > 1e20)):
            if default_extent:
                maximum_extent = list(default_extent)
                continue
            else:
                return None

        lon_sides = (boundaries[0].side1, boundaries[0].side2,
                     boundaries[0].side3, boundaries[0].side4)
        lat_sides = (boundaries[1].side1, boundaries[1].side2,
                     boundaries[1].side3, boundaries[1].side4)

        maximum_extent = boundaries_to_extent(proj4_str, maximum_extent,
                                              default_extent,
                                              lon_sides, lat_sides)

    if not maximum_extent:
        return None

    maximum_extent = list(maximum_extent)
    maximum_extent[0] -= 10000
    maximum_extent[1] -= 10000
    maximum_extent[2] += 10000
    maximum_extent[3] += 10000

    return maximum_extent


def boundaries_to_extent(proj4_str, maximum_extent, default_extent,
                         lon_sides, lat_sides):
    '''Get area extent from given boundaries.
    '''

    # proj4-ify the projection string
    if '+' not in proj4_str:
        proj4_str = proj4_str.split(' ')
        proj4_str = '+' + ' +'.join(proj4_str)

    pro = Proj(proj4_str)

    # extents for edges
    x_dir, y_dir = pro(np.concatenate(lon_sides),
                       np.concatenate(lat_sides))

    # replace invalid values with NaN
    x_dir[np.abs(x_dir) > 1e20] = np.nan
    y_dir[np.abs(y_dir) > 1e20] = np.nan

    # return None when no default specified
    if not default_extent:
        if any(np.isnan(x_dir)) or any(np.isnan(y_dir)):
            return None

    # Get the maximum needed extent from different corners.
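    # (A sketch of the forward transform used above, with a hypothetical
    # geostationary projection string:
    #
    #     pro = Proj("+proj=geos +h=35785831.0 +a=6378169.0 "
    #                "+b=6356583.8 +lon_0=0.0")
    #     x, y = pro(10.0, 50.0)    # degrees in, projection metres out
    #
    # np.nanmin/np.nanmax below ignore the NaNs set for invalid points.)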
    extent = [np.nanmin(x_dir), np.nanmin(y_dir),
              np.nanmax(x_dir), np.nanmax(y_dir)]

    # Replace "infinity" values with default extent
    for i in range(4):
        if np.isnan(extent[i]):
            extent[i] = default_extent[i]

    maximum_extent = update_max_extent(maximum_extent, extent)

    # Replace "infinity" values with default extent
    for i in range(4):
        if not np.isfinite(maximum_extent[i]):
            maximum_extent[i] = default_extent[i]

    return maximum_extent


def update_max_extent(maximum_extent, extent):
    """Update the *maximum_extent* to cover also *extent*"""
    # update maximum extent
    if maximum_extent is None:
        maximum_extent = extent
    else:
        if maximum_extent[0] > extent[0]:
            maximum_extent[0] = extent[0]
        if maximum_extent[1] > extent[1]:
            maximum_extent[1] = extent[1]
        if maximum_extent[2] < extent[2]:
            maximum_extent[2] = extent[2]
        if maximum_extent[3] < extent[3]:
            maximum_extent[3] = extent[3]

    return maximum_extent
mpop-1.5.0/mpop/satin/hrpt.py000066400000000000000000000330031317160620000161030ustar00rootroot00000000000000
#!/usr/bin/env python
# -*- coding: utf-8 -*-

# Copyright (c) 2010, 2011.

# SMHI,
# Folkborgsvägen 1,
# Norrköping,
# Sweden

# Author(s):

#   Martin Raspaud

# This file is part of mpop.

# mpop is free software: you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.

# mpop is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE.  See the GNU General Public License for more details.

# You should have received a copy of the GNU General Public License along with
# mpop.  If not, see <http://www.gnu.org/licenses/>.

"""Interface to HRPT level 0 format. Uses AAPP and the aapp1b reader.
"""
import glob
import logging
import os.path
import shutil
import subprocess
import tempfile
from ConfigParser import ConfigParser
import datetime

from mpop.utils import ensure_dir
import mpop.satin.aapp1b
from mpop import CONFIG_PATH
from mpop.satellites import PolarFactory

WORKING_DIR = "/tmp"
SATPOS_DIR = os.path.sep.join(os.environ["AAPP_PREFIX"].split(os.path.sep)[:-1])
SATPOS_DIR = os.path.join(SATPOS_DIR, "data", "satpos")

LOG = logging.getLogger("hrpt loader")


def get_satpos_file(satpos_time, satname):
    """Return the current satpos file.
    """
    satpos_file = os.path.join(SATPOS_DIR,
                               "satpos_" + satname + "_" +
                               satpos_time.strftime("%Y%m%d") + ".txt")
    if os.path.exists(satpos_file):
        return satpos_file
    elif satpos_time.hour < 2:
        satpos_time -= datetime.timedelta(days=1)
        satpos_file = os.path.join(SATPOS_DIR,
                                   "satpos_" + satname + "_" +
                                   satpos_time.strftime("%Y%m%d") + ".txt")
        return satpos_file
    else:
        raise IOError("Missing satpos file: " + satpos_file)


def load(satscene):
    """Read data from file and load it into *satscene*.
    """
    conf = ConfigParser()
    conf.read(os.path.join(CONFIG_PATH, satscene.fullname + ".cfg"))
    options = {}
    for option, value in conf.items(satscene.instrument_name + "-level1",
                                    raw=True):
        options[option] = value
    CASES[satscene.instrument_name](satscene, options)


def load_avhrr(satscene, options):
    """Read avhrr data from file and load it into *satscene*.
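
    The level-0 file is first decommutated with AAPP, then calibrated and
    navigated (avhrcl), moved to the level-2 location given in the
    configuration, and finally read back through mpop.satin.aapp1b.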
""" if "filename" not in options: raise IOError("No filename given, cannot load.") filename = os.path.join( options["dir"], (satscene.time_slot.strftime(options["filename"]))) file_list = glob.glob(satscene.time_slot.strftime(filename)) if len(file_list) > 1: raise IOError("More than one hrpt file matching!") elif len(file_list) == 0: raise IOError("No hrpt file matching!: " + satscene.time_slot.strftime(filename)) filename = file_list[0] (handle, tempname) = tempfile.mkstemp(prefix="hrpt_decommuted", dir=WORKING_DIR) os.close(handle) del handle if(satscene.satname == "metop"): satname = "M02" else: satname = satscene.satname + satscene.number decommutation(filename, tempname, satscene, options, satname) calibration_navigation(tempname, satscene, options) conf = ConfigParser() conf.read(os.path.join(CONFIG_PATH, satscene.fullname + ".cfg")) new_dir = conf.get(satscene.instrument_name + "-level2", "dir") new_name = conf.get(satscene.instrument_name + "-level2", "filename") pathname = os.path.join(new_dir, satscene.time_slot.strftime(new_name)) LOG.debug("Saving to "+pathname) shutil.move(tempname, pathname) mpop.satin.aapp1b.load(satscene) os.remove(pathname) def convert_to_1b(in_filename, out_filename, time_slot_start, time_slot_end, shortname, orbit): """Convert hrpt file to level 1b. """ (handle, tempname) = tempfile.mkstemp(prefix="hrpt_decommuted", dir=WORKING_DIR) os.close(handle) del handle LOG.debug("Decommuting...") decommutation(in_filename, tempname, time_slot_start, time_slot_end, shortname) LOG.debug("Calibrating, navigating...") calibration_navigation(tempname, time_slot_start, shortname) conf = ConfigParser() conf.read(os.path.join(CONFIG_PATH, "regional" + shortname + ".cfg")) new_dir = conf.get("avhrr-level2", "dir", raw=True) new_name = conf.get("avhrr-level2", "filename", raw=True) options = {"satellite": shortname, "orbit": orbit} pathname = time_slot_start.strftime(os.path.join(new_dir, new_name))%options LOG.debug("Saving to "+pathname) ensure_dir(pathname) shutil.move(tempname, pathname) def calibration_navigation(filename, time_slot, shortname): """Perform calibration on *filename* """ import pysdh2orbnum LOG.info("Calibrating "+filename) formated_date = time_slot.strftime("%d/%m/%y %H:%M:%S.000") satpos_file = get_satpos_file(time_slot, shortname) LOG.debug(formated_date) LOG.debug(satpos_file) orbit_number = str(pysdh2orbnum.sdh2orbnum(shortname, formated_date, satpos_file)) avhrcl = ("cd /tmp;" + "$AAPP_PREFIX/AAPP/bin/avhrcl -c -l -s " + shortname + " -d " + time_slot.strftime("%Y%m%d") + " -h " + time_slot.strftime("%H%M") + " -n " + orbit_number + " " + filename) LOG.debug("Running " + avhrcl) proc = subprocess.Popen(avhrcl, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) (out, err) = proc.communicate() if out: LOG.debug(out) if err: LOG.error(err) anacl1 = ("cd /tmp;" + "$ANA_PATH/bin/ana_lmk_loc -D " + filename) LOG.debug("Running " + anacl1) proc = subprocess.Popen(anacl1, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) (out, err) = proc.communicate() if out: LOG.debug(out) if err: LOG.error(err) anacl2 = ("cd /tmp;" + "$ANA_PATH/bin/ana_estatt -s " + shortname + " -d " + time_slot.strftime("%Y%m%d") + " -h " + time_slot.strftime("%H%M") + " -n " + orbit_number) LOG.debug("Running " + anacl2) proc = subprocess.Popen(anacl2, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) (out, err) = proc.communicate() if out: LOG.debug(out) if err: LOG.error(err) avhrcl2 = ("cd /tmp;" + "$AAPP_PREFIX/AAPP/bin/avhrcl -l -s " + shortname + 
" -d " + time_slot.strftime("%Y%m%d") + " -h " + time_slot.strftime("%H%M") + " -n " + orbit_number + " " + filename) LOG.debug("Running " + avhrcl2) proc = subprocess.Popen(avhrcl2, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) (out, err) = proc.communicate() if out: LOG.debug(out) if err: LOG.error(err) def decommutation(filename_from, filename_to, time_slot_start, time_slot_end, shortname): """Perform decommutation on *filename_from* and save the result in *filename_to*. """ import pysdh2orbnum LOG.info("Decommuting "+filename_from) (handle, tempname) = tempfile.mkstemp(prefix="decommutation", suffix=".par", dir=WORKING_DIR) os.close(handle) handle = open(tempname, "w") handle.write("1,0,0,0,0,0,0,0,0,0,0,1\n") handle.write("10,11,15,15,16,0,0,13,0,0,0,14\n") handle.write(str(time_slot_start.year) + "\n") handle.write("0\n") satpos_file = get_satpos_file(time_slot_start, shortname) formated_date = time_slot_start.strftime("%d/%m/%y %H:%M:%S.000") orbit_start = str(pysdh2orbnum.sdh2orbnum(shortname, formated_date, satpos_file)) satpos_file = get_satpos_file(time_slot_end, shortname) formated_date = time_slot_end.strftime("%d/%m/%y %H:%M:%S.000") orbit_end = str(pysdh2orbnum.sdh2orbnum(shortname, formated_date, satpos_file)) handle.write(orbit_start + "," + orbit_end + "\n") handle.close() decom = "$AAPP_PREFIX/AAPP/bin/decommutation" cmd = " ".join(["cd " + WORKING_DIR + ";", decom, "ATOVS", tempname, filename_from]) LOG.debug("Running " + cmd) proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) (out, err) = proc.communicate() if out: LOG.debug(out) if err: LOG.error(err) shutil.move(os.path.join(WORKING_DIR, "hrpt.l1b"), filename_to) os.remove(tempname) LOG.debug("Decommutation done") def get_orbit(time_slot, shortname): import pysdh2orbnum formated_date = time_slot.strftime("%d/%m/%y %H:%M:%S.000") satpos_file = get_satpos_file(time_slot, shortname) return str(pysdh2orbnum.sdh2orbnum(shortname, formated_date, satpos_file)) def concatenate(granules, channels=None): """Concatenate hrpt files. 
""" filenames = [os.path.join(granule.directory, granule.file_name) for granule in granules] arg_string = " ".join(filenames) if filenames[0].endswith(".bz2"): cat_cmd = "bzcat" else: cat_cmd = "cat" conffile = os.path.join(CONFIG_PATH, granules[0].fullname + ".cfg") conf = ConfigParser() conf.read(conffile) directory = conf.get('avhrr-level1','dir') filename = conf.get('avhrr-level1','filename') filename = granules[0].time_slot.strftime(filename) output_name = os.path.join(directory, filename) cmd = cat_cmd + " " + arg_string + " > " + output_name LOG.debug(cmd) proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) (out, err) = proc.communicate() if out: LOG.debug(out) if err: LOG.error(err) LOG.debug("Done concatenating level0 files.") new_dir = conf.get(granules[0].instrument_name + "-level2", "dir") new_name = conf.get(granules[0].instrument_name + "-level2", "filename") pathname = os.path.join(new_dir, granules[0].time_slot.strftime(new_name)) shortname = conf.get('avhrr-level1','shortname') convert_to_1b(output_name, pathname, granules[0].time_slot, granules[-1].time_slot + granules[-1].granularity, shortname, get_orbit(granules[0].time_slot, shortname)) os.remove(output_name) scene = PolarFactory.create_scene(granules[0].satname, granules[0].number, granules[0].instrument_name, granules[0].time_slot, get_orbit(granules[0].time_slot, shortname), variant=granules[0].variant) scene.load(channels) os.remove(pathname) return scene def get_lat_lon(satscene, resolution): """Read lat and lon. """ del resolution return LL_CASES[satscene.instrument_name](satscene, None) def get_lat_lon_avhrr(satscene, options): """Read lat and lon. """ del options return satscene.lat, satscene.lon def get_lonlat(satscene, row, col): """Read lat and lon. """ return LONLAT_CASES[satscene.instrument_name](satscene, row, col) def get_lonlat_avhrr(satscene, row, col): """Read longitude and latitude for a given pixel. """ # Needs the SATID AAPP env variable to be set to find satid.txt... import pyaapp import math t_start = satscene.time_slot epoch = datetime.datetime(1950, 1, 1) t50_start = (t_start - epoch) jday_start = t50_start.seconds / (3600.0 *24) + t50_start.days jday_end = jday_start if(satscene.satname == "metop"): satname = "M02" else: satname = satscene.satname + satscene.number satpos_file = get_satpos_file(satscene.time_slot, satname) pyaapp.read_satpos_file(jday_start, jday_end, satscene.satname+" "+str(int(satscene.number)), satpos_file) att = pyaapp.prepare_attitude(int(satscene.number), 0, 0, 0) lonlat = pyaapp.linepixel2lonlat(int(satscene.number), row, col, att, jday_start, jday_end)[1:3] return (lonlat[0] * 180.0 / math.pi, lonlat[1] * 180.0 / math.pi) LONLAT_CASES = { "avhrr": get_lonlat_avhrr } LL_CASES = { "avhrr": get_lat_lon_avhrr } CASES = { "avhrr": load_avhrr } mpop-1.5.0/mpop/satin/hrpt_hmf.py000066400000000000000000000447651317160620000167560ustar00rootroot00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright (c) 2014 Martin Raspaud # Author(s): # Martin Raspaud # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the # GNU General Public License for more details. # You should have received a copy of the GNU General Public License # along with this program. If not, see . """Interface to HRPT level 0 format. Needs pyorbital. Since the loading and calibration goes quite fast, all channels are calibrated at the same time, so don't hesitate to load all the channels anyway. Contrarily to AAPP, no smoothing, sigma or gross filtering is taking place. TODO: - Faster navigation (pyorbital). """ from ConfigParser import ConfigParser import os import logging import glob import numpy as np import numexpr as ne from mpop.plugin_base import Reader from mpop import CONFIG_PATH from pyresample.geometry import SwathDefinition logger = logging.getLogger(__name__) # Constants c1 = 1.1910427e-5 #mW/(m2-sr-cm-4) c2 = 1.4387752 #cm-K calib = {"noaa 15": # copy from noaa 16 { # VIS "intersections": np.array([497.5, 500.3, 498.7]), "slopes_l": np.array([0.0523, 0.0513, 0.0262]), "slopes_h": np.array([0.1528, 0.1510, 0.1920]), "intercepts_l": np.array([-2.016, -1.943, -1.01]), "intercepts_h": np.array([-51.91, -51.77, -84.2]), # IR "d0": np.array([276.355, 276.142, 275.996, 276.132, 0]), "d1": np.array([0.0562, 0.05605, 0.05486, 0.0594, 0]), "d2": np.array([-1.590e-5, -1.707e-5, -1.223e-5, -1.344e-5, 0]), "d3": np.array([2.486e-8, 2.595e-8, 1.862e-8, 2.112e-8, 0]), "d4": np.array([-1.199e-11, -1.224e-11, -0.853e-11, -1.001e-11, 0]), "prt_weights": np.array((.25, .25, .25, .25)), "vc": np.array((2700.1148, 917.2289, 838.1255)), "A": np.array((1.592459, 0.332380, 0.674623)), "B": np.array((0.998147, 0.998522, 0.998363)), "N_S": np.array([0, -2.467, -2.009]), "b0": np.array([0, 2.96, 2.25]), "b1": np.array([0, -0.05411, -0.03665]), "b2": np.array([0, 0.00024532, 0.00014854]), }, "noaa 16": { # VIS "intersections": np.array([497.5, 500.3, 498.7]), "slopes_l": np.array([0.0523, 0.0513, 0.0262]), "slopes_h": np.array([0.1528, 0.1510, 0.1920]), "intercepts_l": np.array([-2.016, -1.943, -1.01]), "intercepts_h": np.array([-51.91, -51.77, -84.2]), # IR "d0": np.array([276.355, 276.142, 275.996, 276.132, 0]), "d1": np.array([0.0562, 0.05605, 0.05486, 0.0594, 0]), "d2": np.array([-1.590e-5, -1.707e-5, -1.223e-5, -1.344e-5, 0]), "d3": np.array([2.486e-8, 2.595e-8, 1.862e-8, 2.112e-8, 0]), "d4": np.array([-1.199e-11, -1.224e-11, -0.853e-11, -1.001e-11, 0]), "prt_weights": np.array((.25, .25, .25, .25)), "vc": np.array((2700.1148, 917.2289, 838.1255)), "A": np.array((1.592459, 0.332380, 0.674623)), "B": np.array((0.998147, 0.998522, 0.998363)), "N_S": np.array([0, -2.467, -2.009]), "b0": np.array([0, 2.96, 2.25]), "b1": np.array([0, -0.05411, -0.03665]), "b2": np.array([0, 0.00024532, 0.00014854]), }, "noaa 18": # FIXME: copy of noaa 19 { # VIS "intersections": np.array([496.43, 500.37, 496.11]), "slopes_l": np.array([0.055091, 0.054892, 0.027174]), "slopes_h": np.array([0.16253, 0.16325, 0.18798]), "intercepts_l": np.array([-2.1415, -2.1288, -1.0881]), "intercepts_h": np.array([-55.863, -56.445, -81.491]), # IR "d0": np.array([276.601, 276.683, 276.565, 276.615, 0]), "d1": np.array([0.05090, 0.05101, 0.05117, 0.05103, 0]), "d2": np.array([1.657e-6, 1.482e-6, 1.313e-6, 1.484e-6, 0]), "d3": np.array([0, 0, 0, 0, 0]), "d4": np.array([0, 0, 0, 0, 0]), "prt_weights": np.array((1, 1, 1, 1)), "vc": np.array((2659.7952, 928.1460, 833.2532)), "A": np.array((1.698704, 0.436645, 0.253179)), "B": np.array((0.996960, 0.998607, 0.999057)), "N_S": np.array([0, -5.49, -3.39]), "b0": np.array([0, 5.70, 3.58]), "b1": np.array([0, -0.11187, -0.05991]), 
"b2": np.array([0, 0.00054668, 0.00024985]), }, "noaa 19": { # VIS "intersections": np.array([496.43, 500.37, 496.11]), "slopes_l": np.array([0.055091, 0.054892, 0.027174]), "slopes_h": np.array([0.16253, 0.16325, 0.18798]), "intercepts_l": np.array([-2.1415, -2.1288, -1.0881]), "intercepts_h": np.array([-55.863, -56.445, -81.491]), # IR "d0": np.array([276.601, 276.683, 276.565, 276.615, 0]), "d1": np.array([0.05090, 0.05101, 0.05117, 0.05103, 0]), "d2": np.array([1.657e-6, 1.482e-6, 1.313e-6, 1.484e-6, 0]), "d3": np.array([0, 0, 0, 0, 0]), "d4": np.array([0, 0, 0, 0, 0]), "prt_weights": np.array((1, 1, 1, 1)), "vc": np.array((2659.7952, 928.1460, 833.2532)), "A": np.array((1.698704, 0.436645, 0.253179)), "B": np.array((0.996960, 0.998607, 0.999057)), "N_S": np.array([0, -5.49, -3.39]), "b0": np.array([0, 5.70, 3.58]), "b1": np.array([0, -0.11187, -0.05991]), "b2": np.array([0, 0.00054668, 0.00024985]), }, "metop-a": { # VIS "intersections": np.array([501, 500, 502]), "slopes_l": np.array([0.0537, 0.0545, 0.0264]), "slopes_h": np.array([0.1587, 0.1619, 0.1837]), "intercepts_l": np.array([-2.1719, -2.167, -1.0868]), "intercepts_h": np.array([-54.7824, -55.913, -80.0116]), # IR "d0": np.array([276.6194, 276.6511, 276.6597, 276.3685, 0]), "d1": np.array([0.050919, 0.050892, 0.050845, 0.050992, 0]), "d2": np.array([1.470892e-6, 1.489e-6, 1.520646e-6, 1.48239e-6, 0]), "d3": np.array([0, 0, 0, 0, 0]), "d4": np.array([0, 0, 0, 0, 0]), "prt_weights": np.array((1, 1, 1, 1)) / 4.0, "vc": np.array((2687, 927.2, 837.7)), "A": np.array((2.06699, 0.55126, 0.34716)), "B": np.array((0.996577, 0.998509, 0.998947)), "N_S": np.array([0, -4.98, -3.40]), "b0": np.array([0, 5.44, 3.84]), "b1": np.array([0, 0.89848 - 1, 0.93751 - 1]), "b2": np.array([0, 0.00046964, 0.00025239]), }, "metop-b": { # VIS "intersections": np.array([501, 503, 501]), "slopes_l": np.array([0.053572113, 0.051817433, 0.023518528]), "slopes_h": np.array([0.15871941, 0.15264062, 0.16376181]), "intercepts_l": np.array([-2.1099778, -2.0923391, -0.9879577]), "intercepts_h": np.array([-54.751018, -52.806460, -71.229881]), # IR "d0": np.array([276.5853, 276.5335, 276.5721, 276.5750, 0]), "d1": np.array([0.050933, 0.051033, 0.051097, 0.05102, 0]), "d2": np.array([1.54333e-6, 1.49751e-6, 1.42928e-6, 1.50841e-6, 0]), "d3": np.array([0, 0, 0, 0, 0]), "d4": np.array([0, 0, 0, 0, 0]), "prt_weights": np.array((1, 1, 1, 1)) / 4.0, "vc": np.array((2687, 927.2, 837.7)), "A": np.array((2.06699, 0.55126, 0.34716)), "B": np.array((0.996577, 0.998509, 0.998947)), "N_S": np.array([0, -4.75, -4.39]), "b0": np.array([0, 4.85, 4.36]), "b1": np.array([0, 0.903229 - 1, 0.923365 - 1]), "b2": np.array([0, 0.00048091, 0.00033524]), } } SATELLITES = {7: "noaa 15", 3: "noaa 16", 5: "noaa 18", 13: "noaa 18", 15: "noaa 19"} def bfield(array, bit): """return the bit array. """ return (array & 2**(9 - bit + 1)).astype(np.bool) class HRPTReader(Reader): """HRPT minor frame reader. """ pformat = "hrpt_hmf" def load(self, satscene): """Read data from file and load it into *satscene*. """ conf = ConfigParser() conf.read(os.path.join(CONFIG_PATH, satscene.fullname + ".cfg")) options = {} for option, value in conf.items(satscene.instrument_name + "-level2", raw = True): options[option] = value CASES[satscene.instrument_name](self, satscene, options) def load_avhrr(self, satscene, options): """Read avhrr data from file and load it into *satscene*. 
""" if "filename" not in options: raise IOError("No filename given, cannot load.") filename = os.path.join( options["dir"], (satscene.time_slot.strftime(options["filename"]))) file_list = glob.glob(satscene.time_slot.strftime(filename)) if len(file_list) > 1: raise IOError("More than one hrpt file matching!") elif len(file_list) == 0: raise IOError("No hrpt file matching!: " + satscene.time_slot.strftime(filename)) filename = file_list[0] array = read_file(filename) sat = (array["id"]["id"] & (2 ** 6 - 1)) >> 3 sat = SATELLITES[sat[len(sat) / 2]] lon, lat, alt = navigate(array["timecode"], sat) area = SwathDefinition(lon.reshape(2048, -1), lat.reshape(2048, -1)) satscene.area = area vis = vis_cal(array["image_data"][:, :, :3], sat) ir_ = ir_cal(array["image_data"][:, :, 2:], array["telemetry"]["PRT"], array["back_scan"], array["space_data"], sat) channels = np.empty(array["image_data"].shape, dtype=np.float64) channels[:, :, :2] = vis[:, :, :2] channels[:, :, 3:] = ir_[:, :, 1:] ch3a = bfield(array["id"]["id"], 10) ch3b = np.logical_not(ch3a) channels[ch3a, :, 2] = vis[ch3a, :, 2] channels[ch3b, :, 2] = ir_[ch3b, :, 0] for chan in satscene.channels_to_load: if chan == "1": satscene["1"] = np.ma.array(vis[:, :, 0]) if chan == "2": satscene["2"] = np.ma.array(vis[:, :, 1]) if chan == "3A": satscene["3A"] = np.ma.array(vis[:, :, 2], mask=np.tile(ch3a, (1, 2048))) if chan == "3B": satscene["3B"] = np.ma.array(ir_[:, :, 0], mask=np.tile(ch3b, (1, 2048))) if chan == "4": satscene["4"] = np.ma.array(ir_[:, :, 1]) if chan == "5": satscene["5"] = np.ma.array(ir_[:, :, 2]) ## Reading ## http://www.ncdc.noaa.gov/oa/pod-guide/ncdc/docs/klm/html/c4/sec4-1.htm#t413-1 def read_file(filename): """Read the file using numpy """ dtype = np.dtype([('frame_sync', '>u2', (6, )), ('id', [('id', '>u2'), ('spare', '>u2')]), ('timecode', '>u2', (4, )), ('telemetry', [("ramp_calibration", '>u2', (5, )), ("PRT", '>u2', (3, )), ("ch3_patch_temp", '>u2'), ("spare", '>u2'),]), ('back_scan', '>u2', (10, 3)), ('space_data', '>u2', (10, 5)), ('sync', '>u2'), ('TIP_data', '>u2', (520, )), ('spare', '>u2', (127, )), ('image_data', '>u2', (2048, 5)), ('aux_sync', '>u2', (100, ))]) arr = np.memmap(filename, dtype=dtype) #arr = arr.newbyteorder() return arr ## navigation from pyorbital.orbital import Orbital from datetime import datetime, timedelta, time from pyorbital.geoloc import ScanGeometry, compute_pixels, get_lonlatalt def timecode(tc_array): word = tc_array[0] day = word >> 1 word = tc_array[1] msecs = ((127) & word) * 1024 word = tc_array[2] msecs += word & 1023 msecs *= 1024 word = tc_array[3] msecs += word & 1023 return datetime(2014, 1, 1) + timedelta(days=int(day) - 1, milliseconds=int(msecs)) def navigate(timecodes, satellite): orb = Orbital(satellite) first_time = timecode(timecodes[0]) first_time = datetime(first_time.year, first_time.month, first_time.day) hrpttimes = [timecode(x) - first_time for x in timecodes] hrpttimes = np.array([x.seconds + x.microseconds / 1000000.0 for x in hrpttimes]) scan_points = np.arange(2048) if satellite == "noaa 16": scan_angle = 55.25 else: scan_angle = 55.37 scans_nb = len(hrpttimes) avhrr_inst = np.vstack(((scan_points / 1023.5 - 1) * np.deg2rad(-scan_angle), np.zeros((len(scan_points),)))).T avhrr_inst = np.tile(avhrr_inst, [scans_nb, 1]) offset = hrpttimes times = (np.tile(scan_points * 0.000025, [scans_nb, 1]) + np.expand_dims(offset, 1)) sgeom = ScanGeometry(avhrr_inst, times.ravel()) s_times = sgeom.times(first_time) rpy = (0, 0, 0) pixels_pos = 
compute_pixels((orb.tle._line1, orb.tle._line2), sgeom, s_times, rpy) pos_time = get_lonlatalt(pixels_pos, s_times) return pos_time ## VIS calibration def vis_cal(vis_data, sat): """Calibrates the visual data using dual gain. """ logger.debug("Visual calibration") vis = np.empty(vis_data.shape, dtype=np.float64) for i in range(3): ch = vis_data[:, :, i] intersect = calib[sat]["intersections"][i] slope_l = calib[sat]["slopes_l"][i] slope_h = calib[sat]["slopes_h"][i] intercept_l = calib[sat]["intercepts_l"][i] intercept_h = calib[sat]["intercepts_h"][i] vis[:, :, i] = ne.evaluate("where(ch < intersect, ch * slope_l + intercept_l, ch * slope_h + intercept_h)") return vis ## IR calibration def ir_cal(ir_data, telemetry, back_scan, space_data, sat): alen = ir_data.shape[0] logger.debug("IR calibration") logger.debug(" Preparing telemetry...") factor = np.ceil(alen / 5.0) + 1 displacement = (telemetry[0:5, :] == np.array([0, 0, 0])).sum(1).argmax() + 1 offset = 4 - (displacement - 1) globals().update(calib[sat]) bd0 = np.tile(d0.reshape(-1, 1), (factor, 3))[offset:offset + alen] bd1 = np.tile(d1.reshape(-1, 1), (factor, 3))[offset:offset + alen] bd2 = np.tile(d2.reshape(-1, 1), (factor, 3))[offset:offset + alen] bd3 = np.tile(d3.reshape(-1, 1), (factor, 3))[offset:offset + alen] bd4 = np.tile(d4.reshape(-1, 1), (factor, 3))[offset:offset + alen] PRT = telemetry T_PRT = bd0 + PRT * (bd1 + PRT * (bd2 + PRT * (bd3 + PRT * bd4))) sublen = np.floor((T_PRT.shape[0] - displacement) / 5.0) * 5 TMP_PRT = T_PRT[displacement:displacement + sublen] logger.debug(" Computing blackbody temperatures...") MEAN = ((TMP_PRT[::5] + TMP_PRT[1::5] + TMP_PRT[2::5] + TMP_PRT[3::5]) / 4).repeat(5, 0) if displacement == 0: T_BB_beg = None elif displacement == 1: T_BB_beg = MEAN[0] else: T_BB_beg = np.tile(T_PRT[:displacement].sum(0) / (displacement - 1), (displacement, 1)) if sublen + displacement >=T_PRT.shape[0]: T_BB_end = None else: T_BB_end = np.tile(T_PRT[sublen+displacement:].mean(0), (T_PRT.shape[0] - sublen - displacement, 1)) if T_BB_beg is not None: to_stack = [T_BB_beg, MEAN] else: to_stack = [MEAN] if T_BB_end is not None: to_stack.append(T_BB_end) T_BB = np.vstack(to_stack) if sat in ["noaa 15", "noaa 16"]: # three readings for klm T_BB = T_BB.mean(0) T_BB_star = A + B * T_BB N_BB = (c1 * vc ** 3) / (np.exp((c2 * vc)/(T_BB_star)) - 1) C_S = space_data[:,:, 2:].mean(1) C_BB = back_scan.mean(1) C_E = ir_data # aapp style #G = (N_BB - N_S) / (C_BB - C_S) #k1 = G**2 * b2 #k2 = (b1 + 1) *G - 2 * k1 * C_S + 2*b2 * G * N_S #k3 = b0 + (b1 + 1) * N_S - (b1 + 1) *G * C_S + b2 * (N_S - G * N_S) ** 2 #N_E = k1[:, np.newaxis, :] * C_E * C_E + k2[:, np.newaxis, :] * C_E + k3[:, np.newaxis, :] logger.debug(" Computing linear part of radiances...") C_Sr = C_S[:, np.newaxis, :] Cr = ((N_BB - N_S) / (C_S - C_BB))[:, np.newaxis, :] N_lin = ne.evaluate("(N_S + (Cr * (C_Sr - C_E)))") logger.debug(" Computing radiance correction...") # the +1 (N_lin) here is for Ne = Nlin + Ncor N_E = ne.evaluate("((b2 * N_lin + b1 + 1) * N_lin + b0)") logger.debug(" Computing channels brightness temperatures...") T_E_star = ne.evaluate("(c2 * vc / (log(1 + c1 * vc**3 / N_E)))") T_E = ne.evaluate("(T_E_star - A) / B") return T_E CASES = { "avhrr": HRPTReader.load_avhrr } if __name__ == '__main__': import sys array = read_file(sys.argv[1]) sat = (array["id"]["id"] & (2 ** 6 - 1)) >> 3 sat = int(np.round(np.mean(sat))) sat = SATELLITES[sat] vis = vis_cal(array["image_data"][:, :, :3], sat) ir_ = ir_cal(array["image_data"][:, :, 2:], 
array["telemetry"]["PRT"], array["back_scan"], array["space_data"], sat) channels = np.empty(array["image_data"].shape, dtype=np.float64) channels[:, :, :2] = vis[:, :, :2] channels[:, :, 3:] = ir_[:, :, 1:] ch3a = bfield(array["id"]["id"], 10) ch3b = np.logical_not(ch3a) channels[ch3a, :, 2] = vis[ch3a, :, 2] channels[ch3b, :, 2] = ir_[ch3b, :, 0] lon, lat, alt = navigate(array["timecode"], sat) area = SwathDefinition(lon.reshape(2048, -1), lat.reshape(2048, -1)) mpop-1.5.0/mpop/satin/hsaf_h03.py000066400000000000000000000213701317160620000165250ustar00rootroot00000000000000""" Reader for EUMETSATs Hydrology SAF (HSAF) h03 product HSAF website http://hsaf.meteoam.it h03 product is precipitation rate at the ground by GEO(MSG)/Infrared supported by LEO/Microwave http://hsaf.meteoam.it/precipitation.php?tab=3 After registration the data is available from ftp://ftphsaf.meteoam.it/h03 possible accepted formats for this reader are: * grib as provided by HSAF * netCDF (grib file converted with cdo) - Initial version: 2015-07-23 Ulrich Hamann (MeteoSwiss) """ from ConfigParser import ConfigParser from mpop import CONFIG_PATH import os import numpy.ma as ma from glob import glob import datetime def load(satscene, **kargs): """Reader for EUMETSATs Hydrology SAF (HSAF) h03 product h03 product is precipitation rate at the ground by GEO(MSG)/Infrared supported by LEO/Microwave http://hsaf.meteoam.it/precipitation.php?tab=3 """ # Read config file content conf = ConfigParser() conf.read(os.path.join(CONFIG_PATH, satscene.fullname + ".cfg")) values = {"orbit": satscene.orbit, "satname": satscene.satname, "number": satscene.number, "instrument": satscene.instrument_name, "satellite": satscene.fullname } # end of scan time 12min after start end_time = satscene.time_slot + datetime.timedelta(minutes=12) filepath = end_time.strftime(conf.get("seviri-level2", "dir",raw=True)) filepattern = end_time.strftime(conf.get("seviri-level2", "filename",raw=True)) % values filename = os.path.join( filepath, filepattern) print "... 
search for file: ", filename filenames=glob(str(filename)) if len(filenames) == 0: print "*** Error, no file found" quit() elif len(filenames) > 1: print "*** Warning, more than 1 datafile found: " for filename in filenames: print " ", filename # possible formats: h03_20150513_1557_rom.grb.gz, h03_20150513_1612_rom.grb, h03_20150513_1612_rom.nc fileformats = [filename.split(".")[-1] for filename in filenames] # try to find grb file if 'grb' in fileformats: # read grib data, fill_value, units, long_name = read_h03_grib(filenames[fileformats.index('grb')]) elif 'nc' in fileformats: # read netCDF data, fill_value, units, long_name = read_h03_netCDF(filenames[fileformats.index('nc')]) elif 'gz' in fileformats: # unzip from subprocess import call infile = filenames[fileformats.index('gz')] outfile = infile[:-3] print " unzip ", infile # gunzip -c h03_20150513_1557_rom.grb.gz > h03_20150513_1557_rom.grb # call("/bin/gunzip "+ infile +" 2>&1", shell=True) # don't keep gz file call("/bin/gunzip -c "+ infile+" > "+ outfile +" 2>&1", shell=True) # keep gz file # check format of gunzipped file if outfile.split(".")[-1] == 'grb': data, fill_value, units, long_name = read_h03_grib(outfile) elif outfile.split(".")[-1] == 'nc': data, fill_value, units, long_name = read_h03_netCDF(outfile) if units == "kg m**-2 s**-1" or units == "kg m-2s-1": data *= 3600 units = "kg m-2 h-1" satscene['h03'] = data satscene['h03'].fill_value = fill_value satscene['h03'].units = units satscene['h03'].long_name = long_name satscene['h03'].product_name = 'h03' # personal communication with the help desk # Each H03 grib file contains precipitation data of a 900x1900 pixel sub-area of the SEVIRI full disk area (3712x3712 pixels). # The first pixel of H03 (pixel (1,1)) grib file corresponds to Seviri pixel (1095,85) if the Seviri pixel (1,1) is in the North-East. # I can confirm that only the prime satellite is used (position subsatellite longitude 0 degree East). # For the future we are thinking of disseminating the h03 outputs already corrected for parallax. # conversion of the above information to the correct AreaDefinition # full_disk = get_area_def("SeviriDiskFull") # from mpop.projector import get_area_def # import numpy as np # np.array(area_def.get_proj_coords(data_slice=(85+900,1095 ))) - 3000.40316582 / 2. # array([-2284807.01076965, 2611850.9558437 ]) # np.array(area_def.get_proj_coords(data_slice=(85 ,1095+1900))) + 3000.40316582 / 2. # array([ 3418959.40744847, 5315214.20824482]) # or # aex = full_disk.get_area_extent_for_subsets(985,1095,85,2995) proj = {'proj': 'geos', 'a': '6378169.0', 'b': '6356583.8', 'h': '35785831.0', 'lon_0': '0.0'} aex = (-2284807.01076965, 2611850.9558437, 3418959.40744847, 5315214.20824482) from pyresample.geometry import AreaDefinition satscene.area = AreaDefinition("hsaf", "hsaf", "geos0", proj, 1900, 900, aex) def read_h03_grib(filename): try: import pygrib except ImportError: print "... module pygrib needs to be installed" quit() # see http://pygrib.googlecode.com/svn/trunk/docs/pygrib-module.html print("... 
read data from %s" % str(filename)) grbs = pygrib.open(filename) #print(grbs) #print 'inventory' #for grb in grbs: # print(grb) #print 'end inventory' long_name = 'Instantaneous rain rate' units = 'kg m**-2 s**-1' _FillValue = 0.0 grb = grbs.select(name=long_name)[0] # print(grb) data = ma.asarray(grb.values) data.mask = (data == 0.0) print ' fill_value: ', 0 print ' units: ', units print ' long_name: ', long_name print ' datatype: ', type(data) print ' shape: ', data.shape print ' min/max: ', data.min(), data.max() return data, _FillValue, units, long_name def read_h03_netCDF(filename): try: from netCDF4 import Dataset except ImportError: print "... module netCDF4 needs to be installed" quit() print("... read data from %s" % str(filename)) # Load data from netCDF file # see also http://netcdf4-python.googlecode.com/svn/trunk/docs/netCDF4-module.html ds = Dataset(filename, 'r') if 'irrate' in ds.variables: print ' found variable irrate' var_name='irrate' # converted with: cdo -f nc4c -z zip copy infile outfile elif 'IRRATE_P30_GSV0' in ds.variables: print ' found variable IRRATE_P30_GSV0' var_name='IRRATE_P30_GSV0' # converted with: ncl_convert2nc h03_20150529_0827_rom.grb -e grb -nc4c -cl 9 #variables: # float IRRATE_P30_GSV0(ygrid_0, xgrid_0) ; # IRRATE_P30_GSV0:initial_time = "05/29/2015 (08:27)" ; # IRRATE_P30_GSV0:parameter_template_discipline_category_number = 30, 3, 1, 1 ; # IRRATE_P30_GSV0:parameter_discipline_and_category = "Space products, Quantitative products" ; # IRRATE_P30_GSV0:grid_type = "Space view perspective or orthographic" ; # IRRATE_P30_GSV0:_FillValue = 1.e+20f ; # IRRATE_P30_GSV0:units = "kg m-2s-1" ; # IRRATE_P30_GSV0:long_name = "Instantaneous rain rate" ; # IRRATE_P30_GSV0:production_status = "Operational test products" ; # IRRATE_P30_GSV0:center = "Rome (RSMC)" ; print '*** Error, does not work for unknown reason' print ' data.mask = (data == _FillValue) | (data == 0.0) produce error' quit() #print type(ds.variables[var_name]) #print dir(ds.variables[var_name]) _FillValue = ds.variables[var_name]._FillValue # or fill_value = ds.variables[var_name].getncattr('_FillValue') units = ds.variables[var_name].units long_name = ds.variables[var_name].long_name # Read variable corresponding to channel name data = ma.asarray(ds.variables[var_name]) print ' fill_value: ', ds.variables[var_name]._FillValue print ' units: ', ds.variables[var_name].units print ' long_name: ', ds.variables[var_name].long_name print ' datatype: ', ds.variables[var_name].datatype print ' shape: ', data.shape print ' min/max: ', data.min(), data.max() if len(data.shape) == 3: if data.shape[0] == 1: print " reduce to 2 dimensions (skip time dimension)" data = ma.asarray(ds.variables[var_name][0,:,:]) else: print "*** Error, unknown netCDF file format in h03_nc.py" print " probably more time steps in one file (not implemented yet)" quit() data.mask = (data == _FillValue) | (data == 0.0) return data, _FillValue, units, long_name mpop-1.5.0/mpop/satin/lac_l1b.py000066400000000000000000000120651317160620000164300ustar00rootroot00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright (c) 2014 Abhay Devasthale and Martin Raspaud # Author(s): # Abhay Devasthale # Martin Raspaud # Adam Dybbroe # Sajid Pareeth # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. 
# This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # You should have received a copy of the GNU General Public License # along with this program. If not, see . """Read a lac file. Reads L1b LAC data from the POD and KLM series of satellites and does most of the computations. Format specification can be found here: http://www.ncdc.noaa.gov/oa/pod-guide/ncdc/docs/klm/html/c8/sec83142-1.htm """ import os import glob from ConfigParser import ConfigParser import logging import numpy as np from pygac.lac_klm import KLMReader from pygac.lac_pod import PODReader from mpop import CONFIG_PATH LOGGER = logging.getLogger(__name__) def load(satscene, *args, **kwargs): """Read data from file and load it into *satscene*. A possible *calibrate* keyword argument is passed to the pygac reader. Should be 0 for off (counts), 1 for default (brightness temperatures and reflectances), and 2 for radiances only. """ del args conf = ConfigParser() conf.read(os.path.join(CONFIG_PATH, satscene.fullname + ".cfg")) options = {} for option, value in conf.items(satscene.instrument_name + "-level2", raw=True): options[option] = value if kwargs.get("filename") is not None: options["filename"] = kwargs["filename"] options["dir"] = None options["calibrate"] = kwargs.get("calibrate", True) LOGGER.info("Loading instrument '%s'" % satscene.instrument_name) try: CASES[satscene.instrument_name](satscene, options) except KeyError: raise KeyError("Unknown instrument '%s'" % satscene.instrument_name) def load_avhrr(satscene, options): """Read avhrr data from file and load it into *satscene*. """ if "filename" not in options: raise IOError("No filename given, cannot load.") values = {"orbit": satscene.orbit, "satname": satscene.satname, "number": satscene.number, "instrument": satscene.instrument_name, "satellite": satscene.fullname } if options["dir"] is None: filename = options["filename"] else: filename = os.path.join(satscene.time_slot.strftime(options["dir"]) % values, satscene.time_slot.strftime( options["filename"]) % values) file_list = glob.glob(filename) if len(file_list) > 1: raise IOError("More than one l1b file matching!") elif len(file_list) == 0: raise IOError("No l1b file matching!: " + filename) filename = file_list[0] LOGGER.debug("Loading from " + filename) with open(filename) as fdes: data = fdes.read(3) if data in ["CMS", "NSS", "UKM", "DSS"]: reader = KLMReader chn_dict = AVHRR3_CHANNEL_NAMES else: reader = PODReader chn_dict = AVHRR_CHANNEL_NAMES chns = satscene.channels_to_load & set(chn_dict.keys()) LOGGER.info("Loading channels " + str(sorted(list(chns)))) if len(chns) == 0: return scene = reader() scene.read(filename) scene.get_lonlat() scene.adjust_clock_drift() channels = scene.get_calibrated_channels() # scene.navigate() try: from pyresample import geometry except ImportError as ex_: LOGGER.debug("Could not load pyresample: " + str(ex_)) satscene.lat = scene.lats satscene.lon = scene.lons else: satscene.area = geometry.SwathDefinition(lons=scene.lons, lats=scene.lats) area_name = ("swath_" + satscene.fullname + "_" + str(satscene.time_slot) + "_" + str(scene.lats.shape)) satscene.area.area_id = area_name satscene.area.name = "Satellite projection" satscene.area_id = area_name for chn in chns: data = channels[:, :, chn_dict[chn]] if np.ma.count(data) > 0: satscene[chn].data = np.ma.masked_invalid(data, 
copy=False) satscene[chn].area = satscene.area AVHRR3_CHANNEL_NAMES = {"1": 0, "2": 1, "3A": 2, "3B": 3, "4": 4, "5": 5} AVHRR_CHANNEL_NAMES = {"1": 0, "2": 1, "3": 2, "4": 3, "5": 4} CASES = { "avhrr/1": load_avhrr, "avhrr/2": load_avhrr, "avhrr/3": load_avhrr, } mpop-1.5.0/mpop/satin/mipp_sentinel.py000066400000000000000000000112601317160620000177750ustar00rootroot00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright (c) 2014 Adam.Dybbroe # Author(s): # Adam.Dybbroe # Helge Pfeiffer # Lars Orum Rasmussen # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # You should have received a copy of the GNU General Public License # along with this program. If not, see . """Sentinel-1 reader """ import ConfigParser import os from mipp.read_geotiff import read_geotiff from mipp.xsar import S1A from datetime import datetime import numpy as np from glob import glob from pyresample import geometry import mpop from mpop import CONFIG_PATH import logging LOG = logging.getLogger(__name__) from mpop.plugin_base import Reader class SentinelGRDChannel(mpop.channel.GenericChannel): def __init__(self, name='unknown', resolution='unknown'): mpop.channel.GenericChannel.__init__(self) self._is_loaded = False self.name = name self.resolution = resolution self.data = None self.shape = None self._projectables = [] self._projectables.append(name) def is_loaded(self): return self._is_loaded def set_loaded(self): self._is_loaded = not self._is_loaded def project(self, coverage): """Project what can be projected in the product. 
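In practice this resamples the channel's *data* array through the supplied *coverage* object and returns it on a shallow copy of the channel.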
""" import copy res = copy.copy(self) res.data = coverage.project_array(self.data) return res class GeoTiffReader(Reader): pformat = "mipp_sentinel" def load(self, satscene, **kwargs): LOG.debug('channels to load: ' + str(satscene.channels_to_load)) conf = ConfigParser.ConfigParser() conf.read(os.path.join(CONFIG_PATH, satscene.fullname + ".cfg")) options = {} for option, value in conf.items(satscene.instrument_name + "-level2", raw=True): options[option] = value options["resolution"] = kwargs.get("resolution", 'unknown') values = {"orbit": satscene.orbit} path_template = datetime.strftime(satscene.time_slot, options["dir"]) % values dirlist = glob(path_template) if len(dirlist) != 1: raise IOError("Couldn't identify a unique measurments directory!, " + "from path_template '%s'" % path_template) dirname = dirlist[0] if not os.path.exists(dirname): raise IOError('Directory ' + str(dirname) + ' does not exist') # Read meta data mda = S1A.read_metadata(dirname) channels_available = set(mda.channels.keys()) ##filelist = glob(os.path.join(dirname, options["filename"])) ##if len(filelist) == 0: ## LOG.warning('No files found!') channels_to_load = satscene.channels_to_load.intersection(channels_available) # Loading of channels LOG.debug('available channels to load: ' + str(channels_to_load)) for channel_name in channels_to_load: channel_file = mda.channels[channel_name] LOG.debug("Load channel: '%s' %s" % (channel_name, str(channel_file))) lons, lats, data = self.load_channel(channel_file) chn = SentinelGRDChannel(channel_name, mda.pixel_spacing[0]) chn.area = geometry.SwathDefinition(lons=lons, lats=lats) chn.data = np.ma.masked_array(data) chn.shape = data.shape chn.set_loaded() satscene[channel_name] = chn satscene.info['manifest'] = mda def load_channel(self, filename): """Load one sentinel channel file""" from geotiepoints.basic_interpolator import BasicSatelliteInterpolator params, data = read_geotiff(filename) tie_lons = params['tiepoints']['lons'] tie_lats = params['tiepoints']['lats'] tie_cols = params['tiepoints']['cols'] tie_rows = params['tiepoints']['rows'] interpolator = BasicSatelliteInterpolator(tie_cols, tie_rows, tie_lats, tie_lons) lats, lons = interpolator.interpolate() return lons, lats, data mpop-1.5.0/mpop/satin/mipp_xrit.py000066400000000000000000000307231317160620000171470ustar00rootroot00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright (c) 2010, 2011, 2013, 2014, 2015. # SMHI, # Folkborgsvägen 1, # Norrköping, # Sweden # Author(s): # Martin Raspaud # Esben S. Nielsen # Panu Lahtinen # Adam Dybbroe # This file is part of mpop. # mpop is free software: you can redistribute it and/or modify it under the # terms of the GNU General Public License as published by the Free Software # Foundation, either version 3 of the License, or (at your option) any later # version. # mpop is distributed in the hope that it will be useful, but WITHOUT ANY # WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR # A PARTICULAR PURPOSE. See the GNU General Public License for more details. # You should have received a copy of the GNU General Public License along with # mpop. If not, see . """Interface to Eumetcast level 1.5 HRIT/LRIT format. Uses the MIPP reader. 
""" import ConfigParser import fnmatch import logging import os from pyproj import Proj from mipp import CalibrationError, ReaderError, xrit from mpop import CONFIG_PATH from mpop.plugin_base import Reader from mpop.satin.helper_functions import area_def_names_to_extent LOGGER = logging.getLogger(__name__) try: # Work around for on demand import of pyresample. pyresample depends # on scipy.spatial which memory leaks on multiple imports IS_PYRESAMPLE_LOADED = False from pyresample import geometry from mpop.projector import get_area_def IS_PYRESAMPLE_LOADED = True except ImportError: LOGGER.warning("pyresample missing. Can only work in satellite projection") class XritReader(Reader): '''Class for reading XRIT data. ''' pformat = "mipp_xrit" def load(self, *args, **kwargs): load(*args, **kwargs) def load(satscene, calibrate=True, area_extent=None, area_def_names=None, **kwargs): """Read data from file and load it into *satscene*. The *calibrate* argument is passed to mipp (should be 0 for off, 1 for default, and 2 for radiances only). """ conf = ConfigParser.RawConfigParser() conf.read(os.path.join(CONFIG_PATH, satscene.fullname + ".cfg")) options = {} for option, value in conf.items(satscene.instrument_name + "-level2"): options[option] = value for section in conf.sections(): if(section.startswith(satscene.instrument_name) and not (section == "satellite") and # not section[:-1].endswith("-level") and not section.endswith("-granules")): options[section] = dict(conf.items(section)) filenames = kwargs.get('filename') CASES.get(satscene.instrument_name, load_generic)(satscene, options, calibrate, area_extent, area_def_names, filenames) def load_generic(satscene, options, calibrate=True, area_extent=None, area_def_names=None, filenames=None): """Read imager data from file and load it into *satscene*. """ os.environ["PPP_CONFIG_DIR"] = CONFIG_PATH LOGGER.debug("Channels to load from %s: %s" % (satscene.instrument_name, satscene.channels_to_load)) # Compulsory global attributes satscene.info["title"] = (satscene.satname.capitalize() + satscene.number + " satellite, " + satscene.instrument_name.capitalize() + " instrument.") satscene.info["institution"] = "Original data disseminated by EumetCast." satscene.add_to_history("HRIT/LRIT data read by mipp/mpop.") satscene.info["references"] = "No reference." satscene.info["comments"] = "No comment." 
from_area = False if satscene.end_time is not None: time_slot = satscene.time_slot, satscene.end_time else: time_slot = satscene.time_slot if area_extent is None and satscene.area is not None: if not satscene.area_def: satscene.area = get_area_def(satscene.area_id) area_extent = satscene.area.area_extent from_area = True area_converted_to_extent = False for chn in satscene.channels_to_load: use_filenames = False # Sort out filenames if filenames is not None: for section in options.keys(): if section.endswith('-level1'): break try: pattern_pro = eval(options[section].get('filename_pro')) except TypeError: pattern_pro = None try: pattern_epi = eval(options[section].get('filename_epi')) except TypeError: pattern_epi = None pattern = eval(options[section].get('filename')) epilogue = None prologue = None image_files = [] if pattern_epi is not None: glob_epi = satscene.time_slot.strftime( pattern_epi) % ({'segment': "EPI".ljust(9, '_'), 'channel': chn + '*'}) else: glob_epi = 'eggs_and_spam' if pattern_pro is not None: glob_pro = satscene.time_slot.strftime( pattern_pro) % ({'segment': "PRO".ljust(9, '_'), 'channel': chn + '*'}) else: glob_pro = 'eggs_and_spam' glob_img = satscene.time_slot.strftime( pattern) % ({'segment': "*", 'channel': chn + '*'}) for filename in filenames: if fnmatch.fnmatch(os.path.basename(filename), glob_img): image_files.append(filename) elif pattern_pro is not None and fnmatch.fnmatch( os.path.basename(filename), glob_pro): prologue = filename elif pattern_epi is not None and fnmatch.fnmatch( os.path.basename(filename), glob_epi): epilogue = filename if len(image_files) == 0 and prologue is None and epilogue is None: use_filenames = False else: use_filenames = True if from_area: try: if use_filenames: metadata = xrit.sat.load_files(prologue, image_files, epilogue, platform_name=satscene.fullname, only_metadata=True) else: metadata = xrit.sat.load(satscene.fullname, time_slot, chn, only_metadata=True) if(satscene.area_def.proj_dict["proj"] != "geos" or float(satscene.area_def.proj_dict["lon_0"]) != metadata.sublon): raise ValueError("Slicing area must be in " "geos projection, and lon_0 should match " "the satellite's position.") except ReaderError, err: # if channel can't be found, go on with next channel LOGGER.error(str(err)) continue # Convert area definitions to maximal area_extent if not area_converted_to_extent and area_def_names is not None: try: if use_filenames: metadata = xrit.sat.load_files(prologue, image_files, epilogue, platform_name=satscene.fullname, only_metadata=True) else: metadata = xrit.sat.load(satscene.fullname, time_slot, chn, only_metadata=True) except ReaderError as err: LOGGER.warning(str(err)) continue # if area_extent is given, assume it gives the maximum # extent of the satellite view if area_extent is not None: area_extent = area_def_names_to_extent(area_def_names, metadata.proj4_params, area_extent) # otherwise use the default value (MSG3 extent at # lon0=0.0), that is, do not pass default_extent=area_extent else: area_extent = area_def_names_to_extent(area_def_names, metadata.proj4_params, default_extent=None) if area_extent is None: LOGGER.info('Could not derive area_extent from area_def_names') area_converted_to_extent = True try: if use_filenames: image = xrit.sat.load_files(prologue, image_files, epilogue, platform_name=satscene.fullname, mask=True, calibrate=calibrate) else: image = xrit.sat.load(satscene.fullname, time_slot, chn, mask=True, calibrate=calibrate) if area_extent: metadata, data = image(area_extent) else: metadata, 
data = image() except CalibrationError: LOGGER.warning( "Loading non calibrated data since calibration failed.") if use_filenames: image = xrit.sat.load_files(prologue, image_files, epilogue, platform_name=satscene.fullname, mask=True, calibrate=False) else: image = xrit.sat.load(satscene.fullname, time_slot, chn, mask=True, calibrate=False) if area_extent: metadata, data = image(area_extent) else: metadata, data = image() except ReaderError as err: # if channel can't be found, go on with next channel LOGGER.warning(str(err)) continue satscene[chn] = data satscene[chn].info['units'] = metadata.calibration_unit satscene[chn].info['sublon'] = metadata.sublon satscene[chn].info['satname'] = satscene.satname satscene[chn].info['satnumber'] = satscene.number satscene[chn].info['instrument_name'] = satscene.instrument_name satscene[chn].info['time'] = satscene.time_slot # Build an area on the fly from the mipp metadata proj_params = getattr(metadata, "proj4_params").split(" ") proj_dict = {} for param in proj_params: key, val = param.split("=") proj_dict[key] = val if IS_PYRESAMPLE_LOADED: # Build area_def on-the-fly satscene[chn].area = geometry.AreaDefinition( satscene.satname + satscene.instrument_name + str(metadata.area_extent) + str(data.shape), "On-the-fly area", proj_dict["proj"], proj_dict, data.shape[1], data.shape[0], metadata.area_extent) else: LOGGER.info("Could not build area, pyresample missing...") CASES = {} mpop-1.5.0/mpop/satin/mipp_xsar.py000066400000000000000000000117611317160620000171370ustar00rootroot00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright (c) 2010, 2011, 2012, 2014. # SMHI, # Folkborgsvägen 1, # Norrköping, # Sweden # Author(s): # Martin Raspaud # Esben S. Nielsen # Lars Orum Rasmussen # This file is part of mpop. # mpop is free software: you can redistribute it and/or modify it under the # terms of the GNU General Public License as published by the Free Software # Foundation, either version 3 of the License, or (at your option) any later # version. # mpop is distributed in the hope that it will be useful, but WITHOUT ANY # WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR # A PARTICULAR PURPOSE. See the GNU General Public License for more details. # You should have received a copy of the GNU General Public License along with # mpop. If not, see . """Interface to SAR level-1 data. Using the MIPP reader. """ import ConfigParser import os from mipp import xsar from mipp import ReaderError, CalibrationError from mpop import CONFIG_PATH import logging LOG = logging.getLogger(__name__) try: # Work around for on demand import of pyresample. pyresample depends # on scipy.spatial which memory leaks on multiple imports is_pyresample_loaded = False from pyresample import geometry from mpop.projector import get_area_def is_pyresample_loaded = True except ImportError: LOG.warning("pyresample missing. Can only work in satellite projection") def load(satscene, calibrate=True): """Read data from file and load it into *satscene*. The *calibrate* argument is passed to mipp (should be 0 for off, 1 for default, and 2 for radiances only). 
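For instance, load(satscene, calibrate=2) requests radiances only, while calibrate=0 keeps the raw counts; exactly what each level delivers is up to mipp.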
""" conf = ConfigParser.ConfigParser() conf.read(os.path.join(CONFIG_PATH, satscene.fullname + ".cfg")) options = {} for option, value in conf.items(satscene.instrument_name + "-level2"): options[option] = value for section in conf.sections(): if(section.startswith(satscene.instrument_name) and not (section == "satellite") and not section[:-1].endswith("-level") and not section.endswith("-granules")): options[section] = conf.items(section) CASES.get(satscene.instrument_name, load_generic)(satscene, options, calibrate) def load_generic(satscene, options, calibrate=True): """Read sar data from file and load it into *satscene*. """ os.environ["PPP_CONFIG_DIR"] = CONFIG_PATH LOG.debug("Channels to load from %s: %s" % (satscene.instrument_name, satscene.channels_to_load)) # Compulsory global attribudes satscene.info["title"] = (satscene.satname.capitalize() + satscene.number + " satellite, " + satscene.instrument_name.capitalize() + " instrument.") satscene.info["institution"] = "No institution." satscene.add_to_history("SAR data read by mipp/mpop.") satscene.info["references"] = "No reference." satscene.info["comments"] = "No comment." for chn in satscene.channels_to_load: try: metadata, data = xsar.sat.load(satscene.fullname, satscene.time_slot, chn, mask=True, calibrate=calibrate) except CalibrationError: LOG.warning( "Loading non calibrated data since calibration failed.") metadata, data = xsar.sat.load(satscene.fullname, satscene.time_slot, chn, mask=True, calibrate=False) except ReaderError: # if channel can't be found, go on with next channel continue satscene[chn] = data satscene[chn].info['units'] = metadata.calibration_unit if is_pyresample_loaded: # Build an area on the fly from the mipp metadata proj_params = getattr(metadata, "proj4_params").split() proj_dict = {} for param in proj_params: key, val = [i.strip() for i in param.split("=")] proj_dict[key] = val # Build area_def on-the-fly satscene[chn].area = geometry.AreaDefinition( satscene.satname + satscene.instrument_name + str(metadata.area_extent) + str(data.shape), "On-the-fly area", proj_dict["proj"], proj_dict, data.shape[1], data.shape[0], metadata.area_extent) else: LOG.info("Could not build area, pyresample missing...") CASES = {} mpop-1.5.0/mpop/satin/modis_level2.py000066400000000000000000000266001317160620000175170ustar00rootroot00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright (c) 2011, 2013, 2014. # SMHI, # Folkborgsvägen 1, # Norrköping, # Sweden # Author(s): # Adam Dybbroe # This file is part of mpop. # mpop is free software: you can redistribute it and/or modify it under the # terms of the GNU General Public License as published by the Free Software # Foundation, either version 3 of the License, or (at your option) any later # version. # mpop is distributed in the hope that it will be useful, but WITHOUT ANY # WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR # A PARTICULAR PURPOSE. See the GNU General Public License for more details. # You should have received a copy of the GNU General Public License along with # mpop. If not, see . 
"""Plugin for reading AQUA MODIS level 2 EOS HDF files downloaded from NASA FTP import """ import os.path from ConfigParser import ConfigParser import datetime import numpy as np from pyhdf.SD import SD from mpop import CONFIG_PATH import mpop.channel #from mpop.projector import get_area_def import logging LOG = logging.getLogger(__name__) EOS_SATELLITE = {'aqua': 'eos2', 'modisa': 'eos2', 'terra': 'eos1', 'modist': 'eos1'} SCAN_LINE_ATTRS = ['year', 'day', 'msec', 'slat', 'slon', 'clat', 'clon', 'elat', 'elon', 'csol_z' ] GEO_PHYS_PRODUCTS = ['aot_869', 'chlor_a', 'chlor_oc5', 'poc', 'cdom_index', 'angstrom', 'pic', 'par', 'sst', 'nflh', 'ipar', 'Kd_490'] CHANNELS = ['Rrs_412', 'Rrs_443', 'Rrs_469', 'Rrs_488', 'Rrs_531', 'Rrs_547', 'Rrs_555', 'Rrs_645', 'Rrs_667', 'Rrs_678', 'nLw_412', 'nLw_443', 'nLw_469', 'nLw_488', 'nLw_531', 'nLw_547', 'nLw_555', 'nLw_645', 'nLw_667', 'nLw_678', ] # Flags and quality (the two latter only for SST products): FLAGS_QUALITY = ['l2_flags', 'qual_sst', 'qual_sst4'] SENSOR_BAND_PARAMS = ['wavelength', 'F0', 'vcal_offset', 'vcal_gain', 'Tau_r', 'k_oz'] # Navigation control points and tilt - no LONLAT: NAVIGATION_TILT = ['tilt', 'cntl_pt_cols', 'cntl_pt_rows'] # Geo-location - Longitude,latitude: LONLAT = ['longitude', 'latitude'] class ModisEosHdfLevel2(mpop.channel.GenericChannel): """NASA EOS-HDF Modis data struct""" def __init__(self, prodname, resolution=None): mpop.channel.GenericChannel.__init__(self, prodname) self.filled = False self.name = prodname self.resolution = resolution self.info = {} self._eoshdf_info = {} self.shape = None self.satid = "" self.orbit = None self.attr = None #self.scanline_attrs = {} self.data = None self.starttime = None self.endtime = None def __str__(self): return ("'%s: shape %s, resolution %sm'" % (self.name, self.shape, self.resolution)) def is_loaded(self): """Tells if the channel contains loaded data. """ return self.filled def read(self, filename, **kwargs): """Read the data""" from pyhdf.SD import SD import datetime # print "*** >>> Read the hdf-eos file!" root = SD(filename) # Get all the Attributes: # Common Attributes, Data Time, # Data Structure and Scene Coordinates for key in root.attributes().keys(): self._eoshdf_info[key] = root.attributes()[key] # Start Time - datetime object starttime = datetime.datetime.strptime(self._eoshdf_info['Start Time'][0:13], "%Y%j%H%M%S") msec = float(self._eoshdf_info['Start Time'][13:16]) / 1000. self.starttime = starttime + datetime.timedelta(seconds=msec) # End Time - datetime object endtime = datetime.datetime.strptime(self._eoshdf_info['End Time'][0:13], "%Y%j%H%M%S") msec = float(self._eoshdf_info['End Time'][13:16]) / 1000. self.endtime = endtime + datetime.timedelta(seconds=msec) # What is the leading 'H' doing here? 
sensor_name = self._eoshdf_info['Sensor Name'][1:-1].lower() try: self.satid = EOS_SATELLITE[sensor_name] except KeyError: LOG.error("Failed setting the satellite id - sat-name = %s", sensor_name) self.orbit = self._eoshdf_info['Orbit Number'] self.shape = (self._eoshdf_info['Number of Scan Control Points'], self._eoshdf_info['Number of Pixel Control Points']) # try: if 1: value = root.select(self.name) attr = value.attributes() data = value.get() self.attr = attr band = data if self.name in FLAGS_QUALITY: self.data = band else: nodata = attr['bad_value_scaled'] self.data = (np.ma.masked_equal(band, nodata) * attr['slope'] + attr['intercept']) value.endaccess() # except: # pass root.end() self.filled = True def project(self, coverage): """Remaps the Modis EOS-HDF level2 ocean products to cartographic map-projection on a user-defined area. """ LOG.info("Projecting product %s..." % (self.name)) #print("Inside project...") retv = ModisEosHdfLevel2(self.name) retv.data = coverage.project_array(self.data) retv.area = coverage.out_area retv.shape = retv.data.shape retv.resolution = self.resolution retv.info = self.info retv.filled = True valid_min = retv.data.min() valid_max = retv.data.max() retv.info['valid_range'] = np.array([valid_min, valid_max]) retv.info['var_data'] = retv.data return retv def load(satscene, **kwargs): """Read data from file and load it into *satscene*. Load data into the *channels*. *Channels* is a list or a tuple containing channels we will load data into. If None, all channels are loaded. """ del kwargs conf = ConfigParser() conf.read(os.path.join(CONFIG_PATH, satscene.fullname + ".cfg")) options = {} for option, value in conf.items(satscene.instrument_name + "-level3", raw=True): options[option] = value pathname = os.path.join(options["dir"], options['filename']) filename = satscene.time_slot.strftime(pathname) for prodname in GEO_PHYS_PRODUCTS + FLAGS_QUALITY: if prodname in satscene.channels_to_load: prod_chan = ModisEosHdfLevel2(prodname) prod_chan.read(filename) prod_chan.satid = satscene.satname.capitalize() prod_chan.resolution = 1000.0 prod_chan.shape = prod_chan.data.shape # All this for the netCDF writer: prod_chan.info['var_name'] = prodname prod_chan.info['var_data'] = prod_chan.data resolution_str = str(int(prod_chan.resolution)) + 'm' prod_chan.info['var_dim_names'] = ('y' + resolution_str, 'x' + resolution_str) prod_chan.info['long_name'] = prod_chan.attr['long_name'][:-1] try: prod_chan.info['standard_name'] = prod_chan.attr[ 'standard_name'][:-1] except KeyError: pass valid_min = np.min(prod_chan.data) valid_max = np.max(prod_chan.data) prod_chan.info['valid_range'] = np.array([valid_min, valid_max]) prod_chan.info['resolution'] = prod_chan.resolution if prodname == 'l2_flags': # l2 flags definitions for i in range(1, 33): key = "f%02d_name" % i prod_chan.info[key] = prod_chan.attr[key][:-1] satscene.channels.append(prod_chan) if prodname in CHANNELS: satscene[prodname].info['units'] = '%' else: satscene[prodname].info['units'] = prod_chan.attr['units'][:-1] LOG.info("Loading modis lvl2 product '%s' done" % prodname) # Check if there are any bands to load: channels_to_load = False for bandname in CHANNELS: if bandname in satscene.channels_to_load: channels_to_load = True break if channels_to_load: # print "FILE: ", filename eoshdf = SD(filename) # Get all the Attributes: # Common Attributes, Data Time, # Data Structure and Scene Coordinates info = {} for key in eoshdf.attributes().keys(): info[key] = eoshdf.attributes()[key] dsets = eoshdf.datasets() 
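# For reference: pyhdf's SD.datasets() returns a dict keyed by dataset
# name, each value being a tuple of (dimension names, dimension sizes,
# HDF type, index), so the membership tests below are plain name lookups.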
selected_dsets = [] for bandname in CHANNELS: if (bandname in satscene.channels_to_load and bandname in dsets): value = eoshdf.select(bandname) selected_dsets.append(value) # Get only the selected datasets attr = value.attributes() band = value.get() nodata = attr['bad_value_scaled'] mask = np.equal(band, nodata) satscene[bandname] = (np.ma.masked_where(mask, band) * attr['slope'] + attr['intercept']) satscene[bandname].info['units'] = '%' satscene[bandname].info['long_name'] = attr['long_name'][:-1] for dset in selected_dsets: dset.endaccess() LOG.info("Loading modis lvl2 Remote Sensing Reflectances done") eoshdf.end() lat, lon = get_lat_lon(satscene, None) from pyresample import geometry satscene.area = geometry.SwathDefinition(lons=lon, lats=lat) # print "Variant: ", satscene.variant satscene.variant = 'regional' # Temporary fix! LOG.info("Loading modis data done.") def get_lonlat(satscene, row, col): """Estimate lon and lat. """ estimate = False try: latitude, longitude = get_lat_lon(satscene, None) lon = longitude[row, col] lat = latitude[row, col] if (longitude.mask[row, col] == False and latitude.mask[row, col] == False): estimate = False except TypeError: pass except IndexError: pass except IOError: estimate = True if not estimate: return lon, lat def get_lat_lon(satscene, resolution): """Read lat and lon. """ del resolution conf = ConfigParser() conf.read(os.path.join(CONFIG_PATH, satscene.fullname + ".cfg")) options = {} for option, value in conf.items(satscene.instrument_name + "-level3", raw=True): options[option] = value pathname = os.path.join(options["dir"], options['filename']) filename = satscene.time_slot.strftime(pathname) root = SD(filename) lon = root.select('longitude') longitude = lon.get() lat = root.select('latitude') latitude = lat.get() return latitude, longitude mpop-1.5.0/mpop/satin/mpef_oca.py000066400000000000000000000272641317160620000167130ustar00rootroot00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright (c) 2016 Adam.Dybbroe # Author(s): # Adam.Dybbroe # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # You should have received a copy of the GNU General Public License # along with this program. If not, see . """A reader for the EUMETSAT MPEF OCA cloud parameters. Data are segmented LRIT encoded Grib messages """ import tempfile import os.path from ConfigParser import ConfigParser from mpop.satin.gribformat import Grib import mpop.channel from mpop import CONFIG_PATH from mpop.plugin_base import Reader from trollsift import parser from glob import glob import pyresample as pr import numpy as np import logging LOG = logging.getLogger(__name__) CFG_DIR = os.environ.get('PPP_CONFIG_DIR', './') AREA_DEF_FILE = os.path.join(CFG_DIR, "areas.def") if not os.path.exists(AREA_DEF_FILE): raise IOError('Config file %s does not exist!' 
% AREA_DEF_FILE) LRIT_PATTERN = "L-000-{platform_name:_<5s}_-MPEF________-OCAE_____-{segment:_<9s}-{nominal_time:%Y%m%d%H%M}-{compressed:_<2s}" SCENE_TYPE_LAYERS = {111: 'Single Layer Water Cloud', 112: 'Single Layer Ice Cloud', 113: 'Multi Layer Cloud'} OCA_FIELDS = [{'Pixel scene type': 'Scene type'}, {'24': 'Measurement Cost', 'abbrev': 'JM', 'units': ''}, {'25': 'Upper Layer Cloud Optical Thickness', 'units': '', 'abbrev': 'ULCOT'}, {'26': 'Upper Layer Cloud Top Pressure', 'units': 'Pa', 'abbrev': 'ULCTP'}, {'27': 'Upper Layer Cloud Effective Radius', 'units': 'm', 'abbrev': 'ULCRE'}, {'28': 'Error in Upper Layer Cloud Optical Thickness', 'units': '', 'abbrev': 'ERR-ULCOT'}, {'29': 'Error in Upper Layer Cloud Top Pressure', 'units': 'Pa', 'abbrev': 'ERR-ULCTP'}, {'30': 'Error in Upper Layer Cloud Effective Radius', 'units': 'm', 'abbrev': 'ERR-ULCRE'}, {'31': 'Lower Layer Cloud Optical Thickness', 'units': '', 'abbrev': 'LLCOT'}, {'32': 'Lower Layer Cloud Top Pressure', 'units': 'Pa', 'abbrev': 'LLCTP'}, {'33': 'Error in Lower Layer Cloud Optical Thickness', 'units': '', 'abbrev': 'ERR-LLCOT'}, {'34': 'Error in Lower Layer Cloud Top Pressure', 'units': 'Pa', 'abbrev': 'ERR-LLCTP'}] FIELDNAMES = {'scenetype': ('Pixel scene type', None), 'cost': ('24', None), 'ul_cot': ('25', '28'), 'ul_ctp': ('26', '29'), 'reff': ('27', '30'), 'll_cot': ('31', '33'), 'll_ctp': ('32', '34')} class OCAField(object): """One OCA data field with metadata""" def __init__(self, units=None, long_name='', standard_name=''): self.units = units self.data = None self.error = None self.long_name = long_name self.standard_name = standard_name self.info = {} class OCAData(mpop.channel.GenericChannel): """The OCA scene data""" def __init__(self): mpop.channel.GenericChannel.__init__(self) self.name = "OCA" self.mda = {} self._keys = [] self._refs = {} self._lritfiles = None self._gribfilename = None self._store_grib = False self.resolution = 3000 self.scenetype = OCAField() self.cost = OCAField() self.ul_cot = OCAField() self.ll_cot = OCAField() self.ul_ctp = OCAField() self.ll_ctp = OCAField() self.reff = OCAField() self._projectables = [] for field in FIELDNAMES.keys(): self._projectables.append(field) self.timeslot = None self.area_def = pr.utils.load_area(AREA_DEF_FILE, 'met09globeFull') self.shape = None def readgrib(self): """Read the data""" oca = Grib(self._gribfilename) self.scenetype.data = oca.get('Pixel scene type')[::-1, ::-1] self.scenetype.long_name = OCA_FIELDS[0]['Pixel scene type'] for field in FIELDNAMES.keys(): setattr(getattr(self, field), 'data', oca.get( FIELDNAMES[field][0])[::-1, ::-1]) param = [s for s in OCA_FIELDS if FIELDNAMES[field][0] in s][0] if 'units' in param: setattr(getattr(self, field), 'units', param['units']) if 'abbrev' in param: setattr(getattr(self, field), 'standard_name', param['abbrev']) setattr(getattr(self, field), 'long_name', param[FIELDNAMES[field][0]]) param_name = FIELDNAMES[field][1] if param_name: setattr( getattr(self, field), 'error', oca.get(param_name)[::-1, ::-1]) if not self._store_grib: os.remove(self._gribfilename) def read_from_lrit(self, filenames, gribfilename=None): """Read and concatenate the LRIT segments""" self._lritfiles = filenames if len(filenames) == 0: print("No files provided!") return if gribfilename: self._store_grib = True self._gribfilename = gribfilename else: self._store_grib = False self._gribfilename = tempfile.mktemp(suffix='.grb') p__ = parser.Parser(LRIT_PATTERN) bstr = {} nsegments = 0 for lritfile in self._lritfiles: if 
os.path.basename(lritfile).find('PRO') > 0: print("PRO file... %s: Skip it..." % lritfile) continue res = p__.parse(os.path.basename(lritfile)) segm = int(res['segment'].strip('_')) if not self.timeslot: self.timeslot = res['nominal_time'] LOG.debug("Segment = %d", segm) nsegments = nsegments + 1 with open(lritfile) as fpt: fpt.seek(103) bstr[segm] = fpt.read() fstr = bstr[1] for idx in range(2, nsegments + 1): fstr = fstr + bstr[idx] with open(self._gribfilename, 'wb') as fpt: fpt.write(fstr) self.readgrib() def project(self, coverage): """Project the data""" LOG.debug("Projecting channel %s...", (self.name)) import copy res = copy.copy(self) res.name = self.name res.resolution = self.resolution res.filled = True res.area = coverage.out_area resolution_str_x = str(int(res.area.pixel_size_x)) + 'm' resolution_str_y = str(int(res.area.pixel_size_y)) + 'm' time_axis = 0 # Project the data for var in self._projectables: LOG.info("Projecting " + str(var)) res.__dict__[var] = copy.copy(self.__dict__[var]) data = coverage.project_array(self.__dict__[var].data) valid_min = np.min(data) valid_max = np.max(data) res.__dict__[var].data = data res.__dict__[var].info['var_name'] = var res.__dict__[var].info[ 'var_data'] = np.ma.expand_dims(data, time_axis) dim_names = ['y' + resolution_str_y, 'x' + resolution_str_x] dim_names.insert(0, 'time') res.__dict__[var].info['var_dim_names'] = dim_names res.__dict__[var].info['long_name'] = res.__dict__[var].long_name res.__dict__[var].info[ 'standard_name'] = res.__dict__[var].standard_name res.__dict__[var].info['valid_range'] = np.array( [valid_min, valid_max]) # res.__dict__[var].info['resolution'] = res.resolution return res def is_loaded(self): """Tells if the channel contains loaded data. """ return True class OCAReader(Reader): pformat = "mpef_oca" def load(self, satscene, *args, **kwargs): """Read data from files and load it into *satscene*. """ lonlat_is_loaded = False lritfiles = kwargs.get('filenames') if "OCA" not in satscene.channels_to_load: LOG.warning("No OCA product requested. 
Nothing to be done...") return area_name = satscene.area_id or satscene.area.area_id # platform_name = satscene.satname conf = ConfigParser() conf.read(os.path.join(CONFIG_PATH, satscene.fullname + ".cfg")) # Reading the products product = "oca" classes = {product: OCAData} LOG.debug("Loading " + product) if not lritfiles: dummy, lritfiles = get_lrit_filenames(satscene, area_name) LOG.info("Filenames = " + str(lritfiles)) chn = classes[product]() chn.read_from_lrit(lritfiles) # Prepare info object for netCDF writer: resolution_str = str(int(chn.resolution)) + 'm' for field in chn._projectables: getattr(chn, field).info['var_name'] = field getattr(chn, field).info['var_data'] = getattr(chn, field).data getattr(chn, field).info[ 'var_dir_names'] = getattr(chn, field).data getattr(chn, field).info['var_dim_names'] = ('y' + resolution_str, 'x' + resolution_str) getattr(chn, field).info['long_name'] = getattr( chn, field).long_name getattr(chn, field).info['standard_name'] = getattr( chn, field).standard_name valid_min = np.min(getattr(chn, field).data) valid_max = np.max(getattr(chn, field).data) getattr(chn, field).info['valid_range'] = np.array( [valid_min, valid_max]) getattr(chn, field).info['resolution'] = chn.resolution satscene.channels.append(chn) LOG.info("Loading MPEF OCA cloud parameters done") return def get_lrit_filenames(scene, area_name): """Get the set of lrit filenames for the given scene """ conf = ConfigParser() conf.read(os.path.join(CONFIG_PATH, scene.fullname + ".cfg")) filename = conf.get(scene.instrument_name + "-level4", "filename", raw=True, vars=os.environ) directory = conf.get(scene.instrument_name + "-level4", "dir", vars=os.environ) pathname_tmpl = os.path.join(directory, filename) LOG.debug("Path = " + str(pathname_tmpl)) fparser = parser.Parser(pathname_tmpl) lrit_files = glob( parser.globify(pathname_tmpl, {'nominal_time': scene.time_slot})) prologue = None segmfiles = [] segm_numbers = [] for item in lrit_files: p__ = fparser.parse(item) segm = p__['segment'].strip('_') if segm == 'PRO': prologue = item else: segm_numbers.append(int(segm)) segmfiles.append(item) if not prologue: LOG.warning("No prologue file found for timeslot") segm_numbers.sort() if range(1, 11) == segm_numbers: LOG.info("All ten segment files found") else: LOG.warning("Less than 10 segments found: %s", str(segm_numbers)) return prologue, segmfiles mpop-1.5.0/mpop/satin/msg_hdf.py000066400000000000000000001712731317160620000165510ustar00rootroot00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright (c) 2010, 2012, 2014, 2015. # SMHI, # Folkborgsvägen 1, # Norrköping, # Sweden # Author(s): # Martin Raspaud # Adam Dybbroe # This file is part of mpop. # mpop is free software: you can redistribute it and/or modify it under the # terms of the GNU General Public License as published by the Free Software # Foundation, either version 3 of the License, or (at your option) any later # version. # mpop is distributed in the hope that it will be useful, but WITHOUT ANY # WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR # A PARTICULAR PURPOSE. See the GNU General Public License for more details. # You should have received a copy of the GNU General Public License along with # mpop. If not, see . """Plugin for reading PPS's cloud products hdf files. 
""" import ConfigParser import os.path from mpop import CONFIG_PATH import mpop.channel import numpy as np import pyresample.utils import glob from mpop.utils import get_logger from mpop.projector import get_area_def from datetime import datetime LOG = get_logger('satin/msg_hdf') COMPRESS_LVL = 6 ctype_lut = ['0: Not processed', '1: Cloud free land', '2: Cloud free sea', '3: Snow/ice contaminated land', '4: Snow/ice contaminated sea', '5: Very low cumiliform cloud', '6: Very low stratiform cloud', '7: Low cumiliform cloud', '8: Low stratiform cloud', '9: Medium level cumiliform cloud', '10: Medium level stratiform cloud', '11: High and opaque cumiliform cloud', '12: High and opaque stratiform cloud', '13:Very high and opaque cumiliform cloud', '14: Very high and opaque stratiform cloud', '15: Very thin cirrus cloud', '16: Thin cirrus cloud', '17: Thick cirrus cloud', '18: Cirrus above low or medium level cloud', '19: Fractional or sub-pixel cloud', '20: Undefined'] phase_lut = ['1: Not processed or undefined', '2: Water', '4: Ice', '8: Tb11 below 260K', '16: value not defined', '32: value not defined', '64: value not defined', '128: value not defined'] quality_lut = ['1: Land', '2: Coast', '4: Night', '8: Twilight', '16: Sunglint', '32: High terrain', '64: Low level inversion', '128: Nwp data present', '256: Avhrr channel missing', '512: Low quality', '1024: Reclassified after spatial smoothing', '2048: Stratiform-Cumuliform Distinction performed', '4096: bit not defined', '8192: bit not defined', '16384: bit not defined', '32768: bit not defined'] processing_flags_lut = ["1: Not processed", "2: Cloudy", "4: Opaque cloud", "8: RTTOV IR simulations available", "16: Missing NWP data", "32: thermal inversion avaliable", "64: Missing AVHRR data", "128: RTTOV IR simulation applied", "256: Windowing technique applied", "512: bit not defined", "1024: bit not defined", "2048: bit not defined", "4096: bit not defined", "8192: bit not defined", "16384: Quality estimation avaliable", "32768: Low confidence"] class UnknownChannelError(Exception): pass def pcs_def_from_region(region): items = region.proj_dict.items() return ' '.join([t[0] + '=' + t[1] for t in items]) def _get_area_extent(cfac, lfac, coff, loff, numcols, numlines): """Get the area extent from msg parameters. """ xur = (numcols - coff) * 2 ** 16 / (cfac * 1.0) xur = np.deg2rad(xur) * 35785831.0 xll = (-1 - coff) * 2 ** 16 / (cfac * 1.0) xll = np.deg2rad(xll) * 35785831.0 xres = (xur - xll) / numcols xur, xll = xur - xres / 2, xll + xres / 2 yll = (numlines - loff) * 2 ** 16 / (-lfac * 1.0) yll = np.deg2rad(yll) * 35785831.0 yur = (-1 - loff) * 2 ** 16 / (-lfac * 1.0) yur = np.deg2rad(yur) * 35785831.0 yres = (yur - yll) / numlines yll, yur = yll + yres / 2, yur - yres / 2 return xll, yll, xur, yur def get_area_extent(filename): """Get the area extent of the data in *filename*. """ import h5py h5f = h5py.File(filename, 'r') aex = _get_area_extent(h5f.attrs["CFAC"], h5f.attrs["LFAC"], h5f.attrs["COFF"], h5f.attrs["LOFF"], h5f.attrs["NC"], h5f.attrs["NL"]) h5f.close() return aex def _get_palette(h5f, dsname): try: p = h5f[dsname].attrs['PALETTE'] return h5f[p].value except KeyError: return None class InfoObject(object): """Simple data and info container. 
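It only carries a *data* array and a free-form *info* metadata dict.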
""" def __init__(self): self.info = {} self.data = None # ---------------------------------------- class MsgCloudTypeData(object): """NWCSAF/MSG Cloud Type data layer """ def __init__(self): self.data = None self.scaling_factor = 1 self.offset = 0 self.num_of_lines = 0 self.num_of_columns = 0 self.product = "" self.id = "" class MsgCloudType(mpop.channel.GenericChannel): """NWCSAF/MSG Cloud Type data structure as retrieved from HDF5 file. Resolution sets the nominal resolution of the data. """ def __init__(self): mpop.channel.GenericChannel.__init__(self, "CloudType") self.filled = False self.name = "CloudType" self.package = "" self.saf = "" self.product_name = "" self.num_of_columns = 0 self.num_of_lines = 0 self.projection_name = "" self.pcs_def = "" self.xscale = 0 self.yscale = 0 self.ll_lon = 0.0 self.ll_lat = 0.0 self.ur_lon = 0.0 self.ur_lat = 0.0 self.region_name = "" self.cfac = 0 self.lfac = 0 self.coff = 0 self.loff = 0 self.nb_param = 0 self.gp_sc_id = 0 self.image_acquisition_time = 0 self.spectral_channel_id = 0 self.nominal_product_time = 0 self.sgs_product_quality = 0 self.sgs_product_completeness = 0 self.product_algorithm_version = "" self.cloudtype = None self.processing_flags = None self.cloudphase = None self.shape = None self.satid = "" self.qc_straylight = -1 self.cloudtype_palette = None self.cloudphase_palette = None def __str__(self): try: shape = self.cloudtype.shape except AttributeError: shape = self.shape return ("'%s: shape %s, resolution %sm'" % (self.name, shape, self.resolution)) def is_loaded(self): """Tells if the channel contains loaded data. """ return self.filled # ------------------------------------------------------------------ def read(self, filename): """Reader for the NWCSAF/MSG cloudtype. Use *filename* to read data. 
""" import h5py self.cloudtype = MsgCloudTypeData() self.processing_flags = MsgCloudTypeData() self.cloudphase = MsgCloudTypeData() LOG.debug("Filename = <" + str(filename) + ">") h5f = h5py.File(filename, 'r') # pylint: disable-msg=W0212 self.package = h5f.attrs["PACKAGE"] self.saf = h5f.attrs["SAF"] self.product_name = h5f.attrs["PRODUCT_NAME"] self.num_of_columns = h5f.attrs["NC"] self.num_of_lines = h5f.attrs["NL"] self.projection_name = h5f.attrs["PROJECTION_NAME"] self.region_name = h5f.attrs["REGION_NAME"] self.cfac = h5f.attrs["CFAC"] self.lfac = h5f.attrs["LFAC"] self.coff = h5f.attrs["COFF"] self.loff = h5f.attrs["LOFF"] self.nb_param = h5f.attrs["NB_PARAMETERS"] self.gp_sc_id = h5f.attrs["GP_SC_ID"] self.image_acquisition_time = h5f.attrs["IMAGE_ACQUISITION_TIME"] self.spectral_channel_id = h5f.attrs["SPECTRAL_CHANNEL_ID"] self.nominal_product_time = datetime.strptime(h5f.attrs["NOMINAL_PRODUCT_TIME"], "%Y%m%d%H%M") self.sgs_product_quality = h5f.attrs["SGS_PRODUCT_QUALITY"] self.sgs_product_completeness = h5f.attrs["SGS_PRODUCT_COMPLETENESS"] self.product_algorithm_version = h5f.attrs[ "PACKAGE"] + h5f.attrs["PRODUCT_NAME"] + h5f.attrs["PRODUCT_ALGORITHM_VERSION"] # pylint: enable-msg=W0212 # ------------------------ # The cloudtype data if 'CT' not in h5f.keys(): raise IOError("No field CT in product " + str(self.product_name) + "\n\tPerhaps you have found the wrong file for this product?") h5d = h5f['CT'] self.cloudtype.data = h5d[:, :] self.cloudtype.scaling_factor = h5d.attrs["SCALING_FACTOR"] self.cloudtype.offset = h5d.attrs["OFFSET"] self.cloudtype.num_of_lines = h5d.attrs["N_LINES"] self.cloudtype.num_of_columns = h5d.attrs["N_COLS"] self.shape = (self.cloudtype.num_of_lines, self.cloudtype.num_of_columns) self.cloudtype.product = h5d.attrs["PRODUCT"] self.cloudtype.id = h5d.attrs["ID"] self.cloudtype_palette = _get_palette(h5f, 'CT') # ------------------------ # The cloud phase data h5d = h5f['CT_PHASE'] self.cloudphase.data = h5d[:, :] self.cloudphase.scaling_factor = h5d.attrs["SCALING_FACTOR"] self.cloudphase.offset = h5d.attrs["OFFSET"] self.cloudphase.num_of_lines = h5d.attrs["N_LINES"] self.cloudphase.num_of_columns = h5d.attrs["N_COLS"] self.cloudphase.product = h5d.attrs["PRODUCT"] self.cloudphase.id = h5d.attrs["ID"] self.cloudphase_palette = _get_palette(h5f, 'CT_PHASE') # ------------------------ # The cloudtype processing/quality flags h5d = h5f['CT_QUALITY'] self.processing_flags.data = h5d[:, :] self.processing_flags.scaling_factor = \ h5d.attrs["SCALING_FACTOR"] self.processing_flags.offset = h5d.attrs["OFFSET"] self.processing_flags.num_of_lines = h5d.attrs["N_LINES"] self.processing_flags.num_of_columns = h5d.attrs["N_COLS"] self.processing_flags.product = h5d.attrs["PRODUCT"] self.processing_flags.id = h5d.attrs["ID"] # ------------------------ h5f.close() self.cloudtype = self.cloudtype.data self.cloudphase = self.cloudphase.data self.processing_flags = self.processing_flags.data self.area = get_area_from_file(filename) self.filled = True def save(self, filename, **kwargs): """Save the current cloudtype object to hdf *filename*, in pps format. """ import h5py ctype = self.convert2pps() LOG.info("Saving CType hdf file...") ctype.save(filename, **kwargs) h5f = h5py.File(filename, mode="a") h5f.attrs["straylight_contaminated"] = self.qc_straylight h5f.close() LOG.info("Saving CType hdf file done !") def project(self, coverage): """Remaps the NWCSAF/MSG Cloud Type to cartographic map-projection on area give by a pre-registered area-id. 
Faster version of msg_remap! """ LOG.info("Projecting channel %s..." % (self.name)) region = coverage.out_area dest_area = region.area_id retv = MsgCloudType() retv.name = self.name retv.package = self.package retv.saf = self.saf retv.product_name = self.product_name retv.region_name = dest_area retv.cfac = self.cfac retv.lfac = self.lfac retv.coff = self.coff retv.loff = self.loff retv.nb_param = self.nb_param retv.gp_sc_id = self.gp_sc_id retv.image_acquisition_time = self.image_acquisition_time retv.spectral_channel_id = self.spectral_channel_id retv.nominal_product_time = self.nominal_product_time retv.sgs_product_quality = self.sgs_product_quality retv.sgs_product_completeness = self.sgs_product_completeness retv.product_algorithm_version = self.product_algorithm_version retv.cloudtype = coverage.project_array(self.cloudtype) retv.cloudtype_palette = self.cloudtype_palette retv.cloudphase = coverage.project_array(self.cloudphase) retv.cloudphase_palette = self.cloudphase_palette retv.processing_flags = \ coverage.project_array(self.processing_flags) retv.qc_straylight = self.qc_straylight retv.region_name = dest_area retv.area = region retv.projection_name = region.proj_id retv.pcs_def = pcs_def_from_region(region) retv.num_of_columns = region.x_size retv.num_of_lines = region.y_size retv.xscale = region.pixel_size_x retv.yscale = region.pixel_size_y import pyproj prj = pyproj.Proj(region.proj4_string) aex = region.area_extent lonur, latur = prj(aex[2], aex[3], inverse=True) lonll, latll = prj(aex[0], aex[1], inverse=True) retv.ll_lon = lonll retv.ll_lat = latll retv.ur_lon = lonur retv.ur_lat = latur self.shape = region.shape retv.filled = True retv.resolution = self.resolution return retv def oldconvert2pps(self): """Converts the NWCSAF/MSG Cloud Type to the PPS format, in order to have consistency in output format between PPS and MSG. """ retv = PpsCloudType() retv.region = SafRegion() retv.region.xsize = self.num_of_columns retv.region.ysize = self.num_of_lines retv.region.id = self.region_name retv.region.pcs_id = self.projection_name retv.region.pcs_def = pcs_def_from_region(self.area) retv.region.area_extent = self.area.area_extent retv.satellite_id = self.satid luts = pps_luts() retv.cloudtype_lut = luts[0] retv.phaseflag_lut = [] retv.qualityflag_lut = [] retv.cloudtype_des = "MSG SEVIRI Cloud Type" retv.qualityflag_des = 'MSG SEVIRI bitwise quality/processing flags' retv.phaseflag_des = 'MSG SEVIRI Cloud phase flags' retv.cloudtype = self.cloudtype.astype('B') retv.phaseflag = self.cloudphase.astype('B') retv.qualityflag = ctype_procflags2pps(self.processing_flags) return retv def convert2pps(self): from mpop.satin.nwcsaf_pps import CloudType from nwcsaf_formats.pps_conversions import (old_ctype_palette, old_ctype_palette_data) retv = CloudType() region_type = np.dtype([('area_extent', ' 1: print "*** Warning, more than 1 datafile found: ", filenames filename = filenames[0] print("... 
read data from %s" % str(filename)) # read data from hdf5 file data_folder='U-MARF/MSG/Level1.5/' # Load data from hdf file with h5py.File(filename,'r') as hf: subset_info=hf.get(data_folder+'METADATA/SUBSET') for i in range(subset_info.len()): #print subset_info[i]['EntryName'], subset_info[i]['Value'] if subset_info[i]['EntryName'] == "VIS_IRSouthLineSelectedRectangle": VIS_IRSouthLine = int(subset_info[i]['Value']) if subset_info[i]['EntryName'] == "VIS_IRNorthLineSelectedRectangle": VIS_IRNorthLine = int(subset_info[i]['Value']) if subset_info[i]['EntryName'] == "VIS_IREastColumnSelectedRectangle": VIS_IREastColumn = int(subset_info[i]['Value']) if subset_info[i]['EntryName'] == "VIS_IRWestColumnSelectedRectangle": VIS_IRWestColumn = int(subset_info[i]['Value']) if subset_info[i]['EntryName'] == "HRVLowerNorthLineSelectedRectangle": HRVLowerNorthLine = int(subset_info[i]['Value']) if subset_info[i]['EntryName'] == "HRVLowerSouthLineSelectedRectangle": HRVLowerSouthLine = int(subset_info[i]['Value']) if subset_info[i]['EntryName'] == "HRVLowerEastColumnSelectedRectangle": HRVLowerEastColumn = int(subset_info[i]['Value']) if subset_info[i]['EntryName'] == "HRVLowerWestColumnSelectedRectangle": HRVLowerWestColumn = int(subset_info[i]['Value']) if subset_info[i]['EntryName'] == "HRVUpperSouthLineSelectedRectangle": HRVUpperSouthLine = int(subset_info[i]['Value']) # 0 if subset_info[i]['EntryName'] == "HRVUpperNorthLineSelectedRectangle": HRVUpperNorthLine = int(subset_info[i]['Value']) # 0 if subset_info[i]['EntryName'] == "HRVUpperEastColumnSelectedRectangle": HRVUpperEastColumn = int(subset_info[i]['Value']) # 0 if subset_info[i]['EntryName'] == "HRVUpperWestColumnSelectedRectangle": HRVUpperWestColumn = int(subset_info[i]['Value']) # 0 sat_status=hf.get(data_folder+'METADATA/HEADER/SatelliteStatus/SatelliteStatus_DESCR') for i in range(subset_info.len()): if sat_status[i]['EntryName']=="SatelliteDefinition-NominalLongitude": sat_lon = sat_status[i]['Value'] break #print 'VIS_IRSouthLine', VIS_IRSouthLine #print 'VIS_IRNorthLine', VIS_IRNorthLine #print 'VIS_IREastColumn', VIS_IREastColumn #print 'VIS_IRWestColumn', VIS_IRWestColumn #print 'sat_longitude', sat_lon, type(sat_lon), 'GEOS<'+'{:+06.1f}'.format(sat_lon)+'>' if 1 == 0: # works only if all pixels are on the disk from msg_pixcoord2area import msg_pixcoord2area print "VIS_IRNorthLine, VIS_IRWestColumn, VIS_IRSouthLine, VIS_IREastColumn: ", VIS_IRNorthLine, VIS_IRWestColumn, VIS_IRSouthLine, VIS_IREastColumn area_def = msg_pixcoord2area ( VIS_IRNorthLine, VIS_IRWestColumn, VIS_IRSouthLine, VIS_IREastColumn, "vis", sat_lon ) else: # works also for pixels outside of the disk pname = 'GEOS<'+'{:+06.1f}'.format(sat_lon)+'>' # "GEOS<+009.5>" proj = {'proj': 'geos', 'a': '6378169.0', 'b': '6356583.8', 'h': '35785831.0', 'lon_0': str(sat_lon)} aex=(-5570248.4773392612, -5567248.074173444, 5567248.074173444, 5570248.4773392612) # define full disk projection from pyresample.geometry import AreaDefinition full_disk_def = AreaDefinition('full_disk', 'full_disk', pname, proj, 3712, 3712, aex ) # define name and calculate area for sub-demain area_name= 'MSG_'+'{:04d}'.format(VIS_IRNorthLine)+'_'+'{:04d}'.format(VIS_IRWestColumn)+'_'+'{:04d}'.format(VIS_IRSouthLine)+'_'+'{:04d}'.format(VIS_IREastColumn) aex = full_disk_def.get_area_extent_for_subset(3712-VIS_IRSouthLine,3712-VIS_IRWestColumn,3712-VIS_IRNorthLine,3712-VIS_IREastColumn) area_def = AreaDefinition(area_name, area_name, pname, proj, (VIS_IRWestColumn-VIS_IREastColumn)+1, 
(VIS_IRNorthLine-VIS_IRSouthLine)+1, aex ) #print area_def #print "REGION:", area_def.area_id, "{" #print "\tNAME:\t", area_def.name #print "\tPCS_ID:\t", area_def.proj_id #print ("\tPCS_DEF:\tproj="+area_def.proj_dict['proj']+", lon_0=" + area_def.proj_dict['lon_0'] + ", a="+area_def.proj_dict['a']+", b="+area_def.proj_dict['b']+", h="+area_def.proj_dict['h']) #print "\tXSIZE:\t", area_def.x_size #print "\tYSIZE:\t", area_def.y_size #print "\tAREA_EXTENT:\t", area_def.area_extent #print "};" # copy area to satscene satscene.area = area_def # write information used by mipp.xrit.MSG._Calibrator in a fake header file hdr = dict() # satellite ID number hdr["SatelliteDefinition"] = dict() hdr["SatelliteDefinition"]["SatelliteId"] = SatelliteIds[str(satscene.sat_nr())] # processing hdr["Level 1_5 ImageProduction"] = dict() hdr["Level 1_5 ImageProduction"]["PlannedChanProcessing"] = np_array([2,2,2,2,2,2,2,2,2,2,2,2], int) # calibration factors Level15ImageCalibration = hf.get(data_folder+'METADATA/HEADER/RadiometricProcessing/Level15ImageCalibration_ARRAY') hdr["Level1_5ImageCalibration"] = dict() for chn_name in channel_numbers.keys(): chn_nb = channel_numbers[chn_name]-1 hdr["Level1_5ImageCalibration"][chn_nb] = dict() #print chn_name, chn_nb, Level15ImageCalibration[chn_nb]['Cal_Slope'], Level15ImageCalibration[chn_nb]['Cal_Offset'] hdr["Level1_5ImageCalibration"][chn_nb]['Cal_Slope'] = Level15ImageCalibration[chn_nb]['Cal_Slope'] hdr["Level1_5ImageCalibration"][chn_nb]['Cal_Offset'] = Level15ImageCalibration[chn_nb]['Cal_Offset'] # loop over channels to load for chn_name in satscene.channels_to_load: dataset_name = data_folder+'DATA/'+dict_channel[chn_name]+'/IMAGE_DATA' if dataset_name in hf: data_tmp = hf.get(data_folder+'DATA/'+dict_channel[chn_name]+'/IMAGE_DATA') LOG.info('hdr["SatelliteDefinition"]["SatelliteId"]: '+str(hdr["SatelliteDefinition"]["SatelliteId"])) #LOG.info('hdr["Level 1_5 ImageProduction"]["PlannedChanProcessing"]', hdr["Level 1_5 ImageProduction"]["PlannedChanProcessing"]) chn_nb = channel_numbers[chn_name]-1 LOG.info('hdr["Level1_5ImageCalibration"][chn_nb]["Cal_Slope"]: '+str(hdr["Level1_5ImageCalibration"][chn_nb]["Cal_Slope"])) LOG.info('hdr["Level1_5ImageCalibration"][chn_nb]["Cal_Offset"]: '+str(hdr["Level1_5ImageCalibration"][chn_nb]["Cal_Offset"])) if calibrate: #Calibrator = _Calibrator(hdr, chn_name) bits_per_pixel = 10 ### !!! I have no idea if this is correct !!! 
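# --- Hedged sketch, not part of the original module ----------------------
# The fake header built above feeds mipp.xrit.MSG._Calibrator, which turns
# raw counts into radiances.  At its core that conversion is a per-channel
# linear transform; the standalone sketch below illustrates that form
# (radiance = counts * Cal_Slope + Cal_Offset) with made-up slope/offset
# values -- it is an assumption about the shape of the conversion, not a
# reproduction of mipp's internals.
import numpy as np

def counts_to_radiance_sketch(counts, cal_slope, cal_offset):
    """Linear count-to-radiance conversion (illustration only)."""
    return counts * cal_slope + cal_offset

# Example: 10-bit counts with hypothetical calibration coefficients
# counts_to_radiance_sketch(np.array([0, 512, 1023]), 0.02, -1.0)
# --------------------------------------------------------------------------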
Calibrator = _Calibrator(hdr, chn_name, bits_per_pixel) ## changed call in mipp/xrit/MSG.py data, calibration_unit = Calibrator (data_tmp, calibrate=1) else: data = data_tmp calibration_unit = "counts" LOG.info(chn_name+ " min/max: "+str(data.min())+","+str(data.max())+" "+calibration_unit ) satscene[chn_name] = ma.asarray(data) satscene[chn_name].info['units'] = calibration_unit satscene[chn_name].info['satname'] = satscene.satname satscene[chn_name].info['satnumber'] = satscene.number satscene[chn_name].info['instrument_name'] = satscene.instrument_name satscene[chn_name].info['time'] = satscene.time_slot satscene[chn_name].info['is_calibrated'] = True else: print "*** Warning, no data for channel "+ chn_name+ " in file "+ filename data = np_nan calibration_unit = "" LOG.info("*** Warning, no data for channel "+ chn_name+" in file "+filename) # do not append the channel chn_name mpop-1.5.0/mpop/satin/nc_osisaf_l2.py000066400000000000000000000310651317160620000174750ustar00rootroot00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright (c) 2015, 2016 Adam.Dybbroe # Author(s): # Adam.Dybbroe # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # You should have received a copy of the GNU General Public License # along with this program. If not, see . """A reader for the OSISAF SST netCDF format """ import os.path from ConfigParser import ConfigParser import glob import mpop.channel from mpop import CONFIG_PATH from mpop.plugin_base import Reader from netCDF4 import Dataset import numpy as np import time from datetime import datetime import logging LOG = logging.getLogger(__name__) class InfoObject(object): """Simple data and metadata/header container. """ def __init__(self): self.info = {} self.data = None class OsisafNarSstProduct(mpop.channel.GenericChannel): def __init__(self, filename=None): mpop.channel.GenericChannel.__init__(self) self.name = "SST" self.mda = {} #self.header = {} self._projectables = [] self._keys = [] self._refs = {} self.time = None self.dtime = None self.sec_1981 = None self.sec_1970_1981 = time.mktime( (1981, 1, 1, 0, 0, 0, 0, 0, 0)) - time.timezone self.file = None self.sst = None self.l2pf = None # L2P flags self.dt = None # DT_analysis (K): (SST Deviation from previous day) self.stdv = None # Standard deviation self.bias = None # Bias self.lon = None self.lat = None self.shape = None if filename: self.read(filename) def read(self, filename, load_lonlat=True): """Read the OSISAF SST netCDF formatet data (from Ifremer)""" LOG.debug("OSISAF netCDF file format...") self.file = Dataset(filename, 'r') self.fillheader() # SST (K): sst_data = self.file.variables['sea_surface_temperature'] sstdata = sst_data[0] self.sst = InfoObject() # For some strange reason the array seem to start in the lower left!? 
self.sst.data = sstdata[::-1] self.sst.info = self.get_data_header(self.sst.info, sst_data) self._projectables.append('sst') # dtime: dtime = self.file.variables['sst_dtime'] dtime_data = dtime[0] * dtime.scale_factor + dtime.add_offset dtime_obj = InfoObject() dtime_obj.data = dtime_data[::-1] dtime_obj.info = self.get_data_header(dtime_obj.info, dtime) self.sec_1981 = dtime_obj.data + self.file.variables['time'][0] self.dtime = dtime_obj self._projectables.append('dtime') # DT_analysis (K): (SST Deviation from previous day) dta = self.file.variables['dt_analysis'] gain = 0.1 nodata = 255 offset = -12.7 data = dta[0] * dta.scale_factor + dta.add_offset valid_min = dta.valid_min valid_max = dta.valid_max dt_data = np.where(np.logical_and(np.greater(dta[0], valid_min), np.less(dta[0], valid_max)), (data - offset) / gain, nodata).astype('B') dt = InfoObject() dt.data = dt_data[::-1] dt.info = self.get_data_header(dt.info, dta) dt.info["nodata"] = nodata dt.info["gain"] = gain dt.info["offset"] = offset self.dt = dt self._projectables.append('dt') # Bias: bias = self.file.variables['sses_bias'] gain = 0.01 offset = -1.27 nodata = 255 x = bias[0] * bias.scale_factor + bias.add_offset valid_min = bias.valid_min valid_max = bias.valid_max bias_data = np.where(np.logical_and(np.greater(bias[0], valid_min), np.less(bias[0], valid_max)), (x - offset) / gain, nodata).astype('B') bias_obj = InfoObject() bias_obj.data = bias_data[::-1] bias_obj.info = self.get_data_header(bias_obj.info, bias) bias_obj.info["nodata"] = nodata bias_obj.info["gain"] = gain bias_obj.info["offset"] = offset self.bias = bias_obj self._projectables.append('bias') # Standard deviation: stdv = self.file.variables['sses_standard_deviation'] gain = 0.01 offset = 0.0 nodata = 255 x = stdv[0] * stdv.scale_factor + stdv.add_offset valid_min = stdv.valid_min valid_max = stdv.valid_max stdv_data = np.where(np.logical_and(np.greater(stdv[0], valid_min), np.less(stdv[0], valid_max)), (x - offset) / gain, nodata).astype('B') stdv_obj = InfoObject() stdv_obj.data = stdv_data[::-1] stdv_obj.info = self.get_data_header(stdv_obj.info, stdv) stdv_obj.info["nodata"] = nodata stdv_obj.info["gain"] = gain stdv_obj.info["offset"] = offset self.stdv = stdv_obj self._projectables.append('stdv') # L2P flags: l2pf = self.file.variables['l2p_flags'][0] l2pf_obj = InfoObject() l2pf_obj.data = l2pf[::-1] l2pf_obj.info = self.get_data_header(l2pf_obj.info, l2pf) self.l2pf = l2pf_obj self._projectables.append('l2pf') # Longitudes: lon = self.file.variables['lon'] self.lon = InfoObject() self.lon.data = lon[::-1].astype('f') self.lon.info = self.get_data_header(self.lon.info, lon) # Latitudes: lat = self.file.variables['lat'] self.lat = InfoObject() self.lat.data = lat[::-1].astype('f') self.lat.info = self.get_data_header(self.lat.info, lat) return def project(self, coverage): """Project the data""" LOG.debug("Projecting channel %s..." % (self.name)) import copy res = copy.copy(self) # Project the data for var in self._projectables: LOG.info("Projecting " + str(var)) res.__dict__[var] = copy.copy(self.__dict__[var]) res.__dict__[var].data = coverage.project_array( self.__dict__[var].data) res.name = self.name res.resolution = self.resolution res.filled = True res.area = coverage.out_area return res def is_loaded(self): """Tells if the channel contains loaded data. 
""" return True def get_data_header(self, header, datasetObj): """Get the attribute names in the datasets that are common""" import copy retv = copy.copy(header) try: retv["longt_name"] = datasetObj.long_name except AttributeError: print "WARNING: No attribute 'long_name'" try: retv["standard_name"] = datasetObj.standard_name except AttributeError: print "WARNING: No attribute 'standard_name'" try: retv["comment"] = datasetObj.comment except AttributeError: pass try: retv["units"] = datasetObj.units except AttributeError: pass try: retv["valid_min"] = datasetObj.valid_min except AttributeError: pass try: retv["valid_max"] = datasetObj.valid_max except AttributeError: pass try: retv["scale_factor"] = datasetObj.scale_factor except AttributeError: pass try: retv["add_offset"] = datasetObj.add_offset except AttributeError: pass return retv def fillheader(self): """Get global header info and store in object""" pltname = self.file.platform self.mda["platform"] = ''.join(pltname.split('_')) self.mda["sensor"] = self.file.sensor self.mda["spatial_resolution"] = self.file.spatial_resolution self.mda["title"] = self.file.title self.mda["comment"] = self.file.comment self.mda["contact"] = self.file.creator_email self.mda["institution"] = self.file.institution self.mda["start_time"] = datetime.strptime( self.file.start_time, '%Y%m%dT%H%M%SZ') self.mda["stop_time"] = datetime.strptime( self.file.stop_time, '%Y%m%dT%H%M%SZ') # Reference time of sst file in seconds since 1981-01-01 00:00:00: self.mda["time"] = self.file.variables['time'][0] self.mda['westernmost_longitude'] = self.file.westernmost_longitude self.mda['easternmost_longitude'] = self.file.easternmost_longitude self.mda['southernmost_latitude'] = self.file.southernmost_latitude self.mda['northernmost_latitude'] = self.file.northernmost_latitude def get_filename(scene, area_name): conf = ConfigParser() conf.read(os.path.join(CONFIG_PATH, scene.fullname + ".cfg")) filename = conf.get(scene.instrument_name + "-level4", "sst_product_filename", raw=True, vars=os.environ) directory = conf.get(scene.instrument_name + "-level4", "sst_product_dir", vars=os.environ) pathname_tmpl = os.path.join(directory, filename) LOG.debug("Path = " + str(pathname_tmpl)) if not scene.orbit: orbit = "" else: orbit = scene.orbit filename_tmpl = (scene.time_slot.strftime(pathname_tmpl) % {"area": area_name, "satellite": scene.satname + scene.number}) product = 'sst' file_list = glob.glob(filename_tmpl) if len(file_list) > 1: LOG.warning("More than 1 file matching for " + product + "! " + str(file_list)) return None elif len(file_list) == 0: LOG.warning( "No " + product + " matching!: " + filename_tmpl) return else: filename = file_list[0] return filename class OSISAF_SST_Reader(Reader): pformat = "nc_osisaf_l2" def load(self, satscene, *args, **kwargs): """Read data from file and load it into *satscene*. """ lonlat_is_loaded = False prodfilename = kwargs.get('filename') if "SST" not in satscene.channels_to_load: LOG.warning("No SST product requested. Nothing to be done...") return try: area_name = satscene.area_id or satscene.area.area_id except AttributeError: area_name = "satproj_?????_?????" 
conf = ConfigParser() conf.read(os.path.join(CONFIG_PATH, satscene.fullname + ".cfg")) # Reading the products product = "sst" classes = {product: OsisafNarSstProduct} LOG.debug("Loading " + product) if isinstance(prodfilename, (list, tuple, set)): for fname in prodfilename: kwargs['filename'] = fname self.load(satscene, *args, **kwargs) return elif (prodfilename and 'OSISAF-L3C' in os.path.basename(prodfilename)): if os.path.basename(prodfilename).split("_")[2] == 'SST': filename = prodfilename else: LOG.warning( "Product file name is not as expected: " + str(prodfilename)) return else: filename = get_filename(satscene, area_name) LOG.info("Filename = " + str(filename)) chn = classes[product]() chn.read(filename, lonlat_is_loaded == False) satscene.channels.append(chn) lons, lats = chn.lon.data, chn.lat.data lonlat_is_loaded = True nodata_mask = False try: from pyresample import geometry lons = np.ma.masked_array(lons, nodata_mask) lats = np.ma.masked_array(lats, nodata_mask) area = geometry.SwathDefinition(lons=lons, lats=lats) except ImportError: area = None if area: chn.area = area else: chn.lon = lons chn.lat = lats LOG.info("Loading OSISAF SST parameters done") return mpop-1.5.0/mpop/satin/nc_pps_l2.py000066400000000000000000001203331317160620000170100ustar00rootroot00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright (c) 2014, 2015, 2016 Adam.Dybbroe # Author(s): # Adam.Dybbroe # Panu Lahtinen # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # You should have received a copy of the GNU General Public License # along with this program. If not, see . 
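# --- Hedged sketch, not part of the original module ----------------------
# The OSISAF reader ending above attaches geolocation by masking nodata in
# the lon/lat arrays and wrapping them in a pyresample SwathDefinition.
# A minimal standalone version of that pattern, with dummy 2x2 coordinates:
import numpy as np
from pyresample import geometry

lons = np.ma.masked_array(np.array([[10.0, 10.5], [10.0, 10.5]]))
lats = np.ma.masked_array(np.array([[60.0, 60.0], [59.5, 59.5]]))
swath_area = geometry.SwathDefinition(lons=lons, lats=lats)
# --------------------------------------------------------------------------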
"""PPS netcdf cloud product reader """ import os.path from ConfigParser import ConfigParser from ConfigParser import NoOptionError from glob import glob from datetime import datetime import numpy as np import h5py from trollsift import Parser import mpop.channel from mpop import CONFIG_PATH from mpop.plugin_base import Reader import logging LOG = logging.getLogger(__name__) class InconsistentDataDimensions(Exception): """Exception for inconsistent dimensions in the data""" pass def unzip_file(filename): """Unzip the file if file is bzipped = ending with 'bz2'""" import tempfile import bz2 if filename.endswith('bz2'): bz2file = bz2.BZ2File(filename) tmpfilename = tempfile.mktemp() try: ofpt = open(tmpfilename, 'wb') ofpt.write(bz2file.read()) ofpt.close() except IOError: import traceback traceback.print_exc() LOG.info("Failed to read bzipped file %s", str(filename)) os.remove(tmpfilename) return None return tmpfilename return None class GeolocationFlyweight(object): """Flyweight-thingy for geolocation: http://yloiseau.net/articles/DesignPatterns/flyweight/ """ def __init__(self, cls): self._cls = cls self._instances = dict() def __call__(self, *args, **kargs): """ we assume that this is only used for the gelocation object, filenames are listed in the second argument """ return self._instances.setdefault(tuple(args[1]), self._cls(*args, **kargs)) def clear_cache(self): """Clear cache""" del self._instances self._instances = dict() #@GeolocationFlyweight class PpsGeolocationData(object): '''Class for holding PPS geolocation data''' def __init__(self, shape, granule_lengths, filenames): self.filenames = filenames self.shape = shape self.granule_lengths = granule_lengths self.longitudes = None self.latitudes = None self.row_indices = None self.col_indices = None self.mask = None def read(self): """ Read longitudes and latitudes from geo filenames and assemble """ if self.longitudes is not None: return self self.longitudes = np.empty(self.shape, dtype=np.float32) self.latitudes = np.empty(self.shape, dtype=np.float32) self.mask = np.zeros(self.shape, dtype=np.bool) swath_index = 0 for idx, filename in enumerate(self.filenames): y0_ = swath_index y1_ = swath_index + self.granule_lengths[idx] swath_index = swath_index + self.granule_lengths[idx] get_lonlat_into(filename, self.longitudes[y0_:y1_, :], self.latitudes[y0_:y1_, :], self.mask[y0_:y1_, :]) self.longitudes = np.ma.array(self.longitudes, mask=self.mask, copy=False) self.latitudes = np.ma.array(self.latitudes, mask=self.mask, copy=False) LOG.debug("Geolocation read in for %s", str(self)) return self class HDF5MetaData(object): """ Small class for inspecting a HDF5 file and retrieve its metadata/header data. It is developed for JPSS/NPP data but is really generic and should work on most other hdf5 files. Supports """ def __init__(self, filename): self.metadata = {} self.filename = filename if not os.path.exists(filename): raise IOError("File %s does not exist!" % filename) def read(self): """Read the metadata""" filename = self.filename unzipped = unzip_file(filename) if unzipped: filename = unzipped with h5py.File(filename, 'r') as h5f: h5f.visititems(self.collect_metadata) self._collect_attrs('/', h5f.attrs) if unzipped: os.remove(unzipped) return self def _collect_attrs(self, name, attrs): """Collect atributes""" for key in attrs.keys(): # Throws a TypeError if key==DIMENSION_LIST and the value # is accessed # Observed at FMI (Panu) - maybe hdf5 version specific? 
# Should preferably be handled elsewhere and not in this generic class # FIXME! if key in ['DIMENSION_LIST']: continue value = np.squeeze(attrs[key]) if issubclass(value.dtype.type, str): self.metadata["%s/attr/%s" % (name, key)] = str(value) else: self.metadata["%s/attr/%s" % (name, key)] = value def collect_metadata(self, name, obj): """Collect metadata""" if isinstance(obj, h5py.Dataset): self.metadata["%s/shape" % name] = obj.shape self._collect_attrs(name, obj.attrs) def __getitem__(self, key): long_key = None for mkey in self.metadata.keys(): if mkey.endswith(key): if long_key is not None: raise KeyError("Multiple keys called %s" % key) long_key = mkey break return self.metadata[long_key] def keys(self): """Return metadata dictionary keys""" return self.metadata.keys() def get_data_keys(self): """Get data keys from the metadata""" data_keys = [] for key in self.metadata.keys(): if key.endswith("/shape"): data_key = key.split("/shape")[0] data_keys.append(data_key) return data_keys def get_data_keys_and_shapes(self): """Get data keys and array shapes from the metadata""" data_keys = {} for key in self.metadata.keys(): if key.endswith("/shape"): data_key = key.split("/shape")[0] shape = self.metadata[key] data_keys[data_key] = shape return data_keys class PPSMetaData(HDF5MetaData): """Class for holding PPS metadata""" def get_shape(self): """Get array shapes from metadata""" n_x = 0 n_y = 0 for key in self.metadata: if key.endswith('nx/shape'): n_x = self.metadata[key][0] if key.endswith('ny/shape'): n_y = self.metadata[key][0] return n_x, n_y def get_header_info(self): """Get platform name, orbit number and time slot as dictionary""" info = {} for key in self.metadata: if key.endswith('platform'): info['platform_name'] = self.metadata[key] elif key.endswith('orbit_number'): info['orbit'] = self.metadata[key] elif key.endswith('time_coverage_start'): info['time_slot'] = datetime.strptime(self.metadata[key][:-2], "%Y%m%dT%H%M%S") return info def get_dataset_attributes(self, var_name): """Get dataset attributes""" retv = {} for key in self.metadata: if key.split('/')[0] == var_name and key.find('attr') > 0: dictkey = key.split('/')[-1] if dictkey in ['DIMENSION_LIST']: continue retv[dictkey] = self.metadata[key] return retv def get_root_attributes(self): """Get attributes of the root directory""" retv = {} for key in self.metadata: if key.startswith('//attr'): dictkey = key.split('/')[-1] retv[dictkey] = self.metadata[key] return retv def get_filenames(scene, products, conf, time_interval, area_name): """Get list of filenames within time interval""" filename = conf.get(scene.instrument_name + "-level3", "cloud_product_filename", raw=True, vars=os.environ) directory = conf.get(scene.instrument_name + "-level3", "cloud_product_dir", vars=os.environ) pathname_tmpl = os.path.join(directory, filename) starttime, endtime = time_interval if not scene.orbit: orbit = "" else: orbit = scene.orbit flist_allproducts = [] for product in products: values = {"area": area_name, "satellite": scene.satname + scene.number, "product": product} if endtime: # Okay, we need to check for more than one granule/swath! # First get all files with all times matching in directory: values["orbit"] = '?????' 
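# --- Hedged sketch, not part of the original module ----------------------
# When an end time is given, a single strftime expansion of the filename
# template cannot match all candidate granules, so every date/time
# directive is replaced by "?" wildcards before globbing (see
# globify_date() at the bottom of this module); the hits are then filtered
# by their parsed start times.  The core idea, repeated here as a sketch:
def globify_date_sketch(template):
    """Replace strftime directives with glob wildcards (illustration)."""
    for directive, wild in (("%Y", "????"), ("%m", "??"), ("%d", "??"),
                            ("%H", "??"), ("%M", "??"), ("%S", "??")):
        template = template.replace(directive, wild)
    return template

# globify_date_sketch("S_NWC_CT_%Y%m%dT%H%M%S.nc")
#   -> "S_NWC_CT_????????T??????.nc"
# --------------------------------------------------------------------------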
filename_tmpl = os.path.join(directory, globify_date(filename) % values) else: values["orbit"] = str(orbit).zfill(5) or "*" filename_tmpl = scene.time_slot.strftime( pathname_tmpl) % values LOG.debug("File path = %s", str(filename_tmpl)) file_list = glob(filename_tmpl) if len(file_list) == 0: LOG.warning("No %s product matching", str(product)) elif len(file_list) > 1 and not endtime: LOG.warning("More than 1 file matching for %s: %s", str(product), str(file_list)) file_list = [] elif len(file_list) > 1: file_list = extract_filenames_in_time_window( file_list, starttime, endtime) if len(file_list) == 0: LOG.warning("No files found matching time window for product %s", product) flist_allproducts = flist_allproducts + file_list return flist_allproducts def extract_filenames_in_time_window(file_list, starttime, endtime): """Extract the filenames with time inside the time interval specified. NB! Only tested for EARS-NWC granules. This does not support assembling several locally received full swaths""" # New EARS-NWC filenames: # Ex.: # W_XX-EUMETSAT-Darmstadt,SING+LEV+SAT,NOAA19+CT_C_EUMS_20150819124700_\ # 33643.nc.bz2 pnew = Parser(EARS_PPS_FILE_MASK) # Old EARS-NWC filenames: # Ex.: # ctth_20130910_205300_metopb.h5.bz2 pold = Parser("{product:s}_{starttime:%Y%m%d_%H%M}00_{platform_name:s}.h5" "{compression:s}") plocal = Parser(LOCAL_PPS_FILE_MASK) valid_filenames = [] valid_times = [] LOG.debug("Time window: (%s, %s)", str(starttime), str(endtime)) for fname in file_list: try: data = pnew.parse(os.path.basename(fname)) except ValueError: try: data = pold.parse(os.path.basename(fname)) except ValueError: data = plocal.parse(os.path.basename(fname)) if (data['starttime'] >= starttime and data['starttime'] < endtime): valid_filenames.append(fname) valid_times.append(data['starttime']) LOG.debug("Start time %s inside window", str(data['starttime'])) else: pass # Can we rely on the files being sorted according to time? # Sort the filenames according to time: vtimes = np.array(valid_times) idx = np.argsort(vtimes) vfiles = np.array(valid_filenames) return np.take(vfiles, idx).tolist() class InfoObject(object): """Simple data and info container. """ def __init__(self): self.info = {} self.data = None class NwcSafPpsChannel(mpop.channel.GenericChannel): """Class for NWC-SAF PPS channel data""" def __init__(self): mpop.channel.GenericChannel.__init__(self) self.mda = {} self._projectables = [] self.shape = None self.granule_lengths = None self.filenames = None self.platform_name = None self.begin_time = None self.end_time = None self.orbit_begin = None self.orbit_end = None def read(self, pps_product): """Read the PPS v2014 formated data""" LOG.debug("Read the PPS product data...") self._projectables = pps_product.projectables self.granule_lengths = pps_product.granule_lengths self.shape = pps_product.shape self.filenames = pps_product.filenames self.orbit_begin = pps_product.orbit_begin self.orbit_end = pps_product.orbit_end self.platform_name = pps_product.platform_name self.begin_time = pps_product.begin_time self.end_time = pps_product.end_time # Take the metadata of the first granule and store as global #self.mda = pps_product.metadata[0].metadata mda = pps_product.metadata[0] self.mda = mda.metadata self.mda.update(mda.get_root_attributes()) for var_name in pps_product.mda.keys(): setattr(self, var_name, InfoObject()) # Fill the info dict... 
getattr(self, var_name).info = mda.get_dataset_attributes(var_name) try: getattr(self, var_name).data = self.mda[var_name] except KeyError: continue for var_name in self._projectables: setattr(self, var_name, InfoObject()) # Fill the info dict... getattr(self, var_name).info = mda.get_dataset_attributes(var_name) getattr(self, var_name).data = \ np.ma.masked_array(pps_product.raw_data[var_name], mask=pps_product.mask[var_name], fill_value=pps_product.fill_value[var_name]) return def project(self, coverage): """Project the data""" LOG.debug("Projecting channel %s...", self.name) import copy res = copy.copy(self) # Project the data for var in self._projectables: LOG.info("Projecting %s", str(var)) res.__dict__[var] = copy.copy(self.__dict__[var]) res.__dict__[var].data = coverage.project_array( self.__dict__[var].data) res.name = self.name res.resolution = self.resolution res.filled = True res.area = coverage.out_area return res def is_loaded(self): """Tells if the channel contains loaded data. """ return len(self._projectables) > 0 def save(self, filename, old=True, **kwargs): """Save to old format""" del kwargs if old: try: from nwcsaf_formats.ppsv2014_to_oldformat import write_product write_product(self, filename) except ImportError: LOG.error("Could not save to old format") raise else: raise NotImplementedError("Can't save to new pps format yet.") class PPSProductData(object): """Placeholder for the PPS netCDF product data. Reads the netCDF files using h5py. One file for each product and granule/swath. """ def __init__(self, filenames): self.filenames = filenames self.mda = {} self.projectables = [] self._keys = [] self._refs = {} self.shape = None self.product_name = 'unknown' self.platform_name = None self.begin_time = None self.end_time = None self.orbit_begin = None self.orbit_end = None self.metadata = [] self.raw_data = {} self.mask = {} self.fill_value = {} self.granule_lengths = [] def read(self): """Read the PPS v2014 formatet data""" self._read_metadata() for key in self.raw_data: LOG.debug("Shape of data: %s", str(self.raw_data[key].shape)) break self._read_data() return self def _set_members(self, hdd): '''Set platform_name, time_slot and orbit class members''' if not self.platform_name and 'platform_name' in hdd: self.platform_name = hdd['platform_name'] if not self.begin_time and 'time_slot' in hdd: self.begin_time = hdd['time_slot'] if 'time_slot' in hdd: self.end_time = hdd['time_slot'] if not self.orbit_begin and 'orbit' in hdd: self.orbit_begin = int(hdd['orbit']) if 'orbit' in hdd: self.orbit_end = int(hdd['orbit']) def _read_metadata(self): """Read metadata from all the files""" LOG.debug("Filenames: %s", str(self.filenames)) swath_length = 0 swath_width = None for fname in self.filenames: LOG.debug("Get and append metadata from file: %s", str(fname)) mda = PPSMetaData(fname).read() # Set the product_name variable: try: self.product_name = mda['product_name'] except KeyError: LOG.warning("No product_name in file!") width, granule_length = mda.get_shape() hdd = mda.get_header_info() self._set_members(hdd) self.metadata.append(mda) self.granule_lengths.append(granule_length) if swath_width: if swath_width != width: raise InconsistentDataDimensions('swath_width not the same ' 'between granules: %d %d', swath_width, width) swath_width = width swath_length = swath_length + granule_length # Take the first granule, and find what data fields it contains # and assume all granules have those same data fields: mda = self.metadata[0] dks = mda.get_data_keys_and_shapes() 
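# --- Hedged sketch, not part of the original module ----------------------
# PpsGeolocationData.read() above and PPSProductData._read_data() below
# both stack consecutive granules into one pre-allocated swath array,
# advancing a running row index by each granule's length.  The core of
# that pattern, with dummy data:
import numpy as np

granules = [np.full((3, 4), 1.0), np.full((2, 4), 2.0)]  # dummy granules
lengths = [granule.shape[0] for granule in granules]
swath = np.empty((sum(lengths), 4), dtype=np.float32)

row = 0
for length, granule in zip(lengths, granules):
    swath[row:row + length, :] = granule
    row += length
# --------------------------------------------------------------------------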
geolocation_fields = ['lon', 'lat', 'lat_reduced', 'lon_reduced'] coordinate_fields = ['nx', 'nx_reduced', 'ny', 'ny_reduced'] for key in dks: if key in geolocation_fields + coordinate_fields: LOG.debug("Key = %s", str(key)) continue shape = dks[key] if len(shape) == 3 and shape[0] == 1: shape = shape[1], shape[2] if shape == (self.granule_lengths[0], swath_width): self.projectables.append(key) else: self.mda.update({key: dks[key]}) # LOG.debug("Key, shape, granule_length, swath_width: %s %s %s %s", # str(key), str(shape), # str(self.granule_lengths[0]), str(swath_width)) # initiate data arrays self.shape = swath_length, swath_width # for field in dks: # if field in geolocation_fields + coordinate_fields: # continue # try: # dtype = mda[field + '/attr/valid_range'].dtype # self.raw_data[str(field)] = np.zeros(self.shape, dtype=dtype) # self.mask[field] = np.zeros(self.shape, dtype=np.bool) # except KeyError: # continue for field in self.projectables: dtype = mda[field + '/attr/valid_range'].dtype try: if not (np.equal(1.0 + mda[field + '/attr/add_offset'], 1.0) and np.equal(1.0 * mda[field + '/attr/scale_factor'], 1.0)): dtype = np.float32 except KeyError: pass self.raw_data[str(field)] = np.zeros(self.shape, dtype=dtype) self.mask[field] = np.zeros(self.shape, dtype=np.bool) def _read_data(self): """Loop over all granules and read one granule at a time and fill the data arrays""" LOG.debug("Read all %s product files...", self.product_name) swath_index = 0 for idx, mda in enumerate(self.metadata): del mda filename = self.filenames[idx] unzipped = unzip_file(filename) if unzipped: filename = unzipped h5f = h5py.File(filename, 'r') variables = {} for key, item in h5f.items(): if item.attrs.get("CLASS") != 'DIMENSION_SCALE': variables[key] = item # processed variables processed = set() non_processed = set(variables.keys()) - processed fields = {} for var_name in non_processed: if var_name in ['lon', 'lat', 'lon_reduced', 'lat_reduced']: continue var = variables[var_name] if ("standard_name" not in var.attrs.keys() and "long_name" not in var.attrs.keys()): LOG.warning("Data field %s is lacking both " "standard_name and long_name", var_name) continue if var_name not in self.projectables: self.metadata[idx].metadata[var_name] = var[:] continue data = var[:] if len(data.shape) == 3 and data.shape[0] == 1: data = data[0] if 'valid_range' in var.attrs.keys(): data = np.ma.masked_outside( data, *var.attrs['valid_range']) elif '_FillValue' in var.attrs.keys(): data = np.ma.masked_where(data, var.attrs['_FillValue']) if "scale_factor" in var.attrs.keys() and \ "add_offset" in var.attrs.keys(): dataset = (data * var.attrs.get("scale_factor", 1) + var.attrs.get("add_offset", 0)) else: dataset = data.copy() if '_FillValue' in var.attrs.keys(): dataset.fill_value = var.attrs['_FillValue'][0] fields[var_name] = dataset LOG.debug("long_name: %s", str(var.attrs['long_name'])) LOG.debug("Var = %s, shape = %s", str(var_name), str(dataset.shape)) processed |= set([var_name]) non_processed = set(variables.keys()) - processed if len(non_processed) > 0: LOG.warning("Remaining non-processed variables: %s", str(non_processed)) h5f.close() if unzipped: os.remove(unzipped) y0_ = swath_index y1_ = swath_index + self.granule_lengths[idx] swath_index = swath_index + self.granule_lengths[idx] for key in self.raw_data.keys(): if key not in self.projectables: continue try: self.raw_data[key][y0_:y1_, :] = fields[key].data self.mask[key][y0_:y1_, :] = fields[key].mask self.fill_value[key] = fields[key].fill_value except 
ValueError: LOG.exception('Mismatch in dimensions: y0_, y1_, ' 'fields[key].data.shape: %s %s %s', str(y0_), str(y1_), str(fields[key].data.shape)) raise return GEO_PRODUCT_NAME_DEFAULT = 'CMA' PPS_PRODUCTS = set(['CMA', 'CT', 'CTTH', 'PC', 'CPP']) LOCAL_PPS_FILE_MASK = ('S_NWC_{product:s}_{platform_name:s}_{orbit:5d}_' + '{starttime:%Y%m%dT%H%M%S}{dummy:1d}Z_' + '{starttime:%Y%m%dT%H%M%S}{dummy2:1d}Z.nc{compression:s}') EARS_PPS_FILE_MASK = ("W_XX-EUMETSAT-Darmstadt,SING+LEV+SAT,{platform_name:s}+" "{product:s}_C_EUMS_{starttime:%Y%m%d%H%M}00_{orbit:05d}.nc" "{compression:s}") # Old EARS-NWC filenames: # Ex.: # ctth_20130910_205300_metopb.h5.bz2 EARS_OLD_PPS_FILE_MASK = ("{product:s}_{starttime:%Y%m%d_%H%M}00_" + "{platform_name:s}.h5{compression:s}") class PPSReader(Reader): """Reader class for PPS files""" pformat = "nc_pps_l2" def __init__(self, *args, **kwargs): Reader.__init__(self, *args, **kwargs) # Source of the data, 'local' or 'ears' self._source = None # Parser for getting info from the file names self._parser = None # Satellite config self._config = None # Location of geolocation files, required for 'local' products self._cloud_product_geodir = None # Name of the product having geolocation for 'local' products self._geolocation_product_name = None def _read_config(self, sat_name, instrument_name): '''Read config for the satellite''' if self._config: return self._config = ConfigParser() configfile = os.path.join(CONFIG_PATH, sat_name + ".cfg") LOG.debug("Read configfile %s", configfile) self._config.read(configfile) try: self._cloud_product_geodir = \ self._config.get(instrument_name + "-level3", "cloud_product_geodir", raw=True, vars=os.environ) except NoOptionError: pass LOG.debug("cloud_product_geodir = %s", self._cloud_product_geodir) try: self._geolocation_product_name = \ self._config.get(instrument_name + "-level3", "geolocation_product_name", raw=True, vars=os.environ) except NoOptionError: if self._source != 'ears': LOG.warning("No geolocation product name given in config, " "using default: %s", GEO_PRODUCT_NAME_DEFAULT) self._geolocation_product_name = GEO_PRODUCT_NAME_DEFAULT def _determine_prod_and_geo_files(self, prodfilenames): """From the list of product files and the products to load determine the product files and the geolocation files that will be considered when reading the data """ # geofiles4product is a dict listing all geo-locations files applicable # for each product. # prodfiles4product is a dict listing all product files for a given # product name prodfiles4product = {} geofiles4product = {} if prodfilenames: if not isinstance(prodfilenames, (list, set, tuple)): prodfilenames = [prodfilenames] for fname in prodfilenames: # Only standard NWCSAF/PPS and EARS-NWC naming accepted! 
# No support for old file names (< PPSv2014) if (os.path.basename(fname).startswith("S_NWC") or os.path.basename(fname).startswith("W_XX-EUMETSAT")): if not self._parser: if os.path.basename(fname).startswith("S_NWC"): self._source = 'local' self._parser = Parser(LOCAL_PPS_FILE_MASK) else: self._source = 'ears' self._parser = Parser(EARS_PPS_FILE_MASK) else: LOG.info("Unrecognized NWCSAF/PPS file: %s", fname) continue parse_info = self._parser.parse(os.path.basename(fname)) prodname = parse_info['product'] if prodname not in prodfiles4product: prodfiles4product[prodname] = [] prodfiles4product[prodname].append(fname) # Assemble geolocation information if self._source == 'ears': # For EARS data, the files have geolocation in themselves for prodname, fnames in prodfiles4product.iteritems(): geofiles4product[prodname] = fnames else: # For locally processed data, use the geolocation from # the product defined in config if self._geolocation_product_name in prodfiles4product: for prodname in prodfiles4product.keys(): geofiles4product[prodname] = \ prodfiles4product[self._geolocation_product_name] else: # If the product files with geolocation are not used, # assume that they are still available on the disk. if self._cloud_product_geodir is None: LOG.warning("Config option 'cloud_product_geodir' is not " "available! Assuming same directory as " "products.") for prodname in prodfiles4product.keys(): geofiles4product[prodname] = [] for fname in prodfiles4product[prodname]: directory = self._cloud_product_geodir or \ os.path.abspath(fname) parse_info = \ self._parser.parse(os.path.basename(fname)) fname = fname.replace(parse_info['product'], self._geolocation_product_name) fname = os.path.join(directory, fname) geofiles4product[prodname].append(fname) # Check that each product file has a corresponding geolocation # file: ''' if self._geolocation_product_name: for prod in products: if prod not in geofiles4product: LOG.error("No product name %s in dict " "geofiles4product!", prod) continue if prod not in prodfiles4product: LOG.error("No product name %s in dict " "prodfiles4product!", prod) continue if len(geofiles4product[prod]) != \ len(prodfiles4product[prod]): LOG.error("Mismatch in number of product files and " "matching geolocation files!") ''' return prodfiles4product, geofiles4product def load(self, satscene, **kwargs): """Read data from file and load it into *satscene*. """ prodfilenames = kwargs.get('filename') time_interval = kwargs.get('time_interval') if prodfilenames and time_interval: LOG.warning("You have specified both a list of files " + "and a time interval") LOG.warning("Specifying a time interval will only take effect " + "if no files are specified") time_interval = None products = satscene.channels_to_load & set(PPS_PRODUCTS) if len(products) == 0: LOG.debug("No PPS cloud products to load, abort") return self._read_config(satscene.fullname, satscene.instrument_name) LOG.info("Products to load: %s", str(products)) # If a list of files are provided to the load call, we disregard the # direcorty and filename specifications/definitions in the config file. if not prodfilenames: try: area_name = satscene.area_id or satscene.area.area_id except AttributeError: area_name = "satproj_?????_?????" 
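# --- Hedged sketch, not part of the original module ----------------------
# The reader above picks a trollsift Parser from the file name prefix
# ("S_NWC" -> local PPS mask, "W_XX-EUMETSAT" -> EARS mask).  A minimal
# illustration of what such a parser extracts, using the sample EARS file
# name quoted in the comments earlier in this module (the exact value
# types below are my reading of the mask, not taken from a test run):
from trollsift import Parser

parser = Parser(EARS_PPS_FILE_MASK)
info = parser.parse("W_XX-EUMETSAT-Darmstadt,SING+LEV+SAT,NOAA19+CT_C_EUMS_"
                    "20150819124700_33643.nc.bz2")
# Expected roughly: info['product'] == 'CT',
#                   info['platform_name'] == 'NOAA19',
#                   info['orbit'] == 33643,
#                   info['starttime'] == datetime(2015, 8, 19, 12, 47)
# --------------------------------------------------------------------------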
# Make the list of files for the requested products: if isinstance(time_interval, (tuple, set, list)) and \ len(time_interval) == 2: time_start, time_end = time_interval else: time_start, time_end = satscene.time_slot, None LOG.debug( "Start and end times: %s %s", str(time_start), str(time_end)) prodfilenames = get_filenames(satscene, products, self._config, (time_start, time_end), area_name) LOG.debug("Product files: %s", str(prodfilenames)) retv = self._determine_prod_and_geo_files(prodfilenames) prodfiles4product, geofiles4product = retv # Reading the products classes = {"CTTH": CloudTopTemperatureHeight, "CT": CloudType, "CMA": CloudMask, "PC": PrecipitationClouds, "CPP": CloudPhysicalProperties } nodata_mask = False read_external_geo = {} for product in products: LOG.debug("Loading %s", product) if product not in prodfiles4product: LOG.warning("No files found for product: %s", product) continue pps_band = PPSProductData(prodfiles4product[product]).read() chn = classes[product]() chn.read(pps_band) if not chn.name in satscene: LOG.info("Adding new channel %s", chn.name) satscene.channels.append(chn) # Check if geolocation is loaded: if not chn.area: read_external_geo[product] = satscene.channels[-1].name # Check if some 'channel'/product needs geolocation. If some # product does not have geolocation, get it from the # geofilename: from pyresample import geometry # Load geolocation for chn_name in read_external_geo.values(): LOG.debug("ch_name = %s", str(chn_name)) chn = satscene[chn_name] geofilenames = geofiles4product[chn_name] LOG.debug("Geo-files = %s", str(geofilenames)) geoloc = PpsGeolocationData(chn.shape, chn.granule_lengths, geofilenames).read() try: satscene[chn.name].area = geometry.SwathDefinition( lons=geoloc.longitudes, lats=geoloc.latitudes) area_name = ("swath_" + satscene.fullname + "_" + str(satscene.time_slot) + "_" + str(chn.shape) + "_" + chn.name) satscene[chn.name].area.area_id = area_name satscene[chn.name].area_id = area_name except ValueError: LOG.exception('Failed making a SwathDefinition: ' + 'min,max lons,lats = (%f %f") (%f,%f)', geoloc.longitudes.data.min(), geoloc.longitudes.data.max(), geoloc.latitudes.data.min(), geoloc.latitudes.data.max()) LOG.warning("No geolocation loaded for %s", str(chn_name)) # PpsGeolocationData.clear_cache() return class CloudType(NwcSafPpsChannel): """CloudType PPS channel object""" def __init__(self): NwcSafPpsChannel.__init__(self) self.name = "CT" class CloudTopTemperatureHeight(NwcSafPpsChannel): """Cloud top temperature and height PPS channel object""" def __init__(self): NwcSafPpsChannel.__init__(self) self.name = "CTTH" class CloudMask(NwcSafPpsChannel): """Cloud mask PPS channel object""" def __init__(self): NwcSafPpsChannel.__init__(self) self.name = "CMA" class PrecipitationClouds(NwcSafPpsChannel): """Precipitation clouds PPS channel object""" def __init__(self): NwcSafPpsChannel.__init__(self) self.name = "PC" class CloudPhysicalProperties(NwcSafPpsChannel): """Cloud physical proeperties PPS channel""" def __init__(self): NwcSafPpsChannel.__init__(self) self.name = "CPP" def get_lonlat_into(filename, out_lons, out_lats, out_mask): """Read lon,lat from hdf5 file""" LOG.debug("Geo File = %s", filename) shape = out_lons.shape unzipped = unzip_file(filename) if unzipped: filename = unzipped mda = HDF5MetaData(filename).read() reduced_grid = False h5f = h5py.File(filename, 'r') if "column_indices" in h5f.keys(): col_indices = h5f["column_indices"][:] if "row_indices" in h5f.keys(): row_indices = h5f["row_indices"][:] 
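# --- Hedged sketch, not part of the original module ----------------------
# When the geolocation file only carries a reduced tie-point grid
# (lon_reduced/lat_reduced plus row/column indices), the full-resolution
# lon/lat fields are reconstructed by interpolation, as done just below
# with geotiepoints.SatelliteInterpolator.  Sketch with a tiny made-up
# 2x2 tie-point grid on a 4x4 output grid (assumes the interpolator's
# default spline orders handle such a small grid):
import numpy as np
from geotiepoints import SatelliteInterpolator

lon_red = np.array([[10.0, 13.0], [10.0, 13.0]])
lat_red = np.array([[60.0, 60.0], [57.0, 57.0]])
row_indices = np.array([0, 3])
col_indices = np.array([0, 3])
satint = SatelliteInterpolator((lon_red, lat_red),
                               (row_indices, col_indices),
                               (np.arange(4), np.arange(4)))
lons_full, lats_full = satint.interpolate()
# --------------------------------------------------------------------------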
if "nx_reduced" in h5f: col_indices = h5f["nx_reduced"][:] if "ny_reduced" in h5f: row_indices = h5f["ny_reduced"][:] for key in mda.get_data_keys(): if ((key.endswith("lat") or key.endswith("lon")) or (key.endswith("lat_reduced") or key.endswith("lon_reduced"))): lonlat = h5f[key] fillvalue = lonlat.attrs["_FillValue"] else: continue if key.endswith("lat"): lonlat.read_direct(out_lats) elif key.endswith("lon"): lonlat.read_direct(out_lons) elif key.endswith("lat_reduced"): lat_reduced = lonlat[:] reduced_grid = True elif key.endswith("lon_reduced"): lon_reduced = lonlat[:] if reduced_grid: from geotiepoints import SatelliteInterpolator cols_full = np.arange(shape[1]) rows_full = np.arange(shape[0]) satint = SatelliteInterpolator((lon_reduced, lat_reduced), (row_indices, col_indices), (rows_full, cols_full)) out_lons[:], out_lats[:] = satint.interpolate() new_mask = False # FIXME: this is to mask out the npp bowtie deleted pixels... if "NPP" in h5f.attrs['platform']: if shape[1] == 3200: # M-bands: new_mask = np.zeros((16, 3200), dtype=bool) new_mask[0, :1008] = True new_mask[1, :640] = True new_mask[14, :640] = True new_mask[15, :1008] = True new_mask[14, 2560:] = True new_mask[1, 2560:] = True new_mask[0, 2192:] = True new_mask[15, 2192:] = True new_mask = np.tile(new_mask, (out_lons.shape[0] / 16, 1)) elif shape[1] == 6400: # I-bands: LOG.info( "PPS on I-band resolution. Mask out bow-tie deletion pixels") LOG.warning("Not yet supported...") new_mask = np.zeros((32, 6400), dtype=bool) new_mask[0:2, :2016] = True new_mask[0:2, 4384:] = True new_mask[2:4, :1280] = True new_mask[2:4, 5120:] = True new_mask[28:30, :1280] = True new_mask[28:30, 5120:] = True new_mask[30:32, :2016] = True new_mask[30:32, 4384:] = True new_mask = np.tile(new_mask, (out_lons.shape[0] / 32, 1)) else: LOG.error("VIIRS shape not supported. " + "No handling of bow-tie deletion pixels: shape = ", str(shape)) out_mask[:] = np.logical_or( new_mask, np.logical_and(out_lats == fillvalue, out_lons == fillvalue)) # new_mask, np.logical_and(out_lats <= fillvalue, out_lons <= fillvalue)) h5f.close() if unzipped: os.remove(unzipped) def globify_date(filename): """Replace date formats with single character wildcards""" filename = filename.replace("%Y", "????") filename = filename.replace("%m", "??") filename = filename.replace("%d", "??") filename = filename.replace("%H", "??") filename = filename.replace("%M", "??") filename = filename.replace("%S", "??") return filename mpop-1.5.0/mpop/satin/nc_reader.py000066400000000000000000000435011317160620000170540ustar00rootroot00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright (c) 2010, 2011, 2012, 2014. # SMHI, # Folkborgsvägen 1, # Norrköping, # Sweden # Author(s): # Martin Raspaud # Adam Dybbroe # This file is part of mpop. # mpop is free software: you can redistribute it and/or modify it under the # terms of the GNU General Public License as published by the Free Software # Foundation, either version 3 of the License, or (at your option) any later # version. # mpop is distributed in the hope that it will be useful, but WITHOUT ANY # WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR # A PARTICULAR PURPOSE. See the GNU General Public License for more details. # You should have received a copy of the GNU General Public License along with # mpop. If not, see . """Very simple netcdf reader for mpop. 
""" # TODO # - complete projection list and attribute list # - handle other units than "m" for coordinates # - handle units for data # - pluginize import warnings from ConfigParser import NoSectionError import numpy as np from netCDF4 import Dataset, num2date from mpop.instruments.visir import VisirCompositer from mpop.satellites import GenericFactory from mpop.satout.cfscene import TIME_UNITS from mpop.utils import get_logger LOG = get_logger("netcdf4/cf reader") # To be complete, get from appendix F of cf conventions MAPPING_ATTRIBUTES = {'grid_mapping_name': "proj", 'standard_parallel': ["lat_1", "lat_2"], 'latitude_of_projection_origin': "lat_0", 'longitude_of_projection_origin': "lon_0", 'longitude_of_central_meridian': "lon_0", 'perspective_point_height': "h", 'false_easting': "x_0", 'false_northing': "y_0", 'semi_major_axis': "a", 'semi_minor_axis': "b", 'inverse_flattening': "rf", 'ellipsoid': "ellps", # not in CF conventions... } # To be completed, get from appendix F of cf conventions PROJNAME = {"vertical_perspective": "nsper", "geostationary": "geos", "albers_conical_equal_area": "aea", "azimuthal_equidistant": "aeqd", "equirectangular": "eqc", "transverse_mercator": "tmerc", "stereographic": "stere", "general_oblique_transformation": "ob_tran" } def _load02(filename): """Load data from a netcdf4 file, cf-satellite v0.2 (2012-02-03). """ rootgrp = Dataset(filename, 'r') # processed variables processed = set() # Currently MPOP does not like unicode (so much). satellite_name, satellite_number = [str(i) for i in rootgrp.platform.rsplit("-", 1)] time_slot = rootgrp.variables["time"].getValue()[0] time_slot = num2date(time_slot, TIME_UNITS) processed |= set(["time"]) try: service = str(rootgrp.service) except AttributeError: service = "" instrument_name = str(rootgrp.instrument) try: orbit = str(rootgrp.orbit) except AttributeError: orbit = None try: scene = GenericFactory.create_scene(satellite_name, satellite_number, instrument_name, time_slot, orbit, None, service) except NoSectionError: scene = VisirCompositer(time_slot=time_slot) scene.satname = satellite_name scene.number = satellite_number scene.service = service dim_chart = {} for var_name, var in rootgrp.variables.items(): varname = None try: varname = var.standard_name except AttributeError: try: varname = var.long_name except AttributeError: pass if varname in ["band_data", "Band data"]: LOG.debug("Found some data: " + var_name) dims = var.dimensions for dim in dims: dim_chart[dim] = var_name for cnt, dim in enumerate(dims): if dim.startswith("band"): break data = var data.set_auto_maskandscale(False) area = None try: area_var_name = getattr(var,"grid_mapping") area_var = rootgrp.variables[area_var_name] proj4_dict = {} for attr, projattr in MAPPING_ATTRIBUTES.items(): try: the_attr = getattr(area_var, attr) if projattr == "proj": proj4_dict[projattr] = PROJNAME[the_attr] elif(isinstance(projattr, (list, tuple))): try: for i, subattr in enumerate(the_attr): proj4_dict[projattr[i]] = subattr except TypeError: proj4_dict[projattr[0]] = the_attr else: proj4_dict[projattr] = the_attr except AttributeError: pass y_name, x_name = dims[:cnt] + dims[cnt + 1:] x__ = rootgrp.variables[x_name][:] y__ = rootgrp.variables[y_name][:] if proj4_dict["proj"] == "geos": x__ *= proj4_dict["h"] y__ *= proj4_dict["h"] x_pixel_size = abs((np.diff(x__)).mean()) y_pixel_size = abs((np.diff(y__)).mean()) llx = x__[0] - x_pixel_size / 2.0 lly = y__[-1] - y_pixel_size / 2.0 urx = x__[-1] + x_pixel_size / 2.0 ury = y__[0] + y_pixel_size / 2.0 
area_extent = (llx, lly, urx, ury) try: # create the pyresample areadef from pyresample.geometry import AreaDefinition area = AreaDefinition("myareaid", "myareaname", "myprojid", proj4_dict, len(x__), len(y__), area_extent) except ImportError: LOG.warning("Pyresample not found, " "cannot load area descrition") processed |= set([area_var_name, x_name, y_name]) LOG.debug("Grid mapping found and used.") except AttributeError: LOG.debug("No grid mapping found.") try: area_var = getattr(var,"coordinates") coordinates_vars = area_var.split(" ") lons = None lats = None for coord_var_name in coordinates_vars: coord_var = rootgrp.variables[coord_var_name] units = getattr(coord_var, "units") if(coord_var_name.lower().startswith("lon") or units.lower().endswith("east") or units.lower().endswith("west")): lons = coord_var[:] elif(coord_var_name.lower().startswith("lat") or units.lower().endswith("north") or units.lower().endswith("south")): lats = coord_var[:] if lons.any() and lats.any(): try: from pyresample.geometry import SwathDefinition area = SwathDefinition(lons=lons, lats=lats) except ImportError: LOG.warning("Pyresample not found, " "cannot load area descrition") processed |= set(coordinates_vars) LOG.debug("Lon/lat found and used.") except AttributeError: LOG.debug("No lon/lat found.") names = rootgrp.variables[dim][:] scales = data.scale_factor offsets = data.add_offset if len(names) == 1: scales = np.array([scales]) offsets = np.array([offsets]) LOG.info("Scales and offsets: %s %s %s" % (str(names), str(scales), str(offsets))) for nbr, name in enumerate(names): name = str(name) try: if cnt == 0: chn_data = data[nbr, :, :].squeeze() if cnt == 1: chn_data = data[:, nbr, :].squeeze() if cnt == 2: chn_data = data[:, :, nbr].squeeze() scene[name] = (np.ma.masked_equal(chn_data, data._FillValue) * scales[nbr] + offsets[nbr]) scene[name].info["units"] = var.units except KeyError: from mpop.channel import Channel scene.channels.append(Channel(name)) if area is not None: scene[name].area = area processed |= set([var_name, dim]) non_processed = set(rootgrp.variables.keys()) - processed for var_name in non_processed: var = rootgrp.variables[var_name] if not (hasattr(var, "standard_name") or hasattr(var, "long_name")): LOG.info("Delayed processing of " + var_name) continue dims = var.dimensions if len(dims) != 1: LOG.info("Don't know what to do with " + var_name) continue dim = dims[0] if var.standard_name == "radiation_wavelength": names = rootgrp.variables[dim][:] for nbr, name in enumerate(names): name = str(name) scene[name].wavelength_range[1] = var[nbr] try: bnds = rootgrp.variables[var.bounds][:] for nbr, name in enumerate(names): name = str(name) scene[name].wavelength_range[0] = bnds[nbr, 0] scene[name].wavelength_range[2] = bnds[nbr, 1] processed |= set([var.bounds]) except AttributeError: pass processed |= set([var_name]) non_processed = set(rootgrp.variables.keys()) - processed if len(non_processed) > 0: LOG.warning("Remaining non-processed variables: " + str(non_processed)) return scene def load_from_nc4(filename): """Load data from a netcdf4 file, cf-satellite v0.1 """ rootgrp = Dataset(filename, 'r') try: rootgrp.satellite_number warnings.warn("You are loading old style netcdf files...", DeprecationWarning) except AttributeError: return _load02(filename) if not isinstance(rootgrp.satellite_number, str): satellite_number = "%02d" % rootgrp.satellite_number else: satellite_number = str(rootgrp.satellite_number) time_slot = rootgrp.variables["time"].getValue()[0] time_slot = 
num2date(time_slot, TIME_UNITS) service = str(rootgrp.service) satellite_name = str(rootgrp.satellite_name) instrument_name = str(rootgrp.instrument_name) try: orbit = str(rootgrp.orbit) except AttributeError: orbit = None try: scene = GenericFactory.create_scene(satellite_name, satellite_number, instrument_name, time_slot, orbit, None, service) except NoSectionError: scene = VisirCompositer(time_slot=time_slot) scene.satname = satellite_name scene.number = satellite_number scene.service = service for var_name, var in rootgrp.variables.items(): area = None if var_name.startswith("band_data"): resolution = var.resolution str_res = str(int(resolution)) + "m" names = rootgrp.variables["bandname"+str_res][:] data = var[:, :, :].astype(var.dtype) data = np.ma.masked_outside(data, var.valid_range[0], var.valid_range[1]) try: area_var = getattr(var,"grid_mapping") area_var = rootgrp.variables[area_var] proj4_dict = {} for attr, projattr in MAPPING_ATTRIBUTES.items(): try: the_attr = getattr(area_var, attr) if projattr == "proj": proj4_dict[projattr] = PROJNAME[the_attr] elif(isinstance(projattr, (list, tuple))): try: for i, subattr in enumerate(the_attr): proj4_dict[projattr[i]] = subattr except TypeError: proj4_dict[projattr[0]] = the_attr else: proj4_dict[projattr] = the_attr except AttributeError: pass x__ = rootgrp.variables["x"+str_res][:] y__ = rootgrp.variables["y"+str_res][:] x_pixel_size = abs((x__[1] - x__[0])) y_pixel_size = abs((y__[1] - y__[0])) llx = x__[0] - x_pixel_size / 2.0 lly = y__[-1] - y_pixel_size / 2.0 urx = x__[-1] + x_pixel_size / 2.0 ury = y__[0] + y_pixel_size / 2.0 area_extent = (llx, lly, urx, ury) try: # create the pyresample areadef from pyresample.geometry import AreaDefinition area = AreaDefinition("myareaid", "myareaname", "myprojid", proj4_dict, data.shape[1], data.shape[0], area_extent) except ImportError: LOG.warning("Pyresample not found, " "cannot load area descrition") except AttributeError: LOG.debug("No grid mapping found.") try: area_var = getattr(var,"coordinates") coordinates_vars = area_var.split(" ") lons = None lats = None for coord_var_name in coordinates_vars: coord_var = rootgrp.variables[coord_var_name] units = getattr(coord_var, "units") if(coord_var_name.lower().startswith("lon") or units.lower().endswith("east") or units.lower().endswith("west")): lons = coord_var[:] elif(coord_var_name.lower().startswith("lat") or units.lower().endswith("north") or units.lower().endswith("south")): lats = coord_var[:] if lons and lats: try: from pyresample.geometry import SwathDefinition area = SwathDefinition(lons=lons, lats=lats) except ImportError: LOG.warning("Pyresample not found, " "cannot load area descrition") except AttributeError: LOG.debug("No lon/lat found.") for i, name in enumerate(names): name = str(name) if var.dimensions[0].startswith("band"): chn_data = data[i, :, :] elif var.dimensions[1].startswith("band"): chn_data = data[:, i, :] elif var.dimensions[2].startswith("band"): chn_data = data[:, :, i] else: raise ValueError("Invalid dimension names for band data") try: scene[name] = (chn_data * rootgrp.variables["scale"+str_res][i] + rootgrp.variables["offset"+str_res][i]) #FIXME complete this #scene[name].info except KeyError: # build the channel on the fly from mpop.channel import Channel wv_var = rootgrp.variables["nominal_wavelength"+str_res] wb_var = rootgrp.variables[getattr(wv_var, "bounds")] minmax = wb_var[i] scene.channels.append(Channel(name, resolution, (minmax[0], wv_var[i][0], minmax[1]))) scene[name] = (chn_data * 
rootgrp.variables["scale"+str_res][i] + rootgrp.variables["offset"+str_res][i]) if area is not None: scene[name].area = area area = None for attr in rootgrp.ncattrs(): scene.info[attr] = getattr(rootgrp, attr) scene.add_to_history("Loaded from netcdf4/cf by mpop") return scene mpop-1.5.0/mpop/satin/nwcsaf_hrw_hdf.py000066400000000000000000000454621317160620000201240ustar00rootroot00000000000000"""Loader for MSG, nwcsaf high resolution hdf5 format. """ from ConfigParser import ConfigParser from mpop import CONFIG_PATH import os from numpy import array as np_array from numpy import empty as np_empty from numpy import append as np_append from numpy import dtype as np_dtype from numpy import append as np_append from numpy import where as np_where from numpy import in1d as np_in1d from numpy import logical_and as np_logical_and from glob import glob from mpop.projector import get_area_def import datetime from copy import deepcopy try: import h5py except ImportError: print "... module h5py needs to be installed" quit() from mipp.xrit.MSG import _Calibrator import logging LOG = logging.getLogger(__name__) #from mpop.utils import debug_on #debug_on() GP_IDs = { 321: '08', # Meteosat 8 322: '09', # Meteosat 9 323: '10', # Meteosat 10 324: '11' } # Meteosat 11 dict_channel = {'CHANNEL00':'HRV', 'CHANNEL01':'VIS006','CHANNEL02':'VIS008','CHANNEL03':'IR_016','CHANNEL04':'IR_039','CHANNEL05':'WV_062',\ 'CHANNEL06':'WV_073','CHANNEL07':'IR_087','CHANNEL08':'IR_097','CHANNEL09':'IR_108','CHANNEL10':'IR_120','CHANNEL11':'IR_134'} # class definition of a high resolution wind data class HRW_class: def __init__(self): # see http://docs.scipy.org/doc/numpy/reference/generated/numpy.dtype.html self.date = None # datetime of the observation self.detailed = None # False-> basic, True -> detailed self.channel = np_array([], dtype='|S6') self.wind_id = np_array([], dtype=int) self.prev_wind_id = np_array([], dtype=int) self.segment_X = np_array([], dtype='f') self.segment_Y = np_array([], dtype='f') self.t_corr_method = np_array([], dtype=int) self.lon = np_array([], dtype='f') # 6.3862 [longitude in degree E] self.lat = np_array([], dtype='f') # 46.8823 [latitude in degree N] self.dlon = np_array([], dtype='f') # -0.011 [longitude in degree E] self.dlat = np_array([], dtype='f') # 0.01 [latitude in degree N] self.pressure = np_array([], dtype='f') # 64200.0 [p in Pa] self.wind_speed = np_array([], dtype='f') # 3.1 [v in m/s] self.wind_direction = np_array([], dtype='f') # 313.0 [v_dir in deg] self.temperature = np_array([], dtype='f') # 272.4 [T in K] self.conf_nwp = np_array([], dtype='f') self.conf_no_nwp = np_array([], dtype='f') self.t_type = np_array([], dtype=int) self.t_level_method = np_array([], dtype=int) self.t_winds = np_array([], dtype=int) self.t_corr_test = np_array([], dtype=int) self.applied_QI = np_array([], dtype=int) self.NWP_wind_levels = np_array([], dtype=int) self.num_prev_winds = np_array([], dtype=int) self.orographic_index= np_array([], dtype=int) self.cloud_type = np_array([], dtype=int) self.wind_channel = np_array([], dtype=int) self.correlation = np_array([], dtype=int) self.pressure_error = np_array([], dtype='f') # ---------------- add two data sets e.g. time steps --------------------- def __add__(self, HRW_class2): HRW_new = HRW_class() HRW_new.date = self.date # !!! does not make sense !!! HRW_new.detailed = self.detailed # !!! does not make sense !!! 
HRW_new.channel = np_append(self.channel, HRW_class2.channel) HRW_new.wind_id = np_append(self.wind_id, HRW_class2.wind_id) HRW_new.prev_wind_id = np_append(self.prev_wind_id, HRW_class2.prev_wind_id) HRW_new.segment_X = np_append(self.segment_X, HRW_class2.segment_X) HRW_new.segment_Y = np_append(self.segment_Y, HRW_class2.segment_Y) HRW_new.t_corr_method = np_append(self.t_corr_method, HRW_class2.t_corr_method) HRW_new.lon = np_append(self.lon, HRW_class2.lon) HRW_new.lat = np_append(self.lat, HRW_class2.lat) HRW_new.dlon = np_append(self.dlon, HRW_class2.dlon) HRW_new.dlat = np_append(self.dlat, HRW_class2.dlat) HRW_new.pressure = np_append(self.pressure, HRW_class2.pressure) HRW_new.wind_speed = np_append(self.wind_speed, HRW_class2.wind_speed) HRW_new.wind_direction = np_append(self.wind_direction, HRW_class2.wind_direction) HRW_new.temperature = np_append(self.temperature, HRW_class2.temperature) HRW_new.conf_nwp = np_append(self.conf_nwp, HRW_class2.conf_nwp) HRW_new.conf_no_nwp = np_append(self.conf_no_nwp, HRW_class2.conf_no_nwp) HRW_new.t_type = np_append(self.t_type, HRW_class2.t_type) HRW_new.t_level_method = np_append(self.t_level_method, HRW_class2.t_level_method) HRW_new.t_winds = np_append(self.t_winds, HRW_class2.t_winds) HRW_new.t_corr_test = np_append(self.t_corr_test, HRW_class2.t_corr_test) HRW_new.applied_QI = np_append(self.applied_QI, HRW_class2.applied_QI) HRW_new.NWP_wind_levels = np_append(self.NWP_wind_levels, HRW_class2.NWP_wind_levels) HRW_new.num_prev_winds = np_append(self.num_prev_winds, HRW_class2.num_prev_winds) HRW_new.orographic_index= np_append(self.orographic_index,HRW_class2.orographic_index) HRW_new.cloud_type = np_append(self.cloud_type, HRW_class2.cloud_type) HRW_new.wind_channel = np_append(self.wind_channel, HRW_class2.wind_channel) HRW_new.correlation = np_append(self.correlation, HRW_class2.correlation) HRW_new.pressure_error = np_append(self.pressure_error, HRW_class2.pressure_error) return HRW_new # ---------------- filter for certain criterias --------------------- def filter(self, **kwargs): # if empty than return self (already empty) if self.channel.size == 0: return self HRW_new = deepcopy(self) for key_filter in ['min_correlation', 'min_conf_nwp', 'min_conf_no_nwp', 'cloud_type', 'level']: if key_filter in kwargs.keys(): # if argument given is None or all keyword than skip this filter if kwargs[key_filter] == None or kwargs[key_filter] == 'all' or kwargs[key_filter] == 'ALL' or kwargs[key_filter] == 'A': continue n1 = str(HRW_new.channel.size) if key_filter == 'min_correlation': inds = np_where(HRW_new.correlation > kwargs[key_filter]) elif key_filter == 'min_conf_nwp': inds = np_where(HRW_new.conf_nwp > kwargs[key_filter]) elif key_filter == 'min_conf_no_nwp': inds = np_where(HRW_new.conf_no_nwp > kwargs[key_filter]) elif key_filter == 'cloud_type': mask = np_in1d(HRW_new.cloud_type, kwargs[key_filter]) inds = np_where(mask)[0] elif key_filter == 'level': if kwargs[key_filter] == 'H': # high level: < 440hPa like in the ISCCP inds = np_where(HRW_new.pressure < 44000 ) elif kwargs[key_filter] == 'M': # mid level: 440hPa ... 
680hPa like in the ISCCP inds = np_where( np_logical_and(44000 < HRW_new.pressure, HRW_new.pressure < 68000) ) elif kwargs[key_filter] == 'L': # low level: > 680hPa like in the ISCCP inds = np_where(68000 < HRW_new.pressure) HRW_new.subset(inds) print " filter for "+key_filter+" = ", kwargs[key_filter],' ('+n1+'->'+str(HRW_new.channel.size)+')' return HRW_new # ---------------- reduce HRW_dataset to the given indices inds --------------------- def subset(self, inds): self.channel = self.channel [inds] self.wind_id = self.wind_id [inds] self.prev_wind_id = self.prev_wind_id [inds] self.segment_X = self.segment_X [inds] self.segment_Y = self.segment_Y [inds] self.t_corr_method = self.t_corr_method [inds] self.lon = self.lon [inds] self.lat = self.lat [inds] self.dlon = self.dlon [inds] self.dlat = self.dlat [inds] self.pressure = self.pressure [inds] self.wind_speed = self.wind_speed [inds] self.wind_direction = self.wind_direction [inds] self.temperature = self.temperature [inds] self.conf_nwp = self.conf_nwp [inds] self.conf_no_nwp = self.conf_no_nwp [inds] self.t_type = self.t_type [inds] self.t_level_method = self.t_level_method [inds] self.t_winds = self.t_winds [inds] self.t_corr_test = self.t_corr_test [inds] self.applied_QI = self.applied_QI [inds] self.NWP_wind_levels = self.NWP_wind_levels [inds] self.num_prev_winds = self.num_prev_winds [inds] self.orographic_index = self.orographic_index[inds] self.cloud_type = self.cloud_type [inds] self.wind_channel = self.wind_channel [inds] self.correlation = self.correlation [inds] self.pressure_error = self.pressure_error [inds] return self def load(satscene, calibrate=True, area_extent=None, read_basic_or_detailed='both', **kwargs): """Load MSG SEVIRI High Resolution Wind (HRW) data from hdf5 format. """ # Read config file content conf = ConfigParser() conf.read(os.path.join(CONFIG_PATH, satscene.fullname + ".cfg")) values = {"orbit": satscene.orbit, "satname": satscene.satname, "number": satscene.number, "instrument": satscene.instrument_name, "satellite": satscene.fullname } LOG.info("assume seviri-level5") print "... assume seviri-level5" satscene.add_to_history("hdf5 data read by mpop/nwcsaf_hrw_hdf.py") # end of scan time 4 min after start end_time = satscene.time_slot + datetime.timedelta(minutes=4) # area !!! satscene.area filename = os.path.join( satscene.time_slot.strftime(conf.get("seviri-level5", "dir", raw=True)), satscene.time_slot.strftime(conf.get("seviri-level5", "filename", raw=True)) % values ) # define classes before we search for files (in order to return empty class if no file is found) HRW_basic = HRW_class() HRW_basic.detailed = False HRW_basic.date = satscene.time_slot HRW_detailed = HRW_class() HRW_detailed.detailed = True HRW_detailed.date = satscene.time_slot print "... search for file: ", filename filenames=glob(str(filename)) if len(filenames) != 0: if len(filenames) > 1: print "*** Warning, more than 1 datafile found: ", filenames filename = filenames[0] print("... read data from %s" % str(filename)) # create an instant of the HRW_class m_per_s_to_knots = 1.944 ## limit channels to read #hrw_channels=['HRV'] # limit basic or detailed or both #read_basic_or_detailed='detailed' #read_basic_or_detailed='basic' with h5py.File(filename,'r') as hf: #print hf.attrs.keys() #print hf.attrs.values() region_name = hf.attrs['REGION_NAME'].replace("_", "") print "... read HRW data for region ", region_name LOG.info("... read HRW data for region "+region_name) sat_ID = GP_IDs[int(hf.attrs["GP_SC_ID"])] print "... 
derived from Meteosat ", sat_ID LOG.info("... derived from Meteosat "+sat_ID) # print('List of arrays in this file: \n', hf.keys()), len(hf.keys()) if len(hf.keys()) == 0: print "*** Warning, empty file ", filename print "" else: for key in hf.keys(): if key[4:9] == "BASIC": if 'read_basic_or_detailed' in locals(): if read_basic_or_detailed.lower() == "detailed": continue HRW_data = HRW_basic # shallow copy elif key[4:12] == "DETAILED": if 'read_basic_or_detailed' in locals(): if read_basic_or_detailed.lower() == "basic": continue HRW_data = HRW_detailed # shallow copy hrw_chn = dict_channel[key[len(key)-9:]] if 'hrw_channels' in locals(): if hrw_channels != None: if hrw_chn not in hrw_channels: print "... "+hrw_chn+" is not in hrw_channels", hrw_channels print " skip reading this channel" continue # read all data channel = hf.get(key) # print '... read wind vectors of channel ', channel.name, hrw_chn # print " i lon lat speed[kn] dir pressure" #for i in range(channel.len()): # print '%3d %10.7f %10.7f %7.2f %7.1f %8.1f' % (channel[i]['wind_id'], channel[i]['lon'], channel[i]['lat'], \ # channel[i]['wind_speed']*m_per_s_to_knots, \ # channel[i]['wind_direction'], channel[i]['pressure']) # create string array with channel names channel_chararray = np_empty(channel.len(), dtype='|S6') channel_chararray[:] = hrw_chn HRW_data.channel = np_append(HRW_data.channel , channel_chararray ) HRW_data.wind_id = np_append(HRW_data.wind_id , channel[:]['wind_id'] ) HRW_data.prev_wind_id = np_append(HRW_data.prev_wind_id , channel[:]['prev_wind_id'] ) HRW_data.segment_X = np_append(HRW_data.segment_X , channel[:]['segment_X'] ) HRW_data.segment_Y = np_append(HRW_data.segment_Y , channel[:]['segment_Y'] ) HRW_data.t_corr_method = np_append(HRW_data.t_corr_method , channel[:]['t_corr_method'] ) HRW_data.lon = np_append(HRW_data.lon , channel[:]['lon'] ) HRW_data.lat = np_append(HRW_data.lat , channel[:]['lat'] ) HRW_data.dlon = np_append(HRW_data.dlon , channel[:]['dlon'] ) HRW_data.dlat = np_append(HRW_data.dlat , channel[:]['dlat'] ) HRW_data.pressure = np_append(HRW_data.pressure , channel[:]['pressure'] ) HRW_data.wind_speed = np_append(HRW_data.wind_speed , channel[:]['wind_speed'] ) HRW_data.wind_direction = np_append(HRW_data.wind_direction , channel[:]['wind_direction'] ) HRW_data.temperature = np_append(HRW_data.temperature , channel[:]['temperature'] ) HRW_data.conf_nwp = np_append(HRW_data.conf_nwp , channel[:]['conf_nwp'] ) HRW_data.conf_no_nwp = np_append(HRW_data.conf_no_nwp , channel[:]['conf_no_nwp'] ) HRW_data.t_type = np_append(HRW_data.t_type , channel[:]['t_type'] ) HRW_data.t_level_method = np_append(HRW_data.t_level_method , channel[:]['t_level_method'] ) HRW_data.t_winds = np_append(HRW_data.t_winds , channel[:]['t_winds'] ) HRW_data.t_corr_test = np_append(HRW_data.t_corr_test , channel[:]['t_corr_test'] ) HRW_data.applied_QI = np_append(HRW_data.applied_QI , channel[:]['applied_QI'] ) HRW_data.NWP_wind_levels = np_append(HRW_data.NWP_wind_levels , channel[:]['NWP_wind_levels'] ) HRW_data.num_prev_winds = np_append(HRW_data.num_prev_winds , channel[:]['num_prev_winds'] ) HRW_data.orographic_index = np_append(HRW_data.orographic_index, channel[:]['orographic_index'] ) HRW_data.cloud_type = np_append(HRW_data.cloud_type , channel[:]['cloud_type'] ) HRW_data.wind_channel = np_append(HRW_data.wind_channel , channel[:]['wind_channel'] ) HRW_data.correlation = np_append(HRW_data.correlation , channel[:]['correlation'] ) HRW_data.pressure_error = np_append(HRW_data.pressure_error , 
channel[:]['pressure_error'] ) # sort according to wind_id inds = HRW_data.wind_id.argsort() HRW_data.subset(inds) # changes HRW_data itself # sorting without conversion to numpy arrays #[e for (wid,pwid) in sorted(zip(HRW_data.wind_id,HRW_data.prev_wind_id))] else: print "*** Error, no file found" print "" sat_ID = "no file" # but we continue the program in order to add an empty channel below ## filter data according to the given optional arguments #n1 = str(HRW_data.channel.size) #HRW_data = HRW_data.filter(**kwargs) #print " apply filters "+' ('+n1+'->'+str(HRW_data.channel.size)+')' chn_name="HRW" satscene[chn_name].HRW_basic = HRW_basic.filter(**kwargs) # returns new object (deepcopied and filtered) satscene[chn_name].HRW_detailed = HRW_detailed.filter(**kwargs) # returns new object (deepcopied and filtered) satscene[chn_name].info['units'] = 'm/s' satscene[chn_name].info['satname'] = 'meteosat' satscene[chn_name].info['satnumber'] = sat_ID satscene[chn_name].info['instrument_name'] = 'seviri' satscene[chn_name].info['time'] = satscene.time_slot satscene[chn_name].info['is_calibrated'] = True mpop-1.5.0/mpop/satin/nwcsaf_msg.py000066400000000000000000003417611317160620000172720ustar00rootroot00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright (c) 2010, 2012, 2014. # SMHI, # Folkborgsvägen 1, # Norrköping, # Sweden # Author(s): # Martin Raspaud # Marco Sassi for CRR, PC (partly), SPhR, PCPh, CRPh # Jörg Asmus for CRR, PC (partly), SPhR, PCPh, CRPH # Ulrich Hamann for CMa, bugfix SPhR.cape, 1st version generic class MsgNwcsafClass # This file is part of mpop. # mpop is free software: you can redistribute it and/or modify it under the # terms of the GNU General Public License as published by the Free Software # Foundation, either version 3 of the License, or (at your option) any later # version. # mpop is distributed in the hope that it will be useful, but WITHOUT ANY # WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR # A PARTICULAR PURPOSE. See the GNU General Public License for more details. # You should have received a copy of the GNU General Public License along with # mpop. If not, see . """Plugin for reading NWCSAF MSG products hdf files. """ import ConfigParser import os.path from mpop import CONFIG_PATH import mpop.channel import numpy as np import pyresample.utils import glob from mpop.utils import get_logger from mpop.projector import get_area_def from os.path import basename LOG = get_logger('satin/nwcsaf_msg') COMPRESS_LVL = 6 def pcs_def_from_region(region): items = region.proj_dict.items() return ' '.join([t[0] + '=' + t[1] for t in items]) def _get_area_extent(cfac, lfac, coff, loff, numcols, numlines): """Get the area extent from msg parameters. """ # h = 35785831.0, see area_def.cfg xur = (numcols - coff) * 2 ** 16 / (cfac * 1.0) xur = np.deg2rad(xur) * 35785831.0 xll = (-1 - coff) * 2 ** 16 / (cfac * 1.0) xll = np.deg2rad(xll) * 35785831.0 xres = (xur - xll) / numcols xur, xll = xur - xres / 2, xll + xres / 2 yll = (numlines - loff) * 2 ** 16 / (-lfac * 1.0) yll = np.deg2rad(yll) * 35785831.0 yur = (-1 - loff) * 2 ** 16 / (-lfac * 1.0) yur = np.deg2rad(yur) * 35785831.0 yres = (yur - yll) / numlines yll, yur = yll + yres / 2, yur - yres / 2 print "msg_hdf _get_area_extent: xll, yll, xur, yur = ", xll, yll, xur, yur return xll, yll, xur, yur def get_area_extent(filename): """Get the area extent of the data in *filename*. 
""" import h5py h5f = h5py.File(filename, 'r') print "msg_hdf get_area_extent: CFAC, LFAC, COFF, LOFF, NC, NL = ", h5f.attrs["CFAC"], h5f.attrs["LFAC"], h5f.attrs["COFF"], h5f.attrs["LOFF"], h5f.attrs["NC"], h5f.attrs["NL"] aex = _get_area_extent(h5f.attrs["CFAC"], h5f.attrs["LFAC"], h5f.attrs["COFF"], h5f.attrs["LOFF"], h5f.attrs["NC"], h5f.attrs["NL"]) h5f.close() return aex def _get_palette(h5f, dsname): try: p = h5f[dsname].attrs['PALETTE'] return h5f[p].value except KeyError: return None # ------------------------------------------------------------------ class MsgCloudMaskData(object): """NWCSAF/MSG Cloud Mask data layer """ def __init__(self): self.data = None self.scaling_factor = 1 self.offset = 0 self.num_of_lines = 0 self.num_of_columns = 0 self.product = "" self.id = "" class MsgCloudMask(mpop.channel.GenericChannel): """NWCSAF/MSG Cloud Mask data structure as retrieved from HDF5 file. Resolution sets the nominal resolution of the data. """ def __init__(self, resolution=None): mpop.channel.GenericChannel.__init__(self, "CloudMask") self.filled = False self.name = "CloudMask" self.resolution = resolution self.package = "" self.saf = "" self.product_name = "" self.num_of_columns = 0 self.num_of_lines = 0 self.projection_name = "" self.pcs_def = "" self.xscale = 0 self.yscale = 0 self.ll_lon = 0.0 self.ll_lat = 0.0 self.ur_lon = 0.0 self.ur_lat = 0.0 self.region_name = "" self.cfac = 0 self.lfac = 0 self.coff = 0 self.loff = 0 self.nb_param = 0 self.gp_sc_id = 0 self.image_acquisition_time = 0 self.spectral_channel_id = 0 self.nominal_product_time = 0 self.sgs_product_quality = 0 self.sgs_product_completeness = 0 self.product_algorithm_version = "" self.CMa = None self.CMa_DUST = None self.CMa_QUALITY = None self.CMa_VOLCANIC = None self.shape = None self.satid = "" self.qc_straylight = -1 def __str__(self): return ("'%s: shape %s, resolution %sm'" % (self.name, self.CMa.shape, self.resolution)) def is_loaded(self): """Tells if the channel contains loaded data. """ return self.filled def read(self, filename, calibrate=True): """Reader for the NWCSAF/MSG cloudtype. Use *filename* to read data. """ import h5py self.CMa = MsgCloudMaskData() self.CMa_DUST = MsgCloudMaskData() self.CMa_QUALITY = MsgCloudMaskData() self.CMa_VOLCANIC = MsgCloudMaskData() h5f = h5py.File(filename, 'r') # pylint: disable-msg=W0212 self.package = h5f.attrs["PACKAGE"] self.saf = h5f.attrs["SAF"] self.product_name = h5f.attrs["PRODUCT_NAME"] self.num_of_columns = h5f.attrs["NC"] self.num_of_lines = h5f.attrs["NL"] self.projection_name = h5f.attrs["PROJECTION_NAME"] self.region_name = h5f.attrs["REGION_NAME"] self.cfac = h5f.attrs["CFAC"] self.lfac = h5f.attrs["LFAC"] self.coff = h5f.attrs["COFF"] self.loff = h5f.attrs["LOFF"] self.nb_param = h5f.attrs["NB_PARAMETERS"] self.gp_sc_id = h5f.attrs["GP_SC_ID"] self.image_acquisition_time = h5f.attrs["IMAGE_ACQUISITION_TIME"] self.spectral_channel_id = h5f.attrs["SPECTRAL_CHANNEL_ID"] self.nominal_product_time = h5f.attrs["NOMINAL_PRODUCT_TIME"] self.sgs_product_quality = h5f.attrs["SGS_PRODUCT_QUALITY"] self.sgs_product_completeness = h5f.attrs["SGS_PRODUCT_COMPLETENESS"] self.product_algorithm_version = h5f.attrs["PRODUCT_ALGORITHM_VERSION"] # pylint: enable-msg=W0212 # ------------------------ # The cloud mask data print "... 
read cloud mask data" h5d = h5f['CMa'] self.CMa.data = h5d[:, :] self.CMa.scaling_factor = h5d.attrs["SCALING_FACTOR"] self.CMa.offset = h5d.attrs["OFFSET"] self.CMa.num_of_lines = h5d.attrs["N_LINES"] self.CMa.num_of_columns = h5d.attrs["N_COLS"] self.shape = (self.CMa.num_of_lines, self.CMa.num_of_columns) self.CMa.product = h5d.attrs["PRODUCT"] self.CMa.id = h5d.attrs["ID"] self.CMa_palette = _get_palette(h5f, 'CMa') # ------------------------ # The cloud mask dust data print "... read cloud mask dust data" h5d = h5f['CMa_DUST'] self.CMa_DUST.data = h5d[:, :] self.CMa_DUST.scaling_factor = h5d.attrs["SCALING_FACTOR"] self.CMa_DUST.offset = h5d.attrs["OFFSET"] self.CMa_DUST.num_of_lines = h5d.attrs["N_LINES"] self.CMa_DUST.num_of_columns = h5d.attrs["N_COLS"] self.CMa_DUST.product = h5d.attrs["PRODUCT"] self.CMa_DUST.id = h5d.attrs["ID"] self.CMa_DUST_palette = _get_palette(h5f, 'CMa_DUST') # ------------------------ # The cloud mask quality print "... read cloud mask quality" h5d = h5f['CMa_QUALITY'] self.CMa_QUALITY.data = h5d[:, :] self.CMa_QUALITY.scaling_factor = h5d.attrs["SCALING_FACTOR"] self.CMa_QUALITY.offset = h5d.attrs["OFFSET"] self.CMa_QUALITY.num_of_lines = h5d.attrs["N_LINES"] self.CMa_QUALITY.num_of_columns = h5d.attrs["N_COLS"] self.CMa_QUALITY.product = h5d.attrs["PRODUCT"] self.CMa_QUALITY.id = h5d.attrs["ID"] # no palette for QUALITY # ------------------------ h5d = h5f['CMa_VOLCANIC'] print "... read volcanic dust mask" self.CMa_VOLCANIC.data = h5d[:, :] self.CMa_VOLCANIC.scaling_factor = h5d.attrs["SCALING_FACTOR"] self.CMa_VOLCANIC.offset = h5d.attrs["OFFSET"] self.CMa_VOLCANIC.num_of_lines = h5d.attrs["N_LINES"] self.CMa_VOLCANIC.num_of_columns = h5d.attrs["N_COLS"] self.CMa_VOLCANIC.product = h5d.attrs["PRODUCT"] self.CMa_VOLCANIC.id = h5d.attrs["ID"] self.CMa_VOLCANIC_palette = _get_palette(h5f, 'CMa_VOLCANIC') # ------------------------ h5f.close() self.CMa = self.CMa.data self.CMa_DUST = self.CMa_DUST.data self.CMa_QUALITY = self.CMa_QUALITY.data self.CMa_VOLCANIC = self.CMa_VOLCANIC.data self.area = get_area_from_file(filename) self.filled = True def save(self, filename): """Save the current cloudtype object to hdf *filename*, in pps format. """ import h5py cma = self.convert2pps() LOG.info("Saving CMa hdf file...") cma.save(filename) h5f = h5py.File(filename, mode="a") h5f.attrs["straylight_contaminated"] = self.qc_straylight h5f.close() LOG.info("Saving CMa hdf file done !") def project(self, coverage): """Remaps the NWCSAF/MSG Cloud Type to cartographic map-projection on area give by a pre-registered area-id. Faster version of msg_remap! """ LOG.info("Projecting channel %s..." 
% (self.name)) region = coverage.out_area dest_area = region.area_id retv = MsgCloudMask() retv.name = self.name retv.package = self.package retv.saf = self.saf retv.product_name = self.product_name retv.region_name = dest_area retv.cfac = self.cfac retv.lfac = self.lfac retv.coff = self.coff retv.loff = self.loff retv.nb_param = self.nb_param retv.gp_sc_id = self.gp_sc_id retv.image_acquisition_time = self.image_acquisition_time retv.spectral_channel_id = self.spectral_channel_id retv.nominal_product_time = self.nominal_product_time retv.sgs_product_quality = self.sgs_product_quality retv.sgs_product_completeness = self.sgs_product_completeness retv.product_algorithm_version = self.product_algorithm_version retv.CMa = coverage.project_array(self.CMa) retv.CMa_DUST = coverage.project_array(self.CMa_DUST) retv.CMa_QUALITY = coverage.project_array(self.CMa_QUALITY) retv.CMa_VOLCANIC = coverage.project_array(self.CMa_VOLCANIC) retv.qc_straylight = self.qc_straylight retv.region_name = dest_area retv.area = region retv.projection_name = region.proj_id retv.pcs_def = pcs_def_from_region(region) retv.num_of_columns = region.x_size retv.num_of_lines = region.y_size retv.xscale = region.pixel_size_x retv.yscale = region.pixel_size_y import pyproj prj = pyproj.Proj(region.proj4_string) aex = region.area_extent lonur, latur = prj(aex[2], aex[3], inverse=True) lonll, latll = prj(aex[0], aex[1], inverse=True) retv.ll_lon = lonll retv.ll_lat = latll retv.ur_lon = lonur retv.ur_lat = latur self.shape = region.shape retv.filled = True retv.resolution = self.resolution return retv def convert2pps(self): """Converts the NWCSAF/MSG Cloud Mask to the PPS format, in order to have consistency in output format between PPS and MSG. """ import epshdf retv = PpsCloudMask() retv.region = epshdf.SafRegion() retv.region.xsize = self.num_of_columns retv.region.ysize = self.num_of_lines retv.region.id = self.region_name retv.region.pcs_id = self.projection_name retv.region.pcs_def = pcs_def_from_region(self.area) retv.region.area_extent = self.area.area_extent retv.satellite_id = self.satid retv.CMa_lut = pps_luts('CMa') retv.CMa_DUST_lut = pps_luts('CMa_DUST') retv.CMa_VOLCANIC_lut = pps_luts('CMa_VOLCANIC') retv.CMa_des = "MSG SEVIRI Cloud Mask" retv.CMa_DUST_des = 'MSG SEVIRI Cloud Mask DUST' retv.CMa_QUALITY_des = 'MSG SEVIRI bitwise quality/processing flags' retv.CMa_VOLCANIC_des = 'MSG SEVIRI Cloud Mask VOLCANIC' retv.CMa = self.CMa.astype('B') retv.CMa_DUST = self.CMa_DUST.astype('B') retv.CMa_QUALITY = self.CMa_QUALITY.astype('B') retv.CMa_VOLCANIC = self.CMa_VOLCANIC.astype('B') return retv def convert2nordrad(self): return NordRadCType(self) #----------------------------------------------------------------------- # ------------------------------------------------------------------ class MsgNwcsafData(object): """NWCSAF/MSG data layer """ def __init__(self): self.data = None self.scaling_factor = 1 self.offset = 0 self.num_of_lines = 0 self.num_of_columns = 0 self.product = "" self.id = "" class MsgNwcsafClass(mpop.channel.GenericChannel): """NWCSAF/MSG data structure as retrieved from HDF5 file. Resolution sets the nominal resolution of the data. 
""" def __init__(self, product, resolution=None): mpop.channel.GenericChannel.__init__(self, product) self.filled = False self.name = product self.var_names = None self.resolution = resolution self.package = "" self.saf = "" self.product_name = "" self.num_of_columns = 0 self.num_of_lines = 0 self.projection_name = "" self.pcs_def = "" self.xscale = 0 self.yscale = 0 self.ll_lon = 0.0 self.ll_lat = 0.0 self.ur_lon = 0.0 self.ur_lat = 0.0 self.region_name = "" self.cfac = 0 self.lfac = 0 self.coff = 0 self.loff = 0 self.nb_param = 0 self.gp_sc_id = 0 self.image_acquisition_time = 0 self.spectral_channel_id = 0 self.nominal_product_time = 0 self.sgs_product_quality = 0 self.sgs_product_completeness = 0 self.product_algorithm_version = "" if product == "CloudMask": self.CMa = None self.CMa_DUST = None self.CMa_QUALITY = None self.CMa_VOLCANIC = None elif product == "CT": self.CT = None self.CT_PHASE = None self.CT_QUALITY = None elif product == "CTTH": self.CTTH_TEMPER = None self.CTTH_HEIGHT = None self.CTTH_PRESS = None self.CTTH_EFFECT = None self.CTTH_QUALITY = None elif product == "CRR": self.crr = None self.crr_accum = None self.crr_intensity = None self.crr_quality = None self.processing_flags = None elif product == "PC": self.probability_1 = None self.processing_flags = None elif product == "SPhR": self.sphr_bl = None self.sphr_cape = None self.sphr_diffbl = None self.sphr_diffhl = None self.sphr_diffki = None self.sphr_diffli = None self.sphr_diffml = None self.sphr_diffshw = None self.sphr_difftpw = None self.sphr_hl = None self.sphr_ki = None self.sphr_li = None self.sphr_ml = None self.sphr_quality = None self.sphr_sflag = None self.sphr_shw = None self.sphr_tpw = None elif product == "PCPh": self.pcph_pc = MNone self.pcph_quality = None self.pcph_dataflag = None self.processing_flags = None elif product =="CRPh": self.crph_crr = None self.crph_accum = None self.crph_iqf = None self.crph_quality = None self.crph_dataflag = None self.processing_flags = None else: print "*** ERROR in MsgNWCSAF (nwcsaf_msg.py)" print " unknown NWCSAF product: ", product quit() self.shape = None self.satid = "" self.qc_straylight = -1 def __str__(self): return ("'%s: shape %s, resolution %sm'" % (self.name, self.shape, self.resolution)) def is_loaded(self): """Tells if the channel contains loaded data. """ return self.filled def read(self, filename, calibrate=True): """Reader for the NWCSAF/MSG cloudtype. Use *filename* to read data. 
""" import h5py if self.name == "CTTH": self.var_names = ('CTTH_TEMPER', 'CTTH_HEIGHT', 'CTTH_PRESS', 'CTTH_EFFECT', 'CTTH_QUALITY') elif self.name == "CloudType": self.var_names = ('CT', 'CT_PHASE', 'CT_QUALITY') elif self.name == "CloudMask": self.var_names = ('CMa', 'CMa_DUST', 'CMa_QUALITY', 'CMa_VOLCANIC') elif self.name == "SPhR": self.var_names = ('SPhR_BL','SPhR_CAPE','SPhR_HL','SPhR_KI','SPhR_LI','SPhR_ML','SPhR_QUALITY','SPhR_SHW','SPhR_TPW') else: print "*** ERROR in MsgNWCSAF read (nwcsaf_msg.py)" print " unknown NWCSAF product: ", product quit() h5f = h5py.File(filename, 'r') # pylint: disable-msg=W0212 self.package = h5f.attrs["PACKAGE"] self.saf = h5f.attrs["SAF"] self.product_name = h5f.attrs["PRODUCT_NAME"] self.num_of_columns = h5f.attrs["NC"] self.num_of_lines = h5f.attrs["NL"] self.projection_name = h5f.attrs["PROJECTION_NAME"] self.region_name = h5f.attrs["REGION_NAME"] self.cfac = h5f.attrs["CFAC"] self.lfac = h5f.attrs["LFAC"] self.coff = h5f.attrs["COFF"] self.loff = h5f.attrs["LOFF"] self.nb_param = h5f.attrs["NB_PARAMETERS"] self.gp_sc_id = h5f.attrs["GP_SC_ID"] self.image_acquisition_time = h5f.attrs["IMAGE_ACQUISITION_TIME"] self.spectral_channel_id = h5f.attrs["SPECTRAL_CHANNEL_ID"] self.nominal_product_time = h5f.attrs["NOMINAL_PRODUCT_TIME"] self.sgs_product_quality = h5f.attrs["SGS_PRODUCT_QUALITY"] self.sgs_product_completeness = h5f.attrs["SGS_PRODUCT_COMPLETENESS"] self.product_algorithm_version = h5f.attrs["PRODUCT_ALGORITHM_VERSION"] # pylint: enable-msg=W0212 # ------------------------ for var_name in self.var_names: print "... read hdf5 variable ", var_name h5d = h5f[var_name] var1=MsgNwcsafData() var1.data = h5d[:, :] var1.scaling_factor = h5d.attrs["SCALING_FACTOR"] var1.offset = h5d.attrs["OFFSET"] var1.num_of_lines = h5d.attrs["N_LINES"] var1.num_of_columns = h5d.attrs["N_COLS"] self.shape = (var1.num_of_lines, var1.num_of_columns) var1.product = h5d.attrs["PRODUCT"] var1.id = h5d.attrs["ID"] # copy temporal var1 to self.var_name if calibrate: print "... apply scaling_factor", var1.scaling_factor, " and offset ", var1.offset setattr(self, var_name, var1.data*var1.scaling_factor +var1.offset ) else: setattr(self, var_name, var1.data) # !!! is there a check needed, if the palette exists? !!! # read 'product'_palette and copy it to self.'product'_palette setattr(self, var_name+"_palette", _get_palette(h5f, var_name) ) h5f.close() self.area = get_area_from_file(filename) self.filled = True def save(self, filename): """Save the current cloudtype object to hdf *filename*, in pps format. """ import h5py cma = self.convert2pps() LOG.info("Saving NWCSAF data as hdf file...") cma.save(filename) h5f = h5py.File(filename, mode="a") h5f.attrs["straylight_contaminated"] = self.qc_straylight h5f.close() LOG.info("Saving NWCSAF data hdf file done !") def project(self, coverage): """Remaps the NWCSAF/MSG Cloud Type to cartographic map-projection on area give by a pre-registered area-id. Faster version of msg_remap! """ LOG.info("Projecting channel %s..." 
% (self.name)) region = coverage.out_area dest_area = region.area_id retv = MsgNwcsafClass(self.name) retv.name = self.name retv.package = self.package retv.saf = self.saf retv.product_name = self.product_name retv.region_name = dest_area retv.cfac = self.cfac retv.lfac = self.lfac retv.coff = self.coff retv.loff = self.loff retv.nb_param = self.nb_param retv.gp_sc_id = self.gp_sc_id retv.image_acquisition_time = self.image_acquisition_time retv.spectral_channel_id = self.spectral_channel_id retv.nominal_product_time = self.nominal_product_time retv.sgs_product_quality = self.sgs_product_quality retv.sgs_product_completeness = self.sgs_product_completeness retv.product_algorithm_version = self.product_algorithm_version # loop for reprojecting data, e.g. retv.CMa = coverage.project_array(self.CMa) for var_name in self.var_names: setattr(retv, var_name, coverage.project_array(getattr(self, var_name))) # !!! BUG !!! copy palette is missing retv.qc_straylight = self.qc_straylight retv.region_name = dest_area retv.area = region retv.projection_name = region.proj_id retv.pcs_def = pcs_def_from_region(region) retv.num_of_columns = region.x_size retv.num_of_lines = region.y_size retv.xscale = region.pixel_size_x retv.yscale = region.pixel_size_y import pyproj prj = pyproj.Proj(region.proj4_string) aex = region.area_extent lonur, latur = prj(aex[2], aex[3], inverse=True) lonll, latll = prj(aex[0], aex[1], inverse=True) retv.ll_lon = lonll retv.ll_lat = latll retv.ur_lon = lonur retv.ur_lat = latur self.shape = region.shape retv.filled = True retv.resolution = self.resolution return retv def convert2pps(self): """Converts the NWCSAF/MSG data set to the PPS format, in order to have consistency in output format between PPS and MSG. """ import epshdf retv = PpsCloudMask() retv.region = epshdf.SafRegion() retv.region.xsize = self.num_of_columns retv.region.ysize = self.num_of_lines retv.region.id = self.region_name retv.region.pcs_id = self.projection_name retv.region.pcs_def = pcs_def_from_region(self.area) retv.region.area_extent = self.area.area_extent retv.satellite_id = self.satid # !!! UH: THIS PART IS TO BE DONE BY SOMEBODY WHO USES PPS !!! # loop for intersting variables for var_name in self.var_names: # get look-up tables, e.g. retv.CMa_lut = pps_luts('CMa') setattr( retv, var_name+"_lut", pps_luts(var_name) ) # get describing strings, e.g. retv.CMa_des = "MSG SEVIRI Cloud Mask" setattr( retv, var_name+"_des", pps_description(var_name) ) # if not processing flag, get astype, e.g. retv.cloudtype = self.cloudtype.astype('B') if var_name.find("QUALITY") != -1 and var_name.find("flag") != -1: setattr( retv, var_name, getattr(self, var_name).astype('B') ) elif var_name=="CT_QUALITY" or var_name=="qualityflag": retv.qualityflag = ctype_procflags2pps(self.processing_flags) elif var_name=="CTTH_QUALITY" or var_name=="processingflag": retv.processingflag = ctth_procflags2pps(self.processing_flags) elif var_name=="CMa_QUALITY" or var_name=="QUALITY": print "*** WARNING, no conversion for CMA and SPhR products flags yet!" # !!! UH: THIS PART IS TO BE DONE BY SOMEBODY WHO USES PPS !!! 
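
        # For the cloud-mask product, for example, the loop above expands to
        # roughly the following (pps_luts and pps_description are the
        # helpers already used in this module):
        #     retv.CMa_lut = pps_luts('CMa')
        #     retv.CMa_des = pps_description('CMa')
        #     retv.CMa = self.CMa.astype('B')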
return retv def convert2nordrad(self): return NordRadCType(self) #----------------------------------------------------------------------- class MsgCloudTypeData(object): """NWCSAF/MSG Cloud Type data layer """ def __init__(self): self.data = None self.scaling_factor = 1 self.offset = 0 self.num_of_lines = 0 self.num_of_columns = 0 self.product = "" self.id = "" class MsgCloudType(mpop.channel.GenericChannel): """NWCSAF/MSG Cloud Type data structure as retrieved from HDF5 file. Resolution sets the nominal resolution of the data. """ def __init__(self): mpop.channel.GenericChannel.__init__(self, "CloudType") self.filled = False self.name = "CloudType" self.package = "" self.saf = "" self.product_name = "" self.num_of_columns = 0 self.num_of_lines = 0 self.projection_name = "" self.pcs_def = "" self.xscale = 0 self.yscale = 0 self.ll_lon = 0.0 self.ll_lat = 0.0 self.ur_lon = 0.0 self.ur_lat = 0.0 self.region_name = "" self.cfac = 0 self.lfac = 0 self.coff = 0 self.loff = 0 self.nb_param = 0 self.gp_sc_id = 0 self.image_acquisition_time = 0 self.spectral_channel_id = 0 self.nominal_product_time = 0 self.sgs_product_quality = 0 self.sgs_product_completeness = 0 self.product_algorithm_version = "" self.cloudtype = None self.processing_flags = None self.cloudphase = None self.shape = None self.satid = "" self.qc_straylight = -1 self.cloudtype_palette = None self.cloudphase_palette = None def __str__(self): return ("'%s: shape %s, resolution %sm'" % (self.name, self.cloudtype.shape, self.resolution)) def is_loaded(self): """Tells if the channel contains loaded data. """ return self.filled # ------------------------------------------------------------------ def read(self, filename, calibrate=True): """Reader for the NWCSAF/MSG cloudtype. Use *filename* to read data. 
""" import h5py self.cloudtype = MsgCloudTypeData() self.processing_flags = MsgCloudTypeData() self.cloudphase = MsgCloudTypeData() LOG.debug("Filename = <" + str(filename) + ">") h5f = h5py.File(filename, 'r') # pylint: disable-msg=W0212 self.package = h5f.attrs["PACKAGE"] self.saf = h5f.attrs["SAF"] self.product_name = h5f.attrs["PRODUCT_NAME"] self.num_of_columns = h5f.attrs["NC"] self.num_of_lines = h5f.attrs["NL"] self.projection_name = h5f.attrs["PROJECTION_NAME"] self.region_name = h5f.attrs["REGION_NAME"] self.cfac = h5f.attrs["CFAC"] self.lfac = h5f.attrs["LFAC"] self.coff = h5f.attrs["COFF"] self.loff = h5f.attrs["LOFF"] self.nb_param = h5f.attrs["NB_PARAMETERS"] self.gp_sc_id = h5f.attrs["GP_SC_ID"] self.image_acquisition_time = h5f.attrs["IMAGE_ACQUISITION_TIME"] self.spectral_channel_id = h5f.attrs["SPECTRAL_CHANNEL_ID"] self.nominal_product_time = h5f.attrs["NOMINAL_PRODUCT_TIME"] self.sgs_product_quality = h5f.attrs["SGS_PRODUCT_QUALITY"] self.sgs_product_completeness = h5f.attrs["SGS_PRODUCT_COMPLETENESS"] self.product_algorithm_version = h5f.attrs["PRODUCT_ALGORITHM_VERSION"] # pylint: enable-msg=W0212 # ------------------------ # The cloudtype data h5d = h5f['CT'] self.cloudtype.data = h5d[:, :] self.cloudtype.scaling_factor = h5d.attrs["SCALING_FACTOR"] self.cloudtype.offset = h5d.attrs["OFFSET"] self.cloudtype.num_of_lines = h5d.attrs["N_LINES"] self.cloudtype.num_of_columns = h5d.attrs["N_COLS"] self.shape = (self.cloudtype.num_of_lines, self.cloudtype.num_of_columns) self.cloudtype.product = h5d.attrs["PRODUCT"] self.cloudtype.id = h5d.attrs["ID"] self.cloudtype_palette = _get_palette(h5f, 'CT') / 255.0 # ------------------------ # The cloud phase data h5d = h5f['CT_PHASE'] self.cloudphase.data = h5d[:, :] self.cloudphase.scaling_factor = h5d.attrs["SCALING_FACTOR"] self.cloudphase.offset = h5d.attrs["OFFSET"] self.cloudphase.num_of_lines = h5d.attrs["N_LINES"] self.cloudphase.num_of_columns = h5d.attrs["N_COLS"] self.cloudphase.product = h5d.attrs["PRODUCT"] self.cloudphase.id = h5d.attrs["ID"] self.cloudphase_palette = _get_palette(h5f, 'CT_PHASE') # ------------------------ # The cloudtype processing/quality flags h5d = h5f['CT_QUALITY'] self.processing_flags.data = h5d[:, :] self.processing_flags.scaling_factor = \ h5d.attrs["SCALING_FACTOR"] self.processing_flags.offset = h5d.attrs["OFFSET"] self.processing_flags.num_of_lines = h5d.attrs["N_LINES"] self.processing_flags.num_of_columns = h5d.attrs["N_COLS"] self.processing_flags.product = h5d.attrs["PRODUCT"] self.processing_flags.id = h5d.attrs["ID"] # ------------------------ h5f.close() self.cloudtype = self.cloudtype.data self.cloudphase = self.cloudphase.data self.processing_flags = self.processing_flags.data self.area = get_area_from_file(filename) self.filled = True def save(self, filename): """Save the current cloudtype object to hdf *filename*, in pps format. """ import h5py ctype = self.convert2pps() LOG.info("Saving CType hdf file...") ctype.save(filename) h5f = h5py.File(filename, mode="a") h5f.attrs["straylight_contaminated"] = self.qc_straylight h5f.close() LOG.info("Saving CType hdf file done !") def project(self, coverage): """Remaps the NWCSAF/MSG Cloud Type to cartographic map-projection on area give by a pre-registered area-id. Faster version of msg_remap! """ LOG.info("Projecting channel %s..." 
% (self.name)) region = coverage.out_area dest_area = region.area_id retv = MsgCloudType() retv.name = self.name retv.package = self.package retv.saf = self.saf retv.product_name = self.product_name retv.region_name = dest_area retv.cfac = self.cfac retv.lfac = self.lfac retv.coff = self.coff retv.loff = self.loff retv.nb_param = self.nb_param retv.gp_sc_id = self.gp_sc_id retv.image_acquisition_time = self.image_acquisition_time retv.spectral_channel_id = self.spectral_channel_id retv.nominal_product_time = self.nominal_product_time retv.sgs_product_quality = self.sgs_product_quality retv.sgs_product_completeness = self.sgs_product_completeness retv.product_algorithm_version = self.product_algorithm_version retv.cloudtype = coverage.project_array(self.cloudtype) retv.cloudtype_palette = self.cloudtype_palette retv.cloudphase = coverage.project_array(self.cloudphase) retv.cloudphase_palette = self.cloudphase_palette retv.processing_flags = \ coverage.project_array(self.processing_flags) retv.qc_straylight = self.qc_straylight retv.region_name = dest_area retv.area = region retv.projection_name = region.proj_id retv.pcs_def = pcs_def_from_region(region) retv.num_of_columns = region.x_size retv.num_of_lines = region.y_size retv.xscale = region.pixel_size_x retv.yscale = region.pixel_size_y import pyproj prj = pyproj.Proj(region.proj4_string) aex = region.area_extent lonur, latur = prj(aex[2], aex[3], inverse=True) lonll, latll = prj(aex[0], aex[1], inverse=True) retv.ll_lon = lonll retv.ll_lat = latll retv.ur_lon = lonur retv.ur_lat = latur self.shape = region.shape retv.filled = True retv.resolution = self.resolution return retv # is it necessary? # def convert2nordrad(self): # return NordRadCType(self) class MsgCTTHData(object): """CTTH data object. """ def __init__(self): self.data = None self.scaling_factor = 1 self.offset = 0 self.num_of_lines = 0 self.num_of_columns = 0 self.product = "" self.id = "" class MsgCTTH(mpop.channel.GenericChannel): """CTTH channel. """ def __init__(self, resolution=None): mpop.channel.GenericChannel.__init__(self, "CTTH") self.filled = False self.name = "CTTH" self.resolution = resolution self.package = "" self.saf = "" self.product_name = "" self.num_of_columns = 0 self.num_of_lines = 0 self.projection_name = "" self.region_name = "" self.cfac = 0 self.lfac = 0 self.coff = 0 self.loff = 0 self.nb_param = 0 self.gp_sc_id = 0 self.image_acquisition_time = 0 self.spectral_channel_id = 0 self.nominal_product_time = 0 self.sgs_product_quality = 0 self.sgs_product_completeness = 0 self.product_algorithm_version = "" self.cloudiness = None # Effective cloudiness self.processing_flags = None self.height = None self.temperature = None self.pressure = None self.satid = "" def __str__(self): return ("'%s: shape %s, resolution %sm'" % (self.name, self.shape, self.resolution)) def is_loaded(self): """Tells if the channel contains loaded data. 
""" return self.filled def read(self, filename, calibrate=True): import h5py self.cloudiness = MsgCTTHData() # Effective cloudiness self.temperature = MsgCTTHData() self.height = MsgCTTHData() self.pressure = MsgCTTHData() self.processing_flags = MsgCTTHData() h5f = h5py.File(filename, 'r') # The header # pylint: disable-msg=W0212 self.package = h5f.attrs["PACKAGE"] self.saf = h5f.attrs["SAF"] self.product_name = h5f.attrs["PRODUCT_NAME"] self.num_of_columns = h5f.attrs["NC"] self.num_of_lines = h5f.attrs["NL"] self.projection_name = h5f.attrs["PROJECTION_NAME"] self.region_name = h5f.attrs["REGION_NAME"] self.cfac = h5f.attrs["CFAC"] self.lfac = h5f.attrs["LFAC"] self.coff = h5f.attrs["COFF"] self.loff = h5f.attrs["LOFF"] self.nb_param = h5f.attrs["NB_PARAMETERS"] self.gp_sc_id = h5f.attrs["GP_SC_ID"] self.image_acquisition_time = h5f.attrs["IMAGE_ACQUISITION_TIME"] self.spectral_channel_id = h5f.attrs["SPECTRAL_CHANNEL_ID"] self.nominal_product_time = h5f.attrs["NOMINAL_PRODUCT_TIME"] self.sgs_product_quality = h5f.attrs["SGS_PRODUCT_QUALITY"] self.sgs_product_completeness = h5f.attrs["SGS_PRODUCT_COMPLETENESS"] self.product_algorithm_version = h5f.attrs["PRODUCT_ALGORITHM_VERSION"] # pylint: enable-msg=W0212 # ------------------------ # The CTTH cloudiness data h5d = h5f['CTTH_EFFECT'] self.cloudiness.data = h5d[:, :] self.cloudiness.scaling_factor = \ h5d.attrs["SCALING_FACTOR"] self.cloudiness.offset = h5d.attrs["OFFSET"] self.cloudiness.num_of_lines = h5d.attrs["N_LINES"] self.cloudiness.num_of_columns = h5d.attrs["N_COLS"] self.cloudiness.product = h5d.attrs["PRODUCT"] self.cloudiness.id = h5d.attrs["ID"] # self.cloudiness.data = np.ma.masked_equal(self.cloudiness.data, 255) # self.cloudiness.data = np.ma.masked_equal(self.cloudiness.data, 0) self.cloudiness_palette = _get_palette(h5f, 'CTTH_EFFECT') / 255.0 # ------------------------ # The CTTH temperature data h5d = h5f['CTTH_TEMPER'] self.temperature.data = h5d[:, :] self.temperature.scaling_factor = \ h5d.attrs["SCALING_FACTOR"] self.temperature.offset = h5d.attrs["OFFSET"] self.temperature.num_of_lines = h5d.attrs["N_LINES"] self.shape = (self.temperature.num_of_lines, self.temperature.num_of_columns) self.temperature.num_of_columns = h5d.attrs["N_COLS"] self.temperature.product = h5d.attrs["PRODUCT"] self.temperature.id = h5d.attrs["ID"] # self.temperature.data = np.ma.masked_equal(self.temperature.data, 0) if calibrate: self.temperature = (self.temperature.data * self.temperature.scaling_factor + self.temperature.offset) else: self.temperature = self.temperature.data self.temperature_palette = _get_palette(h5f, 'CTTH_TEMPER') / 255.0 # ------------------------ # The CTTH pressure data h5d = h5f['CTTH_PRESS'] self.pressure.data = h5d[:, :] self.pressure.scaling_factor = \ h5d.attrs["SCALING_FACTOR"] self.pressure.offset = h5d.attrs["OFFSET"] self.pressure.num_of_lines = h5d.attrs["N_LINES"] self.pressure.num_of_columns = h5d.attrs["N_COLS"] self.pressure.product = h5d.attrs["PRODUCT"] self.pressure.id = h5d.attrs["ID"] # self.pressure.data = np.ma.masked_equal(self.pressure.data, 255) # self.pressure.data = np.ma.masked_equal(self.pressure.data, 0) if calibrate: self.pressure = (self.pressure.data * self.pressure.scaling_factor + self.pressure.offset) else: self.pressure = self.pressure.data self.pressure_palette = _get_palette(h5f, 'CTTH_PRESS') / 255.0 # ------------------------ # The CTTH height data h5d = h5f['CTTH_HEIGHT'] self.height.data = h5d[:, :] self.height.scaling_factor = \ h5d.attrs["SCALING_FACTOR"] 
self.height.offset = h5d.attrs["OFFSET"] self.height.num_of_lines = h5d.attrs["N_LINES"] self.height.num_of_columns = h5d.attrs["N_COLS"] self.height.product = h5d.attrs["PRODUCT"] self.height.id = h5d.attrs["ID"] # self.height.data = np.ma.masked_equal(self.height.data, 255) # self.height.data = np.ma.masked_equal(self.height.data, 0) if calibrate: self.height = (self.height.data * self.height.scaling_factor + self.height.offset) else: self.height = self.height.data self.height_palette = _get_palette(h5f, 'CTTH_HEIGHT') / 255.0 # ------------------------ # The CTTH processing/quality flags h5d = h5f['CTTH_QUALITY'] self.processing_flags.data = h5d[:, :] self.processing_flags.scaling_factor = \ h5d.attrs["SCALING_FACTOR"] self.processing_flags.offset = h5d.attrs["OFFSET"] self.processing_flags.num_of_lines = \ h5d.attrs["N_LINES"] self.processing_flags.num_of_columns = \ h5d.attrs["N_COLS"] self.processing_flags.product = h5d.attrs["PRODUCT"] self.processing_flags.id = h5d.attrs["ID"] self.processing_flags = \ np.ma.masked_equal(self.processing_flags.data, 0) h5f.close() self.shape = self.height.shape self.area = get_area_from_file(filename) self.filled = True def save(self, filename): """Save the current CTTH channel to HDF5 format. """ ctth = self.convert2pps() LOG.info("Saving CTTH hdf file...") ctth.save(filename) LOG.info("Saving CTTH hdf file done !") def project(self, coverage): """Project the current CTTH channel along the *coverage* """ dest_area = coverage.out_area dest_area_id = dest_area.area_id retv = MsgCTTH() retv.temperature = coverage.project_array(self.temperature) retv.height = coverage.project_array(self.height) retv.pressure = coverage.project_array(self.pressure) #retv.cloudiness = coverage.project_array(self.cloudiness) retv.processing_flags = \ coverage.project_array(self.processing_flags) retv.height_palette = self.height_palette retv.pressure_palette = self.pressure_palette retv.temperature_palette = self.temperature_palette retv.area = dest_area retv.region_name = dest_area_id retv.projection_name = dest_area.proj_id retv.num_of_columns = dest_area.x_size retv.num_of_lines = dest_area.y_size retv.shape = dest_area.shape retv.name = self.name retv.resolution = self.resolution retv.filled = True return retv # ---------------------------------------- class MsgPCData(object): """NWCSAF/MSG Precipitating Clouds data layer """ def __init__(self): self.data = None self.scaling_factor = 1 self.offset = 0 self.num_of_lines = 0 self.num_of_columns = 0 self.product = "" self.id = "" class MsgPC(mpop.channel.GenericChannel): """NWCSAF/MSG Precipitating Clouds data structure as retrieved from HDF5 file. Resolution sets the nominal resolution of the data. 
""" def __init__(self): mpop.channel.GenericChannel.__init__(self, "PC") self.filled = False self.name = "PC" self.package = "" self.saf = "" self.product_name = "" self.num_of_columns = 0 self.num_of_lines = 0 self.projection_name = "" self.pcs_def = "" self.xscale = 0 self.yscale = 0 self.ll_lon = 0.0 self.ll_lat = 0.0 self.ur_lon = 0.0 self.ur_lat = 0.0 self.region_name = "" self.cfac = 0 self.lfac = 0 self.coff = 0 self.loff = 0 self.nb_param = 0 self.gp_sc_id = 0 self.image_acquisition_time = 0 self.spectral_channel_id = 0 self.nominal_product_time = 0 self.sgs_product_quality = 0 self.sgs_product_completeness = 0 self.product_algorithm_version = "" self.probability_1 = None self.processing_flags = None self.shape = None self.satid = "" self.qc_straylight = -1 def __str__(self): return ("'%s: shape %s, resolution %sm'" % (self.name, self.probability_1.shape, self.resolution)) def is_loaded(self): """Tells if the channel contains loaded data. """ return self.filled # ------------------------------------------------------------------ def read(self, filename, calibrate=True): """Reader for the NWCSAF/MSG precipitating clouds. Use *filename* to read data. """ import h5py self.probability_1 = MsgPCData() self.processing_flags = MsgPCData() h5f = h5py.File(filename, 'r') # pylint: disable-msg=W0212 self.package = h5f.attrs["PACKAGE"] self.saf = h5f.attrs["SAF"] self.product_name = h5f.attrs["PRODUCT_NAME"] self.num_of_columns = h5f.attrs["NC"] self.num_of_lines = h5f.attrs["NL"] self.projection_name = h5f.attrs["PROJECTION_NAME"] self.region_name = h5f.attrs["REGION_NAME"] self.cfac = h5f.attrs["CFAC"] self.lfac = h5f.attrs["LFAC"] self.coff = h5f.attrs["COFF"] self.loff = h5f.attrs["LOFF"] self.nb_param = h5f.attrs["NB_PARAMETERS"] self.gp_sc_id = h5f.attrs["GP_SC_ID"] self.image_acquisition_time = h5f.attrs["IMAGE_ACQUISITION_TIME"] self.spectral_channel_id = h5f.attrs["SPECTRAL_CHANNEL_ID"] self.nominal_product_time = h5f.attrs["NOMINAL_PRODUCT_TIME"] self.sgs_product_quality = h5f.attrs["SGS_PRODUCT_QUALITY"] self.sgs_product_completeness = h5f.attrs["SGS_PRODUCT_COMPLETENESS"] self.product_algorithm_version = h5f.attrs["PRODUCT_ALGORITHM_VERSION"] # pylint: enable-msg=W0212 # ------------------------ # The precipitating clouds data h5d = h5f['PC_PROB1'] self.probability_1.data = h5d[:, :] self.probability_1.scaling_factor = h5d.attrs["SCALING_FACTOR"] self.probability_1.offset = h5d.attrs["OFFSET"] self.probability_1.num_of_lines = h5d.attrs["N_LINES"] self.probability_1.num_of_columns = h5d.attrs["N_COLS"] self.shape = (self.probability_1.num_of_lines, self.probability_1.num_of_columns) self.probability_1.product = h5d.attrs["PRODUCT"] self.probability_1.id = h5d.attrs["ID"] if calibrate: self.probability_1 = (self.probability_1.data * self.probability_1.scaling_factor + self.probability_1.offset) else: self.probability_1 = self.probability_1.data self.probability_1_palette = _get_palette(h5f, 'PC_PROB1') / 255.0 # ------------------------ # The PC processing/quality flags h5d = h5f['PC_QUALITY'] self.processing_flags.data = h5d[:, :] self.processing_flags.scaling_factor = \ h5d.attrs["SCALING_FACTOR"] self.processing_flags.offset = h5d.attrs["OFFSET"] self.processing_flags.num_of_lines = h5d.attrs["N_LINES"] self.processing_flags.num_of_columns = h5d.attrs["N_COLS"] self.processing_flags.product = h5d.attrs["PRODUCT"] self.processing_flags.id = h5d.attrs["ID"] self.processing_flags = np.ma.masked_equal( self.processing_flags.data, 0) # ------------------------ h5f.close() self.area = 
get_area_from_file(filename)
        self.filled = True

    def project(self, coverage):
        """Project the current PC channel along the *coverage*
        """
        dest_area = coverage.out_area
        dest_area_id = dest_area.area_id
        retv = MsgPC()
        retv.probability_1 = coverage.project_array(self.probability_1)
        retv.processing_flags = \
            coverage.project_array(self.processing_flags)
        retv.probability_1_palette = self.probability_1_palette
        retv.area = dest_area
        retv.region_name = dest_area_id
        retv.projection_name = dest_area.proj_id
        retv.num_of_columns = dest_area.x_size
        retv.num_of_lines = dest_area.y_size
        retv.shape = dest_area.shape
        retv.name = self.name
        retv.resolution = self.resolution
        retv.filled = True
        return retv

# ------------------------------------------------------------------
def get_bit_from_flags(arr, nbit):
    """Extract bit *nbit* from the flag array *arr*: returns 1 where the
    bit is set and 0 where it is not, as a signed byte array.
    """
    res = np.bitwise_and(np.right_shift(arr, nbit), 1)
    return res.astype('b')

# New from here on: PyTroll workshop, Copenhagen 2014
class MsgCRRData(object):
    """NWCSAF/MSG Convective Rain Rate data layer
    """

    def __init__(self):
        self.data = None
        self.scaling_factor = 1
        self.offset = 0
        self.num_of_lines = 0
        self.num_of_columns = 0
        self.product = ""
        self.id = ""

class MsgCRR(mpop.channel.GenericChannel):
    """NWCSAF/MSG Convective Rain Rate data structure as retrieved from HDF5
    file. Resolution sets the nominal resolution of the data.
    """

    def __init__(self):
        mpop.channel.GenericChannel.__init__(self, "CRR")
        self.filled = False
        self.name = "CRR"
        # self.resolution = resolution
        self.package = ""
        self.saf = ""
        self.product_name = ""
        self.num_of_columns = 0
        self.num_of_lines = 0
        self.projection_name = ""
        self.pcs_def = ""
        self.xscale = 0
        self.yscale = 0
        self.ll_lon = 0.0
        self.ll_lat = 0.0
        self.ur_lon = 0.0
        self.ur_lat = 0.0
        self.region_name = ""
        self.cfac = 0
        self.lfac = 0
        self.coff = 0
        self.loff = 0
        self.nb_param = 0
        self.gp_sc_id = 0
        self.image_acquisition_time = 0
        self.spectral_channel_id = 0
        self.nominal_product_time = 0
        self.sgs_product_quality = 0
        self.sgs_product_completeness = 0
        self.product_algorithm_version = ""
        self.crr = None
        self.crr_accum = None
        self.crr_intensity = None
        self.crr_quality = None
        self.crr_dataflag = None
        self.processing_flags = None
        self.shape = None
        self.satid = ""
        self.qc_straylight = -1
        self.crr_palette = None
        self.crr_accum_palette = None
        self.crr_intensity_palette = None
        self.crr_quality_palette = None

    def __str__(self):
        return ("'%s: shape %s, resolution %sm'" %
                (self.name, self.crr.shape, self.resolution))

    def is_loaded(self):
        """Tells if the channel contains loaded data.
        """
        return self.filled

    # ------------------------------------------------------------------
    def read(self, filename, calibrate=True):
        """Reader for the NWCSAF/MSG Convective Rain Rate product.
        Use *filename* to read data.
""" import h5py self.crr = MsgCRRData() self.crr_accum = MsgCRRData() self.crr_intensity = MsgCRRData() self.crr_quality = MsgCRRData() self.processing_flags = MsgCRRData() LOG.debug("Filename = <" + str(filename) + ">") h5f = h5py.File(filename, 'r') # pylint: disable-msg=W0212 self.package = h5f.attrs["PACKAGE"] self.saf = h5f.attrs["SAF"] self.product_name = h5f.attrs["PRODUCT_NAME"] self.num_of_columns = h5f.attrs["NC"] self.num_of_lines = h5f.attrs["NL"] self.projection_name = h5f.attrs["PROJECTION_NAME"] self.region_name = h5f.attrs["REGION_NAME"] self.cfac = h5f.attrs["CFAC"] self.lfac = h5f.attrs["LFAC"] self.coff = h5f.attrs["COFF"] self.loff = h5f.attrs["LOFF"] self.nb_param = h5f.attrs["NB_PARAMETERS"] self.gp_sc_id = h5f.attrs["GP_SC_ID"] self.image_acquisition_time = h5f.attrs["IMAGE_ACQUISITION_TIME"] self.spectral_channel_id = h5f.attrs["SPECTRAL_CHANNEL_ID"] self.nominal_product_time = h5f.attrs["NOMINAL_PRODUCT_TIME"] self.sgs_product_quality = h5f.attrs["SGS_PRODUCT_QUALITY"] self.sgs_product_completeness = h5f.attrs["SGS_PRODUCT_COMPLETENESS"] self.product_algorithm_version = h5f.attrs["PRODUCT_ALGORITHM_VERSION"] # pylint: enable-msg=W0212 # ------------------------ # The CRR data h5d = h5f['CRR'] self.crr.data = h5d[:, :] self.crr.scaling_factor = h5d.attrs["SCALING_FACTOR"] self.crr.offset = h5d.attrs["OFFSET"] self.crr.num_of_lines = h5d.attrs["N_LINES"] self.crr.num_of_columns = h5d.attrs["N_COLS"] self.shape = (self.crr.num_of_lines, self.crr.num_of_columns) self.crr.product = h5d.attrs["PRODUCT"] self.crr.id = h5d.attrs["ID"] if calibrate: self.crr = (self.crr.data * self.crr.scaling_factor + self.crr.offset) else: self.crr = self.crr.data self.crr_palette = _get_palette(h5f, 'CRR') / 255.0 # ------------------------ # The CRR ACCUM data h5d = h5f['CRR_ACCUM'] self.crr_accum.data = h5d[:, :] self.crr_accum.scaling_factor = h5d.attrs["SCALING_FACTOR"] self.crr_accum.offset = h5d.attrs["OFFSET"] self.crr_accum.num_of_lines = h5d.attrs["N_LINES"] self.crr_accum.num_of_columns = h5d.attrs["N_COLS"] self.crr_accum.product = h5d.attrs["PRODUCT"] self.crr_accum.id = h5d.attrs["ID"] if calibrate: self.crr_accum = (self.crr_accum.data * self.crr_accum.scaling_factor + self.crr_accum.offset) else: self.crr_accum = self.crr_accum.data self.crr_accum_palette = _get_palette(h5f, 'CRR_ACCUM') / 255.0 # ------------------------ # The CRR Intensity data h5d = h5f['CRR_INTENSITY'] self.crr_intensity.data = h5d[:, :] self.crr_intensity.scaling_factor = h5d.attrs["SCALING_FACTOR"] self.crr_intensity.offset = h5d.attrs["OFFSET"] self.crr_intensity.num_of_lines = h5d.attrs["N_LINES"] self.crr_intensity.num_of_columns = h5d.attrs["N_COLS"] self.crr_intensity.product = h5d.attrs["PRODUCT"] self.crr_intensity.id = h5d.attrs["ID"] if calibrate: self.crr_intensity = (self.crr_intensity.data * self.crr_intensity.scaling_factor + self.crr_intensity.offset) else: self.crr_intensity = self.crr_intensity.data self.crr_intensity_palette = _get_palette(h5f, 'CRR_INTENSITY') / 255.0 # ------------------------ # The CRR quality data h5d = h5f['CRR_QUALITY'] self.crr_quality.data = h5d[:, :] self.crr_quality.scaling_factor = h5d.attrs["SCALING_FACTOR"] self.crr_quality.offset = h5d.attrs["OFFSET"] self.crr_quality.num_of_lines = h5d.attrs["N_LINES"] self.crr_quality.num_of_columns = h5d.attrs["N_COLS"] self.crr_quality.product = h5d.attrs["PRODUCT"] self.crr_quality.id = h5d.attrs["ID"] if calibrate: self.crr_quality = (self.crr_quality.data * self.crr_quality.scaling_factor + 
self.crr_quality.offset) else: self.crr_quality = self.crr_quality.data self.crr_quality_palette = _get_palette(h5f, 'CRR_QUALITY') # ------------------------ h5f.close() #self.crr = self.crr.data #self.crr_accum = self.crr_accum.data #self.crr_intensity = self.crr_intensity.data #self.crr_quality = self.crr_quality.data #self.processing_flags = self.processing_flags.data self.area = get_area_from_file(filename) self.filled = True def save(self, filename): """Save the current cloudtype object to hdf *filename*, in pps format. """ import h5py ctype = self.convert2pps() LOG.info("Saving CRR hdf file...") ctype.save(filename) h5f = h5py.File(filename, mode="a") h5f.attrs["straylight_contaminated"] = self.qc_straylight h5f.close() LOG.info("Saving CRR hdf file done !") def project(self, coverage): """Remaps the NWCSAF/MSG CRR to cartographic map-projection on area give by a pre-registered area-id. Faster version of msg_remap! """ LOG.info("Projecting channel %s..." % (self.name)) region = coverage.out_area dest_area = region.area_id retv = MsgCRR() retv.name = self.name retv.package = self.package retv.saf = self.saf retv.product_name = self.product_name retv.region_name = dest_area retv.cfac = self.cfac retv.lfac = self.lfac retv.coff = self.coff retv.loff = self.loff retv.nb_param = self.nb_param retv.gp_sc_id = self.gp_sc_id retv.image_acquisition_time = self.image_acquisition_time retv.spectral_channel_id = self.spectral_channel_id retv.nominal_product_time = self.nominal_product_time retv.sgs_product_quality = self.sgs_product_quality retv.sgs_product_completeness = self.sgs_product_completeness retv.product_algorithm_version = self.product_algorithm_version retv.crr = coverage.project_array(self.crr) retv.crr_palette = self.crr_palette retv.crr_accum = coverage.project_array(self.crr_accum) retv.crr_accum_palette = self.crr_accum_palette retv.crr_intensity = coverage.project_array(self.crr_intensity) retv.crr_intensity_palette = self.crr_intensity_palette retv.crr_quality = coverage.project_array(self.crr_quality) retv.crr_quality_palette = self.crr_quality_palette #retv.processing_flags = \ # coverage.project_array(self.processing_flags) retv.qc_straylight = self.qc_straylight retv.region_name = dest_area retv.area = region retv.projection_name = region.proj_id retv.pcs_def = pcs_def_from_region(region) retv.num_of_columns = region.x_size retv.num_of_lines = region.y_size retv.xscale = region.pixel_size_x retv.yscale = region.pixel_size_y import pyproj prj = pyproj.Proj(region.proj4_string) aex = region.area_extent lonur, latur = prj(aex[2], aex[3], inverse=True) lonll, latll = prj(aex[0], aex[1], inverse=True) retv.ll_lon = lonll retv.ll_lat = latll retv.ur_lon = lonur retv.ur_lat = latur self.shape = region.shape retv.filled = True retv.resolution = self.resolution return retv # def convert2nordrad(self): # return NordRadCType(self) class MsgSPhRData(object): """NWCSAF/MSG Convective Rain Rate data layer """ def __init__(self): self.data = None self.scaling_factor = 1 self.offset = 0 self.num_of_lines = 0 self.num_of_columns = 0 self.product = "" self.id = "" class MsgSPhR(mpop.channel.GenericChannel): """NWCSAF/MSG SPhR data structure as retrieved from HDF5 file. Resolution sets the nominal resolution of the data. 
Note: palettes are missing for some of the datasets.
    """

    def __init__(self):
        mpop.channel.GenericChannel.__init__(self, "SPhR")
        self.filled = False
        self.name = "SPhR"
        # self.resolution = resolution
        self.package = ""
        self.saf = ""
        self.product_name = ""
        self.num_of_columns = 0
        self.num_of_lines = 0
        self.projection_name = ""
        self.pcs_def = ""
        self.xscale = 0
        self.yscale = 0
        self.ll_lon = 0.0
        self.ll_lat = 0.0
        self.ur_lon = 0.0
        self.ur_lat = 0.0
        self.region_name = ""
        self.cfac = 0
        self.lfac = 0
        self.coff = 0
        self.loff = 0
        self.nb_param = 0
        self.gp_sc_id = 0
        self.image_acquisition_time = 0
        self.spectral_channel_id = 0
        self.nominal_product_time = 0
        self.sgs_product_quality = 0
        self.sgs_product_completeness = 0
        self.product_algorithm_version = ""
        self.sphr = None
        self.sphr_bl = None
        self.sphr_cape = None
        self.sphr_diffbl = None
        self.sphr_diffhl = None
        self.sphr_diffki = None
        self.sphr_diffli = None
        self.sphr_diffml = None
        self.sphr_diffshw = None
        self.sphr_difftpw = None
        self.sphr_hl = None
        self.sphr_ki = None
        self.sphr_li = None
        self.sphr_ml = None
        self.sphr_quality = None
        self.sphr_sflag = None
        self.sphr_shw = None
        self.sphr_tpw = None
        self.processing_flags = None
        self.shape = None
        self.satid = ""
        self.qc_straylight = -1
        self.sphr_bl_palette = None
        self.sphr_cape_palette = None
        self.sphr_diffbl_palette = None
        self.sphr_diffhl_palette = None
        self.sphr_diffki_palette = None
        self.sphr_diffli_palette = None
        self.sphr_diffml_palette = None
        self.sphr_diffshw_palette = None
        self.sphr_difftpw_palette = None
        self.sphr_hl_palette = None
        self.sphr_ki_palette = None
        self.sphr_li_palette = None
        self.sphr_ml_palette = None
        self.sphr_quality_palette = None
        self.sphr_sflag_palette = None
        self.sphr_shw_palette = None
        self.sphr_tpw_palette = None

    def __str__(self):
        return ("'%s: shape %s, resolution %sm'" %
                (self.name, self.sphr_bl.shape, self.resolution))

    def is_loaded(self):
        """Tells if the channel contains loaded data.
        """
        return self.filled

    # ------------------------------------------------------------------
    def read(self, filename, calibrate=True):
        """Reader for the NWCSAF/MSG SPhR product. Use *filename* to read
        data.
        """
        import h5py  # is this first line necessary?
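        # All SPhR datasets below are decoded with the same convention: raw
        # counts in the valid range [8, 128] are converted to physical units
        # as "counts * SCALING_FACTOR + OFFSET", while counts outside that
        # range are zeroed by the boolean mask. A minimal sketch with
        # hypothetical numbers (not taken from a real product file):
        #
        #   counts = np.array([0, 8, 64, 200])      # 0 and 200 are invalid
        #   mask = (8 <= counts) * (counts <= 128)  # [False, True, True, False]
        #   phys = mask * (counts * 0.5 - 10.0)     # [0., -6., 22., 0.]
        #
        # Invalid pixels thus end up as 0 rather than as masked values.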
self.sphr = MsgSPhRData() self.sphr_bl = MsgSPhRData() self.sphr_cape = MsgSPhRData() self.sphr_diffbl = MsgSPhRData() self.sphr_diffhl = MsgSPhRData() self.sphr_diffki = MsgSPhRData() self.sphr_diffli = MsgSPhRData() self.sphr_diffml = MsgSPhRData() self.sphr_diffshw = MsgSPhRData() self.sphr_difftpw = MsgSPhRData() self.sphr_hl = MsgSPhRData() self.sphr_ki = MsgSPhRData() self.sphr_li = MsgSPhRData() self.sphr_ml = MsgSPhRData() self.sphr_quality = MsgSPhRData() self.sphr_sflag = MsgSPhRData() self.sphr_shw = MsgSPhRData() self.sphr_tpw = MsgSPhRData() self.processing_flags = MsgSPhRData() LOG.debug("Filename = <" + str(filename) + ">") h5f = h5py.File(filename, 'r') # pylint: disable-msg=W0212 self.package = h5f.attrs["PACKAGE"] self.saf = h5f.attrs["SAF"] self.product_name = h5f.attrs["PRODUCT_NAME"] self.num_of_columns = h5f.attrs["NC"] self.num_of_lines = h5f.attrs["NL"] self.projection_name = h5f.attrs["PROJECTION_NAME"] self.region_name = h5f.attrs["REGION_NAME"] self.cfac = h5f.attrs["CFAC"] self.lfac = h5f.attrs["LFAC"] self.coff = h5f.attrs["COFF"] self.loff = h5f.attrs["LOFF"] self.nb_param = h5f.attrs["NB_PARAMETERS"] self.gp_sc_id = h5f.attrs["GP_SC_ID"] self.image_acquisition_time = h5f.attrs["IMAGE_ACQUISITION_TIME"] self.spectral_channel_id = h5f.attrs["SPECTRAL_CHANNEL_ID"] self.nominal_product_time = h5f.attrs["NOMINAL_PRODUCT_TIME"] self.sgs_product_quality = h5f.attrs["SGS_PRODUCT_QUALITY"] self.sgs_product_completeness = h5f.attrs["SGS_PRODUCT_COMPLETENESS"] self.product_algorithm_version = h5f.attrs["PRODUCT_ALGORITHM_VERSION"] # pylint: enable-msg=W0212 # ------------------------ # The SPhR BL data h5d = h5f['SPhR_BL'] self.sphr_bl.data = h5d[:, :] self.sphr_bl.scaling_factor = h5d.attrs["SCALING_FACTOR"] self.sphr_bl.offset = h5d.attrs["OFFSET"] self.sphr_bl.num_of_lines = h5d.attrs["N_LINES"] self.sphr_bl.num_of_columns = h5d.attrs["N_COLS"] self.shape = (self.sphr_bl.num_of_lines, self.sphr_bl.num_of_columns) self.sphr_bl.product = h5d.attrs["PRODUCT"] self.sphr_bl.id = h5d.attrs["ID"] if calibrate: mask = ( 8 <= self.sphr_bl.data ) * ( self.sphr_bl.data <= 128 ) # apply scaling factor and offset self.sphr_bl = mask * (self.sphr_bl.data * self.sphr_bl.scaling_factor + self.sphr_bl.offset) else: self.sphr_bl = self.sphr_bl.data self.sphr_bl_palette = _get_palette(h5f, 'SPhR_BL') / 255.0 # The SPhR Cape data h5d = h5f['SPhR_CAPE'] self.sphr_cape.data = h5d[:, :] self.sphr_cape.scaling_factor = h5d.attrs["SCALING_FACTOR"] self.sphr_cape.offset = h5d.attrs["OFFSET"] self.sphr_cape.num_of_lines = h5d.attrs["N_LINES"] self.sphr_cape.num_of_columns = h5d.attrs["N_COLS"] self.shape = (self.sphr_cape.num_of_lines, self.sphr_cape.num_of_columns) self.sphr_cape.product = h5d.attrs["PRODUCT"] self.sphr_cape.id = h5d.attrs["ID"] if calibrate: mask = ( 128 < self.sphr_cape.data ) # apply scaling factor and offset self.sphr_cape = mask * (self.sphr_cape.data * self.sphr_cape.scaling_factor + self.sphr_cape.offset) else: self.sphr_cape = self.sphr_cape.data #self.sphr_cape_palette = _get_palette(h5f, 'SPhR_CAPE') / 255.0 # The SPhR DIFFBL data h5d = h5f['SPhR_DIFFBL'] self.sphr_diffbl.data = h5d[:, :] self.sphr_diffbl.scaling_factor = h5d.attrs["SCALING_FACTOR"] self.sphr_diffbl.offset = h5d.attrs["OFFSET"] self.sphr_diffbl.num_of_lines = h5d.attrs["N_LINES"] self.sphr_diffbl.num_of_columns = h5d.attrs["N_COLS"] self.shape = (self.sphr_diffbl.num_of_lines, self.sphr_diffbl.num_of_columns) self.sphr_diffbl.product = h5d.attrs["PRODUCT"] self.sphr_diffbl.id = h5d.attrs["ID"] if 
calibrate: mask = ( 8 <= self.sphr_diffbl.data ) * ( self.sphr_diffbl.data <= 128 ) # apply scaling factor and offset self.sphr_diffbl = mask * (self.sphr_diffbl.data * self.sphr_diffbl.scaling_factor + self.sphr_diffbl.offset) else: self.sphr_diffbl = self.sphr_diffbl.data self.sphr_diffbl_palette = _get_palette(h5f, 'SPhR_DIFFBL') / 255.0 # The SPhR DIFFHL data h5d = h5f['SPhR_DIFFHL'] self.sphr_diffhl.data = h5d[:, :] self.sphr_diffhl.scaling_factor = h5d.attrs["SCALING_FACTOR"] self.sphr_diffhl.offset = h5d.attrs["OFFSET"] self.sphr_diffhl.num_of_lines = h5d.attrs["N_LINES"] self.sphr_diffhl.num_of_columns = h5d.attrs["N_COLS"] self.shape = (self.sphr_diffhl.num_of_lines, self.sphr_diffhl.num_of_columns) self.sphr_diffhl.product = h5d.attrs["PRODUCT"] self.sphr_diffhl.id = h5d.attrs["ID"] if calibrate: mask = ( 8 <= self.sphr_diffhl.data ) * ( self.sphr_diffhl.data <= 128 ) # apply scaling factor and offset self.sphr_diffhl = mask * (self.sphr_diffhl.data * self.sphr_diffhl.scaling_factor + self.sphr_diffhl.offset) else: self.sphr_diffhl = self.sphr_diffhl.data self.sphr_diffhl_palette = _get_palette(h5f, 'SPhR_DIFFHL') / 255.0 # The SPhR DIFFKI data h5d = h5f['SPhR_DIFFKI'] self.sphr_diffki.data = h5d[:, :] self.sphr_diffki.scaling_factor = h5d.attrs["SCALING_FACTOR"] self.sphr_diffki.offset = h5d.attrs["OFFSET"] self.sphr_diffki.num_of_lines = h5d.attrs["N_LINES"] self.sphr_diffki.num_of_columns = h5d.attrs["N_COLS"] self.shape = (self.sphr_diffki.num_of_lines, self.sphr_diffki.num_of_columns) self.sphr_diffki.product = h5d.attrs["PRODUCT"] self.sphr_diffki.id = h5d.attrs["ID"] if calibrate: mask = ( 8 <= self.sphr_diffki.data ) * ( self.sphr_diffki.data <= 128 ) # apply scaling factor and offset self.sphr_diffki = mask * (self.sphr_diffki.data * self.sphr_diffki.scaling_factor + self.sphr_diffki.offset) else: self.sphr_diffki = self.sphr_diffki.data self.sphr_diffki_palette = _get_palette(h5f, 'SPhR_DIFFKI') / 255.0 # The SPhR DIFFLI data h5d = h5f['SPhR_DIFFLI'] self.sphr_diffli.data = h5d[:, :] self.sphr_diffli.scaling_factor = h5d.attrs["SCALING_FACTOR"] self.sphr_diffli.offset = h5d.attrs["OFFSET"] self.sphr_diffli.num_of_lines = h5d.attrs["N_LINES"] self.sphr_diffli.num_of_columns = h5d.attrs["N_COLS"] self.shape = (self.sphr_diffli.num_of_lines, self.sphr_diffli.num_of_columns) self.sphr_diffli.product = h5d.attrs["PRODUCT"] self.sphr_diffli.id = h5d.attrs["ID"] if calibrate: mask = ( 8 <= self.sphr_diffli.data ) * ( self.sphr_diffli.data <= 128 ) # apply scaling factor and offset self.sphr_diffli = mask * (self.sphr_diffli.data * self.sphr_diffli.scaling_factor + self.sphr_diffli.offset) else: self.sphr_diffli= self.sphr_diffli.data self.sphr_diffli_palette = _get_palette(h5f, 'SPhR_DIFFLI') / 255.0 # The SPhR DIFFML data h5d = h5f['SPhR_DIFFML'] self.sphr_diffml.data = h5d[:, :] self.sphr_diffml.scaling_factor = h5d.attrs["SCALING_FACTOR"] self.sphr_diffml.offset = h5d.attrs["OFFSET"] self.sphr_diffml.num_of_lines = h5d.attrs["N_LINES"] self.sphr_diffml.num_of_columns = h5d.attrs["N_COLS"] self.shape = (self.sphr_diffml.num_of_lines, self.sphr_diffml.num_of_columns) self.sphr_diffml.product = h5d.attrs["PRODUCT"] self.sphr_diffml.id = h5d.attrs["ID"] if calibrate: mask = ( 8 <= self.sphr_diffml.data ) * ( self.sphr_diffml.data <= 128 ) # apply scaling factor and offset self.sphr_diffml = mask * (self.sphr_diffml.data * self.sphr_diffml.scaling_factor + self.sphr_diffml.offset) else: self.sphr_diffml = self.sphr_diffml.data self.sphr_diffml_palette = _get_palette(h5f, 
'SPhR_DIFFML') / 255.0 # The SPhR DIFFSHW data h5d = h5f['SPhR_DIFFSHW'] self.sphr_diffshw.data = h5d[:, :] self.sphr_diffshw.scaling_factor = h5d.attrs["SCALING_FACTOR"] self.sphr_diffshw.offset = h5d.attrs["OFFSET"] self.sphr_diffshw.num_of_lines = h5d.attrs["N_LINES"] self.sphr_diffshw.num_of_columns = h5d.attrs["N_COLS"] self.shape = (self.sphr_diffshw.num_of_lines, self.sphr_diffshw.num_of_columns) self.sphr_diffshw.product = h5d.attrs["PRODUCT"] self.sphr_diffshw.id = h5d.attrs["ID"] if calibrate: mask = ( 8 <= self.sphr_diffshw.data ) * ( self.sphr_diffshw.data <= 128 ) # apply scaling factor and offset self.sphr_diffshw = mask * (self.sphr_diffshw.data * self.sphr_diffshw.scaling_factor + self.sphr_diffshw.offset) else: self.sphr_diffshw = self.sphr_diffshw.data self.sphr_diffshw_palette = _get_palette(h5f, 'SPhR_DIFFSHW') / 255.0 # The SPhR DIFFTPW data h5d = h5f['SPhR_DIFFTPW'] self.sphr_difftpw.data = h5d[:, :] self.sphr_difftpw.scaling_factor = h5d.attrs["SCALING_FACTOR"] self.sphr_difftpw.offset = h5d.attrs["OFFSET"] self.sphr_difftpw.num_of_lines = h5d.attrs["N_LINES"] self.sphr_difftpw.num_of_columns = h5d.attrs["N_COLS"] self.shape = (self.sphr_difftpw.num_of_lines, self.sphr_difftpw.num_of_columns) self.sphr_difftpw.product = h5d.attrs["PRODUCT"] self.sphr_difftpw.id = h5d.attrs["ID"] if calibrate: mask = ( 8 <= self.sphr_difftpw.data ) * ( self.sphr_difftpw.data <= 128 ) # apply scaling factor and offset self.sphr_difftpw = mask * (self.sphr_difftpw.data * self.sphr_difftpw.scaling_factor + self.sphr_difftpw.offset) else: self.sphr_difftpw = self.sphr_difftpw.data self.sphr_difftpw_palette = _get_palette(h5f, 'SPhR_DIFFTPW') / 255.0 # The SPhR HL data h5d = h5f['SPhR_HL'] self.sphr_hl.data = h5d[:, :] self.sphr_hl.scaling_factor = h5d.attrs["SCALING_FACTOR"] self.sphr_hl.offset = h5d.attrs["OFFSET"] self.sphr_hl.num_of_lines = h5d.attrs["N_LINES"] self.sphr_hl.num_of_columns = h5d.attrs["N_COLS"] self.shape = (self.sphr_hl.num_of_lines, self.sphr_hl.num_of_columns) self.sphr_hl.product = h5d.attrs["PRODUCT"] self.sphr_hl.id = h5d.attrs["ID"] if calibrate: mask = ( 8 <= self.sphr_hl.data ) * ( self.sphr_hl.data <= 128 ) # apply scaling factor and offset self.sphr_hl = mask * (self.sphr_hl.data * self.sphr_hl.scaling_factor + self.sphr_hl.offset) else: self.sphr_hl = self.sphr_hl.data self.sphr_hl_palette = _get_palette(h5f, 'SPhR_HL') / 255.0 # The SPhR KI data h5d = h5f['SPhR_KI'] self.sphr_ki.data = h5d[:, :] self.sphr_ki.scaling_factor = h5d.attrs["SCALING_FACTOR"] self.sphr_ki.offset = h5d.attrs["OFFSET"] self.sphr_ki.num_of_lines = h5d.attrs["N_LINES"] self.sphr_ki.num_of_columns = h5d.attrs["N_COLS"] self.shape = (self.sphr_ki.num_of_lines, self.sphr_ki.num_of_columns) self.sphr_ki.product = h5d.attrs["PRODUCT"] self.sphr_ki.id = h5d.attrs["ID"] if calibrate: mask = ( 8 <= self.sphr_ki.data ) * ( self.sphr_ki.data <= 128 ) # apply scaling factor and offset self.sphr_ki = mask * (self.sphr_ki.data * self.sphr_ki.scaling_factor + self.sphr_ki.offset) else: self.sphr_ki = self.sphr_ki.data self.sphr_ki_palette = _get_palette(h5f, 'SPhR_KI') / 255.0 # The SPhR LI data h5d = h5f['SPhR_LI'] self.sphr_li.data = h5d[:, :] self.sphr_li.scaling_factor = h5d.attrs["SCALING_FACTOR"] self.sphr_li.offset = h5d.attrs["OFFSET"] self.sphr_li.num_of_lines = h5d.attrs["N_LINES"] self.sphr_li.num_of_columns = h5d.attrs["N_COLS"] self.shape = (self.sphr_li.num_of_lines, self.sphr_li.num_of_columns) self.sphr_li.product = h5d.attrs["PRODUCT"] self.sphr_li.id = h5d.attrs["ID"] if 
calibrate: mask = ( 8 <= self.sphr_li.data ) * ( self.sphr_li.data <= 128 ) # apply scaling factor and offset self.sphr_li = mask * (self.sphr_li.data * self.sphr_li.scaling_factor + self.sphr_li.offset) else: self.sphr_li = self.sphr_li.data self.sphr_li_palette = _get_palette(h5f, 'SPhR_LI') / 255.0 # The SPhR ML data h5d = h5f['SPhR_ML'] self.sphr_ml.data = h5d[:, :] self.sphr_ml.scaling_factor = h5d.attrs["SCALING_FACTOR"] self.sphr_ml.offset = h5d.attrs["OFFSET"] self.sphr_ml.num_of_lines = h5d.attrs["N_LINES"] self.sphr_ml.num_of_columns = h5d.attrs["N_COLS"] self.shape = (self.sphr_ml.num_of_lines, self.sphr_ml.num_of_columns) self.sphr_ml.product = h5d.attrs["PRODUCT"] self.sphr_ml.id = h5d.attrs["ID"] if calibrate: mask = ( 8 <= self.sphr_ml.data ) * ( self.sphr_ml.data <= 128 ) # apply scaling factor and offset self.sphr_ml = mask * (self.sphr_ml.data * self.sphr_ml.scaling_factor + self.sphr_ml.offset) else: self.sphr_ml = self.sphr_ml.data self.sphr_ml_palette = _get_palette(h5f, 'SPhR_ML') / 255.0 # The SPhR QUALITY data h5d = h5f['SPhR_QUALITY'] self.sphr_quality.data = h5d[:, :] self.sphr_quality.scaling_factor = h5d.attrs["SCALING_FACTOR"] self.sphr_quality.offset = h5d.attrs["OFFSET"] self.sphr_quality.num_of_lines = h5d.attrs["N_LINES"] self.sphr_quality.num_of_columns = h5d.attrs["N_COLS"] self.shape = (self.sphr_quality.num_of_lines, self.sphr_quality.num_of_columns) self.sphr_quality.product = h5d.attrs["PRODUCT"] self.sphr_quality.id = h5d.attrs["ID"] if calibrate: mask = (self.sphr_quality.data != 0 ) # apply scaling factor and offset self.sphr_quality = mask * (self.sphr_quality.data * self.sphr_quality.scaling_factor + self.sphr_quality.offset) else: self.sphr_quality = self.sphr_quality.data # The SPhR SFLAG data h5d = h5f['SPhR_SFLAG'] self.sphr_sflag.data = h5d[:, :] self.sphr_sflag.scaling_factor = h5d.attrs["SCALING_FACTOR"] self.sphr_sflag.offset = h5d.attrs["OFFSET"] self.sphr_sflag.num_of_lines = h5d.attrs["N_LINES"] self.sphr_sflag.num_of_columns = h5d.attrs["N_COLS"] self.shape = (self.sphr_sflag.num_of_lines, self.sphr_sflag.num_of_columns) self.sphr_sflag.product = h5d.attrs["PRODUCT"] self.sphr_sflag.id = h5d.attrs["ID"] self.sphr_sflag = self.sphr_sflag.data # The SPhR SHW data h5d = h5f['SPhR_SHW'] self.sphr_shw.data = h5d[:, :] self.sphr_shw.scaling_factor = h5d.attrs["SCALING_FACTOR"] self.sphr_shw.offset = h5d.attrs["OFFSET"] self.sphr_shw.num_of_lines = h5d.attrs["N_LINES"] self.sphr_shw.num_of_columns = h5d.attrs["N_COLS"] self.shape = (self.sphr_shw.num_of_lines, self.sphr_shw.num_of_columns) self.sphr_shw.product = h5d.attrs["PRODUCT"] self.sphr_shw.id = h5d.attrs["ID"] if calibrate: mask = ( 8 <= self.sphr_shw.data ) * ( self.sphr_shw.data <= 128 ) # apply scaling factor and offset self.sphr_shw = mask * (self.sphr_shw.data * self.sphr_shw.scaling_factor + self.sphr_shw.offset) else: self.sphr_shw = self.sphr_shw.data self.sphr_shw_palette = _get_palette(h5f, 'SPhR_SHW') / 255.0 # The SPhR TPW data h5d = h5f['SPhR_TPW'] self.sphr_tpw.data = h5d[:, :] self.sphr_tpw.scaling_factor = h5d.attrs["SCALING_FACTOR"] self.sphr_tpw.offset = h5d.attrs["OFFSET"] self.sphr_tpw.num_of_lines = h5d.attrs["N_LINES"] self.sphr_tpw.num_of_columns = h5d.attrs["N_COLS"] self.shape = (self.sphr_tpw.num_of_lines, self.sphr_tpw.num_of_columns) self.sphr_tpw.product = h5d.attrs["PRODUCT"] self.sphr_tpw.id = h5d.attrs["ID"] if calibrate: mask = ( 8 <= self.sphr_tpw.data ) * ( self.sphr_tpw.data <= 128 ) # apply scaling factor and offset self.sphr_tpw = mask * 
(self.sphr_tpw.data * self.sphr_tpw.scaling_factor +
                                         self.sphr_tpw.offset)
            LOG.debug("SPhR TPW min/max: %s %s"
                      % (self.sphr_tpw.min(), self.sphr_tpw.max()))
        else:
            self.sphr_tpw = self.sphr_tpw.data
        self.sphr_tpw_palette = _get_palette(h5f, 'SPhR_TPW') / 255.0

        # ------------------------
        h5f.close()

        #self.sphr = self.sphr.data
        #self.sphr_bl = self.sphr_bl.data
        #self.sphr_cape = self.sphr_cape.data
        #self.sphr_diffbl = self.sphr_diffbl.data
        #self.sphr_diffhl = self.sphr_diffhl.data
        #self.sphr_diffki = self.sphr_diffki.data
        #self.sphr_diffli = self.sphr_diffli.data
        #self.sphr_diffml = self.sphr_diffml.data
        #self.sphr_diffshw = self.sphr_diffshw.data
        #self.sphr_difftpw = self.sphr_difftpw.data
        #self.sphr_hl = self.sphr_hl.data
        #self.sphr_ki = self.sphr_ki.data
        #self.sphr_li = self.sphr_li.data
        #self.sphr_ml = self.sphr_ml.data
        #self.sphr_quality = self.sphr_quality.data
        #self.sphr_sflag = self.sphr_sflag.data
        #self.sphr_shw = self.sphr_shw.data
        #self.sphr_tpw = self.sphr_tpw.data

        self.processing_flags = self.processing_flags.data

        self.area = get_area_from_file(filename)
        self.filled = True

    def project(self, coverage):
        """Remaps the NWCSAF/MSG SPhR product to a cartographic
        map-projection on an area given by a pre-registered area-id.
        Faster version of msg_remap!
        """
        LOG.info("Projecting channel %s..." % (self.name))
        region = coverage.out_area
        dest_area = region.area_id

        retv = MsgSPhR()
        retv.name = self.name
        retv.package = self.package
        retv.saf = self.saf
        retv.product_name = self.product_name
        retv.region_name = dest_area
        retv.cfac = self.cfac
        retv.lfac = self.lfac
        retv.coff = self.coff
        retv.loff = self.loff
        retv.nb_param = self.nb_param
        retv.gp_sc_id = self.gp_sc_id
        retv.image_acquisition_time = self.image_acquisition_time
        retv.spectral_channel_id = self.spectral_channel_id
        retv.nominal_product_time = self.nominal_product_time
        retv.sgs_product_quality = self.sgs_product_quality
        retv.sgs_product_completeness = self.sgs_product_completeness
        retv.product_algorithm_version = self.product_algorithm_version

        retv.sphr_bl = coverage.project_array(self.sphr_bl)
        retv.sphr_bl_palette = self.sphr_bl_palette
        retv.sphr_ml = coverage.project_array(self.sphr_ml)
        retv.sphr_ml_palette = self.sphr_ml_palette
        retv.sphr_hl = coverage.project_array(self.sphr_hl)
        retv.sphr_hl_palette = self.sphr_hl_palette
        retv.sphr_ki = coverage.project_array(self.sphr_ki)
        retv.sphr_ki_palette = self.sphr_ki_palette
        retv.sphr_li = coverage.project_array(self.sphr_li)
        retv.sphr_li_palette = self.sphr_li_palette
        retv.sphr_tpw = coverage.project_array(self.sphr_tpw)
        retv.sphr_tpw_palette = self.sphr_tpw_palette
        retv.sphr_cape = coverage.project_array(self.sphr_cape)
        # no sphr_cape_palette
        retv.sphr_quality = coverage.project_array(self.sphr_quality)
        # no sphr_quality_palette
        retv.sphr_sflag = coverage.project_array(self.sphr_sflag)
        # no sphr_sflag_palette
        retv.sphr_shw = coverage.project_array(self.sphr_shw)
        retv.sphr_shw_palette = self.sphr_shw_palette
        retv.sphr_diffbl = coverage.project_array(self.sphr_diffbl)
        retv.sphr_diffbl_palette = self.sphr_diffbl_palette
        retv.sphr_diffml = coverage.project_array(self.sphr_diffml)
        retv.sphr_diffml_palette = self.sphr_diffml_palette
        retv.sphr_diffhl = coverage.project_array(self.sphr_diffhl)
        retv.sphr_diffhl_palette = self.sphr_diffhl_palette
        retv.sphr_diffki = coverage.project_array(self.sphr_diffki)
        retv.sphr_diffki_palette = self.sphr_diffki_palette
        retv.sphr_diffli = coverage.project_array(self.sphr_diffli)
        retv.sphr_diffli_palette = self.sphr_diffli_palette
        retv.sphr_difftpw = coverage.project_array(self.sphr_difftpw)
        retv.sphr_difftpw_palette = self.sphr_difftpw_palette
        retv.sphr_diffshw = coverage.project_array(self.sphr_diffshw)
        retv.sphr_diffshw_palette = self.sphr_diffshw_palette
        # retv.processing_flags = \
        #    coverage.project_array(self.processing_flags)

        retv.qc_straylight = self.qc_straylight
        retv.region_name = dest_area
        retv.area = region
        retv.projection_name = region.proj_id
        retv.pcs_def = pcs_def_from_region(region)
        retv.num_of_columns = region.x_size
        retv.num_of_lines = region.y_size
        retv.xscale = region.pixel_size_x
        retv.yscale = region.pixel_size_y

        import pyproj
        prj = pyproj.Proj(region.proj4_string)
        aex = region.area_extent
        lonur, latur = prj(aex[2], aex[3], inverse=True)
        lonll, latll = prj(aex[0], aex[1], inverse=True)
        retv.ll_lon = lonll
        retv.ll_lat = latll
        retv.ur_lon = lonur
        retv.ur_lat = latur

        self.shape = region.shape

        retv.filled = True
        retv.resolution = self.resolution

        return retv

class MsgPCPhData(object):

    """NWCSAF/MSG PCPh data layer
    """

    def __init__(self):
        self.data = None
        self.scaling_factor = 1
        self.offset = 0
        self.num_of_lines = 0
        self.num_of_columns = 0
        self.product = ""
        self.id = ""

class MsgPCPh(mpop.channel.GenericChannel):

    """NWCSAF/MSG PCPh data structure as retrieved from HDF5 file.
    Resolution sets the nominal resolution of the data.
    Note: palettes are missing for some of the datasets.
    """

    def __init__(self):
        mpop.channel.GenericChannel.__init__(self, "PCPh")
        self.filled = False
        self.name = "PCPh"
        # self.resolution = resolution
        self.package = ""
        self.saf = ""
        self.product_name = ""
        self.num_of_columns = 0
        self.num_of_lines = 0
        self.projection_name = ""
        self.pcs_def = ""
        self.xscale = 0
        self.yscale = 0
        self.ll_lon = 0.0
        self.ll_lat = 0.0
        self.ur_lon = 0.0
        self.ur_lat = 0.0
        self.region_name = ""
        self.cfac = 0
        self.lfac = 0
        self.coff = 0
        self.loff = 0
        self.nb_param = 0
        self.gp_sc_id = 0
        self.image_acquisition_time = 0
        self.spectral_channel_id = 0
        self.nominal_product_time = 0
        self.sgs_product_quality = 0
        self.sgs_product_completeness = 0
        self.product_algorithm_version = ""
        self.pcph = None
        self.pcph_pc = None
        self.pcph_quality = None
        self.pcph_dataflag = None
        self.processing_flags = None
        self.shape = None
        self.satid = ""
        self.qc_straylight = -1
        self.pcph_pc_palette = None
        self.pcph_quality_palette = None
        self.pcph_sflag_palette = None

    def __str__(self):
        return ("'%s: shape %s, resolution %sm'" %
                (self.name, self.pcph_pc.shape, self.resolution))

    def is_loaded(self):
        """Tells if the channel contains loaded data.
        """
        return self.filled

    # ------------------------------------------------------------------
    def read(self, filename, calibrate=True):
        """Reader for the NWCSAF/MSG PCPh product. Use *filename* to read
        data.
        """
        import h5py  # is this first line necessary?
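        # Palettes in the NWCSAF HDF5 files are stored as 8-bit RGB rows
        # (shape (N, 3), values 0-255); dividing by 255.0, as done below,
        # rescales them to the 0.0-1.0 floats used for image generation.
        # A short sketch (hypothetical palette values):
        #
        #   raw = np.array([[0, 0, 0], [255, 128, 0]])
        #   pal = raw / 255.0   # -> [[0., 0., 0.], [1., 0.502, 0.]]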
self.pcph = MsgPCPhData() self.pcph_pc = MsgPCPhData() self.pcph_quality = MsgPCPhData() self.pcph_dataflag = MsgPCPhData() self.processing_flags = MsgPCPhData() LOG.debug("Filename = <" + str(filename) + ">") h5f = h5py.File(filename, 'r') # pylint: disable-msg=W0212 self.package = h5f.attrs["PACKAGE"] self.saf = h5f.attrs["SAF"] self.product_name = h5f.attrs["PRODUCT_NAME"] self.num_of_columns = h5f.attrs["NC"] self.num_of_lines = h5f.attrs["NL"] self.projection_name = h5f.attrs["PROJECTION_NAME"] self.region_name = h5f.attrs["REGION_NAME"] self.cfac = h5f.attrs["CFAC"] self.lfac = h5f.attrs["LFAC"] self.coff = h5f.attrs["COFF"] self.loff = h5f.attrs["LOFF"] self.nb_param = h5f.attrs["NB_PARAMETERS"] self.gp_sc_id = h5f.attrs["GP_SC_ID"] self.image_acquisition_time = h5f.attrs["IMAGE_ACQUISITION_TIME"] self.spectral_channel_id = h5f.attrs["SPECTRAL_CHANNEL_ID"] self.nominal_product_time = h5f.attrs["NOMINAL_PRODUCT_TIME"] self.sgs_product_quality = h5f.attrs["SGS_PRODUCT_QUALITY"] self.sgs_product_completeness = h5f.attrs["SGS_PRODUCT_COMPLETENESS"] self.product_algorithm_version = h5f.attrs["PRODUCT_ALGORITHM_VERSION"] # pylint: enable-msg=W0212 # ------------------------ # The PPh PC data h5d = h5f['PCPh_PC'] self.pcph_pc.data = h5d[:, :] self.pcph_pc.scaling_factor = h5d.attrs["SCALING_FACTOR"] self.pcph_pc.offset = h5d.attrs["OFFSET"] self.pcph_pc.num_of_lines = h5d.attrs["N_LINES"] self.pcph_pc.num_of_columns = h5d.attrs["N_COLS"] self.shape = (self.pcph_pc.num_of_lines, self.pcph_pc.num_of_columns) self.pcph_pc.product = h5d.attrs["PRODUCT"] self.pcph_pc.id = h5d.attrs["ID"] if calibrate: self.pcph_pc = (self.pcph_pc.data * self.pcph_pc.scaling_factor + self.pcph_pc.offset) else: self.pcph_pc = self.pcph_pc.data self.pcph_pc_palette = _get_palette(h5f, 'PCPh_PC') / 255.0 # The PPh QUALITY data h5d = h5f['PCPh_QUALITY'] self.pcph_quality.data = h5d[:, :] self.pcph_quality.scaling_factor = h5d.attrs["SCALING_FACTOR"] self.pcph_quality.offset = h5d.attrs["OFFSET"] self.pcph_quality.num_of_lines = h5d.attrs["N_LINES"] self.pcph_quality.num_of_columns = h5d.attrs["N_COLS"] self.shape = (self.pcph_quality.num_of_lines, self.pcph_quality.num_of_columns) self.pcph_quality.product = h5d.attrs["PRODUCT"] self.pcph_quality.id = h5d.attrs["ID"] # The PPh DATA FLAG data h5d = h5f['PCPh_DATAFLAG'] self.pcph_dataflag.data = h5d[:, :] self.pcph_dataflag.scaling_factor = h5d.attrs["SCALING_FACTOR"] self.pcph_dataflag.offset = h5d.attrs["OFFSET"] self.pcph_dataflag.num_of_lines = h5d.attrs["N_LINES"] self.pcph_dataflag.num_of_columns = h5d.attrs["N_COLS"] self.shape = (self.pcph_dataflag.num_of_lines, self.pcph_dataflag.num_of_columns) self.pcph_dataflag.product = h5d.attrs["PRODUCT"] self.pcph_dataflag.id = h5d.attrs["ID"] # ------------------------ h5f.close() self.processing_flags = self.processing_flags.data self.area = get_area_from_file(filename) self.filled = True def project(self, coverage): """Remaps the NWCSAF/MSG PCPh to cartographic map-projection on area give by a pre-registered area-id. Faster version of msg_remap! """ LOG.info("Projecting channel %s..." 
% (self.name))
        region = coverage.out_area
        dest_area = region.area_id

        retv = MsgPCPh()
        retv.name = self.name
        retv.package = self.package
        retv.saf = self.saf
        retv.product_name = self.product_name
        retv.region_name = dest_area
        retv.cfac = self.cfac
        retv.lfac = self.lfac
        retv.coff = self.coff
        retv.loff = self.loff
        retv.nb_param = self.nb_param
        retv.gp_sc_id = self.gp_sc_id
        retv.image_acquisition_time = self.image_acquisition_time
        retv.spectral_channel_id = self.spectral_channel_id
        retv.nominal_product_time = self.nominal_product_time
        retv.sgs_product_quality = self.sgs_product_quality
        retv.sgs_product_completeness = self.sgs_product_completeness
        retv.product_algorithm_version = self.product_algorithm_version

        retv.pcph_pc = coverage.project_array(self.pcph_pc)
        retv.pcph_pc_palette = self.pcph_pc_palette
        #retv.processing_flags = \
        #    coverage.project_array(self.processing_flags)

        retv.qc_straylight = self.qc_straylight
        retv.region_name = dest_area
        retv.area = region
        retv.projection_name = region.proj_id
        retv.pcs_def = pcs_def_from_region(region)
        retv.num_of_columns = region.x_size
        retv.num_of_lines = region.y_size
        retv.xscale = region.pixel_size_x
        retv.yscale = region.pixel_size_y

        import pyproj
        prj = pyproj.Proj(region.proj4_string)
        aex = region.area_extent
        lonur, latur = prj(aex[2], aex[3], inverse=True)
        lonll, latll = prj(aex[0], aex[1], inverse=True)
        retv.ll_lon = lonll
        retv.ll_lat = latll
        retv.ur_lon = lonur
        retv.ur_lat = latur

        self.shape = region.shape

        retv.filled = True
        retv.resolution = self.resolution

        return retv

class MsgCRPhData(object):

    """NWCSAF/MSG CRPh layer
    """

    def __init__(self):
        self.data = None
        self.scaling_factor = 1
        self.offset = 0
        self.num_of_lines = 0
        self.num_of_columns = 0
        self.product = ""
        self.id = ""

class MsgCRPh(mpop.channel.GenericChannel):

    """NWCSAF/MSG CRPh data structure as retrieved from HDF5 file.
    Resolution sets the nominal resolution of the data.
    Note: palettes are missing for some of the datasets.
    """

    def __init__(self):
        mpop.channel.GenericChannel.__init__(self, "CRPh")
        self.filled = False
        self.name = "CRPh"
        # self.resolution = resolution
        self.package = ""
        self.saf = ""
        self.product_name = ""
        self.num_of_columns = 0
        self.num_of_lines = 0
        self.projection_name = ""
        self.pcs_def = ""
        self.xscale = 0
        self.yscale = 0
        self.ll_lon = 0.0
        self.ll_lat = 0.0
        self.ur_lon = 0.0
        self.ur_lat = 0.0
        self.region_name = ""
        self.cfac = 0
        self.lfac = 0
        self.coff = 0
        self.loff = 0
        self.nb_param = 0
        self.gp_sc_id = 0
        self.image_acquisition_time = 0
        self.spectral_channel_id = 0
        self.nominal_product_time = 0
        self.sgs_product_quality = 0
        self.sgs_product_completeness = 0
        self.product_algorithm_version = ""
        self.crph = None
        self.crph_crr = None
        self.crph_accum = None
        self.crph_iqf = None
        self.crph_quality = None
        self.crph_dataflag = None
        self.processing_flags = None
        self.shape = None
        self.satid = ""
        self.qc_straylight = -1
        self.crph_pc_palette = None
        self.crph_quality_palette = None
        self.crph_sflag_palette = None

    def __str__(self):
        return ("'%s: shape %s, resolution %sm'" %
                (self.name, self.crph_crr.shape, self.resolution))

    def is_loaded(self):
        """Tells if the channel contains loaded data.
        """
        return self.filled

    # ------------------------------------------------------------------
    def read(self, filename, calibrate=True):
        """Reader for the NWCSAF/MSG CRPh product. Use *filename* to read
        data.
        """
        import h5py  # is this first line necessary?
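        # The reader below follows the same plain h5py access pattern as the
        # other product classes in this module: global attributes come from
        # "h5f.attrs", per-dataset attributes from "h5d.attrs". A minimal
        # self-contained sketch (file and dataset names are hypothetical):
        #
        #   import h5py
        #   h5f = h5py.File("example_product.h5", "r")
        #   h5d = h5f["SOME_DATASET"]
        #   data = h5d[:, :]                    # full 2-D read into numpy
        #   gain = h5d.attrs["SCALING_FACTOR"]  # per-dataset attribute
        #   h5f.close()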
self.crph = MsgCRPhData() self.crph_crr = MsgCRPhData() self.crph_accum = MsgCRPhData() self.crph_iqf = MsgCRPhData() self.crph_quality = MsgCRPhData() self.crph_dataflag = MsgCRPhData() self.processing_flags = MsgCRPhData() LOG.debug("Filename = <" + str(filename) + ">") h5f = h5py.File(filename, 'r') # pylint: disable-msg=W0212 self.package = h5f.attrs["PACKAGE"] self.saf = h5f.attrs["SAF"] self.product_name = h5f.attrs["PRODUCT_NAME"] self.num_of_columns = h5f.attrs["NC"] self.num_of_lines = h5f.attrs["NL"] self.projection_name = h5f.attrs["PROJECTION_NAME"] self.region_name = h5f.attrs["REGION_NAME"] self.cfac = h5f.attrs["CFAC"] self.lfac = h5f.attrs["LFAC"] self.coff = h5f.attrs["COFF"] self.loff = h5f.attrs["LOFF"] self.nb_param = h5f.attrs["NB_PARAMETERS"] self.gp_sc_id = h5f.attrs["GP_SC_ID"] self.image_acquisition_time = h5f.attrs["IMAGE_ACQUISITION_TIME"] self.spectral_channel_id = h5f.attrs["SPECTRAL_CHANNEL_ID"] self.nominal_product_time = h5f.attrs["NOMINAL_PRODUCT_TIME"] self.sgs_product_quality = h5f.attrs["SGS_PRODUCT_QUALITY"] self.sgs_product_completeness = h5f.attrs["SGS_PRODUCT_COMPLETENESS"] self.product_algorithm_version = h5f.attrs["PRODUCT_ALGORITHM_VERSION"] # pylint: enable-msg=W0212 # ------------------------ # The CRPh CRR data h5d = h5f['CRPh_CRR'] self.crph_crr.data = h5d[:, :] self.crph_crr.scaling_factor = h5d.attrs["SCALING_FACTOR"] self.crph_crr.offset = h5d.attrs["OFFSET"] self.crph_crr.num_of_lines = h5d.attrs["N_LINES"] self.crph_crr.num_of_columns = h5d.attrs["N_COLS"] self.shape = (self.crph_crr.num_of_lines, self.crph_crr.num_of_columns) self.crph_crr.product = h5d.attrs["PRODUCT"] self.crph_crr.id = h5d.attrs["ID"] if calibrate: self.crph_crr = (self.crph_crr.data * self.crph_crr.scaling_factor + self.crph_crr.offset) else: self.crph_crr = self.crph_crr.data self.crph_crr_palette = _get_palette(h5f, 'CRPh_CRR') / 255.0 # The CRPh ACCUM data h5d = h5f['CRPh_ACUM'] self.crph_accum.data = h5d[:, :] self.crph_accum.scaling_factor = h5d.attrs["SCALING_FACTOR"] self.crph_accum.offset = h5d.attrs["OFFSET"] self.crph_accum.num_of_lines = h5d.attrs["N_LINES"] self.crph_accum.num_of_columns = h5d.attrs["N_COLS"] self.shape = (self.crph_accum.num_of_lines, self.crph_accum.num_of_columns) self.crph_accum.product = h5d.attrs["PRODUCT"] self.crph_accum.id = h5d.attrs["ID"] if calibrate: self.crph_accum = (self.crph_accum.data * self.crph_accum.scaling_factor + self.crph_accum.offset) else: self.crph_accum = self.crph_accum.data self.crph_accum_palette = _get_palette(h5f, 'CRPh_ACUM') / 255.0 # The CRPH IQF data h5d = h5f['CRPh_IQF'] self.crph_iqf.data = h5d[:, :] self.crph_iqf.scaling_factor = h5d.attrs["SCALING_FACTOR"] self.crph_iqf.offset = h5d.attrs["OFFSET"] self.crph_iqf.num_of_lines = h5d.attrs["N_LINES"] self.crph_iqf.num_of_columns = h5d.attrs["N_COLS"] self.shape = (self.crph_iqf.num_of_lines, self.crph_iqf.num_of_columns) self.crph_iqf.product = h5d.attrs["PRODUCT"] self.crph_iqf.id = h5d.attrs["ID"] # The CRPh QUALITY data h5d = h5f['CRPh_QUALITY'] self.crph_quality.data = h5d[:, :] self.crph_quality.scaling_factor = h5d.attrs["SCALING_FACTOR"] self.crph_quality.offset = h5d.attrs["OFFSET"] self.crph_quality.num_of_lines = h5d.attrs["N_LINES"] self.crph_quality.num_of_columns = h5d.attrs["N_COLS"] self.shape = (self.crph_quality.num_of_lines, self.crph_quality.num_of_columns) self.crph_quality.product = h5d.attrs["PRODUCT"] self.crph_quality.id = h5d.attrs["ID"] # The CRPh DATA FLAG data h5d = h5f['CRPh_DATAFLAG'] self.crph_dataflag.data = h5d[:, :] 
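        # The dataflag dataset read above is bit-coded; individual flags can
        # be extracted with the get_bit_from_flags() helper defined earlier
        # in this module. Hedged example (which information bit 0 carries is
        # an assumption, not taken from the product documentation):
        #
        #   flag0 = get_bit_from_flags(self.crph_dataflag.data, 0)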
self.crph_dataflag.scaling_factor = h5d.attrs["SCALING_FACTOR"] self.crph_dataflag.offset = h5d.attrs["OFFSET"] self.crph_dataflag.num_of_lines = h5d.attrs["N_LINES"] self.crph_dataflag.num_of_columns = h5d.attrs["N_COLS"] self.shape = (self.crph_dataflag.num_of_lines, self.crph_dataflag.num_of_columns) self.crph_dataflag.product = h5d.attrs["PRODUCT"] self.crph_dataflag.id = h5d.attrs["ID"] # ------------------------ h5f.close() self.processing_flags = self.processing_flags.data self.area = get_area_from_file(filename) self.filled = True def project(self, coverage): """Remaps the NWCSAF/MSG CRPh to cartographic map-projection on area give by a pre-registered area-id. Faster version of msg_remap! """ LOG.info("Projecting channel %s..." % (self.name)) region = coverage.out_area dest_area = region.area_id retv = MsgCRPh() retv.name = self.name retv.package = self.package retv.saf = self.saf retv.product_name = self.product_name retv.region_name = dest_area retv.cfac = self.cfac retv.lfac = self.lfac retv.coff = self.coff retv.loff = self.loff retv.nb_param = self.nb_param retv.gp_sc_id = self.gp_sc_id retv.image_acquisition_time = self.image_acquisition_time retv.spectral_channel_id = self.spectral_channel_id retv.nominal_product_time = self.nominal_product_time retv.sgs_product_quality = self.sgs_product_quality retv.sgs_product_completeness = self.sgs_product_completeness retv.product_algorithm_version = self.product_algorithm_version retv.crph_crr = coverage.project_array(self.crph_crr) retv.crph_crr_palette = self.crph_crr_palette retv.crph_accum = coverage.project_array(self.crph_accum) retv.crph_accum_palette = self.crph_accum_palette # retv.processing_flags = \ # coverage.project_array(self.processing_flags) retv.qc_straylight = self.qc_straylight retv.region_name = dest_area retv.area = region retv.projection_name = region.proj_id retv.pcs_def = pcs_def_from_region(region) retv.num_of_columns = region.x_size retv.num_of_lines = region.y_size retv.xscale = region.pixel_size_x retv.yscale = region.pixel_size_y import pyproj prj = pyproj.Proj(region.proj4_string) aex = region.area_extent lonur, latur = prj(aex[2], aex[3], inverse=True) lonll, latll = prj(aex[0], aex[1], inverse=True) retv.ll_lon = lonll retv.ll_lat = latll retv.ur_lon = lonur retv.ur_lat = latur self.shape = region.shape retv.filled = True retv.resolution = self.resolution return retv """ NEU ENDE """ MSG_PGE_EXTENTIONS = ["PLAX.CTTH.0.h5", "PLAX.CLIM.0.h5", "h5"] def get_best_product(filename, area_extent): """Get the best of the available products for the *filename* template. """ for ext in MSG_PGE_EXTENTIONS: match_str = filename + "." + ext LOG.debug("glob-string for filename: " + str(match_str)) flist = glob.glob(match_str) if len(flist) == 0: LOG.warning("No matching %s.%s input MSG file." % (filename, ext)) else: # File found: if area_extent is None: LOG.warning("Didn't specify an area, taking " + flist[0]) return flist[0] for fname in flist: aex = get_area_extent(fname) #import pdb # pdb.set_trace() if np.all(np.max(np.abs(np.array(aex) - np.array(area_extent))) < 1000): LOG.info("MSG file found: %s" % fname) return fname LOG.info("Did not find any MSG file for specified area") def get_best_products(filename, area_extent): """Get the best of the available products for the *filename* template. """ filenames = [] for ext in MSG_PGE_EXTENTIONS: match_str = filename + "." 
+ ext
        LOG.debug('Match string = ' + str(match_str))
        flist = glob.glob(match_str)
        if len(flist) == 0:
            LOG.warning("No matching %s.%s input MSG file." % (filename, ext))
        else:
            # File found:
            if area_extent is None:
                LOG.warning("Didn't specify an area, taking " + flist[0])
                filenames.append(flist[0])
            else:
                found = False
                for fname in flist:
                    aex = get_area_extent(fname)
                    if np.all(np.max(np.abs(np.array(aex) -
                                            np.array(area_extent))) < 1000):
                        found = True
                        LOG.info("MSG file found: %s" % fname)
                        filenames.append(fname)
                if not found:
                    LOG.info("Did not find any MSG file for specified area")
    LOG.debug("Sorted filenames: %s", str(sorted(filenames)))
    return sorted(filenames)

def get_area_from_file(filename):
    """Get the area from the h5 file.
    """
    from pyresample.geometry import AreaDefinition
    import h5py

    aex = get_area_extent(filename)
    h5f = h5py.File(filename, 'r')
    pname = h5f.attrs["PROJECTION_NAME"]
    proj = {}
    if pname.startswith("GEOS"):
        proj["proj"] = "geos"
        proj["a"] = "6378169.0"
        proj["b"] = "6356583.8"
        proj["h"] = "35785831.0"
        proj["lon_0"] = str(float(pname.split("<")[1][:-1]))
    else:
        raise NotImplementedError("Only geos projection supported yet.")

    #h5f.attrs["REGION_NAME"]  # alps
    #pname                     # GEOS<+009.5>
    #proj   # {'a': '6378169.0', 'h': '35785831.0', 'b': '6356583.8', 'lon_0': '9.5', 'proj': 'geos'}
    #int(h5f.attrs["NC"])      # 349
    #int(h5f.attrs["NL"])      # 151
    #aex    # (-613578.17189778585, 4094060.208733994, 433553.97518292483, 4547101.2335793395)
    area_def = AreaDefinition(h5f.attrs["REGION_NAME"],
                              h5f.attrs["REGION_NAME"],
                              pname, proj,
                              int(h5f.attrs["NC"]),
                              int(h5f.attrs["NL"]),
                              aex)
    h5f.close()
    return area_def

def load(scene, **kwargs):
    """Load data into the *channels*. *channels* is a list or a tuple
    containing the channels we will load data into. If None, all channels
    are loaded.
    """
    LOG.info("Reading NWC-SAF data with nwcsaf_msg.py: "
             + str(scene.channels_to_load))

    area_extent = kwargs.get("area_extent")
    calibrate = kwargs.get("calibrate", True)

    conf = ConfigParser.ConfigParser()
    conf.read(os.path.join(CONFIG_PATH, scene.fullname + ".cfg"))
    directory = conf.get(scene.instrument_name + "-level3", "dir", raw=True)
    filename_raw = conf.get(scene.instrument_name + "-level3", "filename",
                            raw=True)
    pathname = os.path.join(directory, filename_raw)

    LOG.debug("Inside load: " + str(scene.channels_to_load))

    if "CloudMask" in scene.channels_to_load:
        filename_wildcards = (scene.time_slot.strftime(pathname)
                              % {"number": "01", "product": "CMa__"})
        filename = get_best_product(filename_wildcards, area_extent)
        if filename is not None:
            ct_chan = MsgCloudMask()
            ct_chan.read(filename, calibrate)
            ct_chan.satid = (scene.satname.capitalize() +
                             str(scene.sat_nr()).rjust(2))
            ct_chan.resolution = ct_chan.area.pixel_size_x
            scene.channels.append(ct_chan)

    if "CloudType" in scene.channels_to_load:
        filename_wildcards = (scene.time_slot.strftime(pathname)
                              % {"number": "02", "product": "CT___"})
        filenames = get_best_products(filename_wildcards, area_extent)
        if len(filenames) > 0:
            filename = filenames[-1]
        else:
            LOG.info("Did not find any MSG file for specified area")
            return
        ct_chan = MsgCloudType()
        ct_chan.read(filenames[-1])
        LOG.debug("Uncorrected file: %s", filename)
        ct_chan.name = "CloudType"
        ct_chan.satid = (scene.satname.capitalize() +
                         str(scene.sat_nr()).rjust(2))
        ct_chan.resolution = ct_chan.area.pixel_size_x
        scene.channels.append(ct_chan)

    if "CloudType_plax" in scene.channels_to_load:
        filename_wildcards = (scene.time_slot.strftime(pathname)
                              % {"number": "02", "product": "CT___"})
        filenames = get_best_products(filename_wildcards, area_extent)
        if len(filenames) > 0:
            filename = filenames[0]
        else:
            LOG.info("Did not find any MSG file for specified area")
            return
        ct_chan_plax = MsgCloudType()
        if filename is not None:
            LOG.debug("Parallax corrected file: %s", filename)
            ct_chan_plax.read(filename)
            ct_chan_plax.name = "CloudType_plax"
            ct_chan_plax.satid = (scene.satname.capitalize() +
                                  str(scene.sat_nr()).rjust(2))
            ct_chan_plax.resolution = ct_chan_plax.area.pixel_size_x
            scene.channels.append(ct_chan_plax)

    if "CTTH" in scene.channels_to_load:
        filename_wildcards = (scene.time_slot.strftime(pathname)
                              % {"number": "03", "product": "CTTH_"})
        filename = get_best_product(filename_wildcards, area_extent)
        if filename is not None:
            ct_chan = MsgCTTH()
            ct_chan.read(filename, calibrate)
            LOG.debug("Satellite number: " + str(scene.sat_nr()))
            ct_chan.satid = (scene.satname[0:8].capitalize() +
                             str(scene.sat_nr()).rjust(2))
            LOG.debug("Satellite id: " + ct_chan.satid)  # e.g. "Meteosat 9"
            ct_chan.resolution = ct_chan.area.pixel_size_x
            scene.channels.append(ct_chan)

    if "CRR" in scene.channels_to_load:
        filename_wildcards = (scene.time_slot.strftime(pathname)
                              % {"number": "05", "product": "CRR__"})
        filename = get_best_product(filename_wildcards, area_extent)
        if filename is not None:
            ct_chan = MsgCRR()
            ct_chan.read(filename, calibrate)
            # !!! changed, as we create another channel named 'CRR' when
            # transforming the format:
            ct_chan.name = "CRR_"
            ct_chan.satid = (scene.satname.capitalize() +
                             str(scene.sat_nr()).rjust(2))
            ct_chan.resolution = ct_chan.area.pixel_size_x
            scene.channels.append(ct_chan)

    if "PC" in scene.channels_to_load:
        filename_wildcards = (scene.time_slot.strftime(pathname)
                              % {"number": "04", "product": "PC___"})
        filename = get_best_product(filename_wildcards, area_extent)
        if filename is not None:
            ct_chan = MsgPC()
            ct_chan.read(filename, calibrate)
            ct_chan.name = "PC"
            ct_chan.satid = (scene.satname.capitalize() +
                             str(scene.sat_nr()).rjust(2))
            ct_chan.resolution = ct_chan.area.pixel_size_x
            scene.channels.append(ct_chan)

    if "SPhR" in scene.channels_to_load:
        filename_wildcards = (scene.time_slot.strftime(pathname)
                              % {"number": "13", "product": "SPhR_"})
        filename = get_best_product(filename_wildcards, area_extent)
        if filename is not None:
            ct_chan = MsgSPhR()
            ct_chan.read(filename, calibrate)
            ct_chan.name = "SPhR"
            ct_chan.satid = (scene.satname.capitalize() +
                             str(scene.sat_nr()).rjust(2))
            ct_chan.resolution = ct_chan.area.pixel_size_x
            scene.channels.append(ct_chan)

    if "PCPh" in scene.channels_to_load:
        filename_wildcards = (scene.time_slot.strftime(pathname)
                              % {"number": "14", "product": "PCPh_"})
        filename = get_best_product(filename_wildcards, area_extent)
        if filename is not None:
            ct_chan = MsgPCPh()
            ct_chan.read(filename, calibrate)
            ct_chan.name = "PCPh_"
            ct_chan.satid = (scene.satname.capitalize() +
                             str(scene.sat_nr()).rjust(2))
            ct_chan.resolution = ct_chan.area.pixel_size_x
            scene.channels.append(ct_chan)

    if "CRPh" in scene.channels_to_load:
        filename_wildcards = (scene.time_slot.strftime(pathname)
                              % {"number": "14", "product": "CRPh_"})
        filename = get_best_product(filename_wildcards, area_extent)
        if filename is not None:
            ct_chan = MsgCRPh()
            ct_chan.read(filename, calibrate)
            ct_chan.name = "CRPh_"
            ct_chan.satid = (scene.satname.capitalize() +
                             str(scene.sat_nr()).rjust(2))
            ct_chan.resolution = ct_chan.area.pixel_size_x
            scene.channels.append(ct_chan)

    if 'filename' in locals() and filename is not None:
        # print "nwcsaf_msg", len(filename), filename
        if len(filename) > 12:
            sat_nr = int(basename(filename)[10:11]) + 7
            if int(scene.sat_nr()) != int(sat_nr):
                LOG.warning("Changing Meteosat number to " + str(sat_nr)
                            + " (input: " + scene.sat_nr() + ")")
                #scene.number = str(sat_nr).zfill(2)  # !!! update number !!!
                scene.number = str(sat_nr)

    LOG.info("Loading channels done.")
mpop-1.5.0/mpop/satin/nwcsaf_pps.py000066400000000000000000000515571317160620000173050ustar00rootroot00000000000000#!/usr/bin/env python
# -*- coding: utf-8 -*-

# Copyright (c) 2010, 2012, 2013, 2015.

# Author(s):

#   Martin Raspaud
#   Adam Dybbroe

# This file is part of mpop.

# mpop is free software: you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.

# mpop is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU General Public License for more details.

# You should have received a copy of the GNU General Public License along with
# mpop. If not, see <http://www.gnu.org/licenses/>.

"""Plugin for reading PPS's cloud products hdf files, in the fileformat
used in PPS v2012, and before.
""" import ConfigParser from ConfigParser import NoOptionError from datetime import datetime, timedelta import os.path import mpop.channel from mpop import CONFIG_PATH from mpop.utils import get_logger import numpy as np import h5py LOG = get_logger('satin/nwcsaf_pps') class InfoObject(object): """Simple data and info container. """ def __init__(self): self.info = {} self.data = None def pack_signed(data, data_type): bits = np.iinfo(data_type).bits scale_factor = (data.max() - data.min()) / (2**bits - 2) add_offset = (data.max() - data.min()) / 2 no_data = - 2**(bits - 1) pack = ((data - add_offset) / scale_factor).astype(data_type) return pack, scale_factor, add_offset, no_data class NwcSafPpsChannel(mpop.channel.GenericChannel): def __init__(self, filename=None): mpop.channel.GenericChannel.__init__(self) self._md = {} self._projectables = [] self._keys = [] self._refs = {} self.shape = None if filename: self.read(filename) def read(self, filename, load_lonlat=True): """Read product in hdf format from *filename* """ LOG.debug("Filename: %s" % filename) is_temp = False if not h5py.is_hdf5(filename): # Try see if it is bzipped: import bz2 bz2file = bz2.BZ2File(filename) import tempfile tmpfilename = tempfile.mktemp() try: ofpt = open(tmpfilename, 'wb') ofpt.write(bz2file.read()) ofpt.close() is_temp = True except IOError: import traceback traceback.print_exc() raise IOError("Failed to read the file %s" % filename) filename = tmpfilename if not h5py.is_hdf5(filename): if is_temp: os.remove(filename) raise IOError("File is not a hdf5 file!" % filename) h5f = h5py.File(filename, "r") # Read the global attributes self._md = dict(h5f.attrs) self._md["satellite"] = h5f.attrs['satellite_id'] self._md["orbit"] = h5f.attrs['orbit_number'] self._md["time_slot"] = (timedelta(seconds=long(h5f.attrs['sec_1970'])) + datetime(1970, 1, 1, 0, 0)) # Read the data and attributes # This covers only one level of data. This could be made recursive. for key, dataset in h5f.iteritems(): setattr(self, key, InfoObject()) getattr(self, key).info = dict(dataset.attrs) for skey, value in dataset.attrs.iteritems(): if isinstance(value, h5py.h5r.Reference): self._refs[(key, skey)] = h5f[value].name.split("/")[1] if type(dataset.id) is h5py.h5g.GroupID: LOG.warning("Format reader does not support groups") continue try: getattr(self, key).data = dataset[:] is_palette = (dataset.attrs.get("CLASS", None) == "PALETTE") if(len(dataset.shape) > 1 and not is_palette and key not in ["lon", "lat", "row_indices", "column_indices"]): self._projectables.append(key) if self.shape is None: self.shape = dataset.shape elif self.shape != dataset.shape: raise ValueError("Different variable shapes !") else: self._keys.append(key) except TypeError: setattr(self, key, np.dtype(dataset)) self._keys.append(key) h5f.close() if is_temp: os.remove(filename) if not load_lonlat: return # Setup geolocation # We need a no-data mask from one of the projectables to # mask out bow-tie deletion pixels from the geolocation array # So far only relevant for VIIRS. # Preferably the lon-lat data in the PPS VIIRS geolocation # file should already be masked. # The no-data values in the products are not only where geo-location is absent # Only the Cloud Type can be used as a proxy so far. 
# Adam Dybbroe, 2012-08-31 nodata_mask = False # np.ma.masked_equal(np.ones(self.shape), 0).mask for key in self._projectables: projectable = getattr(self, key) if key in ['cloudtype']: nodata_array = np.ma.array(projectable.data) nodata_mask = np.ma.masked_equal(nodata_array, 0).mask break try: from pyresample import geometry except ImportError: return tiepoint_grid = False if hasattr(self, "row_indices") and hasattr(self, "column_indices"): column_indices = self.column_indices.data row_indices = self.row_indices.data tiepoint_grid = True interpolate = False if hasattr(self, "lon") and hasattr(self, "lat"): if 'intercept' in self.lon.info: offset_lon = self.lon.info["intercept"] elif 'offset' in self.lon.info: offset_lon = self.lon.info["offset"] if 'gain' in self.lon.info: gain_lon = self.lon.info["gain"] lons = self.lon.data * gain_lon + offset_lon if 'intercept' in self.lat.info: offset_lat = self.lat.info["intercept"] elif 'offset' in self.lat.info: offset_lat = self.lat.info["offset"] if 'gain' in self.lat.info: gain_lat = self.lat.info["gain"] lats = self.lat.data * gain_lat + offset_lat if lons.shape != self.shape or lats.shape != self.shape: # Data on tiepoint grid: interpolate = True if not tiepoint_grid: errmsg = ("Interpolation needed but insufficient" + "information on the tiepoint grid") raise IOError(errmsg) else: # Geolocation available on the full grid: # We neeed to mask out nodata (VIIRS Bow-tie deletion...) # We do it for all instruments, checking only against the # nodata lons = np.ma.masked_array(lons, nodata_mask) lats = np.ma.masked_array(lats, nodata_mask) self.area = geometry.SwathDefinition(lons=lons, lats=lats) elif hasattr(self, "region") and self.region.data["area_extent"].any(): region = self.region.data proj_dict = dict([elt.split('=') for elt in region["pcs_def"].split(',')]) self.area = geometry.AreaDefinition(region["id"], region["name"], region["proj_id"], proj_dict, region["xsize"], region["ysize"], region["area_extent"]) if interpolate: from geotiepoints import SatelliteInterpolator cols_full = np.arange(self.shape[1]) rows_full = np.arange(self.shape[0]) satint = SatelliteInterpolator((lons, lats), (row_indices, column_indices), (rows_full, cols_full)) #satint.fill_borders("y", "x") lons, lats = satint.interpolate() self.area = geometry.SwathDefinition(lons=lons, lats=lats) def project(self, coverage): """Project what can be projected in the product. 
""" import copy res = copy.copy(self) # Project the data for var in self._projectables: LOG.info("Projecting " + str(var)) res.__dict__[var] = copy.copy(self.__dict__[var]) res.__dict__[var].data = coverage.project_array( self.__dict__[var].data) # Take care of geolocation res.region = copy.copy(self.region) region = copy.copy(res.region.data) area = coverage.out_area try: # It's an area region["area_extent"] = np.array(area.area_extent) region["xsize"] = area.x_size region["ysize"] = area.y_size region["xscale"] = area.pixel_size_x region["yscale"] = area.pixel_size_y region["lon_0"] = area.proj_dict.get("lon_0", 0) region["lat_0"] = area.proj_dict.get("lat_0", 0) region["lat_ts"] = area.proj_dict.get("lat_ts", 0) region["name"] = area.name region["id"] = area.area_id region["pcs_id"] = area.proj_id pcs_def = ",".join([key + "=" + val for key, val in area.proj_dict.iteritems()]) region["pcs_def"] = pcs_def res.region.data = region # If switching to area representation, try removing lon and lat try: delattr(res, "lon") res._keys.remove("lon") delattr(res, "lat") res._keys.remove("lat") except AttributeError: pass except AttributeError: # It's a swath lons, scale_factor, add_offset, no_data = \ pack_signed(area.lons[:], np.int16) res.lon = InfoObject() res.lon.data = lons res.lon.info["description"] = "geographic longitude (deg)" res.lon.info["intercept"] = add_offset res.lon.info["gain"] = scale_factor res.lon.info["no_data_value"] = no_data if "lon" not in res._keys: res._keys.append("lon") lats, scale_factor, add_offset, no_data = \ pack_signed(area.lats[:], np.int16) res.lat = InfoObject() res.lat.data = lats res.lat.info["description"] = "geographic latitude (deg)" res.lat.info["intercept"] = add_offset res.lat.info["gain"] = scale_factor res.lat.info["no_data_value"] = no_data if "lat" not in res._keys: res._keys.append("lat") # Remove region parameters if switching from area region["area_extent"] = np.zeros(4) region["xsize"] = 0 region["ysize"] = 0 region["xscale"] = 0 region["yscale"] = 0 region["lon_0"] = 0 region["lat_0"] = 0 region["lat_ts"] = 0 region["name"] = "" region["id"] = "" region["pcs_id"] = "" region["pcs_def"] = "" res.region.data = region return res def write(self, filename, **kwargs): """Write product in hdf format to *filename* """ LOG.debug("Writing to " + filename) h5f = h5py.File(filename, "w") for dataset in self._projectables: dset = h5f.create_dataset(dataset, data=getattr(self, dataset).data, compression='gzip', compression_opts=6) for key, value in getattr(self, dataset).info.iteritems(): dset.attrs[key] = value for thing in self._keys: try: dset = h5f.create_dataset(thing, data=getattr(self, thing).data, compression='gzip', compression_opts=6) for key, value in getattr(self, thing).info.iteritems(): dset.attrs[key] = value except AttributeError: h5f[thing] = getattr(self, thing) for key, value in self._md.iteritems(): if key in ["time_slot", "satellite"]: continue h5f.attrs[key] = value for (key, skey), value in self._refs.iteritems(): h5f[key].attrs[skey] = h5f[value].ref h5f.close() def is_loaded(self): """Tells if the channel contains loaded data. 
""" return len(self._projectables) > 0 class CloudType(NwcSafPpsChannel): def __init__(self): NwcSafPpsChannel.__init__(self) self.name = "CloudType" class CloudTopTemperatureHeight(NwcSafPpsChannel): def __init__(self): NwcSafPpsChannel.__init__(self) self.name = "CTTH" class CloudMask(NwcSafPpsChannel): def __init__(self): NwcSafPpsChannel.__init__(self) self.name = "CMa" class PrecipitationClouds(NwcSafPpsChannel): def __init__(self): NwcSafPpsChannel.__init__(self) self.name = "PC" class CloudPhysicalProperties(NwcSafPpsChannel): def __init__(self): NwcSafPpsChannel.__init__(self) self.name = "CPP" def load(scene, geofilename=None, **kwargs): del kwargs import glob lonlat_is_loaded = False products = [] if "CTTH" in scene.channels_to_load: products.append("ctth") if "CloudType" in scene.channels_to_load: products.append("cloudtype") if "CMa" in scene.channels_to_load: products.append("cloudmask") if "PC" in scene.channels_to_load: products.append("precipclouds") if "CPP" in scene.channels_to_load: products.append("cpp") if len(products) == 0: return try: area_name = scene.area_id or scene.area.area_id except AttributeError: area_name = "satproj_?????_?????" conf = ConfigParser.ConfigParser() conf.read(os.path.join(CONFIG_PATH, scene.fullname + ".cfg")) directory = conf.get(scene.instrument_name + "-level3", "dir") try: geodir = conf.get(scene.instrument_name + "-level3", "geodir") except NoOptionError: LOG.warning("No option 'geodir' in level3 section") geodir = None filename = conf.get(scene.instrument_name + "-level3", "filename", raw=True) pathname_tmpl = os.path.join(directory, filename) if not geofilename and geodir: # Load geo file from config file: try: if not scene.orbit: orbit = "" else: orbit = scene.orbit geoname_tmpl = conf.get(scene.instrument_name + "-level3", "geofilename", raw=True) filename_tmpl = (scene.time_slot.strftime(geoname_tmpl) % {"orbit": orbit.zfill(5) or "*", "area": area_name, "satellite": scene.satname + scene.number}) file_list = glob.glob(os.path.join(geodir, filename_tmpl)) if len(file_list) > 1: LOG.warning("More than 1 file matching for geoloaction: " + str(file_list)) elif len(file_list) == 0: LOG.warning("No geolocation file matching!: " + filename_tmpl) else: geofilename = file_list[0] except NoOptionError: geofilename = None classes = {"ctth": CloudTopTemperatureHeight, "cloudtype": CloudType, "cloudmask": CloudMask, "precipclouds": PrecipitationClouds, "cpp": CloudPhysicalProperties } nodata_mask = False chn = None for product in products: LOG.debug("Loading " + product) if not scene.orbit: orbit = "" else: orbit = scene.orbit filename_tmpl = (scene.time_slot.strftime(pathname_tmpl) % {"orbit": orbit.zfill(5) or "*", "area": area_name, "satellite": scene.satname + scene.number, "product": product}) file_list = glob.glob(filename_tmpl) if len(file_list) > 1: LOG.warning("More than 1 file matching for " + product + "! " + str(file_list)) continue elif len(file_list) == 0: LOG.warning("No " + product + " matching!: " + filename_tmpl) continue else: filename = file_list[0] chn = classes[product]() chn.read(filename, lonlat_is_loaded == False) scene.channels.append(chn) # Setup geolocation # We need a no-data mask from one of the projectables to # mask out bow-tie deletion pixels from the geolocation array # So far only relevant for VIIRS. # Preferably the lon-lat data in the PPS VIIRS geolocation # file should already be masked. 
# The no-data values in the products are not only where geo-location is absent # Only the Cloud Type can be used as a proxy so far. # Adam Dybbroe, 2012-08-31 if hasattr(chn, '_projectables'): for key in chn._projectables: projectable = getattr(chn, key) if key in ['cloudtype']: nodata_array = np.ma.array(projectable.data) nodata_mask = np.ma.masked_equal(nodata_array, 0).mask break else: LOG.warning("Channel has no '_projectables' member." + " No nodata-mask set...") if chn is None: return # Is this safe!? AD 2012-08-25 shape = chn.shape interpolate = False if geofilename: geodict = get_lonlat(geofilename) lons, lats = geodict['lon'], geodict['lat'] if lons.shape != shape or lats.shape != shape: interpolate = True row_indices = geodict['row_indices'] column_indices = geodict['column_indices'] lonlat_is_loaded = True else: LOG.warning("No Geo file specified: " + "Geolocation will be loaded from product") if lonlat_is_loaded: if interpolate: from geotiepoints import SatelliteInterpolator cols_full = np.arange(shape[1]) rows_full = np.arange(shape[0]) satint = SatelliteInterpolator((lons, lats), (row_indices, column_indices), (rows_full, cols_full)) #satint.fill_borders("y", "x") lons, lats = satint.interpolate() try: from pyresample import geometry lons = np.ma.masked_array(lons, nodata_mask) lats = np.ma.masked_array(lats, nodata_mask) scene.area = geometry.SwathDefinition(lons=lons, lats=lats) except ImportError: scene.area = None scene.lat = lats scene.lon = lons LOG.info("Loading PPS parameters done.") def get_lonlat(filename): """Read lon,lat from hdf5 file""" import h5py LOG.debug("Geo File = " + filename) h5f = h5py.File(filename, 'r') # We neeed to mask out nodata (VIIRS Bow-tie deletion...) # We do it for all instruments, checking only against the nodata nodata = h5f['where']['lon']['what'].attrs['nodata'] gain = h5f['where']['lon']['what'].attrs['gain'] offset = h5f['where']['lon']['what'].attrs['offset'] longitudes = np.ma.array(h5f['where']['lon']['data'].value) lons = np.ma.masked_equal(longitudes, nodata) * gain + offset latitudes = np.ma.array(h5f['where']['lat']['data'].value) lats = np.ma.masked_equal(latitudes, nodata) * gain + offset col_indices = None row_indices = None if "column_indices" in h5f["where"].keys(): col_indices = h5f['/where/column_indices'].value if "row_indices" in h5f["where"].keys(): row_indices = h5f['/where/row_indices'].value h5f.close() return {'lon': lons, 'lat': lats, 'col_indices': col_indices, 'row_indices': row_indices} # return lons, lats mpop-1.5.0/mpop/satin/nwcsaf_pps_v2014.py000066400000000000000000000517061317160620000201370ustar00rootroot00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright (c) 2010, 2012, 2013. # Author(s): # Martin Raspaud # Adam Dybbroe # Sara Hornquist # This file is part of mpop. # mpop is free software: you can redistribute it and/or modify it under the # terms of the GNU General Public License as published by the Free Software # Foundation, either version 3 of the License, or (at your option) any later # version. # mpop is distributed in the hope that it will be useful, but WITHOUT ANY # WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR # A PARTICULAR PURPOSE. See the GNU General Public License for more details. # You should have received a copy of the GNU General Public License along with # mpop. If not, see . """Plugin for reading PPS's cloud products hdf files, in the fileformat used in PPS v2014. 
""" import ConfigParser from ConfigParser import NoOptionError from datetime import datetime, timedelta import os.path import mpop.channel from mpop import CONFIG_PATH from mpop.utils import get_logger import numpy as np import h5py LOG = get_logger('satin/nwcsaf_pps') class InfoObject(object): """Simple data and info container. """ def __init__(self): self.info = {} self.data = None def pack_signed(data, data_type): bits = np.iinfo(data_type).bits scale_factor = (data.max() - data.min()) / (2**bits - 2) add_offset = (data.max() - data.min()) / 2 no_data = - 2**(bits - 1) pack = ((data - add_offset) / scale_factor).astype(data_type) return pack, scale_factor, add_offset, no_data class NwcSafPpsChannel(mpop.channel.GenericChannel): def __init__(self, filename=None): mpop.channel.GenericChannel.__init__(self) self._md = {} self._projectables = [] self._keys = [] self._refs = {} self.shape = None if filename: self.read(filename) def read(self, filename, load_lonlat=True): """Read product in hdf format from *filename* """ LOG.debug("Filename: %s" % filename) is_temp = False if not h5py.is_hdf5(filename): # Try see if it is bzipped: import bz2 bz2file = bz2.BZ2File(filename) import tempfile tmpfilename = tempfile.mktemp() try: ofpt = open(tmpfilename, 'wb') ofpt.write(bz2file.read()) ofpt.close() is_temp = True except IOError: import traceback traceback.print_exc() raise IOError("Failed to read the file %s" % filename) filename = tmpfilename if not h5py.is_hdf5(filename): if is_temp: os.remove(filename) raise IOError("File is not a hdf5 file!" % filename) h5f = h5py.File(filename, "r") # Read the global attributes self._md = dict(h5f.attrs) self._md["satellite"] = h5f.attrs['platform'] self._md["orbit"] = h5f.attrs['orbit_number'] self._md["time_slot"] = (timedelta(seconds=long(h5f.attrs['sec_1970'])) + datetime(1970, 1, 1, 0, 0)) # Read the data and attributes # This covers only one level of data. This could be made recursive. for key, dataset in h5f.iteritems(): setattr(self, key, InfoObject()) getattr(self, key).info = dict(dataset.attrs) for skey, value in dataset.attrs.iteritems(): if isinstance(value, h5py.h5r.Reference): self._refs[(key, skey)] = h5f[value].name.split("/")[1] if type(dataset.id) is h5py.h5g.GroupID: LOG.warning("Format reader does not support groups") continue try: getattr(self, key).data = dataset[:] is_palette = (dataset.attrs.get("CLASS", None) == "PALETTE") if(len(dataset.shape) > 1 and not is_palette and key not in ["lon", "lat", "row_indices", "column_indices"]): self._projectables.append(key) if self.shape is None: self.shape = dataset.shape elif self.shape != dataset.shape: raise ValueError("Different variable shapes !") else: self._keys.append(key) except TypeError: setattr(self, key, np.dtype(dataset)) self._keys.append(key) h5f.close() if is_temp: os.remove(filename) if not load_lonlat: return # Setup geolocation # We need a no-data mask from one of the projectables to # mask out bow-tie deletion pixels from the geolocation array # So far only relevant for VIIRS. # Preferably the lon-lat data in the PPS VIIRS geolocation # file should already be masked. # The no-data values in the products are not only where geo-location is absent # Only the Cloud Type can be used as a proxy so far. 
# Adam Dybbroe, 2012-08-31 nodata_mask = False #np.ma.masked_equal(np.ones(self.shape), 0).mask for key in self._projectables: projectable = getattr(self, key) if key in ['cloudtype']: nodata_array = np.ma.array(projectable.data) nodata_mask = np.ma.masked_equal(nodata_array, 0).mask break try: from pyresample import geometry except ImportError: return tiepoint_grid = False if hasattr(self, "row_indices") and hasattr(self, "column_indices"): column_indices = self.column_indices.data row_indices = self.row_indices.data tiepoint_grid = True interpolate = False if hasattr(self, "lon") and hasattr(self, "lat"): if 'intercept' in self.lon.info: offset_lon = self.lon.info["intercept"] elif 'offset' in self.lon.info: offset_lon = self.lon.info["offset"] if 'gain' in self.lon.info: gain_lon = self.lon.info["gain"] lons = self.lon.data * gain_lon + offset_lon if 'intercept' in self.lat.info: offset_lat = self.lat.info["intercept"] elif 'offset' in self.lat.info: offset_lat = self.lat.info["offset"] if 'gain' in self.lat.info: gain_lat = self.lat.info["gain"] lats = self.lat.data * gain_lat + offset_lat if lons.shape != self.shape or lats.shape != self.shape: # Data on tiepoint grid: interpolate = True if not tiepoint_grid: errmsg = ("Interpolation needed but insufficient" + "information on the tiepoint grid") raise IOError(errmsg) else: # Geolocation available on the full grid: # We neeed to mask out nodata (VIIRS Bow-tie deletion...) # We do it for all instruments, checking only against the nodata lons = np.ma.masked_array(lons, nodata_mask) lats = np.ma.masked_array(lats, nodata_mask) self.area = geometry.SwathDefinition(lons=lons, lats=lats) elif hasattr(self, "region") and self.region.data["area_extent"].any(): region = self.region.data proj_dict = dict([elt.split('=') for elt in region["pcs_def"].split(',')]) self.area = geometry.AreaDefinition(region["id"], region["name"], region["proj_id"], proj_dict, region["xsize"], region["ysize"], region["area_extent"]) if interpolate: from geotiepoints import SatelliteInterpolator cols_full = np.arange(self.shape[1]) rows_full = np.arange(self.shape[0]) satint = SatelliteInterpolator((lons, lats), (row_indices, column_indices), (rows_full, cols_full)) #satint.fill_borders("y", "x") lons, lats = satint.interpolate() self.area = geometry.SwathDefinition(lons=lons, lats=lats) def project(self, coverage): """Project what can be projected in the product. 
""" import copy res = copy.copy(self) # Project the data for var in self._projectables: LOG.info("Projecting " + str(var)) res.__dict__[var] = copy.copy(self.__dict__[var]) res.__dict__[var].data = coverage.project_array( self.__dict__[var].data) # Take care of geolocation res.region = copy.copy(self.region) region = copy.copy(res.region.data) area = coverage.out_area try: # It's an area region["area_extent"] = np.array(area.area_extent) region["xsize"] = area.x_size region["ysize"] = area.y_size region["xscale"] = area.pixel_size_x region["yscale"] = area.pixel_size_y region["lon_0"] = area.proj_dict.get("lon_0", 0) region["lat_0"] = area.proj_dict.get("lat_0", 0) region["lat_ts"] = area.proj_dict.get("lat_ts", 0) region["name"] = area.name region["id"] = area.area_id region["pcs_id"] = area.proj_id pcs_def = ",".join([key + "=" + val for key, val in area.proj_dict.iteritems()]) region["pcs_def"] = pcs_def res.region.data = region # If switching to area representation, try removing lon and lat try: delattr(res, "lon") res._keys.remove("lon") delattr(res, "lat") res._keys.remove("lat") except AttributeError: pass except AttributeError: # It's a swath lons, scale_factor, add_offset, no_data = \ pack_signed(area.lons[:], np.int16) res.lon = InfoObject() res.lon.data = lons res.lon.info["description"] = "geographic longitude (deg)" res.lon.info["intercept"] = add_offset res.lon.info["gain"] = scale_factor res.lon.info["no_data_value"] = no_data if "lon" not in res._keys: res._keys.append("lon") lats, scale_factor, add_offset, no_data = \ pack_signed(area.lats[:], np.int16) res.lat = InfoObject() res.lat.data = lats res.lat.info["description"] = "geographic latitude (deg)" res.lat.info["intercept"] = add_offset res.lat.info["gain"] = scale_factor res.lat.info["no_data_value"] = no_data if "lat" not in res._keys: res._keys.append("lat") # Remove region parameters if switching from area region["area_extent"] = np.zeros(4) region["xsize"] = 0 region["ysize"] = 0 region["xscale"] = 0 region["yscale"] = 0 region["lon_0"] = 0 region["lat_0"] = 0 region["lat_ts"] = 0 region["name"] = "" region["id"] = "" region["pcs_id"] = "" region["pcs_def"] = "" res.region.data = region return res def write(self, filename): """Write product in hdf format to *filename* """ LOG.debug("Writing to " + filename) h5f = h5py.File(filename, "w") for dataset in self._projectables: dset = h5f.create_dataset(dataset, data=getattr(self, dataset).data, compression='gzip', compression_opts=6) for key, value in getattr(self, dataset).info.iteritems(): dset.attrs[key] = value for thing in self._keys: try: dset = h5f.create_dataset(thing, data=getattr(self, thing).data, compression='gzip', compression_opts=6) for key, value in getattr(self, thing).info.iteritems(): dset.attrs[key] = value except AttributeError: h5f[thing] = getattr(self, thing) for key, value in self._md.iteritems(): if key in ["time_slot", "satellite"]: continue h5f.attrs[key] = value for (key, skey), value in self._refs.iteritems(): h5f[key].attrs[skey] = h5f[value].ref h5f.close() def is_loaded(self): """Tells if the channel contains loaded data. 
""" return len(self._projectables) > 0 class CloudType(NwcSafPpsChannel): def __init__(self): NwcSafPpsChannel.__init__(self) self.name = "CloudType" class CloudTopTemperatureHeight(NwcSafPpsChannel): def __init__(self): NwcSafPpsChannel.__init__(self) self.name = "CTTH" class CloudMask(NwcSafPpsChannel): def __init__(self): NwcSafPpsChannel.__init__(self) self.name = "CMa" class PrecipitationClouds(NwcSafPpsChannel): def __init__(self): NwcSafPpsChannel.__init__(self) self.name = "PC" class CloudPhysicalProperties(NwcSafPpsChannel): def __init__(self): NwcSafPpsChannel.__init__(self) self.name = "CPP" def load(scene, geofilename=None, **kwargs): del kwargs import glob lonlat_is_loaded = False products = [] if "CTTH" in scene.channels_to_load: products.append("CTTH") if "CloudType" in scene.channels_to_load: products.append("CT") if "CMa" in scene.channels_to_load: products.append("CMA") if "PC" in scene.channels_to_load: products.append("PC") if "CPP" in scene.channels_to_load: products.append("CPP") if len(products) == 0: return try: area_name = scene.area_id or scene.area.area_id except AttributeError: area_name = "satproj_?????_?????" conf = ConfigParser.ConfigParser() conf.read(os.path.join(CONFIG_PATH, scene.fullname+".cfg")) directory = conf.get(scene.instrument_name+"-level3", "dir") try: geodir = conf.get(scene.instrument_name+"-level3", "geodir") except NoOptionError: LOG.warning("No option 'geodir' in level3 section") geodir = None filename = conf.get(scene.instrument_name+"-level3", "filename", raw=True) pathname_tmpl = os.path.join(directory, filename) if not geofilename and geodir: # Load geo file from config file: try: if not scene.orbit: orbit = "" else: orbit = scene.orbit geoname_tmpl = conf.get(scene.instrument_name+"-level3", "geofilename", raw=True) filename_tmpl = (scene.time_slot.strftime(geoname_tmpl) %{"orbit": orbit.zfill(5) or "*", "area": area_name, "satellite": scene.satname + scene.number}) file_list = glob.glob(os.path.join(geodir, filename_tmpl)) if len(file_list) > 1: LOG.warning("More than 1 file matching for geoloaction: " + str(file_list)) elif len(file_list) == 0: LOG.warning("No geolocation file matching!: " + filename_tmpl) else: geofilename = file_list[0] except NoOptionError: geofilename = None classes = {"CTTH": CloudTopTemperatureHeight, "CT": CloudType, "CMA": CloudMask, "PC": PrecipitationClouds, "CPP": CloudPhysicalProperties } nodata_mask = False chn = None for product in products: LOG.debug("Loading " + product) if not scene.orbit: orbit = "" else: orbit = scene.orbit filename_tmpl = (scene.time_slot.strftime(pathname_tmpl) %{"orbit": orbit.zfill(5) or "*", "area": area_name, "satellite": scene.satname + scene.number, "product": product}) file_list = glob.glob(filename_tmpl) if len(file_list) > 1: LOG.warning("More than 1 file matching for " + product + "! " + str(file_list)) continue elif len(file_list) == 0: LOG.warning("No " + product + " matching!: " + filename_tmpl) continue else: filename = file_list[0] chn = classes[product]() chn.read(filename, lonlat_is_loaded==False) scene.channels.append(chn) # Setup geolocation # We need a no-data mask from one of the projectables to # mask out bow-tie deletion pixels from the geolocation array # So far only relevant for VIIRS. # Preferably the lon-lat data in the PPS VIIRS geolocation # file should already be masked. # The no-data values in the products are not only where geo-location is absent # Only the Cloud Type can be used as a proxy so far. 
# Adam Dybbroe, 2012-08-31 if hasattr(chn, '_projectables'): for key in chn._projectables: projectable = getattr(chn, key) if key in ['ct']: nodata_array = np.ma.array(projectable.data) nodata_mask = np.ma.masked_equal(\ nodata_array, projectable.info["_FillValue"]).mask break else: LOG.warning("Channel has no '_projectables' member." + " No nodata-mask set...") if chn is None: return # Is this safe!? AD 2012-08-25 shape = chn.shape interpolate = False if geofilename: geodict = get_lonlat(geofilename) lons, lats = geodict['lon'], geodict['lat'] if lons.shape != shape or lats.shape != shape: interpolate = True row_indices = geodict['row_indices'] column_indices = geodict['column_indices'] lonlat_is_loaded = True else: LOG.warning("No Geo file specified: " + "Geolocation will be loaded from product") if lonlat_is_loaded: if interpolate: from geotiepoints import SatelliteInterpolator cols_full = np.arange(shape[1]) rows_full = np.arange(shape[0]) satint = SatelliteInterpolator((lons, lats), (row_indices, column_indices), (rows_full, cols_full)) #satint.fill_borders("y", "x") lons, lats = satint.interpolate() try: from pyresample import geometry lons = np.ma.masked_array(lons, nodata_mask) lats = np.ma.masked_array(lats, nodata_mask) scene.area = geometry.SwathDefinition(lons=lons, lats=lats) except ImportError: scene.area = None scene.lat = lats scene.lon = lons LOG.info("Loading PPS parameters done.") def get_lonlat(filename): """Read lon,lat from hdf5 file""" import h5py LOG.debug("Geo File = " + filename) h5f = h5py.File(filename, 'r') # We neeed to mask out nodata (VIIRS Bow-tie deletion...) # We do it for all instruments, checking only against the nodata nodata = h5f['where']['lon']['what'].attrs['nodata'] gain = h5f['where']['lon']['what'].attrs['gain'] offset = h5f['where']['lon']['what'].attrs['offset'] longitudes = np.ma.array(h5f['where']['lon']['data'].value) lons = np.ma.masked_equal(longitudes, nodata) * gain + offset latitudes = np.ma.array(h5f['where']['lat']['data'].value) lats = np.ma.masked_equal(latitudes, nodata) * gain + offset col_indices = None row_indices = None if "column_indices" in h5f["where"].keys(): col_indices = h5f['/where/column_indices'].value if "row_indices" in h5f["where"].keys(): row_indices = h5f['/where/row_indices'].value h5f.close() return {'lon': lons, 'lat': lats, 'col_indices': col_indices, 'row_indices':row_indices} #return lons, lats mpop-1.5.0/mpop/satin/odim.py000066400000000000000000000024541317160620000160640ustar00rootroot00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright (c) 2012 SMHI # Author(s): # Martin Raspaud # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # You should have received a copy of the GNU General Public License # along with this program. If not, see . """Odim format reader. 
""" filename = "/data/temp/Martin.Raspaud/metop02_20120329_0043_28234_satproj_00000_12959_avhrr.h5" import h5py def get_lonlats(filename): f__ = h5py.File(filename, "r") lat_g = f__["where"]["lat"]["what"].attrs["gain"] lat_o = f__["where"]["lat"]["what"].attrs["offset"] lat = f__["where"]["lat"]["data"][:] * latg + lato lon_g = f__["where"]["lon"]["what"].attrs["gain"] lon_o = f__["where"]["lon"]["what"].attrs["offset"] lon = f__["where"]["lon"]["data"][:] * lon_g + lon_o return lon, lat mpop-1.5.0/mpop/satin/odyssey_radar.py000066400000000000000000000206401317160620000200010ustar00rootroot00000000000000import Image import glob import os from ConfigParser import ConfigParser import numpy as np import numpy.ma as ma from mpop import CONFIG_PATH import pyresample import logging import h5py LOG = logging.getLogger(__name__) ODIM_H5_FIELD_NAMES = { 'TH': 'total_power', # uncorrected reflectivity, horizontal 'TV': 'total_power', # uncorrected reflectivity, vertical 'DBZH': 'reflectivity', # corrected reflectivity, horizontal 'DBZV': 'reflectivity', # corrected reflectivity, vertical 'ZDR': 'differential_reflectivity', # differential reflectivity 'RHOHV': 'cross_correlation_ratio', 'LDR': 'linear_polarization_ratio', 'PHIDP': 'differential_phase', 'KDP': 'specific_differential_phase', 'SQI': 'normalized_coherent_power', 'SNR': 'signal_to_noise_ratio', 'VRAD': 'velocity', 'WRAD': 'spectrum_width', 'QIND': 'quality_index', 'RATE': 'precip', # precip 'ACRR': 'accu_precip', # 1 hour ACCU } def load(satscene, *args, **kwargs): """Loads the *channels* into the satellite *scene*. """ # # Dataset information # # Read config file content conf = ConfigParser() conf.read(os.path.join(CONFIG_PATH, satscene.fullname + ".cfg")) values = {"orbit": satscene.orbit, "satname": satscene.satname, "number": satscene.number, "instrument": satscene.instrument_name, "satellite": satscene.fullname } # projection info projectionName = conf.get("radar-level2", "projection") projection = pyresample.utils.load_area(os.path.join(CONFIG_PATH, "areas.def"), projectionName) satscene.area = projection for chn_name in satscene.channels_to_load: filename = os.path.join( satscene.time_slot.strftime(conf.get("radar-level2", "dir", raw=True)) % values, satscene.time_slot.strftime(conf.get(chn_name, "filename", raw=True)) % values ) # Load data from the h5 file LOG.debug("filename: "+filename) filenames=glob.glob(str(filename)) if len(filenames) == 0: LOG.debug("no input file found: "+filename) print "no input file found:"+filename quit() else: filename = glob.glob(str(filename))[0] # open the file hfile = h5py.File(filename, 'r') odim_object = hfile['what'].attrs['object'] if odim_object != 'COMP': raise NotImplementedError('object: %s not implemented.' % (odim_object)) else: # File structure #>>> hfile.keys() #[u'dataset1', u'dataset2', u'how', u'what', u'where'] #>>> for f in hfile['what'].attrs.keys(): #... print "hfile['what'].attrs['",f,"']=", hfile['what'].attrs[f] # #hfile['what'].attrs[' object ']= COMP #hfile['what'].attrs[' version ']= H5rad 2.0 #hfile['what'].attrs[' date ']= 20151201 #hfile['what'].attrs[' time ']= 060000 #hfile['what'].attrs[' source ']= ORG:247 #>>> for f in hfile['where'].attrs.keys(): #... 
print "hfile['where'].attrs['",f,"']=", hfile['where'].attrs[f] # #hfile['where'].attrs[' projdef ']= +proj=laea +lat_0=55.0 +lon_0=10.0 +x_0=1950000.0 +y_0=-2100000.0 +units=m +ellps=WGS84 #hfile['where'].attrs[' xsize ']= 1900 #hfile['where'].attrs[' ysize ']= 2200 #hfile['where'].attrs[' xscale ']= 2000.0 #hfile['where'].attrs[' yscale ']= 2000.0 #hfile['where'].attrs[' LL_lon ']= -10.4345768386 #hfile['where'].attrs[' LL_lat ']= 31.7462153193 #hfile['where'].attrs[' UL_lon ']= -39.5357864125 #hfile['where'].attrs[' UL_lat ']= 67.0228327583 #hfile['where'].attrs[' UR_lon ']= 57.8119647501 #hfile['where'].attrs[' UR_lat ']= 67.6210371028 #hfile['where'].attrs[' LR_lon ']= 29.4210386356 #hfile['where'].attrs[' LR_lat ']= 31.9876502779 # hfile['how'].attrs['nodes'] # list of radar in composite #>>> for f in hfile['dataset1']['what'].attrs.keys(): #... print "hfile['dataset1'][what].attrs['",f,"']=", hfile['dataset1']['what'].attrs[f] # #hfile['dataset1'][what].attrs[' product ']= COMP #hfile['dataset1'][what].attrs[' startdate ']= 20151201 #hfile['dataset1'][what].attrs[' starttime ']= 055000 #hfile['dataset1'][what].attrs[' enddate ']= 20151201 #hfile['dataset1'][what].attrs[' endtime ']= 060500 #hfile['dataset1'][what].attrs[' quantity ']= RATE #hfile['dataset1'][what].attrs[' gain ']= 1.0 #hfile['dataset1'][what].attrs[' offset ']= 0.0 #hfile['dataset1'][what].attrs[' nodata ']= -9999000.0 #hfile['dataset1'][what].attrs[' undetect ']= -8888000.0 #>>> for f in hfile['dataset2']['what'].attrs.keys(): #... print "hfile['dataset2'][what].attrs['",f,"']=", hfile['dataset2']['what'].attrs[f] # #hfile['dataset2'][what].attrs[' product ']= COMP #hfile['dataset2'][what].attrs[' startdate ']= 20151201 #hfile['dataset2'][what].attrs[' starttime ']= 055000 #hfile['dataset2'][what].attrs[' enddate ']= 20151201 #hfile['dataset2'][what].attrs[' endtime ']= 060500 #hfile['dataset2'][what].attrs[' quantity ']= QIND #hfile['dataset2'][what].attrs[' gain ']= 1.0 #hfile['dataset2'][what].attrs[' offset ']= 0.0 #hfile['dataset2'][what].attrs[' nodata ']= -9999000.0 #hfile['dataset2'][what].attrs[' undetect ']= -8888000.0 _xsize = hfile['where'].attrs['xsize'] _ysize = hfile['where'].attrs['ysize'] #nbins= _xsize * _ysize #projection = hfile['where'].attrs['projdef'] datasets = [k for k in hfile if k.startswith('dataset')] datasets.sort() nsweeps = len(datasets) try: ds1_what = hfile[datasets[0]]['what'].attrs except KeyError: # if no how group exists mock it with an empty dictionary ds1_what = {} _type = '' if 'product' in ds1_what: LOG.debug("product: "+ds1_what['product']) if ds1_what['product'] == 'COMP': if 'quantity' in ds1_what: _type = ds1_what['quantity'] LOG.debug("product_type: "+_type) #for chn_name in satscene.channels_to_load: # if chn_name == _type: raw_data = hfile[datasets[0]]['data1']['data'][:] raw_data = raw_data.reshape(_ysize,_xsize) # flag no data if 'nodata' in ds1_what: nodata = ds1_what['nodata'] data = np.ma.masked_equal(raw_data, nodata) else: data = np.ma.masked_array(raw_data) mask = np.ma.masked_array( raw_data == nodata ) mask = np.ma.masked_equal( mask, False) # flag undetect data if 'undetect' in ds1_what: undetect = ds1_what['undetect'] data[data == undetect] = np.ma.masked #from trollimage.image import Image as trollimage #img = trollimage(mask, mode="L", fill_value=[1,1,1]) # [0,0,0] [1,1,1] #from trollimage.colormap import rainbow #img.colorize(rainbow) #img.show() #quit() # gain/offset adjustment if 'offset' in ds1_what: offset = ds1_what['offset'] else: offset = 0.0 if 
'gain' in ds1_what:
                gain = ds1_what['gain']
            else:
                gain = 1.0

            data = data * gain + offset

            satscene[chn_name] = data
            satscene[chn_name + '-MASK'] = mask

            LOG.debug(" *** channel:" + chn_name)

            units = ''
            if _type == 'DBZH':
                units = 'dBZ'
            if _type == 'RATE':
                units = 'mm/h'
            if _type == 'ACRR':
                units = 'mm'

            satscene[chn_name].info["units"] = units
            LOG.debug("channel:" + chn_name + " units:" + units)
mpop-1.5.0/mpop/satin/pps_hdf.py000066400000000000000000000162441317160620000165610ustar00rootroot00000000000000#!/usr/bin/env python
# -*- coding: utf-8 -*-

# Copyright (c) 2010, 2012.

# SMHI,
# Folkborgsvägen 1,
# Norrköping,
# Sweden

# Author(s):

#   Martin Raspaud
#   Esben S. Nielsen

# This file is part of mpop.

# mpop is free software: you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.

# mpop is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU General Public License for more details.

# You should have received a copy of the GNU General Public License along with
# mpop. If not, see .

"""Plugin for reading PPS's cloud products hdf files.
"""
import ConfigParser
import datetime
import os.path
from glob import glob

import mpop.channel
from mpop import CONFIG_PATH
from mpop.utils import get_logger

LOG = get_logger('satin/pps_hdf')


class PpsCloudType(mpop.channel.GenericChannel):

    def __init__(self):
        mpop.channel.GenericChannel.__init__(self, "CloudType")
        self.region = None
        self.des = ""
        self.cloudtype_des = ""
        self.qualityflag_des = ""
        self.phaseflag_des = ""
        self.sec_1970 = 0
        self.satellite_id = ""
        self.cloudtype_lut = []
        self.qualityflag_lut = []
        self.phaseflag_lut = []
        self.cloudtype = None
        self.qualityflag = None
        self.phaseflag = None

    def copy(self, other):
        self.region = other.region
        self.des = other.des
        self.cloudtype_des = other.cloudtype_des
        self.qualityflag_des = other.qualityflag_des
        self.phaseflag_des = other.phaseflag_des
        self.sec_1970 = other.sec_1970
        self.satellite_id = other.satellite_id
        self.cloudtype_lut = other.cloudtype_lut
        self.qualityflag_lut = other.qualityflag_lut
        self.phaseflag_lut = other.phaseflag_lut
        self.cloudtype = other.cloudtype
        self.qualityflag = other.qualityflag
        self.phaseflag = other.phaseflag

    def read(self, filename):
        import epshdf
        self.copy(epshdf.read_cloudtype(filename))

    def is_loaded(self):
        return self.cloudtype is not None


class PpsCTTH(mpop.channel.GenericChannel):

    def __init__(self):
        mpop.channel.GenericChannel.__init__(self, "CTTH")
        self.region = None
        self.des = ""
        self.ctt_des = ""
        self.cth_des = ""
        self.ctp_des = ""
        self.cloudiness_des = ""
        self.processingflag_des = ""
        self.sec_1970 = 0
        self.satellite_id = ""
        self.processingflag_lut = []
        self.temperature = None
        self.t_gain = 1.0
        self.t_intercept = 0.0
        self.t_nodata = 255
        self.pressure = None
        self.p_gain = 1.0
        self.p_intercept = 0.0
        self.p_nodata = 255
        self.height = None
        self.h_gain = 1.0
        self.h_intercept = 0.0
        self.h_nodata = 255
        self.cloudiness = None
        self.c_nodata = 255
        self.processingflag = None

    def copy(self, other):
        self.region = other.region
        self.des = other.des
        self.ctt_des = other.ctt_des
        self.cth_des = other.cth_des
        self.ctp_des = other.ctp_des
        self.cloudiness_des = other.cloudiness_des
        self.processingflag_des = other.processingflag_des
        self.sec_1970 = other.sec_1970
        self.satellite_id = other.satellite_id
        self.processingflag_lut =
other.processingflag_lut self.temperature = other.temperature self.t_gain = other.t_gain self.t_intercept = other.t_intercept self.t_nodata = other.t_nodata self.pressure = other.pressure self.p_gain = other.p_gain self.p_intercept = other.p_intercept self.p_nodata = other.p_nodata self.height = other.height self.h_gain = other.h_gain self.h_intercept = other.h_intercept self.h_nodata = other.h_nodata self.cloudiness = other.cloudiness self.c_nodata = other.c_nodata self.processingflag = other.processingflag def read(self, filename): import epshdf self.copy(epshdf.read_cloudtop(filename)) def load(scene, **kwargs): """Load data into the *channels*. *Channels* is a list or a tuple containing channels we will load data into. If None, all channels are loaded. """ del kwargs if("CTTH" not in scene.channels_to_load and "CloudType" not in scene.channels_to_load): return conf = ConfigParser.ConfigParser() conf.read(os.path.join(CONFIG_PATH, scene.fullname+".cfg")) directory = conf.get(scene.instrument_name+"-level3", "dir") filename = conf.get(scene.instrument_name+"-level3", "filename", raw=True) pathname = os.path.join(directory, filename) area_name = scene.area_id or scene.area.area_id times = [scene.time_slot, scene.time_slot + datetime.timedelta(minutes=1), scene.time_slot - datetime.timedelta(minutes=1)] if "CTTH" in scene.channels_to_load: for time_slot in times: filename = (time_slot.strftime(pathname) %{"orbit": scene.orbit, "number": scene.number, "area": area_name, "satellite": scene.fullname, "product": "ctth"}) flist = glob(filename) if len(flist) == 0: LOG.info("Can't find " + filename) elif len(flist) > 1: LOG.info("Too many files matching! " + str(flist)) break else: filename = flist[0] break if not os.path.exists(filename): LOG.info("Can't find any CTTH file, skipping") else: ct_chan = PpsCTTH() ct_chan.read(filename) ct_chan.area = scene.area scene.channels.append(ct_chan) if "CloudType" in scene.channels_to_load: for time_slot in times: filename = (time_slot.strftime(pathname) %{"orbit": scene.orbit, "number": scene.number, "area": area_name, "satellite": scene.fullname, "product": "cloudtype"}) flist = glob(filename) if len(flist) == 0: LOG.info("Can't find " + filename) elif len(flist) > 1: LOG.info("Too many files matching! " + str(flist)) break else: filename = flist[0] break if not os.path.exists(filename): LOG.info("Can't find any Cloudtype file, skipping") else: ct_chan = PpsCloudType() ct_chan.read(filename) ct_chan.area = scene.area scene.channels.append(ct_chan) LOG.info("Loading channels done.") mpop-1.5.0/mpop/satin/pps_odim.py000066400000000000000000000260511317160620000167450ustar00rootroot00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright (c) 2012, 2014. # SMHI, # Folkborgsvägen 1, # Norrköping, # Sweden # Author(s): # Adam Dybbroe # This file is part of mpop. # mpop is free software: you can redistribute it and/or modify it under the # terms of the GNU General Public License as published by the Free Software # Foundation, either version 3 of the License, or (at your option) any later # version. # mpop is distributed in the hope that it will be useful, but WITHOUT ANY # WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR # A PARTICULAR PURPOSE. See the GNU General Public License for more details. # You should have received a copy of the GNU General Public License along with # mpop. If not, see . """Interface to PPS level1 radiance data in ODIM format. 
""" import glob import os.path from ConfigParser import ConfigParser from datetime import datetime, timedelta import math import numpy as np from mpop import CONFIG_PATH import logging LOG = logging.getLogger(__name__) import h5py EPSILON = 0.001 class InfoObject(object): """Simple data and info container. """ def __init__(self): self.info = {} self.data = None def pack_signed(data, data_type): bits = np.iinfo(data_type).bits scale_factor = (data.max() - data.min()) / (2 ** bits - 2) add_offset = (data.max() - data.min()) / 2 no_data = - 2 ** (bits - 1) pack = ((data - add_offset) / scale_factor).astype(data_type) return pack, scale_factor, add_offset, no_data def load(satscene, *args, **kwargs): """Read data from file and load it into *satscene*. """ del args, kwargs conf = ConfigParser() conf.read(os.path.join(CONFIG_PATH, satscene.fullname + ".cfg")) options = {} for option, value in conf.items(satscene.instrument_name + "-level2", raw=True): options[option] = value CASES[satscene.instrument_name](satscene, options) def load_channels(satscene, options): """Read avhrr/viirs/modis radiance (tb's and refl) data from file and load it into *satscene*. """ if "filename" not in options: raise IOError("No filename given, cannot load.") LOG.debug("Start loading channels") if satscene.instrument_name in ['avhrr']: chns = satscene.channels_to_load & set( ["1", "2", "3A", "3B", "4", "5"]) elif satscene.instrument_name in ['viirs']: chns = satscene.channels_to_load & set(["M05", "M07", "M10", "M12", "M14", "M15", "M16"]) if len(chns) == 0: return values = {"orbit": satscene.orbit, "satname": satscene.satname, "number": satscene.number, "instrument": satscene.instrument_name, "satellite": satscene.fullname } filename = os.path.join(satscene.time_slot.strftime(options["dir"]) % values, satscene.time_slot.strftime(options["filename"]) % values) file_list = glob.glob(filename) if len(file_list) > 1: raise IOError("More than one l1b file matching!") elif len(file_list) == 0: raise IOError("No PPS level 1 file matching!: " + filename) filename = file_list[0] LOG.debug("Loading from " + filename) available_channels = set([]) data_channels = {} info_channels = {} instrument_data = NwcSafPpsOdim(filename) idx = 1 while hasattr(instrument_data, 'image%d' % idx): channel = getattr(instrument_data, 'image%d' % idx) channel_name = channel.info['channel'].upper() #channel_des = channel.info['description'] available_channels |= set([channel_name]) data_channels[channel_name] = channel.data info_channels[channel_name] = channel.info idx = idx + 1 for chn in satscene.channels_to_load: if chn in available_channels: if info_channels[chn]['quantity'] in ["REFL"]: units = "%" elif info_channels[chn]['quantity'] in ["TB"]: units = "K" else: LOG.warning("Units not known! 
Unit = " + str(info_channels[chn]['quantity'])) gain = info_channels[chn]["gain"] intercept = info_channels[chn]["offset"] chn_array = np.ma.array(data_channels[chn]) missing_data = info_channels[chn]["missingdata"] chn_array = np.ma.masked_inside(chn_array, missing_data - EPSILON, missing_data + EPSILON) no_data = info_channels[chn]["nodata"] chn_array = np.ma.masked_inside(chn_array, no_data - EPSILON, no_data + EPSILON) satscene[chn] = chn_array satscene[chn].data = np.ma.masked_less(satscene[chn].data * gain + intercept, 0) satscene[chn].info['units'] = units else: LOG.warning("Channel " + str(chn) + " not available, not loaded.") # Compulsory global attributes satscene.info["title"] = (satscene.satname.capitalize() + satscene.number + " satellite, " + satscene.instrument_name.capitalize() + " instrument.") satscene.info["institution"] = "Data processed by EUMETSAT NWCSAF/PPS." satscene.add_to_history("PPS level 1 data read by mpop.") satscene.info["references"] = "No reference." satscene.info["comments"] = "No comment." lons = (instrument_data.lon.data * instrument_data.lon.info['gain'] + instrument_data.lon.info['offset']) lats = (instrument_data.lat.data * instrument_data.lat.info['gain'] + instrument_data.lat.info['offset']) try: from pyresample import geometry satscene.area = geometry.SwathDefinition(lons=lons, lats=lats) except ImportError: satscene.area = None satscene.lat = lats satscene.lon = lons class NwcSafPpsOdim(object): def __init__(self, filename=None): self._how = {} self._what = {} self._projectables = [] self._keys = [] self._refs = {} self.lon = None self.lat = None self.shape = None if filename: self.read(filename) def read(self, filename): """Read data in hdf5 format from *filename* """ h5f = h5py.File(filename, "r") # Read the /how attributes self._how = dict(h5f['how'].attrs) self._what = dict(h5f['what'].attrs) self._how["satellite"] = h5f['how'].attrs['platform'] # Which one to use?: self._how["time_slot"] = (timedelta(seconds=long(h5f['how'].attrs['startepochs'])) + datetime(1970, 1, 1, 0, 0)) self._what["time_slot"] = datetime.strptime(h5f['what'].attrs['date'] + h5f['what'].attrs['time'], "%Y%m%d%H%M%S") # Read the data and attributes # This covers only one level of data. This could be made recursive. 
        for key, dataset in h5f.iteritems():
            if "how" in dataset.name or "what" in dataset.name:
                continue
            if "image" in dataset.name:
                setattr(self, key, InfoObject())
                getattr(self, key).info = dict(dataset.attrs)
                getattr(self, key).data = dataset['data'][:]
                if 'how' in dataset:
                    for skey, value in dataset['how'].attrs.iteritems():
                        getattr(self, key).info[skey] = value
                if 'what' in dataset:
                    for skey, value in dataset['what'].attrs.iteritems():
                        getattr(self, key).info[skey] = value
            if "where" in dataset.name:
                setattr(self, 'lon', InfoObject())
                getattr(self, 'lon').data = h5f['/where/lon/data'][:]
                getattr(self, 'lon').info = dict(dataset.attrs)
                for skey, value in dataset['lon/what'].attrs.iteritems():
                    getattr(self, 'lon').info[skey] = value
                setattr(self, 'lat', InfoObject())
                getattr(self, 'lat').data = h5f['/where/lat/data'][:]
                getattr(self, 'lat').info = dict(dataset.attrs)
                for skey, value in dataset['lat/what'].attrs.iteritems():
                    getattr(self, 'lat').info[skey] = value

        self.shape = self.lon.data.shape

        h5f.close()

        # Setup geolocation
        try:
            from pyresample import geometry
        except ImportError:
            return

        if hasattr(self, "lon") and hasattr(self, "lat"):
            lons = self.lon.data * \
                self.lon.info["gain"] + self.lon.info["offset"]
            lats = self.lat.data * \
                self.lat.info["gain"] + self.lat.info["offset"]
            self.area = geometry.SwathDefinition(lons=lons, lats=lats)
        else:
            LOG.warning("No longitudes or latitudes for data")

    # def project(self, coverage):
    #     """Project what can be projected in the product.
    #     """
    #     import copy
    #     res = copy.copy(self)
    #     area = coverage.out_area
    # Project the data
    #     for var in self._projectables:
    #         LOG.info("Projecting " + str(var))
    #         res.__dict__[var] = copy.copy(self.__dict__[var])
    #         res.__dict__[var].data = coverage.project_array(
    #             self.__dict__[var].data)
    # Take care of geolocation
    # We support only swath data with full lon,lat arrays
    #     lons, scale_factor, add_offset, no_data = \
    #         pack_signed(area.lons[:], np.int16)
    #     res.lon = InfoObject()
    #     res.lon.data = lons
    #     res.lon.info["description"] = "geographic longitude (deg)"
    #     res.lon.info["offset"] = add_offset
    #     res.lon.info["gain"] = scale_factor
    #     res.lon.info["nodata"] = no_data
    #     if "lon" not in res._keys:
    #         res._keys.append("lon")
    #     lats, scale_factor, add_offset, no_data = \
    #         pack_signed(area.lats[:], np.int16)
    #     res.lat = InfoObject()
    #     res.lat.data = lats
    #     res.lat.info["description"] = "geographic latitude (deg)"
    #     res.lat.info["offset"] = add_offset
    #     res.lat.info["gain"] = scale_factor
    #     res.lat.info["nodata"] = no_data
    #     if "lat" not in res._keys:
    #         res._keys.append("lat")
    #     return res

    # def is_loaded(self):
    #     """Tells if the channel contains loaded data.
    #     """
    #     return len(self._projectables) > 0


CASES = {
    "avhrr": load_channels,
    "viirs": load_channels
}
mpop-1.5.0/mpop/satin/s2_msi.py000066400000000000000000000036341317160620000163310ustar00rootroot00000000000000#!/usr/bin/python
"""Loader for s2, jpeg2000 format.
""" #Matias Takala FMI 2016 import glob import os import pickle import re from ConfigParser import ConfigParser import numpy.ma as ma from pyresample import utils import glymur from mpop import CONFIG_PATH from mpop.satellites import GenericFactory #in this version Q_V is hardcoded but could be read from metadata QUANTIFICATION_VALUE = 10000 def parse_tile(file): tile = re.findall('T(\d{2}\w{3})_', file) f = open('s2tiles.pickle', 'r') s2tiles = pickle.load(f) f.close() return [tile[0], s2tiles[tile[0]]] def read_jp2_data(file): jp2 = glymur.Jp2k(file) data = jp2[:] / (QUANTIFICATION_VALUE + 0.0) return data def open_s2_tile(fname): data = read_jp2_data(fname) size = data.shape params = parse_tile(fname) areadef = utils.get_area_def( params[0], "Sentinel 2 tile " + params[0], 'PROJ EPSG:' + params[1][0], 'init=epsg:' + params[1][0], size[0], size[1], params[1][1]) return ([data, areadef]) def load(satscene): """Load jpeg2000 data. """ # Read config file content conf = ConfigParser() conf.read(os.path.join(CONFIG_PATH, satscene.fullname + ".cfg")) for chn_name in satscene.channels_to_load: values = {"orbit": satscene.orbit, "satname": satscene.satname.upper(), "number": satscene.number, "instrument": satscene.instrument_name.upper(), "satellite": satscene.fullname.upper(), "band": chn_name} filename = os.path.join( conf.get("msi-level2", "dir"), satscene.time_slot.strftime(conf.get( "msi-level2", "filename", raw=True)) % values) filelist = glob.glob(filename) data_area = open_s2_tile(filelist[0]) satscene[chn_name] = ma.masked_array(data_area[0]) satscene[chn_name].area = data_area[1] mpop-1.5.0/mpop/satin/umarf_native.py000066400000000000000000000263511317160620000176160ustar00rootroot00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright (c) 2014 Martin Raspaud # Author(s): # Martin Raspaud # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # You should have received a copy of the GNU General Public License # along with this program. If not, see . """Reader for eumetsat's native format. 
TODO: - Add IMPFConfiguration Record - Make it an mpop reader (with geo info) - Make it work on a subset of channels - Put the hrv inside a square image (for not subsetted cases) - cleanup """ import numpy as np from mipp.xrit.MSG import (read_proheader, read_epiheader, _Calibrator, read_metadata) from mipp.xrit.loader import ImageLoader from mipp.xrit import Metadata no_data_value = 0 class UImageLoader(ImageLoader): def __init__(self, mda, data, mask=False, calibrate=False): ImageLoader.__init__(self, mda, None, mask, calibrate) self.data = data def _read(self, lines, cols, mda): return self.data def dec10to16(data): arr10 = data.astype(np.uint16).flat new_shape = list(data.shape[:-1]) + [(data.shape[-1] * 8) / 10] arr16 = np.zeros(new_shape, dtype=np.uint16) arr16.flat[::4] = np.left_shift(arr10[::5], 2) + \ np.right_shift((arr10[1::5]), 6) arr16.flat[1::4] = np.left_shift((arr10[1::5] & 63), 4) + \ np.right_shift((arr10[2::5]), 4) arr16.flat[2::4] = np.left_shift(arr10[2::5] & 15, 6) + \ np.right_shift((arr10[3::5]), 2) arr16.flat[3::4] = np.left_shift(arr10[3::5] & 3, 8) + \ arr10[4::5] return arr16 def show(data): """Show the stetched data. """ import Image as pil img = pil.fromarray(np.array((data - data.min()) * 255.0 / (data.max() - data.min()), np.uint8)) img.show() CHANNELS = {"VIS006": 1, "VIS008": 2, "IR_016": 3, "IR_039": 4, "WV_062": 5, "WV_073": 6, "IR_087": 7, "IR_097": 8, "IR_108": 9, "IR_120": 10, "IR_134": 11, "HRV": 12} def _get_metadata(hdr, ftr, channel_name): md = Metadata() md.calibrate = _Calibrator(hdr, channel_name) if channel_name == "HRV": md.pixel_size = (1000.134348869, 1000.134348869) else: md.pixel_size = (3000.403165817, 3000.403165817) md.sublon = hdr["ProjectionDescription"]["LongitudeOfSSP"] md.channel = channel_name if md.channel == "HRV": md.image_size = np.array((hdr["ReferenceGridHRV"]["NumberOfLines"], hdr["ReferenceGridHRV"]["NumberOfColumns"])) else: md.image_size = np.array((hdr["ReferenceGridVIS_IR"]["NumberOfLines"], hdr["ReferenceGridVIS_IR"]["NumberOfColumns"])) #md.satname = im.platform.lower() md.product_type = 'full disc' md.region_name = 'full disc' if md.channel == "HRV": md.first_pixel = hdr["ReferenceGridHRV"]["GridOrigin"] ns_, ew_ = md.first_pixel.split() del ns_ md.boundaries = np.array([[ ftr["LowerSouthLineActual"], ftr["LowerNorthLineActual"], ftr["LowerEastColumnActual"], ftr["LowerWestColumnActual"]], [ftr["UpperSouthLineActual"], ftr["UpperNorthLineActual"], ftr["UpperEastColumnActual"], ftr["UpperWestColumnActual"]]]) hcoff = 1856 * 3 hloff = 1856 * 3 md.coff = (ftr["Lower"+ew_.capitalize()+"ColumnActual"] + hcoff - 1) md.loff = hloff else: md.first_pixel = hdr["ReferenceGridVIS_IR"]["GridOrigin"] ns_, ew_ = md.first_pixel.split() md.boundaries = np.array([[ ftr["SouthernLineActual"], ftr["NorthernLineActual"], ftr["EasternColumnActual"], ftr["WesternColumnActual"]]]) lcoff = 1856 lloff = 1856 md.coff = lcoff md.loff = lloff md.no_data_value = no_data_value md.line_offset = 0 #md.time_stamp = im.time_stamp #md.production_time = im.production_time md.calibration_unit = 'counts' return md def load(satscene, calibrate=1): test_file = "/local_disk/data/satellite/umarf/MSG3-SEVI-MSG15-0100-NA-20131109121244.570000000Z-1080742.nat" _load_from_file(test_file, satscene, calibrate) def _load_from_file(filename, satscene, calibrate): hdr, ftr, umarf, data = linear_load(filename) for channel in satscene.channels_to_load: mda = _get_metadata(hdr, ftr, channel) if channel == "HRV": dat = 
dec10to16(data["hrv"]["line_data"]).reshape((int(umarf["NumberLinesHRV"]))) * 1.0 else: dat = dec10to16(data["visir"]["line_data"][:, CHANNELS[channel], :]) * 1.0 print dat.min(), dat.max() uil = UImageLoader(mda, dat, mask=False, calibrate=calibrate) md, res = uil() satscene[channel] = np.ma.masked_equal(res, 0) proj_params = 'proj=geos lon_0=9.50 lat_0=0.00 a=6378169.00 b=6356583.80 h=35785831.00'.split() proj_dict = {} for param in proj_params: key, val = param.split("=") proj_dict[key] = val from pyresample import geometry satscene[channel].area = geometry.AreaDefinition( satscene.satname + satscene.instrument_name + str(md.area_extent) + str(res.shape), "On-the-fly area", proj_dict["proj"], proj_dict, res.shape[1], res.shape[0], md.area_extent) def linear_load(filename): """First draft, works to retrieve counts. """ with open(filename) as fp_: umarf = {} for i in range(6): name = (fp_.read(30).strip("\x00"))[:-2].strip() umarf[name] = fp_.read(50).strip("\x00").strip() for i in range(27): name = fp_.read(30).strip("\x00") if name == '': fp_.read(32) continue name = name[:-2].strip() umarf[name] = {"size": fp_.read(16).strip("\x00").strip(), "adress": fp_.read(16).strip("\x00").strip()} for i in range(19): name = (fp_.read(30).strip("\x00"))[:-2].strip() umarf[name] = fp_.read(50).strip("\x00").strip() for i in range(18): name = (fp_.read(30).strip("\x00"))[:-2].strip() umarf[name] = fp_.read(50).strip("\x00").strip() from pprint import pprint pprint(umarf) uhdrlen = fp_.tell() print "UMARF header length", uhdrlen gp_pk_header = np.dtype([ ("HeaderVersionNo", ">i1"), ("PacketType", ">i1"), ("SubHeaderType", ">i1"), ("SourceFacilityId", ">i1"), ("SourceEnvId", ">i1"), ("SourceInstanceId", ">i1"), ("SourceSUId", ">i4"), ("SourceCPUId", ">i1", (4, )), ("DestFacilityId", ">i1"), ("DestEnvId", ">i1"), ("SequenceCount", ">u2"), ("PacketLength", ">i4"), ]) gp_pk_subheader = np.dtype([ ("SubHeaderVersionNo", ">i1"), ("ChecksumFlag", ">i1"), ("Acknowledgement", ">i1", (4, )), ("ServiceType", ">i1"), ("ServiceSubtype", ">i1"), ("PacketTime", ">i1", (6, )), ("SpacecraftId", ">i2"), ]) pk_head = np.dtype([("gp_pk_header", gp_pk_header), ("gp_pk_sh1", gp_pk_subheader)]) # read header pk_header = np.fromfile(fp_, pk_head, count=1) hdr_version = ord(fp_.read(1)) hdr = read_proheader(fp_) # skip IMPF CONFIGURATION fp_.seek(19786, 1) # read line data cols_visir = np.ceil(int(umarf["NumberColumnsVISIR"]) * 5.0 / 4) # 4640 if (int(umarf['WestColumnSelectedRectangle']) - int(umarf['EastColumnSelectedRectangle'])) < 3711: cols_hrv = np.ceil(int(umarf["NumberColumnsHRV"]) * 5.0 / 4) # 6960 else: cols_hrv = np.ceil(5568 * 5.0 / 4) # 6960 #fp_.seek(450400) # FIXME: works only if all channels are selected! 
selected = umarf["SelectedBandIDs"] visir_nb = selected.count("X", 0, 11) hrv_nb = selected.count("X", 11, 12) print visir_nb, hrv_nb visir_type = np.dtype([("gp_pk", pk_head), ("version", ">u1"), ("satid", ">u2"), ("time", ">u2", (5, )), ("lineno", ">u4"), ("chan_id", ">u1"), ("acq_time", ">u2", (3, )), ("line_validity", ">u1"), ("line_rquality", ">u1"), ("line_gquality", ">u1"), ("line_data", ">u1", (cols_visir, ))]) hrv_type = np.dtype([("gp_pk", pk_head), ("version", ">u1"), ("satid", ">u2"), ("time", ">u2", (5, )), ("lineno", ">u4"), ("chan_id", ">u1"), ("acq_time", ">u2", (3, )), ("line_validity", ">u1"), ("line_rquality", ">u1"), ("line_gquality", ">u1"), ("line_data", ">u1", (cols_hrv, ))]) if hrv_nb == 0: linetype = np.dtype([("visir", visir_type, (visir_nb, ))]) elif visir_nb == 0: linetype = np.dtype([("hrv", hrv_type, (hrv_nb * 3, ))]) else: linetype = np.dtype([("visir", visir_type, (visir_nb, )), ("hrv", hrv_type, (hrv_nb * 3, ))]) data_len = int(umarf["NumberLinesVISIR"]) # read everything in memory #res = np.fromfile(fp_, dtype=linetype, count=data_len) # lazy reading res = np.memmap(fp_, dtype=linetype, shape=(data_len, ), offset=450400, mode="r") fp_.seek(linetype.itemsize * data_len + 450400) # read trailer pk_header = np.fromfile(fp_, pk_head, count=1) ftr = read_epiheader(fp_) return hdr, ftr, umarf, res if __name__ == '__main__': test_file = "/local_disk/data/satellite/umarf/MSG3-SEVI-MSG15-0100-NA-20131109121244.570000000Z-1080742.nat" #test_file = sys.argv[1] hdr, ftr, umarf, res = linear_load(test_file) # display the data show(dec10to16(res["visir"]["line_data"][:, 1, :])[::-1, ::-1]) #show(dec10to16(res["hrv"]["line_data"]).reshape((int(umarf["NumberLinesHRV"]), -1))[::-1, ::-1]) mpop-1.5.0/mpop/satin/viirs_compact.py000066400000000000000000000436631317160620000200050ustar00rootroot00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright (c) 2014, 2015 Martin Raspaud # Author(s): # Martin Raspaud # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # You should have received a copy of the GNU General Public License # along with this program. If not, see . """Compact viirs format. 
""" import h5py import numpy as np from pyresample.geometry import SwathDefinition from datetime import timedelta import glob from ConfigParser import ConfigParser import os import logging import bz2 try: import tables except ImportError: tables = None from mpop import CONFIG_PATH logger = logging.getLogger(__name__) c = 299792458 # m.s-1 h = 6.6260755e-34 # m2kg.s-1 k = 1.380658e-23 # m2kg.s-2.K-1 def load(satscene, *args, **kwargs): del args files_to_load = [] files_to_delete = [] try: filename = kwargs.get("filename") logger.debug("reading %s", str(filename)) if filename is not None: if isinstance(filename, (list, set, tuple)): files = filename else: files = [filename] files_to_load = [] for filename in files: pathname, ext = os.path.splitext(filename) if ext == ".bz2": zipfile = bz2.BZ2File(filename) newname = os.path.join("/tmp", os.path.basename(pathname)) if not os.path.exists(newname): with open(newname, "wb") as fp_: fp_.write(zipfile.read()) zipfile.close() files_to_load.append(newname) files_to_delete.append(newname) else: files_to_load.append(filename) else: time_start, time_end = kwargs.get("time_interval", (satscene.time_slot, None)) conf = ConfigParser() conf.read(os.path.join(CONFIG_PATH, satscene.fullname + ".cfg")) options = {} for option, value in conf.items(satscene.instrument_name + "-level2", raw=True): options[option] = value template = os.path.join(options["dir"], options["filename"]) second = timedelta(seconds=1) files_to_load = [] if time_end is not None: time = time_start - second * 85 files_to_load = [] while time <= time_end: fname = time.strftime(template) flist = glob.glob(fname) try: files_to_load.append(flist[0]) time += second * 80 except IndexError: pass time += second else: files_to_load = glob.glob(time_start.strftime(template)) chan_dict = {"M01": "M1", "M02": "M2", "M03": "M3", "M04": "M4", "M05": "M5", "M06": "M6", "M07": "M7", "M08": "M8", "M09": "M9", "M10": "M10", "M11": "M11", "M12": "M12", "M13": "M13", "M14": "M14", "M15": "M15", "M16": "M16", "DNB": "DNB"} channels = [(chn, chan_dict[chn]) for chn in satscene.channels_to_load if chn in chan_dict] try: channels_to_load, chans = zip(*channels) except ValueError: return m_chans = [] dnb_chan = [] for chn in chans: if chn.startswith('M'): m_chans.append(chn) elif chn.startswith('DNB'): dnb_chan.append(chn) else: raise ValueError("Reading of channel %s not implemented", chn) m_datas = [] m_lonlats = [] dnb_datas = [] dnb_lonlats = [] for fname in files_to_load: is_dnb = os.path.basename(fname).startswith('SVDNBC') logger.debug("Reading %s", fname) if is_dnb: if tables: h5f = tables.open_file(fname, "r") else: logger.warning("DNB data could not be read from %s, " "PyTables not available.", fname) continue else: h5f = h5py.File(fname, "r") if m_chans and not is_dnb: try: arr, m_units = read_m(h5f, m_chans) m_datas.append(arr) m_lonlats.append(navigate_m(h5f, m_chans[0])) except KeyError: pass if dnb_chan and is_dnb and tables: try: arr, dnb_units = read_dnb(h5f) dnb_datas.append(arr) dnb_lonlats.append(navigate_dnb(h5f)) except KeyError: pass h5f.close() if len(m_lonlats) > 0: m_lons = np.ma.vstack([lonlat[0] for lonlat in m_lonlats]) m_lats = np.ma.vstack([lonlat[1] for lonlat in m_lonlats]) if len(dnb_lonlats) > 0: dnb_lons = np.ma.vstack([lonlat[0] for lonlat in dnb_lonlats]) dnb_lats = np.ma.vstack([lonlat[1] for lonlat in dnb_lonlats]) m_i = 0 dnb_i = 0 for chn in channels_to_load: if m_datas and chn.startswith('M'): m_data = np.ma.vstack([dat[m_i] for dat in m_datas]) satscene[chn] = m_data 
satscene[chn].info["units"] = m_units[m_i] m_i += 1 if dnb_datas and chn.startswith('DNB'): dnb_data = np.ma.vstack([dat[dnb_i] for dat in dnb_datas]) satscene[chn] = dnb_data satscene[chn].info["units"] = dnb_units[dnb_i] dnb_i += 1 if m_datas: m_area_def = SwathDefinition(np.ma.masked_where(m_data.mask, m_lons), np.ma.masked_where(m_data.mask, m_lats)) else: logger.warning("No M channel data available.") if dnb_datas: dnb_area_def = SwathDefinition(np.ma.masked_where(dnb_data.mask, dnb_lons), np.ma.masked_where(dnb_data.mask, dnb_lats)) else: logger.warning("No DNB data available.") for chn in channels_to_load: if "DNB" not in chn and m_datas: satscene[chn].area = m_area_def if dnb_datas: for chn in dnb_chan: satscene[chn].area = dnb_area_def finally: for fname in files_to_delete: if os.path.exists(fname): os.remove(fname) def read_m(h5f, channels, calibrate=1): chan_dict = dict([(key.split("-")[1], key) for key in h5f["All_Data"].keys() if key.startswith("VIIRS")]) scans = h5f["All_Data"]["NumberOfScans"][0] res = [] units = [] arr_mask = np.ma.nomask for channel in channels: rads = h5f["All_Data"][chan_dict[channel]]["Radiance"] if channel in ("M9",): arr = rads[:scans * 16, :].astype(np.float32) arr[arr > 65526] = np.nan arr = np.ma.masked_array(arr, mask=arr_mask) else: arr = np.ma.masked_greater(rads[:scans * 16, :].astype(np.float32), 65526) try: arr = np.ma.where(arr <= rads.attrs['Threshold'], arr * rads.attrs['RadianceScaleLow'] + rads.attrs['RadianceOffsetLow'], arr * rads.attrs['RadianceScaleHigh'] + \ rads.attrs['RadianceOffsetHigh'],) arr_mask = arr.mask except KeyError: print "KeyError" pass unit = "W m-2 sr-1 μm-1" if calibrate == 0: raise NotImplementedError("Can't get counts from this data") if calibrate == 1: # do calibrate try: # First guess: VIS or NIR data a_vis = rads.attrs['EquivalentWidth'] b_vis = rads.attrs['IntegratedSolarIrradiance'] dse = rads.attrs['EarthSunDistanceNormalised'] arr *= 100 * np.pi * a_vis / b_vis * (dse ** 2) unit = "%" except KeyError: # Maybe it's IR data? 
try: a_ir = rads.attrs['BandCorrectionCoefficientA'] b_ir = rads.attrs['BandCorrectionCoefficientB'] lambda_c = rads.attrs['CentralWaveLength'] arr *= 1e6 arr = (h * c) / (k * lambda_c * \ np.log(1 + (2 * h * c ** 2) / ((lambda_c ** 5) * arr))) arr *= a_ir arr += b_ir unit = "K" except KeyError: logger.warning("Calibration failed.") elif calibrate != 2: raise ValueError("Calibrate parameter should be 1 or 2") arr[arr < 0] = 0 res.append(arr) units.append(unit) return res, units def read_dnb(h5f): scans = h5f.get_node("/All_Data/NumberOfScans").read()[0] res = [] units = [] rads_dset = h5f.get_node("/All_Data/VIIRS-DNB-SDR_All") arr = np.ma.masked_greater(rads_dset.Radiance.read()[:scans * 16, :], 1.0) unit = "W m-2 sr-1 μm-1" arr[arr < 0] = 0 res.append(arr) units.append(unit) return res, units def expand_array(data, scans, c_align, c_exp, scan_size=16, tpz_size=16, nties=200, track_offset=0.5, scan_offset=0.5): s_track, s_scan = np.mgrid[0:scans * scan_size, 0:nties*tpz_size] s_track = (s_track.reshape(scans, scan_size, nties, tpz_size) % scan_size + track_offset) / scan_size s_scan = (s_scan.reshape(scans, scan_size, nties, tpz_size) % tpz_size + scan_offset) / tpz_size a_scan = s_scan + s_scan * (1 - s_scan) * c_exp + s_track * (1 - s_track) * c_align a_track = s_track data_a = data[:scans * 2:2, np.newaxis, :-1, np.newaxis] data_b = data[:scans * 2:2, np.newaxis, 1:, np.newaxis] data_c = data[1:scans * 2:2, np.newaxis, 1:, np.newaxis] data_d = data[1:scans * 2:2, np.newaxis, :-1, np.newaxis] fdata = ((1 - a_track) * ((1 - a_scan) * data_a + a_scan * data_b) + a_track * ((1 - a_scan) * data_d + a_scan * data_c)) return fdata.reshape(scans * scan_size, nties * tpz_size) def lonlat2xyz(lon, lat): lat = np.deg2rad(lat) lon = np.deg2rad(lon) x = np.cos(lat) * np.cos(lon) y = np.cos(lat) * np.sin(lon) z = np.sin(lat) return x, y, z def xyz2lonlat(x, y, z): lon = np.rad2deg(np.arctan2(y, x)) lat = np.rad2deg(np.arctan2(z, np.sqrt(x ** 2 + y ** 2))) return lon, lat def navigate_m(h5f, channel): if not channel.startswith("M"): raise ValueError("Unknow channel type for band %s", channel) scans = h5f["All_Data"]["NumberOfScans"][0] geostuff = h5f["All_Data"]["VIIRS-MOD-GEO_All"] all_c_align = geostuff["AlignmentCoefficient"].value[np.newaxis, np.newaxis, :, np.newaxis] all_c_exp = geostuff["ExpansionCoefficient"].value[np.newaxis, np.newaxis, :, np.newaxis] all_lon = geostuff["Longitude"].value all_lat = geostuff["Latitude"].value res = [] # FIXME: this supposes there is only one tiepoint zone in the # track direction scan_size = h5f["All_Data/VIIRS-%s-SDR_All" % \ channel].attrs["TiePointZoneSizeTrack"][0] track_offset = h5f["All_Data/VIIRS-%s-SDR_All" % \ channel].attrs["PixelOffsetTrack"] scan_offset = h5f["All_Data/VIIRS-%s-SDR_All" % \ channel].attrs["PixelOffsetScan"] try: group_locations = h5f["All_Data/VIIRS-MOD-GEO_All/" "TiePointZoneGroupLocationScanCompact"].value except KeyError: group_locations = [0] param_start = 0 for tpz_size, nb_tpz, start in \ zip(h5f["All_Data/VIIRS-%s-SDR_All" % \ channel].attrs["TiePointZoneSizeScan"], h5f["All_Data/VIIRS-MOD-GEO_All/NumberOfTiePointZonesScan"].value, group_locations): lon = all_lon[:, start:start + nb_tpz + 1] lat = all_lat[:, start:start + nb_tpz + 1] c_align = all_c_align[:, :, param_start:param_start + nb_tpz, :] c_exp = all_c_exp[:, :, param_start:param_start + nb_tpz, :] param_start += nb_tpz nties = nb_tpz if (np.max(lon) - np.min(lon) > 90) or (np.max(abs(lat)) > 60): x, y, z = lonlat2xyz(lon, lat) x, y, z = (expand_array(x, scans, 
c_align, c_exp, scan_size, tpz_size, nties, track_offset, scan_offset), expand_array(y, scans, c_align, c_exp, scan_size, tpz_size, nties, track_offset, scan_offset), expand_array(z, scans, c_align, c_exp, scan_size, tpz_size, nties, track_offset, scan_offset)) res.append(xyz2lonlat(x, y, z)) else: res.append((expand_array(lon, scans, c_align, c_exp, scan_size, tpz_size, nties, track_offset, scan_offset), expand_array(lat, scans, c_align, c_exp, scan_size, tpz_size, nties, track_offset, scan_offset))) lons, lats = zip(*res) return np.hstack(lons), np.hstack(lats) def navigate_dnb(h5f): scans = h5f.get_node("/All_Data/NumberOfScans").read()[0] geo_dset = h5f.get_node("/All_Data/VIIRS-DNB-GEO_All") all_c_align = geo_dset.AlignmentCoefficient.read()[np.newaxis, np.newaxis, :, np.newaxis] all_c_exp = geo_dset.ExpansionCoefficient.read()[np.newaxis, np.newaxis, :, np.newaxis] all_lon = geo_dset.Longitude.read() all_lat = geo_dset.Latitude.read() res = [] # FIXME: this supposes there is only one tiepoint zone in the # track direction scan_size = h5f.get_node_attr("/All_Data/VIIRS-DNB-SDR_All", "TiePointZoneSizeTrack")[0] track_offset = h5f.get_node_attr("/All_Data/VIIRS-DNB-SDR_All", "PixelOffsetTrack")[0] scan_offset = h5f.get_node_attr("/All_Data/VIIRS-DNB-SDR_All", "PixelOffsetScan")[0] try: group_locations = geo_dset.TiePointZoneGroupLocationScanCompact.read() except KeyError: group_locations = [0] param_start = 0 for tpz_size, nb_tpz, start in \ zip(h5f.get_node_attr("/All_Data/VIIRS-DNB-SDR_All", "TiePointZoneSizeScan"), geo_dset.NumberOfTiePointZonesScan.read(), group_locations): lon = all_lon[:, start:start + nb_tpz + 1] lat = all_lat[:, start:start + nb_tpz + 1] c_align = all_c_align[:, :, param_start:param_start + nb_tpz, :] c_exp = all_c_exp[:, :, param_start:param_start + nb_tpz, :] param_start += nb_tpz nties = nb_tpz if (np.max(lon) - np.min(lon) > 90) or (np.max(abs(lat)) > 60): x, y, z = lonlat2xyz(lon, lat) x, y, z = (expand_array(x, scans, c_align, c_exp, scan_size, tpz_size, nties, track_offset, scan_offset), expand_array(y, scans, c_align, c_exp, scan_size, tpz_size, nties, track_offset, scan_offset), expand_array(z, scans, c_align, c_exp, scan_size, tpz_size, nties, track_offset, scan_offset)) res.append(xyz2lonlat(x, y, z)) else: res.append((expand_array(lon, scans, c_align, c_exp, scan_size, tpz_size, nties, track_offset, scan_offset), expand_array(lat, scans, c_align, c_exp, scan_size, tpz_size, nties, track_offset, scan_offset))) lons, lats = zip(*res) return np.hstack(lons), np.hstack(lats) if __name__ == '__main__': #filename = "/local_disk/data/satellite/polar/compact_viirs/SVMC_npp_d20140114_t1245125_e1246367_b11480_c20140114125427496143_eum_ops.h5" filename = "/local_disk/data/satellite/polar/compact_viirs/mymy.h5" h5f = h5py.File(filename, 'r') # ch1, ch2, ch3, ch4 = read(h5f, ["M5", "M4", "M2", "M12"]) # img = GeoImage((ch1, ch2, ch3), # None, # None, # fill_value=None, # mode="RGB") # img.enhance(stretch="linear") # img.enhance(gamma=2.0) # img.show() lons, lats = navigate_m(h5f) mpop-1.5.0/mpop/satin/viirs_sdr.py000066400000000000000000001215451317160620000171430ustar00rootroot00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright (c) 2011, 2012, 2013, 2014, 2015, 2016, 2017. # Author(s): # # Adam Dybbroe # Kristian Rune Larsen # Lars Ørum Rasmussen # Martin Raspaud # # This file is part of mpop. 
# mpop is free software: you can redistribute it and/or modify it under the # terms of the GNU General Public License as published by the Free Software # Foundation, either version 3 of the License, or (at your option) any later # version. # mpop is distributed in the hope that it will be useful, but WITHOUT ANY # WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR # A PARTICULAR PURPOSE. See the GNU General Public License for more details. # You should have received a copy of the GNU General Public License along with # mpop. If not, see . """Interface to VIIRS SDR format Format documentation: http://npp.gsfc.nasa.gov/science/sciencedocuments/082012/474-00001-03_CDFCBVolIII_RevC.pdf """ import hashlib import logging import os.path from ConfigParser import ConfigParser from datetime import datetime, timedelta import h5py import numpy as np from mpop import CONFIG_PATH # ------------------------------------------------------------------------------ from mpop.plugin_base import Reader from mpop.utils import strftime NO_DATE = datetime(1958, 1, 1) EPSILON_TIME = timedelta(days=2) VIIRS_MBAND_GRANULE_SIZE = (768, 3200) VIIRS_DNB_GRANULE_SIZE = (768, 4064) VIIRS_IBAND_GRANULE_SIZE = (768 * 2, 3200 * 2) VIIRS_IR_BANDS = ('M16', 'M15', 'M14', 'M13', 'M12', 'I5', 'I4') VIIRS_VIS_BANDS = ('M1', 'M2', 'M3', 'M4', 'M5', 'M6', 'M7', 'M8', 'M9', 'M10', 'M11', 'I1', 'I2', 'I3') VIIRS_DNB_BANDS = ('DNB', ) logger = logging.getLogger(__name__) class HDF5MetaData(object): """ Small class for inspecting a HDF5 file and retrieve its metadata/header data. It is developed for JPSS/NPP data but is really generic and should work on most other hdf5 files. Supports """ def __init__(self, filename): self.metadata = {} self.filename = filename if not os.path.exists(filename): raise IOError("File %s does not exist!" 
% filename) def read(self): h5f = h5py.File(self.filename, 'r') h5f.visititems(self.collect_metadata) self._collect_attrs('/', h5f.attrs) h5f.close() return self def _collect_attrs(self, name, attrs): for key, value in attrs.iteritems(): value = np.squeeze(value) if issubclass(value.dtype.type, str): self.metadata["%s/attr/%s" % (name, key)] = str(value) else: self.metadata["%s/attr/%s" % (name, key)] = value def collect_metadata(self, name, obj): if isinstance(obj, h5py.Dataset): self.metadata["%s/shape" % name] = obj.shape self._collect_attrs(name, obj.attrs) def __getitem__(self, key): long_key = None for mkey in self.metadata.keys(): if mkey.endswith(key): if long_key is not None: raise KeyError("Multiple keys called %s" % key) long_key = mkey # Test this to be able to read SDR files with several granules # in one file (Matias' files) break return self.metadata[long_key] def keys(self): return self.metadata.keys() def get_data_keys(self): data_keys = [] for key in self.metadata.keys(): if key.endswith("/shape"): data_key = key.split("/shape")[0] data_keys.append(data_key) return data_keys class NPPMetaData(HDF5MetaData): def _parse_npp_datatime(self, datestr, timestr): time_val = datetime.strptime(datestr + timestr, '%Y%m%d%H%M%S.%fZ') if abs(time_val - NO_DATE) < EPSILON_TIME: raise ValueError("Datetime invalid %s " % time_val) return time_val def get_ring_lonlats(self): return self['G-Ring_Longitude'], self['G-Ring_Latitude'] def get_begin_time(self): return self._parse_npp_datatime(self['AggregateBeginningDate'], self['AggregateBeginningTime']) def get_end_time(self): return self._parse_npp_datatime(self['AggregateEndingDate'], self['AggregateEndingTime']) def get_begin_orbit_number(self): return int(self['AggregateBeginningOrbitNumber']) def get_end_orbit_number(self): return int(self['AggregateEndingOrbitNumber']) def get_geofilname(self): return self['N_GEO_Ref'] def get_shape(self): shape = self['Radiance/shape'] band = self['Band_ID'] if band[0] == 'M': # if shape != VIIRS_MBAND_GRANULE_SIZE: if ((shape[0] % VIIRS_MBAND_GRANULE_SIZE[0]) != 0 and (shape[1] % VIIRS_MBAND_GRANULE_SIZE[1]) != 0): raise ValueError( "Unsupported granule size %s for %s" % (shape, band)) elif band == "DNB": if ((shape[0] % VIIRS_DNB_GRANULE_SIZE[0]) != 0 and (shape[1] % VIIRS_MBAND_GRANULE_SIZE[1]) != 0): raise ValueError( "Unsupported granule size %s for %s" % (shape, band)) elif band[0] == "I": if ((shape[0] % VIIRS_IBAND_GRANULE_SIZE[0] != 0) and (shape[1] % VIIRS_IBAND_GRANULE_SIZE[1] != 0)): raise ValueError( "Unsupported granule size %s for %s" % (shape, band)) return shape def get_band_description(self): band = self['Band_ID'] for band_desc in ('I', 'M', 'DNB', "N/A"): if band.startswith(band_desc): if band_desc == 'N/A': return 'DNB' return band_desc return None def _band_data_keys(self, data_type): """ :param data_type: Reflectance, Radiance or BrightnessTemperature :type data_type: string :returns: HDF5 data key and scale factor keys in a two element tuple """ data_key = None factors_keys = None for key in self.get_data_keys(): if key.endswith(data_type): data_key = key factors_keys = key + "Factors" return (data_key, factors_keys) def get_reflectance_keys(self): return self._band_data_keys("Reflectance") def get_radiance_keys(self): return self._band_data_keys("Radiance") def get_brightness_temperature_keys(self): return self._band_data_keys("BrightnessTemperature") def get_unit(self, calibrate=1): band = self['Band_ID'] if calibrate == 2 and band not in VIIRS_DNB_BANDS: return "W m-2 um-1 
sr-1" if band in VIIRS_IR_BANDS: return "K" elif band in VIIRS_VIS_BANDS: return '%' elif band in VIIRS_DNB_BANDS: return 'W m-2 sr-1' return None # # # http://yloiseau.net/articles/DesignPatterns/flyweight/ class GeolocationFlyweight(object): def __init__(self, cls): self._cls = cls self._instances = dict() def __call__(self, *args, **kargs): """ we assume that this is only used for the gelocation object, filenames are listed in the second argument """ return self._instances.setdefault(tuple(args[1]), self._cls(*args, **kargs)) def clear_cache(self): del self._instances self._instances = dict() @GeolocationFlyweight class ViirsGeolocationData(object): def __init__(self, shape, filenames): self.filenames = filenames self.longitudes = None self.shape = shape self.latitudes = None self.elevation = None self.mask = None def read(self): """ Read longitudes and latitudes from geo filenames and assemble """ if self.longitudes is not None: return self self.longitudes = np.empty(self.shape, dtype=np.float32) self.latitudes = np.empty(self.shape, dtype=np.float32) self.elevation = np.empty(self.shape, dtype=np.float32) self.mask = np.zeros(self.shape, dtype=np.bool) granule_length = self.shape[0] / len(self.filenames) for index, filename in enumerate(self.filenames): swath_index = index * granule_length y0_ = swath_index y1_ = swath_index + granule_length get_lonlat_into(filename, self.longitudes[y0_:y1_, :], self.latitudes[y0_:y1_, :], self.elevation[y0_:y1_, :], self.mask[y0_:y1_, :]) self.longitudes = np.ma.array(self.longitudes, mask=self.mask, copy=False) self.latitudes = np.ma.array(self.latitudes, mask=self.mask, copy=False) self.elevation = np.ma.array(self.elevation, mask=self.mask, copy=False) logger.debug("Geolocation read in for... " + str(self)) return self # ------------------------------------------------------------------------------ def _get_invalid_info(granule_data): """Get a detailed report of the missing data. N/A: not applicable MISS: required value missing at time of processing OBPT: onboard pixel trim (overlapping/bow-tie pixel removed during SDR processing) OGPT: on-ground pixel trim (overlapping/bow-tie pixel removed during EDR processing) ERR: error occurred during processing / non-convergence ELINT: ellipsoid intersect failed / instrument line-of-sight does not intersect the Earth’s surface VDNE: value does not exist / processing algorithm did not execute SOUB: scaled out-of-bounds / solution not within allowed range """ if issubclass(granule_data.dtype.type, np.integer): msg = ("na:" + str((granule_data == 65535).sum()) + " miss:" + str((granule_data == 65534).sum()) + " obpt:" + str((granule_data == 65533).sum()) + " ogpt:" + str((granule_data == 65532).sum()) + " err:" + str((granule_data == 65531).sum()) + " elint:" + str((granule_data == 65530).sum()) + " vdne:" + str((granule_data == 65529).sum()) + " soub:" + str((granule_data == 65528).sum())) if issubclass(granule_data.dtype.type, np.floating): msg = ("na:" + str((granule_data == -999.9).sum()) + " miss:" + str((granule_data == -999.8).sum()) + " obpt:" + str((granule_data == -999.7).sum()) + " ogpt:" + str((granule_data == -999.6).sum()) + " err:" + str((granule_data == -999.5).sum()) + " elint:" + str((granule_data == -999.4).sum()) + " vdne:" + str((granule_data == -999.3).sum()) + " soub:" + str((granule_data == -999.2).sum())) return msg class ViirsBandData(object): """Placeholder for the VIIRS M&I-band data. Reads the SDR data - one hdf5 file for each band. 
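    A minimal usage sketch (the file name below is a hypothetical example
    of the SDR naming convention, and the geolocation directory is made
    up):

        band = ViirsBandData(["SVM05_npp_d20140114_t1245125_..._ops.h5"],
                             calibrate=1).read()
        band.read_lonlat(geodir="/path/to/geolocation/files")
        print band.band_id, band.units, band.data.shape
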
Not yet considering the Day-Night Band """ def __init__(self, filenames, calibrate=1): self.begin_time = 0 self.end_time = 0 self.orbit_begin = 0 self.orbit_end = 0 self.band_id = 'unknown' self.data = None self.mask = None self.raw_data = None self.scale = 1.0 # gain self.offset = 0.0 # intercept self.filenames = sorted(filenames) self.units = 'unknown' self.geo_filenames = [] self.calibrate = calibrate self.data = None self.geolocation = None self.band_desc = None self.band_uid = None self.metadata = [] def read(self): self._read_metadata() logger.debug("Shape of data: " + str(self.raw_data.shape)) self._read_data() return self def _read_metadata(self): logger.debug("Filenames: " + str(self.filenames)) for fname in self.filenames: logger.debug("Get and append metadata from file: " + str(fname)) md = NPPMetaData(fname).read() self.metadata.append(md) self.geo_filenames.append(md.get_geofilname()) # Check if the geo-filenames actually exists: # geofilename = md.get_geofilname() # if os.path.exists(geofilename): # self.geo_filenames.append(geofilename) # else: # logger.warning("Geo file defined in metadata header " + # "does not exist: " + str(geofilename)) # # initiate data arrays granule_length, swath_width = self.metadata[0].get_shape() shape = (granule_length * len(self.metadata), swath_width) #self.data = np.ma.array(np.zeros(shape, dtype=np.float32), fill_value=0) self.raw_data = np.zeros(shape, dtype=np.float32) self.mask = np.zeros(shape, dtype=np.bool) self.orbit_begin = self.metadata[0].get_begin_orbit_number() self.orbit_end = self.metadata[-1].get_end_orbit_number() self.begin_time = self.metadata[0].get_begin_time() self.end_time = self.metadata[-1].get_end_time() self.units = self.metadata[0].get_unit(self.calibrate) self.band_desc = self.metadata[0].get_band_description() self.band_id = self.metadata[0]['Band_ID'] if self.band_id == "N/A": self.band_id = "DNB" def _read_data(self): """Read one VIIRS M- or I-band channel: Data and attributes (meta data) - *calibrate* set to 1 (default) returns reflectances for visual bands, tb for ir bands, and radiance for dnb. - *calibrate* set to 2 returns radiances. """ granule_length, swath_width = self.metadata[0].get_shape() for index, md in enumerate(self.metadata): h5f = h5py.File(md.filename, 'r') # find appropiate band data to insert data_key = None factors_key = None if self.calibrate == 1: data_key, factors_key = md.get_reflectance_keys() if data_key is None: data_key, factors_key = md.get_brightness_temperature_keys() logger.debug("data and factors keys: " + str(data_key) + str(factors_key)) # handle dnb data if data_key is None and self.band_id == "DNB": data_key, factors_key = md.get_radiance_keys() elif self.calibrate == 2: data_key, factors_key = md.get_radiance_keys() # # get granule data and scale and offset values try: granule_factors_data = h5f[factors_key].value except KeyError: # # We can't find the factors this must be DNB # if self.band_id != "DNB": # raise # We can't find the factors, so this must be the DNB or the M13 # band: logger.debug("Band id = " + str(self.band_id)) if self.band_id != "DNB" and self.band_id != "M13": raise if self.band_id == "DNB": # The unit is W/sr cm-2 in the file! but we need 'W sr-1 # m-2' granule_factors_data = (10000., 0.) else: # M13 is stored in floats with no scale or offset: granule_factors_data = (1., 0.) granule_data = h5f[data_key].value self.scale, self.offset = granule_factors_data[0:2] # The VIIRS reflectances are between 0 and 1. 
# mpop standard is '%' if self.units == '%': # To get reflectances in percent! self.scale *= np.int8(100) self.offset *= np.int8(100) swath_index = index * granule_length y0_ = swath_index y1_ = swath_index + granule_length self.raw_data[y0_:y1_, :] = granule_data self.raw_data[y0_:y1_, :] *= self.scale self.raw_data[y0_:y1_, :] += self.offset logger.debug("dtype(granule_data) = " + str(granule_data.dtype)) # Masking spurious data # according to documentation, mask integers >= 65328, floats <= # -999.3 if issubclass(granule_data.dtype.type, np.integer): self.mask[y0_:y1_, :] = granule_data >= 65528 if issubclass(granule_data.dtype.type, np.floating): self.mask[y0_:y1_, :] = granule_data < -999.2 logger.debug("%s", _get_invalid_info(granule_data)) # Is it necessary to mask negatives? #self.mask[y0_:y1_, :] |= self.raw_data[y0_:y1_, :] < 0 h5f.close() self.data = np.ma.array(self.raw_data, mask=self.mask, copy=False) self.band_uid = self.band_desc + hashlib.sha1(self.mask).hexdigest() def read_lonlat(self, geofilepaths=None, geodir=None): if geofilepaths is None: if geodir is None: geodir = os.path.dirname(self.metadata[0].filename) logger.debug("Geo-files = " + str(self.geo_filenames)) geofilepaths = [os.path.join(geodir, geofilepath) for geofilepath in self.geo_filenames] geofilepaths = sorted(geofilepaths) logger.debug("Geo-files = " + str(geofilepaths)) self.geolocation = ViirsGeolocationData(self.data.shape, geofilepaths).read() class ViirsSDRReader(Reader): pformat = "viirs_sdr" def __init__(self, *args, **kwargs): Reader.__init__(self, *args, **kwargs) self.geofiles = [] self.shape = None def get_sunsat_angles(self, **kwargs): """Get sun-satellite viewing geometry for a given band type (M, I, or DNB) Optional arguments: bandtype = 'M', 'I', or 'DNB' Return sun-zenith, sun-azimuth, sat-zenith, sat-azimuth """ if 'bandtype' in kwargs: bandtype = kwargs['bandtype'] else: bandtype = 'M' if bandtype.startswith('M'): geofilenames = [geofile for geofile in self.geofiles if os.path.basename(geofile).startswith('GMTCO')] if len(geofilenames) == 0: # Try the geoid instead: geofilenames = [geofile for geofile in self.geofiles if os.path.basename(geofile).startswith('GMODO')] elif bandtype.startswith('I'): geofilenames = [geofile for geofile in self.geofiles if os.path.basename(geofile).startswith('GITCO')] if len(geofilenames) == 0: # Try the geoid instead: geofilenames = [geofile for geofile in self.geofiles if os.path.basename(geofile).startswith('GIMGO')] elif bandtype.startswith('DNB'): geofilenames = [geofile for geofile in self.geofiles if os.path.basename(geofile).startswith('GDNBO')] else: logger.error("Band type %s not supported", bandtype) return None geofilenames = sorted(geofilenames) data = {} mask = {} h5names = ['SolarZenithAngle', 'SolarAzimuthAngle', 'SatelliteZenithAngle', 'SatelliteAzimuthAngle'] local_names = ['sunz', 'sun_azi', 'satz', 'sat_azi'] for item in local_names: data[item] = np.empty(self.shape, dtype=np.float32) mask[item] = np.zeros(self.shape, dtype=np.bool) granule_length = self.shape[0] / len(geofilenames) for index, filename in enumerate(geofilenames): swath_index = index * granule_length y0_ = swath_index y1_ = swath_index + granule_length for angle, param_name in zip(h5names, local_names): get_viewing_angle_into(filename, data[param_name][y0_:y1_, :], mask[param_name][y0_:y1_, :], angle) for item in local_names: data[item] = np.ma.array(data[item], mask=mask[item], copy=False) return (data['sunz'], data['sun_azi'], data['satz'], data['sat_azi']) def 
get_elevation(self, **kwargs): """Get elevation/topography for a given band type (M, I, or DNB) Optional arguments: bandtype = 'M', 'I', or 'DNB' Return elevation """ if 'bandtype' in kwargs: bandtype = kwargs['bandtype'] else: bandtype = 'M' if bandtype.startswith('M'): geofilenames = [geofile for geofile in self.geofiles if os.path.basename(geofile).startswith('GMTCO')] if len(geofilenames) == 0: # Try the geoid instead: geofilenames = [geofile for geofile in self.geofiles if os.path.basename(geofile).startswith('GMODO')] elif bandtype.startswith('I'): geofilenames = [geofile for geofile in self.geofiles if os.path.basename(geofile).startswith('GITCO')] if len(geofilenames) == 0: # Try the geoid instead: geofilenames = [geofile for geofile in self.geofiles if os.path.basename(geofile).startswith('GIMGO')] elif bandtype.startswith('DNB'): geofilenames = [geofile for geofile in self.geofiles if os.path.basename(geofile).startswith('GDNBO')] else: logger.error("Band type %s not supported", bandtype) return None geofilenames = sorted(geofilenames) hdata = np.empty(self.shape, dtype=np.float32) hmask = np.zeros(self.shape, dtype=np.bool) granule_length = self.shape[0] / len(geofilenames) for index, filename in enumerate(geofilenames): swath_index = index * granule_length y0_ = swath_index y1_ = swath_index + granule_length get_elevation_into(filename, hdata[y0_:y1_, :], hmask[y0_:y1_, :]) hdata = np.ma.array(hdata, mask=hmask, copy=False) return hdata def load(self, satscene, calibrate=1, time_interval=None, area=None, filename=None, **kwargs): """Read viirs SDR reflectances and Tbs from file and load it into *satscene*. """ if satscene.instrument_name != "viirs": raise ValueError("Wrong instrument, expecting viirs") if kwargs: logger.warning( "Unsupported options for viirs reader: %s", str(kwargs)) conf = ConfigParser() conf.read(os.path.join(CONFIG_PATH, satscene.fullname + ".cfg")) options = {} for option, value in conf.items(satscene.instrument_name + "-level2", raw=True): options[option] = value band_list = [s.name for s in satscene.channels] chns = satscene.channels_to_load & set(band_list) if len(chns) == 0: return if time_interval: time_start, time_end = time_interval else: time_start, time_end = satscene.time_slot, None import glob if "filename" not in options: raise IOError("No filename given, cannot load") values = {"orbit": satscene.orbit, "satname": satscene.satname, "instrument": satscene.instrument_name, "satellite": satscene.satname #"satellite": satscene.fullname } file_list = [] if filename is not None: if not isinstance(filename, (list, set, tuple)): filename = [filename] geofile_list = [] for fname in filename: if os.path.basename(fname).startswith("SV"): file_list.append(fname) elif os.path.basename(fname).startswith("G"): geofile_list.append(fname) else: logger.info("Unrecognized SDR file: %s", fname) if file_list: directory = os.path.dirname(file_list[0]) if geofile_list: geodirectory = os.path.dirname(geofile_list[0]) if not file_list: filename_tmpl = strftime( satscene.time_slot, options["filename"]) % values directory = strftime(satscene.time_slot, options["dir"]) % values if not os.path.exists(directory): #directory = globify(options["dir"]) % values directory = globify( strftime(satscene.time_slot, options["dir"])) % values logger.debug( "Looking for files in directory " + str(directory)) directories = glob.glob(directory) if len(directories) > 1: raise IOError("More than one directory for npp scene... " + "\nSearch path = %s\n\tPlease check npp.cfg file!" 
% directory) elif len(directories) == 0: raise IOError("No directory found for npp scene. " + "\nSearch path = %s\n\tPlease check npp.cfg file!" % directory) else: directory = directories[0] file_list = glob.glob(os.path.join(directory, filename_tmpl)) # Only take the files in the interval given: logger.debug("Number of files before segment selection: " + str(len(file_list))) for fname in file_list: if os.path.basename(fname).startswith("SVM14"): logger.debug("File before segmenting: " + os.path.basename(fname)) file_list = _get_swathsegment( file_list, time_start, time_end, area) logger.debug("Number of files after segment selection: " + str(len(file_list))) for fname in file_list: if os.path.basename(fname).startswith("SVM14"): logger.debug("File after segmenting: " + os.path.basename(fname)) logger.debug("Template = " + str(filename_tmpl)) # 22 VIIRS bands (16 M-bands + 5 I-bands + DNB) if len(file_list) % 22 != 0: logger.warning("Number of SDR files is not divisible by 22!") if len(file_list) == 0: logger.debug( "File template = " + str(os.path.join(directory, filename_tmpl))) raise IOError("No VIIRS SDR file matching!: " + "Start time = " + str(time_start) + " End time = " + str(time_end)) geo_dir_string = options.get("geo_dir", None) if geo_dir_string: geodirectory = strftime( satscene.time_slot, geo_dir_string) % values else: geodirectory = directory logger.debug("Geodir = " + str(geodirectory)) geofile_list = [] geo_filenames_string = options.get("geo_filenames", None) if geo_filenames_string: geo_filenames_tmpl = strftime(satscene.time_slot, geo_filenames_string) % values geofile_list = glob.glob(os.path.join(geodirectory, geo_filenames_tmpl)) logger.debug("List of geo-files: " + str(geofile_list)) # Only take the files in the interval given: geofile_list = _get_swathsegment( geofile_list, time_start, time_end) logger.debug("List of geo-files (after time interval selection): " + str(geofile_list)) filenames = [os.path.basename(s) for s in file_list] glob_info = {} self.geofiles = geofile_list logger.debug("Channels to load: " + str(satscene.channels_to_load)) for chn in satscene.channels_to_load: # Take only those files in the list matching the band: # (Filename starts with 'SV' and then the band-name) fnames_band = [] try: fnames_band = [s for s in filenames if s.find('SV' + chn) >= 0] except TypeError: logger.warning('Band frequency not available from VIIRS!') logger.info('Asking for channel' + str(chn) + '!') if len(fnames_band) == 0: continue filename_band = [ os.path.join(directory, fname) for fname in fnames_band] logger.debug("fnames_band = " + str(filename_band)) band = ViirsBandData(filename_band, calibrate=calibrate).read() logger.debug('Band id = ' + band.band_id) # If the list of geo-files is not specified in the config file or # some of them do not exist, we rely on what is written in the # band-data metadata header: if len(geofile_list) < len(filename_band): geofilenames_band = [os.path.join(geodirectory, gfile) for gfile in band.geo_filenames] logger.debug("Geolocation filenames for band: " + str(geofilenames_band)) # Check if the geo-filenames found from the metadata actually # exist and issue a warning if they do not: for geofilename in geofilenames_band: if not os.path.exists(geofilename): logger.warning("Geo file defined in metadata header " + "does not exist: " + str(geofilename)) elif band.band_id.startswith('M'): geofilenames_band = [geofile for geofile in geofile_list if os.path.basename(geofile).startswith('GMTCO')] if len(geofilenames_band) != 
len(filename_band): # Try the geoid instead: num_geo = len(geofilenames_band) geofilenames_band = [geofile for geofile in geofile_list if os.path.basename(geofile).startswith('GMODO')] if len(geofilenames_band) != len(filename_band): num_geo = max(num_geo, len(geofilenames_band)) num_band = len(filename_band) raise IOError("Incomplete dataset. " + "Found %d geolocation files" % num_geo + " and %d %s files" % (num_band, band.band_id)) elif band.band_id.startswith('I'): geofilenames_band = [geofile for geofile in geofile_list if os.path.basename(geofile).startswith('GITCO')] if len(geofilenames_band) != len(filename_band): # Try the geoid instead: num_geo = len(geofilenames_band) geofilenames_band = [geofile for geofile in geofile_list if os.path.basename(geofile).startswith('GIMGO')] if len(geofilenames_band) != len(filename_band): num_geo = max(num_geo, len(geofilenames_band)) num_band = len(filename_band) raise IOError("Incomplete dataset. " + "Found %d geolocation files" % num_geo + " and %d %s files" % (num_band, band.band_id)) elif band.band_id.startswith('D'): geofilenames_band = [geofile for geofile in geofile_list if os.path.basename(geofile).startswith('GDNBO')] if len(geofilenames_band) != len(filename_band): num_geo = len(geofilenames_band) num_band = len(filename_band) raise IOError("Incomplete dataset. " + "Found %d GDNBO files " % num_geo + "and %d %s files" % (num_band, band.band_id)) band.read_lonlat(geofilepaths=geofilenames_band) if not band.band_desc: logger.warning('Band name = ' + band.band_id) raise AttributeError('Band description not supported!') satscene[chn].data = band.data satscene[chn].info['units'] = band.units satscene[chn].info['band_id'] = band.band_id satscene[chn].info['start_time'] = band.begin_time satscene[chn].info['end_time'] = band.end_time if chn in ['M01', 'M02', 'M03', 'M04', 'M05', 'M06', 'M07', 'M08', 'M09', 'M10', 'M11', 'I01', 'I02', 'I03']: satscene[chn].info['sun_zen_correction_applied'] = True # We assume the same geolocation should apply to all M-bands! 
# ...and the same to all I-bands: from pyresample import geometry satscene[chn].area = geometry.SwathDefinition( lons=np.ma.masked_where(band.data.mask, band.geolocation.longitudes, copy=True), lats=np.ma.masked_where(band.data.mask, band.geolocation.latitudes, copy=True)) height = np.ma.masked_where(band.data.mask, band.geolocation.elevation, copy=True) area_name = ("swath_" + satscene.fullname + "_" + str(satscene.time_slot) + "_" + str(satscene[chn].data.shape) + "_" + band.band_uid) satscene[chn].area.area_id = area_name satscene[chn].area_id = area_name if self.shape is None: self.shape = band.data.shape # except ImportError: # satscene[chn].area = None # satscene[chn].lat = np.ma.array(band.latitude, mask=band.data.mask) # satscene[chn].lon = np.ma.array(band.longitude, mask=band.data.mask) # if 'institution' not in glob_info: ## glob_info['institution'] = band.global_info['N_Dataset_Source'] # if 'mission_name' not in glob_info: ## glob_info['mission_name'] = band.global_info['Mission_Name'] ViirsGeolocationData.clear_cache() # Compulsory global attribudes satscene.info["title"] = (satscene.satname.capitalize() + " satellite, " + satscene.instrument_name.capitalize() + " instrument.") if 'institution' in glob_info: satscene.info["institution"] = glob_info['institution'] if 'mission_name' in glob_info: satscene.add_to_history(glob_info['mission_name'] + " VIIRS SDR read by mpop") else: satscene.add_to_history("NPP/JPSS VIIRS SDR read by mpop") satscene.info["references"] = "No reference." satscene.info["comments"] = "No comment." satscene.info["start_time"] = min([chn.info["start_time"] for chn in satscene if chn.is_loaded()]) satscene.info["end_time"] = max([chn.info["end_time"] for chn in satscene if chn.is_loaded()]) def get_lonlat_into(filename, out_lons, out_lats, out_height, out_mask): """Read lon,lat from hdf5 file""" logger.debug("Geo File = " + filename) md = HDF5MetaData(filename).read() h5f = h5py.File(filename, 'r') for key in md.get_data_keys(): if key.endswith("Latitude"): h5f[key].read_direct(out_lats) out_mask[:] = out_lats < -999 if key.endswith("Longitude"): h5f[key].read_direct(out_lons) if key.endswith("Height"): h5f[key].read_direct(out_height) h5f.close() def get_elevation_into(filename, out_height, out_mask): """Read elevation/height from hdf5 file""" logger.debug("Geo File = " + filename) md = HDF5MetaData(filename).read() h5f = h5py.File(filename, 'r') for key in md.get_data_keys(): if key.endswith("Height"): h5f[key].read_direct(out_height) out_mask[:] = out_height < -999 h5f.close() def get_viewing_angle_into(filename, out_val, out_mask, param): """Read a sun-sat viewing angle from hdf5 file""" logger.debug("Sun-Sat viewing geometry = " + filename) if param not in ['SolarZenithAngle', 'SolarAzimuthAngle', 'SatelliteZenithAngle', 'SatelliteAzimuthAngle']: logger.warning('Viewing geometry parameter %s not supported!', param) return None md = HDF5MetaData(filename).read() h5f = h5py.File(filename, 'r') for key in md.get_data_keys(): if key.endswith('/' + param): h5f[key].read_direct(out_val) out_mask[:] = out_val < -999 h5f.close() def globify(filename): filename = filename.replace("%Y", "????") filename = filename.replace("%m", "??") filename = filename.replace("%d", "??") filename = filename.replace("%H", "??") filename = filename.replace("%M", "??") filename = filename.replace("%S", "??") return filename def _get_times_from_npp(filename): bname = os.path.basename(filename) sll = bname.split('_') start_time = datetime.strptime(sll[2] + sll[3][:-1], 
"d%Y%m%dt%H%M%S") end_time = datetime.strptime(sll[2] + sll[4][:-1], "d%Y%m%de%H%M%S") if end_time < start_time: end_time += timedelta(days=1) return start_time, end_time def _get_swathsegment(filelist, time_start, time_end=None, area=None): """ Return only the granule files for the time interval or area. """ if area is not None: from trollsched.spherical import SphPolygon from trollsched.boundary import AreaBoundary lons, lats = area.get_boundary_lonlats() area_boundary = AreaBoundary((lons.side1, lats.side1), (lons.side2, lats.side2), (lons.side3, lats.side3), (lons.side4, lats.side4)) area_boundary.decimate(500) contour_poly = area_boundary.contour_poly segment_files = [] for filename in filelist: timetup = _get_times_from_npp(filename) # Search for multiple granules using an area if area is not None: md = NPPMetaData(filename) md.read() coords = np.vstack(md.get_ring_lonlats()) poly = SphPolygon(np.deg2rad(coords)) if poly.intersection(contour_poly) is not None: segment_files.append(filename) continue # Search for single granule using time start if time_end is None: if time_start >= timetup[0] and time_start <= timetup[1]: segment_files.append(filename) continue # search for multiple granules else: # check that granule start time is inside interval if timetup[0] >= time_start and timetup[0] <= time_end: segment_files.append(filename) continue # check that granule end time is inside interval if timetup[1] >= time_start and timetup[1] <= time_end: segment_files.append(filename) continue segment_files.sort() return segment_files mpop-1.5.0/mpop/satin/xmlformat.py000066400000000000000000000137511317160620000171470ustar00rootroot00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright (c) 2012 Martin Raspaud # Author(s): # Martin Raspaud # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # You should have received a copy of the GNU General Public License # along with this program. If not, see . """Reads a format from an xml file to create dtypes and scaling factor arrays. """ from xml.etree.ElementTree import ElementTree import numpy as np VARIABLES = {} TYPEC = {"boolean": ">i1", "integer2": ">i2", "integer4": ">i4", "uinteger2": ">u2", "uinteger4": ">u4", } def process_delimiter(elt, ascii=False): """Process a 'delimiter' tag. """ del elt, ascii def process_field(elt, ascii=False): """Process a 'field' tag. """ # NOTE: if there is a variable defined in this field and it is different # from the default, we could change the value and restart. 
scale = np.uint8(1) if elt.get("type") == "bitfield" and not ascii: current_type = ">u" + str(int(elt.get("length")) / 8) scale = np.dtype(current_type).type(1) elif(elt.get("length") is not None): if ascii: add = 33 else: add = 0 current_type = "S" + str(int(elt.get("length")) + add) else: current_type = TYPEC[elt.get("type")] try: scale = (10 / float(elt.get("scaling-factor", "10").replace("^", "e"))) except ValueError: scale = (10 / np.array(elt.get("scaling-factor").replace("^", "e").split(","), dtype=np.float)) return ((elt.get("name"), current_type, scale)) def process_array(elt, ascii=False): """Process an 'array' tag. """ del ascii chld = elt.getchildren() if len(chld) > 1: print "stop" raise ValueError() chld = chld[0] try: name, current_type, scale = CASES[chld.tag](chld) size = None except ValueError: name, current_type, size, scale = CASES[chld.tag](chld) del name myname = elt.get("name") or elt.get("label") if elt.get("length").startswith("$"): length = int(VARIABLES[elt.get("length")[1:]]) else: length = int(elt.get("length")) if size is not None: return (myname, current_type, (length, ) + size, scale) else: return (myname, current_type, (length, ), scale) CASES = {"delimiter": process_delimiter, "field": process_field, "array": process_array, } def to_dtype(val): """Parse *val* to return a dtype. """ return np.dtype([i[:-1] for i in val]) def to_scaled_dtype(val): """Parse *val* to return a dtype. """ res = [] for i in val: if i[1].startswith("S"): res.append((i[0], i[1]) + i[2:-1]) else: try: res.append((i[0], i[-1].dtype) + i[2:-1]) except AttributeError: res.append((i[0], type(i[-1])) + i[2:-1]) return np.dtype(res) def to_scales(val): """Parse *val* to return an array of scale factors. """ res = [] for i in val: if len(i) == 3: res.append((i[0], type(i[2]))) else: try: res.append((i[0], i[3].dtype, i[2])) except AttributeError: res.append((i[0], type(i[3]), i[2])) dtype = np.dtype(res) scales = np.zeros((1, ), dtype=dtype) for i in val: try: scales[i[0]] = i[-1] except ValueError: scales[i[0]] = np.repeat(np.array(i[-1]), i[2][1]).reshape(i[2]) return scales def parse_format(xml_file): """Parse the xml file to create types, scaling factor types, and scales. """ tree = ElementTree() tree.parse(xml_file) for param in tree.find("parameters").getchildren(): VARIABLES[param.get("name")] = param.get("value") types_scales = {} for prod in tree.find("product"): ascii = (prod.tag in ["mphr", "sphr"]) res = [] for i in prod: lres = CASES[i.tag](i, ascii) if lres is not None: res.append(lres) types_scales[(prod.tag, int(prod.get("subclass")))] = res types = {} stypes = {} scales = {} for key, val in types_scales.items(): types[key] = to_dtype(val) stypes[key] = to_scaled_dtype(val) scales[key] = to_scales(val) return types, stypes, scales def _apply_scales(array, scales, dtype): """Apply scales to the array. """ new_array = np.empty(array.shape, dtype) for i in array.dtype.names: try: new_array[i] = array[i] * scales[i] except TypeError: if np.all(scales[i] == 1): new_array[i] = array[i] else: raise return new_array class XMLFormat(object): """XMLFormat object. """ def __init__(self, filename): self.types, self.stypes, self.scales = parse_format(filename) self.translator = {} for key, val in self.types.items(): self.translator[val] = (self.scales[key], self.stypes[key]) def dtype(self, key): """Get the dtype for the format object. """ return self.types[key] def apply_scales(self, array): """Apply scales to *array*. 
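        A minimal sketch of the intended use, assuming ``format.xml`` is an
        XML format description and ``("mphr", 2)`` one of the record keys it
        defines (both names are hypothetical here):

            fmt = XMLFormat("format.xml")
            raw = np.fromfile("record.bin", dtype=fmt.dtype(("mphr", 2)))
            scaled = fmt.apply_scales(raw)
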
""" return _apply_scales(array, *self.translator[array.dtype]) if __name__ == '__main__': pass mpop-1.5.0/mpop/satout/000077500000000000000000000000001317160620000147565ustar00rootroot00000000000000mpop-1.5.0/mpop/satout/__init__.py000066400000000000000000000015361317160620000170740ustar00rootroot00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright (c) 2009-2010. # SMHI, # Folkborgsvägen 1, # Norrköping, # Sweden # Author(s): # Martin Raspaud # This file is part of mpop. # mpop is free software: you can redistribute it and/or modify it under the # terms of the GNU General Public License as published by the Free Software # Foundation, either version 3 of the License, or (at your option) any later # version. # mpop is distributed in the hope that it will be useful, but WITHOUT ANY # WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR # A PARTICULAR PURPOSE. See the GNU General Public License for more details. # You should have received a copy of the GNU General Public License along with # mpop. If not, see . """Satout Package initializer. """ mpop-1.5.0/mpop/satout/cfscene.py000066400000000000000000000563621317160620000167520ustar00rootroot00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright (c) 2010, 2011, 2012, 2014, 2016. # Author(s): # Kristian Rune Larssen # Adam Dybbroe # Martin Raspaud # Esben S. Nielsen # This file is part of mpop. # mpop is free software: you can redistribute it and/or modify it under the # terms of the GNU General Public License as published by the Free Software # Foundation, either version 3 of the License, or (at your option) any later # version. # mpop is distributed in the hope that it will be useful, but WITHOUT ANY # WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR # A PARTICULAR PURPOSE. See the GNU General Public License for more details. # You should have received a copy of the GNU General Public License along with # mpop. If not, see . """The :mod:`satout.cfscene` module provide a proxy class and utilites for conversion of mpop scene to cf conventions. """ import numpy as np import numpy.ma as ma from netCDF4 import date2num from mpop.channel import Channel from mpop.satout.netcdf4 import netcdf_cf_writer import logging LOG = logging.getLogger('cfscene') #CF_DATA_TYPE = np.int16 CF_FLOAT_TYPE = np.float64 TIME_UNITS = "seconds since 1970-01-01 00:00:00" class InfoObject(object): """Simple data and info container. """ def __init__(self): self.info = {} self.data = None class CFScene(object): """Scene proxy class for cf conventions. The constructor should be called with the *scene* to transform as argument. 
""" def __init__(self, scene, dtype=np.int16, band_axis=2, area_aggregation=True, time_dimension=False): if not issubclass(dtype, np.integer): raise TypeError('Only integer saving allowed for CF data') time_axis = -1 if time_dimension: time_axis = 0 self.info = scene.info.copy() if "time" in self.info: del self.info["time"] CF_DATA_TYPE = dtype # Other global attributes self.info["Conventions"] = "CF-1.5" #self.info["platform"] = scene.satname + "-" + str(scene.number) self.info["platform"] = scene.fullname self.info["instrument"] = scene.instrument_name if scene.variant: self.info["service"] = scene.variant if scene.orbit: self.info["orbit"] = scene.orbit self.time = InfoObject() self.time.data = date2num(scene.time_slot, TIME_UNITS) if time_dimension: var_dim_names = ("time", ) else: var_dim_names = () self.time.info = {"var_name": "time", "var_data": self.time.data, "var_dim_names": var_dim_names, "long_name": "Nominal time of the image", "standard_name": "time", "units": TIME_UNITS} grid_mappings = [] areas = [] area = None area_units = [] counter = 0 gm_counter = 0 area_counter = 0 for chn in scene: if not chn.is_loaded(): continue if not isinstance(chn, Channel): setattr(self, chn.name, chn) continue fill_value = np.iinfo(CF_DATA_TYPE).min if ma.count_masked(chn.data) == chn.data.size: # All data is masked data = np.ones(chn.data.shape, dtype=CF_DATA_TYPE) * fill_value scale = 1 offset = 0 else: chn_max = chn.data.max() chn_min = chn.data.min() scale = ((chn_max - chn_min) / (2 ** np.iinfo(CF_DATA_TYPE).bits - 2.0)) # Handle the case where all data has the same value. if scale == 0: scale = 1 if np.iinfo(CF_DATA_TYPE).kind == 'i': # Signed data type offset = (chn_max + chn_min) / 2.0 else: # Unsigned data type offset = chn_min - scale if isinstance(chn.data, np.ma.MaskedArray): data = ( (chn.data.data - offset) / scale).astype(CF_DATA_TYPE) data[chn.data.mask] = fill_value else: data = ((chn.data - offset) / scale).astype(CF_DATA_TYPE) if time_dimension: data = np.ma.expand_dims(data, time_axis) elif area_aggregation: data = np.ma.expand_dims(data, band_axis) # it's a grid mapping try: if chn.area.proj_dict not in grid_mappings: # create new grid mapping grid_mappings.append(chn.area.proj_dict) area = InfoObject() area.data = 0 area.info = {"var_name": "grid_mapping_" + str(gm_counter), "var_data": area.data, "var_dim_names": ()} area.info.update(proj2cf(chn.area.proj_dict)) area.info.setdefault("units", "m") setattr(self, area.info["var_name"], area) gm_counter += 1 else: # use an existing grid mapping str_gmc = str(grid_mappings.index(chn.area.proj_dict)) area = InfoObject() area.data = 0 area.info = {"var_name": "grid_mapping_" + str_gmc, "var_data": area.data, "var_dim_names": ()} area.info.update(proj2cf(chn.area.proj_dict)) area.info.setdefault("units", "m") if(chn.area in areas): str_arc = str(areas.index(chn.area)) xy_names = ["y" + str_arc, "x" + str_arc] else: areas.append(chn.area) str_arc = str(area_counter) area_counter += 1 x__ = InfoObject() chn.area.get_proj_coords(cache=True) x__.data = chn.area.projection_x_coords[0, :] x__.info = {"var_name": "x" + str_arc, "var_data": x__.data, "var_dim_names": ("x" + str_arc,), "units": "rad", "standard_name": "projection_x_coordinate", "long_name": "x coordinate of projection"} if area.info["grid_mapping_name"] == "geostationary": x__.data /= float( area.info["perspective_point_height"]) xpix = np.arange(len(x__.data), dtype=np.uint16) xsca = ((x__.data[-1] - x__.data[0]) / (xpix[-1] + xpix[0])) xoff = x__.data[0] - xpix[0] * 
xsca x__.data = xpix x__.info["var_data"] = xpix x__.info["scale_factor"] = xsca x__.info["add_offset"] = xoff setattr(self, x__.info["var_name"], x__) y__ = InfoObject() y__.data = chn.area.projection_y_coords[:, 0] y__.info = {"var_name": "y" + str_arc, "var_data": y__.data, "var_dim_names": ("y" + str_arc,), "units": "rad", "standard_name": "projection_y_coordinate", "long_name": "y coordinate of projection"} if area.info["grid_mapping_name"] == "geostationary": y__.data /= float( area.info["perspective_point_height"]) ypix = np.arange(len(y__.data), dtype=np.uint16) ysca = ((y__.data[-1] - y__.data[0]) / (ypix[-1] + ypix[0])) yoff = y__.data[0] - ypix[0] * ysca y__.data = ypix y__.info["var_data"] = ypix y__.info["scale_factor"] = ysca y__.info["add_offset"] = yoff setattr(self, y__.info["var_name"], y__) xy_names = [y__.info["var_name"], x__.info["var_name"]] # It's not a grid mapping, go for lons and lats except AttributeError: area = None if(chn.area in areas): str_arc = str(areas.index(chn.area)) coordinates = ("lat" + str_arc + " " + "lon" + str_arc) else: areas.append(chn.area) str_arc = str(area_counter) area_counter += 1 lons = InfoObject() try: lons.data = chn.area.lons[:] except AttributeError: lons.data = scene.area.lons[:] lons.info = {"var_name": "lon" + str_arc, "var_data": lons.data, "var_dim_names": ("y" + str_arc, "x" + str_arc), "_FillValue": lons.data.fill_value, "units": "degrees east", "long_name": "longitude coordinate", "standard_name": "longitude"} if lons.data is not None: setattr(self, lons.info["var_name"], lons) lats = InfoObject() try: lats.data = chn.area.lats[:] except AttributeError: lats.data = scene.area.lats[:] lats.info = {"var_name": "lat" + str_arc, "var_data": lats.data, "var_dim_names": ("y" + str_arc, "x" + str_arc), "_FillValue": lats.data.fill_value, "units": "degrees north", "long_name": "latitude coordinate", "standard_name": "latitude"} if lats.data is not None: setattr(self, lats.info["var_name"], lats) if lats.data is not None and lons.data is not None: coordinates = (lats.info["var_name"] + " " + lons.info["var_name"]) xy_names = ["y" + str_arc, "x" + str_arc] if (area_aggregation and not time_dimension and (chn.area, chn.info['units']) in area_units): str_cnt = str(area_units.index((chn.area, chn.info['units']))) # area has been used before band = getattr(self, "band" + str_cnt) # data band.data = np.concatenate((band.data, data), axis=band_axis) band.info["var_data"] = band.data # bandname bandname = getattr(self, "bandname" + str_cnt) bandname.data = np.concatenate((bandname.data, np.array([chn.name]))) bandname.info["var_data"] = bandname.data # offset off_attr = np.concatenate((off_attr, np.array([offset]))) band.info["add_offset"] = off_attr # scale sca_attr = np.concatenate((sca_attr, np.array([scale]))) band.info["scale_factor"] = sca_attr # wavelength bounds bwl = getattr(self, "wl_bnds" + str_cnt) bwl.data = np.vstack((bwl.data, np.array([chn.wavelength_range[0], chn.wavelength_range[2]]))) bwl.info["var_data"] = bwl.data # nominal_wavelength nwl = getattr(self, "nominal_wavelength" + str_cnt) nwl.data = np.concatenate((nwl.data, np.array([chn.wavelength_range[1]]))) nwl.info["var_data"] = nwl.data else: # first encounter of this area and unit str_cnt = str(counter) counter += 1 area_units.append((chn.area, chn.info["units"])) # data band = InfoObject() band.data = data dim_names = xy_names if time_dimension: dim_names.insert(time_axis, 'time') elif area_aggregation: dim_names.insert(band_axis, 'band' + str_cnt) band.info = 
{"var_name": "Image" + str_cnt, "var_data": band.data, 'var_dim_names': dim_names, "_FillValue": fill_value, "long_name": "Band data", "units": chn.info["units"], "resolution": chn.resolution} # bandname var_dim_names = ("band" + str_cnt,) bandname = InfoObject() bandname.data = np.array([chn.name], 'O') bandname.info = {"var_name": "band" + str_cnt, "var_data": bandname.data, "var_dim_names": var_dim_names, "standard_name": "band_name"} setattr(self, "bandname" + str_cnt, bandname) # offset off_attr = np.array([offset]) band.info["add_offset"] = off_attr # scale sca_attr = np.array([scale]) band.info["scale_factor"] = sca_attr # wavelength bounds wlbnds = InfoObject() wlbnds.data = np.array([[chn.wavelength_range[0], chn.wavelength_range[2]]]) wlbnds.info = {"var_name": "wl_bnds" + str_cnt, "var_data": wlbnds.data, "var_dim_names": ("band" + str_cnt, "nv")} setattr(self, wlbnds.info["var_name"], wlbnds) # nominal_wavelength nomwl = InfoObject() nomwl.data = np.array([chn.wavelength_range[1]]) nomwl.info = {"var_name": "nominal_wavelength" + str_cnt, "var_data": nomwl.data, "var_dim_names": ("band" + str_cnt,), "standard_name": "radiation_wavelength", "units": "um", "bounds": wlbnds.info["var_name"]} setattr(self, "nominal_wavelength" + str_cnt, nomwl) # grid mapping or lon lats if area is not None: band.info["grid_mapping"] = area.info["var_name"] else: band.info["coordinates"] = coordinates # Add other (custom) attributes: # Only scalar attributes! for key in chn.info.keys(): if key not in band.info.keys(): if (type(chn.info[key]) == str or type(chn.info[key]) == int or type(chn.info[key]) == float): band.info[key] = chn.info[key] setattr(self, "band" + str_cnt, band) for i, area_unit in enumerate(area_units): # compute data reduction fill_value = np.iinfo(CF_DATA_TYPE).min band = getattr(self, "band" + str(i)) valid_min, valid_max = band.data.min(), band.data.max() band.info["valid_range"] = np.array([valid_min, valid_max]), def save(self, filename, *args, **kwargs): return netcdf_cf_writer(filename, self, kwargs.get("compression", True)) def proj2cf(proj_dict): """Return the cf grid mapping from a proj dict. Description of the cf grid mapping: http://cf-pcmdi.llnl.gov/documents/cf-conventions/1.4/ch05s06.html Table of the available grid mappings: http://cf-pcmdi.llnl.gov/documents/cf-conventions/1.4/apf.html """ cases = {"geos": geos2cf, "stere": stere2cf, "merc": merc2cf, "aea": aea2cf, "laea": laea2cf, "ob_tran": obtran2cf, "eqc": eqc2cf, } return cases[proj_dict["proj"]](proj_dict) def geos2cf(proj_dict): """Return the cf grid mapping from a geos proj dict. """ return {"grid_mapping_name": "geostationary", "latitude_of_projection_origin": 0.0, "longitude_of_projection_origin": eval(proj_dict["lon_0"]), "semi_major_axis": eval(proj_dict["a"]), "semi_minor_axis": eval(proj_dict["b"]), "perspective_point_height": eval(proj_dict["h"]) } def eqc2cf(proj_dict): """Return the cf grid mapping from a eqc proj dict. However, please be aware that this is not an official CF projection. See http://cf-pcmdi.llnl.gov/documents/cf-conventions/1.4/apf.html. """ return {"grid_mapping_name": "equirectangular", "latitude_of_true_scale": eval(proj_dict.get("lat_ts", "0")), "latitude_of_projection_origin": eval(proj_dict["lat_0"]), "longitude_of_projection_origin": eval(proj_dict["lon_0"]), "false_easting": eval(proj_dict.get("x_0", "0")), "false_northing": eval(proj_dict.get("y_0", "0")) } def stere2cf(proj_dict): """Return the cf grid mapping from a stereographic proj dict. 
""" return {"grid_mapping_name": "stereographic", "latitude_of_projection_origin": eval(proj_dict["lat_0"]), "longitude_of_projection_origin": eval(proj_dict["lon_0"]), "scale_factor_at_projection_origin": eval( proj_dict.get("x_0", "1.0")), "false_easting": eval(proj_dict.get("x_0", "0")), "false_northing": eval(proj_dict.get("y_0", "0")) } def merc2cf(proj_dict): """Return the cf grid mapping from a mercator proj dict. """ raise NotImplementedError( "CF grid mapping from a PROJ.4 mercator projection is not implemented") def aea2cf(proj_dict): """Return the cf grid mapping from a Albers Equal Area proj dict. """ #standard_parallels = [] # for item in ['lat_1', 'lat_2']: # if item in proj_dict: # standard_parallels.append(eval(proj_dict[item])) if 'lat_2' in proj_dict: standard_parallel = [eval(proj_dict['lat_1']), eval(proj_dict['lat_2'])] else: standard_parallel = [eval(proj_dict['lat_1'])] lat_0 = 0.0 if 'lat_0' in proj_dict: lat_0 = eval(proj_dict['lat_0']) x_0 = 0.0 if 'x_0' in proj_dict: x_0 = eval(proj_dict['x_0']) y_0 = 0.0 if 'y_0' in proj_dict: y_0 = eval(proj_dict['y_0']) retv = {"grid_mapping_name": "albers_conical_equal_area", "standard_parallel": standard_parallel, "latitude_of_projection_origin": lat_0, "longitude_of_central_meridian": eval(proj_dict["lon_0"]), "false_easting": x_0, "false_northing": y_0 } retv = build_dict("albers_conical_equal_area", proj_dict, standard_parallel=["lat_1", "lat_2"], latitude_of_projection_origin="lat_0", longitude_of_central_meridian="lon_0", false_easting="x_0", false_northing="y_0") return retv def laea2cf(proj_dict): """Return the cf grid mapping from a Lambert azimuthal equal-area proj dict. http://trac.osgeo.org/gdal/wiki/NetCDF_ProjectionTestingStatus """ x_0 = eval(proj_dict.get('x_0', '0.0')) y_0 = eval(proj_dict.get('y_0', '0.0')) retv = {"grid_mapping_name": "lambert_azimuthal_equal_area", "longitude_of_projection_origin": eval(proj_dict["lon_0"]), "latitude_of_projection_origin": eval(proj_dict["lat_0"]), "false_easting": x_0, "false_northing": y_0 } retv = build_dict("lambert_azimuthal_equal_area", proj_dict, longitude_of_projection_origin="lon_0", latitude_of_projection_origin="lat_0", false_easting="x_0", false_northing="y_0") return retv def obtran2cf(proj_dict): """Return a grid mapping from a rotated pole grid (General Oblique Transformation projection) proj dict. Please be aware this is not yet supported by CF! 
""" LOG.warning("The General Oblique Transformation " + "projection is not CF compatible yet...") x_0 = eval(proj_dict.get('x_0', '0.0')) y_0 = eval(proj_dict.get('y_0', '0.0')) retv = {"grid_mapping_name": "general_oblique_transformation", "longitude_of_projection_origin": eval(proj_dict["lon_0"]), "grid_north_pole_latitude": eval(proj_dict["o_lat_p"]), "grid_north_pole_longitude": eval(proj_dict["o_lon_p"]), "false_easting": x_0, "false_northing": y_0 } retv = build_dict("general_oblique_transformation", proj_dict, longitude_of_projection_origin="lon_0", grid_north_pole_latitude="o_lat_p", grid_north_pole_longitude="o_lon_p", false_easting="x_0", false_northing="y_0") return retv def build_dict(proj_name, proj_dict, **kwargs): new_dict = {} new_dict["grid_mapping_name"] = proj_name for key, val in kwargs.items(): if isinstance(val, (list, tuple)): new_dict[key] = [eval(proj_dict[x]) for x in val if x in proj_dict] elif val in proj_dict: new_dict[key] = eval(proj_dict[val]) # add a, b, rf and/or ellps if "a" in proj_dict: new_dict["semi_major_axis"] = eval(proj_dict["a"]) if "b" in proj_dict: new_dict["semi_minor_axis"] = eval(proj_dict["b"]) if "rf" in proj_dict: new_dict["inverse_flattening"] = eval(proj_dict["rf"]) if "ellps" in proj_dict: new_dict["ellipsoid"] = proj_dict["ellps"] return new_dict def aeqd2cf(proj_dict): return build_dict("azimuthal_equidistant", proj_dict, standard_parallel=["lat_1", "lat_2"], latitude_of_projection_origin="lat_0", longitude_of_central_meridian="lon_0", false_easting="x_0", false_northing="y_0") mpop-1.5.0/mpop/satout/netcdf4.py000066400000000000000000000213551317160620000166650ustar00rootroot00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright (c) 2010, 2011, 2012, 2014, 2016. # Author(s): # Kristian Rune Larssen # Adam Dybbroe # Martin Raspaud # Esben S. Nielsen # This file is part of mpop. # mpop is free software: you can redistribute it and/or modify it under the # terms of the GNU General Public License as published by the Free Software # Foundation, either version 3 of the License, or (at your option) any later # version. # mpop is distributed in the hope that it will be useful, but WITHOUT ANY # WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR # A PARTICULAR PURPOSE. See the GNU General Public License for more details. # You should have received a copy of the GNU General Public License along with # mpop. If not, see . """mpop netcdf4 writer interface. """ __revision__ = 0.1 import numpy as np import logging logger = logging.getLogger(__name__) def save(scene, filename, compression=True, dtype=np.int16, band_axis=2, area_aggregation=True, time_dimension=False): """Saves the scene as a NetCDF4 file, with CF conventions. *band_axis* gives which axis to use for the band dimension. For example, use band_axis=0 to get dimensions like (band, y, x). *area_aggregation* determines if bands on the same area should be gathered together or not. Default is True, meaning aggregation. If *area_aggregation* is False, the band_axis gets obsolete. Area aggregation is currently not possible when using *time_dimension*. *time_dimension* is a boolean and if True a time axis (dimension=1) is added in front, like (time, y, x), and the band_axis is omitted. Thus each data/band go in a separate dataset. 
""" from mpop.satout.cfscene import CFScene scene.add_to_history("Saved as netcdf4/cf by pytroll/mpop.") return netcdf_cf_writer(filename, CFScene(scene, dtype, band_axis, area_aggregation, time_dimension), compression=compression) class WriterDimensionError(Exception): """ Basic writer exception """ pass def attribute_dispenser(info): """ returns valid attribute key value pairs (for cosmetic reasons, sorted is better than random)""" for k, v in sorted(info.iteritems()): if k.startswith('var_'): continue yield (k, v) def variable_dispenser(root_object, object_list): """ Assembles a list of meta info objects """ # Handle members with info objects for v in dir(root_object): obj = getattr(root_object, v) if callable(obj): continue # Try to find members with 'info' attribute defined # if in list members search through list to find # elements with the 'info' attribute defined try: # test for info attribute on list elements for under_obj in obj: try: under_obj.info variable_dispenser(under_obj, object_list) except AttributeError: pass except TypeError: try: # test for info attribute scalar members obj.info variable_dispenser(obj, object_list) except AttributeError: pass # Handle local info objects try: # handle output of member variables without info attribute if 'var_children' in root_object.info: object_list.extend(root_object.info['var_children']) # handle object with info attribute object_list.append(root_object.info) except AttributeError: pass def find_tag(info_list, tag): """ Iterates through info objects to find specific tag. Returns list of matching values. """ tag_data = [] for info in info_list: try: tag_data.append(info[tag]) except KeyError: pass return tag_data def find_FillValue_tags(info_list): """ Iterates through info objects to find _FillValue tags for var_names """ fill_value_dict = {} for info in info_list: try: fill_value_dict[info['var_name']] = info['_FillValue'] except KeyError: pass try: fill_value_dict[info['var_name']] = None except KeyError: pass return fill_value_dict def find_info(info_list, tag): """ Iterates through info objects to find specific tag. Return list of matching info objects. """ tag_info_objects = [] for info in info_list: if tag in info: tag_info_objects.append(info) return tag_info_objects def dtype(element): """ Return the dtype of an array or the type of the element. """ if hasattr(element, "dtype"): return element.dtype else: return type(element) def shape(element): """ Return the shape of an array or empty tuple if not an array. """ if hasattr(element, "shape"): return element.shape else: return () def netcdf_cf_writer(filename, root_object, compression=True): """ Write data to file to netcdf file. """ from netCDF4 import Dataset rootgrp = Dataset(filename, 'w') try: info_list = [] variable_dispenser(root_object, info_list) # find available dimensions dim_names = find_tag(info_list, 'var_dim_names') # go through all cases of 'var_callback' and create objects which are # linked to by the 'var_data' keyword. This ensures that data are only # read in when needed. 
cb_infos = find_info(info_list, 'var_callback') for info in cb_infos: # execute the callback functors info['var_data'] = info['var_callback']() var_data = find_tag(info_list, 'var_data') # create dimensions in NetCDF file, dimension lengths are based on # array sizes used_dim_names = {} for names, values in zip(dim_names, [shape(v) for v in var_data]): # case of a scalar if len(names) == 0: continue for dim_name, dim_size in zip(names, values): # ensure unique dimension names if dim_name in used_dim_names: if dim_size != used_dim_names[dim_name]: raise WriterDimensionError("Dimension name " + dim_name + " already in use") else: continue rootgrp.createDimension(dim_name, dim_size) used_dim_names[dim_name] = dim_size # create variables var_names = find_tag(info_list, 'var_name') nc_vars = [] fill_value_dict = find_FillValue_tags(info_list) for name, vtype, dim_name in zip(var_names, [dtype(vt) for vt in var_data], dim_names): # in the case of arrays containing strings: if str(vtype) == "object": vtype = str nc_vars.append(rootgrp.createVariable( name, vtype, dim_name, zlib=compression, fill_value=fill_value_dict[name])) # insert attributes, search through info objects and create global # attributes and attributes for each variable. for info in info_list: if 'var_name' in info: # handle variable attributes nc_var = rootgrp.variables[info['var_name']] nc_var.set_auto_maskandscale(False) for j, k in attribute_dispenser(info): if j not in ["_FillValue"]: setattr(nc_var, j, k) else: # handle global attributes for j, k in attribute_dispenser(info): try: setattr(rootgrp, j, k) except TypeError as err: logger.warning("Not saving %s with value %s because %s", str(j), str(k), str(err)) # insert data for name, vname, vdata in zip(var_names, nc_vars, var_data): vname[:] = vdata finally: rootgrp.close() if __name__ == '__main__': from mpop.satellites.meteosat09 import Meteosat09SeviriScene import datetime TIME = datetime.datetime(2009, 10, 8, 14, 30) GLOB = Meteosat09SeviriScene(area_id="EuropeCanary", time_slot=TIME) GLOB.load([0.6, 10.8]) save(GLOB, 'tester.nc') mpop-1.5.0/mpop/saturn/000077500000000000000000000000001317160620000147535ustar00rootroot00000000000000mpop-1.5.0/mpop/saturn/__init__.py000066400000000000000000000016121317160620000170640ustar00rootroot00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright (c) 2009, 2014. # SMHI, # Folkborgsvägen 1, # Norrköping, # Sweden # Author(s): # Martin Raspaud # Adam Dybbroe # This file is part of the mpop. # mpop is free software: you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # mpop is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # You should have received a copy of the GNU General Public License # along with mpop. If not, see . """Saturn Package initializer. """ mpop-1.5.0/mpop/saturn/assemble_segments.py000066400000000000000000000715761317160620000210450ustar00rootroot00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright (c) 2010. # SMHI, # Folkborgsvägen 1, # Norrköping, # Sweden # Author(s): # Martin Raspaud # This file is part of mpop. 
# mpop is free software: you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.

# mpop is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU General Public License for more details.

# You should have received a copy of the GNU General Public License along with
# mpop. If not, see <http://www.gnu.org/licenses/>.

"""Spherical geometry module.
"""

import math

import numpy as np

EPSILON = 0.0000001


class Coordinate(object):
    """Point on earth in terms of lat and lon.
    """
    lat = None
    lon = None
    x__ = None
    y__ = None
    z__ = None

    def __init__(self, lat=None, lon=None,
                 x__=None, y__=None, z__=None, R=1):
        self.R = R
        if lat is not None and lon is not None:
            self.lat = math.radians(lat)
            self.lon = math.radians(lon)
            self._update_cart()
        else:
            self.x__ = x__
            self.y__ = y__
            self.z__ = z__
            self._update_lonlat()

    def _update_cart(self):
        """Convert lon/lat to cartesian coordinates.
        """
        self.x__ = math.cos(self.lat) * math.cos(self.lon)
        self.y__ = math.cos(self.lat) * math.sin(self.lon)
        self.z__ = math.sin(self.lat)

    def _update_lonlat(self):
        """Convert cartesian to lon/lat, stored in radians for consistency
        with __init__.
        """
        self.lat = math.asin(self.z__ / self.R)
        self.lon = math.atan2(self.y__, self.x__)

    def __ne__(self, other):
        if(abs(self.lat - other.lat) < EPSILON and
           abs(self.lon - other.lon) < EPSILON):
            return 0
        else:
            return 1

    def __eq__(self, other):
        return not self.__ne__(other)

    def __str__(self):
        return str((math.degrees(self.lat), math.degrees(self.lon)))

    def __repr__(self):
        return str((math.degrees(self.lat), math.degrees(self.lon)))

    def cross2cart(self, point):
        """Compute the cross product, and convert to cartesian coordinates
        (assuming radius 1).
        """
        lat1 = self.lat
        lon1 = self.lon
        lat2 = point.lat
        lon2 = point.lon

        res = Coordinate(
            x__=(math.sin(lat1 - lat2) * math.sin((lon1 + lon2) / 2) *
                 math.cos((lon1 - lon2) / 2) -
                 math.sin(lat1 + lat2) * math.cos((lon1 + lon2) / 2) *
                 math.sin((lon1 - lon2) / 2)),
            y__=(math.sin(lat1 - lat2) * math.cos((lon1 + lon2) / 2) *
                 math.cos((lon1 - lon2) / 2) +
                 math.sin(lat1 + lat2) * math.sin((lon1 + lon2) / 2) *
                 math.sin((lon1 - lon2) / 2)),
            z__=(math.cos(lat1) * math.cos(lat2) * math.sin(lon1 - lon2)))

        return res

    def distance(self, point):
        """Great-circle distance to *point*, in radians on the unit
        sphere, using the Vincenty formula.
        """
        dlambda = self.lon - point.lon
        num = ((math.cos(point.lat) * math.sin(dlambda)) ** 2 +
               (math.cos(self.lat) * math.sin(point.lat) -
                math.sin(self.lat) * math.cos(point.lat) *
                math.cos(dlambda)) ** 2)
        den = (math.sin(self.lat) * math.sin(point.lat) +
               math.cos(self.lat) * math.cos(point.lat) * math.cos(dlambda))

        return math.atan2(math.sqrt(num), den)

    def norm(self):
        """Return the norm of the vector.
        """
        return math.sqrt(self.x__ ** 2 + self.y__ ** 2 + self.z__ ** 2)

    def normalize(self):
        """Normalize the vector.
        """
        norm = self.norm()
        self.x__ /= norm
        self.y__ /= norm
        self.z__ /= norm

        return self

    def cross(self, point):
        """Cross product with another vector.
        """
        x__ = self.y__ * point.z__ - self.z__ * point.y__
        y__ = self.z__ * point.x__ - self.x__ * point.z__
        z__ = self.x__ * point.y__ - self.y__ * point.x__

        return Coordinate(x__=x__, y__=y__, z__=z__)

    def dot(self, point):
        """Dot product with another vector.
        """
        return (self.x__ * point.x__ +
                self.y__ * point.y__ +
                self.z__ * point.z__)
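# Illustrative sketch, not part of mpop: Coordinate takes degrees and stores
# radians internally; distance() returns the great-circle angle in radians
# on the unit sphere, so scale by an Earth radius to get a length. The
# "_example_" name and the coordinates below are hypothetical.
def _example_coordinate_distance():
    stockholm = Coordinate(59.33, 18.07)
    oslo = Coordinate(59.91, 10.75)
    # roughly 417 km with the 6371 km mean Earth radius
    return stockholm.distance(oslo) * 6371.0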
""" start = None end = None def __init__(self, start, end): self.start, self.end = start, end def center_angle(self): """Angle of an arc at the center of the sphere. """ val = (math.cos(self.start.lat - self.end.lat) + math.cos(self.start.lon - self.end.lon) - 1) if val > 1: val = 1 elif val < -1: val = -1 return math.acos(val) def __eq__(self, other): if(self.start == other.start and self.end == other.end): return 1 return 0 def __ne__(self, other): return not self.__eq__(other) def __str__(self): return str((str(self.start), str(self.end))) def angle(self, other_arc): """Oriented angle between two arcs. """ if self.start == other_arc.start: a__ = self.start b__ = self.end c__ = other_arc.end elif self.start == other_arc.end: a__ = self.start b__ = self.end c__ = other_arc.start elif self.end == other_arc.end: a__ = self.end b__ = self.start c__ = other_arc.start elif self.end == other_arc.start: a__ = self.end b__ = self.start c__ = other_arc.end else: raise ValueError("No common point in angle computation.") ua_ = a__.cross(b__) ub_ = a__.cross(c__) val = ua_.dot(ub_) / (ua_.norm() * ub_.norm()) if abs(val - 1) < EPSILON: angle = 0 elif abs(val + 1) < EPSILON: angle = math.pi else: angle = math.acos(val) n__ = ua_.normalize() if n__.dot(c__) > 0: return -angle else: return angle def intersections(self, other_arc): """Gives the two intersections of the greats circles defined by the current arc and *other_arc*. """ if self.end.lon - self.start.lon > math.pi: self.end.lon -= 2 * math.pi if other_arc.end.lon - other_arc.start.lon > math.pi: other_arc.end.lon -= 2 * math.pi if self.end.lon - self.start.lon < -math.pi: self.end.lon += 2 * math.pi if other_arc.end.lon - other_arc.start.lon < -math.pi: other_arc.end.lon += 2 * math.pi ea_ = self.start.cross2cart(self.end).normalize() eb_ = other_arc.start.cross2cart(other_arc.end).normalize() cross = ea_.cross(eb_) lat = math.atan2(cross.z__, math.sqrt(cross.x__ ** 2 + cross.y__ ** 2)) lon = math.atan2(-cross.y__, cross.x__) return (Coordinate(math.degrees(lat), math.degrees(lon)), Coordinate(math.degrees(-lat), math.degrees(modpi(lon + math.pi)))) def intersects(self, other_arc): """Says if two arcs defined by the current arc and the *other_arc* intersect. An arc is defined as the shortest tracks between two points. """ for i in self.intersections(other_arc): a__ = self.start b__ = self.end c__ = other_arc.start d__ = other_arc.end ab_ = a__.distance(b__) cd_ = c__.distance(d__) if(abs(a__.distance(i) + b__.distance(i) - ab_) < EPSILON and abs(c__.distance(i) + d__.distance(i) - cd_) < EPSILON): return True return False def intersection(self, other_arc): """Says where, if two arcs defined by the current arc and the *other_arc* intersect. An arc is defined as the shortest tracks between two points. """ for i in self.intersections(other_arc): a__ = self.start b__ = self.end c__ = other_arc.start d__ = other_arc.end ab_ = a__.distance(b__) cd_ = c__.distance(d__) if(abs(a__.distance(i) + b__.distance(i) - ab_) < EPSILON and abs(c__.distance(i) + d__.distance(i) - cd_) < EPSILON): return i return None def modpi(val): """Puts *val* between -pi and pi. """ return (val + math.pi) % (2 * math.pi) - math.pi def modpi2(val): """Puts *val* between 0 and 2pi. """ return val % (2 * math.pi) def point_inside(point, corners): """Is a point inside the 4 corners ? This uses great circle arcs as area boundaries. 
""" arc1 = Arc(corners[0], corners[1]) arc2 = Arc(corners[1], corners[2]) arc3 = Arc(corners[2], corners[3]) arc4 = Arc(corners[3], corners[0]) arc5 = Arc(corners[1], point) arc6 = Arc(corners[3], point) angle1 = modpi(arc1.angle(arc2)) angle1bis = modpi(arc1.angle(arc5)) angle2 = modpi(arc3.angle(arc4)) angle2bis = modpi(arc3.angle(arc6)) return (np.sign(angle1) == np.sign(angle1bis) and abs(angle1) > abs(angle1bis) and np.sign(angle2) == np.sign(angle2bis) and abs(angle2) > abs(angle2bis)) def overlaps(area_corners, segment_corners): """Are two areas overlapping ? This uses great circle arcs as area boundaries. """ for i in area_corners: if point_inside(i, segment_corners): return True for i in segment_corners: if point_inside(i, area_corners): return True area_arc1 = Arc(area_corners[0], area_corners[1]) area_arc2 = Arc(area_corners[1], area_corners[2]) area_arc3 = Arc(area_corners[2], area_corners[3]) area_arc4 = Arc(area_corners[3], area_corners[0]) segment_arc1 = Arc(segment_corners[0], segment_corners[1]) segment_arc2 = Arc(segment_corners[1], segment_corners[2]) segment_arc3 = Arc(segment_corners[2], segment_corners[3]) segment_arc4 = Arc(segment_corners[3], segment_corners[0]) for i in (area_arc1, area_arc2, area_arc3, area_arc4): for j in (segment_arc1, segment_arc2, segment_arc3, segment_arc4): if i.intersects(j): return True return False def get_intersections(b__, boundaries): """Get the intersections of *b__* with *boundaries*. Returns both the intersection coordinates and the concerned boundaries. """ intersections = [] bounds = [] for other_b in boundaries: inter = b__.intersection(other_b) if inter is not None: intersections.append(inter) bounds.append(other_b) return intersections, bounds def get_first_intersection(b__, boundaries): """Get the first intersection on *b__* with *boundaries*. """ intersections, bounds = get_intersections(b__, boundaries) del bounds dists = np.array([b__.start.distance(p__) for p__ in intersections]) indices = dists.argsort() if len(intersections) > 0: return intersections[indices[0]] return None def get_next_intersection(p__, b__, boundaries): """Get the next intersection from the intersection of arcs *p__* and *b__* along segment *b__* with *boundaries*. """ new_b = Arc(p__, b__.end) intersections, bounds = get_intersections(new_b, boundaries) dists = np.array([b__.start.distance(p2) for p2 in intersections]) indices = dists.argsort() if len(intersections) > 0 and intersections[indices[0]] != p__: return intersections[indices[0]], bounds[indices[0]] elif len(intersections) > 1: return intersections[indices[1]], bounds[indices[1]] return None, None def polygon(area_corners, segment_corners): """Get the intersection polygon between two areas. 
""" area_boundaries = [Arc(area_corners[0], area_corners[1]), Arc(area_corners[1], area_corners[2]), Arc(area_corners[2], area_corners[3]), Arc(area_corners[3], area_corners[0])] segment_boundaries = [Arc(segment_corners[0], segment_corners[1]), Arc(segment_corners[1], segment_corners[2]), Arc(segment_corners[2], segment_corners[3]), Arc(segment_corners[3], segment_corners[0])] angle1 = area_boundaries[0].angle(area_boundaries[1]) angle2 = segment_boundaries[0].angle(segment_boundaries[1]) if np.sign(angle1) != np.sign(angle2): segment_corners.reverse() segment_boundaries = [Arc(segment_corners[0], segment_corners[1]), Arc(segment_corners[1], segment_corners[2]), Arc(segment_corners[2], segment_corners[3]), Arc(segment_corners[3], segment_corners[0])] poly = [] boundaries = area_boundaries other_boundaries = segment_boundaries b__ = None for b__ in boundaries: if point_inside(b__.start, segment_corners): poly.append(b__.start) break else: inter = get_first_intersection(b__, other_boundaries) if inter is not None: poly.append(inter) break if len(poly) == 0: return None while len(poly) < 2 or poly[0] != poly[-1]: inter, b2_ = get_next_intersection(poly[-1], b__, other_boundaries) if inter is None: poly.append(b__.end) idx = (boundaries.index(b__) + 1) % len(boundaries) b__ = boundaries[idx] else: poly.append(inter) b__ = b2_ boundaries, other_boundaries = other_boundaries, boundaries return poly[:-1] R = 1 def get_area(corners): """Get the area of the convex area defined by *corners*. """ c1_ = corners[0] area = 0 for idx in range(1, len(corners) - 1): b1_ = Arc(c1_, corners[idx]) b2_ = Arc(c1_, corners[idx + 1]) b3_ = Arc(corners[idx], corners[idx + 1]) e__ = (abs(b1_.angle(b2_)) + abs(b2_.angle(b3_)) + abs(b3_.angle(b1_))) area += R ** 2 * e__ - math.pi return area def overlap_rate(swath_corners, area_corners): """Get how much a swath overlaps an area. """ area_area = get_area(area_corners) inter_area = get_area(polygon(area_corners, swath_corners)) return inter_area / area_area def min_distances(area_corners, segment_corners): """Min distances between each corner of *area_corners* and *segment_corners*. """ dists = np.ones(4) * np.infty for i, ic_ in enumerate(area_corners): for jc_ in segment_corners: dist = ic_.distance(jc_) if dists[i] > dist: dists[i] = dist return dists def should_wait(area_corners, segment_corners, previous_segment_corners): """Are the newest cornest still inside the area ? is the last segment boundary overlapping any boundary of the area ? In this case we should wait for the next segment to arrive. """ dists = min_distances(segment_corners, previous_segment_corners) indices = np.argsort(dists) new_corners = np.array(segment_corners)[indices[2:]] if len(new_corners) != 2: raise ValueError("More than 2 corners differ from previous segment...") new_arc = Arc(new_corners[0], new_corners[1]) for i in new_corners: if point_inside(i, area_corners): return True area_arc1 = Arc(area_corners[0], area_corners[1]) area_arc2 = Arc(area_corners[1], area_corners[2]) area_arc3 = Arc(area_corners[2], area_corners[3]) area_arc4 = Arc(area_corners[3], area_corners[0]) for i in (area_arc1, area_arc2, area_arc3, area_arc4): if i.intersects(new_arc): return True return False import unittest class TestSphereGeometry(unittest.TestCase): """Testing sphere geometry from this module. """ def test_angle(self): """Testing the angle value between two arcs. 
""" base = 0 p0_ = Coordinate(base, base) p1_ = Coordinate(base + 1, base) p2_ = Coordinate(base, base + 1) p3_ = Coordinate(base - 1, base) p4_ = Coordinate(base, base - 1) arc1 = Arc(p0_, p1_) arc2 = Arc(p0_, p2_) arc3 = Arc(p0_, p3_) arc4 = Arc(p0_, p4_) self.assertAlmostEqual(arc1.angle(arc2), math.pi / 2, msg="this should be pi/2") self.assertAlmostEqual(arc2.angle(arc3), math.pi / 2, msg="this should be pi/2") self.assertAlmostEqual(arc3.angle(arc4), math.pi / 2, msg="this should be pi/2") self.assertAlmostEqual(arc4.angle(arc1), math.pi / 2, msg="this should be pi/2") self.assertAlmostEqual(arc1.angle(arc4), -math.pi / 2, msg="this should be -pi/2") self.assertAlmostEqual(arc4.angle(arc3), -math.pi / 2, msg="this should be -pi/2") self.assertAlmostEqual(arc3.angle(arc2), -math.pi / 2, msg="this should be -pi/2") self.assertAlmostEqual(arc2.angle(arc1), -math.pi / 2, msg="this should be -pi/2") self.assertAlmostEqual(arc1.angle(arc3), math.pi, msg="this should be pi") self.assertAlmostEqual(arc3.angle(arc1), math.pi, msg="this should be pi") self.assertAlmostEqual(arc2.angle(arc4), math.pi, msg="this should be pi") self.assertAlmostEqual(arc4.angle(arc2), math.pi, msg="this should be pi") p5_ = Coordinate(base + 1, base + 1) p6_ = Coordinate(base - 1, base + 1) p7_ = Coordinate(base - 1, base - 1) p8_ = Coordinate(base + 1, base - 1) arc5 = Arc(p0_, p5_) arc6 = Arc(p0_, p6_) arc7 = Arc(p0_, p7_) arc8 = Arc(p0_, p8_) self.assertAlmostEqual(arc1.angle(arc5), math.pi / 4, 3, msg="this should be pi/4") self.assertAlmostEqual(arc5.angle(arc2), math.pi / 4, 3, msg="this should be pi/4") self.assertAlmostEqual(arc2.angle(arc6), math.pi / 4, 3, msg="this should be pi/4") self.assertAlmostEqual(arc6.angle(arc3), math.pi / 4, 3, msg="this should be pi/4") self.assertAlmostEqual(arc3.angle(arc7), math.pi / 4, 3, msg="this should be pi/4") self.assertAlmostEqual(arc7.angle(arc4), math.pi / 4, 3, msg="this should be pi/4") self.assertAlmostEqual(arc4.angle(arc8), math.pi / 4, 3, msg="this should be pi/4") self.assertAlmostEqual(arc8.angle(arc1), math.pi / 4, 3, msg="this should be pi/4") self.assertAlmostEqual(arc1.angle(arc6), 3 * math.pi / 4, 3, msg="this should be 3pi/4") c0_ = Coordinate(0, 180) c1_ = Coordinate(1, 180) c2_ = Coordinate(0, -179) c3_ = Coordinate(-1, -180) c4_ = Coordinate(0, 179) arc1 = Arc(c0_, c1_) arc2 = Arc(c0_, c2_) arc3 = Arc(c0_, c3_) arc4 = Arc(c0_, c4_) self.assertAlmostEqual(arc1.angle(arc2), math.pi / 2, msg="this should be pi/2") self.assertAlmostEqual(arc2.angle(arc3), math.pi / 2, msg="this should be pi/2") self.assertAlmostEqual(arc3.angle(arc4), math.pi / 2, msg="this should be pi/2") self.assertAlmostEqual(arc4.angle(arc1), math.pi / 2, msg="this should be pi/2") self.assertAlmostEqual(arc1.angle(arc4), -math.pi / 2, msg="this should be -pi/2") self.assertAlmostEqual(arc4.angle(arc3), -math.pi / 2, msg="this should be -pi/2") self.assertAlmostEqual(arc3.angle(arc2), -math.pi / 2, msg="this should be -pi/2") self.assertAlmostEqual(arc2.angle(arc1), -math.pi / 2, msg="this should be -pi/2") # case of the north pole c0_ = Coordinate(90, 0) c1_ = Coordinate(89, 0) c2_ = Coordinate(89, -90) c3_ = Coordinate(89, 180) c4_ = Coordinate(89, 90) arc1 = Arc(c0_, c1_) arc2 = Arc(c0_, c2_) arc3 = Arc(c0_, c3_) arc4 = Arc(c0_, c4_) self.assertAlmostEqual(arc1.angle(arc2), math.pi / 2, msg="this should be pi/2") self.assertAlmostEqual(arc2.angle(arc3), math.pi / 2, msg="this should be pi/2") self.assertAlmostEqual(arc3.angle(arc4), math.pi / 2, msg="this should be pi/2") 
self.assertAlmostEqual(arc4.angle(arc1), math.pi / 2, msg="this should be pi/2") self.assertAlmostEqual(arc1.angle(arc4), -math.pi / 2, msg="this should be -pi/2") self.assertAlmostEqual(arc4.angle(arc3), -math.pi / 2, msg="this should be -pi/2") self.assertAlmostEqual(arc3.angle(arc2), -math.pi / 2, msg="this should be -pi/2") self.assertAlmostEqual(arc2.angle(arc1), -math.pi / 2, msg="this should be -pi/2") self.assertAlmostEqual(Arc(c1_, c2_).angle(arc1), math.pi/4, 3, msg="this should be pi/4") self.assertAlmostEqual(Arc(c4_, c3_).angle(arc4), -math.pi/4, 3, msg="this should be -pi/4") self.assertAlmostEqual(Arc(c1_, c4_).angle(arc1), -math.pi/4, 3, msg="this should be -pi/4") def test_inside(self): """Testing if a point is inside for other points. """ c1_ = Coordinate(-11, -11) c2_ = Coordinate(11, -11) c3_ = Coordinate(11, 11) c4_ = Coordinate(-11, 11) corners = [c1_, c2_, c3_, c4_] point = Coordinate(0, 0) self.assertTrue(point_inside(point, corners)) point = Coordinate(0, 12) self.assertFalse(point_inside(point, corners)) c1_ = Coordinate(-1, 180) c2_ = Coordinate(1, 179) c3_ = Coordinate(1, -179) c4_ = Coordinate(-1, -179) corners = [c1_, c2_, c3_, c4_] point = Coordinate(0, 180) self.assertTrue(point_inside(point, corners)) point = Coordinate(12, 180) self.assertFalse(point_inside(point, corners)) point = Coordinate(-12, 180) self.assertFalse(point_inside(point, corners)) point = Coordinate(0, 192) self.assertFalse(point_inside(point, corners)) point = Coordinate(0, -192) self.assertFalse(point_inside(point, corners)) # case of the north pole c1_ = Coordinate(89, 0) c2_ = Coordinate(89, 90) c3_ = Coordinate(89, 180) c4_ = Coordinate(89, -90) corners = [c1_, c2_, c3_, c4_] point = Coordinate(90, 90) self.assertTrue(point_inside(point, corners)) def test_intersects(self): """Test if two arcs intersect. 
""" p0_ = Coordinate(0, 0) p1_ = Coordinate(1, 0) p2_ = Coordinate(0, 1) p3_ = Coordinate(-1, 0) p4_ = Coordinate(0, -1) p5_ = Coordinate(1, 1) p6_ = Coordinate(-1, 1) arc13 = Arc(p1_, p3_) arc24 = Arc(p2_, p4_) arc32 = Arc(p3_, p2_) arc41 = Arc(p4_, p1_) arc40 = Arc(p4_, p0_) arc56 = Arc(p5_, p6_) arc45 = Arc(p4_, p5_) arc02 = Arc(p0_, p2_) arc35 = Arc(p3_, p5_) self.assertTrue(arc13.intersects(arc24)) self.assertFalse(arc32.intersects(arc41)) self.assertFalse(arc56.intersects(arc40)) self.assertFalse(arc56.intersects(arc40)) self.assertFalse(arc45.intersects(arc02)) self.assertTrue(arc35.intersects(arc24)) p0_ = Coordinate(0, 180) p1_ = Coordinate(1, 180) p2_ = Coordinate(0, -179) p3_ = Coordinate(-1, -180) p4_ = Coordinate(0, 179) p5_ = Coordinate(1, -179) p6_ = Coordinate(-1, -179) arc13 = Arc(p1_, p3_) arc24 = Arc(p2_, p4_) arc32 = Arc(p3_, p2_) arc41 = Arc(p4_, p1_) arc40 = Arc(p4_, p0_) arc56 = Arc(p5_, p6_) arc45 = Arc(p4_, p5_) arc02 = Arc(p0_, p2_) arc35 = Arc(p3_, p5_) self.assertTrue(arc13.intersects(arc24)) self.assertFalse(arc32.intersects(arc41)) self.assertFalse(arc56.intersects(arc40)) self.assertFalse(arc56.intersects(arc40)) self.assertFalse(arc45.intersects(arc02)) self.assertTrue(arc35.intersects(arc24)) # case of the north pole p0_ = Coordinate(90, 0) p1_ = Coordinate(89, 0) p2_ = Coordinate(89, 90) p3_ = Coordinate(89, 180) p4_ = Coordinate(89, -90) p5_ = Coordinate(89, 45) p6_ = Coordinate(89, 135) arc13 = Arc(p1_, p3_) arc24 = Arc(p2_, p4_) arc32 = Arc(p3_, p2_) arc41 = Arc(p4_, p1_) arc40 = Arc(p4_, p0_) arc56 = Arc(p5_, p6_) arc45 = Arc(p4_, p5_) arc02 = Arc(p0_, p2_) arc35 = Arc(p3_, p5_) self.assertTrue(arc13.intersects(arc24)) self.assertFalse(arc32.intersects(arc41)) self.assertFalse(arc56.intersects(arc40)) self.assertFalse(arc56.intersects(arc40)) self.assertFalse(arc45.intersects(arc02)) self.assertTrue(arc35.intersects(arc24)) def test_overlaps(self): """Test if two areas overlap. """ p1_ = Coordinate(89, 0) p2_ = Coordinate(89, 90) p3_ = Coordinate(89, 180) p4_ = Coordinate(89, -90) p5_ = Coordinate(89, 45) p6_ = Coordinate(89, 135) p7_ = Coordinate(89, -135) p8_ = Coordinate(89, -45) self.assertTrue(overlaps([p1_, p2_, p3_, p4_], [p5_, p6_, p7_, p8_])) self.assertFalse(overlaps([p1_, p5_, p2_, p6_], [p3_, p7_, p4_, p8_])) p1_ = Coordinate(1, -1) p2_ = Coordinate(1, 1) p3_ = Coordinate(-1, 1) p4_ = Coordinate(-1, -1) p5_ = Coordinate(0, 0) p6_ = Coordinate(0, 2) p7_ = Coordinate(2, 2) p8_ = Coordinate(2, 0) self.assertTrue(overlaps([p1_, p2_, p3_, p4_], [p5_, p6_, p7_, p8_])) self.assertFalse(overlaps([p1_, p8_, p5_, p4_], [p2_, p3_, p6_, p7_])) def test_overlap_rate(self): """Test how much two areas overlap. 
""" p1_ = Coordinate(1, -1) p2_ = Coordinate(1, 1) p3_ = Coordinate(-1, 1) p4_ = Coordinate(-1, -1) p5_ = Coordinate(0, 0) p6_ = Coordinate(0, 2) p7_ = Coordinate(2, 2) p8_ = Coordinate(2, 0) self.assertAlmostEqual(overlap_rate([p1_, p2_, p3_, p4_], [p5_, p6_, p7_, p8_]), 0.25, 3) c1_ = [(60.5944, 82.829699999999974), (52.859999999999999, 36.888300000000001), (66.7547, 2.8773), (80.395899999999997, 98.145499999999984)] c2_ = [(62.953206630716465, 7.8098183315148422), (62.953206630716465, 26.189349044600252), (53.301561187195546, 26.189349044600252), (53.301561187195546, 7.8098183315148422)] cor1 = [Coordinate(t[0], t[1]) for t in c1_] cor2 = [Coordinate(t[0], t[1]) for t in c2_] self.assertAlmostEqual(overlap_rate(cor1, cor2), 0.07, 2) c1_ = [(60.5944, 82.829699999999974), (52.859999999999999, 36.888300000000001), (66.7547, 2.8773), (80.395899999999997, 98.145499999999984)] c2_ = [(65.98228561983025, 12.108984194981202), (65.98228561983025, 30.490647126520301), (57.304862819933433, 30.490647126520301), (57.304862819933433, 12.108984194981202)] cor1 = [Coordinate(t[0], t[1]) for t in c1_] cor2 = [Coordinate(t[0], t[1]) for t in c2_] self.assertAlmostEqual(overlap_rate(cor1, cor2), 0.5, 2) if __name__ == '__main__': unittest.main() mpop-1.5.0/mpop/saturn/filelist.py000066400000000000000000000114101317160620000171350ustar00rootroot00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright (c) 2010, 2013, 2014. # SMHI, # Folkborgsvägen 1, # Norrköping, # Sweden # Author(s): # Martin Raspaud # This file is part of mpop. # mpop is free software: you can redistribute it and/or modify it under the # terms of the GNU General Public License as published by the Free Software # Foundation, either version 3 of the License, or (at your option) any later # version. # mpop is distributed in the hope that it will be useful, but WITHOUT ANY # WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR # A PARTICULAR PURPOSE. See the GNU General Public License for more details. # You should have received a copy of the GNU General Public License along with # mpop. If not, see . """Filelist class. """ import logging import os import shutil import tempfile from mpop.utils import ensure_dir logger = logging.getLogger(__name__) class FileList(list): """List of files. """ def __init__(self, *args): list.__init__(self, *args) def put_date(self, date): """Return an new filelist with the given *date*. """ return FileList([date.strftime(i) for i in self]) def put_metadata(self, metadata): """Fill in the filelist with the given *metadata*. """ return FileList([i % metadata for i in self]) def _get_by_ext(self): """Convert the filelist into a dict with extensions as keys. """ types = {} for filename in self: file_tuple = os.path.splitext(filename) ext = file_tuple[1][:4] types[ext] = types.get(ext, FileList()) + FileList([filename]) return types def save_object(self, obj, hook=None): """save *obj* to the filelist. """ files_by_ext = self._get_by_ext() for extkey in files_by_ext: path, filename = os.path.split(files_by_ext[extkey][0]) origname = filename try: ensure_dir(files_by_ext[extkey][0]) handle, tmpfilename = tempfile.mkstemp(extkey, "mpop_tmp", path) os.fsync(handle) obj.save(tmpfilename) os.fsync(handle) os.chmod(tmpfilename, 0644) os.fsync(handle) except Exception: logger.exception("Something went wrong in saving file... 
" "Dumping trace.") logger.warning("Job skipped, going on with the next.") continue for filename in files_by_ext[extkey][1:]: path2, trash = os.path.split(filename) del trash ensure_dir(filename) handle2, tmpfilename2 = tempfile.mkstemp(extkey, "mpop_tmp", path2) os.fsync(handle2) try: shutil.copy(tmpfilename, tmpfilename2) os.fsync(handle2) os.close(handle2) except (IOError, OSError): logger.exception("Copying file %s to %s failed" %(tmpfilename,tmpfilename2)) logger.info("Retrying...") try: shutil.copy(tmpfilename, tmpfilename2) os.fsync(handle2) os.close(handle2) logger.info("Went OK this time...") except (IOError, OSError): logger.exception("No way...") try: os.rename(tmpfilename2, filename) if hook: hook(obj, filename=origname, uri=filename) except (IOError, OSError): logger.exception("Renaming file %s to %s failed" %(tmpfilename2,filename)) logger.info("Retrying...") try: os.rename(tmpfilename2, filename) if hook: hook(obj, filename=origname, uri=filename) except (IOError, OSError): logger.exception("No way...") logger.debug("Done saving "+filename) os.rename(tmpfilename, files_by_ext[extkey][0]) os.fsync(handle) os.close(handle) if hook: hook(obj, filename=origname, uri=files_by_ext[extkey][0]) logger.debug("Done saving "+files_by_ext[extkey][0]) mpop-1.5.0/mpop/saturn/filewatcher.py000066400000000000000000000121611317160620000176230ustar00rootroot00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright (c) 2010, 2011, 2014. # SMHI, # Folkborgsvägen 1, # Norrköping, # Sweden # Author(s): # Martin Raspaud # This file is part of mpop. # mpop is free software: you can redistribute it and/or modify it under the # terms of the GNU General Public License as published by the Free Software # Foundation, either version 3 of the License, or (at your option) any later # version. # mpop is distributed in the hope that it will be useful, but WITHOUT ANY # WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR # A PARTICULAR PURPOSE. See the GNU General Public License for more details. # You should have received a copy of the GNU General Public License along with # mpop. If not, see . """Watch files coming in a given directory. """ import datetime import glob import os from Queue import Empty from threading import Thread, Condition import time import logging LOG = logging.getLogger(__name__) ZERO = datetime.timedelta(seconds=0) class FileWatcher(Thread): """Looks for new files, and queues them. """ def __init__(self, filename_template, file_queue, frequency): """Looks for new files arriving at the given *frequency*, and queues them. """ Thread.__init__(self) self.queue = file_queue self.template = filename_template self.frequency = datetime.timedelta(minutes=frequency) self.running = True self.cond = Condition() def terminate(self): """Terminate thread. """ self.running = False self.cond.acquire() self.cond.notify() self.cond.release() LOG.debug("Termination request received in FileWatcher") def wait(self, secs): if self.running: self.cond.wait(secs) def run(self): """Run the file watcher. 
""" filelist = set() sleep_time = 8 while self.running: self.cond.acquire() if isinstance(self.template, (list, tuple)): new_filelist = [] for template in self.template: new_filelist += glob.glob(template) new_filelist = set(new_filelist) else: new_filelist = set(glob.glob(self.template)) files_to_process = list(new_filelist - filelist) filelist = new_filelist files_dict = {} for fil in files_to_process: files_dict[fil] = os.path.getmtime(fil) files_to_process.sort(lambda x, y: cmp(files_dict[x], files_dict[y])) if len(files_to_process) != 0 and self.running: sleep_time = 8 times = [] for i in files_to_process: LOG.debug("queueing %s..." % i) self.queue.put(i) times.append(os.stat(i).st_ctime) times.sort() since_creation = datetime.timedelta(seconds=time.time() - times[-1]) if(self.frequency > since_creation): to_wait = self.frequency - since_creation LOG.info( "Waiting at least " + str(to_wait) + " for next file") sleep_time = (to_wait.seconds + to_wait.microseconds / 1000000.0) self.wait(sleep_time) sleep_time = 8 elif self.running: LOG.info("no new file has come, waiting %s secs" % str(sleep_time)) self.wait(sleep_time) if sleep_time < 60: sleep_time *= 2 self.cond.release() LOG.info("FileWatcher terminated.") class FileProcessor(Thread): """Execute *fun* on filenames provided by from *file_queue*. If *refresh* is a positive number, run *fun* every given number of seconds with None as argument. """ def __init__(self, file_queue, fun, refresh=None): Thread.__init__(self) self.queue = file_queue self.fun = fun self.running = True self.refresh = refresh def terminate(self): """Terminate thread. """ self.running = False LOG.debug("Termination request received in FileProcessor") def run(self): """Execute the given function on files from the file queue. """ while self.running: try: filename = self.queue.get(block=True, timeout=self.refresh) LOG.debug("processing %s" % filename) except Empty: filename = None try: self.fun(filename) except: LOG.exception("Something wrong happened in %s for %s. Skipping." % (str(self.fun), filename)) LOG.info("FileProcessor terminated.") def stop(self): """Stops a running process. """ self.running = False mpop-1.5.0/mpop/saturn/gatherer.py000066400000000000000000000742431317160620000171400ustar00rootroot00000000000000#!/usr/bin/python # -*- coding: utf-8 -*- # Copyright (c) 2010, 2011, 2012. # SMHI, # Folkborgsvägen 1, # Norrköping, # Sweden # Author(s): # Martin Raspaud # This file is part of mpop. # mpop is free software: you can redistribute it and/or modify it under the # terms of the GNU General Public License as published by the Free Software # Foundation, either version 3 of the License, or (at your option) any later # version. # mpop is distributed in the hope that it will be useful, but WITHOUT ANY # WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR # A PARTICULAR PURPOSE. See the GNU General Public License for more details. # You should have received a copy of the GNU General Public License along with # mpop. If not, see . """Classes for gathering segmented data. 
""" import datetime import glob import os.path from ConfigParser import ConfigParser, NoSectionError, NoOptionError from fnmatch import fnmatch import numpy as np import sys import mpop.utils from mpop import CONFIG_PATH from mpop.scene import Satellite, SatelliteInstrumentScene from mpop.projector import get_area_def from mpop.satellites import GenericFactory DEFAULT_TIMELINESS = datetime.timedelta(minutes=5) DEFAULT_GRANULARITY = datetime.timedelta(minutes=5) LOG = mpop.utils.get_logger("gatherer") if sys.version_info < (2, 5): import time def strptime(string, fmt=None): """This function is available in the datetime module only from Python >= 2.5. """ return datetime.datetime(*time.strptime(string, fmt)[:6]) else: strptime = datetime.datetime.strptime def globify(filename): """Replace datetime string variable with ?'s. """ filename = filename.replace("%Y", "????") filename = filename.replace("%j", "???") filename = filename.replace("%m", "??") filename = filename.replace("%d", "??") filename = filename.replace("%H", "??") filename = filename.replace("%M", "??") filename = filename.replace("%S", "??") return filename def beginning(filename): """Find the beginning of *filename* not having any wildcards (? or *). Returns a duplet containing the position of the first wildcard, and the same position if the datetime variables where expanded. """ posqm = filename.find("?") posst = filename.find("*") if posqm == -1 and posst == -1: pos = len(filename) elif posqm == -1: pos = posst elif posst == -1: pos = posqm else: pos = min(posst, posqm) secondpos = pos if filename[:pos].find("%Y") > -1: secondpos += 2 if filename[:pos].find("%j") > -1: secondpos += 1 return (pos, secondpos) class Granule(SatelliteInstrumentScene): """The granule object. """ def __init__(self, filename=None, time_slot=None, satellite=None, instrument=None): # Setting up a granule from metadata if filename is None: SatelliteInstrumentScene.__init__(self, time_slot=time_slot, satellite=(satellite.satname, satellite.number, satellite.variant), instrument=instrument) conf = ConfigParser() conf.read(os.path.join(CONFIG_PATH, self.fullname + ".cfg")) self.file_template = str(conf.get(instrument+"-granules", "filename", raw=True)) self.directory = str(conf.get(instrument+"-granules", "dir", raw=True)) self.file_name = time_slot.strftime(self.file_template) self.directory = time_slot.strftime(self.directory) self.file_type = conf.get(instrument + "-granules", "type") self.granularity = datetime.timedelta( seconds=int(conf.get(instrument + "-granules", "granularity"))) self.span = float(conf.get(instrument + "-granules", "full_scan_period")) self.scan_width = int(conf.get(instrument + "-granules", "scan_width")) return # Setting up a granule from a filename filelist = glob.glob(os.path.join(CONFIG_PATH, "*.cfg")) the_directory, the_name = os.path.split(filename) self.satname = None for fil in filelist: conf = ConfigParser() conf.read(fil) try: instruments = eval(conf.get("satellite", "instruments")) for instrument in instruments: directory = str(conf.get(instrument+"-granules", "dir")) file_template = str(conf.get(instrument+"-granules", "filename", raw=True)) file_glob = globify(file_template) if(os.path.samefile(the_directory, directory) and fnmatch(the_name, file_glob)): try: self.file_template = file_template self.file_name = the_name pos1, pos2 = beginning(self.file_template) time_slot = strptime(self.file_name[:pos2], self.file_template[:pos1]) SatelliteInstrumentScene.__init__( self, time_slot=time_slot, 
satellite=(conf.get("satellite", "satname"), conf.get("satellite", "number"), conf.get("satellite", "variant")), instrument=instrument) self.file_type = conf.get(instrument + "-granules", "type") self.directory = the_directory self.granularity = datetime.timedelta( seconds=int(conf.get(instrument + "-granules", "granularity"))) self.span = float(conf.get(instrument + "-granules", "full_scan_period")) self.scan_width = int(conf.get(instrument + "-granules", "scan_width")) except (NoSectionError, NoOptionError): raise IOError("Inconsistency detected in " + fil) break if self.satname is not None: break except (NoSectionError, NoOptionError): pass if not self.satname: raise ValueError("Can't find any matching satellite for "+filename) def __cmp__(self, obj): return (cmp(self.satname, obj.satname) or cmp(self.number, obj.number) or cmp(self.variant, obj.variant) or cmp(self.time_slot, obj.time_slot)) def __str__(self): return "G:" + os.path.join(self.directory, self.file_name) def get_lonlat(self, row, col): """Get the longitude and latitude for the current scene at the given row and col. """ conf = ConfigParser() conf.read(os.path.join(CONFIG_PATH, self.fullname + ".cfg")) try: reader_name = conf.get(self.instrument_name + "-level1", 'format') except NoSectionError: reader_name = conf.get(self.instrument_name + "-level2", 'format') reader = "mpop.satin." + reader_name try: reader_module = __import__(reader, globals(), locals(), ['get_lonlat']) except ImportError: LOG.exception("Problem finding a reader...") raise ImportError("No "+reader+" reader found.") return reader_module.get_lonlat(self, row, col) @property def gross_area(self): """Build a gross area of the segment based on its corners. """ from pyresample.geometry import SwathDefinition nlines = self.granularity.seconds / self.span top_left = self.get_lonlat(0, 0) top_right = self.get_lonlat(0, self.scan_width - 1) bottom_left = self.get_lonlat(nlines - 1, 0) bottom_right = self.get_lonlat(nlines - 1, self.scan_width - 1) lons = np.array([[top_left[0], top_right[0]], [bottom_left[0], bottom_right[0]]]) lats = np.array([[top_left[1], top_right[1]], [bottom_left[1], bottom_right[1]]]) return SwathDefinition(lons, lats) class SegmentedSwath(Satellite): def __init__(self, area, (satname, number, variant)): Satellite.__init__(self, (satname, number, variant)) self.area = get_area_def(area) self.granules = [] self.planned_granules = [] self.timeout = None def add(self, granule): """Add a granule to the swath """ if not self.granules: self._compute_plan(granule.time_slot, granule.granularity, granule.instrument_name) try: self.planned_granules.remove(granule) except ValueError: LOG.warning(str(granule) + " not in " + str([str(gran) for gran in self.planned_granules])) self.granules.append(granule) if self.planned_granules: self.timeout = self.planned_granules[-1].time_slot else: self.timeout = datetime.datetime.now() def _compute_plan(self, utc_time, granularity, instrument): """Compute the planned granules for the current area. 
""" nts = utc_time new_granule = Granule(time_slot=nts, satellite=self, instrument=instrument) if new_granule.gross_area.overlaps(self.area): self.planned_granules.append(new_granule) while True: nts = nts - granularity new_granule = Granule(time_slot=nts, satellite=self, instrument=instrument) if new_granule.gross_area.overlaps(self.area): self.planned_granules.append(new_granule) else: break nts = utc_time while True: nts = nts + granularity new_granule = Granule(time_slot=nts, satellite=self, instrument=instrument) if new_granule.gross_area.overlaps(self.area): self.planned_granules.append(new_granule) else: break self.planned_granules.sort() self.timeout = self.planned_granules[-1].time_slot def __repr__(self): granules = [str(granule) for granule in self.granules] return "swath " + str(granules) + " on area " + self.area.area_id def concatenate(self, channels=None): """Concatenate the granules into a swath. Returns a loaded satellite scene. """ self.granules.sort() conffile = (self.granules[0].variant + self.granules[0].satname + self.granules[0].number + ".cfg") conf = ConfigParser() conf.read(os.path.join(CONFIG_PATH, conffile)) try: # call concat reader_name = conf.get(self.granules[0].instrument_name + "-level1", 'format') str_granules = [str(granule) for granule in self.granules] LOG.debug("Concatenating "+str(str_granules)) try: reader_name = eval(reader_name) except NameError: reader_name = str(reader_name) # read the data reader = "mpop.satin."+reader_name try: reader_module = __import__(reader, globals(), locals(), ['concatenate']) except ImportError: LOG.exception("ImportError when loading plugin for format " + str(reader_name)) raise ImportError("No "+reader+" reader found.") scene = reader_module.concatenate(self.granules, channels) except NoSectionError: #concatenate loaded granules. scenes = [GenericFactory.create_scene(granule.satname, granule.number, granule.instrument_name, granule.time_slot, None, None, granule.variant) for granule in self.granules] for granule in scenes: granule.load(channels) scene = mpop.scene.assemble_segments(scenes) return scene class Gatherer(Satellite): """The mighty gatherer class. """ def __init__(self, areas_of_interest, timeliness=DEFAULT_TIMELINESS, satname=None, number=None, variant=None): Satellite.__init__(self, (satname, number, variant)) self.timeliness = timeliness self.swaths = {} self.finished_swaths = [] for area in areas_of_interest: self.swaths[area] = SegmentedSwath(area, (satname, number, variant)) def add(self, granule): """Add a *granule* to the gatherer. 
""" interesting = False for area_name in self.swaths: if granule.gross_area.overlaps(self.swaths[area_name].area): interesting = True if (len(self.swaths[area_name].planned_granules) > 0 and granule not in self.swaths[area_name].planned_granules): LOG.debug("Following swath is starting, closing the current" " swath") LOG.debug("Swath was " + str(self.swaths[area_name]) + " while " + str(self.swaths[area_name].planned_granules) + " remains.") self.finished_swaths.append(self.swaths[area_name]) self.swaths[area_name] = SegmentedSwath(area_name, (granule.satname, granule.number, granule.variant)) LOG.debug("Add " + str(granule) + " to " + str(self.swaths[area_name])) self.swaths[area_name].add(granule) if len(self.swaths[area_name].planned_granules) == 0: self.finished_swaths.append(self.swaths[area_name]) self.swaths[area_name] = SegmentedSwath(area_name, (granule.satname, granule.number, granule.variant)) return interesting def timeout(self): """Finishes swaths that are timed out. """ now = datetime.datetime.utcnow() for area_name, swath in self.swaths.items(): if swath.timeout and swath.timeout + self.timeliness < now: self.finished_swaths.append(swath) self.swaths[area_name] = SegmentedSwath(area_name, (self.satname, self.number, self.variant)) def clear(self): """Clean up the finished swaths. """ self.finished_swaths = [] def __str__(self): return self.fullname # Typical example: # when new_file: # gatherer.add(Granule(new_file)) # for swath in gatherer.finished: # scene = swath.assemble() # do_something_with(scene) # gatherer.finished.clear() # when its time (gatherer.next_timeout): # gatherer.finish_timeouts() # for swath in gatherer.finished: # scene = swath.assemble() # do_something_with(scene) # gatherer.finished.clear() import unittest OldConfigParser = None def patch_glob(): """Patch fnmatch. """ def fake_glob(*args): """Fake glob. """ del args return ["test.cfg"] glob.old_glob = glob.glob glob.glob = fake_glob def unpatch_glob(): """Unpatch glob. """ glob.glob = glob.old_glob delattr(glob, "old_glob") def patch_configparser(): """Patch to fake ConfigParser. """ class FakeConfigParser: """Dummy ConfigParser class. """ def __init__(self, *args, **kwargs): pass def read(self, *args, **kwargs): """Dummy read method """ del args, kwargs self = self def get(self, section, item, **kwargs): """Dummy get method """ del kwargs self = self if(section == "avhrr-granules" and item == "filename"): return "myfile_%Y%m%d_%H%M.cooltype" if(section == "avhrr-granules" and item == "type"): return "cooltype" if(section == "avhrr-granules" and item == "granularity"): return 60 if(section == "avhrr-granules" and item == "full_scan_period"): return 0.1667 if(section == "avhrr-granules" and item == "scan_width"): return 2048 if(section == "avhrr-granules" and item == "dir"): return "/path/to/my/data" if(section == "satellite" and item == "instruments"): return "('avhrr', )" if(section == "satellite" and item == "satname"): return "metop" if(section == "satellite" and item == "number"): return "02" if(section == "satellite" and item == "variant"): return "regional" global OldConfigParser, ConfigParser OldConfigParser = ConfigParser ConfigParser = FakeConfigParser def unpatch_configparser(): """Unpatch fake ConfigParser. """ global ConfigParser ConfigParser = OldConfigParser class TestGranules(unittest.TestCase): """Testing granules. """ def setUp(self): """Patching stuff. """ patch_configparser() patch_glob() def tearDown(self): """Unpatching stuff. 
""" unpatch_configparser() unpatch_glob() def test_init(self): """Testing init function. """ g_1 = Granule("/path/to/my/data/myfile_20101010_1010.cooltype") self.assertEquals(g_1.satname, "metop") self.assertEquals(g_1.number, "02") self.assertEquals(g_1.variant, "regional") self.assertEquals(g_1.time_slot, datetime.datetime(2010, 10, 10, 10, 10)) self.assertEquals(g_1.instrument_name, "avhrr") sat = Satellite("metop", "02", "regional") g_1 = Granule(instrument="avhrr", satellite=sat, time_slot=datetime.datetime(2010, 10, 10, 10, 10)) self.assertEquals(os.path.join(g_1.directory, g_1.file_name), "/path/to/my/data/myfile_20101010_1010.cooltype") def test_cmp(self): """test the __cmp__ function. """ g_1 = Granule("/path/to/my/data/myfile_20101010_1010.cooltype") g_2 = Granule("/path/to/my/data/myfile_20101010_1011.cooltype") self.assertTrue(g_1 < g_2) self.assertTrue(g_2 > g_1) self.assertTrue(g_1 == g_1) OldGranule = None def patch_granule(): """Faking Granule. """ class FakeArea: """Fake area class. """ def __init__(self, inside): self.inside = inside def overlaps(self, other): """Fake overlaping function. """ del other return self.inside class FakeGranule: """Fake granule class. """ def __init__(self, time_slot, satellite): self.time_slot = time_slot self.satellite = satellite self.span = 1 def __cmp__(self, other): return cmp(self.time_slot, other.time_slot) @property def gross_area(self): """Approximate area of the granule. """ if(self.time_slot > 2 and self.time_slot < 8): return FakeArea(inside=True) else: return FakeArea(inside=False) def __repr__(self): return "G:" + str(self.time_slot) global Granule, OldGranule OldGranule = Granule Granule = FakeGranule def patch_granule_with_time(): """Faking Granule. """ class FakeArea: """Fake area class. """ def __init__(self, inside): self.inside = inside def overlaps(self, other): """Fake overlaping function. """ return self.inside class FakeGranule: """Fake granule class. """ satname = "bli" number = "blu" variant = "bla" def __init__(self, time_slot, satellite): self.time_slot = time_slot self.satellite = satellite self.span = datetime.timedelta(minutes=1) def __cmp__(self, other): return cmp(self.time_slot, other.time_slot) @property def gross_area(self): """Approximate area of the granule. """ start_time = datetime.datetime(2010, 10, 10, 0, 2) end_time = datetime.datetime(2010, 10, 10, 0, 8) if(self.time_slot > start_time and self.time_slot < end_time): return FakeArea(inside=True) else: return FakeArea(inside=False) def __repr__(self): return "G:" + str(self.time_slot) global Granule, OldGranule OldGranule = Granule Granule = FakeGranule def unpatch_granule(): global Granule Granule = OldGranule class TestSegmentedSwath(unittest.TestCase): """Testing SegmentedSwath. """ def setUp(self): patch_granule() def tearDown(self): unpatch_granule() def test_init(self): """Test initialisation. """ swath = SegmentedSwath("bla", "bli", "blu", "blo") self.assertEquals(swath.area, "bla") def test_add(self): """Test adding. 
""" swath = SegmentedSwath("bla", "bli", "blu", "blo") granule = Granule(5, "kurt") swath.add(granule) self.assertEquals(granule.time_slot, swath.granules[0].time_slot) times = [granule.time_slot for granule in swath.planned_granules] self.assertEquals(times, [3, 4, 6, 7]) self.assertEquals(swath.timeout, 7) granule = Granule(4, "kurt") swath.add(granule) times = [granule.time_slot for granule in swath.planned_granules] self.assertEquals(times, [3, 6, 7]) times = [granule.time_slot for granule in swath.granules] times.sort() self.assertEquals(times, [4, 5]) self.assertEquals(swath.timeout, 7) granule = Granule(6, "kurt") swath.add(granule) times = [granule.time_slot for granule in swath.planned_granules] self.assertEquals(times, [3, 7]) times = [granule.time_slot for granule in swath.granules] times.sort() self.assertEquals(times, [4, 5, 6]) self.assertEquals(swath.timeout, 7) granule = Granule(7, "kurt") swath.add(granule) times = [granule.time_slot for granule in swath.planned_granules] self.assertEquals(times, [3]) times = [granule.time_slot for granule in swath.granules] times.sort() self.assertEquals(times, [4, 5, 6, 7]) self.assertEquals(swath.timeout, 3) granule = Granule(3, "kurt") swath.add(granule) times = [granule.time_slot for granule in swath.planned_granules] self.assertEquals(times, []) times = [granule.time_slot for granule in swath.granules] times.sort() self.assertEquals(times, [3, 4, 5, 6, 7]) self.assertTrue((datetime.datetime.now() - swath.timeout).seconds < 1) self.assertTrue(isinstance(swath.timeout, datetime.datetime)) def test_compute_plan(self): """Test planning of comming granules. """ swath = SegmentedSwath("bla", "bli", "blu", "blo") swath._compute_plan(5, 1) times = [granule.time_slot for granule in swath.planned_granules] self.assertEquals(times, [3, 4, 5, 6, 7]) def patch_now(): """Patching the now function from datetime. """ def fakenow(): """Fake now function. """ return datetime.datetime(2010, 10, 10, 0, 10) datetime.datetime.oldnow = datetime.datetime.now datetime.datetime.now = fakenow def unpatch_now(): """Returning the original now. """ datetime.datetime.now = datetime.datetime.oldnow delattr(datetime.datetime, "oldnow") class TestGatherer(unittest.TestCase): """Testing Gatherer. """ def setUp(self): patch_granule_with_time() def tearDown(self): unpatch_granule() def test_init(self): """Testing initialisation. """ gatherer = Gatherer(["bli", "blu"], satname="gla", number="glo", variant="glu") self.assertTrue("bli" in gatherer.swaths) self.assertTrue(isinstance(gatherer.swaths["bli"], SegmentedSwath)) self.assertTrue("blu" in gatherer.swaths) self.assertTrue(isinstance(gatherer.swaths["blu"], SegmentedSwath)) def test_add(self): """Testing adding of granules to the gatherer. """ timeliness = (datetime.datetime.utcnow() - datetime.datetime(2010, 10, 9, 23, 0)) gatherer = Gatherer(["bli", "blu"], timeliness=timeliness, satname="gla", number="glo", variant="glu") gatherer.add(Granule(datetime.datetime(2010, 10, 10, 0, 5), "blaf")) gatherer.add(Granule(datetime.datetime(2010, 10, 10, 0, 3), "blaf")) gatherer.add(Granule(datetime.datetime(2010, 10, 10, 0, 4), "blaf")) times = [granule.time_slot for granule in gatherer.swaths["bli"].granules] ideal_times = [datetime.datetime(2010, 10, 10, 0, 5), datetime.datetime(2010, 10, 10, 0, 3), datetime.datetime(2010, 10, 10, 0, 4)] self.assertEquals(times, ideal_times) # put the timeliness to zero and add a new granule: # This should add the granule as normal. 
        gatherer.timeliness = datetime.timedelta(seconds=0)
        gatherer.add(Granule(datetime.datetime(2010, 10, 10, 0, 7), "blaf"))
        times = [granule.time_slot
                 for granule in gatherer.swaths["bli"].granules]
        ideal_times = [datetime.datetime(2010, 10, 10, 0, 5),
                       datetime.datetime(2010, 10, 10, 0, 3),
                       datetime.datetime(2010, 10, 10, 0, 4),
                       datetime.datetime(2010, 10, 10, 0, 7)]
        self.assertEquals(times, ideal_times)

        gatherer.timeliness = timeliness
        gatherer.add(Granule(datetime.datetime(2010, 10, 10, 0, 6), "blaf"))
        self.assertFalse(gatherer.swaths["bli"].granules)
        self.assertFalse(gatherer.swaths["blu"].granules)
        self.assertEquals(len(gatherer.finished_swaths), 2)

    def test_timeout(self):
        """Test the timeout.
        """
        timeliness = (datetime.datetime.utcnow() -
                      datetime.datetime(2010, 10, 9, 23, 0))
        gatherer = Gatherer(["kurt", "blu"], timeliness=timeliness,
                            satname="gla", number="glo", variant="glu")
        gatherer.add(Granule(datetime.datetime(2010, 10, 10, 0, 5), "blaf"))
        gatherer.add(Granule(datetime.datetime(2010, 10, 10, 0, 3), "blaf"))
        gatherer.add(Granule(datetime.datetime(2010, 10, 10, 0, 4), "blaf"))
        gatherer.timeout()
        times = [granule.time_slot
                 for granule in gatherer.swaths["kurt"].granules]
        ideal_times = [datetime.datetime(2010, 10, 10, 0, 5),
                       datetime.datetime(2010, 10, 10, 0, 3),
                       datetime.datetime(2010, 10, 10, 0, 4)]
        self.assertEquals(times, ideal_times)
        gatherer.timeliness = datetime.timedelta(seconds=0)
        gatherer.timeout()
        times = [granule.time_slot
                 for granule in gatherer.finished_swaths[0].granules]
        ideal_times = [datetime.datetime(2010, 10, 10, 0, 5),
                       datetime.datetime(2010, 10, 10, 0, 3),
                       datetime.datetime(2010, 10, 10, 0, 4)]
        self.assertEquals(times, ideal_times)
        self.assertFalse(gatherer.swaths["kurt"].granules)
        self.assertFalse(gatherer.swaths["kurt"].planned_granules)
        self.assertFalse(gatherer.swaths["blu"].granules)
        self.assertFalse(gatherer.swaths["blu"].planned_granules)

if __name__ == "__main__":
    unittest.main()
mpop-1.5.0/mpop/saturn/runner.py000066400000000000000000000233231317160620000166410ustar00rootroot00000000000000#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2010, 2011, 2012, 2013, 2014.

# SMHI,
# Folkborgsvägen 1,
# Norrköping,
# Sweden

# Author(s):

#   Martin Raspaud

# This file is part of mpop.

# mpop is free software: you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.

# mpop is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU General Public License for more details.

# You should have received a copy of the GNU General Public License along with
# mpop. If not, see <http://www.gnu.org/licenses/>.
"""Batch running of mpop products.
"""
import datetime
import getopt
import logging
import sys

from mpop.channel import NotLoadedError
from mpop.satellites import get_sat_instr_compositer
from mpop.saturn.tasklist import TaskList
from mpop.satellites import GenericFactory

LOG = logging.getLogger("runner")

def usage(scriptname):
    """Print useful information for running the script.
    """
    print("""
    usage: %s [options]

    Available options:
    -r --rgb          Output only the pure rgb images from the product list
    -p --pge          Output only the cloudproduct derived images from the
                      product list
    -d --date         Run on the specified date (eg. 200910081430)
    -a --area         Run on the specified area (eg. eurol)
    -c --composite    Output the specified composite.
    Available composites are:
        overview
        natural
        fog
        nightfog
        convection
        airmass
        ir9
        wv_low
        wv_high
        greensnow
        redsnow
        cloudtop
        hr_overview
        PGE02
        PGE02b
        PGE02bj
        PGE02c
        PGE02cj
        PGE02d
        PGE02e
        PGE03
        CtypeHDF
        NordRad
        CtthHDF
    -v                Be verbose
    --vv              Be very verbose
    -h --help         Print this message
    """%scriptname)

def parse_options():
    """Parse command line options.
    """
    time_slots = []

    console = logging.StreamHandler()
    console.setFormatter(logging.Formatter("[%(levelname)s: %(asctime)s :"
                                           " %(name)s] %(message)s",
                                           '%Y-%m-%d %H:%M:%S'))
    try:
        opts, args = getopt.getopt(sys.argv[1:],
                                   "hd:va:c:pr",
                                   ["vv", "help", "date=", "pge", "rgb",
                                    "area=", "composite="])
        del args
    except getopt.GetoptError, err:
        print err
        usage(sys.argv[0])
        sys.exit(2)

    mode = set([])
    areas = set([])
    composites = set([])

    for opt, arg in opts:
        if opt == "-v":
            console.setLevel(logging.INFO)
            logging.getLogger('').addHandler(console)
        elif opt == "--vv":
            console.setLevel(logging.DEBUG)
            logging.getLogger('').addHandler(console)
        elif opt in ("-h", "--help"):
            usage(sys.argv[0])
            sys.exit()
        elif opt in ("-d", "--date"):
            time_slots.append(datetime.datetime.strptime(arg, "%Y%m%d%H%M"))
        elif opt in ("-p", "--pge"):
            mode |= set(["pge"])
        elif opt in ("-r", "--rgb"):
            mode |= set(["rgb"])
        elif opt in ("-a", "--area"):
            areas |= set([arg])
        elif opt in ("-c", "--composite"):
            composites |= set([arg])
        else:
            raise ValueError("Option %s not recognized." % opt)

    return time_slots, mode, areas, composites

class SequentialRunner(object):
    """Runs scenes in a sequential order, as opposed to parallelized running.
    """
    def __init__(self, satellite, instrument, tasklist, precompute=False):
        if isinstance(tasklist, str):
            tasklist = TaskList(tasklist)
        self.tasklist = tasklist
        self.data = None
        self.satellite = satellite[0]
        self.number = satellite[1]
        self.variant = satellite[2]
        self.instrument = instrument
        self.klass = get_sat_instr_compositer((self.satellite,
                                               self.number,
                                               self.variant),
                                              self.instrument)
        self.precompute = precompute
        self.running = True

    def stop(self):
        """Stops the runner.
        """
        self.running = False

    def run_from_cmd(self, hook=None):
        """Batch run mpop.
        """
        time_slots, mode, areas, composites = parse_options()
        tasklist = self.tasklist.shape(self.klass, mode, areas, composites)

        for time_slot in time_slots:
            self.data = GenericFactory.create_scene(self.satellite,
                                                    self.number,
                                                    self.instrument,
                                                    time_slot,
                                                    orbit=None,
                                                    variant=self.variant)
            prerequisites = tasklist.get_prerequisites(self.klass)
            self.data.load(prerequisites)
            self.run_from_data(tasklist, hook=hook)

    def run_from_data(self, tasklist=None, radius=None, hook=None):
        """Run on given data.
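        A minimal usage sketch (the satellite triplet, instrument name and
        tasklist path below are hypothetical, and self.data is assumed to
        already hold a loaded scene):

            runner = SequentialRunner(("metop", "02", "regional"), "avhrr",
                                      "/path/to/some_products.py")
            runner.run_from_data(radius=5000)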
""" if tasklist is None: tasklist = self.tasklist for area, productlist in tasklist.items(): prerequisites = tasklist.get_prerequisites(self.klass, area) if not self.running: LOG.info("Running interrupted") return local_data = self.data.project(area, prerequisites, self.precompute, mode="nearest", radius=radius) for product, flist in productlist.items(): fun = getattr(local_data.image, product) flist = flist.put_date(local_data.time_slot) metadata = {"area": area} LOG.info("Orbit = " + str(local_data.orbit)) if local_data.orbit is not None: metadata["orbit"] = int(local_data.orbit) LOG.info("Instrument Name = " + str(local_data.instrument_name)) if local_data.instrument_name is not None: metadata["instrument_name"] = str(local_data.instrument_name) flist = flist.put_metadata(metadata) try: LOG.debug("Doing "+product+".") if not self.running: del local_data LOG.info("Running interrupted") return img = fun() img.info["product_name"] = product img.info["instrument_name"] = metadata["instrument_name"] img.info["start_time"] = self.data.info.get("start_time", self.data.time_slot) img.info["end_time"] = self.data.info.get("end_time", self.data.time_slot) flist.save_object(img, hook) del img except (NotLoadedError, KeyError, ValueError), err: LOG.warning("Error in "+product+": "+str(err)) LOG.info("Skipping "+product) del local_data def run_from_local_data(self, tasklist=None, extra_tags=None, hook=None): """Run on given local data (already projected). """ if tasklist is None: tasklist = self.tasklist metadata = {} area_name = self.data.area_id or self.data.area_def.area_id tasks, dummy = tasklist.split(area_name) if area_name not in tasks: LOG.debug("Nothing to do for " + area_name) return for product, flist in tasks[area_name].items(): try: fun = getattr(self.data.image, product) except AttributeError: LOG.warning("Missing composite function: " + str(product)) continue flist = flist.put_date(self.data.time_slot) if self.data.orbit is not None: metadata["orbit"] = int(self.data.orbit) LOG.info("Instrument Name = " + str(self.data.instrument_name)) if self.data.instrument_name is not None: metadata["instrument_name"] = str(self.data.instrument_name) metadata["satellite"] = self.data.fullname metadata["area"] = area_name flist = flist.put_metadata(metadata) try: LOG.debug("Doing "+product+".") img = fun() img.info["product_name"] = product img.info["instrument_name"] = metadata["instrument_name"] img.info["start_time"] = self.data.info.get("start_time", self.data.time_slot) img.info["end_time"] = self.data.info.get("end_time", self.data.time_slot) if extra_tags: img.tags.update(extra_tags) flist.save_object(img, hook) del img except (NotLoadedError, KeyError), err: LOG.warning("Error in "+product+": "+str(err)) LOG.info("Skipping "+product) if __name__ == "__main__": pass mpop-1.5.0/mpop/saturn/tasklist.py000066400000000000000000000132261317160620000171670ustar00rootroot00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright (c) 2010, 2012. # SMHI, # Folkborgsvägen 1, # Norrköping, # Sweden # Author(s): # Martin Raspaud # This file is part of mpop. # mpop is free software: you can redistribute it and/or modify it under the # terms of the GNU General Public License as published by the Free Software # Foundation, either version 3 of the License, or (at your option) any later # version. # mpop is distributed in the hope that it will be useful, but WITHOUT ANY # WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR # A PARTICULAR PURPOSE. 
See the GNU General Public License for more details.

# You should have received a copy of the GNU General Public License along with
# mpop. If not, see <http://www.gnu.org/licenses/>.
"""Tasklist class and helper function.
"""
from mpop.saturn.filelist import FileList
from mpop import CONFIG_PATH

class TaskList(dict):
    """Defines a tasklist.
    """
    def __init__(self, product_file=None):
        dict.__init__(self)

        if product_file is not None:
            global_vars = {"__file__": product_file}
            local_vars = {}
            execfile(product_file, global_vars, local_vars)
            tasks = local_vars["PRODUCTS"]
            for area in tasks:
                self[area] = {}
                for product in tasks[area]:
                    self[area][product] = FileList(tasks[area][product])

    def split(self, *keys):
        """Split the tasklist along the *keys* parameter: keys in the first
        part, non-keys in the second.
        """
        tl1 = TaskList()
        tl2 = TaskList()
        for key, item in self.items():
            if key in keys:
                tl1[key] = item
            else:
                tl2[key] = item
        return tl1, tl2

    def get_prerequisites(self, klass, area_id=None):
        """Get the channels we need to load to fulfill the tasklist according
        to methods defined in *klass*. If area is provided, account only for
        tasks on this area.
        """
        if area_id is None:
            areas = self.keys()
        elif(isinstance(area_id, (list, tuple, set))):
            areas = list(area_id)
        else:
            areas = [area_id]
        prerequisites = set()
        for area in areas:
            productlist = self.get(area, {})
            for product in productlist:
                if(hasattr(klass, product) and
                   hasattr(getattr(klass, product), "prerequisites")):
                    prerequisites |= getattr(getattr(klass, product),
                                             "prerequisites")
        return prerequisites

    def shape(self, klass, mode=set(),
              original_areas=set(), specific_composites=set()):
        """Shape the tasklist according to the given options.
        """
        composites = set()
        if len(original_areas) == 0:
            original_areas = set(self.keys())
        new_tasklist = TaskList()
        if len(specific_composites) == 0 and len(mode) == 0:
            for area in original_areas:
                new_tasklist[area] = self.get(area, {})

        for i in dir(klass):
            if hasattr(getattr(klass, i), "prerequisites"):
                if("pge" in mode and
                   ("CloudType" in getattr(getattr(klass, i),
                                           "prerequisites") or
                    "CTTH" in getattr(getattr(klass, i),
                                      "prerequisites"))):
                    composites |= set([i])
                elif("rgb" in mode and
                     ("CloudType" not in getattr(getattr(klass, i),
                                                 "prerequisites") and
                      "CTTH" not in getattr(getattr(klass, i),
                                            "prerequisites"))):
                    composites |= set([i])

        for area in original_areas:
            new_tasklist.setdefault(area, {})
            for product in specific_composites:
                filelist = self.get(area, {}).get(product,
                                                  [area+"_"+product+".png"])
                new_tasklist[area].setdefault(product, FileList())
                new_tasklist[area][product].extend(filelist)
            for product in composites:
                filelist = self.get(area, {}).get(product, FileList())
                if len(filelist) > 0:
                    new_tasklist[area].setdefault(product, FileList())
                    new_tasklist[area][product].extend(filelist)
        return new_tasklist

def get_product_list(satscene):
    """Returns the tasklist corresponding to the satellite described in
    *satscene*, which can be a scene object, a list or a tuple. If the
    corresponding file could not be found, the function returns more generic
    tasklists (variant and name based, then only variant based), or None if no
    file can be found.

    NB: the product files are looked for in the CONFIG_PATH directory.
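    For example, for a scene with variant "regional", name "metop" and
    number "02", the files tried are, in order: regionalmetop02_products.py,
    regionalmetop_products.py and regional_products.py.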
""" if isinstance(satscene, (list, tuple)): if len(satscene) != 3: raise ValueError("Satscene must be a triplet (variant, name, " "number) or a scene object.") components = satscene else: components = [satscene.variant, satscene.satname, satscene.number] import os.path for i in range(len(components)): pathname = os.path.join(CONFIG_PATH, "".join(components[:len(components)-i])) if os.path.exists(pathname+"_products.py"): return TaskList(pathname+"_products.py") return None mpop-1.5.0/mpop/saturn/two_line_elements.py000066400000000000000000000344011317160620000210430ustar00rootroot00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright (c) 2011, 2013. # Author(s): # Martin Raspaud # This file is part of mpop. # mpop is free software: you can redistribute it and/or modify it under the # terms of the GNU General Public License as published by the Free Software # Foundation, either version 3 of the License, or (at your option) any later # version. # mpop is distributed in the hope that it will be useful, but WITHOUT ANY # WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR # A PARTICULAR PURPOSE. See the GNU General Public License for more details. # You should have received a copy of the GNU General Public License along with # mpop. If not, see . """Module to compute satellite positions from TLE. """ import datetime import sys import urllib2 import numpy as np CK2 = 5.413080e-4 CK4 = 0.62098875e-6 E6A = 1.0e-6 QOMS2T = 1.88027916e-9 S = 1.01222928 XJ3 = -0.253881e-5 XKE = 0.743669161e-1 XKMPER = 6378.137 XMNPDA = 1440.0 AE = 1.0 # earth flattening F = 1/298.257223563 if sys.version_info < (2, 5): import time def strptime(string, fmt=None): """This function is available in the datetime module only from Python >= 2.5. """ return datetime.datetime(*time.strptime(string, fmt)[:6]) else: strptime = datetime.datetime.strptime class Tle(object): """The TLE object holds information and methods for orbit position estimation. """ def __init__(self, tle=None, satellite=None): self.tle = tle if satellite: tles_dict = {} import glob filelist = glob.glob("/data/24/saf/polar_in/tle/tle-*.txt") if len(filelist) > 0: filelist.sort() tlef = open(filelist[-1]) tles = [item.strip() for item in tlef] tlef.close() for i in xrange(0, len(tles) - 2, 3): tles_dict[tles[i]] = tles[i+1]+"\n"+tles[i+2] else: for fname in ["resource.txt", "weather.txt"]: url = "http://celestrak.com/NORAD/elements/" + fname tles = urllib2.urlopen(url).readlines() tles = [item.strip() for item in tles] for i in xrange(0, len(tles) - 2, 3): tles_dict[tles[i]] = tles[i+1]+"\n"+tles[i+2] self._read_tle(tles_dict[satellite.upper()]) self._preprocess() def _read_tle(self, lines): """Read the raw tle. """ def _read_tle_decimal(rep): """Read tle decimal point numbers. 
""" num = int(rep[:-2]) * 1.0e-5 exp = int(rep[-2:]) return num * 10 ** exp tlist = lines.split() self.tle = {} self.tle["satnumber"] = tlist[1][:5] self.tle["classification"] = tlist[1][5:] self.tle["id_launch_year"] = tlist[2][:2] self.tle["id_launch_number"] = tlist[2][2:5] self.tle["id_launch_piece"] = tlist[2][5:] self.tle["epoch_year"] = int(tlist[3][:2]) self.tle["epoch_day"] = float(tlist[3][2:]) self.tle["epoch"] = (strptime(tlist[3][:2], "%y") + datetime.timedelta(days=float(tlist[3][2:]) - 1)) self.tle["mean_motion_derivative"] = float(tlist[4]) self.tle["mean_motion_sec_derivative"] = _read_tle_decimal(tlist[5]) self.tle["bstar"] = _read_tle_decimal(tlist[6]) self.tle["ephemeris_type"] = int(tlist[7]) self.tle["element_number"] = int(tlist[8][:-1]) self.tle["inclination"] = float(tlist[11]) self.tle["right_ascension"] = float(tlist[12]) self.tle["excentricity"] = int(tlist[13]) * 10 ** -7 self.tle["arg_perigee"] = float(tlist[14]) self.tle["mean_anomaly"] = float(tlist[15]) self.tle["mean_motion"] = float(tlist[16][:11]) self.tle["orbit"] = int(tlist[16][11:-1]) def _preprocess(self): """Derivate some values from raw tle. """ self.tle["inclination"] = np.deg2rad(self.tle["inclination"]) self.tle["right_ascension"] = np.deg2rad(self.tle["right_ascension"]) self.tle["arg_perigee"] = np.deg2rad(self.tle["arg_perigee"]) self.tle["mean_anomaly"] = np.deg2rad(self.tle["mean_anomaly"]) self.tle["mean_motion"] *= (np.pi * 2 / XMNPDA) self.tle["mean_motion_derivative"] *= np.pi * 2 / XMNPDA ** 2 self.tle["mean_motion_sec_derivative"] *= np.pi * 2 / XMNPDA ** 3 self.tle["bstar"] *= AE n_0 = self.tle["mean_motion"] k_e = XKE k_2 = CK2 i_0 = self.tle["inclination"] e_0 = self.tle["excentricity"] a_1 = (k_e / n_0) ** (2.0/3) delta_1 = ((3/2.0) * (k_2 / a_1**2) * ((3 * np.cos(i_0)**2 - 1) / (1 - e_0**2)**(2.0/3))) a_0 = a_1 * (1 - delta_1/3 - delta_1**2 - (134.0/81) * delta_1**3) delta_0 = ((3/2.0) * (k_2 / a_0**2) * ((3 * np.cos(i_0)**2 - 1) / (1 - e_0**2)**(2.0/3))) # original mean motion n_0pp = n_0 / (1 + delta_0) self.tle["original_mean_motion"] = n_0pp # semi major axis a_0pp = a_0 / (1 - delta_0) self.tle["semi_major_axis"] = a_0pp self.tle["period"] = np.pi * 2 / n_0pp self.tle["perigee"] = (a_0pp * (1 - e_0) / AE - AE) * XKMPER now = self.tle["epoch"] self.tle["right_ascension_lon"] = (self.tle["right_ascension"] - gmst(now)) if self.tle["right_ascension_lon"] > np.pi: self.tle["right_ascension_lon"] -= 2 * np.pi # pylint: disable-msg=C0103 def get_position(self, current_time): """Get cartesian position and velocity. 
""" # for near earth orbits, period must be < 255 minutes perigee = self.tle["perigee"] a_0pp = self.tle["semi_major_axis"] e_0 = self.tle["excentricity"] i_0 = self.tle["inclination"] n_0pp = self.tle["original_mean_motion"] k_2 = CK2 k_4 = CK4 k_e = XKE bstar = self.tle["bstar"] w_0 = self.tle["arg_perigee"] M_0 = self.tle["mean_anomaly"] W_0 = self.tle["right_ascension"] t_0 = self.tle["epoch"] A30 = -XJ3 * AE**3 if perigee < 98: s = 20/XKMPER + AE qoms2t = (QOMS2T ** 0.25 + S - s) ** 4 elif perigee < 156: s = a_0pp * (1 - e_0) - S + AE qoms2t = (QOMS2T ** 0.25 + S - s) ** 4 else: qoms2t = QOMS2T s = S theta = np.cos(i_0) xi = 1 / (a_0pp - s) beta_0 = np.sqrt(1 - e_0 ** 2) eta = a_0pp * e_0 * xi C_2 = (qoms2t * xi**4 * n_0pp * (1 - eta**2)**(-3.5) * (a_0pp * (1 + 1.5 * eta**2 + 4 * e_0 * eta + e_0 * eta**3) + 1.5 * (k_2 * xi) / (1 - eta**2) * (-0.5 + 1.5 * theta**2)* (8 + 24 * eta**2 + 3 * eta**4))) C_1 = bstar * C_2 C_3 = (qoms2t * xi ** 5 * A30 * n_0pp * AE * np.sin(i_0) / (k_2 * e_0)) coef = 2 * qoms2t * xi**4 * a_0pp * beta_0**2*(1-eta**2)**(-7/2.0) C_4 = (coef * n_0pp * ((2 * eta * (1 + e_0 * eta) + e_0/2.0 + (eta**3)/2.0) - 2 * k_2 * xi / (a_0pp * (1 - eta**2)) * (3*(1-3*theta**2) * (1 + (3*eta**2)/2.0 - 2*e_0*eta - e_0*eta**3/2.0) + 3/4.0*(1-theta**2)* (2*eta**2 - e_0*eta - e_0*eta**3)*np.cos(2*w_0)))) C_5 = coef * (1 + 11/4.0 * eta * (eta + e_0) + e_0 * eta**3) D_2 = 4 * a_0pp * xi * C_1**2 D_3 = 4/3.0 * a_0pp * xi**2 * (17*a_0pp + s) * C_1**3 D_4 = 2/3.0 * a_0pp * xi**3 * (221*a_0pp + 31*s) * C_1**4 # Secular effects of atmospheric drag and gravitation dt = _days(current_time - t_0) * XMNPDA M_df = (M_0 + (1 + 3*k_2*(-1 + 3*theta**2)/(2*a_0pp**2 * beta_0**3) + 3*k_2**2*(13 - 78*theta**2 + 137*theta**4)/ (16*a_0pp**4*beta_0**7))* n_0pp*dt) w_df = (w_0 + (-3*k_2*(1 - 5*theta**2)/(2*a_0pp**2*beta_0**4) + 3 * k_2**2 * (7 - 114*theta**2 + 395*theta**4)/ (16*a_0pp*beta_0**8) + 5*k_4*(3-36*theta**2+49*theta**4)/ (4*a_0pp**4*beta_0**8))* n_0pp*dt) W_df = (W_0 + (-3*k_2*theta/(a_0pp**2*beta_0**4) + 3*k_2**2*(4*theta- 19*theta**3)/(2*a_0pp**4*beta_0**8) + 5*k_4*theta*(3-7*theta**2)/(2*a_0pp**4*beta_0**8))* n_0pp*dt) deltaw = bstar * C_3 * np.cos(w_0)*dt deltaM = (-2/3.0 * qoms2t * bstar * xi**4 * AE / (e_0*eta) * ((1 + eta * np.cos(M_df))**3 - (1 + eta * np.cos(M_0))**3)) M_p = M_df + deltaw + deltaM w = w_df - deltaw - deltaM W = (W_df - 21/2.0 * (n_0pp * k_2 * theta)/(a_0pp**2 * beta_0**2) * C_1 * dt**2) e = (e_0 - bstar * C_4 * dt - bstar * C_5 * (np.sin(M_p) - np.sin(M_0))) a = a_0pp * (1 - C_1 * dt - D_2 * dt**2 - D_3 * dt**3 - D_4 * dt**4)**2 L = M_p + w + W + n_0pp * (3/2.0 * C_1 * dt**2 + (D_2 + 2 * C_1 ** 2) * dt**3 + 1/4.0 * (3*D_3 + 12*C_1*D_2 + 10*C_1**3)*dt**4 + 1.0/5 * (3*D_4 + 12*C_1*D_3 + 6*D_2**2 + 30*C_1**2*D_2 + 15*C_1**4)*dt**5) beta = np.sqrt(1 - e**2) n = k_e / (a ** (3/2.0)) # Long-period periodic terms a_xN = e * np.cos(w) a_yNL = A30 * np.sin(i_0) / (4.0 * k_2 * a * beta**2) L_L = a_yNL/2 * a_xN * ((3 + 5 * theta) / (1 + theta)) L_T = L + L_L a_yN = e * np.sin(w) + a_yNL U = (L_T - W) % (np.pi * 2) Epw = U for i in range(10): DeltaEpw = ((U - a_yN * np.cos(Epw) + a_xN * np.sin(Epw) - Epw) / (-a_yN * np.sin(Epw) - a_xN * np.cos(Epw) + 1)) Epw = Epw + DeltaEpw if DeltaEpw < 10e-12: break # preliminary quantities for short-period periodics ecosE = a_xN * np.cos(Epw) + a_yN * np.sin(Epw) esinE = a_xN * np.sin(Epw) - a_yN * np.cos(Epw) e_L = (a_xN**2 + a_yN**2)**(0.5) p_L = a * (1 - e_L**2) r = a * (1 - ecosE) rdot = k_e * np.sqrt(a)/r * esinE rfdot = k_e * 
np.sqrt(p_L) / r

        cosu = a / r * (np.cos(Epw) - a_xN +
                        (a_yN * (esinE) / (1 + np.sqrt(1 - e_L**2))))
        sinu = a / r * (np.sin(Epw) - a_yN +
                        (a_xN * (esinE) / (1 + np.sqrt(1 - e_L**2))))
        u = np.arctan2(sinu, cosu)
        cos2u = np.cos(2*u)
        sin2u = np.sin(2*u)
        Deltar = k_2/(2*p_L) * (1 - theta**2) * cos2u
        Deltau = -k_2/(4*p_L**2) * (7*theta**2 - 1) * sin2u
        DeltaW = 3*k_2 * theta / (2 * p_L**2) * sin2u
        Deltai = 3*k_2 * theta / (2 * p_L**2) * cos2u * np.sin(i_0)
        Deltardot = - k_2 * n / p_L * (1 - theta**2) * sin2u
        Deltarfdot = k_2 * n / p_L * ((1 - theta**2) * cos2u -
                                      3/2.0 * (1 - 3*theta**2))

        # osculating quantities
        r_k = r * (1 - 3/2.0 * k_2 * np.sqrt(1 - e_L**2)/p_L**2 *
                   (3 * theta**2 - 1)) + Deltar
        u_k = u + Deltau
        W_k = W + DeltaW
        i_k = i_0 + Deltai
        rdot_k = rdot + Deltardot
        rfdot_k = rfdot + Deltarfdot

        M_x = -np.sin(W_k) * np.cos(i_k)
        M_y = np.cos(W_k) * np.cos(i_k)
        M_z = np.sin(i_k)
        N_x = np.cos(W_k)
        N_y = np.sin(W_k)
        N_z = 0

        U_x = M_x * np.sin(u_k) + N_x * np.cos(u_k)
        U_y = M_y * np.sin(u_k) + N_y * np.cos(u_k)
        U_z = M_z * np.sin(u_k) + N_z * np.cos(u_k)

        V_x = M_x * np.cos(u_k) - N_x * np.sin(u_k)
        V_y = M_y * np.cos(u_k) - N_y * np.sin(u_k)
        V_z = M_z * np.cos(u_k) - N_z * np.sin(u_k)

        r_x = r_k * U_x
        r_y = r_k * U_y
        r_z = r_k * U_z

        rdot_x = rdot_k * U_x + rfdot_k * V_x
        rdot_y = rdot_k * U_y + rfdot_k * V_y
        rdot_z = rdot_k * U_z + rfdot_k * V_z

        return r_x, r_y, r_z, rdot_x, rdot_y, rdot_z

    def get_latlonalt(self, current_time):
        """Get longitude, latitude and altitude for *current_time*.
        """
        pos_x, pos_y, pos_z, vel_x, vel_y, vel_z = \
            self.get_position(current_time)
        del vel_x, vel_y, vel_z
        lon = ((np.arctan2(pos_y * XKMPER, pos_x * XKMPER) -
                gmst(current_time)) % (2 * np.pi))

        if lon > np.pi:
            lon -= np.pi * 2
        if lon <= -np.pi:
            lon += np.pi * 2

        r = np.sqrt(pos_x ** 2 + pos_y ** 2)
        lat = np.arctan2(pos_z, r)
        e2 = F * (2 - F)
        while True:
            lat2 = lat
            c = 1 / (np.sqrt(1 - e2 * (np.sin(lat2) ** 2)))
            lat = np.arctan2(pos_z + c * e2 * np.sin(lat2), r)
            if abs(lat - lat2) < 1e-10:
                break
        alt = r / np.cos(lat) - c
        alt *= XKMPER
        return lat, lon, alt
# pylint: enable-msg=C0103

def _jdays(current_time):
    """Get the julian day of *current_time*.
    """
    d_t = current_time - datetime.datetime(2000, 1, 1, 12, 0)
    return _days(d_t)

def _days(d_t):
    """Get the days (floating point) from *d_t*.
    """
    return (d_t.days +
            (d_t.seconds +
             d_t.microseconds / (1000000.0)) / (24 * 3600.0))

def gmst(current_time):
    """Greenwich mean sidereal time, in radians.

    http://celestrak.com/columns/v02n02/
    """
    now = current_time
    #now = datetime.datetime(1995, 10, 1, 9, 0)
    now0 = datetime.datetime(now.year, now.month, now.day)
    epoch = datetime.datetime(2000, 1, 1, 12, 0)
    du2 = _days(now - epoch)
    d_u = _days(now0 - epoch)
    dus = (du2 - d_u) * 86400
    t_u = d_u / 36525.0
    theta_g_0 = (24110.54841 + t_u * (8640184.812866 +
                                      t_u * (0.093104 - t_u * 6.2e-6)))
    theta_g = (theta_g_0 + dus * 1.00273790934) % 86400
    return (theta_g / 86400.0) * 2 * np.pi

if __name__ == "__main__":
    pass
mpop-1.5.0/mpop/scene.py000066400000000000000000001134751317160620000151150ustar00rootroot00000000000000#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2010-2017.

# Author(s):

#   Martin Raspaud
#   Esben S. Nielsen

# This file is part of mpop.

# mpop is free software: you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
# mpop is distributed in the hope that it will be useful, but WITHOUT ANY # WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR # A PARTICULAR PURPOSE. See the GNU General Public License for more details. # You should have received a copy of the GNU General Public License along with # mpop. If not, see . """The :mod:`mpop.scene` module defines satellite scenes. They are defined as generic classes, to be inherited when needed. A scene is a set of :mod:`mpop.channel` objects for a given time, and sometimes also for a given area. """ import ConfigParser import copy import datetime import imp import logging import os.path import sys import types import weakref import numpy as np import mpop.satin from mpop import CONFIG_PATH from mpop.channel import Channel, NotLoadedError from mpop.utils import OrderedConfigParser LOG = logging.getLogger(__name__) try: # Work around for on demand import of pyresample. pyresample depends # on scipy.spatial which memory leaks on multiple imports is_pyresample_loaded = False from pyresample.geometry import AreaDefinition, SwathDefinition import mpop.projector is_pyresample_loaded = True except ImportError: LOG.warning("pyresample missing. Can only work in satellite projection") class Satellite(object): """This is the satellite class. It contains information on the satellite. """ def __init__(self, (satname, number, variant)=(None, None, None)): try: self.satname = satname or "" or self.satname except AttributeError: self.satname = satname or "" try: self.number = number or "" or self.number except AttributeError: self.number = number or "" try: self.variant = variant or "" or self.variant except AttributeError: self.variant = variant or "" @property def fullname(self): """Full name of the satellite, that is platform name and number (eg "metop02"). """ return self.variant + self.satname + self.number def sat_nr(self, string=False): import re sat_nr = re.findall(r'\d+', self.fullname)[0] if string: return sat_nr else: return int(sat_nr) @classmethod def remove_attribute(cls, name): """Remove an attribute from the class. """ return delattr(cls, name) @classmethod def add_method(cls, func): """Add a method to the class. """ return setattr(cls, func.__name__, func) def add_method_to_instance(self, func): """Add a method to the instance. """ return setattr(self, func.__name__, types.MethodType(func, self.__class__)) class SatelliteScene(Satellite): """This is the satellite scene class. It is a capture of the satellite (channels) data at given *time_slot* and *area_id*/*area*. """ def __init__(self, time_slot=None, area_id=None, area=None, orbit=None, satellite=(None, None, None)): Satellite.__init__(self, satellite) # if(time_slot is not None and # not isinstance(time_slot, datetime.datetime)): # raise TypeError("Time_slot must be a datetime.datetime instance.") self.time_slot = time_slot self.area_id = None self.area_def = None if area_id is not None: from warnings import warn warn("The *area_id* attribute is deprecated." "Please use *area* instead.", DeprecationWarning) if not isinstance(area_id, str): raise TypeError("Area must be a string.") self.area = area_id if area is not None: self.area = area self.orbit = orbit self.info = {} self.lat = None self.lon = None def get_area(self): """Getter for area. """ return self.area_def or self.area_id def set_area(self, area): """Setter for area. 
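        *area* may be None, an area id string, or a duck-typed area object:
        either grid-like (exposing area_extent, x_size, y_size, proj_id and
        proj_dict) or swath-like (exposing lons and lats). Any other value
        raises a TypeError.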
""" if area is None: self.area_def = None self.area_id = None elif isinstance(area, str): self.area_id = area self.area_def = None else: try: dummy = area.area_extent dummy = area.x_size dummy = area.y_size dummy = area.proj_id dummy = area.proj_dict self.area_def = area self.area_id = None except AttributeError: try: dummy = area.lons dummy = area.lats self.area_def = area self.area_id = None except AttributeError: raise TypeError(("Malformed area argument. " "Should be a string or an area object. " "Not %s") % type(area)) area = property(get_area, set_area) class SatelliteInstrumentScene(SatelliteScene): """This is the satellite instrument class. It is an abstract channel container, from which all concrete satellite scenes should be derived. The constructor accepts as optional arguments the *time_slot* of the scene, the *area* on which the scene is defined (this can be used for slicing of big datasets, or can be set automatically when loading), and *orbit* which is a string giving the orbit number. """ channel_list = [] def __init__(self, time_slot=None, area_id=None, area=None, orbit=None, satellite=(None, None, None), instrument=None): SatelliteScene.__init__(self, time_slot, area_id, area, orbit, satellite) try: self.instrument_name = instrument or self.instrument_name except AttributeError: self.instrument_name = None self.channels = [] self.end_time = None if isinstance(self.time_slot, (tuple, list)): self.time_slot, self.end_time = self.time_slot try: conf = OrderedConfigParser() conf.read(os.path.join(CONFIG_PATH, self.fullname + ".cfg")) for section in conf.sections(): if(not section[:-1].endswith("level") and not section.endswith("granules") and section.startswith(self.instrument_name)): name = eval(conf.get(section, "name")) try: w_range = eval(conf.get(section, "frequency")) except ConfigParser.NoOptionError: w_range = (-np.inf, -np.inf, -np.inf) try: resolution = eval(conf.get(section, "resolution")) except ConfigParser.NoOptionError: resolution = 0 self.channels.append(Channel(name=name, wavelength_range=w_range, resolution=resolution)) except (ConfigParser.NoSectionError, ConfigParser.NoOptionError): for name, w_range, resolution in self.channel_list: self.channels.append(Channel(name=name, wavelength_range=w_range, resolution=resolution)) self.channels_to_load = set([]) def __getitem__(self, key, aslist=False): if isinstance(key, float): channels = [chn for chn in self.channels if(hasattr(chn, "wavelength_range") and chn.wavelength_range[0] <= key and chn.wavelength_range[2] >= key)] channels = sorted(channels, lambda ch1, ch2: ch1.__cmp__(ch2, key)) elif isinstance(key, str): channels = [chn for chn in self.channels if chn.name == key] channels = sorted(channels) elif isinstance(key, int): channels = [chn for chn in self.channels if int(np.round(chn.resolution)) == key] channels = sorted(channels) elif isinstance(key, (tuple, list)): if len(key) == 0: raise KeyError("Key list must contain at least one element.") channels = self.__getitem__(key[0], aslist=True) if len(key) > 1 and len(channels) > 0: dummy_instance = SatelliteInstrumentScene() dummy_instance.channels = channels channels = dummy_instance.__getitem__(key[1:], aslist=True) else: raise TypeError("Malformed key: " + str(key)) if len(channels) == 0: raise KeyError("No channel corresponding to " + str(key) + ".") elif aslist: return channels else: return channels[0] def __delitem__(self, key): """Delete a channel. 
""" chan = self[key] self.channels.remove(chan) del chan def __setitem__(self, key, data): # Add a channel if it is not already in the scene. Works only if key is # a string. try: if key not in self: # if it's a blob with name and data, add it as is. if hasattr(data, "name") and hasattr(data, "data"): self.channels.append(data) else: kwargs = {"name": key} for attr in ["wavelength_range", "resolution"]: try: kwargs[attr] = getattr(data, attr) except (AttributeError, NameError): pass self.channels.append(Channel(**kwargs)) except AttributeError: pass # Add the data. if isinstance(data, np.ma.core.MaskedArray): self[key].data = data else: try: self[key].data = data.data except AttributeError: self[key].data = data # if isinstance(data, Channel): # self.channels.append(Channel(name=key, # wavelength_range=data.wavelength_range, # resolution=data.resolution)) # self[key].data = data.data # else: # try: # self[key].data = data # except KeyError: # self.channels.append(Channel(name=key)) # self[key].data = data def __str__(self): return "\n".join([str(chn) for chn in self.channels]) def __iter__(self): return self.channels.__iter__() def _set_reader(self, pformat): """Gets the reader for *pformat* format, and puts it in the `reader` attribute. """ elements = pformat.split(".") if len(elements) == 1: reader_module = pformat reading_element = 'load' reader = "mpop.satin." + reader_module # Loading old style plugins reader_module = pformat LOG.info("old style plugin: " + pformat) try: # Look for builtin reader imp.find_module(reader_module, mpop.satin.__path__) except ImportError: # Look for custom reader loader = __import__(reader_module, globals(), locals(), [reading_element]) else: loader = __import__(reader, globals(), locals(), [reading_element]) # Build a custom Reader plugin on the fly... from mpop.plugin_base import Reader reader_class = type(elements[-1].capitalize() + "Reader", (Reader,), {"pformat": elements[-1]}) reader_instance = reader_class(self) # ... and set its "load" attribute with the "load" function of the # loader module loader = getattr(loader, reading_element) setattr(reader_instance, reading_element, loader) setattr(self, elements[-1] + "_reader", reader_instance) else: reader_module = ".".join(elements[:-1]) reading_element = elements[-1] reader = "mpop.satin." + reader_module try: # Look for builtin reader imp.find_module(reader_module, mpop.satin.__path__) except ImportError: # Look for custom reader loader = __import__(reader_module, globals(), locals(), [reading_element]) else: loader = __import__(reader, globals(), locals(), [reading_element]) loader = getattr(loader, reading_element) reader_instance = loader(self) setattr(self, loader.pformat + "_reader", reader_instance) return reader_instance def load(self, channels=None, load_again=False, area_extent=None, **kwargs): """Load instrument data into the *channels*. *Channels* is a list or a tuple containing channels we will load data into, designated by there center wavelength (float), resolution (integer) or name (string). If None, all channels are loaded. The *load_again* boolean flag allows to reload the channels even they have already been loaded, to mirror changes on disk for example. This is false by default. The *area_extent* keyword lets you specify which part of the data to load. Given as a 4-element sequence, it defines the area extent to load in satellite projection. The other keyword arguments are passed as is to the reader plugin. Check the corresponding documentation for more details. 
""" # Set up the list of channels to load. if channels is None: for chn in self.channels: self.channels_to_load |= set([chn.name]) elif isinstance(channels, (list, tuple, set)): self.channels_to_load = set() for chn in channels: try: self.channels_to_load |= set([self[chn].name]) except KeyError: self.channels_to_load |= set([chn]) else: raise TypeError("Channels must be a list/" "tuple/set of channel keys!") loaded_channels = [chn.name for chn in self.loaded_channels()] if load_again: for chn in self.channels_to_load: if chn in loaded_channels: self.unload(chn) loaded_channels = [] else: for chn in loaded_channels: self.channels_to_load -= set([chn]) # find the plugin to use from the config file conf = ConfigParser.ConfigParser() try: conf.read(os.path.join(CONFIG_PATH, self.fullname + ".cfg")) if len(conf.sections()) == 0: raise ConfigParser.NoSectionError(("Config file did " "not make sense")) levels = [section for section in conf.sections() if section.startswith(self.instrument_name + "-level")] except ConfigParser.NoSectionError: LOG.warning("Can't load data, no config file for " + self.fullname) self.channels_to_load = set() return levels.sort() if levels[0] == self.instrument_name + "-level1": levels = levels[1:] if len(levels) == 0: raise ConfigParser.NoSectionError( self.instrument_name + "-levelN (N>1) to tell me how to" + " read data... Not reading anything.") for level in levels: if len(self.channels_to_load) == 0: return if "reader_level" in kwargs.keys(): if kwargs["reader_level"] != None: LOG.debug( "Using explecit definition of reader level: " + kwargs["reader_level"]) if kwargs["reader_level"] != level: continue LOG.debug("Looking for sources in section " + level) reader_name = conf.get(level, 'format') try: reader_name = eval(reader_name) except NameError: reader_name = str(reader_name) LOG.debug("Using plugin mpop.satin." + reader_name) # read the data reader = "mpop.satin." + reader_name try: reader_instance = self._set_reader(reader_name) if area_extent is not None: if(isinstance(area_extent, (tuple, list)) and len(area_extent) == 4): kwargs["area_extent"] = area_extent else: raise ValueError("Area extent must be a sequence of " "four numbers.") reader_instance.load(self, **kwargs) except ImportError, err: LOG.exception("ImportError while loading " + reader_name + ": " + str(err)) continue loaded_channels = set([chn.name for chn in self.loaded_channels()]) just_loaded = loaded_channels & self.channels_to_load if len(just_loaded) == 0: LOG.info("No channels loaded with " + reader + ".") self.channels_to_load -= loaded_channels LOG.debug("Successfully loaded: " + str(just_loaded)) if len(self.channels_to_load) > 0: LOG.warning("Unable to import channels " + str(self.channels_to_load)) self.channels_to_load = set() def save(self, filename, to_format="netcdf4", **options): """Saves the current scene into a file of format *to_format*. Supported formats are: - *netcdf4*: NetCDF4 with CF conventions. """ writer = "satout." + to_format try: writer_module = __import__(writer, globals(), locals(), ["save"]) except ImportError, err: raise ImportError("Cannot load " + writer + " writer: " + str(err)) return writer_module.save(self, filename, **options) def unload(self, *channels): """Unloads *channels* from memory. :meth:`mpop.scene.SatelliteInstrumentScene.load` must be called again to reload the data. 
""" for chn in channels: try: self[chn].data = None except AttributeError: LOG.warning("Can't unload channel" + str(chn)) def add_to_history(self, message): """Adds a message to history info. """ timestr = datetime.datetime.utcnow().isoformat() timed_message = str(timestr + " - " + message) if not self.info.get("history", ""): self.info["history"] = timed_message else: self.info["history"] += "\n" + timed_message def check_channels(self, *channels): """Check if the *channels* are loaded, raise an error otherwise. """ for chan in channels: if not self[chan].is_loaded(): raise NotLoadedError("Required channel %s not loaded," " aborting." % chan) return True def loaded_channels(self): """Return the set of loaded_channels. """ return set([chan for chan in self.channels if chan.is_loaded()]) def get_orbital(self): from pyorbital.orbital import Orbital from pyorbital import tlefile from pyorbital.tlefile import get_norad_line sat_line = get_norad_line(self.satname, self.number) self.orbital = Orbital(sat_line) return self.orbital def estimate_cth(self, cth_atm="best", time_slot=None): """ General purpose =============== Estimation of the cloud top height using the 10.8 micron channel limitations: this is the most simple approach a simple fit of the ir108 to the temperature profile * no correction for water vapour or any other trace gas * no viewing angle dependency * no correction for semi-transparent clouds * no special treatment of temperature inversions Example call ============ data.estimate_cth(cth_atm="best") input arguments =============== cth_atm * using temperature profile to estimate the cloud top height possible choices are (see estimate_cth in mpop/tools.py): "standard", "tropics", "midlatitude summer", "midlatitude winter", "subarctic summer", "subarctic winter" this will choose the corresponding atmospheric AFGL temperature profile * new choice: "best" -> choose according to central (lon,lat) and time from: "tropics", "midlatitude summer", "midlatitude winter", "subarctic summer", "subarctic winter" time_slot current observation time as (datetime.datetime() object) time_slot option can be omitted, the function tries to use self.time_slot """ print "*** Simple estimation of Cloud Top Height with IR_108 channel" # check if IR_108 is loaded loaded_channels = [chn.name for chn in self.loaded_channels()] if "IR_108" not in loaded_channels: print "*** Error in estimate_cth (mpop/scene.py)" print " IR_108 is required to estimate CTH, but not loaded" quit() else: ir108 = self["IR_108"].data # choose atmosphere if cth_atm.lower() == "best": # get central lon/lat coordinates (yc, xc) = ir108.shape (lon, lat) = self.area.get_lonlat(yc / 2, xc / 2) if time_slot == None: if hasattr(self, 'time_slot'): time_slot = self.time_slot else: print "*** Error, in estimate_cth (mpop/channel.py)" print " when using cth_atm=\"best\" also the time_slot information is required!" quit() # automatic choise of temperature profile doy = time_slot.timetuple().tm_yday print "... 
automatic choise of temperature profile lon=", lon, " lat=", lat, ", time=", str(time_slot), ", doy=", doy if abs(lat) <= 30.0: cth_atm = "tropics" elif doy < 80 or doy <= 264: # northern summer if lat < -60.0: cth_atm = "subarctic winter" elif -60.0 <= lat and lat < -30.0: cth_atm = "midlatitude winter" elif 30.0 < lat and lat <= 60.0: cth_atm = "midlatitude summer" elif 60.0 < lat: cth_atm = "subarctic summer" else: # northern winter if lat < -60.0: cth_atm = "subarctic summer" elif -60.0 <= lat and lat < -30.0: cth_atm = "midlatitude summer" elif 30.0 < lat and lat <= 60.0: cth_atm = "midlatitude winter" elif 60 < lat: cth_atm = "subarctic winter" print " choosing temperature profile for ", cth_atm # estimate cloud top height by searching first fit of ir108 with # temperature profile from mpop.tools import estimate_cth cth = estimate_cth(ir108, cth_atm=cth_atm) # create new channel named "CTH" self.channels.append(Channel(name="CTH", wavelength_range=[0., 0., 0.], resolution=self["IR_108"].resolution, data=cth, calibration_unit="m")) # copy additional information from IR_108 self["CTH"].info = self["IR_108"].info self["CTH"].info['units'] = 'm' self["CTH"].area = self["IR_108"].area self["CTH"].area_id = self["IR_108"].area_id self["CTH"].area_def = self["IR_108"].area_def self["CTH"].resolution = self["IR_108"].resolution return cth def parallax_corr(self, fill="False", estimate_cth=False, cth_atm='best', replace=False): """ perform the CTH parallax corretion for all loaded channels """ loaded_channels = [chn.name for chn in self.loaded_channels()] if len(loaded_channels) == 0: return # loop over channels and check, if one is a normal radiance channel # having the method to calculate the viewing geometry for chn in self.loaded_channels(): if hasattr(chn, 'get_viewing_geometry'): # calculate the viewing geometry of the SEVIRI sensor print "... calculate viewing geometry using ", chn.name (azi, ele) = chn.get_viewing_geometry( self.get_orbital(), self.time_slot) break # choose best way to get CTH for parallax correction if not estimate_cth: if "CTTH" in loaded_channels: # make a copy of CTH, as it might get replace by its parallax # corrected version cth = copy.deepcopy(self["CTTH"].height) else: print "*** Error in parallax_corr (mpop.scene.py)" print " parallax correction needs some cloud top height information" print " please load the NWC-SAF CTTH product (recommended) or" print " activate the option data.parallax_corr( estimate_cth=True )" quit() else: if "IR_108" in loaded_channels: # try to estimate CTH with IR_108 self.estimate_cth() cth = self["CTH"].data else: print "*** Error in parallax_corr (mpop.scene.py)" print " parallax correction needs some cloud top height information" print " you specified the estimation of CTH with the IR_108, but " print " this channel is not loaded" quit() # perform parallax correction for each loaded channel for chn in self.loaded_channels(): if hasattr(chn, 'parallax_corr'): print "... perform parallax correction for ", chn.name if replace: chn_name_PC = chn.name print " replace channel ", chn_name_PC else: chn_name_PC = chn.name + "_PC" print " create channel ", chn_name_PC # take care of the parallax correction self[chn_name_PC] = chn.parallax_corr( cth=cth, azi=azi, ele=ele, fill=fill) else: LOG.warning("Channel " + str(chn.name) + " has no attribute parallax_corr," "thus parallax effect wont be corrected.") print "Channel " + str(chn.name) + " has no attribute parallax_corr," print "thus parallax effect wont be corrected." 
return self def project(self, dest_area, channels=None, precompute=False, mode=None, radius=None, nprocs=1): """Make a copy of the current snapshot projected onto the *dest_area*. Available areas are defined in the region configuration file (ACPG). *channels* tells which channels are to be projected, and if None, all channels are projected and copied over to the return snapshot. If *precompute* is set to true, the projecting data is saved on disk for reusage. *mode* sets the mode to project in: 'quick' which works between cartographic projections, and, as its denomination indicates, is quick (but lower quality), and 'nearest' which uses nearest neighbour for best projection. A *mode* set to None uses 'quick' when possible, 'nearest' otherwise. *radius* defines the radius of influence for neighbour search in 'nearest' mode (in metres). Setting it to None, or omitting it will fallback to default values (5 times the channel resolution) or 10,000m if the resolution is not available. Note: channels have to be loaded to be projected, otherwise an exception is raised. """ if not is_pyresample_loaded: # Not much point in proceeding then return self _channels = set([]) if channels is None: for chn in self.loaded_channels(): _channels |= set([chn]) elif isinstance(channels, (list, tuple, set)): for chn in channels: try: _channels |= set([self[chn]]) except KeyError: LOG.warning("Channel " + str(chn) + " not found," "thus not projected.") else: raise TypeError("Channels must be a list/" "tuple/set of channel keys!") res = copy.copy(self) if isinstance(dest_area, str): dest_area = mpop.projector.get_area_def(dest_area) res.area = dest_area res.channels = [] if not _channels <= self.loaded_channels(): LOG.warning("Cannot project nonloaded channels: %s.", _channels - self.loaded_channels()) LOG.info("Will project the other channels though.") _channels = _channels and self.loaded_channels() cov = {} for chn in sorted(_channels, key=lambda x: x.resolution, reverse=True): if chn.area is None: if self.area is None: area_name = ("swath_" + self.fullname + "_" + str(self.time_slot) + "_" + str(chn.shape)) chn.area = area_name else: if is_pyresample_loaded: try: chn.area = AreaDefinition( self.area.area_id + str(chn.shape), self.area.name, self.area.proj_id, self.area.proj_dict, chn.shape[1], chn.shape[0], self.area.area_extent, self.area.nprocs) except AttributeError: try: dummy = self.area.lons dummy = self.area.lats chn.area = self.area area_name = ("swath_" + self.fullname + "_" + str(self.time_slot) + "_" + str(chn.shape)) chn.area.area_id = area_name except AttributeError: chn.area = self.area + str(chn.shape) else: chn.area = self.area + str(chn.shape) else: # chn.area is not None # if (is_pyresample_loaded and # (not hasattr(chn.area, "area_id") or # not chn.area.area_id)): # area_name = ("swath_" + self.fullname + "_" + # str(self.time_slot) + "_" # + str(chn.shape) + "_" # + str(chn.name)) # chn.area.area_id = area_name # This leaks memory ! #LOG.debug("chn.area = " + str(chn.area)) #LOG.debug("type(chn.area) = " + str(type(chn.area))) if is_pyresample_loaded: area_name = ("swath_" + self.fullname + "_" + str(self.time_slot) + "_" + str(chn.shape) + "_" + str(chn.name)) LOG.debug("pyresample is loaded... 
area-name = " + str(area_name)) if hasattr(chn.area, "area_id") and not chn.area.area_id: LOG.debug("chn.area has area_id attribute...") chn.area.area_id = area_name elif not hasattr(chn.area, "area_id") and not isinstance(chn.area, str): setattr(chn.area, 'area_id', area_name) if isinstance(chn.area, str): area_id = chn.area else: area_id = chn.area_id or chn.area.area_id if area_id not in cov: if radius is None: if chn.resolution > 0: radius = 5 * chn.resolution else: radius = 10000 cov[area_id] = mpop.projector.Projector(chn.area, dest_area, mode=mode, radius=radius, nprocs=nprocs) if precompute: try: cov[area_id].save() except IOError: LOG.exception("Could not save projection.") try: res.channels.append(chn.project(cov[area_id])) except NotLoadedError: LOG.warning("Channel " + str(chn.name) + " not loaded, " "thus not projected.") # Compose with image object try: if res._CompositerClass is not None: # Pass weak ref to compositor to allow garbage collection res.image = res._CompositerClass(weakref.proxy(res)) except AttributeError: pass return res if sys.version_info < (2, 5): def any(iterable): for element in iterable: if element: return True return False def assemble_segments(segments): """Assemble the scene objects listed in *segment_list* and returns the resulting scene object. """ from mpop.satellites import GenericFactory channels = set([]) for seg in segments: channels |= set([chn.name for chn in seg.loaded_channels()]) seg = segments[0] new_scene = GenericFactory.create_scene(seg.satname, seg.number, seg.instrument_name, seg.time_slot, seg.orbit, variant=seg.variant) swath_definitions = {} for chn in channels: new_scene[chn] = np.ma.concatenate([seg[chn].data for seg in segments if seg[chn].is_loaded()]) try: area_names = tuple([seg[chn].area.area_id for seg in segments if seg[chn].is_loaded()]) if area_names not in swath_definitions: lons = np.ma.concatenate([seg[chn].area.lons[:] for seg in segments if seg[chn].is_loaded()]) lats = np.ma.concatenate([seg[chn].area.lats[:] for seg in segments if seg[chn].is_loaded()]) new_scene[chn].area = SwathDefinition(lons=lons, lats=lats) area_name = "+".join(area_names) new_scene[chn].area.area_id = area_name new_scene[chn].area_id = area_name swath_definitions[area_names] = new_scene[chn].area else: new_scene[chn].area = swath_definitions[area_names] new_scene[chn].area_id = new_scene[chn].area.area_id except AttributeError: pass try: lons = np.ma.concatenate([seg.area.lons[:] for seg in segments]) lats = np.ma.concatenate([seg.area.lats[:] for seg in segments]) new_scene.area = SwathDefinition(lons=lons, lats=lats) for chn in channels: if any([seg[chn].area for seg in segments]): try: lon_arrays = [] lat_arrays = [] for seg in segments: if seg[chn].area is not None: lon_arrays.append(seg[chn].area.lons[:]) lat_arrays.append(seg[chn].area.lats[:]) else: lon_arrays.append(seg.area.lons[:]) lat_arrays.append(seg.area.lats[:]) lons = np.ma.concatenate(lon_arrays) lats = np.ma.concatenate(lat_arrays) new_scene[chn].area = SwathDefinition(lons=lons, lats=lats) except AttributeError: pass except AttributeError: pass return new_scene mpop-1.5.0/mpop/tests/000077500000000000000000000000001317160620000146015ustar00rootroot00000000000000mpop-1.5.0/mpop/tests/__init__.py000066400000000000000000000036441317160620000167210ustar00rootroot00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright (c) 2014 Martin Raspaud # Author(s): # Martin Raspaud # This program is free software: you can redistribute it and/or modify # it under the terms 
of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # You should have received a copy of the GNU General Public License # along with this program. If not, see . """The tests package. """ from mpop.tests import (test_satin_helpers, #test_pp_core, # crash test_channel, test_image, test_geo_image, #test_mipp, test_projector, #test_satellites, test_scene, #test_seviri, #test_viirs_sdr, #test_visir, ) import unittest def suite(): """The global test suite. """ mysuite = unittest.TestSuite() mysuite.addTests(test_satin_helpers.suite()) #mysuite.addTests(test_pp_core.suite()) mysuite.addTests(test_channel.suite()) mysuite.addTests(test_image.suite()) mysuite.addTests(test_geo_image.suite()) #mysuite.addTests(test_mipp.suite()) mysuite.addTests(test_projector.suite()) #mysuite.addTests(test_satellites.suite()) mysuite.addTests(test_scene.suite()) #mysuite.addTests(test_seviri.suite()) #mysuite.addTests(test_viirs_sdr.suite()) #mysuite.addTests(test_visir.suite()) return mysuite mpop-1.5.0/mpop/tests/test_channel.py000066400000000000000000000416421317160620000176310ustar00rootroot00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- """Module for testing the mpop.channel module. """ import unittest from mpop.channel import GenericChannel, Channel import string import numpy as np # epsilon E = 0.0001 class TestGenericChannel(unittest.TestCase): """Class for testing the GenericChannel class. """ chan = None chan2 = None def test_init(self): """Creation of a generic channel. """ self.chan = GenericChannel(name="newchan") self.assertEqual(self.chan.name, "newchan") numb = int(np.random.uniform(100000)) self.assertRaises(TypeError, GenericChannel, name=numb) self.chan = GenericChannel() self.assertTrue(self.chan.name is None) def test_cmp(self): """Comparison of generic channels. """ self.chan = GenericChannel(name = "newchan") self.chan2 = GenericChannel(name = "mychan") self.assertTrue(self.chan > self.chan2) self.chan = GenericChannel(name = "newchan") self.chan2 = "mychan" self.assertTrue(self.chan > self.chan2) self.chan = GenericChannel(name = "newchan") self.chan2 = GenericChannel(name = "newchan") self.assert_(self.chan == self.chan2) self.chan = GenericChannel() self.chan2 = GenericChannel(name = "newchan") self.assert_(self.chan < self.chan2) self.chan = GenericChannel(name = "newchan") self.chan2 = GenericChannel(name = "_mychan") self.assert_(self.chan < self.chan2) self.chan = GenericChannel(name = "_newchan") self.chan2 = GenericChannel(name = "mychan") self.assert_(self.chan > self.chan2) def test_area(self): """Area setting and retrieving. 
""" self.chan = GenericChannel(name = "newchan") self.chan.area = "bla" self.assert_(self.chan.area == "bla") self.chan.area = None self.assert_(self.chan.area == None) class DummyArea(object): def __init__(self, area_extent, x_size, y_size, proj_id, proj_dict): self.area_extent = area_extent self.x_size = x_size self.y_size = y_size self.proj_id = proj_id self.proj_dict = proj_dict self.chan.area = DummyArea(1, 2, 3, 4, 5) self.assert_(self.chan.area.area_extent == 1) class DummyArea(object): def __init__(self, lons, lats): self.lons = lons self.lats = lats self.chan.area = DummyArea(1, 2) self.assert_(self.chan.area.lats == 2) self.assertRaises(TypeError, setattr, self.chan, "area", 1) class TestChannel(unittest.TestCase): """Class for testing the Channel class. """ chan = None chan2 = None def test_init(self): """Creation of a channel. """ self.assertRaises(ValueError, Channel) # Name self.chan = Channel(name = "newchan") self.assertEqual(self.chan.name, "newchan") self.assertEqual(self.chan.wavelength_range, [-np.inf, -np.inf, -np.inf]) self.assertEqual(self.chan.resolution, 0) self.assert_(self.chan.data is None) numb = int(np.random.uniform(100000)) self.assertRaises(TypeError, Channel, name = numb) numb = np.random.uniform() * 100000 self.assertRaises(TypeError, Channel, name = numb) # Resolution numb = int(np.random.uniform(100000)) self.assertRaises(ValueError, Channel, resolution = numb) numb = int(np.random.uniform(100000)) self.chan = Channel(name = "newchan", resolution = numb) self.assertEqual(self.chan.name, "newchan") self.assertEqual(self.chan.wavelength_range, [-np.inf, -np.inf, -np.inf]) self.assertEqual(self.chan.resolution, numb) self.assert_(self.chan.data is None) self.assertRaises(TypeError, Channel, name = "newchan", resolution = "a") # Wavelength numbs = [np.random.uniform(100), np.random.uniform(100), np.random.uniform(100)] numbs.sort() self.chan = Channel(wavelength_range = numbs) self.assertEqual(self.chan.name, None) self.assertEqual(self.chan.wavelength_range, numbs) self.assertEqual(self.chan.resolution, 0) self.assert_(self.chan.data is None) self.assertRaises(TypeError, Channel, wavelength_range = numbs[0:1]) numbs.sort(reverse = True) self.assertRaises(ValueError, Channel, wavelength_range = numbs) numbs = [int(np.random.uniform(100)), int(np.random.uniform(100)), int(np.random.uniform(100))] numbs.sort() self.assertRaises(TypeError, Channel, wavelength_range = numbs) self.assertRaises(TypeError, Channel, wavelength_range = random_string(4)) numb = np.random.uniform(100000) self.assertRaises(TypeError, Channel, wavelength_range = numb) numb = int(np.random.uniform(100000)) self.assertRaises(TypeError, Channel, wavelength_range = numb) # Data data = np.random.rand(3, 3) self.assertRaises(ValueError, Channel, data = data) self.chan = Channel(name = "newchan", data = data) self.assertEqual(self.chan.name, "newchan") self.assertEqual(self.chan.wavelength_range, [-np.inf, -np.inf, -np.inf]) self.assertEqual(self.chan.resolution, 0) self.assert_(np.all(self.chan.data == data)) mask = np.array(np.random.rand(3, 3) * 2, dtype = int) data = np.ma.array(data, mask = mask) self.chan = Channel(name = "newchan", data = data) self.assertEqual(self.chan.name, "newchan") self.assertEqual(self.chan.wavelength_range, [-np.inf, -np.inf, -np.inf]) self.assertEqual(self.chan.resolution, 0) self.assert_(np.all(self.chan.data == data)) self.assertRaises(TypeError, Channel, name = "newchan", data = random_string(4)) numb = np.random.uniform(100000) self.assertRaises(TypeError, 
Channel, name = "newchan", data = numb) numb = int(np.random.uniform(100000)) self.assertRaises(TypeError, Channel, name = "newchan", data = numb) numbs = [np.random.uniform(100), np.random.uniform(100), np.random.uniform(100)] self.assertRaises(TypeError, Channel, name = "newchan", data = numbs) def test_cmp(self): """Comparison of channels. """ self.chan = Channel(name = "newchan") self.chan2 = Channel(name = "mychan") self.assertTrue(self.chan > self.chan2) self.chan = Channel(name = "newchan") self.chan2 = "mychan" self.assertTrue(self.chan > self.chan2) self.chan = Channel(name = "newchan") self.chan2 = Channel(name = "newchan") self.assert_(self.chan == self.chan2) self.chan = Channel(wavelength_range=(1., 2., 3.)) self.chan2 = Channel(name = "newchan") self.assert_(self.chan < self.chan2) self.chan = Channel(name = "newchan") self.chan2 = Channel(name = "_mychan") self.assert_(self.chan < self.chan2) self.chan = Channel(name = "_newchan") self.chan2 = Channel(name = "mychan") self.assert_(self.chan > self.chan2) self.chan = Channel(name = random_string(4), wavelength_range = (1., 2., 3.)) self.chan2 = Channel(name = random_string(4), wavelength_range = (4., 5., 6.)) self.assert_(self.chan < self.chan2) self.chan = Channel(name = "_" + random_string(4), wavelength_range = (1., 2., 3.)) self.chan2 = Channel(name = random_string(4), wavelength_range = (4., 5., 6.)) self.assert_(self.chan > self.chan2) def test_str(self): """String output for a channel. """ self.chan = Channel(name="newchan", wavelength_range=(1., 2., 3.), resolution=1000) self.assertEqual(str(self.chan), "'newchan: (1.000,2.000,3.000)μm, resolution 1000m," " not loaded'") self.chan.data = np.random.rand(3, 3) self.assertEqual(str(self.chan), "'newchan: (1.000,2.000,3.000)μm, " "shape (3, 3), " "resolution 1000m'") def test_is_loaded(self): """Check load status of a channel. """ data = np.random.rand(3, 3) self.chan = Channel(name = "newchan") self.assert_(not self.chan.is_loaded()) self.chan = Channel(name = "newchan", data = data) self.assert_(self.chan.is_loaded()) def test_as_image(self): """Check the geo_image version of the channel. """ data = np.random.rand(3, 3) self.chan = Channel(name="newchan", data=data) img = self.chan.as_image(False) self.assert_(np.allclose(img.channels[0], data)) self.assertEqual(img.mode, "L") img = self.chan.as_image(True) self.assertEqual(img.channels[0].max(), 1) self.assertEqual(img.channels[0].min(), 0) def test_check_range(self): """Check the range of a channel. 
""" self.chan = Channel(name = "newchan") self.assertRaises(ValueError, self.chan.check_range) numb = np.random.uniform(10) self.assertRaises(ValueError, self.chan.check_range, numb) # ndarray data = np.random.rand(3, 3) self.chan = Channel(name = "newchan", data = data) min_range = (data.max() - data.min()) / 2 self.assert_(np.all(data == self.chan.check_range(min_range))) zeros = np.zeros_like(data) min_range = (data.max() - data.min()) + E self.assert_(np.all(zeros == self.chan.check_range(min_range))) # masked array mask = np.array(np.random.rand(3, 3) * 2, dtype = int) mask[1, 1] = False data = np.ma.array(data, mask = mask) self.chan = Channel(name = "newchan", data = data) min_range = (data.max() - data.min()) / 2 self.assert_(np.all(data == self.chan.check_range(min_range))) self.assertEquals(data.count(), self.chan.check_range(min_range).count()) zeros = np.zeros_like(data) min_range = (data.max() - data.min()) + E self.assert_(np.all(zeros == self.chan.check_range(min_range))) data = np.ma.array(data, mask = True) self.chan = Channel(name = "newchan", data = data) self.assertEquals(0, self.chan.check_range(min_range).count()) self.assertEquals(data.count(), self.chan.check_range(min_range).count()) # Wrong type arguments self.assertRaises(TypeError, self.chan.check_range, random_string(4)) self.assertRaises(TypeError, self.chan.check_range, [np.random.uniform()]) def test_sunzen_corr(self): '''Test Sun zenith angle correction. ''' import datetime as dt chan = Channel(name='test') original_value = 10. chan.data = original_value * np.ones((2,11)) lats = np.zeros((2,11)) # equator lons = np.array([np.linspace(-90, 90, 11), np.linspace(-90, 90, 11)]) # Equinox, so the Sun is at the equator time_slot = dt.datetime(2014,3,20,16,57) new_ch = chan.sunzen_corr(time_slot, lonlats=(lons, lats), limit=80.) 
# Test minimum after correction, accuracy of three decimals is enough #self.assertTrue(np.abs(10.000 - np.min(new_ch.data)) < 10**-3) self.assertAlmostEqual(10.000, np.min(new_ch.data), places=3) # Test maximum after correction self.assertAlmostEqual(57.588, np.max(new_ch.data), places=3) # There should be ten values at zenith angle >= 80 deg, and # these are all equal self.assertTrue(np.where(new_ch.data == \ np.max(new_ch.data))[0].shape[0] == 10) # All values should be larger than the starting values self.assertTrue(np.all(new_ch.data > original_value)) # Channel name self.assertEqual(new_ch.name, chan.name+'_SZC') # Test channel name in the info dict self.assertEqual(new_ch.name, chan.info['sun_zen_corrected']) # Test with several locations and arbitrary data chan = Channel(name='test2') chan.data = np.array([[0., 67.31614275, 49.96271995, 99.41046645, 29.08660989], [87.61007584, 79.6683524, 53.20397351, 29.88260374, 62.33623915], [60.49283004, 54.04267222, 32.72365906, 91.44995651, 32.27232955], [63.71580638, 69.57673795, 7.63064373, 32.15683105, 9.05786335], [65.61434337, 33.2317155, 18.77672384, 30.13527574, 23.22572904]]) lons = np.array([[116.28695847, 164.1125604, 40.77223701, -113.54699788, 133.15558442], [-17.18990601, 75.17472034, 12.81618371, -40.75524952, 40.70898002], [42.74662341, 164.05671859, -166.58469404, -58.16684483, -144.97963063], [46.26303645, -167.48682034, 170.28131412, -17.80502488, -63.9031154], [-107.14829679, -147.66665952, -0.75970554, 77.701768, -130.48677807]]) lats = np.array([[-51.53681682, -83.21762788, 5.91008672, 22.51730385, 66.83356427], [82.78543163, 23.1529456 , -7.16337152, -68.23118425, 28.72194953], [31.03440852, 70.55322517, -83.61780288, 29.88413938, 25.7214828], [-19.02517922, -19.20958728, -14.7825735, 22.66967876, 67.6089238], [45.12202477, 61.79674149, 58.71037615, -62.04350423, 13.06405864]]) time_slot = dt.datetime(1998, 8, 1, 10, 0) # These are the expected results results = np.array([[0., 387.65821593, 51.74080022, 572.48205988, 138.96586013], [227.24857818, 105.53045776, 62.24134162, 172.0870564, 64.12902666], [63.08646652, 311.21934562, 188.44804188, 526.63931022, 185.84893885], [82.86856236, 400.6764648, 43.9431259, 46.58056343, 36.04457644], [377.85794388, 191.3738223, 27.55002934, 173.54213642, 133.75164285]]) new_ch = chan.sunzen_corr(time_slot, lonlats=(lons, lats), limit=80.) self.assertAlmostEqual(np.max(results-new_ch.data), 0.000, places=3) # def test_project(self): # """Project a channel. # """ # from pp.coverage import SatProjCov # from pp.scene import SatelliteInstrumentScene # cov = SatProjCov(SatelliteInstrumentScene(area = "euro"), # "scan", 1000) # data = np.tile(np.array([[1, 2],[3, 4]]), (256, 256)) # self.chan = Channel(name = "newchan", data = data) # self.chan.project(cov) def random_string(length, choices=string.letters): """Generates a random string with elements from *set* of the specified *length*. """ import random return "".join([random.choice(choices) for i in range(length)]) def suite(): """The test suite for test_channel. 
""" loader = unittest.TestLoader() mysuite = unittest.TestSuite() mysuite.addTest(loader.loadTestsFromTestCase(TestGenericChannel)) mysuite.addTest(loader.loadTestsFromTestCase(TestChannel)) return mysuite mpop-1.5.0/mpop/tests/test_doc.py000066400000000000000000000001311317160620000167520ustar00rootroot00000000000000#!/usr/bin/env python import doctest #doctest.testfile("../doc/source/quickstart.rst") mpop-1.5.0/mpop/tests/test_geo_image.py000066400000000000000000000607051317160620000201360ustar00rootroot00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright (c) 2014 # SMHI, # Folkborgsvägen 1, # Norrköping, # Sweden # Author(s): # Martin Raspaud # This file is part of mpop. # mpop is free software: you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # mpop is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # You should have received a copy of the GNU General Public License # along with mpop. If not, see . """Module for testing the pp.geo_image module. """ import datetime import unittest import sys import numpy as np from mock import patch, MagicMock # Mock some modules, so we don't need them for tests. sys.modules['osgeo'] = MagicMock() sys.modules['pyresample'] = MagicMock() import mpop.imageo.geo_image as geo_image import mpop.imageo.formats.writer_options as writer_opts class TestGeoImage(unittest.TestCase): """Class for testing pp.geo_image. """ def setUp(self): """Setup the test. """ self.time_slot = datetime.datetime(2009, 10, 8, 14, 30) self.data = np.zeros((512, 512), dtype=np.uint8) self.img = geo_image.GeoImage(self.data, area="euro", time_slot=self.time_slot) @patch.object(geo_image.GeoImage, 'geotiff_save') def test_save(self, mock_save): """Save a geo image. 
""" self.img.save("test.tif", compression=0) mock_save.assert_called_once_with("test.tif", 0, {}, None, 256, writer_options={'blocksize': 256, 'compression': 0}) mock_save.reset_mock() self.img.save("test.tif", compression=9) mock_save.assert_called_once_with("test.tif", 9, {}, None, 256, writer_options={'blocksize': 256, 'compression': 9}) mock_save.reset_mock() self.img.save("test.tif", compression=9, floating_point=True) mock_save.assert_called_once_with("test.tif", 9, {}, None, 256, floating_point=True, writer_options={'blocksize': 256, 'compression': 9}) mock_save.reset_mock() self.img.save("test.tif", compression=9, tags={"NBITS": 20}) mock_save.assert_called_once_with("test.tif", 9, {"NBITS": 20}, None, 256, writer_options={'blocksize': 256, 'nbits': 20, 'compression': 9}) mock_save.reset_mock() self.img.save("test.tif", writer_options={"compression":9}) mock_save.assert_called_once_with("test.tif", 9, {}, None, 256, writer_options={'blocksize': 256, 'compression': 9}) mock_save.reset_mock() self.img.save("test.tif", writer_options={"compression":9, "nbits":16}) mock_save.assert_called_once_with("test.tif", 9, {"NBITS": 16}, None, 256, writer_options={'blocksize': 256, 'nbits': 16, 'compression': 9}) mock_save.reset_mock() self.img.save("test.tif", writer_options={"fill_value_subst": 1}) mock_save.assert_called_once_with("test.tif", 6, {}, None, 256, writer_options={'blocksize': 256, 'compression': 6, 'fill_value_subst': 1}) with patch.object(geo_image.Image, 'save') as mock_isave: self.img.save("test.png") mock_isave.assert_called_once_with(self.img, 'test.png', 6, fformat='png') mock_isave.side_effect = geo_image.UnknownImageFormat("Boom!") self.assertRaises(geo_image.UnknownImageFormat, self.img.save, "test.dummy") @patch('osgeo.osr.SpatialReference') @patch('mpop.projector.get_area_def') @patch('osgeo.gdal.GDT_Float64') @patch('osgeo.gdal.GDT_Byte') @patch('osgeo.gdal.GDT_UInt16') @patch('osgeo.gdal.GDT_UInt32') @patch('osgeo.gdal.GetDriverByName') @patch.object(geo_image.GeoImage, '_gdal_write_channels') def test_save_geotiff(self, mock_write_channels, gtbn, gui32, gui16, gby, gf, gad, spaceref): """Save to geotiff format. 
""" gadr = gad.return_value gadr.area_extent = [1, 2, 3, 4] gadr.pixel_size_x = 10 gadr.pixel_size_y = 11 gadr.proj4_string = "+proj=geos +ellps=WGS84" gadr.proj_dict = {"proj": "geos", "ellps": "WGS84"} gadr.proj_id = "geos0" # test with 0 compression raster = gtbn.return_value self.img.geotiff_save("test.tif", 0, None, {"BLA": "09"}, 256) gtbn.assert_called_once_with("GTiff") raster.Create.assert_called_once_with("test.tif", self.data.shape[0], self.data.shape[1], 2, gby, ["BLA=09", 'TILED=YES', 'BLOCKXSIZE=256', 'BLOCKYSIZE=256', 'ALPHA=YES']) dst_ds = raster.Create.return_value #mock_write_channels.assert_called_once_with(dst_ds, self.data, # 255, None) self.assertEquals(mock_write_channels.call_count, 1) self.assertEquals(mock_write_channels.call_args[0][0], dst_ds) self.assertEquals(mock_write_channels.call_args[0][2], 255) self.assertTrue(mock_write_channels.call_args[0][3] is None) self.assertTrue(np.all(mock_write_channels.call_args[0][1] == self.data)) dst_ds.SetGeoTransform.assert_called_once_with([1, 10, 0, 4, 0, -11]) srs = spaceref.return_value.ExportToWkt.return_value dst_ds.SetProjection.assert_called_once_with(srs) time_tag = {"TIFFTAG_DATETIME": self.img.time_slot.strftime("%Y:%m:%d %H:%M:%S")} dst_ds.SetMetadata.assert_called_once_with(time_tag, '') @patch('osgeo.osr.SpatialReference') @patch('mpop.projector.get_area_def') @patch('osgeo.gdal.GDT_Float64') @patch('osgeo.gdal.GDT_Byte') @patch('osgeo.gdal.GDT_UInt16') @patch('osgeo.gdal.GDT_UInt32') @patch('osgeo.gdal.GetDriverByName') @patch.object(geo_image.GeoImage, '_gdal_write_channels') def test_save_geotiff_compress(self, mock_write_channels, gtbn, gui32, gui16, gby, gf, gad, spaceref): """Save to geotiff format with compression. """ gadr = gad.return_value gadr.area_extent = [1, 2, 3, 4] gadr.pixel_size_x = 10 gadr.pixel_size_y = 11 gadr.proj4_string = "+proj=geos +ellps=WGS84" gadr.proj_dict = {"proj": "geos", "ellps": "WGS84"} gadr.proj_id = "geos0" raster = gtbn.return_value self.img.geotiff_save("test.tif", 9, None, None, 256) gtbn.assert_called_once_with("GTiff") raster.Create.assert_called_once_with("test.tif", self.data.shape[0], self.data.shape[1], 2, gby, ['COMPRESS=DEFLATE', 'ZLEVEL=9', 'TILED=YES', 'BLOCKXSIZE=256', 'BLOCKYSIZE=256', 'ALPHA=YES']) dst_ds = raster.Create.return_value #mock_write_channels.assert_called_once_with(dst_ds, self.data, # 255, None) self.assertEquals(mock_write_channels.call_count, 1) self.assertEquals(mock_write_channels.call_args[0][0], dst_ds) self.assertEquals(mock_write_channels.call_args[0][2], 255) self.assertTrue(mock_write_channels.call_args[0][3] is None) self.assertTrue(np.all(mock_write_channels.call_args[0][1] == self.data)) dst_ds.SetGeoTransform.assert_called_once_with([1, 10, 0, 4, 0, -11]) srs = spaceref.return_value.ExportToWkt.return_value dst_ds.SetProjection.assert_called_once_with(srs) time_tag = {"TIFFTAG_DATETIME": self.img.time_slot.strftime("%Y:%m:%d %H:%M:%S")} dst_ds.SetMetadata.assert_called_once_with(time_tag, '') @patch('osgeo.osr.SpatialReference') @patch('mpop.projector.get_area_def') @patch('osgeo.gdal.GDT_Float64') @patch('osgeo.gdal.GDT_Byte') @patch('osgeo.gdal.GDT_UInt16') @patch('osgeo.gdal.GDT_UInt32') @patch('osgeo.gdal.GetDriverByName') @patch.object(geo_image.GeoImage, '_gdal_write_channels') def test_save_geotiff_floats(self, mock_write_channels, gtbn, gui32, gui16, gby, gf, gad, spaceref): """Save to geotiff format with floats. 
""" gadr = gad.return_value gadr.area_extent = [1, 2, 3, 4] gadr.pixel_size_x = 10 gadr.pixel_size_y = 11 gadr.proj4_string = "+proj=geos +ellps=WGS84" gadr.proj_dict = {"proj": "geos", "ellps": "WGS84"} gadr.proj_id = "geos0" # test with floats raster = gtbn.return_value self.img.geotiff_save("test.tif", 0, None, None, 256, floating_point=True) gtbn.assert_called_once_with("GTiff") raster.Create.assert_called_once_with("test.tif", self.data.shape[0], self.data.shape[1], 1, gf, ['TILED=YES', 'BLOCKXSIZE=256', 'BLOCKYSIZE=256']) dst_ds = raster.Create.return_value #mock_write_channels.assert_called_once_with(dst_ds, self.data, # 255, None) self.assertEquals(mock_write_channels.call_count, 1) self.assertEquals(mock_write_channels.call_args[0][0], dst_ds) self.assertEquals(mock_write_channels.call_args[0][2], 0) self.assertEquals(mock_write_channels.call_args[0][3], [0]) self.assertTrue(np.all(mock_write_channels.call_args[0][1] == self.data)) dst_ds.SetGeoTransform.assert_called_once_with([1, 10, 0, 4, 0, -11]) srs = spaceref.return_value.ExportToWkt.return_value dst_ds.SetProjection.assert_called_once_with(srs) time_tag = {"TIFFTAG_DATETIME": self.img.time_slot.strftime("%Y:%m:%d %H:%M:%S")} dst_ds.SetMetadata.assert_called_once_with(time_tag, '') self.fill_value = None self.img.mode = "RGB" self.assertRaises(ValueError, self.img.geotiff_save, "test.tif", 0, None, None, 256, floating_point=True) @patch('osgeo.osr.SpatialReference') @patch('mpop.projector.get_area_def') @patch('osgeo.gdal.GDT_Float64') @patch('osgeo.gdal.GDT_Byte') @patch('osgeo.gdal.GDT_UInt16') @patch('osgeo.gdal.GDT_UInt32') @patch('osgeo.gdal.GetDriverByName') @patch.object(geo_image.GeoImage, '_gdal_write_channels') def test_save_geotiff_32(self, mock_write_channels, gtbn, gui32, gui16, gby, gf, gad, spaceref): """Save to geotiff 32-bits format. 
""" gadr = gad.return_value gadr.area_extent = [1, 2, 3, 4] gadr.pixel_size_x = 10 gadr.pixel_size_y = 11 gadr.proj4_string = "+proj=geos +ellps=WGS84" gadr.proj_dict = {"proj": "geos", "ellps": "WGS84"} gadr.proj_id = "geos0" raster = gtbn.return_value self.img.geotiff_save("test.tif", 9, {"NBITS": 20}, None, 256) gtbn.assert_called_once_with("GTiff") raster.Create.assert_called_once_with("test.tif", self.data.shape[0], self.data.shape[1], 2, gui32, ['COMPRESS=DEFLATE', 'ZLEVEL=9', 'TILED=YES', 'BLOCKXSIZE=256', 'BLOCKYSIZE=256', 'ALPHA=YES']) dst_ds = raster.Create.return_value #mock_write_channels.assert_called_once_with(dst_ds, self.data, # 255, None) self.assertEquals(mock_write_channels.call_count, 1) self.assertEquals(mock_write_channels.call_args[0][0], dst_ds) self.assertEquals(mock_write_channels.call_args[0][2], 2**32 - 1) self.assertTrue(mock_write_channels.call_args[0][3] is None) self.assertTrue(np.all(mock_write_channels.call_args[0][1] == self.data)) dst_ds.SetGeoTransform.assert_called_once_with([1, 10, 0, 4, 0, -11]) srs = spaceref.return_value.ExportToWkt.return_value dst_ds.SetProjection.assert_called_once_with(srs) time_tag = {"TIFFTAG_DATETIME": self.img.time_slot.strftime("%Y:%m:%d %H:%M:%S"), "NBITS": 20} dst_ds.SetMetadata.assert_called_once_with(time_tag, '') @patch('osgeo.osr.SpatialReference') @patch('mpop.projector.get_area_def') @patch('osgeo.gdal.GDT_Float64') @patch('osgeo.gdal.GDT_Byte') @patch('osgeo.gdal.GDT_UInt16') @patch('osgeo.gdal.GDT_UInt32') @patch('osgeo.gdal.GetDriverByName') @patch.object(geo_image.GeoImage, '_gdal_write_channels') def test_save_geotiff_16(self, mock_write_channels, gtbn, gui32, gui16, gby, gf, gad, spaceref): """Save to geotiff 16-bits format. """ gadr = gad.return_value gadr.area_extent = [1, 2, 3, 4] gadr.pixel_size_x = 10 gadr.pixel_size_y = 11 gadr.proj4_string = "+proj=geos +ellps=WGS84" gadr.proj_dict = {"proj": "geos", "ellps": "WGS84"} gadr.proj_id = "geos0" raster = gtbn.return_value self.img.geotiff_save("test.tif", 9, {"NBITS": 15}, None, 256) gtbn.assert_called_once_with("GTiff") raster.Create.assert_called_once_with("test.tif", self.data.shape[0], self.data.shape[1], 2, gui16, ['COMPRESS=DEFLATE', 'ZLEVEL=9', 'TILED=YES', 'BLOCKXSIZE=256', 'BLOCKYSIZE=256', 'ALPHA=YES']) dst_ds = raster.Create.return_value #mock_write_channels.assert_called_once_with(dst_ds, self.data, # 255, None) self.assertEquals(mock_write_channels.call_count, 1) self.assertEquals(mock_write_channels.call_args[0][0], dst_ds) self.assertEquals(mock_write_channels.call_args[0][2], 2**16 - 1) self.assertTrue(mock_write_channels.call_args[0][3] is None) self.assertTrue(np.all(mock_write_channels.call_args[0][1] == self.data)) dst_ds.SetGeoTransform.assert_called_once_with([1, 10, 0, 4, 0, -11]) srs = spaceref.return_value.ExportToWkt.return_value dst_ds.SetProjection.assert_called_once_with(srs) time_tag = {"TIFFTAG_DATETIME": self.img.time_slot.strftime("%Y:%m:%d %H:%M:%S"), "NBITS": 15} dst_ds.SetMetadata.assert_called_once_with(time_tag, '') @patch('osgeo.osr.SpatialReference') @patch('mpop.projector.get_area_def') @patch('osgeo.gdal.GDT_Float64') @patch('osgeo.gdal.GDT_Byte') @patch('osgeo.gdal.GDT_UInt16') @patch('osgeo.gdal.GDT_UInt32') @patch('osgeo.gdal.GetDriverByName') @patch.object(geo_image.GeoImage, '_gdal_write_channels') def test_save_geotiff_geotransform(self, mock_write_channels, gtbn, gui32, gui16, gby, gf, gad, spaceref): """Save to geotiff format with custom geotransform """ gadr = gad.return_value gadr.area_extent = [1, 2, 3, 
4] gadr.pixel_size_x = 10 gadr.pixel_size_y = 11 gadr.proj4_string = "+proj=geos +ellps=WGS84" gadr.proj_dict = {"proj": "geos", "ellps": "WGS84"} gadr.proj_id = "geos0" # test with 0 compression raster = gtbn.return_value self.img.geotiff_save("test.tif", 0, None, None, 256, geotransform="best geotransform of the world", spatialref=spaceref()) gtbn.assert_called_once_with("GTiff") raster.Create.assert_called_once_with("test.tif", self.data.shape[0], self.data.shape[1], 2, gby, ['TILED=YES', 'BLOCKXSIZE=256', 'BLOCKYSIZE=256', 'ALPHA=YES']) dst_ds = raster.Create.return_value #mock_write_channels.assert_called_once_with(dst_ds, self.data, # 255, None) self.assertEquals(mock_write_channels.call_count, 1) self.assertEquals(mock_write_channels.call_args[0][0], dst_ds) self.assertEquals(mock_write_channels.call_args[0][2], 255) self.assertTrue(mock_write_channels.call_args[0][3] is None) self.assertTrue(np.all(mock_write_channels.call_args[0][1] == self.data)) dst_ds.SetGeoTransform.assert_called_once_with("best geotransform of" " the world") srs = spaceref.return_value.ExportToWkt.return_value dst_ds.SetProjection.assert_called_once_with(srs) time_tag = {"TIFFTAG_DATETIME": self.img.time_slot.strftime("%Y:%m:%d %H:%M:%S")} dst_ds.SetMetadata.assert_called_once_with(time_tag, '') @patch('osgeo.osr.SpatialReference') @patch('mpop.projector.get_area_def') @patch('osgeo.gdal.GDT_Float64') @patch('osgeo.gdal.GDT_Byte') @patch('osgeo.gdal.GDT_UInt16') @patch('osgeo.gdal.GDT_UInt32') @patch('osgeo.gdal.GetDriverByName') @patch.object(geo_image.GeoImage, '_gdal_write_channels') def test_save_geotiff_fill_value(self, mock_write_channels, gtbn, gui32, gui16, gby, gf, gad, spaceref): """Save to geotiff format. """ # source image data, masked data but only zeros self.data = np.ma.zeros((512, 512), dtype=np.uint8) self.data.mask = np.zeros(self.data .shape, dtype=bool) self.data.mask[0,0] = True self.img = geo_image.GeoImage(self.data, area="euro", time_slot=self.time_slot) self.img.fill_value = [0] raster = gtbn.return_value self.img.geotiff_save("test.tif", 0, None, {"BLA": "09"}, 256) gtbn.assert_called_once_with("GTiff") raster.Create.assert_called_once_with("test.tif", self.data.shape[0], self.data.shape[1], 1, gby, ["BLA=09", 'TILED=YES', 'BLOCKXSIZE=256', 'BLOCKYSIZE=256']) dst_ds = raster.Create.return_value self.assertEquals(mock_write_channels.call_count, 1) self.assertEquals(mock_write_channels.call_args[0][0], dst_ds) self.assertEquals(mock_write_channels.call_args[0][2], 255) self.assertTrue(mock_write_channels.call_args[0][3], self.img.fill_value) self.assertTrue(np.all(mock_write_channels.call_args[0][1] == self.data)) @patch('osgeo.osr.SpatialReference') @patch('mpop.projector.get_area_def') @patch('osgeo.gdal.GDT_Float64') @patch('osgeo.gdal.GDT_Byte') @patch('osgeo.gdal.GDT_UInt16') @patch('osgeo.gdal.GDT_UInt32') @patch('osgeo.gdal.GetDriverByName') @patch.object(geo_image.GeoImage, '_gdal_write_channels') def test_save_geotiff_fill_value_subst(self, mock_write_channels, gtbn, gui32, gui16, gby, gf, gad, spaceref): """Save to geotiff format. 
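        The ``fill_value_subst`` writer option exercised here replaces every
        *unmasked* pixel that happens to equal the fill value with the
        substitute (here 1) before writing, so valid data cannot be confused
        with the no-data value reserved for the masked pixels.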
""" # source image data, masked data but only zeros self.data = np.ma.zeros((512, 512), dtype=np.uint8) self.data.mask = np.zeros(self.data .shape, dtype=bool) self.data.mask[0,0] = True self.img = geo_image.GeoImage(self.data, area="euro", time_slot=self.time_slot) self.img.fill_value = [0] # not masked zeros should be replaced by ones fill_value_substitution = 1 data_with_subst = np.ma.copy(self.data) np.place(data_with_subst, self.data == self.img.fill_value[0], 1) raster = gtbn.return_value self.img.geotiff_save("test.tif", 0, None, {"BLA": "09"}, 256, writer_options={writer_opts.WR_OPT_FILL_VALUE_SUBST: fill_value_substitution}) gtbn.assert_called_once_with("GTiff") raster.Create.assert_called_once_with("test.tif", self.data.shape[0], self.data.shape[1], 1, gby, ["BLA=09", 'TILED=YES', 'BLOCKXSIZE=256', 'BLOCKYSIZE=256']) dst_ds = raster.Create.return_value self.assertEquals(mock_write_channels.call_count, 1) self.assertEquals(mock_write_channels.call_args[0][0], dst_ds) self.assertEquals(mock_write_channels.call_args[0][2], 255) self.assertTrue(mock_write_channels.call_args[0][3], self.img.fill_value) # all zeros should be replaced by ones self.assertTrue(np.all(mock_write_channels.call_args[0][1] == data_with_subst)) def suite(): """The test suite for test_geo_image. """ loader = unittest.TestLoader() mysuite = unittest.TestSuite() mysuite.addTest(loader.loadTestsFromTestCase(TestGeoImage)) return mysuite mpop-1.5.0/mpop/tests/test_image.py000066400000000000000000000662661317160620000173140ustar00rootroot00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright (c) 2009, 2013, 2014. # SMHI, # Folkborgsvägen 1, # Norrköping, # Sweden # Author(s): # Martin Raspaud # Adam Dybbroe # This file is part of mpop. # mpop is free software: you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # mpop is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # You should have received a copy of the GNU General Public License # along with mpop. If not, see . """Module for testing the imageo.image module. """ import random import unittest import numpy as np import mpop.imageo.image as image EPSILON = 0.0001 # Support for python <2.5 try: all except NameError: def all(iterable): for element in iterable: if not element: return False return True class TestEmptyImage(unittest.TestCase): """Class for testing the mpop.imageo.image module """ def setUp(self): """Setup the test. """ self.img = image.Image() self.modes = ["L", "LA", "RGB", "RGBA", "YCbCr", "YCbCrA", "P", "PA"] def test_shape(self): """Shape of an empty image. """ oldmode = self.img.mode for mode in self.modes: self.img.convert(mode) self.assertEqual(self.img.shape, (0, 0)) self.img.convert(oldmode) def test_is_empty(self): """Test if an image is empty. """ self.assertEqual(self.img.is_empty(), True) def test_clip(self): """Clip an empty image. """ oldmode = self.img.mode for mode in self.modes: self.img.convert(mode) self.assertEqual(self.img.channels, []) self.img.convert(oldmode) def test_convert(self): """Convert an empty image. 
""" for mode1 in self.modes: for mode2 in self.modes: self.img.convert(mode1) self.assertEqual(self.img.mode, mode1) self.assertEqual(self.img.channels, []) self.img.convert(mode2) self.assertEqual(self.img.mode, mode2) self.assertEqual(self.img.channels, []) while True: randstr = random_string(random.choice(range(1, 7))) if randstr not in self.modes: break self.assertRaises(ValueError, self.img.convert, randstr) def test_stretch(self): """Stretch an empty image """ oldmode = self.img.mode for mode in self.modes: self.img.convert(mode) self.img.stretch() self.assertEqual(self.img.channels, []) self.img.stretch("linear") self.assertEqual(self.img.channels, []) self.img.stretch("histogram") self.assertEqual(self.img.channels, []) self.img.stretch("crude") self.assertEqual(self.img.channels, []) self.img.stretch((0.05, 0.05)) self.assertEqual(self.img.channels, []) self.assertRaises(ValueError, self.img.stretch, (0.05, 0.05, 0.05)) # Generate a random string while True: testmode = random_string(random.choice(range(1, 7))) if testmode not in self.modes: break self.assertRaises(ValueError, self.img.stretch, testmode) self.assertRaises(TypeError, self.img.stretch, 1) self.img.convert(oldmode) def test_gamma(self): """Gamma correction on an empty image. """ oldmode = self.img.mode for mode in self.modes: self.img.convert(mode) # input a single value self.img.gamma() self.assertEqual(self.img.channels, []) self.img.gamma(0.5) self.assertEqual(self.img.channels, []) self.img.gamma(1) self.assertEqual(self.img.channels, []) self.img.gamma(1.5) self.assertEqual(self.img.channels, []) # input a tuple self.assertRaises(ValueError, self.img.gamma, range(10)) self.assertRaises(ValueError, self.img.gamma, (0.2, 3.5)) self.assertRaises(TypeError, self.img.gamma, ("blue", "white")) # input a negative value self.assertRaises(ValueError, self.img.gamma, -0.5) self.assertRaises(ValueError, self.img.gamma, -1) self.assertRaises(ValueError, self.img.gamma, -3.8) self.assertRaises(TypeError, self.img.gamma, "blue") self.img.convert(oldmode) def test_invert(self): """Invert an empty image. """ oldmode = self.img.mode for mode in self.modes: self.img.convert(mode) self.img.invert() self.assertEqual(self.img.channels, []) self.img.invert(True) self.assertEqual(self.img.channels, []) self.assertRaises(ValueError, self.img.invert, [True, False]) self.assertRaises(ValueError, self.img.invert, [True, False, True, False, True, False, True, False]) self.img.convert(oldmode) def test_pil_image(self): """Return an empty PIL image. """ oldmode = self.img.mode for mode in self.modes: self.img.convert(mode) if mode == "YCbCrA": self.assertRaises(ValueError, self.img.pil_image) elif mode == "YCbCr": continue else: pilimg = self.img.pil_image() self.assertEqual(pilimg.size, (0, 0)) self.img.convert(oldmode) def test_putalpha(self): """Add an alpha channel to en empty image """ # Putting alpha channel to an empty image should not do anything except # change the mode if necessary. oldmode = self.img.mode for mode in self.modes: self.img.convert(mode) self.img.putalpha(np.array([])) self.assertEqual(self.img.channels, []) if mode.endswith("A"): self.assertEqual(self.img.mode, mode) else: self.assertEqual(self.img.mode, mode + "A") self.img.convert(oldmode) self.img.convert(mode) self.assertRaises(ValueError, self.img.putalpha, np.random.rand(3, 2)) self.img.convert(oldmode) def test_save(self): """Save an empty image. 
""" oldmode = self.img.mode for mode in self.modes: self.img.convert(mode) self.assertRaises(IOError, self.img.save, "test.png") self.img.convert(oldmode) def test_replace_luminance(self): """Replace luminance in an empty image. """ oldmode = self.img.mode for mode in self.modes: self.img.convert(mode) self.img.replace_luminance([]) self.assertEqual(self.img.mode, mode) self.assertEqual(self.img.channels, []) self.assertEqual(self.img.shape, (0, 0)) self.img.convert(oldmode) def test_resize(self): """Resize an empty image. """ self.assertRaises(ValueError, self.img.resize, (10, 10)) def test_merge(self): """Merging of an empty image with another. """ newimg = image.Image() self.assertRaises(ValueError, self.img.merge, newimg) newimg = image.Image(np.array([[1, 2], [3, 4]])) self.assertRaises(ValueError, self.img.merge, newimg) newimg = image.Image(np.array([[1, 2, 3, 4]])) self.assertRaises(ValueError, self.img.merge, newimg) class TestImageCreation(unittest.TestCase): """Class for testing the mpop.imageo.image module """ def setUp(self): """Setup the test. """ self.img = {} self.modes = ["L", "LA", "RGB", "RGBA", "YCbCr", "YCbCrA", "P", "PA"] self.modes_len = [1, 2, 3, 4, 3, 4, 1, 2] def test_creation(self): """Creation of an image. """ self.assertRaises(TypeError, image.Image, channels = random.randint(1,1000)) self.assertRaises(TypeError, image.Image, channels = random.random()) self.assertRaises(TypeError, image.Image, channels = random_string(random.randint(1,10))) chs = [np.random.rand(random.randint(1, 10), random.randint(1, 10)), np.random.rand(random.randint(1, 10), random.randint(1, 10)), np.random.rand(random.randint(1, 10), random.randint(1, 10)), np.random.rand(random.randint(1, 10), random.randint(1, 10))] self.assertRaises(ValueError, image.Image, channels = chs) one_channel = np.random.rand(random.randint(1, 10), random.randint(1, 10)) i = 0 for mode in self.modes: # Empty image, no channels self.img[mode] = image.Image(mode = mode) self.assertEqual(self.img[mode].channels, []) # Empty image, no channels, fill value self.img[mode] = image.Image(mode = mode, fill_value = 0) self.assertEqual(self.img[mode].channels, []) # Empty image, no channels, fill value, wrong color_range self.assertRaises(ValueError, image.Image, mode = mode, fill_value = 0, color_range = ((0, (1, 2)))) self.assertRaises(ValueError, image.Image, mode = mode, fill_value = 0, color_range = ((0, 0), (1, 2), (0, 0), (1, 2), (0, 0), (1, 2))) # Regular image, too many channels self.assertRaises(ValueError, image.Image, channels = ([one_channel] * (self.modes_len[i] + 1)), mode = mode) # Regular image, not enough channels self.assertRaises(ValueError, image.Image, channels = ([one_channel] * (self.modes_len[i] - 1)), mode = mode) # Regular image, channels self.img[mode] = image.Image(channels = ([one_channel] * (self.modes_len[i])), mode = mode) for nb_chan in range(self.modes_len[i]): self.assert_(np.all(self.img[mode].channels[nb_chan] == one_channel)) self.assert_(isinstance(self.img[mode].channels[nb_chan], np.ma.core.MaskedArray)) i = i + 1 class TestRegularImage(unittest.TestCase): """Class for testing the mpop.imageo.image module """ def setUp(self): """Setup the test. 
""" import os import tempfile one_channel = np.random.rand(random.randint(1, 10), random.randint(1, 10)) self.rand_img = image.Image(channels = [one_channel] * 3, mode = "RGB") self.rand_img2 = image.Image(channels = [one_channel] * 3, mode = "RGB", fill_value = (0, 0, 0)) two_channel = np.array([[0, 0.5, 0.5], [0.5, 0.25, 0.25]]) self.img = image.Image(channels = [two_channel] * 3, mode = "RGB") self.flat_channel = [[1, 1, 1], [1, 1, 1]] self.flat_img = image.Image(channels = [self.flat_channel], mode = "L", fill_value = 0) self.modes = ["L", "LA", "RGB", "RGBA", "YCbCr", "YCbCrA", "P", "PA"] self.modes_len = [1, 2, 3, 4, 3, 4, 1, 2] # create an unusable directory for permission error checking self.tempdir = tempfile.mkdtemp() os.chmod(self.tempdir, 0000) def test_shape(self): """Shape of an image. """ oldmode = self.img.mode for mode in self.modes: if mode == "P" or mode == "PA": continue self.img.convert(mode) self.assertEqual(self.img.shape, (2, 3)) self.img.convert(oldmode) def test_is_empty(self): """Test if an image is empty. """ self.assertEqual(self.img.is_empty(), False) def test_clip(self): """Clip an image. """ oldmode = self.img.mode for mode in self.modes: if mode == "P" or mode == "PA": continue self.img.convert(mode) for chn in self.img.channels: self.assert_(chn.max() <= 1.0) self.assert_(chn.max() >= 0.0) self.img.convert(oldmode) def test_convert(self): """Convert an image. """ i = 0 for mode1 in self.modes: j = 0 for mode2 in self.modes: self.img.convert(mode1) self.assertEqual(self.img.mode, mode1) self.assertEqual(len(self.img.channels), self.modes_len[i]) self.img.convert(mode2) self.assertEqual(self.img.mode, mode2) self.assertEqual(len(self.img.channels), self.modes_len[j]) self.rand_img2.convert(mode1) self.assertEqual(self.rand_img2.mode, mode1) self.assertEqual(len(self.rand_img2.channels), self.modes_len[i]) if mode1 not in ["P", "PA"]: self.assertEqual(len(self.rand_img2.fill_value), self.modes_len[i]) self.rand_img2.convert(mode2) self.assertEqual(self.rand_img2.mode, mode2) self.assertEqual(len(self.rand_img2.channels), self.modes_len[j]) if mode2 not in ["P", "PA"]: self.assertEqual(len(self.rand_img2.fill_value), self.modes_len[j]) j = j + 1 i = i + 1 while True: randstr = random_string(random.choice(range(1, 7))) if randstr not in self.modes: break self.assertRaises(ValueError, self.img.convert, randstr) def test_stretch(self): """Stretch an image. 
""" oldmode = self.img.mode for mode in "L": self.img.convert(mode) old_channels = [] for chn in self.img.channels: old_channels.append(chn) linear = np.array([[ 0., 1.00048852, 1.00048852], [ 1.00048852, 0.50024426, 0.50024426]]) crude = np.array([[0, 1, 1], [1, 0.5, 0.5]]) histo = np.array([[0.0, 0.99951171875, 0.99951171875], [0.99951171875, 0.39990234375, 0.39990234375]]) self.img.stretch() self.assert_(all([np.all(self.img.channels[i] == old_channels[i]) for i in range(len(self.img.channels))])) self.img.stretch("linear") self.assert_(np.all((self.img.channels[0] - linear) < EPSILON)) self.img.stretch("crude") self.assert_(np.all((self.img.channels[0] - crude) < EPSILON)) self.img.stretch("histogram") self.assert_(np.all(np.abs(self.img.channels[0] - histo) < EPSILON)) self.img.stretch((0.05, 0.05)) self.assert_(np.all((self.img.channels[0] - linear) < EPSILON)) self.assertRaises(ValueError, self.img.stretch, (0.05, 0.05, 0.05)) # Generate a random string while True: testmode = random_string(random.choice(range(1, 7))) if testmode not in self.modes: break self.assertRaises(ValueError, self.img.stretch, testmode) self.assertRaises(TypeError, self.img.stretch, 1) self.img.channels = old_channels self.img.convert(oldmode) def test_gamma(self): """Gamma correction on an image. """ oldmode = self.img.mode for mode in self.modes: if mode == "P" or mode == "PA": continue self.img.convert(mode) old_channels = [] for chn in self.img.channels: old_channels.append(chn) # input a single value self.img.gamma() for i in range(len(self.img.channels)): self.assert_(np.all(self.img.channels[i] == old_channels[i])) self.img.gamma(0.5) for i in range(len(self.img.channels)): self.assert_(np.all(self.img.channels[i] - old_channels[i] ** 2 < EPSILON)) self.img.gamma(1) for i in range(len(self.img.channels)): self.assert_(np.all(self.img.channels[i] - old_channels[i] ** 2 < EPSILON)) # self.img.gamma(2) # for i in range(len(self.img.channels)): # print self.img.channels[i] # print old_channels[i] # self.assert_(np.all(np.abs(self.img.channels[i] - # old_channels[i]) < EPSILON)) # input a tuple self.assertRaises(ValueError, self.img.gamma, range(10)) self.assertRaises(ValueError, self.img.gamma, (0.2, 3., 8., 1., 9.)) self.assertRaises(TypeError, self.img.gamma, ("blue", "white")) # input a negative value self.assertRaises(ValueError, self.img.gamma, -0.5) self.assertRaises(ValueError, self.img.gamma, -1) self.assertRaises(ValueError, self.img.gamma, -3.8) self.assertRaises(TypeError, self.img.gamma, "blue") self.img.convert(oldmode) def test_invert(self): """Invert an image. """ oldmode = self.img.mode for mode in self.modes: if mode == "P" or mode == "PA": continue self.img.convert(mode) old_channels = [] for chn in self.img.channels: old_channels.append(chn) self.img.invert() for i in range(len(self.img.channels)): self.assert_(np.all(self.img.channels[i] == 1 - old_channels[i])) self.img.invert(True) for i in range(len(self.img.channels)): self.assert_(np.all(self.img.channels[i] - old_channels[i] < EPSILON)) self.assertRaises(ValueError, self.img.invert, [True, False, True, False, True, False, True, False]) self.img.convert(oldmode) def test_pil_image(self): """Return an PIL image. 
""" # FIXME: Should test on palette images oldmode = self.img.mode for mode in self.modes: if (mode == "YCbCr" or mode == "YCbCrA" or mode == "P" or mode == "PA"): continue self.img.convert(mode) if mode == "YCbCrA": self.assertRaises(ValueError, self.img.pil_image) else: pilimg = self.img.pil_image() self.assertEqual(pilimg.size, (3, 2)) self.img.convert(oldmode) def test_putalpha(self): """Add an alpha channel. """ # Putting alpha channel to an image should not do anything except # change the mode if necessary. oldmode = self.img.mode alpha = np.array(np.random.rand(2, 3)) for mode in self.modes: if mode == "P" or mode == "PA": continue self.img.convert(mode) self.img.putalpha(alpha) self.assert_(np.all(self.img.channels[-1] == alpha)) if mode.endswith("A"): self.assertEqual(self.img.mode, mode) else: self.assertEqual(self.img.mode, mode + "A") self.img.convert(oldmode) self.img.convert(mode) self.assertRaises(ValueError, self.img.putalpha, np.random.rand(4, 5)) self.img.convert(oldmode) def test_save(self): """Save an image. """ import os, os.path oldmode = self.img.mode for mode in self.modes: if (mode == "YCbCr" or mode == "YCbCrA" or mode == "P" or mode == "PA"): continue self.img.convert(mode) self.img.save("test.png") self.assert_(os.path.exists("test.png")) os.remove("test.png") # permissions self.assertRaises(IOError, self.img.save, os.path.join(self.tempdir, "test.png")) self.img.convert(oldmode) def test_replace_luminance(self): """Replace luminance in an image. """ oldmode = self.img.mode for mode in self.modes: if (mode == "P" or mode == "PA"): continue self.img.convert(mode) luma = np.ma.array([[0, 0.5, 0.5], [0.5, 0.25, 0.25]]) self.img.replace_luminance(luma) self.assertEqual(self.img.mode, mode) if(self.img.mode.endswith("A")): chans = self.img.channels[:-1] else: chans = self.img.channels for chn in chans: self.assert_(np.all(chn - luma < EPSILON)) self.img.convert(oldmode) def test_resize(self): """Resize an image. """ self.img.resize((6, 6)) res = np.array([[0, 0, 0.5, 0.5, 0.5, 0.5], [0, 0, 0.5, 0.5, 0.5, 0.5], [0, 0, 0.5, 0.5, 0.5, 0.5], [0.5, 0.5, 0.25, 0.25, 0.25, 0.25], [0.5, 0.5, 0.25, 0.25, 0.25, 0.25], [0.5, 0.5, 0.25, 0.25, 0.25, 0.25]]) self.assert_(np.all(res == self.img.channels[0])) self.img.resize((2, 3)) res = np.array([[0, 0.5, 0.5], [0.5, 0.25, 0.25]]) self.assert_(np.all(res == self.img.channels[0])) def test_merge(self): """Merging of an image with another. """ newimg = image.Image() self.assertRaises(ValueError, self.img.merge, newimg) newimg = image.Image(np.array([[1, 2], [3, 4]])) self.assertRaises(ValueError, self.img.merge, newimg) newimg = image.Image(np.array([[1, 2, 3, 4]])) self.assertRaises(ValueError, self.img.merge, newimg) newimg = image.Image(np.ma.array([[1, 2, 3], [4, 5, 6]], mask = [[1, 0, 0], [1, 1, 0]]), mode = "L") self.img.convert("L") newimg.merge(self.img) self.assert_(np.all(np.abs(newimg.channels[0] - np.array([[0, 2, 3], [0.5, 0.25, 6]])) < EPSILON)) def tearDown(self): """Clean up the mess. """ import os os.rmdir(self.tempdir) class TestFlatImage(unittest.TestCase): """Test a flat image, ie an image where min == max. """ def setUp(self): channel = np.ma.array([[0, 0.5, 0.5], [0.5, 0.25, 0.25]], mask = [[1, 1, 1], [1, 1, 0]]) self.img = image.Image(channels = [channel] * 3, mode = "RGB") self.modes = ["L", "LA", "RGB", "RGBA", "YCbCr", "YCbCrA", "P", "PA"] def test_stretch(self): """Stretch a flat image. 
""" self.img.stretch() self.assert_(self.img.channels[0].shape == (2, 3) and np.ma.count_masked(self.img.channels[0]) == 5) self.img.stretch("crude") self.assert_(self.img.channels[0].shape == (2, 3) and np.ma.count_masked(self.img.channels[0]) == 5) self.img.crude_stretch(1, 2) self.assert_(self.img.channels[0].shape == (2, 3) and np.ma.count_masked(self.img.channels[0]) == 5) self.img.stretch("linear") self.assert_(self.img.channels[0].shape == (2, 3) and np.ma.count_masked(self.img.channels[0]) == 5) self.img.stretch("histogram") self.assert_(self.img.channels[0].shape == (2, 3) and np.ma.count_masked(self.img.channels[0]) == 5) class TestNoDataImage(unittest.TestCase): """Test an image filled with no data. """ def setUp(self): channel = np.ma.array([[0, 0.5, 0.5], [0.5, 0.25, 0.25]], mask = [[1, 1, 1], [1, 1, 1]]) self.img = image.Image(channels = [channel] * 3, mode = "RGB") self.modes = ["L", "LA", "RGB", "RGBA", "YCbCr", "YCbCrA", "P", "PA"] def test_stretch(self): """Stretch a no data image. """ self.img.stretch() self.assert_(self.img.channels[0].shape == (2, 3)) self.img.stretch("crude") self.assert_(self.img.channels[0].shape == (2, 3)) self.img.crude_stretch(1, 2) self.assert_(self.img.channels[0].shape == (2, 3)) self.img.stretch("linear") self.assert_(self.img.channels[0].shape == (2, 3)) self.img.stretch("histogram") self.assert_(self.img.channels[0].shape == (2, 3)) def random_string(length, choices="abcdefghijklmnopqrstuvwxyz" "ABCDEFGHIJKLMNOPQRSTUVWXYZ"): """Generates a random string with elements from *set* of the specified *length*. """ return "".join([random.choice(choices) for dummy in range(length)]) def suite(): """The test suite for test_image. """ loader = unittest.TestLoader() mysuite = unittest.TestSuite() mysuite.addTest(loader.loadTestsFromTestCase(TestImageCreation)) mysuite.addTest(loader.loadTestsFromTestCase(TestRegularImage)) mysuite.addTest(loader.loadTestsFromTestCase(TestFlatImage)) mysuite.addTest(loader.loadTestsFromTestCase(TestEmptyImage)) mysuite.addTest(loader.loadTestsFromTestCase(TestNoDataImage)) return mysuite mpop-1.5.0/mpop/tests/test_mipp.py000066400000000000000000000175701317160620000171710ustar00rootroot00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright (c) 2010, 2011, 2012, 2014. # Author(s): # Martin Raspaud # This file is part of mpop. # mpop is free software: you can redistribute it and/or modify it under the # terms of the GNU General Public License as published by the Free Software # Foundation, either version 3 of the License, or (at your option) any later # version. # mpop is distributed in the hope that it will be useful, but WITHOUT ANY # WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR # A PARTICULAR PURPOSE. See the GNU General Public License for more details. # You should have received a copy of the GNU General Public License along with # mpop. If not, see . """Test module for mipp plugin. """ import ConfigParser import datetime import random import unittest import numpy as np import mipp.xrit.sat import mpop.satin.mipp_xrit import mpop.scene from mpop.satellites import GeostationaryFactory def random_string(length, choices="abcdefghijklmnopqrstuvwxyz" "ABCDEFGHIJKLMNOPQRSTUVWXYZ"): """Generates a random string with elements from *set* of the specified *length*. 
""" return "".join([random.choice(choices) for dummy_itr in range(length)]) CHANNELS = [random_string(3) for dummy_j in range(int(random.random() * 40))] INSTRUMENT_NAME = random_string(10) DUMMY_STRING = random_string(10) def patch_configparser(): """Patch to fake ConfigParser. """ class FakeConfigParser: """Dummy ConfigParser class. """ def __init__(self, *args, **kwargs): pass def read(self, *args, **kwargs): """Dummy read method """ del args, kwargs self = self def get(self, *args, **kwargs): """Dummy get method """ del kwargs self = self sec = args[0] if args[1] in ["name"]: num = int(sec[len(INSTRUMENT_NAME) + 1:]) return "'"+CHANNELS[num]+"'" elif args[1] in ["resolution"]: return str(random.randint(1,10000)) elif args[1] in ["frequency"]: return str((random.random(), random.random()+1, random.random()+2)) elif args[1] == "format": return "mipp" else: return DUMMY_STRING def sections(self): """Dummy sections function. """ self = self secs = [INSTRUMENT_NAME+"-level2"] + [INSTRUMENT_NAME + "-" + str(j) for j, dummy in enumerate(CHANNELS)] return secs def items(self, arg): """Dummy items function. """ self = self try: chn_nb = arg[arg.find("-") + 1:] return [("name", "'" + CHANNELS[int(chn_nb)] + "'"), ("size", str((int(random.random() * 1000), int(random.random() * 1000)))), ("resolution", str(int(random.random() * 1000)))] except ValueError: return [] ConfigParser.OldConfigParser = ConfigParser.ConfigParser ConfigParser.ConfigParser = FakeConfigParser def unpatch_configparser(): """Unpatch fake ConfigParser. """ ConfigParser.ConfigParser = ConfigParser.OldConfigParser delattr(ConfigParser, "OldConfigParser") def patch_satellite(): """Patch the SatelliteInstrumentScene. """ class FakeChannel: """Dummy Channel """ def __init__(self, data): self.info = {} self.data = data class FakeSatelliteInstrumentScene: """Dummy SatelliteInstrumentScene. """ def __init__(self, *args, **kwargs): del args, kwargs self.fullname = random_string(10) self.satname = random_string(10) self.number = random_string(2) self.instrument_name = INSTRUMENT_NAME self.channels_to_load = [CHANNELS[int(random.random() * len(CHANNELS))] for dummy_i in range(int(random.random() * len(CHANNELS)))] self.time_slot = (datetime.timedelta(seconds=int(random.random() * 9999999999)) + datetime.datetime(1970, 1, 1)) self.info = {} self.area_def = None self.area_id = "" self.area = None self.channels = {} def add_to_history(self, *args): pass def __getitem__(self, key): return self.channels[key] def __setitem__(self, key, data): self.channels[key] = FakeChannel(data) mpop.scene.OldSatelliteInstrumentScene = mpop.scene.SatelliteInstrumentScene mpop.scene.SatelliteInstrumentScene = FakeSatelliteInstrumentScene def unpatch_satellite(): """Unpatch the SatelliteInstrumentScene. """ mpop.scene.SatelliteInstrumentScene = mpop.scene.OldSatelliteInstrumentScene delattr(mpop.scene, "OldSatelliteInstrumentScene") def patch_mipp(): """Patch the SatelliteInstrumentScene. """ class FakeMetadata: def __init__(self, *args, **kwargs): del args, kwargs self.calibration_unit = random_string(1) self.proj4_params = "proj=geos h=45684" self.pixel_size = (random.random() * 5642, random.random() * 5642) self.area_extent = (random.random() * 5642000, random.random() * 5642000, random.random() * 5642000, random.random() * 5642000) class FakeSlicer(object): """Fake slicer for mipp. 
""" def __getitem__(self, key): return FakeMetadata(), np.random.standard_normal((3, 3)) def __call__(self, *args): return FakeMetadata(), np.random.standard_normal((3, 3)) def fake_load(*args, **kwargs): """Fake satellite loading function. """ del args, kwargs return FakeSlicer() mipp.xrit.sat.old_load = mipp.xrit.sat.load mipp.xrit.sat.load = fake_load def unpatch_mipp(): """Unpatch the SatelliteInstrumentScene. """ mipp.xrit.sat.load = mipp.xrit.sat.old_load delattr(mipp.xrit.sat, "old_load") class TestMipp(unittest.TestCase): """Class for testing the mipp loader. """ def setUp(self): """Patch foreign modules. """ patch_configparser() patch_satellite() patch_mipp() # def test_load(self): # """Test the loading function. # """ # channels = ["VIS006", 'VIS008', 'IR_016', 'IR_039', 'WV_062', 'WV_073', # 'IR_087', 'IR_097', 'IR_108', 'IR_120', 'IR_134', 'HRV'] # satscene = GeostationaryFactory.create_scene("meteosat", "09", INSTRUMENT_NAME, None) # channels_to_load = [CHANNELS[random.randint(0, len(CHANNELS)-1)]] # satscene.load(channels_to_load) # for chn in CHANNELS: # if chn in satscene.channels_to_load: # self.assertEquals(satscene.channels[chn].data.shape, (3, 3)) def tearDown(self): """Unpatch foreign modules. """ unpatch_configparser() unpatch_satellite() unpatch_mipp() def suite(): """The test suite for test_mipp. """ loader = unittest.TestLoader() mysuite = unittest.TestSuite() mysuite.addTest(loader.loadTestsFromTestCase(TestMipp)) return mysuite mpop-1.5.0/mpop/tests/test_nc_pps_l2.py000066400000000000000000000111331317160620000200700ustar00rootroot00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright (c) 2015 Adam.Dybbroe # Author(s): # Adam.Dybbroe # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # You should have received a copy of the GNU General Public License # along with this program. If not, see . 
"""Unit testing the pps level-2 netCDF reader """ import unittest from mpop.satin.nc_pps_l2 import PPSReader CTTH_TESTFILE_LOCAL_1 = '/path/to/my/products/S_NWC_CTTH_noaa19_33897_20150906T1240015Z_20150906T1240598Z.nc' CT_TESTFILE_LOCAL_1 = '/path/to/my/products/S_NWC_CT_noaa19_33897_20150906T1240015Z_20150906T1240598Z.nc' CPP_TESTFILE_LOCAL_1 = '/path/to/my/products/S_NWC_CPP_noaa19_33897_20150906T1240015Z_20150906T1240598Z.nc' CMA_TESTFILE_LOCAL_1 = '/path/to/my/products/S_NWC_CMA_noaa19_33897_20150906T1240015Z_20150906T1240598Z.nc' PC_TESTFILE_LOCAL_1 = '/path/to/my/products/S_NWC_PC_noaa19_33897_20150906T1240015Z_20150906T1240598Z.nc' GEO_TESTFILE_LOCAL_1 = '/path/to/my/products/S_NWC_CMA_noaa19_33897_20150906T1240015Z_20150906T1240598Z.nc' CTTH_TESTFILE_EARS_1 = '/path/to/my/products/W_XX-EUMETSAT-Darmstadt,SING+LEV+SAT,METOPB+CTTH_C_EUMS_20150914093100_15510.nc.bz2' CT_TESTFILE_EARS_1 = '/path/to/my/products/W_XX-EUMETSAT-Darmstadt,SING+LEV+SAT,METOPB+CT_C_EUMS_20150914093100_15510.nc.bz2' CMA_TESTFILE_EARS_1 = '/path/to/my/products/W_XX-EUMETSAT-Darmstadt,SING+LEV+SAT,METOPB+CMA_C_EUMS_20150914093100_15510.nc.bz2' CTTH_TESTFILE_EARS_2 = '/path/to/my/products/W_XX-EUMETSAT-Darmstadt,SING+LEV+SAT,METOPB+CTTH_C_EUMS_20150914093200_15510.nc.bz2' CT_TESTFILE_EARS_2 = '/path/to/my/products/W_XX-EUMETSAT-Darmstadt,SING+LEV+SAT,METOPB+CT_C_EUMS_20150914093200_15510.nc.bz2' CMA_TESTFILE_EARS_2 = '/path/to/my/products/W_XX-EUMETSAT-Darmstadt,SING+LEV+SAT,METOPB+CMA_C_EUMS_20150914093200_15510.nc.bz2' class TestPPSReader(unittest.TestCase): """Class for testing the PPSReader reader class. """ def setUp(self): self.reader = PPSReader(object) def test_determine_prod_and_geo_files_local(self): """Test the private method _determine_prod_and_geo_files """ self.reader._source = 'local' self.reader._cloud_product_geodir = None self.reader._geolocation_product_name = 'CMA' pro, geo = self.reader._determine_prod_and_geo_files( CTTH_TESTFILE_LOCAL_1) self.assertEqual(pro.keys(), ['CTTH']) self.assertEqual(pro['CTTH'], [CTTH_TESTFILE_LOCAL_1]) self.assertEqual(geo.keys(), ['CTTH']) self.assertEqual(geo['CTTH'], [GEO_TESTFILE_LOCAL_1]) pro, geo = self.reader._determine_prod_and_geo_files( [CTTH_TESTFILE_LOCAL_1, CT_TESTFILE_LOCAL_1, CMA_TESTFILE_LOCAL_1]) self.assertEqual(len(pro.keys()), 3) self.assertTrue('CMA' in pro.keys()) self.assertTrue('CT' in geo.keys()) self.assertTrue('CTTH' in geo.keys()) self.assertEqual(geo['CTTH'], geo['CT']) self.assertEqual(geo['CTTH'], geo['CMA']) def test_determine_prod_and_geo_files_ears(self): """Test the private method _determine_prod_and_geo_files """ self.reader._source = 'ears' self.reader._cloud_product_geodir = None self.reader._geolocation_product_name = None pro, geo = self.reader._determine_prod_and_geo_files( CTTH_TESTFILE_EARS_1) self.assertEqual(pro.keys(), ['CTTH']) self.assertEqual(pro['CTTH'], [CTTH_TESTFILE_EARS_1]) pro, geo = self.reader._determine_prod_and_geo_files( [CTTH_TESTFILE_EARS_1, CT_TESTFILE_EARS_1, CMA_TESTFILE_EARS_1]) nkeys = len(pro.keys()) self.assertEqual(nkeys, 3) for key in pro.keys(): self.assertTrue(key in ["CTTH", "CT", "CMA"]) for key in pro.keys(): self.assertEqual(geo[key], pro[key]) for key in pro.keys(): if key not in ['CMA']: self.assertNotEqual(geo[key], geo['CMA']) def tearDown(self): pass def suite(): """The test suite for test_viirs_sdr. 
""" loader = unittest.TestLoader() mysuite = unittest.TestSuite() mysuite.addTest(loader.loadTestsFromTestCase(TestPPSReader)) return mysuite mpop-1.5.0/mpop/tests/test_plugin.py000066400000000000000000000020641317160620000175120ustar00rootroot00000000000000# -*- coding: utf-8 -*- # Copyright (c) 2010. # SMHI, # Folkborgsvägen 1, # Norrköping, # Sweden # Author(s): # Martin Raspaud # This file is part of mpop. # mpop is free software: you can redistribute it and/or modify it under the # terms of the GNU General Public License as published by the Free Software # Foundation, either version 3 of the License, or (at your option) any later # version. # mpop is distributed in the hope that it will be useful, but WITHOUT ANY # WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR # A PARTICULAR PURPOSE. See the GNU General Public License for more details. # You should have received a copy of the GNU General Public License along with # mpop. If not, see . """Module to test the plugin interface. """ import numpy as np def get_lat_lon(*args, **kwargs): """Dummy get_lat_lon function. """ del args, kwargs return (np.zeros((3, 3)), np.zeros((3, 3))) def load(satscene): """Dummy load function. """ pass mpop-1.5.0/mpop/tests/test_pp_core.py000066400000000000000000000064771317160620000176570ustar00rootroot00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright (c) 2010, 2012, 2014. # SMHI, # Folkborgsvägen 1, # Norrköping, # Sweden # Author(s): # Martin Raspaud # This file is part of mpop. # mpop is free software: you can redistribute it and/or modify it under the # terms of the GNU General Public License as published by the Free Software # Foundation, either version 3 of the License, or (at your option) any later # version. # mpop is distributed in the hope that it will be useful, but WITHOUT ANY # WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR # A PARTICULAR PURPOSE. See the GNU General Public License for more details. # You should have received a copy of the GNU General Public License along with # mpop. If not, see . """Integration testing of - :mod:`mpop.scene` - :mod:`mpop.channel` - :mod:`mpop.projector` """ import random import unittest from mock import MagicMock import sys sys.modules['pyresample'] = MagicMock() import mpop.scene def random_string(length, choices="abcdefghijklmnopqrstuvwxyz" "ABCDEFGHIJKLMNOPQRSTUVWXYZ"): """Generates a random string with elements from *set* of the specified *length*. """ return "".join([random.choice(choices) for dummy_itr in range(length)]) class TestPPCore(unittest.TestCase): """Class for testing the core of mpop. """ def setUp(self): """Apply patches. """ self.scene = mpop.scene.SatelliteInstrumentScene() def test_channel_list_syntax(self): """Test syntax for channel list """ channels = [["00_7", (0.5, 0.7, 0.9), 2500], ["06_4", (5.7, 6.4, 7.1), 5000], ["11_5", (10.5, 11.5, 12.5), 5000]] class Satscene(mpop.scene.SatelliteInstrumentScene): """Adding a channel list. 
""" instrument_name = random_string(8) channel_list = channels self.scene = Satscene() for i, chn in enumerate(self.scene.channels): self.assertTrue(isinstance(chn, mpop.channel.Channel)) self.assertEquals(chn.name, channels[i][0]) self.assertEquals(chn.wavelength_range, list(channels[i][1])) self.assertEquals(chn.resolution, channels[i][2]) def test_project(self): """Test project """ channels = [["00_7", (0.5, 0.7, 0.9), 2500], ["06_4", (5.7, 6.4, 7.1), 5000], ["11_5", (10.5, 11.5, 12.5), 5000]] class Satscene(mpop.scene.SatelliteInstrumentScene): """Adding a channel list. """ instrument_name = random_string(8) channel_list = channels area = random_string(8) self.scene = Satscene(area=area) area2 = random_string(8) new_scene = self.scene.project(area2) self.assertEquals(new_scene.area_id, area2) for chn in new_scene.channels: print chn.area self.assertEquals(chn.area, area2) def suite(): """The test suite for test_pp_core. """ loader = unittest.TestLoader() mysuite = unittest.TestSuite() mysuite.addTest(loader.loadTestsFromTestCase(TestPPCore)) return mysuite mpop-1.5.0/mpop/tests/test_projector.py000066400000000000000000000272171317160620000202320ustar00rootroot00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright (c) 2009, 2012, 2013, 2014. # SMHI, # Folkborgsvägen 1, # Norrköping, # Sweden # Author(s): # Martin Raspaud # Adam Dybbroe # This file is part of mpop. # mpop is free software: you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # mpop is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # You should have received a copy of the GNU General Public License # along with mpop. If not, see . """Test module for mpop.projector. """ import unittest import numpy as np from mock import MagicMock, patch import sys sys.modules['pyresample'] = MagicMock() sys.modules['pyresample.bilinear'] = MagicMock() from pyresample import geometry, utils from mpop.projector import Projector import mpop.projector class TestProjector(unittest.TestCase): """Class for testing the Projector class. """ proj = None @patch('mpop.projector.get_bil_info') @patch.object(utils, 'generate_quick_linesample_arrays') @patch.object(mpop.projector.kd_tree, 'get_neighbour_info') @patch.object(mpop.projector, '_get_area_hash') def test_init(self, gah, gni, gqla, bil_info): """Creation of coverage. 
""" # in case of wrong number of arguments self.assertRaises(TypeError, Projector) self.assertRaises(TypeError, Projector, random_string(20)) # in case of string arguments in_area_id = random_string(20) out_area_id = random_string(20) area_type = utils.parse_area_file.return_value.__getitem__.return_value gni.side_effect = [("a", "b", "c", "d")] * 10 self.proj = Projector(in_area_id, out_area_id) self.assertEquals(utils.parse_area_file.call_count, 2) area_file = mpop.projector.get_area_file() utils.parse_area_file.assert_any_call(area_file, in_area_id) utils.parse_area_file.assert_any_call(area_file, out_area_id) self.assertEquals(self.proj.in_area, area_type) self.assertEquals(self.proj.out_area, area_type) # in case of undefined areas mock = MagicMock(side_effect=Exception("raise")) with patch.object(utils, 'parse_area_file', mock): self.assertRaises(Exception, Projector, "raise", random_string(20)) self.assertRaises(Exception, Projector, random_string(20), "raise") # in case of geometry objects as input with patch.object(utils, 'AreaNotFound', Exception): mock = MagicMock(side_effect=[utils.AreaNotFound("raise"), MagicMock()]) with patch.object(utils, 'parse_area_file', mock): in_area = geometry.AreaDefinition() self.proj = Projector(in_area, out_area_id) self.assertEquals(self.proj.in_area, in_area) in_area = geometry.SwathDefinition() utils.parse_area_file.return_value.__getitem__.side_effect = [ AttributeError, out_area_id] self.proj = Projector(in_area, out_area_id) self.assertEquals(self.proj.in_area, in_area) out_area = geometry.AreaDefinition() utils.parse_area_file.return_value.__getitem__.side_effect = [ in_area_id, AttributeError] self.proj = Projector(in_area_id, out_area) self.assertEquals(self.proj.out_area, out_area) # in case of lon/lat is input utils.parse_area_file.return_value.__getitem__.side_effect = [ AttributeError, out_area_id] lonlats = ("great_lons", "even_greater_lats") self.proj = Projector("raise", out_area_id, lonlats) geometry.SwathDefinition.assert_called_with(lons=lonlats[0], lats=lonlats[1]) utils.parse_area_file.return_value.__getitem__.side_effect = None # in case of wrong mode self.assertRaises(ValueError, Projector, random_string(20), random_string(20), mode=random_string(20)) utils.parse_area_file.return_value.__getitem__.side_effect = ["a", "b", "c", "d"] gqla.side_effect = [("ridx", "cidx")] # quick mode cache self.proj = Projector(in_area_id, out_area_id, mode="quick") cache = getattr(self.proj, "_cache") self.assertTrue(cache['row_idx'] is not None) self.assertTrue(cache['col_idx'] is not None) # nearest mode cache self.proj = Projector(in_area_id, out_area_id, mode="nearest") cache = getattr(self.proj, "_cache") self.assertTrue(cache['valid_index'] is not None) self.assertTrue(cache['valid_output_index'] is not None) self.assertTrue(cache['index_array'] is not None) # bilinear mode cache bil_info.return_value = (1, 2, 3, 4) def spam(val): return 'adef' with patch.object(mpop.projector, 'get_area_def', spam): self.proj = Projector(in_area_id, out_area_id, mode="bilinear") cache = getattr(self.proj, "_cache") self.assertTrue(cache['bilinear_t'] is not None) self.assertTrue(cache['bilinear_s'] is not None) self.assertTrue(cache['input_idxs'] is not None) self.assertTrue(cache['idx_arr'] is not None) @patch.object(np.ma, "array") @patch.object(mpop.projector.kd_tree, 'get_sample_from_neighbour_info') @patch.object(np, "load") def test_project_array(self, npload, gsfni, marray): """Test the project_array function. 
""" in_area_id = random_string(20) out_area_id = random_string(20) data = np.random.standard_normal((3, 1)) utils.parse_area_file.return_value.__getitem__.side_effect = [ "a", "b", "c", "d"] # test quick self.proj = Projector(in_area_id, out_area_id, mode="quick") self.proj.project_array(data) mpop.projector.image.ImageContainer.assert_called_with( data, "a", fill_value=None) mpop.projector.image.ImageContainer.return_value.\ get_array_from_linesample.assert_called_with( self.proj._cache["row_idx"], self.proj._cache["col_idx"]) marray.assert_called_once_with( mpop.projector.image.ImageContainer.return_value. get_array_from_linesample.return_value, dtype=np.dtype('float64')) # test nearest in_area = MagicMock() out_area = MagicMock() utils.parse_area_file.return_value.__getitem__.side_effect = [ in_area, out_area] self.proj = Projector(in_area_id, out_area_id, mode="nearest") self.proj.project_array(data) mpop.projector.kd_tree.get_sample_from_neighbour_info.\ assert_called_with('nn', out_area.shape, data, npload.return_value.__getitem__.return_value, npload.return_value.__getitem__.return_value, npload.return_value.__getitem__.return_value, fill_value=None) @patch.object(mpop.projector.kd_tree, 'get_neighbour_info') def test_calc_nearest_params(self, gni): gni.return_value = (1, 2, 3, 4) res = mpop.projector.calc_nearest_params('in_area', 'out_area', 'radius', nprocs='nprocs') self.assertTrue(isinstance(res, dict)) self.assertTrue('valid_index' in res) self.assertEqual(res['valid_index'], 1) self.assertTrue('valid_output_index' in res) self.assertEqual(res['valid_output_index'], 2) self.assertTrue('index_array' in res) self.assertEqual(res['index_array'], 3) @patch.object(mpop.projector.utils, 'generate_quick_linesample_arrays') def test_calc_quick_params(self, gqla): gqla.return_value = (1, 2) res = mpop.projector.calc_quick_params('in_area', 'out_area') self.assertTrue(isinstance(res, dict)) self.assertTrue('row_idx' in res) self.assertEqual(res['row_idx'], 1) self.assertTrue('col_idx' in res) self.assertEqual(res['col_idx'], 2) @patch.object(mpop.projector, 'get_bil_info') def test_calc_bilinear_params(self, gbi): gbi.return_value = (1, 2, 3, 4) res = mpop.projector.calc_bilinear_params('in_area', 'out_area', 'radius', nprocs='nprocs') self.assertTrue(isinstance(res, dict)) self.assertTrue('bilinear_t' in res) self.assertEqual(res['bilinear_t'], 1) self.assertTrue('bilinear_s' in res) self.assertEqual(res['bilinear_s'], 2) self.assertTrue('input_idxs' in res) self.assertEqual(res['input_idxs'], 3) self.assertTrue('idx_arr' in res) self.assertEqual(res['idx_arr'], 4) @patch.object(mpop.projector, 'll2cr') def test_calc_ewa_params(self, ll2): ll2.return_value = (0, 1, 2) res = mpop.projector.calc_ewa_params('in_area', 'out_area') self.assertTrue(isinstance(res, dict)) self.assertTrue('ewa_cols' in res) self.assertEqual(res['ewa_cols'], 1) self.assertTrue('ewa_rows' in res) self.assertEqual(res['ewa_rows'], 2) def test_get_precompute_cache_fname(self): res = mpop.projector.get_precompute_cache_fname('in_id', 'out_id', 'in_area', 'out_area', 'mode', 'proj_dir') cor_res = "proj_dir/in_id2out_id_-" + \ "6296787761359943868to8984161303220364208_mode.npz" self.assertTrue(res == cor_res) @patch.object(mpop.projector, 'get_area_def') @patch.object(mpop.projector.geometry, 'SwathDefinition') def test_get_area_and_id(self, swath_def, gad): # Case when get_area_def works swath_def.return_value = 1 gad.return_value = 'adef' res = mpop.projector.get_area_and_id('area') self.assertTrue(res[0] == 
'adef')
        self.assertTrue(res[1] == 'area')

        # Case when AttributeError is raised
        with self.assertRaises(AttributeError):
            gad.side_effect = AttributeError
            res = mpop.projector.get_area_and_id('area')

        # Case when AttributeError is raised and latlons are given
        gad.side_effect = AttributeError
        res = mpop.projector.get_area_and_id('area', latlons=[1, 2])
        self.assertEqual(res[0], 1)
        self.assertEqual(res[1], 'area')


def random_string(length,
                  choices="abcdefghijklmnopqrstuvwxyz"
                  "ABCDEFGHIJKLMNOPQRSTUVWXYZ"):
    """Generates a random string with elements from *choices* of the
    specified *length*.
    """
    import random
    return "".join([random.choice(choices) for dummy in range(length)])


def suite():
    """The test suite for test_projector.
    """
    loader = unittest.TestLoader()
    mysuite = unittest.TestSuite()
    mysuite.addTest(loader.loadTestsFromTestCase(TestProjector))

    return mysuite
mpop-1.5.0/mpop/tests/test_satellites.py000066400000000000000000000171351317160620000203700ustar00rootroot00000000000000#!/usr/bin/env python
# -*- coding: utf-8 -*-

# Copyright (c) 2010, 2011, 2014.

# SMHI,
# Folkborgsvägen 1,
# Norrköping,
# Sweden

# Author(s):

#   Martin Raspaud

# This file is part of mpop.

# mpop is free software: you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.

# mpop is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU General Public License for more details.

# You should have received a copy of the GNU General Public License along with
# mpop. If not, see <http://www.gnu.org/licenses/>.

"""Unit tests for the module :mod:`pp.satellites`.
"""

import ConfigParser
import random
import unittest

import mpop.instruments.visir
import mpop.satellites

INSTRUMENTS = ()


def random_string(length,
                  choices="abcdefghijklmnopqrstuvwxyz"
                  "ABCDEFGHIJKLMNOPQRSTUVWXYZ"):
    """Generates a random string with elements from *choices* of the
    specified *length*.
    """
    return "".join([random.choice(choices) for dummy_itr in range(length)])


def patch_configparser():
    """Patch to fake ConfigParser.
    """
    class FakeConfigParser:
        """Dummy ConfigParser class.
        """

        def __init__(self, *args, **kwargs):
            pass

        def sections(self, *args, **kwargs):
            """Dummy sections method.
            """
            self = self
            del args, kwargs
            sections = []
            for i in INSTRUMENTS:
                for j in range(int(random.random() * 10 + 1)):
                    sections += [i + str(j)]
            return sections

        def read(self, *args, **kwargs):
            """Dummy read method
            """
            del args, kwargs
            self = self

        def get(self, *args, **kwargs):
            """Dummy get method
            """
            del kwargs
            self = self
            if args[1] == "instruments":
                return str(INSTRUMENTS)
            if args[1] == "name":
                return "'" + random_string(3) + "'"
            if args[1] == "resolution":
                return str(int(random.random() * 50000 + 1))
            if args[1] == "frequency":
                return str(random.random())
            if args[1] == "module":
                return random_string(8)

    ConfigParser.OldConfigParser = ConfigParser.ConfigParser
    ConfigParser.ConfigParser = FakeConfigParser


def unpatch_configparser():
    """Unpatch fake ConfigParser.
    """
    ConfigParser.ConfigParser = ConfigParser.OldConfigParser
    delattr(ConfigParser, "OldConfigParser")


def patch_scene():
    """Patch the :mod:`mpop.instruments.visir` module to avoid using it in
    these tests.
    """
    class FakeChannel(object):
        """FakeChannel class.
        """

        def __init__(self, val):
            self.data = val

        def check_range(self, *args):
            """Dummy check_range function.
""" del args return self.data class FakeSatscene(object): """Fake SatelliteInstrumentScene. """ __version__ = "fake" def __init__(self): self.channels = None self.area = None self.time_slot = None self.error = [] def check_channels(self, *args): """Dummy check_channels function. """ self.channels = args def __getitem__(self, key): if key in self.error: raise KeyError() return FakeChannel(key) mpop.instruments.visir.OldVisirCompositer = mpop.instruments.visir.VisirCompositer mpop.instruments.visir.VisirCompositer = FakeSatscene reload(mpop.satellites) def unpatch_scene(): """Unpatch the :mod:`mpop.scene` module. """ mpop.instruments.visir.VisirCompositer = mpop.instruments.visir.OldVisirCompositer delattr(mpop.instruments.visir, "OldVisirCompositer") reload(mpop) reload(mpop.instruments) reload(mpop.instruments.visir) reload(mpop.satellites) class TestSatellites(unittest.TestCase): """Test the satellites base functions. """ def setUp(self): """Patch stuff. """ patch_configparser() patch_scene() def test_buildinstrument(self): """Test the :func:`mpop.satellites.build_instrument` function. """ name = random_string(10) #ch_list = [random_string(10), random_string(12)] inst = mpop.satellites.build_instrument_compositer(name) # Test that the patches are applied self.assertEquals(inst.__version__, "fake") #self.assertEquals(inst.channel_list, ch_list) self.assertEquals(inst.instrument_name, name) self.assertEquals(inst.mro()[1], mpop.instruments.visir.VisirCompositer) def test_build_satellite_class(self): """Test the :func:`mpop.satellites.build_satellite_class` function. """ global INSTRUMENTS inst = random_string(10) INSTRUMENTS = (inst, ) satname = random_string(10) satnumber = random_string(10) satvar = random_string(10) myclass = mpop.satellites.build_sat_instr_compositer((satname, satnumber, satvar), inst) #self.assertEquals(myclass.satname, satname) #self.assertEquals(myclass.number, satnumber) #self.assertEquals(myclass.variant, satvar) self.assertEquals(myclass.mro()[1].__name__, inst.capitalize() + "Compositer") def test_get_satellite_class(self): """Test the :func:`mpop.satellites.get_satellite_class` function. """ global INSTRUMENTS inst = random_string(10) INSTRUMENTS = ("avhrr", inst) satname = random_string(11) satnumber = random_string(10) satvar = random_string(10) klass = mpop.satellites.get_sat_instr_compositer((satname, satnumber, satvar), inst) self.assertTrue(klass.mro()[0].__name__.startswith( satvar.capitalize() + satname.capitalize() + satnumber.capitalize())) INSTRUMENTS = (inst,) satname = random_string(11) satnumber = random_string(10) satvar = random_string(10) klass = mpop.satellites.get_sat_instr_compositer((satname, satnumber, satvar), inst) pklass = klass.mro()[0] self.assertTrue(pklass.__name__.startswith( satvar.capitalize() + satname.capitalize() + satnumber.capitalize())) def tearDown(self): """Unpatch stuff. """ unpatch_configparser() unpatch_scene() def suite(): """The test suite for test_satellites. """ loader = unittest.TestLoader() mysuite = unittest.TestSuite() mysuite.addTest(loader.loadTestsFromTestCase(TestSatellites)) return mysuite mpop-1.5.0/mpop/tests/test_satin_helpers.py000066400000000000000000000252221317160620000210550ustar00rootroot00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- # # Author(s): # # Panu Lahtinen . 
import unittest import numpy as np '''Integration testing of - :mod:`mpop.satin.helper_functions` ''' class TestSatinHelpers(unittest.TestCase): '''Class for testing mpop.satin''' def test_boundaries_to_extent(self): '''Test conversion of area boundaries to area extent. ''' from mpop.satin.helper_functions import boundaries_to_extent # MSG3 proj4 string from # xrit.sat.load(..., only_metadata=True).proj4_params proj4_str = 'proj=geos lon_0=0.00 lat_0=0.00 ' \ 'a=6378169.00 b=6356583.80 h=35785831.00' # MSG3 maximum extent msg_extent = [-5567248.07, -5570248.48, 5570248.48, 5567248.07] euro4_lons = [np.array([-47.45398384, -43.46278935, -38.35946515, -31.73014962, -23.05306111, 11.8361092, 1.9545262, 17.28655348, 32.17162432, 44.92350518, 55.01855232, 56.988557157486078]), np.array([56.98855716, 50.26011569, 45.1592762, 41.21696892, 38.10602167, 35.60224391, 33.55098034, 31.8438098, 30.40324844, 29.17282762, 28.11061579, 27.886603224354555]), np.array([27.88660322, 23.94855341, 19.91336672, 15.81854029, 11.70507781, 7.61511006, 3.58934937, -0.33524747, -4.1272886, -7.76204144, -11.2217833, -11.991484302295099]), np.array([-11.9914843, -13.71190987, -15.65433484, -17.8592324, -20.37559742, -23.26235124, -26.5893562, -30.43725577, -34.8946782, -40.05040055, -45.97725877, -47.453983842896925]) ] euro4_lats = [np.array([ 60.95152407, 64.07948755, 67.08804237, 69.89447062, 72.37400834, 74.34558786, 75.57997723, 75.8713547, 75.16167548, 73.58553666, 71.37260506, 70.797059167821104]), np.array([ 70.79705917, 67.92687675, 64.85946318, 61.67911498, 58.44076323, 55.18141964, 51.92695755, 48.69607712, 45.50265971, 42.35720453, 39.26773508, 38.565754283815295]), np.array([ 38.56575428, 39.21556029, 39.65166546, 39.86532337, 39.85213881, 39.61238514, 39.15098428, 38.47715262, 37.60377021, 36.54656798, 35.32324138, 35.020342638475668]), np.array([ 35.02034264, 37.76813725, 40.533077, 43.300949, 46.05396441, 48.76986157, 51.42078481, 53.97194327, 56.38014919, 58.59254174, 60.54617556, 60.95152407157881]) ] # Correct extent values for these boundaries correct_values_euro4 = [-2041009.079233268, 3502723.3881863873, 2211266.5660426724, 5387911.4915445326] maximum_extent_euro4 = boundaries_to_extent(proj4_str, None, msg_extent, euro4_lons, euro4_lats) for i in range(4): self.assertAlmostEqual(maximum_extent_euro4[i], correct_values_euro4[i], 2) # Two of the area corner points is outside the satellite view afgh_lons = [np.array([49.94506701, 52.14080597, 54.33654493, 56.53228389, 58.72802285, 60.92376181, 63.11950077, 65.31523973, 67.51097869, 69.70671766, 71.90245662, 74.09819558, 76.29393454, 78.4896735, 80.68541246, 82.88115142]), np.array([85.05493299, 85.05493299, 85.05493299, 85.05493299, 85.05493299, 85.05493299, 85.05493299, 85.05493299, 85.05493299, 85.05493299, 85.05493299, 85.05493299, 85.05493299, 85.05493299, 85.05493299, 85.05493299]), np.array([85.05493299, 82.85919403, 80.66345507, 78.46771611, 76.27197715, 74.07623819, 71.88049923, 69.68476027, 67.48902131, 65.29328234, 63.09754338, 60.90180442, 58.70606546, 56.5103265, 54.31458754, 52.11884858]), np.array([49.94506701, 49.94506701, 49.94506701, 49.94506701, 49.94506701, 49.94506701, 49.94506701, 49.94506701, 49.94506701, 49.94506701, 49.94506701, 49.94506701, 49.94506701, 49.94506701, 49.94506701, 49.94506701])] afgh_lats = [np.array([46.52610743, 46.52610743, 46.52610743, 46.52610743, 46.52610743, 46.52610743, 46.52610743, 46.52610743, 46.52610743, 46.52610743, 46.52610743, 46.52610743, 46.52610743, 46.52610743, 46.52610743, 46.52610743]), 
np.array([46.52610743, 44.99436458, 43.42055852, 41.804754, 40.14714935, 38.4480861, 36.70805834, 34.92772129, 33.10789917, 31.24959192, 29.35398073, 27.42243208, 25.45649997, 23.4579264, 21.4286396, 19.37075017]), np.array([17.30750918, 17.30750918, 17.30750918, 17.30750918, 17.30750918, 17.30750918, 17.30750918, 17.30750918, 17.30750918, 17.30750918, 17.30750918, 17.30750918, 17.30750918, 17.30750918, 17.30750918, 17.30750918]), np.array([17.30750918, 19.39146328, 21.44907771, 23.47806753, 25.47632393, 27.44192051, 29.37311717, 31.26836176, 33.12628971, 34.94572163, 36.72565938, 38.46528046, 40.16393131, 41.82111941, 43.43650469, 45.00989022]) ] # Correct values for these borders correct_values_afgh = [3053894.9120028536, 1620176.1036167517, 5187086.4642274799, 4155907.3124084808] maximum_extent_afgh = boundaries_to_extent(proj4_str, None, msg_extent, afgh_lons, afgh_lats) for i in range(len(maximum_extent_afgh)): self.assertAlmostEqual(maximum_extent_afgh[i], correct_values_afgh[i], 2) # Correct values for combined boundaries correct_values_comb = [-2041009.079233268, 1620176.1036167517, 5187086.4642274799, 5387911.4915445326] maximum_extent_comb = boundaries_to_extent(proj4_str, maximum_extent_euro4, msg_extent, afgh_lons, afgh_lats) for i in range(4): self.assertAlmostEqual(maximum_extent_comb[i], correct_values_comb[i], 2) # Borders where none of the corners are within the satellite view lons = [np.array([-170., 170., -170., 170])] lats = [np.array([89., 89., -89., -89])] # Correct values are the same as the full disc extent correct_values = [-5567248.07, -5570248.48, 5570248.48, 5567248.07] maximum_extent_full = boundaries_to_extent(proj4_str, None, msg_extent, lons, lats) for i in range(4): self.assertAlmostEqual(maximum_extent_full[i], correct_values[i], 2) def suite(): """The test suite for test_satin_helpers. """ loader = unittest.TestLoader() mysuite = unittest.TestSuite() mysuite.addTest(loader.loadTestsFromTestCase(TestSatinHelpers)) return mysuite mpop-1.5.0/mpop/tests/test_scene.py000066400000000000000000000661001317160620000173120ustar00rootroot00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright (c) 2010, 2011, 2012, 2014, 2015. # SMHI, # Folkborgsvägen 1, # Norrköping, # Sweden # Author(s): # Martin Raspaud # This file is part of mpop. # mpop is free software: you can redistribute it and/or modify it under the # terms of the GNU General Public License as published by the Free Software # Foundation, either version 3 of the License, or (at your option) any later # version. # mpop is distributed in the hope that it will be useful, but WITHOUT ANY # WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR # A PARTICULAR PURPOSE. See the GNU General Public License for more details. # You should have received a copy of the GNU General Public License along with # mpop. If not, see . """Unit tests for scene.py. """ from mock import MagicMock, patch import sys sys.modules['pyresample.geometry'] = MagicMock() import ConfigParser import datetime import random import unittest import numpy as np import mpop.projector from mpop.channel import NotLoadedError from mpop.scene import SatelliteScene, SatelliteInstrumentScene def random_string(length, choices="abcdefghijklmnopqrstuvwxyz" "ABCDEFGHIJKLMNOPQRSTUVWXYZ"): """Generates a random string with elements from *set* of the specified *length*. 
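
    (The elements are drawn from the *choices* argument; an illustrative
    call such as random_string(4) could return e.g. 'KxTz'.)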
""" return "".join([random.choice(choices) for dummy_itr in range(length)]) EPSILON = 0.0001 DUMMY_STRING = "test_plugin" def patch_configparser(): """Patch to fake ConfigParser. """ class FakeConfigParser: """Dummy ConfigParser class. """ def __init__(self, *args, **kwargs): pass def read(self, *args, **kwargs): """Dummy read method """ del args, kwargs self = self def get(self, *args, **kwargs): """Dummy get method """ del args, kwargs self = self return DUMMY_STRING def sections(self): """Dummy sections method """ # return ["satellite", "udlptou-4"] raise ConfigParser.NoSectionError("Dummy sections.") ConfigParser.OldConfigParser = ConfigParser.ConfigParser ConfigParser.ConfigParser = FakeConfigParser def unpatch_configparser(): """Unpatch fake ConfigParser. """ ConfigParser.ConfigParser = ConfigParser.OldConfigParser delattr(ConfigParser, "OldConfigParser") def patch_projector(): """Patch to fake projector. """ class FakeProjector: """Dummy Projector class. """ def __init__(self, *args, **kwargs): del args, kwargs self.out_area = None def project_array(self, arg): """Dummy project_array method. """ return arg def fake_get_area_def(area): return area mpop.projector.OldProjector = mpop.projector.Projector mpop.projector.Projector = FakeProjector mpop.projector.old_get_area_def = mpop.projector.get_area_def mpop.projector.get_area_def = fake_get_area_def def unpatch_projector(): """Unpatch fake projector """ mpop.projector.Projector = mpop.projector.OldProjector delattr(mpop.projector, "OldProjector") mpop.projector.get_area_def = mpop.projector.old_get_area_def delattr(mpop.projector, "old_get_area_def") class TestSatelliteScene(unittest.TestCase): """Class for testing the SatelliteScene class. """ scene = None def test_init(self): """Creation of a satellite scene. """ self.scene = SatelliteScene() self.assertEquals(self.scene.satname, "") self.assertEquals(self.scene.number, "") self.assert_(self.scene.time_slot is None) self.assert_(self.scene.orbit is None) self.assert_(self.scene.area is None) self.assert_(self.scene.lat is None) self.assert_(self.scene.lon is None) # time_slot time_slot = datetime.datetime.now() self.scene = SatelliteScene(time_slot=time_slot) self.assertEquals(self.scene.satname, "") self.assertEquals(self.scene.number, "") self.assertEquals(self.scene.time_slot, time_slot) self.assert_(self.scene.orbit is None) self.assert_(self.scene.area is None) self.assert_(self.scene.lat is None) self.assert_(self.scene.lon is None) # area area = random_string(int(np.random.uniform(9)) + 1) self.scene = SatelliteScene(area=area) self.assertEquals(self.scene.satname, "") self.assertEquals(self.scene.number, "") self.assertEquals(self.scene.area, area) self.assert_(self.scene.orbit is None) self.assert_(self.scene.time_slot is None) self.assert_(self.scene.lat is None) self.assert_(self.scene.lon is None) self.assertRaises(TypeError, SatelliteScene, area=np.random.uniform(1000)) self.assertRaises(TypeError, SatelliteScene, area=int(np.random.uniform(1000))) self.assertRaises(TypeError, SatelliteScene, area=[]) # orbit orbit = random_string(int(np.random.uniform(9)) + 1) self.scene = SatelliteScene(orbit=orbit) self.assertEquals(self.scene.satname, "") self.assertEquals(self.scene.number, "") self.assertEquals(self.scene.orbit, orbit) self.assert_(self.scene.area is None) self.assert_(self.scene.time_slot is None) self.assert_(self.scene.lat is None) self.assert_(self.scene.lon is None) def test_fullname(self): """Fullname of a sat scene. 
""" self.scene = SatelliteScene() self.scene.satname = random_string(int(np.random.uniform(9)) + 1) self.scene.number = random_string(int(np.random.uniform(9)) + 1) self.scene.variant = random_string(int(np.random.uniform(9)) + 1) self.assertEquals(self.scene.fullname, self.scene.variant + self.scene.satname + self.scene.number) class TestSatelliteInstrumentScene(unittest.TestCase): """Class for testing the SatelliteInstrumentScene class. """ scene = None def setUp(self): """Patch foreign modules. """ patch_configparser() patch_projector() def test_init_area(self): """Creation of a satellite instrument scene. """ channels = [["00_7", (0.5, 0.7, 0.9), 2500], ["06_4", (5.7, 6.4, 7.1), 5000], ["11_5", (10.5, 11.5, 12.5), 5000]] class SatelliteInstrumentScene2(SatelliteInstrumentScene): """Dummy satinst class. """ channel_list = channels # area area = random_string(int(np.random.uniform(9)) + 1) self.scene = SatelliteInstrumentScene2(area=area) self.assertEquals(self.scene.satname, "") self.assertEquals(self.scene.number, "") self.assertEquals(self.scene.area, area) self.assert_(self.scene.orbit is None) self.assert_(self.scene.time_slot is None) self.assert_(self.scene.lat is None) self.assert_(self.scene.lon is None) self.assert_(self.scene.instrument_name is None) self.assertEquals(self.scene.channels_to_load, set([])) for i, chn in enumerate(self.scene.channels): self.assertEquals(chn.name, channels[i][0]) self.assertEquals(chn.wavelength_range, list(channels[i][1])) self.assertEquals(chn.resolution, channels[i][2]) self.assertRaises(TypeError, SatelliteInstrumentScene2, area=np.random.uniform(1000)) self.assertRaises(TypeError, SatelliteInstrumentScene2, area=int(np.random.uniform(1000))) self.assertRaises(TypeError, SatelliteInstrumentScene2, area=[]) def test_init_orbit(self): """Creation of a satellite instrument scene. """ channels = [["00_7", (0.5, 0.7, 0.9), 2500], ["06_4", (5.7, 6.4, 7.1), 5000], ["11_5", (10.5, 11.5, 12.5), 5000]] class SatelliteInstrumentScene2(SatelliteInstrumentScene): """Dummy satinst class. """ channel_list = channels # orbit orbit = random_string(int(np.random.uniform(9)) + 1) self.scene = SatelliteInstrumentScene2(orbit=orbit) self.assertEquals(self.scene.satname, "") self.assertEquals(self.scene.number, "") self.assertEquals(self.scene.orbit, orbit) self.assert_(self.scene.area is None) self.assert_(self.scene.time_slot is None) self.assert_(self.scene.lat is None) self.assert_(self.scene.lon is None) self.assert_(self.scene.instrument_name is None) self.assertEquals(self.scene.channels_to_load, set([])) for i, chn in enumerate(self.scene.channels): self.assertEquals(chn.name, channels[i][0]) self.assertEquals(chn.wavelength_range, list(channels[i][1])) self.assertEquals(chn.resolution, channels[i][2]) def test_init_time_slot(self): """Creation of a satellite instrument scene. """ channels = [["00_7", (0.5, 0.7, 0.9), 2500], ["06_4", (5.7, 6.4, 7.1), 5000], ["11_5", (10.5, 11.5, 12.5), 5000]] class SatelliteInstrumentScene2(SatelliteInstrumentScene): """Dummy satinst class. 
""" channel_list = channels # time_slot time_slot = datetime.datetime.now() self.scene = SatelliteInstrumentScene2(time_slot=time_slot) self.assertEquals(self.scene.satname, "") self.assertEquals(self.scene.number, "") self.assertEquals(self.scene.time_slot, time_slot) self.assert_(self.scene.orbit is None) self.assert_(self.scene.area is None) self.assert_(self.scene.lat is None) self.assert_(self.scene.lon is None) self.assert_(self.scene.instrument_name is None) self.assertEquals(self.scene.channels_to_load, set([])) for i, chn in enumerate(self.scene.channels): self.assertEquals(chn.name, channels[i][0]) self.assertEquals(chn.wavelength_range, list(channels[i][1])) self.assertEquals(chn.resolution, channels[i][2]) def test_init(self): """Creation of a satellite instrument scene. """ channels = [["00_7", (0.5, 0.7, 0.9), 2500], ["06_4", (5.7, 6.4, 7.1), 5000], ["11_5", (10.5, 11.5, 12.5), 5000]] class SatelliteInstrumentScene2(SatelliteInstrumentScene): """Dummy satinst class. """ channel_list = channels self.scene = SatelliteInstrumentScene2() self.assertEquals(self.scene.satname, "") self.assertEquals(self.scene.number, "") self.assert_(self.scene.time_slot is None) self.assert_(self.scene.orbit is None) self.assert_(self.scene.area is None) self.assert_(self.scene.lat is None) self.assert_(self.scene.lon is None) self.assert_(self.scene.instrument_name is None) self.assertEquals(self.scene.channels_to_load, set([])) for i, chn in enumerate(self.scene.channels): self.assertEquals(chn.name, channels[i][0]) self.assertEquals(chn.wavelength_range, list(channels[i][1])) self.assertEquals(chn.resolution, channels[i][2]) def test_setitem(self): """__setitem__ for sat scenes. """ channels = [["00_7", (0.5, 0.7, 0.9), 2500], ["06_4", (5.7, 6.4, 7.1), 5000], ["11_5", (10.5, 11.5, 12.5), 5000]] class SatelliteInstrumentScene2(SatelliteInstrumentScene): """Dummy satinst class. """ channel_list = channels self.scene = SatelliteInstrumentScene2() self.assertRaises(TypeError, self.scene.__setitem__, 10.8, "rsutienrt") a = np.ma.array([1, 2, 3]) self.scene[6.4] = a self.assertTrue( isinstance(self.scene[6.4].data, np.ma.core.MaskedArray)) def test_getitem(self): """__getitem__ for sat scenes. """ # empty scene self.scene = SatelliteInstrumentScene() self.assertRaises(KeyError, self.scene.__getitem__, np.random.uniform(100)) self.assertRaises(KeyError, self.scene.__getitem__, int(np.random.uniform(10000))) self.assertRaises(KeyError, self.scene.__getitem__, random_string(4)) # scene with 3 channels channels = [["00_7", (0.5, 0.7, 0.9), 2500], ["06_4", (5.7, 6.4, 7.1), 5000], ["11_5", (10.5, 11.5, 12.5), 5000]] class SatelliteInstrumentScene2(SatelliteInstrumentScene): """Dummy satinst class. 
""" channel_list = channels self.scene = SatelliteInstrumentScene2() for chn in channels: self.assertEquals(self.scene[chn[0]].name, chn[0]) for i in range(3): self.assertEquals(self.scene[chn[1][i]].wavelength_range[i], chn[1][i]) self.assertEquals(self.scene[chn[2]].resolution, chn[2]) self.assertEquals(self.scene[(chn[0], chn[2])].name, chn[0]) self.assertRaises(KeyError, self.scene.__getitem__, []) self.assertRaises(KeyError, self.scene.__getitem__, random_string(5)) self.assertRaises(TypeError, self.scene.__getitem__, set([])) self.assertRaises(KeyError, self.scene.__getitem__, 5.0) self.assertEquals(len(self.scene.__getitem__(5000, aslist=True)), 2) chans = self.scene.__getitem__(5000, aslist=True) self.assertEquals(self.scene[chans[0].name].name, channels[1][0]) self.assertEquals(self.scene[chans[1].name].name, channels[2][0]) def test_check_channels(self): """Check loaded channels. """ # No data loaded channels = [["00_7", (0.5, 0.7, 0.9), 2500], ["06_4", (5.7, 6.4, 7.1), 5000], ["11_5", (10.5, 11.5, 12.5), 5000]] class SatelliteInstrumentScene2(SatelliteInstrumentScene): """Dummy satinst class. """ channel_list = channels self.scene = SatelliteInstrumentScene2() for chn in channels: self.assertRaises( NotLoadedError, self.scene.check_channels, chn[0]) self.assertRaises( NotLoadedError, self.scene.check_channels, chn[2]) for i in range(3): self.assertRaises(NotLoadedError, self.scene.check_channels, chn[1][i]) # With data self.scene[0.7] = np.ma.array(np.random.rand(3, 3), mask=np.array(np.random.rand(3, 3) * 2, dtype=int)) self.scene[6.4] = np.ma.array(np.random.rand(3, 3), mask=np.array(np.random.rand(3, 3) * 2, dtype=int)) self.scene[11.5] = np.ma.array(np.random.rand(3, 3), mask=np.array(np.random.rand(3, 3) * 2, dtype=int)) self.assertTrue(self.scene.check_channels(0.7, 6.4, 11.5)) self.assertRaises( KeyError, self.scene.check_channels, random_string(5)) def test_loaded_channels(self): """Loaded channels list. """ # No data loaded channels = [["00_7", (0.5, 0.7, 0.9), 2500], ["06_4", (5.7, 6.4, 7.1), 5000], ["11_5", (10.5, 11.5, 12.5), 5000]] class SatelliteInstrumentScene2(SatelliteInstrumentScene): """Dummy satinst class. """ channel_list = channels self.scene = SatelliteInstrumentScene2() self.assertEquals(self.scene.loaded_channels(), set([])) # With data self.scene[0.7] = np.ma.array(np.random.rand(3, 3), mask=np.array(np.random.rand(3, 3) * 2, dtype=int)) self.scene[6.4] = np.ma.array(np.random.rand(3, 3), mask=np.array(np.random.rand(3, 3) * 2, dtype=int)) self.scene[11.5] = np.ma.array(np.random.rand(3, 3), mask=np.array(np.random.rand(3, 3) * 2, dtype=int)) self.assertEquals(set([chn.name for chn in self.scene.loaded_channels()]), set(["00_7", "06_4", "11_5"])) def test_project(self): """Projecting a scene. """ area = random_string(8) area2 = random_string(8) # scene with 3 channels channels = [["00_7", (0.5, 0.7, 0.9), 2500], ["06_4", (5.7, 6.4, 7.1), 5000], ["11_5", (10.5, 11.5, 12.5), 5000]] class SatelliteInstrumentScene2(SatelliteInstrumentScene): """Dummy satinst class. 
""" instrument_name = random_string(8) channel_list = channels # case of a swath self.scene = SatelliteInstrumentScene2(area=None) # With data self.scene[0.7] = np.ma.array(np.random.rand(3, 3), mask=np.array(np.random.rand(3, 3) * 2, dtype=int)) self.scene[6.4] = np.ma.array(np.random.rand(3, 3), mask=np.array(np.random.rand(3, 3) * 2, dtype=int)) self.scene[6.4].area = MagicMock() res = self.scene.project(area2) self.assertEquals(res[0.7].shape, (3, 3)) self.assertEquals(res[6.4].shape, (3, 3)) self.assertRaises(KeyError, res.__getitem__, 11.5) self.scene[0.7].area = self.scene[6.4].area res = self.scene.project(area2, channels=[0.7]) self.assertEquals(res[0.7].shape, (3, 3)) self.assertRaises(KeyError, res.__getitem__, 6.4) res = self.scene.project(area2, channels=[0.7, 11.5]) self.assertEquals(res[0.7].shape, (3, 3)) self.assertRaises(KeyError, res.__getitem__, 11.5) res = self.scene.project(area2, channels=[]) self.assertRaises(KeyError, res.__getitem__, 0.7) self.assertRaises(TypeError, self.scene.project, area2, channels=11.5) # case of a grid self.scene = SatelliteInstrumentScene2(area=area) # With data self.scene[0.7] = np.ma.array(np.random.rand(3, 3), mask=np.array(np.random.rand(3, 3) * 2, dtype=int)) self.scene[6.4] = np.ma.array(np.random.rand(3, 3), mask=np.array(np.random.rand(3, 3) * 2, dtype=int)) self.scene[11.5] = np.ma.array(np.random.rand(3, 3), mask=np.array(np.random.rand(3, 3) * 2, dtype=int)) res = self.scene.project(area2) self.assertEquals(res[11.5].shape, (3, 3)) self.scene[0.7].area = MagicMock() res = self.scene.project(area2, channels=[0.7]) self.assertEquals(res[0.7].shape, (3, 3)) self.assertRaises(KeyError, res.__getitem__, 6.4) self.scene[6.4].area = MagicMock() self.scene[11.5].area = MagicMock() res = self.scene.project(area2) self.assertEquals(res[0.7].shape, (3, 3)) # case of self projection self.scene = SatelliteInstrumentScene2(area=area) # With data self.scene[0.7] = np.ma.array(np.random.rand(3, 3), mask=np.array(np.random.rand(3, 3) * 2, dtype=int)) self.scene[6.4] = np.ma.array(np.random.rand(3, 3), mask=np.array(np.random.rand(3, 3) * 2, dtype=int)) self.scene[11.5] = np.ma.array(np.random.rand(3, 3), mask=np.array(np.random.rand(3, 3) * 2, dtype=int)) self.scene[6.4].area = MagicMock() res = self.scene.project(area) self.assertEquals(res[0.7].shape, (3, 3)) self.assertEquals(res[6.4].shape, (3, 3)) self.assertEquals(res[11.5].shape, (3, 3)) self.scene[11.5].area = self.scene[6.4].area self.scene[0.7].area = self.scene[6.4].area res = self.scene.project(area, channels=None) self.assertEquals(res[0.7].shape, (3, 3)) self.assertEquals(res[6.4].shape, (3, 3)) self.assertEquals(res[11.5].shape, (3, 3)) def test_load(self): """Loading channels into a scene. """ channels = [["00_7", (0.5, 0.7, 0.9), 2500], ["06_4", (5.7, 6.4, 7.1), 5000], ["11_5", (10.5, 11.5, 12.5), 5000]] class SatelliteInstrumentScene2(SatelliteInstrumentScene): """Dummy satinst class. 
""" instrument_name = random_string(8) channel_list = channels self.scene = SatelliteInstrumentScene2() self.assertRaises(TypeError, self.scene.load, "00_7") self.scene.load(["00_7"]) self.assertEquals(set(), self.scene.channels_to_load) self.scene = SatelliteInstrumentScene2() self.scene.load() self.assertEquals(set(), self.scene.channels_to_load) self.scene.load(["CTTH"]) # Test the reinitialization of channels_to_load # self.scene = SatelliteInstrumentScene2() # self.assertRaises(ValueError, self.scene.load, ["00_7"], area_extent="bla") # self.scene.load(["00_7"], area_extent="bla") # self.assertEquals(set(["00_7"]), self.scene.channels_to_load) # self.scene.load(["06_4"]) # self.assertEquals(len(self.scene.loaded_channels()), 1) # self.assertEquals(self.scene.loaded_channels()[0].name, "06_4") # self.scene.load(["CTTH"]) # def test_assemble_segments(self): # """Assembling segments in a single satscene object. # """ # channels = [["00_7", (0.5, 0.7, 0.9), 2500], # ["06_4", (5.7, 6.4, 7.1), 5000], # ["11_5", (10.5, 11.5, 12.5), 5000]] # class SatelliteInstrumentScene2(SatelliteInstrumentScene): # """Dummy satinst class. # """ # satname = random_string(8) # number = random_string(8) # instrument_name = random_string(8) # channel_list = channels # self.scene = SatelliteInstrumentScene2() # scene2 = SatelliteInstrumentScene2() # self.scene.lon = np.ma.array(np.random.rand(3, 3), # mask = np.array(np.random.rand(3, 3) * 2, # dtype = int)) # self.scene.lat = np.ma.array(np.random.rand(3, 3), # mask = np.array(np.random.rand(3, 3) * 2, # dtype = int)) # self.scene[0.7] = np.ma.array(np.random.rand(3, 3), # mask = np.array(np.random.rand(3, 3) * 2, # dtype = int)) # self.scene[6.4] = np.ma.array(np.random.rand(3, 3), # mask = np.array(np.random.rand(3, 3) * 2, # dtype = int)) # scene2.lon = np.ma.array(np.random.rand(3, 3), # mask = np.array(np.random.rand(3, 3) * 2, # dtype = int)) # scene2.lat = np.ma.array(np.random.rand(3, 3), # mask = np.array(np.random.rand(3, 3) * 2, # dtype = int)) # scene2[0.7] = np.ma.array(np.random.rand(3, 3), # mask = np.array(np.random.rand(3, 3) * 2, # dtype = int)) # scene2[11.5] = np.ma.array(np.random.rand(3, 3), # mask = np.array(np.random.rand(3, 3) * 2, # dtype = int)) # big_scene = mpop.scene.assemble_segments([self.scene, scene2]) # data0 = big_scene[0.7].data # data1 = self.scene[0.7].data # data2 = scene2[0.7].data # self.assertTrue(np.ma.allclose(data0, np.ma.concatenate((data1, data2)), # rtol=EPSILON)) # data0 = big_scene[0.7].data.mask # data1 = self.scene[0.7].data.mask # data2 = scene2[0.7].data.mask # self.assertTrue(np.ma.allclose(data0, np.ma.concatenate((data1, data2)), # rtol=EPSILON)) # data0 = big_scene[6.4].data # data1 = self.scene[6.4].data # data2 = np.ma.masked_all_like(data1) # self.assertTrue(np.ma.allclose(data0, np.ma.concatenate((data1, data2)), # rtol=EPSILON)) # data0 = big_scene[6.4].data.mask # data1 = self.scene[6.4].data.mask # data2 = data2.mask # self.assertTrue(np.ma.allclose(data0, np.ma.concatenate((data1, data2)), # rtol=EPSILON)) # data0 = big_scene[11.5].data # data2 = scene2[11.5].data # data1 = np.ma.masked_all_like(data2) # self.assertTrue(np.ma.allclose(data0, np.ma.concatenate((data1, data2)), # rtol=EPSILON)) # data0 = big_scene[11.5].data.mask # data1 = data1.mask # data2 = scene2[11.5].data.mask # self.assertTrue(np.ma.allclose(data0, np.ma.concatenate((data1, data2)), # rtol = EPSILON)) def tearDown(self): """Unpatch foreign modules. 
""" unpatch_configparser() unpatch_projector() def suite(): """The test suite for test_scene. """ loader = unittest.TestLoader() mysuite = unittest.TestSuite() mysuite.addTest(loader.loadTestsFromTestCase(TestSatelliteScene)) mysuite.addTest(loader.loadTestsFromTestCase(TestSatelliteInstrumentScene)) return mysuite mpop-1.5.0/mpop/tests/test_seviri.py000066400000000000000000000211141317160620000175120ustar00rootroot00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright (c) 2010, 2011, 2014. # Author(s): # Martin Raspaud # This file is part of mpop. # mpop is free software: you can redistribute it and/or modify it under the # terms of the GNU General Public License as published by the Free Software # Foundation, either version 3 of the License, or (at your option) any later # version. # mpop is distributed in the hope that it will be useful, but WITHOUT ANY # WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR # A PARTICULAR PURPOSE. See the GNU General Public License for more details. # You should have received a copy of the GNU General Public License along with # mpop. If not, see . """Module for testing the :mod:`mpop.instruments.seviri` module. """ import random import unittest import numpy as np import mpop.instruments.seviri from mpop.imageo import geo_image def patch_scene_mask(): """Patch the :mod:`mpop.scene` module to avoid using it in these tests. """ class FakeChannel(object): """FakeChannel class. """ def __init__(self, val): del val self.data = np.ma.array((random.random(),)) def check_range(self, *args): """Dummy check_range function. """ del args return self.data class FakeSatscene(object): """Fake SatelliteInstrumentScene. """ __version__ = "fake" def __init__(self): self.channels = {} self.area = None self.time_slot = None self.error = [] def check_channels(self, *args): """Dummy check_channels function. """ for chn in args: if chn in self.error: raise RuntimeError() def __getitem__(self, key): if key not in self.channels: self.channels[key] = FakeChannel(key) return self.channels[key] mpop.instruments.visir.OldVisirCompositer = mpop.instruments.visir.VisirCompositer mpop.instruments.visir.VisirCompositer = FakeSatscene reload(mpop) reload(mpop.instruments) reload(mpop.instruments.seviri) def patch_scene(): """Patch the :mod:`mpop.scene` module to avoid using it in these tests. """ class FakeChannel(object): """FakeChannel class. """ def __init__(self, val): self.data = val def check_range(self, *args): """Dummy check_range function. """ del args return self.data class FakeSatscene(object): """Fake SatelliteInstrumentScene. """ __version__ = "fake" def __init__(self): self.channels = None self.area = None self.time_slot = None self._data_holder = self def check_channels(self, *args): """Dummy check_channels function. """ self.channels = args def __contains__(self, point): return True def __getitem__(self, key): if key == "_IR39Corr": return FakeChannel(3.75) elif key == "HRV": return FakeChannel(0.7) return FakeChannel(key) mpop.instruments.visir.OldVisirCompositer = mpop.instruments.visir.VisirCompositer mpop.instruments.visir.VisirCompositer = FakeSatscene reload(mpop) reload(mpop.instruments) reload(mpop.instruments.seviri) def unpatch_scene(): """Unpatch the :mod:`mpop.scene` module. 
""" mpop.instruments.visir.VisirCompositer = mpop.instruments.visir.OldVisirCompositer delattr(mpop.instruments.visir, "OldVisirCompositer") reload(mpop) reload(mpop.instruments) reload(mpop.instruments.visir) reload(mpop.instruments.seviri) def patch_geo_image(): """Patch the :mod:`imageo.geo_image` module to avoid using it in these tests. """ class FakeGeoImage: """FakeGeoImage class. """ def __init__(self, *args, **kwargs): self.args = args self.kwargs = kwargs self.lum = None self.channels = [self] def enhance(self, **kwargs): """Dummy enhance function. """ self.kwargs.update(kwargs) def replace_luminance(self, lum): """Dummy remplace_luminance. """ self.lum = lum geo_image.OldGeoImage = geo_image.GeoImage geo_image.GeoImage = FakeGeoImage def unpatch_geo_image(): """Unpatch the :mod:`imageo.geo_image` module. """ geo_image.GeoImage = geo_image.OldGeoImage delattr(geo_image, "OldGeoImage") class TestComposites(unittest.TestCase): """Class for testing the composites. """ def setUp(self): """Setup stuff. """ patch_geo_image() patch_scene() self.scene = mpop.instruments.seviri.SeviriCompositer() # def test_cloudtop(self): # """Test cloudtop. # """ # img = self.scene.cloudtop() # self.assertEquals(img.kwargs["mode"], "RGB") # self.assertEquals(img.kwargs["fill_value"], (0, 0, 0)) # self.assertEquals(img.args[0], (-3.75, -10.8, -12.0)) # self.assertEquals(img.kwargs["stretch"], (0.005, 0.005)) # self.assertTrue("crange" not in img.kwargs) # self.assertTrue("gamma" not in img.kwargs) # def test_night_fog(self): # """Test night_fog. # """ # img = self.scene.night_fog() # self.assertEquals(img.kwargs["mode"], "RGB") # self.assertEquals(img.kwargs["fill_value"], (0, 0, 0)) # self.assertEquals(img.args[0], (12.0 - 10.8, 10.8 - 3.75, 10.8)) # self.assertEquals(img.kwargs["crange"], ((-4, 2), # (0, 6), # (243, 293))) # self.assertEquals(img.kwargs["gamma"], (1.0, 2.0, 1.0)) # self.assertTrue("stretch" not in img.kwargs) # def test_hr_overview(self): # """Test hr_overview. # """ # img = self.scene.hr_overview() # self.assertEquals(img.kwargs["mode"], "RGB") # self.assertEquals(img.kwargs["fill_value"], (0, 0, 0)) # self.assertEquals(img.args[0], (0.635, 0.85, -10.8)) # self.assertEquals(img.kwargs["stretch"], "crude") # self.assertEquals(list(img.kwargs["gamma"]), list((1.6, 1.6, 1.1))) # self.assertTrue("crange" not in img.kwargs) # self.assertEquals(img.lum.kwargs["mode"], "L") # self.assertEquals(img.lum.kwargs["crange"], (0, 100)) # self.assertEquals(img.lum.kwargs["gamma"], 2.0) # self.assertTrue("stretch" not in img.lum.kwargs) # self.assertTrue("fill_value" not in img.lum.kwargs) # def test_hr_visual(self): # """Test hr_visual. # """ # img = self.scene.hr_visual() # self.assertEquals(img.kwargs["mode"], "L") # self.assertEquals(img.kwargs["fill_value"], 0) # self.assertEquals(img.args[0], 0.7) # self.assertEquals(img.kwargs["stretch"], "crude") # self.assertTrue("crange" not in img.kwargs) # self.assertTrue("gamma" not in img.kwargs) def tearDown(self): unpatch_scene() unpatch_geo_image() class TestCo2Corr(unittest.TestCase): """Class for testing the composites. """ def setUp(self): """Setup stuff. """ patch_geo_image() patch_scene_mask() self.scene = mpop.instruments.seviri.SeviriCompositer() def test_co2corr(self): """Test CO2 correction. 
""" res = self.scene.co2corr() bt039 = self.scene[3.9].data bt108 = self.scene[10.8].data bt134 = self.scene[13.4].data dt_co2 = (bt108-bt134)/4.0 rcorr = bt108 ** 4 - (bt108-dt_co2) ** 4 t4_co2corr = bt039 ** 4 + rcorr if t4_co2corr < 0.0: t4_co2corr = 0 solution = t4_co2corr ** 0.25 self.assertEquals(res, solution) self.scene.error = [3.75] res = self.scene.co2corr() self.assertTrue(res is None) def tearDown(self): unpatch_scene() unpatch_geo_image() def suite(): """The test suite for test_seviri. """ loader = unittest.TestLoader() mysuite = unittest.TestSuite() mysuite.addTest(loader.loadTestsFromTestCase(TestComposites)) mysuite.addTest(loader.loadTestsFromTestCase(TestCo2Corr)) return mysuite mpop-1.5.0/mpop/tests/test_viirs_sdr.py000066400000000000000000000054561317160620000202300ustar00rootroot00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- """Module for testing the mpop.channel module. """ import unittest from datetime import datetime from mpop.satin.viirs_sdr import _get_swathsegment class TestViirsSDRReader(unittest.TestCase): """Class for testing the VIIRS SDR reader class. """ def test_get_swath_segment(self): """ Test choosing swath segments based on datatime interval """ filenames = [ "SVM15_npp_d20130312_t1034305_e1035546_b07108_c20130312110058559507_cspp_dev.h5", "SVM15_npp_d20130312_t1035559_e1037201_b07108_c20130312110449303310_cspp_dev.h5", "SVM15_npp_d20130312_t1037213_e1038455_b07108_c20130312110755391459_cspp_dev.h5", "SVM15_npp_d20130312_t1038467_e1040109_b07108_c20130312111106961103_cspp_dev.h5", "SVM15_npp_d20130312_t1040121_e1041363_b07108_c20130312111425464510_cspp_dev.h5", "SVM15_npp_d20130312_t1041375_e1043017_b07108_c20130312111720550253_cspp_dev.h5", "SVM15_npp_d20130312_t1043029_e1044271_b07108_c20130312112246726129_cspp_dev.h5", "SVM15_npp_d20130312_t1044283_e1045525_b07108_c20130312113037160389_cspp_dev.h5", "SVM15_npp_d20130312_t1045537_e1047179_b07108_c20130312114330237590_cspp_dev.h5", "SVM15_npp_d20130312_t1047191_e1048433_b07108_c20130312120148075096_cspp_dev.h5", "SVM15_npp_d20130312_t1048445_e1050070_b07108_c20130312120745231147_cspp_dev.h5", ] # # Test search for multiple granules result = [ "SVM15_npp_d20130312_t1038467_e1040109_b07108_c20130312111106961103_cspp_dev.h5", "SVM15_npp_d20130312_t1040121_e1041363_b07108_c20130312111425464510_cspp_dev.h5", "SVM15_npp_d20130312_t1041375_e1043017_b07108_c20130312111720550253_cspp_dev.h5", "SVM15_npp_d20130312_t1043029_e1044271_b07108_c20130312112246726129_cspp_dev.h5", "SVM15_npp_d20130312_t1044283_e1045525_b07108_c20130312113037160389_cspp_dev.h5", ] tstart = datetime(2013, 3, 12, 10, 39) tend = datetime(2013, 3, 12, 10, 45) sublist = _get_swathsegment(filenames, tstart, tend) self.assert_(sublist == result) # # Test search for single granule tslot = datetime(2013, 3, 12, 10, 45) result_file = [ "SVM15_npp_d20130312_t1044283_e1045525_b07108_c20130312113037160389_cspp_dev.h5", ] single_file = _get_swathsegment(filenames, tslot) self.assert_(result_file == single_file) def suite(): """The test suite for test_viirs_sdr. """ loader = unittest.TestLoader() mysuite = unittest.TestSuite() mysuite.addTest(loader.loadTestsFromTestCase(TestViirsSDRReader)) return mysuite mpop-1.5.0/mpop/tests/test_visir.py000066400000000000000000000326041317160620000173530ustar00rootroot00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright (c) 2010, 2011, 2014. # Author(s): # Martin Raspaud # This file is part of mpop. 
# mpop is free software: you can redistribute it and/or modify it under the # terms of the GNU General Public License as published by the Free Software # Foundation, either version 3 of the License, or (at your option) any later # version. # mpop is distributed in the hope that it will be useful, but WITHOUT ANY # WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR # A PARTICULAR PURPOSE. See the GNU General Public License for more details. # You should have received a copy of the GNU General Public License along with # mpop. If not, see . """Module for testing the :mod:`mpop.instruments.visir` module. """ import random import unittest import numpy as np import mpop.instruments.visir import mpop.scene from mpop.imageo import geo_image def patch_scene(): """Patch the :mod:`mpop.scene` module to avoid using it in these tests. """ class FakeChannel(object): """FakeChannel class. """ def __init__(self, val): self.data = val self.area = None def check_range(self): """Dummy check_range function. """ return self.data class FakeSatscene(object): """Fake SatelliteInstrumentScene. """ __version__ = "fake" def __init__(self): self.channels = None self.area = None self.time_slot = None def check_channels(self, *args): """Dummy check_channels function. """ self.channels = args def __getitem__(self, key): return FakeChannel(key) mpop.scene.OldSatelliteInstrumentScene = mpop.scene.SatelliteInstrumentScene mpop.scene.SatelliteInstrumentScene = FakeSatscene reload(mpop) reload(mpop.instruments) reload(mpop.instruments.visir) def unpatch_scene(): """Unpatch the :mod:`mpop.scene` module. """ mpop.scene.SatelliteInstrumentScene = mpop.scene.OldSatelliteInstrumentScene delattr(mpop.scene, "OldSatelliteInstrumentScene") reload(mpop) reload(mpop.instruments) reload(mpop.instruments.visir) def patch_geo_image(): """Patch the :mod:`imageo.geo_image` module to avoid using it in these tests. """ class FakeGeoImage: """FakeGeoImage class. """ def __init__(self, *args, **kwargs): self.args = args self.kwargs = kwargs def enhance(self, **kwargs): """Dummy enhance function. """ self.kwargs.update(kwargs) def clip(self): """Dummy clip function. """ pass geo_image.OldGeoImage = geo_image.GeoImage geo_image.GeoImage = FakeGeoImage def unpatch_geo_image(): """Unpatch the :mod:`imageo.geo_image` module. """ geo_image.GeoImage = geo_image.OldGeoImage delattr(geo_image, "OldGeoImage") class TestComposites(unittest.TestCase): """Class for testing the composites. """ def setUp(self): """Setup stuff. """ patch_geo_image() patch_scene() self.scene = mpop.instruments.visir.VisirCompositer(mpop.scene.SatelliteInstrumentScene()) def test_channel_image(self): """Test channel_image. """ chn = random.random() img = self.scene.channel_image(chn) self.assertEquals(chn, img.args[0]) self.assertEquals(img.kwargs["stretch"], "crude") self.assertEquals(img.kwargs["mode"], "L") self.assertEquals(img.kwargs["fill_value"], 0) self.assertTrue("crange" not in img.kwargs) def test_overview(self): """Test overview. """ img = self.scene.overview() self.assertEquals(img.kwargs["mode"], "RGB") self.assertEquals(img.kwargs["fill_value"], (0, 0, 0)) self.assertEquals(img.args[0], (0.635, 0.85, -10.8)) self.assertEquals(img.kwargs["stretch"], "crude") self.assertEquals(img.kwargs["gamma"], 1.6) self.assertTrue("crange" not in img.kwargs) #self.assertEquals(self.scene.overview.prerequisites, # set([0.635, 0.85, 10.8])) def test_airmass(self): """Test airmass. 
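
        The airmass RGB checked here is composed of the channel
        differences (WV6.7 - WV7.3, IR9.7 - IR10.8, WV6.7), which is
        what the expected args (-0.6, -1.1, 6.7) encode.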
""" img = self.scene.airmass() self.assertEquals(img.kwargs["mode"], "RGB") self.assertEquals(img.kwargs["fill_value"], (0, 0, 0)) self.assertTrue(np.allclose(np.array(img.args[0]), np.array((-0.6, -1.1, 6.7)))) self.assertEquals(img.kwargs["crange"], ((-25, 0), (-40, 5), (243, 208))) self.assertTrue("gamma" not in img.kwargs) self.assertTrue("stretch" not in img.kwargs) #self.assertEquals(self.scene.airmass.prerequisites, set([6.7, 7.3, # 9.7, 10.8])) # def test_vis06(self): # """Test vis06. # """ # img = self.scene.vis06() # self.assertEquals(0.6, img.args[0]) # self.assertEquals(img.kwargs["mode"], "L") # self.assertEquals(img.kwargs["fill_value"], 0) # self.assertEquals(img.kwargs["stretch"], "crude") # self.assertTrue("gamma" not in img.kwargs) # self.assertTrue("crange" not in img.kwargs) # #self.assertEquals(self.scene.vis06.prerequisites, # # set([0.635])) # def test_ir108(self): # """Test ir108. # """ # img = self.scene.ir108() # self.assertEquals(10.8, img.args[0]) # self.assertEquals(img.kwargs["mode"], "L") # self.assertEquals(img.kwargs["fill_value"], 0) # self.assertEquals(img.kwargs["crange"], (-70 + 273.15, 57.5 + 273.15)) # self.assertEquals(img.kwargs["inverse"], True) # self.assertTrue("gamma" not in img.kwargs) # self.assertTrue("stretch" not in img.kwargs) # #self.assertEquals(self.scene.ir108.prerequisites, # # set([10.8])) # def test_wv_high(self): # """Test wv_high. # """ # img = self.scene.wv_high() # self.assertEquals(6.7, img.args[0]) # self.assertEquals(img.kwargs["mode"], "L") # self.assertEquals(img.kwargs["fill_value"], 0) # self.assertEquals(img.kwargs["stretch"], "linear") # self.assertEquals(img.kwargs["inverse"], True) # self.assertTrue("gamma" not in img.kwargs) # self.assertTrue("crange" not in img.kwargs) # #self.assertEquals(self.scene.wv_high.prerequisites, # # set([6.7])) # def test_wv_low(self): # """Test wv_low. # """ # img = self.scene.wv_low() # self.assertEquals(7.3, img.args[0]) # self.assertEquals(img.kwargs["mode"], "L") # self.assertEquals(img.kwargs["fill_value"], 0) # self.assertEquals(img.kwargs["stretch"], "linear") # self.assertEquals(img.kwargs["inverse"], True) # self.assertTrue("gamma" not in img.kwargs) # self.assertTrue("crange" not in img.kwargs) # #self.assertEquals(self.scene.wv_low.prerequisites, # # set([7.3])) def test_natural(self): """Test natural. """ img = self.scene.natural() self.assertEquals(img.kwargs["mode"], "RGB") self.assertEquals(img.kwargs["fill_value"], (0, 0, 0)) self.assertEquals(img.args[0], (1.63, 0.85, 0.635)) self.assertEquals(img.kwargs["crange"], ((0, 90), (0, 90), (0, 90))) self.assertEquals(img.kwargs["gamma"], 1.8) self.assertTrue("stretch" not in img.kwargs) #self.assertEquals(self.scene.natural.prerequisites, # set([0.635, 0.85, 1.63])) # def test_green_snow(self): # """Test green_snow. # """ # img = self.scene.green_snow() # self.assertEquals(img.kwargs["mode"], "RGB") # self.assertEquals(img.kwargs["fill_value"], (0, 0, 0)) # self.assertEquals(img.args[0], (1.63, 0.85, -10.8)) # self.assertEquals(img.kwargs["stretch"], "crude") # self.assertEquals(img.kwargs["gamma"], 1.6) # self.assertTrue("crange" not in img.kwargs) # #self.assertEquals(self.scene.green_snow.prerequisites, # # set([1.63, 0.85, 10.8])) # def test_red_snow(self): # """Test red_snow. 
# """ # img = self.scene.red_snow() # self.assertEquals(img.kwargs["mode"], "RGB") # self.assertEquals(img.kwargs["fill_value"], (0, 0, 0)) # self.assertEquals(img.args[0], (0.635, 1.63, -10.8)) # self.assertEquals(img.kwargs["stretch"], "crude") # self.assertTrue("crange" not in img.kwargs) # self.assertTrue("gamma" not in img.kwargs) # #self.assertEquals(self.scene.red_snow.prerequisites, # # set([1.63, 0.635, 10.8])) def test_convection(self): """Test convection. """ img = self.scene.convection() self.assertEquals(img.kwargs["mode"], "RGB") self.assertEquals(img.kwargs["fill_value"], (0, 0, 0)) self.assertEquals(img.args[0],(6.7 - 7.3, 3.75 - 10.8, 1.63 - 0.635)) self.assertEquals(img.kwargs["crange"], ((-30, 0), (0, 55), (-70, 20))) self.assertTrue("gamma" not in img.kwargs) self.assertTrue("stretch" not in img.kwargs) #self.assertEquals(self.scene.convection.prerequisites, # set([0.635, 1.63, 3.75, 6.7, 7.3, 10.8])) def test_dust(self): """Test dust. """ img = self.scene.dust() self.assertEquals(img.kwargs["mode"], "RGB") self.assertEquals(img.kwargs["fill_value"], (0, 0, 0)) self.assertEquals(img.args[0], (12.0 - 10.8, 10.8 - 8.7, 10.8)) self.assertEquals(img.kwargs["crange"], ((-4, 2), (0, 15), (261, 289))) self.assertEquals(img.kwargs["gamma"], (1.0, 2.5, 1.0)) self.assertTrue("stretch" not in img.kwargs) #self.assertEquals(self.scene.dust.prerequisites, # set([8.7, 10.8, 12.0])) def test_ash(self): """Test ash. """ img = self.scene.ash() self.assertEquals(img.kwargs["mode"], "RGB") self.assertEquals(img.kwargs["fill_value"], (0, 0, 0)) self.assertEquals(img.args[0], (12.0 - 10.8, 10.8 - 8.7, 10.8)) self.assertEquals(img.kwargs["crange"], ((-4, 2), (-4, 5), (243, 303))) self.assertTrue("gamma" not in img.kwargs) self.assertTrue("stretch" not in img.kwargs) #self.assertEquals(self.scene.ash.prerequisites, # set([8.7, 10.8, 12.0])) def test_fog(self): """Test fog. """ img = self.scene.fog() self.assertEquals(img.kwargs["mode"], "RGB") self.assertEquals(img.kwargs["fill_value"], (0, 0, 0)) self.assertEquals(img.args[0], (12.0 - 10.8, 10.8 - 8.7, 10.8)) self.assertEquals(img.kwargs["crange"], ((-4, 2), (0, 6), (243, 283))) self.assertEquals(img.kwargs["gamma"], (1.0, 2.0, 1.0)) self.assertTrue("stretch" not in img.kwargs) #self.assertEquals(self.scene.fog.prerequisites, # set([8.7, 10.8, 12.0])) def test_night_fog(self): """Test night_fog. """ img = self.scene.night_fog() self.assertEquals(img.kwargs["mode"], "RGB") self.assertEquals(img.kwargs["fill_value"], (0, 0, 0)) self.assertEquals(img.args[0], (12.0 - 10.8, 10.8 - 3.75, 10.8)) self.assertEquals(img.kwargs["crange"], ((-4, 2), (0, 6), (243, 293))) self.assertEquals(img.kwargs["gamma"], (1.0, 2.0, 1.0)) self.assertTrue("stretch" not in img.kwargs) #self.assertEquals(self.scene.night_fog.prerequisites, # set([3.75, 10.8, 12.0])) def test_cloud_top(self): """Test cloud_top. """ img = self.scene.cloudtop() self.assertEquals(img.kwargs["mode"], "RGB") self.assertEquals(img.kwargs["fill_value"], (0, 0, 0)) self.assertEquals(img.args[0], (-3.75, -10.8, -12.0)) self.assertTrue("crange" not in img.kwargs) self.assertTrue("gamma" not in img.kwargs) self.assertEquals(img.kwargs["stretch"], (0.005, 0.005)) #self.assertEquals(self.scene.cloudtop.prerequisites, # set([3.75, 10.8, 12.0])) def tearDown(self): unpatch_scene() unpatch_geo_image() def suite(): """The test suite for test_visir. 
""" loader = unittest.TestLoader() mysuite = unittest.TestSuite() mysuite.addTest(loader.loadTestsFromTestCase(TestComposites)) return mysuite mpop-1.5.0/mpop/tools.py000066400000000000000000000220661317160620000151570ustar00rootroot00000000000000# -*- coding: utf-8 -*- # Copyright (c) 2014, 2015 # # Author(s): # # Panu Lahtinen # # This file is part of mpop. # # mpop is free software: you can redistribute it and/or modify it under the # terms of the GNU General Public License as published by the Free Software # Foundation, either version 3 of the License, or (at your option) any later # version. # # mpop is distributed in the hope that it will be useful, but WITHOUT ANY # WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR # A PARTICULAR PURPOSE. See the GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along with # mpop. If not, see . '''Helper functions for eg. performing Sun zenith angle correction. ''' import numpy as np def sunzen_corr_cos(data, cos_zen, limit=80.): '''Perform Sun zenith angle correction to the given *data* using cosine of the zenith angle (*cos_zen*). The correction is limited to *limit* degrees (default: 80.0 degrees). For larger zenith angles, the correction is the same as at the *limit*. Both *data* and *cos_zen* are given as 2-dimensional Numpy arrays or Numpy MaskedArrays, and they should have equal shapes. ''' # Convert the zenith angle limit to cosine of zenith angle cos_limit = np.cos(np.radians(limit)) # Cosine correction lim_y, lim_x = np.where(cos_zen > cos_limit) data[lim_y, lim_x] /= cos_zen[lim_y, lim_x] # Use constant value (the limit) for larger zenith # angles lim_y, lim_x = np.where(cos_zen <= cos_limit) data[lim_y, lim_x] /= cos_limit return data def estimate_cth(IR_108, cth_atm="standard"): ''' Estimation of the cloud top height using the 10.8 micron channel limitations: this is the most simple approach a simple fit of the ir108 to the temperature profile * no correction for water vapour or any other trace gas * no viewing angle dependency * no correction for semi-transparent clouds optional input: cth_atm * "standard", "tropics", "midlatitude summer", "midlatitude winter", "subarctic summer", "subarctic winter" Matching the 10.8 micron temperature with atmosphere profile (s) AFGL atmospheric constituent profile. U.S. standard atmosphere 1976. (AFGL-TR-86-0110) (t) AFGL atmospheric constituent profile. tropical. (AFGL-TR-86-0110) (mw) AFGL atmospheric constituent profile. midlatitude summer. (AFGL-TR-86-0110) (ms) AFGL atmospheric constituent profile. midlatitude winter. (AFGL-TR-86-0110) (ss) AFGL atmospheric constituent profile. subarctic summer. (AFGL-TR-86-0110) (sw) AFGL atmospheric constituent profile. subarctic winter. (AFGL-TR-86-0110) Ulrich Hamann (MeteoSwiss) * "tropopause" Assuming a fixed tropopause height and a fixed temperature gradient Richard Mueller (DWD) output: parallax corrected channel the content of the channel will be parallax corrected. The name of the new channel will be *original_chan.name+'_PC'*, eg. "IR_108_PC". This name is also stored to the info dictionary of the originating channel. 
def estimate_cth(IR_108, cth_atm="standard"):
    '''Estimate the cloud top height using the 10.8 micron channel.

    Limitations: this is the simplest possible approach, a plain fit of the
    IR_108 brightness temperature to a temperature profile:
    * no correction for water vapour or any other trace gas
    * no viewing angle dependency
    * no correction for semi-transparent clouds

    optional input:
      cth_atm  * "standard", "tropics", "midlatitude summer",
                 "midlatitude winter", "subarctic summer", "subarctic winter"
                 Matching the 10.8 micron temperature with an atmospheric
                 profile:
                 (s)  AFGL atmospheric constituent profile,
                      U.S. standard atmosphere 1976 (AFGL-TR-86-0110)
                 (t)  AFGL atmospheric constituent profile,
                      tropical (AFGL-TR-86-0110)
                 (ms) AFGL atmospheric constituent profile,
                      midlatitude summer (AFGL-TR-86-0110)
                 (mw) AFGL atmospheric constituent profile,
                      midlatitude winter (AFGL-TR-86-0110)
                 (ss) AFGL atmospheric constituent profile,
                      subarctic summer (AFGL-TR-86-0110)
                 (sw) AFGL atmospheric constituent profile,
                      subarctic winter (AFGL-TR-86-0110)
                 Ulrich Hamann (MeteoSwiss)
               * "tropopause"
                 Assuming a fixed tropopause height and a fixed temperature
                 gradient
                 Richard Mueller (DWD)

    output:
      height   the estimated cloud top height in meters, as a masked Numpy
               array; clear-sky pixels and non-positive heights are masked
               out.

    Versions: 05.07.2016 initial version
              Ulrich Hamann (MeteoSwiss), Richard Mueller (DWD)
    '''

    print "*** estimating CTH using the 10.8 micrometer brightness temperature"

    if cth_atm.lower() != "tropopause":
        # define the atmospheric temperature profile
        import os
        from numpy import loadtxt, zeros, where, logical_and
        import mpop

        mpop_dir = os.path.dirname(mpop.__file__)
        afgl_file = mpop_dir + "/afgl.dat"
        print "... assume ", cth_atm, " atmosphere for temperature profile"

        if cth_atm.lower() == "standard" or cth_atm.lower() == "s":
            z, T = loadtxt(afgl_file, usecols=(0, 1), unpack=True, comments="#")
        elif cth_atm.lower() == "tropics" or cth_atm.lower() == "t":
            z, T = loadtxt(afgl_file, usecols=(0, 2), unpack=True, comments="#")
        elif cth_atm.lower() == "midlatitude summer" or cth_atm.lower() == "ms":
            z, T = loadtxt(afgl_file, usecols=(0, 3), unpack=True, comments="#")
        elif cth_atm.lower() == "midlatitude winter" or cth_atm.lower() == "mw":
            z, T = loadtxt(afgl_file, usecols=(0, 4), unpack=True, comments="#")
        elif cth_atm.lower() == "subarctic summer" or cth_atm.lower() == "ss":
            z, T = loadtxt(afgl_file, usecols=(0, 5), unpack=True, comments="#")
        elif cth_atm.lower() == "subarctic winter" or cth_atm.lower() == "sw":
            z, T = loadtxt(afgl_file, usecols=(0, 6), unpack=True, comments="#")
        else:
            print "*** Error in estimate_cth (mpop/tools.py)"
            print "    unknown temperature profile for CTH estimation: " \
                "cth_atm = ", cth_atm
            quit()

        height = zeros(IR_108.shape)
        # warmer than the lowest level -> clear sky
        height[where(IR_108 > T[-1])] = -1.

        print "    z0(km)   z1(km)    T0(K)    T1(K)  number of pixels"
        print "------------------------------------------------------"

        for i in range(z.size)[::-1]:
            # search for temperatures between layer i-1 and i
            ind = np.where(logical_and(T[i - 1] < IR_108, IR_108 < T[i]))
            # interpolate CTH according to the IR_108 temperature
            height[ind] = z[i] + (IR_108[ind] - T[i]) / \
                (T[i - 1] - T[i]) * (z[i - 1] - z[i])
            # verbose output
            print " {0:8.1f} {1:8.1f} {2:8.1f} {3:8.1f} {4:8d}".format(
                z[i], z[i - 1], T[i], T[i - 1], len(ind[0]))

            # if the temperature increases above 8 km -> tropopause detected
            if z[i] >= 8. and T[i] <= T[i - 1]:
                # no cloud above the tropopause
                break
            # no cloud heights above 20 km
            if z[i] >= 20.:
                break

        # if height is still 0 -> cloud colder than the tropopause
        # -> CTH == tropopause height
        height[np.where(height == 0)] = z[i]

    else:
        Htropo = 11.0  # km
        # This is an assumption. It should be optimized by making it
        # dependent on region and season. It might be good to include the
        # ITC in the region of interest; that would make a fixed Htropo
        # value more reliable.
        Tmin = np.amin(IR_108)
        # For Tmin it might be better to use the 5th or 10th percentile,
        # else overshooting tops induce further uncertainties in the
        # calculation of the cloud height. However, numpy provides weird
        # results for the 5th percentile; hence the minimum is used in
        # the working version.
        print "... assume tropopause height ", Htropo, \
            ", tropopause temperature ", Tmin, "K (", Tmin - 273.16, "deg C)"
        print "    and a constant temperature gradient of 6.5 K/km"

        height = -(IR_108 - Tmin) / 6.5 + Htropo
        # The temperature gradient of 6.5 K/km is an assumption derived
        # from the USS and MPI standard profiles. It has to be improved
        # as well.

    # convert to a masked array and from km to meter
    height = np.ma.masked_where(height <= 0, height, copy=False) * 1000.

    if False:
        # optional debug display of the estimated cloud top height
        from trollimage.image import Image as trollimage
        from trollimage.colormap import rainbow
        from copy import deepcopy

        prop = height
        min_data = prop.min()
        max_data = prop.max()
        print "    estimated CTH (meter) (min/max): ", min_data, max_data
        min_data = 0
        max_data = 12000
        colormap = deepcopy(rainbow)
        colormap.set_range(min_data, max_data)
        img = trollimage(prop, mode="L")  # fill_value=[0, 0, 0]
        img.colorize(colormap)
        img.show()

    # return the cloud top height in meter
    return height
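
# --- Illustration (not part of the original module) --------------------------
# A minimal sketch of the "tropopause" branch of estimate_cth, which needs no
# afgl.dat profile file.  The 10.8 um brightness temperatures below are
# made-up values in Kelvin.
def _example_estimate_cth():
    """Estimate CTH for a toy brightness temperature field."""
    ir_108 = np.array([[210.0, 250.0], [270.0, 290.0]])
    # With the assumed 11 km tropopause and 6.5 K/km gradient, the coldest
    # pixel maps to 11 km; the warmest falls below 0 km and gets masked.
    return estimate_cth(ir_108, cth_atm="tropopause")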
def viewzen_corr(data, view_zen):
    """Apply atmospheric correction on the given *data* using the
    specified satellite zenith angles (*view_zen*).  Both input data
    are given as 2-dimensional Numpy (masked) arrays, and they should
    have equal shapes.
    The *data* array will be changed in place and has to be copied before.
    """
    def ratio(value, v_null, v_ref):
        return (value - v_null) / (v_ref - v_null)

    def tau0(t):
        T_0 = 210.0
        T_REF = 320.0
        TAU_REF = 9.85
        return (1 + TAU_REF) ** ratio(t, T_0, T_REF) - 1

    def tau(t):
        T_0 = 170.0
        T_REF = 295.0
        TAU_REF = 1.0
        M = 4
        return TAU_REF * ratio(t, T_0, T_REF) ** M

    def delta(z):
        Z_0 = 0.0
        Z_REF = 70.0
        DELTA_REF = 6.2
        return (1 + DELTA_REF) ** ratio(z, Z_0, Z_REF) - 1

    y0, x0 = np.ma.where(view_zen == 0)
    data[y0, x0] += tau0(data[y0, x0])

    y, x = np.ma.where((view_zen > 0) & (view_zen < 90) & (~data.mask))
    data[y, x] += tau(data[y, x]) * delta(view_zen[y, x])
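
# --- Illustration (not part of the original module) --------------------------
# A minimal sketch of viewzen_corr on toy data.  The function modifies *data*
# in place and returns nothing, which is why its docstring insists on copying
# first; all values below are made up.
def _example_viewzen_corr():
    """Apply viewzen_corr to a copy of a toy brightness temperature field."""
    data = np.ma.array([[260.0, 260.0], [260.0, 260.0]],
                       mask=[[False, False], [False, False]])
    view_zen = np.ma.array([[0.0, 30.0], [60.0, 85.0]])
    corrected = data.copy()
    viewzen_corr(corrected, view_zen)
    return corrected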
""" pass def debug_on(): """Turn debugging logging on. """ logging_on(logging.DEBUG) _is_logging_on = False # Read default log level from mpop's config file _config = ConfigParser.ConfigParser() _config.read(os.path.join(CONFIG_PATH, 'mpop.cfg')) try: default_loglevel = _config.get('general', 'loglevel') except (ConfigParser.NoOptionError, ConfigParser.NoSectionError): default_loglevel = 'WARNING' default_loglevel = getattr(logging, default_loglevel.upper()) del _config # logging_on(default_loglevel) def logging_on(level=default_loglevel): """Turn logging on. """ global _is_logging_on if not _is_logging_on: console = logging.StreamHandler() console.setFormatter(logging.Formatter("[%(levelname)s: %(asctime)s :" " %(name)s] %(message)s", '%Y-%m-%d %H:%M:%S')) console.setLevel(level) logging.getLogger('').addHandler(console) _is_logging_on = True log = logging.getLogger('') log.setLevel(level) for h in log.handlers: h.setLevel(level) def logging_off(): """Turn logging off. """ logging.getLogger('').handlers = [NullHandler()] def get_logger(name): """Return logger with null handle """ log = logging.getLogger(name) if not log.handlers: log.addHandler(NullHandler()) return log ### import re def strftime(utctime, format_string): """Like datetime.strftime, except it works with string formatting conversion specifier items on windows, making the assumption that all conversion specifiers use mapping keys. E.g.: >>> from datetime import datetime >>> t = datetime.utcnow() >>> a = "blabla%Y%d%m-%H%M%S-%(value)s" >>> strftime(t, a) 'blabla20120911-211448-%(value)s' """ res = format_string for i in re.finditer("%\w", format_string): res = res.replace(i.group(), utctime.strftime(i.group())) return res mpop-1.5.0/mpop/version.py000066400000000000000000000014651317160620000155040ustar00rootroot00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright (c) 2013 Martin Raspaud # Author(s): # Martin Raspaud # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # You should have received a copy of the GNU General Public License # along with this program. If not, see . """Version file. """ __version__ = "v1.5.0" mpop-1.5.0/setup.cfg000066400000000000000000000001761317160620000143110ustar00rootroot00000000000000[bdist_rpm] requires=h5py pyresample numexpr pyhdf mipp release=1 doc_files = doc/Makefile doc/source/*.rst doc/examples/*.py mpop-1.5.0/setup.py000066400000000000000000000053401317160620000142000ustar00rootroot00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright (c) 2009-2016. # SMHI, # Folkborgsvägen 1, # Norrköping, # Sweden # Author(s): # Martin Raspaud # Adam Dybbroe # This file is part of mpop. # mpop is free software: you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # mpop is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 

mpop-1.5.0/mpop/version.py

#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2013 Martin Raspaud

# Author(s):

#   Martin Raspaud <martin.raspaud@smhi.se>

# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.

# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.

# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.

"""Version file.
"""

__version__ = "v1.5.0"


mpop-1.5.0/setup.cfg

[bdist_rpm]
requires=h5py pyresample numexpr pyhdf mipp
release=1
doc_files = doc/Makefile doc/source/*.rst doc/examples/*.py


mpop-1.5.0/setup.py

#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2009-2016.

# SMHI,
# Folkborgsvägen 1,
# Norrköping,
# Sweden

# Author(s):

#   Martin Raspaud <martin.raspaud@smhi.se>
#   Adam Dybbroe

# This file is part of mpop.

# mpop is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.

# mpop is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.

# You should have received a copy of the GNU General Public License
# along with mpop.  If not, see <http://www.gnu.org/licenses/>.

"""Setup file for mpop.
"""
import os.path
from setuptools import setup
import imp

version = imp.load_source('mpop.version', 'mpop/version.py')

BASE_PATH = os.path.sep.join(os.path.dirname(
    os.path.realpath(__file__)).split(os.path.sep))

requires = ['numpy >=1.4.1', 'pyresample']

try:
    from PIL import Image
except ImportError:
    requires.append("pillow")

NAME = 'mpop'

setup(name=NAME,
      version=version.__version__,
      description='Meteorological post processing package',
      author='Martin Raspaud',
      author_email='martin.raspaud@smhi.se',
      classifiers=["Development Status :: 5 - Production/Stable",
                   "Intended Audience :: Science/Research",
                   "License :: OSI Approved :: GNU General Public License v3 " +
                   "or later (GPLv3+)",
                   "Operating System :: OS Independent",
                   "Programming Language :: Python",
                   "Topic :: Scientific/Engineering"],
      url="https://github.com/mraspaud/mpop",
      test_suite='mpop.tests.suite',
      packages=['mpop', 'mpop.satellites', 'mpop.instruments', 'mpop.satin',
                'mpop.satout', 'mpop.saturn', 'mpop.imageo',
                'mpop.imageo.formats'],
      data_files=[(os.path.join('etc', 'pytroll'),
                   [os.path.join('etc', 'geo_image.cfg'),
                    os.path.join('etc', 'eps_avhrrl1b_6.5.xml')])],
      zip_safe=False,
      install_requires=requires,
      tests_require=["mock", 'pyorbital >= v0.2.3'],
      extras_require={'xRIT': ['mipp >= 0.6.0'],
                      'hdf_eos': ['pyhdf'],
                      'viirs': ['h5py'],
                      'nc': ['netCDF4'],
                      'proj': ['pyresample'],
                      'pyspectral': ['pyspectral'],
                      'pyorbital': ['pyorbital >= v0.2.3'],
                      'gac_lac': ['pygac']}
      )
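
# --- Illustration (not part of the original file) -----------------------------
# The extras_require table above maps optional mpop features to the packages
# they need; a plausible way of pulling extras in at install time would be:
#
#     pip install .              # core only: numpy, pyresample (and pillow)
#     pip install .[viirs]       # adds h5py for the VIIRS reader
#     pip install ".[xRIT,nc]"   # extras can be combined
#
# The exact pip invocation depends on your shell and pip version; this is a
# sketch, not part of the package.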

mpop-1.5.0/test/test_readers.py

#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Author(s):
#
#   Panu Lahtinen

import unittest

import mpop.satin

'''Integration testing of
 - :mod:`mpop.satin`
'''


class TestReaders(unittest.TestCase):
    '''Class for testing mpop.satin'''

    def test_lonlat_to_geo_extent(self):
        '''Test conversion of longitudes and latitudes to area extent.'''

        # MSG3 proj4 string from
        #  xrit.sat.load(..., only_metadata=True).proj4_params
        proj4_str = 'proj=geos lon_0=0.00 lat_0=0.00 ' \
            'a=6378169.00 b=6356583.80 h=35785831.00'

        # MSG3 maximum extent
        max_extent = (-5567248.07, -5570248.48,
                      5570248.48, 5567248.07)

        # A few area extents in longitudes/latitudes
        area_extents_ll = [[-68.328121068060341,  # left longitude
                            18.363816196771392,   # down latitude
                            74.770372053870972,   # right longitude
                            75.66494585661934],   # up latitude
                           # all corners outside Earth's disc
                           [1e30, 1e30, 1e30, 1e30]
                           ]

        # And the corresponding correct values in the GEO projection
        geo_extents = [[-5010596.02, 1741593.72, 5570248.48, 5567248.07],
                       [-5567248.07, -5570248.48, 5570248.48, 5567248.07]]

        for i in range(len(area_extents_ll)):
            res = mpop.satin.mipp_xrit.lonlat_to_geo_extent(
                area_extents_ll[i], proj4_str, max_extent=max_extent)
            for j in range(len(res)):
                self.assertAlmostEqual(res[j], geo_extents[i][j], 2)
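
# --- Illustration (not part of the original test module) ---------------------
# The idea being tested above is essentially a forward proj transform of the
# corner points, clipped to the satellite's maximum disc extent.  A rough
# sketch of the corner transform with pyproj, reusing the MSG3 parameters
# from the test (the clipping step of lonlat_to_geo_extent is omitted):
def _example_corner_transform():
    """Project the first test extent's corners to geos x/y metres."""
    from pyproj import Proj
    geos = Proj(proj="geos", lon_0=0.0,
                a=6378169.0, b=6356583.8, h=35785831.0)
    x_ll, y_ll = geos(-68.328121068060341, 18.363816196771392)
    x_ur, y_ur = geos(74.770372053870972, 75.66494585661934)
    return (x_ll, y_ll, x_ur, y_ur)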

mpop-1.5.0/utils/coord2area_def.py

# Copyright (c) 2012, 2015

# Author(s):

#   Martin Raspaud <martin.raspaud@smhi.se>

# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.

"""Convert human coordinates (lon and lat) to an area definition.

Here is a usage example:

  python coord2area_def.py france stere 42.0 51.5 -5.5 8.0 1.5

(the arguments are "name proj min_lat max_lat min_lon max_lon
resolution(km)") and the result is:

  REGION: france {
      NAME:         france
      PCS_ID:       stere_1.25_46.75
      PCS_DEF:      proj=stere,lat_0=46.75,lon_0=1.25,ellps=WGS84
      XSIZE:        746
      YSIZE:        703
      AREA_EXTENT:  (-559750.38109755167, -505020.6757764442,
                     559750.38109755167, 549517.35194826045)
  };
"""

import sys
import argparse

from pyproj import Proj

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument("name", help="The name of the area.")
    parser.add_argument("proj",
                        help="The projection to use. Use proj.4 names, "
                             "like 'stere', 'merc'...")
    parser.add_argument("min_lat",
                        help="The latitude of the bottom of the area",
                        type=float)
    parser.add_argument("max_lat",
                        help="The latitude of the top of the area",
                        type=float)
    parser.add_argument("min_lon",
                        help="The longitude of the left of the area",
                        type=float)
    parser.add_argument("max_lon",
                        help="The longitude of the right of the area",
                        type=float)
    parser.add_argument("resolution",
                        help="The resolution of the area (in km)",
                        type=float)
    parser.add_argument("-s", "--shapes",
                        help="Show a preview of the area using the "
                             "coastlines in this directory")
    args = parser.parse_args()
    name = args.name
    proj = args.proj

    left = args.min_lon
    right = args.max_lon
    up = args.min_lat
    down = args.max_lat

    res = args.resolution * 1000

    lat_0 = (up + down) / 2
    lon_0 = (right + left) / 2

    p = Proj(proj=proj, lat_0=lat_0, lon_0=lon_0, ellps="WGS84")

    left_ex1, up_ex1 = p(left, up)
    right_ex1, up_ex2 = p(right, up)
    left_ex2, down_ex1 = p(left, down)
    right_ex2, down_ex2 = p(right, down)
    left_ex3, dummy = p(left, lat_0)
    right_ex3, dummy = p(right, lat_0)

    area_extent = (min(left_ex1, left_ex2, left_ex3),
                   min(up_ex1, up_ex2),
                   max(right_ex1, right_ex2, right_ex3),
                   max(down_ex1, down_ex2))

    xsize = int((area_extent[2] - area_extent[0]) / res)
    ysize = int((area_extent[3] - area_extent[1]) / res)

    proj4_string = "+" + \
        " +".join(("proj=" + proj + ",lat_0=" + str(lat_0) +
                   ",lon_0=" + str(lon_0) + ",ellps=WGS84").split(","))

    print proj4_string
    print "REGION:", name, "{"
    print "\tNAME:\t", name
    print "\tPCS_ID:\t", proj + "_" + str(lon_0) + "_" + str(lat_0)
    print ("\tPCS_DEF:\tproj=" + proj +
           ",lat_0=" + str(lat_0) +
           ",lon_0=" + str(lon_0) +
           ",ellps=WGS84")
    print "\tXSIZE:\t", xsize
    print "\tYSIZE:\t", ysize
    print "\tAREA_EXTENT:\t", area_extent
    print "};"

    if args.shapes is None:
        sys.exit(0)

    from PIL import Image
    from pycoast import ContourWriterAGG
    img = Image.new('RGB', (xsize, ysize))
    #proj4_string = '+proj=geos +lon_0=0.0 +a=6378169.00 +b=6356583.80 +h=35785831.0'
    #area_extent = (-5570248.4773392612, -5567248.074173444, 5567248.074173444, 5570248.4773392612)
    area_def = (proj4_string, area_extent)
    cw = ContourWriterAGG(args.shapes)
    #cw = ContourWriterAGG('/usr/share/gshhg-gmt-shp/')
    cw.add_coastlines(img, (proj4_string, area_extent),
                      resolution='l', width=0.5)

    cw.add_grid(img, area_def, (10.0, 10.0), (2.0, 2.0),
                write_text=False, outline='white', outline_opacity=175,
                width=1.0, minor_outline='white', minor_outline_opacity=175,
                minor_width=0.2, minor_is_tick=False)
    img.show()
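
# --- Illustration (not part of the original script) ---------------------------
# A plausible way to turn the printed REGION block into a live pyresample
# area; pyresample's utils.get_area_def accepted a proj4 string in this era
# of the code, but treat the exact signature as an assumption:
def _example_area_def(name, proj4_string, xsize, ysize, area_extent):
    """Build a pyresample AreaDefinition matching the printed REGION block."""
    from pyresample import utils as pr_utils
    return pr_utils.get_area_def(name, name, name, proj4_string,
                                 xsize, ysize, area_extent)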
{"ch1": BASE_URL + "N19_AVHRR_Libya_ch1.txt", "ch2": BASE_URL + "N19_AVHRR_Libya_ch2.txt"} } def get_page(url): '''Retrieve the given page.''' return urllib2.urlopen(url).read() def get_coeffs(page): '''Parse coefficients from the page.''' coeffs = {} coeffs['datetime'] = [] coeffs['slope1'] = [] coeffs['intercept1'] = [] coeffs['slope2'] = [] coeffs['intercept2'] = [] slope1_idx, intercept1_idx, slope2_idx, intercept2_idx = \ None, None, None, None date_idx = 0 for row in page.lower().split('\n'): row = row.split() if len(row) == 0: continue if row[0] == 'update': # Get the column indices from the header line slope1_idx = row.index('slope_lo') intercept1_idx = row.index('int_lo') slope2_idx = row.index('slope_hi') intercept2_idx = row.index('int_hi') continue if slope1_idx is None: continue # In some cases the fields are connected, skip those rows if max([slope1_idx, intercept1_idx, slope2_idx, intercept2_idx]) >= len(row): continue try: dat = dt.datetime.strptime(row[date_idx], "%m/%d/%Y") except ValueError: continue coeffs['datetime'].append([dat.year, dat.month, dat.day]) coeffs['slope1'].append(float(row[slope1_idx])) coeffs['intercept1'].append(float(row[intercept1_idx])) coeffs['slope2'].append(float(row[slope2_idx])) coeffs['intercept2'].append(float(row[intercept2_idx])) return coeffs def get_all_coeffs(): '''Get all available calibration coefficients for the satellites.''' coeffs = {} for platform in URLS.keys(): if platform not in coeffs: coeffs[platform] = {} for chan in URLS[platform].keys(): url = URLS[platform][chan] print url page = get_page(url) coeffs[platform][chan] = get_coeffs(page) return coeffs def save_coeffs(coeffs, out_dir=''): '''Save calibration coefficients to HDF5 files.''' for platform in coeffs.keys(): fname = os.path.join(out_dir, "%s_calibration_data.h5" % platform) fid = h5py.File(fname, 'w') for chan in coeffs[platform].keys(): fid.create_group(chan) fid[chan]['datetime'] = coeffs[platform][chan]['datetime'] fid[chan]['slope1'] = coeffs[platform][chan]['slope1'] fid[chan]['intercept1'] = coeffs[platform][chan]['intercept1'] fid[chan]['slope2'] = coeffs[platform][chan]['slope2'] fid[chan]['intercept2'] = coeffs[platform][chan]['intercept2'] fid.close() print "Calibration coefficients saved for %s" % platform def main(): '''Create calibration coefficient files for AVHRR''' out_dir = sys.argv[1] coeffs = get_all_coeffs() save_coeffs(coeffs, out_dir=out_dir) if __name__ == "__main__": main() mpop-1.5.0/utils/get_tile_def.py000066400000000000000000000023061317160620000166110ustar00rootroot00000000000000#!/usr/bin/python import xml.etree.ElementTree as ET from pyresample import utils import pickle import urllib2 length=109800 #https://sentinel.esa.int/documents/247904/1955685/S2A_OPER_GIP_TILPAR_20150622T000000_21000101T000000_ZZ_0001 FNAME="S2A_OPER_GIP_TILPAR_20150622T000000_21000101T000000_ZZ_0001" TILPAR_URL="https://sentinel.esa.int/documents/247904/1955685/"+FNAME FNAME=FNAME+".kml" tiles = urllib2.urlopen(TILPAR_URL) with open(FNAME,'wb') as output: output.write(tiles.read()) tiles.close() tree = ET.parse(FNAME) root = tree.getroot() s2tiles={} for pm in root.iter('{http://www.opengis.net/kml/2.2}Placemark'): tilename=None epsg=None utm_ul_x=None utm_ul_y=None for name in pm.iter('{http://www.opengis.net/kml/2.2}name'): tilename=name.text for simple in pm.iter('{http://www.opengis.net/kml/2.2}SimpleData'): if (simple.attrib['name']=='epsg'): epsg=simple.text if(simple.attrib['name']=='utm_ul_x'): utm_ul_x=simple.text 

mpop-1.5.0/utils/get_tile_def.py

#!/usr/bin/python

import xml.etree.ElementTree as ET
import pickle
import urllib2

from pyresample import utils

length = 109800

# https://sentinel.esa.int/documents/247904/1955685/S2A_OPER_GIP_TILPAR_20150622T000000_21000101T000000_ZZ_0001
FNAME = "S2A_OPER_GIP_TILPAR_20150622T000000_21000101T000000_ZZ_0001"
TILPAR_URL = "https://sentinel.esa.int/documents/247904/1955685/" + FNAME
FNAME = FNAME + ".kml"

tiles = urllib2.urlopen(TILPAR_URL)
with open(FNAME, 'wb') as output:
    output.write(tiles.read())
tiles.close()

tree = ET.parse(FNAME)
root = tree.getroot()

s2tiles = {}

for pm in root.iter('{http://www.opengis.net/kml/2.2}Placemark'):
    tilename = None
    epsg = None
    utm_ul_x = None
    utm_ul_y = None
    for name in pm.iter('{http://www.opengis.net/kml/2.2}name'):
        tilename = name.text
    for simple in pm.iter('{http://www.opengis.net/kml/2.2}SimpleData'):
        if simple.attrib['name'] == 'epsg':
            epsg = simple.text
        if simple.attrib['name'] == 'utm_ul_x':
            utm_ul_x = simple.text
        if simple.attrib['name'] == 'utm_ul_y':
            utm_ul_y = simple.text
    extent = (float(utm_ul_x), float(utm_ul_y) - length,
              float(utm_ul_x) + length, float(utm_ul_y))
    s2tiles[tilename] = [epsg, extent]

f = open('s2tiles.pickle', 'w')
pickle.dump(s2tiles, f)
f.close()


mpop-1.5.0/utils/precompute_projection.py

#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2017
#
# Author(s):
#
#   Panu Lahtinen
#
# This file is part of mpop.
#
# mpop is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# mpop is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with mpop.  If not, see <http://www.gnu.org/licenses/>.

"""Module for calculating and saving projection mapping look-up tables,
which can be used with MPOP by setting 'precompute=True'."""

import sys

import numpy as np

from mpop.projector import (calc_nearest_params, calc_bilinear_params,
                            calc_quick_params, get_precompute_cache_fname,
                            get_area_and_id)


def save(fname, **data):
    """Save preprojection data to an npz file."""
    np.savez(fname, **data)


def calc_preproj_params(out_dir, mode, in_area_name, out_area_name,
                        radius=None, nprocs=1):
    """Calculate preprojection parameters and save them to *out_dir*."""
    in_area, in_id = get_area_and_id(in_area_name)
    out_area, out_id = get_area_and_id(out_area_name)

    fname = get_precompute_cache_fname(in_id, out_id,
                                       in_area_name, out_area_name,
                                       mode, out_dir)

    if mode == "nearest":
        data = calc_nearest_params(in_area, out_area, radius, nprocs=nprocs)
    elif mode == "bilinear":
        data = calc_bilinear_params(in_area, out_area, radius, nprocs=nprocs)
    elif mode == "quick":
        data = calc_quick_params(in_area, out_area)

    save(fname, **data)


def print_usage():
    """Print usage."""
    print("USAGE:")
    print("python precompute_projection.py "
          "<in_area_name> <out_area_name>")
    print("python precompute_projection.py "
          "<in_area_name> <out_area_name> <mode>")
    print("python precompute_projection.py "
          "<in_area_name> <out_area_name> <mode> <radius>")
    print("python precompute_projection.py "
          "<in_area_name> <out_area_name> <mode> <radius> <nprocs>")
    print("python precompute_projection.py "
          "<in_area_name> <out_area_name> <mode> <radius> <nprocs> <out_dir>")


def main():
    try:
        in_area_name = sys.argv[1]
        out_area_name = sys.argv[2]
    except IndexError:
        print_usage()
        return
    try:
        mode = sys.argv[3]
    except IndexError:
        mode = "nearest"
    try:
        radius = int(sys.argv[4])
    except IndexError:
        radius = 50000
    try:
        nprocs = int(sys.argv[5])
    except IndexError:
        nprocs = 1
    try:
        out_dir = sys.argv[6]
    except IndexError:
        out_dir = '.'

    calc_preproj_params(out_dir, mode, in_area_name, out_area_name,
                        radius=radius, nprocs=nprocs)


if __name__ == "__main__":
    main()
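
# --- Illustration (not part of the original script) ---------------------------
# A plausible invocation, assuming "euro4" and "globe_mollweide" are area
# names known to your area configuration (both names are made up for this
# sketch):
#
#     python precompute_projection.py euro4 globe_mollweide bilinear \
#         50000 4 /tmp/luts
#
# Programmatically, the same work is a single call:
#
#     calc_preproj_params("/tmp/luts", "bilinear", "euro4", "globe_mollweide",
#                         radius=50000, nprocs=4)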