eliot-1.11.0/0000775000175000017500000000000013573001162014400 5ustar itamarstitamarst00000000000000eliot-1.11.0/PKG-INFO0000664000175000017500000001036613573001162015503 0ustar itamarstitamarst00000000000000Metadata-Version: 2.1 Name: eliot Version: 1.11.0 Summary: Logging library that tells you why it happened Home-page: https://github.com/itamarst/eliot/ Maintainer: Itamar Turner-Trauring Maintainer-email: itamar@itamarst.org License: Apache 2.0 Description: Eliot: Logging that tells you *why* it happened ================================================ .. image:: https://travis-ci.org/itamarst/eliot.png?branch=master :target: http://travis-ci.org/itamarst/eliot :alt: Build Status Python's built-in ``logging`` and other similar systems output a stream of factoids: they're interesting, but you can't really tell what's going on. * Why is your application slow? * What caused this code path to be chosen? * Why did this error happen? Standard logging can't answer these questions. But with a better model you could understand what and why things happened in your application. You could pinpoint performance bottlenecks, you could understand what happened when, who called what. That is what Eliot does. ``eliot`` is a Python logging system that outputs causal chains of **actions**: actions can spawn other actions, and eventually they either **succeed or fail**. The resulting logs tell you the story of what your software did: what happened, and what caused it. Eliot supports a range of use cases and 3rd party libraries: * Logging within a single process. * Causal tracing across a distributed system. * Scientific computing, with `built-in support for NumPy and Dask `_. * `Asyncio and Trio coroutines `_ and the `Twisted networking framework `_. Eliot is only used to generate your logs; you will might need tools like Logstash and ElasticSearch to aggregate and store logs if you are using multiple processes across multiple machines. 
Eliot supports Python 3.5, 3.6, 3.7, and 3.8, as well as PyPy3. It is maintained by Itamar Turner-Trauring, and released under the Apache 2.0 License. Python 2.7 is in legacy support mode, with the last release supported being 1.7; see `here `_ for details. * `Read the documentation `_. * Download from `PyPI`_ or `conda-forge `_. * Need help or have any questions? `File an issue `_ on GitHub. * **Commercial support** is available from `Python⇒Speed `_. Testimonials ------------ "Eliot has made tracking down causes of failure (in complex external integrations and internal uses) tremendously easier. Our errors are logged to Sentry with the Eliot task UUID. That means we can go from a Sentry notification to a high-level trace of operations—with important metadata at each operation—in a few seconds. We immediately know which user did what in which part of the system." —Jonathan Jacobs .. _Github: https://github.com/itamarst/eliot .. _PyPI: https://pypi.python.org/pypi/eliot Keywords: logging Platform: UNKNOWN Classifier: Intended Audience :: Developers Classifier: License :: OSI Approved :: Apache Software License Classifier: Operating System :: OS Independent Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 3.5 Classifier: Programming Language :: Python :: 3.6 Classifier: Programming Language :: Python :: 3.7 Classifier: Programming Language :: Python :: 3.8 Classifier: Programming Language :: Python :: Implementation :: CPython Classifier: Programming Language :: Python :: Implementation :: PyPy Classifier: Topic :: System :: Logging Requires-Python: >=3.5.3 Provides-Extra: test Provides-Extra: journald Provides-Extra: dev eliot-1.11.0/benchmarks/0000775000175000017500000000000013573001162016515 5ustar itamarstitamarst00000000000000eliot-1.11.0/benchmarks/serialization.py0000664000175000017500000000214713573001140021744 0ustar itamarstitamarst00000000000000""" Benchmark of message 
serialization. The goal here is to mostly focus on performance of serialization, in a vaguely realistic manner. That is, mesages are logged in context of a message with a small number of fields. """ from __future__ import unicode_literals import time from eliot import Message, start_action, to_file # Ensure JSON serialization is part of benchmark: to_file(open("/dev/null", "w")) N = 10000 def run(): start = time.time() for i in range(N): with start_action(action_type="my_action"): with start_action(action_type="my_action2") as ctx: ctx.log( message_type="my_message", integer=3, string="abcdeft", string2="dgsjdlkgjdsl", list=[1, 2, 3, 4], ) end = time.time() # Each iteration has 5 messages: start/end of my_action, start/end of # my_action2, and my_message. print("%.6f per message" % ((end - start) / (N * 5),)) print("%s messages/sec" % (int(N / (end - start)),)) if __name__ == "__main__": run() eliot-1.11.0/benchmarks/logwriter.py0000664000175000017500000000202713470775105021121 0ustar itamarstitamarst00000000000000""" A benchmark for eliot.logwriter. 
""" import tempfile import time from twisted.internet.task import react from twisted.python.filepath import FilePath from eliot.logwriter import ThreadedFileWriter LENGTH = 100 MESSAGES = 100000 def main(reactor): print("Message size: %d bytes Num messages: %d" % (LENGTH, MESSAGES)) message = b"a" * LENGTH fp = FilePath(tempfile.mktemp()) writer = ThreadedFileWriter(fp.open("ab"), reactor) writer.startService() start = time.time() for i in range(MESSAGES): writer(message) d = writer.stopService() def done(_): elapsed = time.time() - start kbSec = (LENGTH * MESSAGES) / (elapsed * 1024) messagesSec = MESSAGES / elapsed print("messages/sec: %s KB/sec: %s" % (messagesSec, kbSec)) d.addCallback(done) def cleanup(result): fp.restat() print() print("File size: ", fp.getsize()) fp.remove() d.addBoth(cleanup) return d if __name__ == "__main__": react(main, []) eliot-1.11.0/MANIFEST.in0000664000175000017500000000027213460352650016145 0ustar itamarstitamarst00000000000000include LICENSE include README.rst include versioneer.py include eliot/_version.py recursive-include docs * prune docs/_build recursive-include benchmarks * recursive-include examples * eliot-1.11.0/README.rst0000664000175000017500000000556313573001140016074 0ustar itamarstitamarst00000000000000Eliot: Logging that tells you *why* it happened ================================================ .. image:: https://travis-ci.org/itamarst/eliot.png?branch=master :target: http://travis-ci.org/itamarst/eliot :alt: Build Status Python's built-in ``logging`` and other similar systems output a stream of factoids: they're interesting, but you can't really tell what's going on. * Why is your application slow? * What caused this code path to be chosen? * Why did this error happen? Standard logging can't answer these questions. But with a better model you could understand what and why things happened in your application. You could pinpoint performance bottlenecks, you could understand what happened when, who called what. 
That is what Eliot does. ``eliot`` is a Python logging system that outputs causal chains of **actions**: actions can spawn other actions, and eventually they either **succeed or fail**. The resulting logs tell you the story of what your software did: what happened, and what caused it. Eliot supports a range of use cases and 3rd party libraries: * Logging within a single process. * Causal tracing across a distributed system. * Scientific computing, with `built-in support for NumPy and Dask `_. * `Asyncio and Trio coroutines `_ and the `Twisted networking framework `_. Eliot is only used to generate your logs; you will might need tools like Logstash and ElasticSearch to aggregate and store logs if you are using multiple processes across multiple machines. Eliot supports Python 3.5, 3.6, 3.7, and 3.8, as well as PyPy3. It is maintained by Itamar Turner-Trauring, and released under the Apache 2.0 License. Python 2.7 is in legacy support mode, with the last release supported being 1.7; see `here `_ for details. * `Read the documentation `_. * Download from `PyPI`_ or `conda-forge `_. * Need help or have any questions? `File an issue `_ on GitHub. * **Commercial support** is available from `Python⇒Speed `_. Testimonials ------------ "Eliot has made tracking down causes of failure (in complex external integrations and internal uses) tremendously easier. Our errors are logged to Sentry with the Eliot task UUID. That means we can go from a Sentry notification to a high-level trace of operations—with important metadata at each operation—in a few seconds. We immediately know which user did what in which part of the system." —Jonathan Jacobs .. _Github: https://github.com/itamarst/eliot .. _PyPI: https://pypi.python.org/pypi/eliot eliot-1.11.0/LICENSE0000664000175000017500000002607313460352650015423 0ustar itamarstitamarst00000000000000Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. 
Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. 
"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the 
Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. 
Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. 
To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "{}" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright {yyyy} {name of copyright owner} Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.eliot-1.11.0/eliot/0000775000175000017500000000000013573001162015514 5ustar itamarstitamarst00000000000000eliot-1.11.0/eliot/_validation.py0000664000175000017500000004002413573001140020353 0ustar itamarstitamarst00000000000000""" A log message serialization and validation system for Eliot. Validation is intended to be done by unit tests, not the production code path, although in theory it could be done then as well. """ from __future__ import unicode_literals from warnings import warn import six unicode = six.text_type from pyrsistent import PClass, field as pyrsistent_field from ._message import ( Message, REASON_FIELD, MESSAGE_TYPE_FIELD, TASK_LEVEL_FIELD, TASK_UUID_FIELD, TIMESTAMP_FIELD, ) from ._action import ( start_action, startTask, ACTION_STATUS_FIELD, ACTION_TYPE_FIELD, STARTED_STATUS, SUCCEEDED_STATUS, FAILED_STATUS, log_message, ) class ValidationError(Exception): """ A field value failed validation. 
""" # Types that can be encoded to JSON: _JSON_TYPES = {type(None), int, float, unicode, list, dict, bytes, bool} _JSON_TYPES |= set(six.integer_types) RESERVED_FIELDS = (TASK_LEVEL_FIELD, TASK_UUID_FIELD, TIMESTAMP_FIELD) class Field(object): """ A named field that can accept rich types and serialize them to the logging system's basic types (currently, JSON types). An optional extra validation function can be used to validate inputs when unit testing. @ivar key: The name of the field, the key which refers to it, e.g. C{"path"}. @ivar description: A description of what this field contains. @type description: C{unicode} """ def __init__(self, key, serializer, description="", extraValidator=None): """ @param serializer: A function that takes a single rich input and returns a serialized value that can be written out as JSON. May raise L{ValidationError} to indicate bad inputs. @param extraValidator: Allow additional validation of the field value. A callable that takes a field value, and raises L{ValidationError} if the value is a incorrect one for this field. Alternatively can be set to C{None}, in which case no additional validation is done. """ self.key = key self.description = description self._serializer = serializer self._extraValidator = extraValidator def validate(self, input): """ Validate the given input value against this L{Field} definition. @param input: An input value supposedly serializable by this L{Field}. @raises ValidationError: If the value is not serializable or fails to be validated by the additional validator. """ # Make sure the input serializes: self._serializer(input) # Use extra validator, if given: if self._extraValidator is not None: self._extraValidator(input) def serialize(self, input): """ Convert the given input to a value that can actually be logged. @param input: An input value supposedly serializable by this L{Field}. @return: A serialized value. 
""" return self._serializer(input) @classmethod def forValue(klass, key, value, description): """ Create a L{Field} that can only have a single value. @param key: The name of the field, the key which refers to it, e.g. C{"path"}. @param value: The allowed value for the field. @param description: A description of what this field contains. @type description: C{unicode} @return: A L{Field}. """ def validate(checked): if checked != value: raise ValidationError(checked, "Field %r must be %r" % (key, value)) return klass(key, lambda _: value, description, validate) # PEP 8 variant: for_value = forValue @classmethod def forTypes(klass, key, classes, description, extraValidator=None): """ Create a L{Field} that must be an instance of a given set of types. @param key: The name of the field, the key which refers to it, e.g. C{"path"}. @ivar classes: A C{list} of allowed Python classes for this field's values. Supported classes are C{unicode}, C{int}, C{float}, C{bool}, C{long}, C{list} and C{dict} and C{None} (the latter isn't strictly a class, but will be converted appropriately). @param description: A description of what this field contains. @type description: C{unicode} @param extraValidator: See description in L{Field.__init__}. @return: A L{Field}. """ fixedClasses = [] for k in classes: if k is None: k = type(None) if k not in _JSON_TYPES: raise TypeError("%s is not JSON-encodeable" % (k,)) fixedClasses.append(k) fixedClasses = tuple(fixedClasses) def validate(value): if not isinstance(value, fixedClasses): raise ValidationError( value, "Field %r requires type to be one of %s" % (key, classes) ) if extraValidator is not None: extraValidator(value) return klass(key, lambda v: v, description, extraValidator=validate) # PEP 8 variant: for_types = forTypes def fields(*fields, **keys): """ Factory for for L{MessageType} and L{ActionType} field definitions. @param *fields: A L{tuple} of L{Field} instances. 
@param **keys: A L{dict} mapping key names to the expected type of the field's values. @return: A L{list} of L{Field} instances. """ return list(fields) + [ Field.forTypes(key, [value], "") for key, value in keys.items() ] REASON = Field.forTypes(REASON_FIELD, [unicode], "The reason for an event.") TRACEBACK = Field.forTypes("traceback", [unicode], "The traceback for an exception.") EXCEPTION = Field.forTypes("exception", [unicode], "The FQPN of an exception class.") class _MessageSerializer(object): """ A serializer and validator for messages. @ivar fields: A C{dict} mapping a C{unicode} field name to the respective L{Field}. @ivar allow_additional_fields: If true, additional fields don't cause validation failure. """ def __init__(self, fields, allow_additional_fields=False): keys = [] for field in fields: if not isinstance(field, Field): raise TypeError("Expected a Field instance but got", field) keys.append(field.key) if len(set(keys)) != len(keys): raise ValueError(keys, "Duplicate field name") if ACTION_TYPE_FIELD in keys: if MESSAGE_TYPE_FIELD in keys: raise ValueError( keys, "Messages must have either " "'action_type' or 'message_type', not both", ) elif MESSAGE_TYPE_FIELD not in keys: raise ValueError( keys, "Messages must have either 'action_type' ", "or 'message_type'" ) if any(key.startswith("_") for key in keys): raise ValueError(keys, "Field names must not start with '_'") for reserved in RESERVED_FIELDS: if reserved in keys: raise ValueError( keys, "The field name %r is reserved for use " "by the logging framework" % (reserved,), ) self.fields = dict((field.key, field) for field in fields) self.allow_additional_fields = allow_additional_fields def serialize(self, message): """ Serialize the given message in-place, converting inputs to outputs. We do this in-place for performance reasons. There are more fields in a message than there are L{Field} objects because of the timestamp, task_level and task_uuid fields. 
By only iterating over our L{Fields} we therefore reduce the number of function calls in a critical code path. @param message: A C{dict}. """ for key, field in self.fields.items(): message[key] = field.serialize(message[key]) def validate(self, message): """ Validate the given message. @param message: A C{dict}. @raises ValidationError: If the message has the wrong fields or one of its field values fail validation. """ for key, field in self.fields.items(): if key not in message: raise ValidationError(message, "Field %r is missing" % (key,)) field.validate(message[key]) if self.allow_additional_fields: return # Otherwise, additional fields are not allowed: fieldSet = set(self.fields) | set(RESERVED_FIELDS) for key in message: if key not in fieldSet: raise ValidationError(message, "Unexpected field %r" % (key,)) class MessageType(object): """ A specific type of non-action message. Example usage: # Schema definition: KEY = Field("key", [int], u"The lookup key for things.") STATUS = Field("status", [int], u"The status of a thing.") LOG_STATUS = MessageType( "yourapp:subsystem:status", [KEY, STATUS], u"We just set the status of something.") # Actual code, with logging added: def setstatus(key, status): doactualset(key, status) LOG_STATUS(key=key, status=status).write() You do not need to use the L{MessageType} to create the L{eliot.Message}, however; you could build it up using a series of L{eliot.Message.bind} calls. Having a L{MessageType} is nonetheless still useful for validation and documentation. @ivar message_type: The name of the type, e.g. C{"yourapp:subsystem:yourtype"}. @ivar description: A description of what this message means. @type description: C{unicode} """ def __init__(self, message_type, fields, description=""): """ @ivar type: The name of the type, e.g. C{"yourapp:subsystem:yourtype"}. @ivar fields: A C{list} of L{Field} instances which can appear in this type. @param description: A description of what this message means. 
@type description: C{unicode} """ self.message_type = message_type self.description = description self._serializer = _MessageSerializer( fields + [Field.forValue(MESSAGE_TYPE_FIELD, message_type, "The message type.")] ) def __call__(self, **fields): """ Create a new L{eliot.Message} of this type with the given fields. @param fields: Extra fields to add to the message. @rtype: L{eliot.Message} """ warn( "MessageType.__call__() is deprecated since 1.11.0, " "use MessageType.log() instead.", DeprecationWarning, stacklevel=2, ) fields[MESSAGE_TYPE_FIELD] = self.message_type return Message(fields, self._serializer) def log(self, **fields): """ Write a new L{Message} of this type to the default L{Logger}. The keyword arguments will become contents of the L{Message}. """ fields["__eliot_serializer__"] = self._serializer log_message(self.message_type, **fields) class _ActionSerializers(PClass): """ Serializers for the three action messages: start, success and failure. """ start = pyrsistent_field(mandatory=True) success = pyrsistent_field(mandatory=True) failure = pyrsistent_field(mandatory=True) class ActionType(object): """ A specific type of action. Example usage: # Schema definition: KEY = Field("key", [int], u"The lookup key for things.") RESULT = Field("result", [str], u"The result of lookups.") LOG_DOSOMETHING = ActionType( "yourapp:subsystem:youraction", [KEY], [RESULT], u"Do something with a key, resulting in a value.") # Actual code, with logging added: def dosomething(key): with LOG_DOSOMETHING(logger, key=key) as action: _dostuff(key) _morestuff(key) result = _theresult() action.addSuccessFields(result=result) return result @ivar action_type: The name of the action, e.g. C{"yourapp:subsystem:youraction"}. @ivar startFields: A C{list} of L{Field} instances which can appear in this action's start message. @ivar successFields: A C{list} of L{Field} instances which can appear in this action's succesful finish message. 
@ivar failureFields: A C{list} of L{Field} instances which can appear in this action's failed finish message (in addition to the built-in C{"exception"} and C{"reason"} fields). @ivar description: A description of what this action's messages mean. @type description: C{unicode} """ # Overrideable hook for testing; need staticmethod() so functions don't # get turned into methods. _start_action = staticmethod(start_action) _startTask = staticmethod(startTask) def __init__(self, action_type, startFields, successFields, description=""): self.action_type = action_type self.description = description actionTypeField = Field.forValue( ACTION_TYPE_FIELD, action_type, "The action type" ) def makeActionStatusField(value): return Field.forValue(ACTION_STATUS_FIELD, value, "The action status") startFields = startFields + [ actionTypeField, makeActionStatusField(STARTED_STATUS), ] successFields = successFields + [ actionTypeField, makeActionStatusField(SUCCEEDED_STATUS), ] failureFields = [ actionTypeField, makeActionStatusField(FAILED_STATUS), REASON, EXCEPTION, ] self._serializers = _ActionSerializers( start=_MessageSerializer(startFields), success=_MessageSerializer(successFields), # Failed action messages can have extra fields from exception # extraction: failure=_MessageSerializer(failureFields, allow_additional_fields=True), ) def __call__(self, logger=None, **fields): """ Start a new L{eliot.Action} of this type with the given start fields. You can use the result as a Python context manager, or use the L{eliot.Action.finish} API. 
LOG_DOSOMETHING = ActionType("yourapp:subsystem:dosomething", [Field.forTypes("entry", [int], "")], [Field.forTypes("result", [int], "")], [], "Do something with an entry.") with LOG_DOSOMETHING(entry=x) as action: do(x) result = something(x * 2) action.addSuccessFields(result=result) Or perhaps: action = LOG_DOSOMETHING(entry=x) action.run(doSomething) action.finish() @param logger: A L{eliot.ILogger} provider to which the action's messages will be written, or C{None} to use the default one. @param fields: Extra fields to add to the message. @rtype: L{eliot.Action} """ return self._start_action(logger, self.action_type, self._serializers, **fields) def as_task(self, logger=None, **fields): """ Start a new L{eliot.Action} of this type as a task (i.e. top-level action) with the given start fields. See L{ActionType.__call__} for example of usage. @param logger: A L{eliot.ILogger} provider to which the action's messages will be written, or C{None} to use the default one. @param fields: Extra fields to add to the message. @rtype: L{eliot.Action} """ return self._startTask(logger, self.action_type, self._serializers, **fields) # Backwards compatible variant: asTask = as_task __all__ = [] eliot-1.11.0/eliot/tests/0000775000175000017500000000000013573001162016656 5ustar itamarstitamarst00000000000000eliot-1.11.0/eliot/tests/test_testing.py0000664000175000017500000007603413515376117021771 0ustar itamarstitamarst00000000000000""" Tests for L{eliot.testing}. 
""" from __future__ import unicode_literals from unittest import SkipTest, TestResult, TestCase from ..testing import ( issuperset, assertContainsFields, LoggedAction, LoggedMessage, validateLogging, UnflushedTracebacks, assertHasMessage, assertHasAction, validate_logging, capture_logging, swap_logger, check_for_errors, ) from .._output import MemoryLogger from .._action import start_action from .._message import Message from .._validation import ActionType, MessageType, ValidationError, Field from .._traceback import write_traceback from .. import add_destination, remove_destination, _output class IsSuperSetTests(TestCase): """ Tests for L{issuperset}. """ def test_equal(self): """ Equal dictionaries are supersets of each other. """ a = {"a": 1} b = a.copy() self.assertTrue(issuperset(a, b)) def test_additionalIsSuperSet(self): """ If C{A} is C{B} plus some extra entries, C{A} is superset of C{B}. """ a = {"a": 1, "b": 2, "c": 3} b = {"a": 1, "c": 3} self.assertTrue(issuperset(a, b)) def test_missingIsNotSuperSet(self): """ If C{A} is C{B} minus some entries, C{A} is not a superset of C{B}. """ a = {"a": 1, "c": 3} b = {"a": 1, "b": 2, "c": 3} self.assertFalse(issuperset(a, b)) class LoggedActionTests(TestCase): """ Tests for L{LoggedAction}. """ def test_values(self): """ The values given to the L{LoggedAction} constructor are stored on it. """ d1 = {"x": 1} d2 = {"y": 2} root = LoggedAction(d1, d2, []) self.assertEqual((root.startMessage, root.endMessage), (d1, d2)) def fromMessagesIndex(self, messages, index): """ Call L{LoggedAction.fromMessages} using action specified by index in a list of message dictionaries. @param messages: A C{list} of message dictionaries. @param index: Index to the logger's messages. @return: Result of L{LoggedAction.fromMessages}. 
""" uuid = messages[index]["task_uuid"] level = messages[index]["task_level"] return LoggedAction.fromMessages(uuid, level, messages) def test_fromMessagesCreatesLoggedAction(self): """ L{LoggedAction.fromMessages} returns a L{LoggedAction}. """ logger = MemoryLogger() with start_action(logger, "test"): pass logged = self.fromMessagesIndex(logger.messages, 0) self.assertIsInstance(logged, LoggedAction) def test_fromMessagesStartAndSuccessfulFinish(self): """ L{LoggedAction.fromMessages} finds the start and successful finish messages of an action and stores them in the result. """ logger = MemoryLogger() Message.new(x=1).write(logger) with start_action(logger, "test"): Message.new(x=1).write(logger) # Now we should have x message, start action message, another x message # and finally finish message. logged = self.fromMessagesIndex(logger.messages, 1) self.assertEqual( (logged.startMessage, logged.endMessage), (logger.messages[1], logger.messages[3]), ) def test_fromMessagesStartAndErrorFinish(self): """ L{LoggedAction.fromMessages} finds the start and successful finish messages of an action and stores them in the result. """ logger = MemoryLogger() try: with start_action(logger, "test"): raise KeyError() except KeyError: pass logged = self.fromMessagesIndex(logger.messages, 0) self.assertEqual( (logged.startMessage, logged.endMessage), (logger.messages[0], logger.messages[1]), ) def test_fromMessagesStartNotFound(self): """ L{LoggedAction.fromMessages} raises a L{ValueError} if a start message is not found. """ logger = MemoryLogger() with start_action(logger, action_type="test"): pass self.assertRaises(ValueError, self.fromMessagesIndex, logger.messages[1:], 0) def test_fromMessagesFinishNotFound(self): """ L{LoggedAction.fromMessages} raises a L{ValueError} if a finish message is not found. 
""" logger = MemoryLogger() with start_action(logger, action_type="test"): pass with self.assertRaises(ValueError) as cm: self.fromMessagesIndex(logger.messages[:1], 0) self.assertEqual(cm.exception.args[0], "Missing end message of type test") def test_fromMessagesAddsChildMessages(self): """ L{LoggedAction.fromMessages} adds direct child messages to the constructed L{LoggedAction}. """ logger = MemoryLogger() # index 0: Message.new(x=1).write(logger) # index 1 - start action with start_action(logger, "test"): # index 2 Message.new(x=2).write(logger) # index 3 Message.new(x=3).write(logger) # index 4 - end action # index 5 Message.new(x=4).write(logger) logged = self.fromMessagesIndex(logger.messages, 1) expectedChildren = [ LoggedMessage(logger.messages[2]), LoggedMessage(logger.messages[3]), ] self.assertEqual(logged.children, expectedChildren) def test_fromMessagesAddsChildActions(self): """ L{LoggedAction.fromMessages} recursively adds direct child actions to the constructed L{LoggedAction}. """ logger = MemoryLogger() # index 0 with start_action(logger, "test"): # index 1: with start_action(logger, "test2"): # index 2 Message.new(message_type="end", x=2).write(logger) # index 3 - end action with start_action(logger, "test3"): # index 4 pass # index 5 - end action # index 6 - end action logged = self.fromMessagesIndex(logger.messages, 0) self.assertEqual(logged.children[0], self.fromMessagesIndex(logger.messages, 1)) self.assertEqual( logged.type_tree(), {"test": [{"test2": ["end"]}, {"test3": []}]} ) def test_ofType(self): """ L{LoggedAction.ofType} returns a list of L{LoggedAction} created by the specified L{ActionType}. 
""" ACTION = ActionType("myaction", [], [], "An action!") logger = MemoryLogger() # index 0 with start_action(logger, "test"): # index 1: with ACTION(logger): # index 2 Message.new(x=2).write(logger) # index 3 - end action # index 4 - end action # index 5 with ACTION(logger): pass # index 6 - end action logged = LoggedAction.ofType(logger.messages, ACTION) self.assertEqual( logged, [ self.fromMessagesIndex(logger.messages, 1), self.fromMessagesIndex(logger.messages, 5), ], ) # String-variant of ofType: logged2 = LoggedAction.ofType(logger.messages, "myaction") self.assertEqual(logged, logged2) def test_ofTypeNotFound(self): """ L{LoggedAction.ofType} returns an empty list if actions of the given type cannot be found. """ ACTION = ActionType("myaction", [], [], "An action!") logger = MemoryLogger() self.assertEqual(LoggedAction.ofType(logger.messages, ACTION), []) def test_descendants(self): """ L{LoggedAction.descendants} returns all descendants of the L{LoggedAction}. """ ACTION = ActionType("myaction", [], [], "An action!") logger = MemoryLogger() # index 0 with ACTION(logger): # index 1: with start_action(logger, "test"): # index 2 Message.new(x=2).write(logger) # index 3 - end action # index 4 Message.new(x=2).write(logger) # index 5 - end action loggedAction = LoggedAction.ofType(logger.messages, ACTION)[0] self.assertEqual( list(loggedAction.descendants()), [ self.fromMessagesIndex(logger.messages, 1), LoggedMessage(logger.messages[2]), LoggedMessage(logger.messages[4]), ], ) def test_succeeded(self): """ If the action succeeded, L{LoggedAction.succeeded} will be true. """ logger = MemoryLogger() with start_action(logger, "test"): pass logged = self.fromMessagesIndex(logger.messages, 0) self.assertTrue(logged.succeeded) def test_notSucceeded(self): """ If the action failed, L{LoggedAction.succeeded} will be false. 
""" logger = MemoryLogger() try: with start_action(logger, "test"): raise KeyError() except KeyError: pass logged = self.fromMessagesIndex(logger.messages, 0) self.assertFalse(logged.succeeded) class LoggedMessageTest(TestCase): """ Tests for L{LoggedMessage}. """ def test_values(self): """ The values given to the L{LoggedMessage} constructor are stored on it. """ message = {"x": 1} logged = LoggedMessage(message) self.assertEqual(logged.message, message) def test_ofType(self): """ L{LoggedMessage.ofType} returns a list of L{LoggedMessage} created by the specified L{MessageType}. """ MESSAGE = MessageType("mymessage", [], "A message!") logger = MemoryLogger() # index 0 MESSAGE().write(logger) # index 1 Message.new(x=2).write(logger) # index 2 MESSAGE().write(logger) logged = LoggedMessage.ofType(logger.messages, MESSAGE) self.assertEqual( logged, [LoggedMessage(logger.messages[0]), LoggedMessage(logger.messages[2])], ) # Lookup by string type: logged2 = LoggedMessage.ofType(logger.messages, "mymessage") self.assertEqual(logged, logged2) def test_ofTypeNotFound(self): """ L{LoggedMessage.ofType} returns an empty list if messages of the given type cannot be found. """ MESSAGE = MessageType("mymessage", [], "A message!") logger = MemoryLogger() self.assertEqual(LoggedMessage.ofType(logger.messages, MESSAGE), []) class AssertContainsFields(TestCase): """ Tests for L{assertContainsFields}. """ class ContainsTest(TestCase): """ A test case that uses L{assertContainsFields}. """ def __init__(self, message, expectedFields): TestCase.__init__(self) self.message = message self.expectedFields = expectedFields def runTest(self): assertContainsFields(self, self.message, self.expectedFields) def test_equal(self): """ Equal dictionaries contain each other. 
""" message = {"a": 1} expected = message.copy() test = self.ContainsTest(message, expected) # No exception raised: test.debug() def test_additionalIsSuperSet(self): """ If C{A} is C{B} plus some extra entries, C{A} contains the fields in C{B}. """ message = {"a": 1, "b": 2, "c": 3} expected = {"a": 1, "c": 3} test = self.ContainsTest(message, expected) # No exception raised: test.debug() def test_missingFields(self): """ If C{A} is C{B} minus some entries, C{A} does not contain the fields in C{B}. """ message = {"a": 1, "c": 3} expected = {"a": 1, "b": 2, "c": 3} test = self.ContainsTest(message, expected) self.assertRaises(AssertionError, test.debug) def test_differentValues(self): """ If C{A} has a different value for a specific field than C{B}, C{A} does not contain the fields in C{B}. """ message = {"a": 1, "c": 3} expected = {"a": 1, "c": 2} test = self.ContainsTest(message, expected) self.assertRaises(AssertionError, test.debug) class ValidateLoggingTestsMixin(object): """ Tests for L{validateLogging} and L{capture_logging}. """ validate = None def test_decoratedFunctionCalledWithMemoryLogger(self): """ The underlying function decorated with L{validateLogging} is called with a L{MemoryLogger} instance. """ result = [] class MyTest(TestCase): @self.validate(None) def test_foo(this, logger): result.append((this, logger.__class__)) theTest = MyTest("test_foo") theTest.run() self.assertEqual(result, [(theTest, MemoryLogger)]) def test_decorated_function_passthrough(self): """ Additional arguments are passed to the underlying function. 
""" result = [] def another_wrapper(f): def g(this): f(this, 1, 2, c=3) return g class MyTest(TestCase): @another_wrapper @self.validate(None) def test_foo(this, a, b, logger, c=None): result.append((a, b, c)) theTest = MyTest("test_foo") theTest.debug() self.assertEqual(result, [(1, 2, 3)]) def test_newMemoryLogger(self): """ The underlying function decorated with L{validateLogging} is called with a new L{MemoryLogger} every time the wrapper is called. """ result = [] class MyTest(TestCase): @self.validate(None) def test_foo(this, logger): result.append(logger) theTest = MyTest("test_foo") theTest.run() theTest.run() self.assertIsNot(result[0], result[1]) def test_returns(self): """ The result of the underlying function is returned by wrapper when called. """ class MyTest(TestCase): @self.validate(None) def test_foo(self, logger): return 123 self.assertEqual(MyTest("test_foo").test_foo(), 123) def test_raises(self): """ The exception raised by the underlying function is passed through by the wrapper when called. """ exc = Exception() class MyTest(TestCase): @self.validate(None) def test_foo(self, logger): raise exc raised = None try: MyTest("test_foo").debug() except Exception as e: raised = e self.assertIs(exc, raised) def test_name(self): """ The wrapper has the same name as the wrapped function. """ class MyTest(TestCase): @self.validate(None) def test_foo(self, logger): pass self.assertEqual(MyTest.test_foo.__name__, "test_foo") def test_addCleanupValidate(self): """ When a test method is decorated with L{validateLogging} it has L{MemoryLogger.validate} registered as a test cleanup. 
""" MESSAGE = MessageType("mymessage", [], "A message") class MyTest(TestCase): @self.validate(None) def runTest(self, logger): self.logger = logger logger.write({"message_type": "wrongmessage"}, MESSAGE._serializer) test = MyTest() with self.assertRaises(ValidationError) as context: test.debug() # Some reference to the reason: self.assertIn("wrongmessage", str(context.exception)) # Some reference to which file caused the problem: self.assertIn("test_testing.py", str(context.exception)) def test_addCleanupTracebacks(self): """ When a test method is decorated with L{validateLogging} it has has a check unflushed tracebacks in the L{MemoryLogger} registered as a test cleanup. """ class MyTest(TestCase): @self.validate(None) def runTest(self, logger): try: 1 / 0 except ZeroDivisionError: write_traceback(logger) test = MyTest() self.assertRaises(UnflushedTracebacks, test.debug) def test_assertion(self): """ If a callable is passed to L{validateLogging}, it is called with the L{TestCase} instance and the L{MemoryLogger} passed to the test method. """ result = [] class MyTest(TestCase): def assertLogging(self, logger): result.append((self, logger)) @self.validate(assertLogging) def runTest(self, logger): self.logger = logger test = MyTest() test.run() self.assertEqual(result, [(test, test.logger)]) def test_assertionArguments(self): """ If a callable together with additional arguments and keyword arguments are passed to L{validateLogging}, the callable is called with the additional args and kwargs. 
""" result = [] class MyTest(TestCase): def assertLogging(self, logger, x, y): result.append((self, logger, x, y)) @self.validate(assertLogging, 1, y=2) def runTest(self, logger): self.logger = logger test = MyTest() test.run() self.assertEqual(result, [(test, test.logger, 1, 2)]) def test_assertionAfterTest(self): """ If a callable is passed to L{validateLogging}, it is called with the after the main test code has run, allowing it to make assertions about log messages from the test. """ class MyTest(TestCase): def assertLogging(self, logger): self.result.append(2) @self.validate(assertLogging) def runTest(self, logger): self.result = [1] test = MyTest() test.run() self.assertEqual(test.result, [1, 2]) def test_assertionBeforeTracebackCleanup(self): """ If a callable is passed to L{validateLogging}, it is called with the before the check for unflushed tracebacks, allowing it to flush traceback log messages. """ class MyTest(TestCase): def assertLogging(self, logger): logger.flushTracebacks(ZeroDivisionError) self.flushed = True @self.validate(assertLogging) def runTest(self, logger): self.flushed = False try: 1 / 0 except ZeroDivisionError: write_traceback(logger) test = MyTest() test.run() self.assertTrue(test.flushed) class ValidateLoggingTests(ValidateLoggingTestsMixin, TestCase): """ Tests for L{validate_logging}. """ validate = staticmethod(validate_logging) class CaptureLoggingTests(ValidateLoggingTestsMixin, TestCase): """ Tests for L{capture_logging}. """ validate = staticmethod(capture_logging) def setUp(self): # Since we're not always calling the test method via the TestCase # infrastructure, sometimes cleanup methods are not called. This # means the original default logger is not restored. So we do so # manually. If the issue is a bug in capture_logging itself the # tests below will catch that. 
original_logger = _output._DEFAULT_LOGGER def cleanup(): _output._DEFAULT_LOGGER = original_logger self.addCleanup(cleanup) def test_default_logger(self): """ L{capture_logging} captures messages from logging that doesn't specify a L{Logger}. """ class MyTest(TestCase): @capture_logging(None) def runTest(self, logger): Message.log(some_key=1234) self.logger = logger test = MyTest() test.run() self.assertEqual(test.logger.messages[0]["some_key"], 1234) def test_global_cleanup(self): """ After the function wrapped with L{capture_logging} finishes, logging that doesn't specify a logger is logged normally. """ class MyTest(TestCase): @capture_logging(None) def runTest(self, logger): pass test = MyTest() test.run() messages = [] add_destination(messages.append) self.addCleanup(remove_destination, messages.append) Message.log(some_key=1234) self.assertEqual(messages[0]["some_key"], 1234) def test_global_cleanup_exception(self): """ If the function wrapped with L{capture_logging} throws an exception, logging that doesn't specify a logger is logged normally. """ class MyTest(TestCase): @capture_logging(None) def runTest(self, logger): raise RuntimeError() test = MyTest() test.run() messages = [] add_destination(messages.append) self.addCleanup(remove_destination, messages.append) Message.log(some_key=1234) self.assertEqual(messages[0]["some_key"], 1234) def test_validationNotRunForSkip(self): """ If the decorated test raises L{SkipTest} then the logging validation is also skipped. """ class MyTest(TestCase): recorded = False def record(self, logger): self.recorded = True @validateLogging(record) def runTest(self, logger): raise SkipTest("Do not run this test.") test = MyTest() result = TestResult() test.run(result) # Verify that the validation function did not run and that the test was # nevertheless marked as a skip with the correct reason. 
self.assertEqual( (test.recorded, result.skipped, result.errors, result.failures), (False, [(test, "Do not run this test.")], [], []), ) MESSAGE1 = MessageType( "message1", [Field.forTypes("x", [int], "A number")], "A message for testing." ) MESSAGE2 = MessageType("message2", [], "A message for testing.") class AssertHasMessageTests(TestCase): """ Tests for L{assertHasMessage}. """ class UnitTest(TestCase): """ Test case that can be instantiated. """ def runTest(self): pass def test_failIfNoMessagesOfType(self): """ L{assertHasMessage} raises L{AssertionError} if the given L{MemoryLogger} has no messages of the given L{MessageType}. """ test = self.UnitTest() logger = MemoryLogger() MESSAGE1(x=123).write(logger) self.assertRaises(AssertionError, assertHasMessage, test, logger, MESSAGE2) def test_returnsIfMessagesOfType(self): """ L{assertHasMessage} returns the first message of the given L{MessageType}. """ test = self.UnitTest() logger = MemoryLogger() MESSAGE1(x=123).write(logger) self.assertEqual( assertHasMessage(test, logger, MESSAGE1), LoggedMessage.ofType(logger.messages, MESSAGE1)[0], ) def test_failIfNotSubset(self): """ L{assertHasMessage} raises L{AssertionError} if the found message doesn't contain the given fields. """ test = self.UnitTest() logger = MemoryLogger() MESSAGE1(x=123).write(logger) self.assertRaises( AssertionError, assertHasMessage, test, logger, MESSAGE1, {"x": 24} ) def test_returnsIfSubset(self): """ L{assertHasMessage} returns the first message of the given L{MessageType} if it contains the given fields. 
""" test = self.UnitTest() logger = MemoryLogger() MESSAGE1(x=123).write(logger) self.assertEqual( assertHasMessage(test, logger, MESSAGE1, {"x": 123}), LoggedMessage.ofType(logger.messages, MESSAGE1)[0], ) ACTION1 = ActionType( "action1", [Field.forTypes("x", [int], "A number")], [Field.forTypes("result", [int], "A number")], "A action for testing.", ) ACTION2 = ActionType("action2", [], [], "A action for testing.") class AssertHasActionTests(TestCase): """ Tests for L{assertHasAction}. """ class UnitTest(TestCase): """ Test case that can be instantiated. """ def runTest(self): pass def test_failIfNoActionsOfType(self): """ L{assertHasAction} raises L{AssertionError} if the given L{MemoryLogger} has no actions of the given L{ActionType}. """ test = self.UnitTest() logger = MemoryLogger() with ACTION1(logger, x=123): pass self.assertRaises(AssertionError, assertHasAction, test, logger, ACTION2, True) def test_failIfWrongSuccessStatus(self): """ L{assertHasAction} raises L{AssertionError} if the given success status does not match that of the found actions. """ test = self.UnitTest() logger = MemoryLogger() with ACTION1(logger, x=123): pass try: with ACTION2(logger): 1 / 0 except ZeroDivisionError: pass self.assertRaises(AssertionError, assertHasAction, test, logger, ACTION1, False) self.assertRaises(AssertionError, assertHasAction, test, logger, ACTION2, True) def test_returnsIfMessagesOfType(self): """ A successful L{assertHasAction} returns the first message of the given L{ActionType}. """ test = self.UnitTest() logger = MemoryLogger() with ACTION1(logger, x=123): pass self.assertEqual( assertHasAction(test, logger, ACTION1, True), LoggedAction.ofType(logger.messages, ACTION1)[0], ) def test_failIfNotStartSubset(self): """ L{assertHasAction} raises L{AssertionError} if the found action doesn't contain the given start fields. 
""" test = self.UnitTest() logger = MemoryLogger() with ACTION1(logger, x=123): pass self.assertRaises( AssertionError, assertHasAction, test, logger, ACTION1, True, {"x": 24} ) def test_failIfNotEndSubset(self): """ L{assertHasAction} raises L{AssertionError} if the found action doesn't contain the given end fields. """ test = self.UnitTest() logger = MemoryLogger() with ACTION1(logger, x=123) as act: act.addSuccessFields(result=5) self.assertRaises( AssertionError, assertHasAction, test, logger, ACTION1, True, startFields={"x": 123}, endFields={"result": 24}, ) def test_returns(self): """ A successful L{assertHasAction} returns the first message of the given L{ActionType} after doing all validation. """ test = self.UnitTest() logger = MemoryLogger() with ACTION1(logger, x=123) as act: act.addSuccessFields(result=5) self.assertEqual( assertHasAction(test, logger, ACTION1, True, {"x": 123}, {"result": 5}), LoggedAction.ofType(logger.messages, ACTION1)[0], ) class PEP8Tests(TestCase): """ Tests for PEP 8 method compatibility. """ def test_LoggedAction_from_messages(self): """ L{LoggedAction.from_messages} is the same as L{LoggedAction.fromMessages}. """ self.assertEqual(LoggedAction.from_messages, LoggedAction.fromMessages) def test_LoggedAction_of_type(self): """ L{LoggedAction.of_type} is the same as L{LoggedAction.ofType}. """ self.assertEqual(LoggedAction.of_type, LoggedAction.ofType) def test_LoggedAction_end_message(self): """ L{LoggedAction.end_message} is the same as L{LoggedAction.endMessage}. """ action = LoggedAction({1: 2}, {3: 4}, []) self.assertEqual(action.end_message, action.endMessage) def test_LoggedAction_start_message(self): """ L{LoggedAction.start_message} is the same as L{LoggedAction.startMessage}. """ action = LoggedAction({1: 2}, {3: 4}, []) self.assertEqual(action.start_message, action.startMessage) def test_LoggedMessage_of_type(self): """ L{LoggedMessage.of_type} is the same as L{LoggedMessage.ofType}. 
""" self.assertEqual(LoggedMessage.of_type, LoggedMessage.ofType) def test_validate_logging(self): """ L{validate_logging} is the same as L{validateLogging}. """ self.assertEqual(validate_logging, validateLogging) class LowLevelTestingHooks(TestCase): """Tests for lower-level APIs for setting up MemoryLogger.""" @capture_logging(None) def test_swap_logger(self, logger): """C{swap_logger} swaps out the current logger.""" new_logger = MemoryLogger() old_logger = swap_logger(new_logger) Message.log(message_type="hello") # We swapped out old logger for new: self.assertIs(old_logger, logger) self.assertEqual(new_logger.messages[0]["message_type"], "hello") # Now restore old logger: intermediate_logger = swap_logger(old_logger) Message.log(message_type="goodbye") self.assertIs(intermediate_logger, new_logger) self.assertEqual(logger.messages[0]["message_type"], "goodbye") def test_check_for_errors_unflushed_tracebacks(self): """C{check_for_errors} raises on unflushed tracebacks.""" logger = MemoryLogger() # No errors initially: check_for_errors(logger) try: 1 / 0 except ZeroDivisionError: write_traceback(logger) logger.flush_tracebacks(ZeroDivisionError) # Flushed tracebacks don't count: check_for_errors(logger) # But unflushed tracebacks do: try: raise RuntimeError except RuntimeError: write_traceback(logger) with self.assertRaises(UnflushedTracebacks): check_for_errors(logger) def test_check_for_errors_validation(self): """C{check_for_errors} raises on validation errors.""" logger = MemoryLogger() logger.write({"x": 1, "message_type": "mem"}) # No errors: check_for_errors(logger) # Now long something unserializable to JSON: logger.write({"message_type": object()}) with self.assertRaises(TypeError): check_for_errors(logger) eliot-1.11.0/eliot/tests/common.py0000664000175000017500000000067713460352650020540 0ustar itamarstitamarst00000000000000""" Common testing infrastructure. """ from io import BytesIO class FakeSys(object): """ A fake L{sys} module. 
""" def __init__(self, argv, stdinBytes): """ @param argv: List of command-line arguments. @param stdinBytes: C{bytes} that are readable from stdin. """ self.argv = argv self.stdin = BytesIO(stdinBytes) self.stdout = BytesIO() self.stderr = BytesIO() eliot-1.11.0/eliot/tests/test_json.py0000664000175000017500000000525013470775105021255 0ustar itamarstitamarst00000000000000""" Tests for L{eliot.json}. """ from __future__ import unicode_literals, absolute_import from unittest import TestCase, skipUnless, skipIf from json import loads, dumps from math import isnan try: import numpy as np except ImportError: np = None from eliot.json import EliotJSONEncoder class EliotJSONEncoderTests(TestCase): """Tests for L{EliotJSONEncoder}.""" def test_nan_inf(self): """NaN, inf and -inf are round-tripped.""" l = [float("nan"), float("inf"), float("-inf")] roundtripped = loads(dumps(l, cls=EliotJSONEncoder)) self.assertEqual(l[1:], roundtripped[1:]) self.assertTrue(isnan(roundtripped[0])) @skipUnless(np, "NumPy not installed.") def test_numpy(self): """NumPy objects get serialized to readable JSON.""" l = [ np.float32(12.5), np.float64(2.0), np.float16(0.5), np.bool(True), np.bool(False), np.bool_(True), np.unicode_("hello"), np.byte(12), np.short(12), np.intc(-13), np.int_(0), np.longlong(100), np.intp(7), np.ubyte(12), np.ushort(12), np.uintc(13), np.ulonglong(100), np.uintp(7), np.int8(1), np.int16(3), np.int32(4), np.int64(5), np.uint8(1), np.uint16(3), np.uint32(4), np.uint64(5), ] l2 = [l, np.array([1, 2, 3])] roundtripped = loads(dumps(l2, cls=EliotJSONEncoder)) self.assertEqual([l, [1, 2, 3]], roundtripped) @skipIf(np, "NumPy is installed.") def test_numpy_not_imported(self): """If NumPy is not available, EliotJSONEncoder continues to work. This ensures NumPy isn't a hard dependency. 
""" with self.assertRaises(TypeError): dumps([object()], cls=EliotJSONEncoder) self.assertEqual(dumps(12, cls=EliotJSONEncoder), "12") @skipUnless(np, "NumPy is not installed.") def test_large_numpy_array(self): """ Large NumPy arrays are not serialized completely, since this is (A) a performance hit (B) probably a mistake on the user's part. """ a1000 = np.array([0] * 10000) self.assertEqual(loads(dumps(a1000, cls=EliotJSONEncoder)), a1000.tolist()) a1002 = np.zeros((2, 5001)) a1002[0][0] = 12 a1002[0][1] = 13 a1002[1][1] = 500 self.assertEqual( loads(dumps(a1002, cls=EliotJSONEncoder)), {"array_start": a1002.flat[:10000].tolist(), "original_shape": [2, 5001]}, ) eliot-1.11.0/eliot/tests/test_output.py0000664000175000017500000006722613470775105021657 0ustar itamarstitamarst00000000000000""" Tests for L{eliot._output}. """ from sys import stdout from unittest import TestCase, skipUnless # Make sure to use StringIO that only accepts unicode: from io import BytesIO, StringIO import json as pyjson from tempfile import mktemp from time import time from uuid import UUID from threading import Thread try: import numpy as np except ImportError: np = None from zope.interface.verify import verifyClass from .._output import ( MemoryLogger, ILogger, Destinations, Logger, bytesjson as json, to_file, FileDestination, _DestinationsSendError, ) from .._validation import ValidationError, Field, _MessageSerializer from .._traceback import write_traceback from ..testing import assertContainsFields class MemoryLoggerTests(TestCase): """ Tests for L{MemoryLogger}. """ def test_interface(self): """ L{MemoryLogger} implements L{ILogger}. """ verifyClass(ILogger, MemoryLogger) def test_write(self): """ Dictionaries written with L{MemoryLogger.write} are stored on a list. 
""" logger = MemoryLogger() logger.write({"a": "b"}) logger.write({"c": 1}) self.assertEqual(logger.messages, [{"a": "b"}, {"c": 1}]) logger.validate() def test_notStringFieldKeys(self): """ Field keys must be unicode or bytes; if not L{MemoryLogger.validate} raises a C{TypeError}. """ logger = MemoryLogger() logger.write({123: "b"}) self.assertRaises(TypeError, logger.validate) def test_bytesMustBeUTF8(self): """ Field keys can be bytes, but only if they're UTF-8 encoded Unicode. """ logger = MemoryLogger() logger.write({"\u1234".encode("utf-16"): "b"}) self.assertRaises(UnicodeDecodeError, logger.validate) def test_serializer(self): """ L{MemoryLogger.validate} calls the given serializer's C{validate()} method with the message, as does L{MemoryLogger.write}. """ class FakeValidator(list): def validate(self, message): self.append(message) def serialize(self, obj): return obj validator = FakeValidator() logger = MemoryLogger() message = {"message_type": "mymessage", "X": 1} logger.write(message, validator) self.assertEqual(validator, [message]) logger.validate() self.assertEqual(validator, [message, message]) def test_failedValidation(self): """ L{MemoryLogger.validate} will allow exceptions raised by the serializer to pass through. """ serializer = _MessageSerializer( [Field.forValue("message_type", "mymessage", "The type")] ) logger = MemoryLogger() logger.write({"message_type": "wrongtype"}, serializer) self.assertRaises(ValidationError, logger.validate) def test_JSON(self): """ L{MemoryLogger.validate} will encode the output of serialization to JSON. """ serializer = _MessageSerializer( [ Field.forValue("message_type", "type", "The type"), Field("foo", lambda value: object(), "The type"), ] ) logger = MemoryLogger() logger.write( {"message_type": "type", "foo": "will become object()"}, serializer ) self.assertRaises(TypeError, logger.validate) def test_serialize(self): """ L{MemoryLogger.serialize} returns a list of serialized versions of the logged messages. 
""" serializer = _MessageSerializer( [ Field.forValue("message_type", "mymessage", "The type"), Field("length", len, "The length"), ] ) messages = [ {"message_type": "mymessage", "length": "abc"}, {"message_type": "mymessage", "length": "abcd"}, ] logger = MemoryLogger() for message in messages: logger.write(message, serializer) self.assertEqual( logger.serialize(), [ {"message_type": "mymessage", "length": 3}, {"message_type": "mymessage", "length": 4}, ], ) def test_serializeCopies(self): """ L{MemoryLogger.serialize} does not mutate the original logged messages. """ serializer = _MessageSerializer( [ Field.forValue("message_type", "mymessage", "The type"), Field("length", len, "The length"), ] ) message = {"message_type": "mymessage", "length": "abc"} logger = MemoryLogger() logger.write(message, serializer) logger.serialize() self.assertEqual(logger.messages[0]["length"], "abc") def write_traceback(self, logger, exception): """ Write an exception as a traceback to the logger. """ try: raise exception except: write_traceback(logger) def test_tracebacksCauseTestFailure(self): """ Logging a traceback to L{MemoryLogger} will add its exception to L{MemoryLogger.tracebackMessages}. """ logger = MemoryLogger() exception = Exception() self.write_traceback(logger, exception) self.assertEqual(logger.tracebackMessages[0]["reason"], exception) def test_flushTracebacksNoTestFailure(self): """ Any tracebacks cleared by L{MemoryLogger.flushTracebacks} (as specified by exception type) are removed from L{MemoryLogger.tracebackMessages}. """ logger = MemoryLogger() exception = RuntimeError() self.write_traceback(logger, exception) logger.flushTracebacks(RuntimeError) self.assertEqual(logger.tracebackMessages, []) def test_flushTracebacksReturnsExceptions(self): """ L{MemoryLogger.flushTracebacks} returns the traceback messages. 
""" exceptions = [ZeroDivisionError(), ZeroDivisionError()] logger = MemoryLogger() logger.write({"x": 1}) for exc in exceptions: self.write_traceback(logger, exc) logger.write({"x": 1}) flushed = logger.flushTracebacks(ZeroDivisionError) self.assertEqual(flushed, logger.messages[1:3]) def test_flushTracebacksUnflushedTestFailure(self): """ Any tracebacks uncleared by L{MemoryLogger.flushTracebacks} (because they are of a different type) are still listed in L{MemoryLogger.tracebackMessages}. """ logger = MemoryLogger() exception = RuntimeError() self.write_traceback(logger, exception) logger.flushTracebacks(KeyError) self.assertEqual(logger.tracebackMessages[0]["reason"], exception) def test_flushTracebacksUnflushedUnreturned(self): """ Any tracebacks uncleared by L{MemoryLogger.flushTracebacks} (because they are of a different type) are not returned. """ logger = MemoryLogger() exception = RuntimeError() self.write_traceback(logger, exception) self.assertEqual(logger.flushTracebacks(KeyError), []) def test_reset(self): """ L{MemoryLogger.reset} clears all logged messages and tracebacks. """ logger = MemoryLogger() logger.write({"key": "value"}, None) logger.reset() self.assertEqual( (logger.messages, logger.serializers, logger.tracebackMessages), ([], [], []), ) def test_threadSafeWrite(self): """ L{MemoryLogger.write} can be called from multiple threads concurrently. """ # Some threads will log some messages thread_count = 10 # A lot of messages. This will keep the threads running long enough # to give them a chance to (try to) interfere with each other. write_count = 10000 # They'll all use the same MemoryLogger instance. logger = MemoryLogger() # Each thread will have its own message and serializer that it writes # to the log over and over again. def write(msg, serializer): for i in range(write_count): logger.write(msg, serializer) # Generate a single distinct message for each thread to log. 
msgs = list({"i": i} for i in range(thread_count)) # Generate a single distinct serializer for each thread to log. serializers = list(object() for i in range(thread_count)) # Pair them all up. This gives us a simple invariant we can check # later on. write_args = zip(msgs, serializers) # Create the threads. threads = list(Thread(target=write, args=args) for args in write_args) # Run them all. Note threads early in this list will start writing to # the log before later threads in the list even get a chance to start. # That's part of why we have each thread write so many messages. for t in threads: t.start() # Wait for them all to finish. for t in threads: t.join() # Check that we got the correct number of messages in the log. expected_count = thread_count * write_count self.assertEqual(len(logger.messages), expected_count) self.assertEqual(len(logger.serializers), expected_count) # Check the simple invariant we created above. Every logged message # must be paired with the correct serializer, where "correct" is # defined by ``write_args`` above. for position, (msg, serializer) in enumerate( zip(logger.messages, logger.serializers) ): # The indexes must match because the objects are paired using # zip() above. msg_index = msgs.index(msg) serializer_index = serializers.index(serializer) self.assertEqual( msg_index, serializer_index, "Found message #{} with serializer #{} at position {}".format( msg_index, serializer_index, position ), ) class MyException(Exception): """ Custom exception. """ class BadDestination(list): """ A destination that throws an exception the first time it is called. """ called = 0 def __call__(self, msg): if not self.called: self.called = True raise MyException("ono") self.append(msg) class DestinationsTests(TestCase): """ Tests for L{Destinations}. """ def test_send(self): """ L{Destinations.send} calls all destinations added with L{Destinations.add} with the given dictionary. 
""" destinations = Destinations() message = {"hoorj": "blargh"} dest = [] dest2 = [] dest3 = [] destinations.add(dest.append, dest2.append) destinations.add(dest3.append) destinations.send(message) self.assertEqual(dest, [message]) self.assertEqual(dest2, [message]) self.assertEqual(dest3, [message]) def test_destinationExceptionMultipleDestinations(self): """ If one destination throws an exception, other destinations still get the message. """ destinations = Destinations() dest = [] dest2 = BadDestination() dest3 = [] destinations.add(dest.append) destinations.add(dest2) destinations.add(dest3.append) message = {"hello": 123} self.assertRaises(_DestinationsSendError, destinations.send, {"hello": 123}) self.assertEqual((dest, dest3), ([message], [message])) def test_destinationExceptionContinue(self): """ If a destination throws an exception, future messages are still sent to it. """ destinations = Destinations() dest = BadDestination() destinations.add(dest) self.assertRaises(_DestinationsSendError, destinations.send, {"hello": 123}) destinations.send({"hello": 200}) self.assertEqual(dest, [{"hello": 200}]) def test_remove(self): """ A destination removed with L{Destinations.remove} will no longer receive messages from L{Destionations.add} calls. """ destinations = Destinations() message = {"hello": 123} dest = [] destinations.add(dest.append) destinations.remove(dest.append) destinations.send(message) self.assertEqual(dest, []) def test_removeNonExistent(self): """ Removing a destination that has not previously been added with result in a C{ValueError} being thrown. """ destinations = Destinations() self.assertRaises(ValueError, destinations.remove, [].append) def test_addGlobalFields(self): """ L{Destinations.addGlobalFields} adds the given fields and values to the messages being passed in. 
""" destinations = Destinations() dest = [] destinations.add(dest.append) destinations.addGlobalFields(x=123, y="hello") destinations.send({"z": 456}) self.assertEqual(dest, [{"x": 123, "y": "hello", "z": 456}]) def test_addGlobalFieldsCumulative(self): """ L{Destinations.addGlobalFields} adds the given fields to those set by previous calls. """ destinations = Destinations() dest = [] destinations.add(dest.append) destinations.addGlobalFields(x=123, y="hello") destinations.addGlobalFields(x=456, z=456) destinations.send({"msg": "X"}) self.assertEqual(dest, [{"x": 456, "y": "hello", "z": 456, "msg": "X"}]) def test_buffering(self): """ Before any destinations are set up to 1000 messages are buffered, and then delivered to the first registered destinations. """ destinations = Destinations() messages = [{"k": i} for i in range(1050)] for m in messages: destinations.send(m) dest, dest2 = [], [] destinations.add(dest.append, dest2.append) self.assertEqual((dest, dest2), (messages[-1000:], messages[-1000:])) def test_buffering_second_batch(self): """ The second batch of added destination don't get the buffered messages. """ destinations = Destinations() message = {"m": 1} message2 = {"m": 2} destinations.send(message) dest = [] dest2 = [] destinations.add(dest.append) destinations.add(dest2.append) destinations.send(message2) self.assertEqual((dest, dest2), ([message, message2], [message2])) def test_global_fields_buffering(self): """ Global fields are added to buffered messages, when possible. """ destinations = Destinations() message = {"m": 1} destinations.send(message) destinations.addGlobalFields(k=123) dest = [] destinations.add(dest.append) self.assertEqual(dest, [{"m": 1, "k": 123}]) def makeLogger(): """ Return a tuple (L{Logger} instance, C{list} of written messages). """ logger = Logger() logger._destinations = Destinations() written = [] logger._destinations.add(written.append) return logger, written class LoggerTests(TestCase): """ Tests for L{Logger}. 
""" def test_interface(self): """ L{Logger} implements L{ILogger}. """ verifyClass(ILogger, Logger) def test_global(self): """ A global L{Destinations} is used by the L{Logger} class. """ self.assertIsInstance(Logger._destinations, Destinations) def test_write(self): """ L{Logger.write} sends the given dictionary L{Destinations} object. """ logger, written = makeLogger() d = {"hello": 1} logger.write(d) self.assertEqual(written, [d]) def test_serializer(self): """ If a L{_MessageSerializer} is passed to L{Logger.write}, it is used to serialize the message before it is passed to the destination. """ logger, written = makeLogger() serializer = _MessageSerializer( [ Field.forValue("message_type", "mymessage", "The type"), Field("length", len, "The length of a thing"), ] ) logger.write({"message_type": "mymessage", "length": "thething"}, serializer) self.assertEqual(written, [{"message_type": "mymessage", "length": 8}]) def test_passedInDictionaryUnmodified(self): """ The dictionary passed in to L{Logger.write} is not modified. """ logger, written = makeLogger() serializer = _MessageSerializer( [ Field.forValue("message_type", "mymessage", "The type"), Field("length", len, "The length of a thing"), ] ) d = {"message_type": "mymessage", "length": "thething"} original = d.copy() logger.write(d, serializer) self.assertEqual(d, original) def test_safeUnicodeDictionary(self): """ L{Logger._safeUnicodeDictionary} converts the given dictionary's values and keys to unicode using C{safeunicode}. """ class badobject(object): def __repr__(self): raise TypeError() dictionary = {badobject(): 123, 123: badobject()} badMessage = "eliot: unknown, unicode() raised exception" self.assertEqual( eval(Logger()._safeUnicodeDictionary(dictionary)), {badMessage: "123", "123": badMessage}, ) def test_safeUnicodeDictionaryFallback(self): """ If converting the dictionary failed for some reason, L{Logger._safeUnicodeDictionary} runs C{repr} on the object. 
""" self.assertEqual(Logger()._safeUnicodeDictionary(None), "None") def test_safeUnicodeDictionaryFallbackFailure(self): """ If all else fails, L{Logger._safeUnicodeDictionary} just gives up. """ class badobject(object): def __repr__(self): raise TypeError() self.assertEqual( Logger()._safeUnicodeDictionary(badobject()), "eliot: unknown, unicode() raised exception", ) def test_serializationErrorTraceback(self): """ If serialization fails in L{Logger.write}, a traceback is logged, along with a C{eliot:serialization_failure} message for debugging purposes. """ logger, written = makeLogger() def raiser(i): raise RuntimeError("oops") serializer = _MessageSerializer( [ Field.forValue("message_type", "mymessage", "The type"), Field("fail", raiser, "Serialization fail"), ] ) message = {"message_type": "mymessage", "fail": "will"} logger.write(message, serializer) self.assertEqual(len(written), 2) tracebackMessage = written[0] assertContainsFields( self, tracebackMessage, { "exception": "%s.RuntimeError" % (RuntimeError.__module__,), "message_type": "eliot:traceback", }, ) self.assertIn("RuntimeError: oops", tracebackMessage["traceback"]) # Calling _safeUnicodeDictionary multiple times leads to # inconsistent results due to hash ordering, so compare contents: assertContainsFields( self, written[1], {"message_type": "eliot:serialization_failure"} ) self.assertEqual( eval(written[1]["message"]), dict((repr(key), repr(value)) for (key, value) in message.items()), ) def test_destinationExceptionCaught(self): """ If a destination throws an exception, an appropriate error is logged. 
""" logger = Logger() logger._destinations = Destinations() dest = BadDestination() logger._destinations.add(dest) message = {"hello": 123} logger.write({"hello": 123}) assertContainsFields( self, dest[0], { "message_type": "eliot:destination_failure", "message": logger._safeUnicodeDictionary(message), "reason": "ono", "exception": "eliot.tests.test_output.MyException", }, ) def test_destinationMultipleExceptionsCaught(self): """ If multiple destinations throw an exception, an appropriate error is logged for each. """ logger = Logger() logger._destinations = Destinations() logger._destinations.add(BadDestination()) logger._destinations.add(lambda msg: 1 / 0) messages = [] logger._destinations.add(messages.append) try: 1 / 0 except ZeroDivisionError as e: zero_divide = str(e) zero_type = ZeroDivisionError.__module__ + ".ZeroDivisionError" message = {"hello": 123} logger.write({"hello": 123}) def remove(key): return [message.pop(key) for message in messages[1:]] # Make sure we have task_level & task_uuid in exception messages. task_levels = remove("task_level") task_uuids = remove("task_uuid") timestamps = remove("timestamp") self.assertEqual( ( abs(timestamps[0] + timestamps[1] - 2 * time()) < 1, task_levels == [[1], [1]], len([UUID(uuid) for uuid in task_uuids]) == 2, messages, ), ( True, True, True, [ message, { "message_type": "eliot:destination_failure", "message": logger._safeUnicodeDictionary(message), "reason": "ono", "exception": "eliot.tests.test_output.MyException", }, { "message_type": "eliot:destination_failure", "message": logger._safeUnicodeDictionary(message), "reason": zero_divide, "exception": zero_type, }, ], ), ) def test_destinationExceptionCaughtTwice(self): """ If a destination throws an exception, and the logged error about it also causes an exception, then just drop that exception on the floor, since there's nothing we can do with it. 
""" logger = Logger() logger._destinations = Destinations() def always_raise(message): raise ZeroDivisionError() logger._destinations.add(always_raise) # No exception raised; since everything is dropped no other # assertions to be made. logger.write({"hello": 123}) class PEP8Tests(TestCase): """ Tests for PEP 8 method compatibility. """ def test_flush_tracebacks(self): """ L{MemoryLogger.flush_tracebacks} is the same as L{MemoryLogger.flushTracebacks} """ self.assertEqual(MemoryLogger.flush_tracebacks, MemoryLogger.flushTracebacks) class ToFileTests(TestCase): """ Tests for L{to_file}. """ def test_to_file_adds_destination(self): """ L{to_file} adds a L{FileDestination} destination with the given file. """ f = stdout to_file(f) expected = FileDestination(file=f) self.addCleanup(Logger._destinations.remove, expected) self.assertIn(expected, Logger._destinations._destinations) def test_to_file_custom_encoder(self): """ L{to_file} accepts a custom encoder, and sets it on the resulting L{FileDestination}. """ f = stdout encoder = object() to_file(f, encoder=encoder) expected = FileDestination(file=f, encoder=encoder) self.addCleanup(Logger._destinations.remove, expected) self.assertIn(expected, Logger._destinations._destinations) def test_bytes_values(self): """ DEPRECATED: On Python 3L{FileDestination} will encode bytes as if they were UTF-8 encoded strings when writing to BytesIO only. 
""" message = {"x": b"abc"} bytes_f = BytesIO() destination = FileDestination(file=bytes_f) destination(message) self.assertEqual( [json.loads(line) for line in bytes_f.getvalue().splitlines()], [{"x": "abc"}], ) @skipUnless(np, "NumPy is not installed.") def test_default_encoder_is_EliotJSONEncoder(self): """The default encoder if none are specified is EliotJSONEncoder.""" message = {"x": np.int64(3)} f = StringIO() destination = FileDestination(file=f) destination(message) self.assertEqual( [json.loads(line) for line in f.getvalue().splitlines()], [{"x": 3}] ) def test_filedestination_writes_json_bytes(self): """ L{FileDestination} writes JSON-encoded messages to a file that accepts bytes. """ message1 = {"x": 123} message2 = {"y": None, "x": "abc"} bytes_f = BytesIO() destination = FileDestination(file=bytes_f) destination(message1) destination(message2) self.assertEqual( [json.loads(line) for line in bytes_f.getvalue().splitlines()], [message1, message2], ) def test_filedestination_custom_encoder(self): """ L{FileDestionation} can use a custom encoder. """ custom = object() class CustomEncoder(pyjson.JSONEncoder): def default(self, o): if o is custom: return "CUSTOM!" else: return pyjson.JSONEncoder.default(self, o) message = {"x": 123, "z": custom} f = BytesIO() destination = FileDestination(file=f, encoder=CustomEncoder) destination(message) self.assertEqual( json.loads(f.getvalue().splitlines()[0]), {"x": 123, "z": "CUSTOM!"} ) def test_filedestination_flushes(self): """ L{FileDestination} flushes after every write, to ensure logs get written out even if the local buffer hasn't filled up. 
""" path = mktemp() # File with large buffer: f = open(path, "wb", 1024 * 1024 * 10) # and a small message that won't fill the buffer: message1 = {"x": 123} destination = FileDestination(file=f) destination(message1) # Message got written even though buffer wasn't filled: self.assertEqual( [json.loads(line) for line in open(path, "rb").read().splitlines()], [message1], ) def test_filedestination_writes_json_unicode(self): """ L{FileDestination} writes JSON-encoded messages to file that only accepts Unicode. """ message = {"x": "\u1234"} unicode_f = StringIO() destination = FileDestination(file=unicode_f) destination(message) self.assertEqual(pyjson.loads(unicode_f.getvalue()), message) def test_filedestination_unwriteable_file(self): """ L{FileDestination} raises a runtime error if the given file isn't writeable. """ path = mktemp() open(path, "w").close() f = open(path, "r") with self.assertRaises(RuntimeError): FileDestination(f) eliot-1.11.0/eliot/tests/test_pyinstaller.py0000664000175000017500000000230113470775105022644 0ustar itamarstitamarst00000000000000"""Test for pyinstaller compatibility.""" from __future__ import absolute_import from unittest import TestCase, SkipTest from tempfile import mkdtemp, NamedTemporaryFile from subprocess import check_call, CalledProcessError import os from six import PY2 if PY2: FileNotFoundError = OSError class PyInstallerTests(TestCase): """Make sure PyInstaller doesn't break Eliot.""" def setUp(self): try: check_call(["pyinstaller", "--help"]) except (CalledProcessError, FileNotFoundError): raise SkipTest("Can't find pyinstaller.") def test_importable(self): """The Eliot package can be imported inside a PyInstaller packaged binary.""" output_dir = mkdtemp() with NamedTemporaryFile(mode="w") as f: f.write("import eliot; import eliot.prettyprint\n") f.flush() check_call( [ "pyinstaller", "--distpath", output_dir, "-F", "-n", "importeliot", f.name, ] ) check_call([os.path.join(output_dir, "importeliot")]) 
eliot-1.11.0/eliot/tests/test_validation.py0000664000175000017500000007567713573001140022443 0ustar itamarstitamarst00000000000000""" Tests for L{eliot._validation}. """ from __future__ import unicode_literals from unittest import TestCase from six import text_type as unicode from .._validation import ( Field, MessageType, ActionType, ValidationError, fields, _MessageSerializer, ) from .._action import start_action, startTask from .._output import MemoryLogger from ..serializers import identity from .. import add_destination, remove_destination class TypedFieldTests(TestCase): """ Tests for L{Field.forTypes}. """ def test_validateCorrectType(self): """ L{Field.validate} will not raise an exception if the given value is in the list of supported classes. """ field = Field.forTypes("path", [unicode, int], "A path!") field.validate(123) field.validate("hello") def test_validateNone(self): """ When given a "class" of C{None}, L{Field.validate} will support validating C{None}. """ field = Field.forTypes("None", [None], "Nothing!") field.validate(None) def test_validateWrongType(self): """ L{Field.validate} will raise a L{ValidationError} exception if the given value's type is not in the list of supported classes. """ field = Field.forTypes("key", [int], "An integer key") self.assertRaises(ValidationError, field.validate, "lala") self.assertRaises(ValidationError, field.validate, None) self.assertRaises(ValidationError, field.validate, object()) def test_extraValidatorPasses(self): """ L{Field.validate} will not raise an exception if the extra validator does not raise an exception. """ def validate(i): if i > 10: return else: raise ValidationError("too small") field = Field.forTypes("key", [int], "An integer key", validate) field.validate(11) def test_extraValidatorFails(self): """ L{Field.validate} will raise a L{ValidationError} exception if the extra validator raises one. 
""" def validate(i): if i > 10: return else: raise ValidationError("too small") field = Field.forTypes("key", [int], "An int", validate) self.assertRaises(ValidationError, field.validate, 10) def test_onlyValidTypes(self): """ Only JSON supported types can be passed to L{Field.forTypes}. """ self.assertRaises(TypeError, Field.forTypes, "key", [complex], "Oops") def test_listIsValidType(self): """ A C{list} is a valid type for L{Field.forTypes}. """ Field.forTypes("key", [list], "Oops") def test_dictIsValidType(self): """ A C{dict} is a valid type for L{Field.forTypes}. """ Field.forTypes("key", [dict], "Oops") class FieldTests(TestCase): """ Tests for L{Field}. """ def test_description(self): """ L{Field.description} stores the passed in description. """ field = Field("path", identity, "A path!") self.assertEqual(field.description, "A path!") def test_optionalDescription(self): """ L{Field} can be constructed with no description. """ field = Field("path", identity) self.assertEqual(field.description, "") def test_key(self): """ L{Field.key} stores the passed in field key. """ field = Field("path", identity, "A path!") self.assertEqual(field.key, "path") def test_serialize(self): """ L{Field.serialize} calls the given serializer function. """ result = [] Field("key", result.append, "field").serialize(123) self.assertEqual(result, [123]) def test_serializeResult(self): """ L{Field.serialize} returns the result of the given serializer function. """ result = Field("key", lambda obj: 456, "field").serialize(None) self.assertEqual(result, 456) def test_serializeCallsValidate(self): """ L{Field.validate} calls the serializer, in case that raises an exception for the given input. """ class MyException(Exception): pass def serialize(obj): raise MyException() field = Field("key", serialize, "") self.assertRaises(MyException, field.validate, 123) def test_noExtraValidator(self): """ L{Field.validate} doesn't break if there is no extra validator. 
""" field = Field("key", identity, "") field.validate(123) def test_extraValidatorPasses(self): """ L{Field.validate} will not raise an exception if the extra validator does not raise an exception. """ def validate(i): if i > 10: return else: raise ValidationError("too small") field = Field("path", identity, "A path!", validate) field.validate(11) def test_extraValidatorFails(self): """ L{Field.validate} will raise a L{ValidationError} exception if the extra validator raises one. """ def validate(i): if i > 10: return else: raise ValidationError("too small") field = Field("path", identity, "A path!", validate) self.assertRaises(ValidationError, field.validate, 10) class FieldForValueTests(TestCase): """ Tests for L{Field.forValue}. """ def test_forValue(self): """ L{Field.forValue} creates a L{Field} with the given key and description. """ field = Field.forValue("key", None, "description") self.assertEqual(field.key, "key") self.assertEqual(field.description, "description") def test_forValueGoodValue(self): """ The L{Field.forValue}-created L{Field} validates the value it was constructed with. """ field = Field.forValue("key", 1234, "description") field.validate(1234) def test_valueFieldWrongValue(self): """ The L{Field.forValue}-created L{Field} raises a L{ValidationError} for different values. """ field = Field.forValue("key", 1234, "description") self.assertRaises(ValidationError, field.validate, 5678) def test_serialize(self): """ The L{Field.forValue}-created L{Field} returns the given object when serializing, regardless of input. If the caller is buggy, no need to log garbage if we know what needs logging. These bugs will be caught by unit tests, anyway, if author of code is doing things correctly. """ field = Field.forValue("key", 1234, "description") self.assertEqual(field.serialize(None), 1234) class FieldsTests(TestCase): """ Tests for L{fields}. 
""" def test_positional(self): """ L{fields} accepts positional arguments of L{Field} instances and combines them with fields specied as keyword arguments. """ a_field = Field("akey", identity) l = fields(a_field, another=str) self.assertIn(a_field, l) self.assertEqual( {(type(field), field.key) for field in l}, {(Field, "akey"), (Field, "another")}, ) def test_keys(self): """ L{fields} creates L{Field} instances with the given keys. """ l = fields(key=int, status=str) self.assertEqual( {(type(field), field.key) for field in l}, {(Field, "key"), (Field, "status")}, ) def test_validTypes(self): """ The L{Field} instances constructed by L{fields} validate the specified types. """ (field,) = fields(key=int) self.assertRaises(ValidationError, field.validate, "abc") def test_noSerialization(self): """ The L{Field} instances constructed by L{fields} do no special serialization. """ (field,) = fields(key=int) self.assertEqual(field.serialize("abc"), "abc") class MessageSerializerTests(TestCase): """ Tests for L{_MessageSerializer}. """ def test_noMultipleFields(self): """ L{_MessageSerializer.__init__} will raise a L{ValueError} exception if constructed with more than object per field name. """ self.assertRaises( ValueError, _MessageSerializer, [ Field("akey", identity, ""), Field("akey", identity, ""), Field("message_type", identity, ""), ], ) def test_noBothTypeFields(self): """ L{_MessageSerializer.__init__} will raise a L{ValueError} exception if constructed with both a C{"message_type"} and C{"action_type"} field. """ self.assertRaises( ValueError, _MessageSerializer, [Field("message_type", identity, ""), Field("action_type", identity, "")], ) def test_missingTypeField(self): """ L{_MessageSerializer.__init__} will raise a L{ValueError} if there is neither a C{"message_type"} nor a C{"action_type"} field. 
""" self.assertRaises(ValueError, _MessageSerializer, []) def test_noTaskLevel(self): """ L{_MessageSerializer.__init__} will raise a L{ValueError} if there is a C{"task_level"} field included. """ self.assertRaises( ValueError, _MessageSerializer, [Field("message_type", identity, ""), Field("task_level", identity, "")], ) def test_noTaskUuid(self): """ L{_MessageSerializer.__init__} will raise a L{ValueError} if there is a C{"task_uuid"} field included. """ self.assertRaises( ValueError, _MessageSerializer, [Field("message_type", identity, ""), Field("task_uuid", identity, "")], ) def test_noTimestamp(self): """ L{_MessageSerializer.__init__} will raise a L{ValueError} if there is a C{"timestamp"} field included. """ self.assertRaises( ValueError, _MessageSerializer, [Field("message_type", identity, ""), Field("timestamp", identity, "")], ) def test_noUnderscoreStart(self): """ L{_MessageSerializer.__init__} will raise a L{ValueError} if there is a field included whose name starts with C{"_"}. """ self.assertRaises( ValueError, _MessageSerializer, [Field("message_type", identity, ""), Field("_key", identity, "")], ) def test_serialize(self): """ L{_MessageSerializer.serialize} will serialize all values in the given dictionary using the respective L{Field}. """ serializer = _MessageSerializer( [ Field.forValue("message_type", "mymessage", "The type"), Field("length", len, "The length of a thing"), ] ) message = {"message_type": "mymessage", "length": "thething"} serializer.serialize(message) self.assertEqual(message, {"message_type": "mymessage", "length": 8}) def test_missingSerializer(self): """ If a value in the dictionary passed to L{_MessageSerializer.serialize} has no respective field, it is unchanged. Logging attempts to capture everything, with minimal work; with any luck this value is JSON-encodable. Unit tests should catch such bugs, in any case. 
""" serializer = _MessageSerializer( [ Field.forValue("message_type", "mymessage", "The type"), Field("length", len, "The length of a thing"), ] ) message = {"message_type": "mymessage", "length": "thething", "extra": 123} serializer.serialize(message) self.assertEqual( message, {"message_type": "mymessage", "length": 8, "extra": 123} ) def test_fieldInstances(self): """ Fields to L{_MessageSerializer.__init__} should be instances of L{Field}. """ a_field = Field("a_key", identity) arg = object() with self.assertRaises(TypeError) as cm: _MessageSerializer([a_field, arg]) self.assertEqual(("Expected a Field instance but got", arg), cm.exception.args) class MessageTypeTests(TestCase): """ Tests for L{MessageType}. """ def messageType(self): """ Return a L{MessageType} suitable for unit tests. """ return MessageType( "myapp:mysystem", [Field.forTypes("key", [int], ""), Field.forTypes("value", [int], "")], "A message type", ) def test_validateMissingType(self): """ L{MessageType._serializer.validate} raises a L{ValidationError} exception if the given dictionary has no C{"message_type"} field. """ messageType = self.messageType() self.assertRaises( ValidationError, messageType._serializer.validate, {"key": 1, "value": 2} ) def test_validateWrongType(self): """ L{MessageType._serializer.validate} raises a L{ValidationError} exception if the given dictionary has the wrong value for the C{"message_type"} field. """ messageType = self.messageType() self.assertRaises( ValidationError, messageType._serializer.validate, {"key": 1, "value": 2, "message_type": "wrong"}, ) def test_validateExtraField(self): """ L{MessageType._serializer.validate} raises a L{ValidationError} exception if the given dictionary has an extra unknown field. 
""" messageType = self.messageType() self.assertRaises( ValidationError, messageType._serializer.validate, {"key": 1, "value": 2, "message_type": "myapp:mysystem", "extra": "hello"}, ) def test_validateMissingField(self): """ L{MessageType._serializer.validate} raises a L{ValidationError} exception if the given dictionary has a missing field. """ messageType = self.messageType() self.assertRaises( ValidationError, messageType._serializer.validate, {"key": 1, "message_type": "myapp:mysystem"}, ) def test_validateFieldValidation(self): """ L{MessageType._serializer.validate} raises a L{ValidationError} exception if the one of the field values fails field-specific validation. """ messageType = self.messageType() self.assertRaises( ValidationError, messageType._serializer.validate, {"key": 1, "value": None, "message_type": "myapp:mysystem"}, ) def test_validateStandardFields(self): """ L{MessageType._serializer.validate} does not raise an exception if the dictionary has the standard fields that are added to all messages. """ messageType = self.messageType() messageType._serializer.validate( { "key": 1, "value": 2, "message_type": "myapp:mysystem", "task_level": "/", "task_uuid": "123", "timestamp": "xxx", } ) def test_call(self): """ L{MessageType.__call__} creates a new L{Message} with correct C{message_type} field value added. """ messageType = self.messageType() message = messageType() self.assertEqual(message._contents, {"message_type": messageType.message_type}) def test_callSerializer(self): """ L{MessageType.__call__} creates a new L{Message} with the L{MessageType._serializer} as its serializer. """ messageType = self.messageType() message = messageType() self.assertIs(message._serializer, messageType._serializer) def test_callWithFields(self): """ L{MessageType.__call__} creates a new L{Message} with the additional given fields. 
""" messageType = self.messageType() message = messageType(key=2, value=3) self.assertEqual( message._contents, {"message_type": messageType.message_type, "key": 2, "value": 3}, ) def test_logCallsDefaultLoggerWrite(self): """ L{MessageType.log} calls the given logger's C{write} method with a dictionary that is superset of the L{Message} contents. """ messages = [] add_destination(messages.append) self.addCleanup(remove_destination, messages.append) message_type = self.messageType() message_type.log(key=1234, value=3) self.assertEqual(messages[0]["key"], 1234) self.assertEqual(messages[0]["value"], 3) self.assertEqual(messages[0]["message_type"], message_type.message_type) def test_description(self): """ L{MessageType.description} stores the passed in description. """ messageType = self.messageType() self.assertEqual(messageType.description, "A message type") def test_optionalDescription(self): """ L{MessageType} can be constructed without a description. """ messageType = MessageType("name", []) self.assertEqual(messageType.description, "") class ActionTypeTestsMixin(object): """ Mixin for tests for the three L{ActionType} message variants. """ def getValidMessage(self): """ Return a dictionary of a message that is of the action status being tested. """ raise NotImplementedError("Override in subclasses") def getSerializer(self, actionType): """ Given a L{ActionType}, return the L{_MessageSerializer} for this variant. """ raise NotImplementedError("Override in subclasses") def actionType(self): """ Return a L{ActionType} suitable for unit tests. """ return ActionType( "myapp:mysystem:myaction", [Field.forTypes("key", [int], "")], # start fields [Field.forTypes("value", [int], "")], # success fields "A action type", ) def test_validateMissingType(self): """ L{ActionType.validate} raises a L{ValidationError} exception if the given dictionary has no C{"action_type"} field. 
""" actionType = self.actionType() message = self.getValidMessage() del message["action_type"] self.assertRaises( ValidationError, self.getSerializer(actionType).validate, message ) def test_validateWrongType(self): """ L{ActionType.validate} raises a L{ValidationError} exception if the given dictionary has the wrong value for the C{"action_type"} field. """ actionType = self.actionType() message = self.getValidMessage() message["action_type"] = "xxx" self.assertRaises( ValidationError, self.getSerializer(actionType).validate, message ) def test_validateExtraField(self): """ L{ActionType.validate} raises a L{ValidationError} exception if the given dictionary has an extra unknown field. """ actionType = self.actionType() message = self.getValidMessage() message["extra"] = "ono" self.assertRaises( ValidationError, self.getSerializer(actionType).validate, message ) def test_validateMissingField(self): """ L{ActionType.validate} raises a L{ValidationError} exception if the given dictionary has a missing field. """ actionType = self.actionType() message = self.getValidMessage() for key in message: if key != "action_type": del message[key] break self.assertRaises( ValidationError, self.getSerializer(actionType).validate, message ) def test_validateFieldValidation(self): """ L{ActionType.validate} raises a L{ValidationError} exception if the one of the field values fails field-specific validation. """ actionType = self.actionType() message = self.getValidMessage() for key in message: if key != "action_type": message[key] = object() break self.assertRaises( ValidationError, self.getSerializer(actionType).validate, message ) def test_validateStandardFields(self): """ L{ActionType.validate} does not raise an exception if the dictionary has the standard fields that are added to all messages. 
""" actionType = self.actionType() message = self.getValidMessage() message.update({"task_level": "/", "task_uuid": "123", "timestamp": "xxx"}) self.getSerializer(actionType).validate(message) class ActionTypeStartMessage(TestCase, ActionTypeTestsMixin): """ Tests for L{ActionType} validation of action start messages. """ def getValidMessage(self): """ Return a dictionary of a valid action start message. """ return { "action_type": "myapp:mysystem:myaction", "action_status": "started", "key": 1, } def getSerializer(self, actionType): return actionType._serializers.start class ActionTypeSuccessMessage(TestCase, ActionTypeTestsMixin): """ Tests for L{ActionType} validation of action success messages. """ def getValidMessage(self): """ Return a dictionary of a valid action success message. """ return { "action_type": "myapp:mysystem:myaction", "action_status": "succeeded", "value": 2, } def getSerializer(self, actionType): return actionType._serializers.success class ActionTypeFailureMessage(TestCase, ActionTypeTestsMixin): """ Tests for L{ActionType} validation of action failure messages. """ def getValidMessage(self): """ Return a dictionary of a valid action failure message. """ return { "action_type": "myapp:mysystem:myaction", "action_status": "failed", "exception": "exceptions.RuntimeError", "reason": "because", } def getSerializer(self, actionType): return actionType._serializers.failure def test_validateExtraField(self): """ Additional fields (which can be added by exception extraction) don't cause a validation failure for failed action messages. """ actionType = self.actionType() message = self.getValidMessage() message.update({"task_level": "/", "task_uuid": "123", "timestamp": "xxx"}) message.update({"extra_field": "hello"}) self.getSerializer(actionType).validate(message) class ChildActionTypeStartMessage(TestCase): """ Tests for validation of child actions created with L{ActionType}. 
""" def test_childActionUsesChildValidator(self): """ Validation of child actions uses the child's validator. """ A = ActionType("myapp:foo", [Field.forTypes("a", [int], "")], [], "") B = ActionType("myapp:bar", [Field.forTypes("b", [int], "")], [], "") logger = MemoryLogger() with A(logger, a=1): with B(logger, b=2): pass # If wrong serializers/validators were used, this will fail: logger.validate() class ActionTypeTests(TestCase): """ General tests for L{ActionType}. """ def actionType(self): """ Return a L{ActionType} suitable for unit tests. """ return ActionType("myapp:mysystem:myaction", [], [], "An action type") def test_call(self): """ L{ActionType.__call__} returns the result of calling C{self._start_action}. """ actionType = self.actionType() actionType._start_action = lambda *args, **kwargs: 1234 result = actionType(object()) self.assertEqual(result, 1234) def test_callArguments(self): """ L{ActionType.__call__} calls C{self._start_action} with the logger, action type, serializers and passed in fields. """ called = [] actionType = self.actionType() actionType._start_action = lambda *args, **kwargs: called.append((args, kwargs)) logger = object() actionType(logger, key=5) self.assertEqual( called, [ ( (logger, "myapp:mysystem:myaction", actionType._serializers), {"key": 5}, ) ], ) def test_defaultStartAction(self): """ L{ActionType._start_action} is L{eliot.start_action} by default. """ self.assertEqual(ActionType._start_action, start_action) def test_as_task(self): """ L{ActionType.as_task} returns the result of calling C{self._startTask}. """ actionType = self.actionType() actionType._startTask = lambda *args, **kwargs: 1234 result = actionType.as_task(object()) self.assertEqual(result, 1234) def test_as_taskArguments(self): """ L{ActionType.as_task} calls C{self._startTask} with the logger, action type and passed in fields. 
""" called = [] actionType = self.actionType() actionType._startTask = lambda *args, **kwargs: called.append((args, kwargs)) logger = object() actionType.as_task(logger, key=5) self.assertEqual( called, [ ( (logger, "myapp:mysystem:myaction", actionType._serializers), {"key": 5}, ) ], ) def test_defaultStartTask(self): """ L{ActionType._startTask} is L{eliot.startTask} by default. """ self.assertEqual(ActionType._startTask, startTask) def test_description(self): """ L{ActionType.description} stores the passed in description. """ actionType = self.actionType() self.assertEqual(actionType.description, "An action type") def test_optionalDescription(self): """ L{ActionType} can be constructed without a description. """ actionType = ActionType("name", [], []) self.assertEqual(actionType.description, "") def test_as_taskDefaultLogger(self): """ L{ActionType.as_task} doesn't require passing in a logger. """ actionType = self.actionType() actionType.as_task(key=5) class EndToEndValidationTests(TestCase): """ Test validation of messages created using L{MessageType} and L{ActionType}. """ MESSAGE = MessageType( "myapp:mymessage", [Field.forTypes("key", [int], "The key")], "A message for testing.", ) ACTION = ActionType( "myapp:myaction", [Field.forTypes("key", [int], "The key")], [Field.forTypes("result", [unicode], "The result")], "An action for testing.", ) def test_correctFromMessageType(self): """ A correct message created using L{MessageType} will be logged to a L{MemoryLogger}. """ logger = MemoryLogger() msg = self.MESSAGE().bind(key=123) msg.write(logger) self.assertEqual(logger.messages[0]["key"], 123) def test_incorrectFromMessageType(self): """ An incorrect message created using L{MessageType} will raise a L{ValidationError} in L{MemoryLogger.validate}. 
""" logger = MemoryLogger() msg = self.MESSAGE().bind(key="123") msg.write(logger) self.assertRaises(ValidationError, logger.validate) def test_correctStartFromActionType(self): """ A correct start message created using a L{ActionType} will be logged to a L{MemoryLogger}. """ logger = MemoryLogger() with self.ACTION(logger, key=123) as action: action.addSuccessFields(result="foo") self.assertEqual(logger.messages[0]["key"], 123) def test_omitLoggerFromActionType(self): """ If no logger is given to the L{ActionType} the default logger is used. """ messages = [] add_destination(messages.append) self.addCleanup(remove_destination, messages.append) with self.ACTION(key=123) as action: action.add_success_fields(result="foo") self.assertEqual(messages[0]["key"], 123) def test_incorrectStartFromActionType(self): """ An incorrect start message created using a L{ActionType} will raise a L{ValidationError}. """ logger = MemoryLogger() with self.ACTION(logger, key="123") as action: action.addSuccessFields(result="foo") self.assertRaises(ValidationError, logger.validate) def test_correctSuccessFromActionType(self): """ A correct success message created using a L{ActionType} will be logged to a L{MemoryLogger}. """ logger = MemoryLogger() with self.ACTION(logger, key=123) as action: action.addSuccessFields(result="foo") self.assertEqual(logger.messages[1]["result"], "foo") def test_incorrectSuccessFromActionType(self): """ An incorrect success message created using a L{ActionType} will raise a L{ValidationError}. """ logger = MemoryLogger() with self.ACTION(logger, key=123) as action: action.addSuccessFields(result=-1) self.assertRaises(ValidationError, logger.validate) def test_correctFailureFromActionType(self): """ A correct failure message created using a L{ActionType} will be logged to a L{MemoryLogger}. 
""" logger = MemoryLogger() def run(): with self.ACTION(logger, key=123): raise RuntimeError("hello") self.assertRaises(RuntimeError, run) self.assertEqual(logger.messages[1]["reason"], "hello") class PEP8Tests(TestCase): """ Tests for PEP 8 method compatibility. """ def test_for_value(self): """ L{Field.for_value} is the same as L{Field.forValue}. """ self.assertEqual(Field.for_value, Field.forValue) def test_for_types(self): """ L{Field.for_types} is the same as L{Field.forTypes}. """ self.assertEqual(Field.for_types, Field.forTypes) def test_as_task(self): """ L{ActionType.as_task} is the same as L{ActionType.asTask}. """ self.assertEqual(ActionType.as_task, ActionType.asTask) eliot-1.11.0/eliot/tests/test_parse.py0000664000175000017500000002470613470775105021425 0ustar itamarstitamarst00000000000000""" Tests for L{eliot._parse}. """ from __future__ import unicode_literals from unittest import TestCase from itertools import chain from six import text_type as unicode, assertCountEqual from six.moves import zip_longest from hypothesis import strategies as st, given, assume from pyrsistent import PClass, field, pvector_field from .. import start_action, Message from ..testing import MemoryLogger from ..parse import Task, Parser from .._message import ( WrittenMessage, MESSAGE_TYPE_FIELD, TASK_LEVEL_FIELD, TASK_UUID_FIELD, ) from .._action import FAILED_STATUS, ACTION_STATUS_FIELD, WrittenAction from .strategies import labels class ActionStructure(PClass): """ A tree structure used to generate/compare to Eliot trees. Individual messages are encoded as a unicode string; actions are encoded as a L{ActionStructure} instance. """ type = field(type=(unicode, None.__class__)) children = pvector_field(object) # XXX ("StubAction", unicode)) failed = field(type=bool) @classmethod def from_written(cls, written): """ Create an L{ActionStructure} or L{unicode} from a L{WrittenAction} or L{WrittenMessage}. 
""" if isinstance(written, WrittenMessage): return written.as_dict()[MESSAGE_TYPE_FIELD] else: # WrittenAction if not written.end_message: raise AssertionError("Missing end message.") return cls( type=written.action_type, failed=( written.end_message.contents[ACTION_STATUS_FIELD] == FAILED_STATUS ), children=[cls.from_written(o) for o in written.children], ) @classmethod def to_eliot(cls, structure_or_message, logger): """ Given a L{ActionStructure} or L{unicode}, generate appropriate structured Eliot log mesages to given L{MemoryLogger}. """ if isinstance(structure_or_message, cls): action = structure_or_message try: with start_action(logger, action_type=action.type): for child in action.children: cls.to_eliot(child, logger) if structure_or_message.failed: raise RuntimeError("Make the eliot action fail.") except RuntimeError: pass else: Message.new(message_type=structure_or_message).write(logger) return logger.messages @st.composite def action_structures(draw): """ A Hypothesis strategy that creates a tree of L{ActionStructure} and L{unicode}. """ tree = draw(st.recursive(labels, st.lists, max_leaves=20)) def to_structure(tree_or_message): if isinstance(tree_or_message, list): return ActionStructure( type=draw(labels), failed=draw(st.booleans()), children=[to_structure(o) for o in tree_or_message], ) else: return tree_or_message return to_structure(tree) def _structure_and_messages(structure): messages = ActionStructure.to_eliot(structure, MemoryLogger()) return st.permutations(messages).map(lambda permuted: (structure, permuted)) # Hypothesis strategy that creates a tuple of ActionStructure/unicode and # corresponding serialized Eliot messages, randomly shuffled. STRUCTURES_WITH_MESSAGES = action_structures().flatmap(_structure_and_messages) def parse_to_task(messages): """ Feed a set of messages to a L{Task}. @param messages: Sequence of messages dictionaries to parse. @return: Resulting L{Task}. 
""" task = Task() for message in messages: task = task.add(message) return task class TaskTests(TestCase): """ Tests for L{Task}. """ @given(structure_and_messages=STRUCTURES_WITH_MESSAGES) def test_missing_action(self, structure_and_messages): """ If we parse messages (in shuffled order) but a start message is missing then the structure is still deduced correctly from the remaining messages. """ action_structure, messages = structure_and_messages assume(not isinstance(action_structure, unicode)) # Remove first start message we encounter; since messages are # shuffled the location removed will differ over Hypothesis test # iterations: messages = messages[:] for i, message in enumerate(messages): if message[TASK_LEVEL_FIELD][-1] == 1: # start message del messages[i] break task = parse_to_task(messages) parsed_structure = ActionStructure.from_written(task.root()) # We expect the action with missing start message to otherwise # be parsed correctly: self.assertEqual(parsed_structure, action_structure) @given(structure_and_messages=STRUCTURES_WITH_MESSAGES) def test_parse_from_random_order(self, structure_and_messages): """ If we shuffle messages and parse them the parser builds a tree of actions that is the same as the one used to generate the messages. Shuffled messages means we have to deal with (temporarily) missing information sufficiently well to be able to parse correctly once the missing information arrives. """ action_structure, messages = structure_and_messages task = Task() for message in messages: task = task.add(message) # Assert parsed structure matches input structure: parsed_structure = ActionStructure.from_written(task.root()) self.assertEqual(parsed_structure, action_structure) @given(structure_and_messages=STRUCTURES_WITH_MESSAGES) def test_is_complete(self, structure_and_messages): """ ``Task.is_complete()`` only returns true when all messages within the tree have been delivered. 
""" action_structure, messages = structure_and_messages task = Task() completed = [] for message in messages: task = task.add(message) completed.append(task.is_complete()) self.assertEqual(completed, [False for m in messages[:-1]] + [True]) def test_parse_contents(self): """ L{{Task.add}} parses the contents of the messages it receives. """ logger = MemoryLogger() with start_action(logger, action_type="xxx", y=123) as ctx: Message.new(message_type="zzz", z=4).write(logger) ctx.add_success_fields(foo=[1, 2]) messages = logger.messages expected = WrittenAction.from_messages( WrittenMessage.from_dict(messages[0]), [WrittenMessage.from_dict(messages[1])], WrittenMessage.from_dict(messages[2]), ) task = parse_to_task(messages) self.assertEqual(task.root(), expected) class ParserTests(TestCase): """ Tests for L{Parser}. """ @given( structure_and_messages1=STRUCTURES_WITH_MESSAGES, structure_and_messages2=STRUCTURES_WITH_MESSAGES, structure_and_messages3=STRUCTURES_WITH_MESSAGES, ) def test_parse_into_tasks( self, structure_and_messages1, structure_and_messages2, structure_and_messages3 ): """ Adding messages to a L{Parser} parses them into a L{Task} instances. """ _, messages1 = structure_and_messages1 _, messages2 = structure_and_messages2 _, messages3 = structure_and_messages3 all_messages = (messages1, messages2, messages3) # Need unique UUIDs per task: assume(len(set(m[0][TASK_UUID_FIELD] for m in all_messages)) == 3) parser = Parser() all_tasks = [] for message in chain(*zip_longest(*all_messages)): if message is not None: completed_tasks, parser = parser.add(message) all_tasks.extend(completed_tasks) assertCountEqual( self, all_tasks, [parse_to_task(msgs) for msgs in all_messages] ) @given(structure_and_messages=STRUCTURES_WITH_MESSAGES) def test_incomplete_tasks(self, structure_and_messages): """ Until a L{Task} is fully parsed, it is returned in L{Parser.incomplete_tasks}. 
""" _, messages = structure_and_messages parser = Parser() task = Task() incomplete_matches = [] for message in messages[:-1]: _, parser = parser.add(message) task = task.add(message) incomplete_matches.append(parser.incomplete_tasks() == [task]) task = task.add(messages[-1]) _, parser = parser.add(messages[-1]) self.assertEqual( dict( incomplete_matches=incomplete_matches, final_incompleted=parser.incomplete_tasks(), ), dict(incomplete_matches=[True] * (len(messages) - 1), final_incompleted=[]), ) @given( structure_and_messages1=STRUCTURES_WITH_MESSAGES, structure_and_messages2=STRUCTURES_WITH_MESSAGES, structure_and_messages3=STRUCTURES_WITH_MESSAGES, ) def test_parse_stream( self, structure_and_messages1, structure_and_messages2, structure_and_messages3 ): """ L{Parser.parse_stream} returns an iterable of completed and then incompleted tasks. """ _, messages1 = structure_and_messages1 _, messages2 = structure_and_messages2 _, messages3 = structure_and_messages3 # Need at least one non-dropped message in partial tree: assume(len(messages3) > 1) # Need unique UUIDs per task: assume( len(set(m[0][TASK_UUID_FIELD] for m in (messages1, messages2, messages3))) == 3 ) # Two complete tasks, one incomplete task: all_messages = (messages1, messages2, messages3[:-1]) all_tasks = list( Parser.parse_stream( [m for m in chain(*zip_longest(*all_messages)) if m is not None] ) ) assertCountEqual( self, all_tasks, [parse_to_task(msgs) for msgs in all_messages] ) class BackwardsCompatibility(TestCase): """Tests for backwards compatibility.""" def test_imports(self): """Old ways of importing still work.""" import eliot._parse from eliot import _parse import eliot.parse self.assertIs(eliot.parse, eliot._parse) self.assertIs(_parse, eliot.parse) eliot-1.11.0/eliot/tests/test_message.py0000664000175000017500000002576413573001140021725 0ustar itamarstitamarst00000000000000""" Tests for L{eliot._message}. 
""" from __future__ import unicode_literals from unittest import TestCase from uuid import UUID import time from pyrsistent import pmap try: from twisted.python.failure import Failure except ImportError: Failure = None from .._message import WrittenMessage, Message, log_message from .._output import MemoryLogger from .._action import Action, start_action, TaskLevel from .. import add_destinations, remove_destination class DeprecatedMessageTests(TestCase): """ Test for L{Message}. """ def test_new(self): """ L{Message.new} returns a new L{Message} that is initialized with the given keyword arguments as its contents, and a default message type. """ msg = Message.new(key="value", another=2) self.assertEqual(msg.contents(), {"key": "value", "another": 2}) def test_contentsCopies(self): """ L{Message.contents} returns a copy of the L{Message} contents. """ msg = Message.new(key="value") del msg.contents()["key"] self.assertEqual(msg.contents(), {"key": "value"}) def test_bindOverwrites(self): """ L{Message.bind} returns a new L{Message} whose contents include the additional given fields. """ msg = Message.new(key="value", another=2) another = msg.bind(another=3, more=4) self.assertIsInstance(another, Message) self.assertEqual(another.contents(), {"key": "value", "another": 3, "more": 4}) def test_bindPreservesOriginal(self): """ L{Message.bind} does not mutate the instance it is called on. """ msg = Message.new(key=4) msg.bind(key=6) self.assertEqual(msg.contents(), {"key": 4}) def test_writeCallsLoggerWrite(self): """ L{Message.write} calls the given logger's C{write} method with a dictionary that is superset of the L{Message} contents. """ logger = MemoryLogger() msg = Message.new(key=4) msg.write(logger) self.assertEqual(len(logger.messages), 1) self.assertEqual(logger.messages[0]["key"], 4) def test_writeDefaultLogger(self): """ L{Message.write} writes to the default logger if none is given. 
""" messages = [] add_destinations(messages.append) self.addCleanup(remove_destination, messages.append) Message.new(some_key=1234).write() self.assertEqual(messages[0]["some_key"], 1234) def test_writeCreatesNewDictionary(self): """ L{Message.write} creates a new dictionary on each call. This is important because we mutate the dictionary in ``Logger.write``, so we want to make sure the ``Message`` is unchanged in that case. In general we want ``Message`` objects to be effectively immutable. """ class Logger(list): def write(self, d, serializer): self.append(d) logger = Logger() msg = Message.new(key=4) msg.write(logger) logger[0]["key"] = 5 msg.write(logger) self.assertEqual(logger[1]["key"], 4) def test_logCallsDefaultLoggerWrite(self): """ L{Message.log} calls the default logger's C{write} method with a dictionary that is superset of the L{Message} contents. """ messages = [] add_destinations(messages.append) self.addCleanup(remove_destination, messages.append) Message.log(some_key=1234) self.assertEqual(messages[0]["some_key"], 1234) def test_defaultTime(self): """ L{Message._time} returns L{time.time} by default. """ msg = Message({}) self.assertIs(msg._time, time.time) def test_writeAddsTimestamp(self): """ L{Message.write} adds a C{"timestamp"} field to the dictionary written to the logger, with the current time in seconds since the epoch. """ logger = MemoryLogger() msg = Message.new(key=4) msg.write(logger) self.assertTrue(time.time() - logger.messages[0]["timestamp"] < 0.1) def test_write_preserves_message_type(self): """ L{Message.write} doesn't add a C{message_type} if one is already set. 
""" logger = MemoryLogger() msg = Message.new(key=4, message_type="isetit") msg.write(logger) self.assertEqual(logger.messages[0]["message_type"], "isetit") self.assertNotIn("action_type", logger.messages[0]) def test_explicitAction(self): """ L{Message.write} adds the identification fields from the given L{Action} to the dictionary written to the logger, as well as a message_type if none is set. """ logger = MemoryLogger() action = Action(logger, "unique", TaskLevel(level=[]), "sys:thename") msg = Message.new(key=2) msg.write(logger, action) written = logger.messages[0] del written["timestamp"] self.assertEqual( written, {"task_uuid": "unique", "task_level": [1], "key": 2, "message_type": ""}, ) def test_implicitAction(self): """ If no L{Action} is specified, L{Message.write} adds the identification fields from the current execution context's L{Action} to the dictionary written to the logger. """ logger = MemoryLogger() action = Action(logger, "unique", TaskLevel(level=[]), "sys:thename") msg = Message.new(key=2) with action: msg.write(logger) written = logger.messages[0] del written["timestamp"] self.assertEqual( written, {"task_uuid": "unique", "task_level": [1], "key": 2, "message_type": ""}, ) def test_missingAction(self): """ If no L{Action} is specified, and the current execution context has no L{Action}, a new task_uuid is generated. This ensures all messages have a unique identity, as specified by task_uuid/task_level. """ logger = MemoryLogger() Message.new(key=2).write(logger) Message.new(key=3).write(logger) message1, message2 = logger.messages self.assertEqual( ( UUID(message1["task_uuid"]) != UUID(message2["task_uuid"]), message1["task_level"], message2["task_level"], ), (True, [1], [1]), ) def test_actionCounter(self): """ Each message written within the context of an L{Action} gets its C{task_level} field incremented. 
""" logger = MemoryLogger() msg = Message.new(key=2) with start_action(logger, "sys:thename"): for i in range(4): msg.write(logger) # We expect 6 messages: start action, 4 standalone messages, finish # action: self.assertEqual( [m["task_level"] for m in logger.messages], [[1], [2], [3], [4], [5], [6]] ) def test_writePassesSerializer(self): """ If a L{Message} is created with a serializer, it is passed as a second argument to the logger when C{write} is called. """ class ListLogger(list): def write(self, dictionary, serializer): self.append(serializer) logger = ListLogger() serializer = object() msg = Message({}, serializer) msg.write(logger) self.assertIs(logger[0], serializer) def test_serializerPassedInBind(self): """ The L{Message} returned by L{Message.bind} includes the serializer passed to the parent. """ serializer = object() msg = Message({}, serializer) msg2 = msg.bind(x=1) self.assertIs(msg2._serializer, serializer) def test_newWithSerializer(self): """ L{Message.new} can accept a serializer. """ serializer = object() msg = Message.new(serializer, x=1) self.assertIs(msg._serializer, serializer) class WrittenMessageTests(TestCase): """ Tests for L{WrittenMessage}. """ def test_as_dict(self): """ L{WrittenMessage.as_dict} returns the dictionary that will be serialized to the log. """ log_entry = pmap( {"timestamp": 1, "task_uuid": "unique", "task_level": [1], "foo": "bar"} ) self.assertEqual(WrittenMessage.from_dict(log_entry).as_dict(), log_entry) def test_from_dict(self): """ L{WrittenMessage.from_dict} converts a dictionary that has been deserialized from a log into a L{WrittenMessage} object. 
""" log_entry = pmap( {"timestamp": 1, "task_uuid": "unique", "task_level": [1], "foo": "bar"} ) parsed = WrittenMessage.from_dict(log_entry) self.assertEqual(parsed.timestamp, 1) self.assertEqual(parsed.task_uuid, "unique") self.assertEqual(parsed.task_level, TaskLevel(level=[1])) self.assertEqual(parsed.contents, pmap({"foo": "bar"})) class LogMessageTests(TestCase): """Tests for L{log_message}.""" def test_writes_message(self): """ L{log_message} writes to the default logger. """ messages = [] add_destinations(messages.append) self.addCleanup(remove_destination, messages.append) log_message(message_type="hello", some_key=1234) self.assertEqual(messages[0]["some_key"], 1234) self.assertEqual(messages[0]["message_type"], "hello") self.assertTrue(time.time() - messages[0]["timestamp"] < 0.1) def test_implicitAction(self): """ If no L{Action} is specified, L{log_message} adds the identification fields from the current execution context's L{Action} to the dictionary written to the logger. """ logger = MemoryLogger() action = Action(logger, "unique", TaskLevel(level=[]), "sys:thename") with action: log_message(key=2, message_type="a") written = logger.messages[0] del written["timestamp"] self.assertEqual( written, {"task_uuid": "unique", "task_level": [1], "key": 2, "message_type": "a"}, ) def test_missingAction(self): """ If no L{Action} is specified, and the current execution context has no L{Action}, a new task_uuid is generated. This ensures all messages have a unique identity, as specified by task_uuid/task_level. 
""" messages = [] add_destinations(messages.append) self.addCleanup(remove_destination, messages.append) log_message(key=2, message_type="") log_message(key=3, message_type="") message1, message2 = messages self.assertEqual( ( UUID(message1["task_uuid"]) != UUID(message2["task_uuid"]), message1["task_level"], message2["task_level"], ), (True, [1], [1]), ) eliot-1.11.0/eliot/tests/test_traceback.py0000664000175000017500000001605513470775105022230 0ustar itamarstitamarst00000000000000""" Tests for L{eliot._traceback}. """ from __future__ import unicode_literals from unittest import TestCase, SkipTest import traceback import sys try: from twisted.python.failure import Failure except ImportError: Failure = None from .._traceback import write_traceback, writeFailure, _writeTracebackMessage from ..testing import ( assertContainsFields, validateLogging, capture_logging, MemoryLogger, ) from .._errors import register_exception_extractor from .test_action import make_error_extraction_tests def assert_expected_traceback(test, logger, message, exception, expected_traceback): """Assert we logged the given exception and the expected traceback.""" lines = expected_traceback.split("\n") # Remove source code lines: expected_traceback = "\n".join([l for l in lines if not l.startswith(" ")]) assertContainsFields( test, message, { "message_type": "eliot:traceback", "exception": RuntimeError, "reason": exception, "traceback": expected_traceback, }, ) logger.flushTracebacks(RuntimeError) class TracebackLoggingTests(TestCase): """ Tests for L{write_traceback} and L{writeFailure}. """ @validateLogging(None) def test_write_traceback_implicit(self, logger): """ L{write_traceback} with no arguments writes the current traceback to the log. 
""" e = None def raiser(): raise RuntimeError("because") try: raiser() except Exception as exception: expected_traceback = traceback.format_exc() write_traceback(logger) e = exception assert_expected_traceback( self, logger, logger.messages[0], e, expected_traceback ) @validateLogging(None) def test_write_traceback_explicit(self, logger): """ L{write_traceback} with explicit arguments writes the given traceback to the log. """ e = None def raiser(): raise RuntimeError("because") try: raiser() except Exception as exception: expected_traceback = traceback.format_exc() write_traceback(logger, exc_info=sys.exc_info()) e = exception assert_expected_traceback( self, logger, logger.messages[0], e, expected_traceback ) @capture_logging(None) def test_writeTracebackDefaultLogger(self, logger): """ L{write_traceback} writes to the default log, if none is specified. """ def raiser(): raise RuntimeError("because") try: raiser() except Exception: write_traceback() message = logger.messages[0] assertContainsFields(self, message, {"message_type": "eliot:traceback"}) logger.flushTracebacks(RuntimeError) @validateLogging(None) def test_writeFailure(self, logger): """ L{writeFailure} writes a L{Failure} to the log. """ if Failure is None: raise SkipTest("Twisted unavailable") try: raise RuntimeError("because") except: failure = Failure() expectedTraceback = failure.getBriefTraceback() writeFailure(failure, logger) message = logger.messages[0] assertContainsFields( self, message, { "message_type": "eliot:traceback", "exception": RuntimeError, "reason": failure.value, "traceback": expectedTraceback, }, ) logger.flushTracebacks(RuntimeError) @capture_logging(None) def test_writeFailureDefaultLogger(self, logger): """ L{writeFailure} writes to the default log, if none is specified. 
""" if Failure is None: raise SkipTest("Twisted unavailable") try: raise RuntimeError("because") except: failure = Failure() writeFailure(failure) message = logger.messages[0] assertContainsFields(self, message, {"message_type": "eliot:traceback"}) logger.flushTracebacks(RuntimeError) @validateLogging(None) def test_writeFailureResult(self, logger): """ L{writeFailure} returns C{None}. """ if Failure is None: raise SkipTest("Twisted unavailable") try: raise RuntimeError("because") except: result = writeFailure(Failure(), logger) self.assertIs(result, None) logger.flushTracebacks(RuntimeError) @validateLogging(None) def test_serialization(self, logger): """ L{_writeTracebackMessage} serializes exceptions to string values and types to FQPN. """ try: raise KeyError(123) except: exc_info = sys.exc_info() _writeTracebackMessage(logger, *exc_info) serialized = logger.serialize()[0] assertContainsFields( self, serialized, {"exception": "%s.KeyError" % (KeyError.__module__,), "reason": "123"}, ) logger.flushTracebacks(KeyError) @validateLogging(None) def test_badException(self, logger): """ L{_writeTracebackMessage} logs a message even if given a bad exception. """ class BadException(Exception): def __str__(self): raise TypeError() try: raise BadException() except BadException: exc_info = sys.exc_info() _writeTracebackMessage(logger, *exc_info) self.assertEqual( logger.serialize()[0]["reason"], "eliot: unknown, unicode() raised exception", ) logger.flushTracebacks(BadException) def get_traceback_messages(exception): """ Given an exception instance generate a traceback Eliot message. 
""" logger = MemoryLogger() try: raise exception except exception.__class__: write_traceback(logger) # MemoryLogger.validate() mutates messages: # https://github.com/itamarst/eliot/issues/243 messages = [message.copy() for message in logger.messages] logger.validate() return messages class TracebackExtractionTests(make_error_extraction_tests(get_traceback_messages)): """ Error extraction tests for tracebacks. """ def test_regular_fields(self): """ The normal traceback fields are still present when error extraction is used. """ class MyException(Exception): pass register_exception_extractor(MyException, lambda e: {"key": e.args[0]}) exception = MyException("because") messages = get_traceback_messages(exception) assertContainsFields( self, messages[0], { "message_type": "eliot:traceback", "reason": exception, "exception": MyException, }, ) eliot-1.11.0/eliot/tests/strategies.py0000664000175000017500000001625113470775105021422 0ustar itamarstitamarst00000000000000""" Hypothesis strategies for eliot. """ from __future__ import unicode_literals from functools import partial from six import text_type as unicode from hypothesis.strategies import ( builds, dictionaries, fixed_dictionaries, floats, integers, lists, just, none, one_of, recursive, text, uuids, ) from pyrsistent import pmap, pvector, ny, thaw from .._action import ( ACTION_STATUS_FIELD, ACTION_TYPE_FIELD, FAILED_STATUS, STARTED_STATUS, SUCCEEDED_STATUS, TaskLevel, WrittenAction, ) from .._message import ( EXCEPTION_FIELD, REASON_FIELD, TASK_LEVEL_FIELD, TASK_UUID_FIELD, WrittenMessage, ) task_level_indexes = integers(min_value=1, max_value=10) # Task levels can be arbitrarily deep, but in the wild rarely as much as 100. # Five seems a sensible average. task_level_lists = lists(task_level_indexes, min_size=1, max_size=6) task_levels = task_level_lists.map(lambda level: TaskLevel(level=level)) # Text generation is slow, and most of the things are short labels. 
We set # a restricted alphabet so they're easier to read, and in general large # amount of randomness in label generation doesn't enhance our testing in # any way, since we don't parse type names or user field values. labels = text(min_size=1, max_size=8, alphabet="CGAT") timestamps = floats(min_value=0, max_value=1000.0) message_core_dicts = fixed_dictionaries( dict( task_level=task_level_lists.map(pvector), task_uuid=uuids().map(unicode), timestamp=timestamps, ) ).map(pmap) # Text generation is slow. We can make it faster by not generating so # much. These are reasonable values. message_data_dicts = dictionaries( keys=labels, values=labels, # People don't normally put much more than ten fields in their # messages, surely? max_size=10, ).map(pmap) def written_from_pmap(d): """ Convert a C{pmap} to a C{WrittenMessage}. """ return WrittenMessage.from_dict(thaw(d)) def union(*dicts): result = pmap().evolver() for d in dicts: # Work around bug in pyrsistent where it sometimes loses updates if # they contain some kv pairs that are identical to the ones in the # dict being updated. # # https://github.com/tobgu/pyrsistent/pull/54 for key, value in d.items(): if key in result and result[key] is value: continue result[key] = value return result.persistent() message_dicts = builds(union, message_data_dicts, message_core_dicts) written_messages = message_dicts.map(written_from_pmap) _start_action_fields = fixed_dictionaries( {ACTION_STATUS_FIELD: just(STARTED_STATUS), ACTION_TYPE_FIELD: labels} ) start_action_message_dicts = builds(union, message_dicts, _start_action_fields).map( lambda x: x.update({TASK_LEVEL_FIELD: x[TASK_LEVEL_FIELD].set(-1, 1)}) ) start_action_messages = start_action_message_dicts.map(written_from_pmap) def sibling_task_level(message, n): return message.task_level.parent().level.append(n) _end_action_fields = one_of( just({ACTION_STATUS_FIELD: SUCCEEDED_STATUS}), fixed_dictionaries( { ACTION_STATUS_FIELD: just(FAILED_STATUS), # Text generation is slow. 
We can make it faster by not generating so # much. Thqese are reasonable values. EXCEPTION_FIELD: labels, REASON_FIELD: labels, } ), ) def _make_written_action(start_message, child_messages, end_message_dict): """ Helper for creating arbitrary L{WrittenAction}s. The child messages and end message (if provided) will be updated to have the same C{task_uuid} as C{start_message}. Likewise, their C{task_level}s will be such that they follow on from C{start_message}. @param WrittenMessage start_message: The message to start the action with. @param child_messages: A sequence of L{WrittenAction}s and L{WrittenMessage}s that make up the action. @param (PMap | None) end_message_dict: A dictionary that makes up an end message. If None, then the action is unfinished. @return: A L{WrittenAction} """ task_uuid = start_message.task_uuid children = [] for i, child in enumerate(child_messages, 2): task_level = TaskLevel(level=sibling_task_level(start_message, i)) children.append(reparent_action(task_uuid, task_level, child)) if end_message_dict: end_message = written_from_pmap( union( end_message_dict, { ACTION_TYPE_FIELD: start_message.contents[ACTION_TYPE_FIELD], TASK_UUID_FIELD: task_uuid, TASK_LEVEL_FIELD: sibling_task_level( start_message, 2 + len(children) ), }, ) ) else: end_message = None return WrittenAction.from_messages(start_message, children, end_message) written_actions = recursive( written_messages, lambda children: builds( _make_written_action, start_message=start_action_messages, child_messages=lists(children, max_size=5), end_message_dict=builds(union, message_dicts, _end_action_fields) | none(), ), ) def _map_messages(f, written_action): """ Map C{f} across all of the messages that make up C{written_action}. This is a structure-preserving map operation. C{f} will be applied to all messages that make up C{written_action}: the start message, end message, and children. If any of the children are themselves L{WrittenAction}s, we recurse down into them. 
@param f: A function that takes a L{WrittenMessage} and returns a new L{WrittenMessage}. @param (WrittenAction | WrittenMessage) written_action: A written @return: A L{WrittenMessage} if C{written_action} is a C{WrittenMessage}, a L{WrittenAction} otherwise. """ if isinstance(written_action, WrittenMessage): return f(written_action) start_message = f(written_action.start_message) children = written_action.children.transform([ny], partial(_map_messages, f)) if written_action.end_message: end_message = f(written_action.end_message) else: end_message = None return WrittenAction.from_messages( start_message=start_message, children=pvector(children), end_message=end_message ) def reparent_action(task_uuid, task_level, written_action): """ Return a version of C{written_action} that has the given C{task_uuid} and is rooted at the given C{task_level}. @param UUID task_uuid: The new task UUID. @param TaskLevel task_level: The new task level. @param (WrittenAction | WrittenMessage) written_action: The action or message to update. @return: A new version of C{written_action}. """ new_prefix = list(task_level.level) old_prefix_len = len(written_action.task_level.level) def fix_message(message): return message.transform( ["_logged_dict", TASK_LEVEL_FIELD], lambda level: new_prefix + level[old_prefix_len:], ).transform(["_logged_dict", TASK_UUID_FIELD], task_uuid) return _map_messages(fix_message, written_action) eliot-1.11.0/eliot/tests/test_util.py0000664000175000017500000000240313470775105021256 0ustar itamarstitamarst00000000000000""" Tests for L{eliot._util}. """ from __future__ import unicode_literals from unittest import TestCase import pprint from .._util import load_module class LoadModuleTests(TestCase): """ Tests for L{load_module}. """ maxDiff = None def test_returns_module(self): """ L{load_module} returns an object with same methods as original module. 
""" loaded = load_module(str("copy"), pprint) obj = [1, 2, b"hello"] self.assertEqual(loaded.pformat(obj), pprint.pformat(obj)) def test_name(self): """ L{load_module} returns an object with the given name. """ name = str("my_copy") loaded = load_module(name, pprint) self.assertEqual(loaded.__name__, name) def test_distinct_from_original(self): """ L{load_module} returns a distinct object from the original module. """ loaded = load_module(str("copy"), pprint) # Override repr in copy: loaded.repr = lambda o: str("OVERRIDE") # Demonstrate that override applies to copy but not original: self.assertEqual( dict(original=pprint.pformat(123), loaded=loaded.pformat(123)), dict(original="123", loaded="OVERRIDE"), ) eliot-1.11.0/eliot/tests/test_logwriter.py0000664000175000017500000002430513470775105022324 0ustar itamarstitamarst00000000000000""" Tests for L{eliot.logwriter}. """ from __future__ import unicode_literals import time import threading # Make sure to use StringIO that only accepts unicode: from io import BytesIO, StringIO from unittest import skipIf import json as pyjson from warnings import catch_warnings, simplefilter from six import PY2 try: from zope.interface.verify import verifyClass from twisted.internet import reactor from twisted.trial.unittest import TestCase from twisted.application.service import IService from twisted.python import threadable except ImportError: # Make tests not run at all. TestCase = object else: # Make sure we always import this if Twisted is available, so broken # logwriter.py causes a failure: from ..logwriter import ThreadedFileWriter, ThreadedWriter from .. import Logger, removeDestination, FileDestination class BlockingFile(object): """ A file-like whose writes can be blocked. Also, allow calling C{getvalue} after C{close}, unlike L{BytesIO}. """ def __init__(self): self.file = BytesIO() self.lock = threading.Lock() self.data = b"" def block(self): """ Prevent writes until L{unblock} is called. 
""" self.lock.acquire() def unblock(self): """ Allow writes if L{block} was previous called. """ self.lock.release() def getvalue(self): """ Get written bytes. @return: Written bytes. """ return self.data def write(self, data): with self.lock: self.file.write(data) def flush(self): self.data = self.file.getvalue() def close(self): self.file.close() class ThreadedWriterTests(TestCase): """ Tests for L{ThreadedWriter}. Many of these tests involve interactions across threads, so they arbitrarily wait for up to 5 seconds to reduce chances of slow thread switching causing the test to fail. """ def test_interface(self): """ L{ThreadedWriter} provides L{IService}. """ verifyClass(IService, ThreadedWriter) def test_name(self): """ L{ThreadedWriter} has a name. """ self.assertEqual(ThreadedWriter.name, "Eliot Log Writer") def test_startServiceRunning(self): """ L{ThreadedWriter.startService} starts the service as required by the L{IService} interface. """ writer = ThreadedWriter(FileDestination(file=BytesIO()), reactor) self.assertFalse(writer.running) writer.startService() self.addCleanup(writer.stopService) self.assertTrue(writer.running) def test_stopServiceRunning(self): """ L{ThreadedWriter.stopService} stops the service as required by the L{IService} interface. """ writer = ThreadedWriter(FileDestination(file=BytesIO()), reactor) writer.startService() d = writer.stopService() d.addCallback(lambda _: self.assertFalse(writer.running)) return d def test_startServiceStartsThread(self): """ L{ThreadedWriter.startService} starts up a thread running L{ThreadedWriter._writer}. 
""" previousThreads = threading.enumerate() result = [] event = threading.Event() def _writer(): current = threading.currentThread() if current not in previousThreads: result.append(current) event.set() writer = ThreadedWriter(FileDestination(file=BytesIO()), reactor) writer._writer = _writer writer.startService() event.wait() self.assertTrue(result) # Make sure thread is dead so it doesn't die half way through another # test: result[0].join(5) def test_stopServiceStopsThread(self): """ L{ThreadedWriter.stopService} stops the writer thread. """ previousThreads = set(threading.enumerate()) writer = ThreadedWriter(FileDestination(file=BytesIO()), reactor) writer.startService() start = time.time() while set(threading.enumerate()) == previousThreads and ( time.time() - start < 5 ): time.sleep(0.0001) # If not true the next assertion might pass by mistake: self.assertNotEqual(set(threading.enumerate()), previousThreads) writer.stopService() while set(threading.enumerate()) != previousThreads and ( time.time() - start < 5 ): time.sleep(0.0001) self.assertEqual(set(threading.enumerate()), previousThreads) def test_stopServiceFinishesWriting(self): """ L{ThreadedWriter.stopService} stops the writer thread, but only after all queued writes are written out. """ f = BlockingFile() writer = ThreadedWriter(FileDestination(file=f), reactor) f.block() writer.startService() for i in range(100): writer({"write": 123}) threads = threading.enumerate() writer.stopService() # Make sure writes didn't happen before the stopService, thus making the # test pointless: self.assertEqual(f.getvalue(), b"") f.unblock() start = time.time() while threading.enumerate() == threads and time.time() - start < 5: time.sleep(0.0001) self.assertEqual(f.getvalue(), b'{"write": 123}\n' * 100) def test_stopServiceResult(self): """ L{ThreadedWriter.stopService} returns a L{Deferred} that fires only after the thread has shut down. 
""" f = BlockingFile() writer = ThreadedWriter(FileDestination(file=f), reactor) f.block() writer.startService() writer({"hello": 123}) threads = threading.enumerate() d = writer.stopService() f.unblock() def done(_): self.assertEqual(f.getvalue(), b'{"hello": 123}\n') self.assertNotEqual(threading.enumerate(), threads) d.addCallback(done) return d def test_noChangeToIOThread(self): """ Running a L{ThreadedWriter} doesn't modify the Twisted registered IO thread. """ writer = ThreadedWriter(FileDestination(file=BytesIO()), reactor) writer.startService() d = writer.stopService() # Either the current thread (the one running the tests) is the the I/O # thread or the I/O thread was never set. Either may happen depending on # how and whether the reactor has been started by the unittesting # framework. d.addCallback( lambda _: self.assertIn( threadable.ioThread, (None, threading.currentThread().ident) ) ) return d def test_startServiceRegistersDestination(self): """ L{ThreadedWriter.startService} registers itself as an Eliot log destination. """ f = BlockingFile() writer = ThreadedWriter(FileDestination(file=f), reactor) writer.startService() Logger().write({"x": "abc"}) d = writer.stopService() d.addCallback(lambda _: self.assertIn(b"abc", f.getvalue())) return d def test_stopServiceUnregistersDestination(self): """ L{ThreadedWriter.stopService} unregisters itself as an Eliot log destination. """ writer = ThreadedWriter(FileDestination(file=BytesIO()), reactor) writer.startService() d = writer.stopService() d.addCallback(lambda _: removeDestination(writer)) return self.assertFailure(d, ValueError) def test_call(self): """ The message passed to L{ThreadedWriter.__call__} is passed to the underlying destination in the writer thread. 
""" result = [] def destination(message): result.append((message, threading.currentThread().ident)) writer = ThreadedWriter(destination, reactor) writer.startService() thread_ident = writer._thread.ident msg = {"key": 123} writer(msg) d = writer.stopService() d.addCallback(lambda _: self.assertEqual(result, [(msg, thread_ident)])) return d class ThreadedFileWriterTests(TestCase): """ Tests for ``ThreadedFileWriter``. """ def test_deprecation_warning(self): """ Instantiating ``ThreadedFileWriter`` gives a ``DeprecationWarning``. """ with catch_warnings(record=True) as warnings: ThreadedFileWriter(BytesIO(), reactor) simplefilter("always") # Catch all warnings self.assertEqual(warnings[-1].category, DeprecationWarning) def test_write(self): """ Messages passed to L{ThreadedFileWriter.__call__} are then written by the writer thread with a newline added. """ f = BytesIO() writer = ThreadedFileWriter(f, reactor) writer.startService() self.addCleanup(writer.stopService) writer({"hello": 123}) start = time.time() while not f.getvalue() and time.time() - start < 5: time.sleep(0.0001) self.assertEqual(f.getvalue(), b'{"hello": 123}\n') @skipIf(PY2, "Python 2 files always accept bytes") def test_write_unicode(self): """ Messages passed to L{ThreadedFileWriter.__call__} are then written by the writer thread with a newline added to files that accept unicode. """ f = StringIO() writer = ThreadedFileWriter(f, reactor) writer.startService() self.addCleanup(writer.stopService) original = {"hello\u1234": 123} writer(original) start = time.time() while not f.getvalue() and time.time() - start < 5: time.sleep(0.0001) self.assertEqual(f.getvalue(), pyjson.dumps(original) + "\n") def test_stopServiceClosesFile(self): """ L{ThreadedWriter.stopService} closes the file. 
""" f = BytesIO() writer = ThreadedFileWriter(f, reactor) writer.startService() d = writer.stopService() def done(_): self.assertTrue(f.closed) d.addCallback(done) return d eliot-1.11.0/eliot/tests/test_action.py0000664000175000017500000016241113573001140021545 0ustar itamarstitamarst00000000000000""" Tests for L{eliot._action}. """ from __future__ import unicode_literals import pickle import time from unittest import TestCase, skipIf from unittest.mock import patch from threading import Thread import six if six.PY3: unicode = six.text_type from hypothesis import assume, given, settings, HealthCheck from hypothesis.strategies import integers, lists, just, text from pyrsistent import pvector, v import testtools from testtools.matchers import MatchesStructure from .._action import ( Action, current_action, startTask, start_action, ACTION_STATUS_FIELD, ACTION_TYPE_FIELD, FAILED_STATUS, STARTED_STATUS, SUCCEEDED_STATUS, DuplicateChild, InvalidStartMessage, InvalidStatus, TaskLevel, WrittenAction, WrongActionType, WrongTask, WrongTaskLevel, TooManyCalls, log_call, ) from .._message import ( EXCEPTION_FIELD, REASON_FIELD, TASK_LEVEL_FIELD, TASK_UUID_FIELD, MESSAGE_TYPE_FIELD, Message, ) from .._output import MemoryLogger from .._validation import ActionType, Field, _ActionSerializers from ..testing import assertContainsFields, capture_logging from ..parse import Parser from .. import ( add_destination, remove_destination, register_exception_extractor, preserve_context, ) from .strategies import ( message_dicts, start_action_message_dicts, start_action_messages, task_level_indexes, task_level_lists, written_actions, written_messages, reparent_action, sibling_task_level, union, written_from_pmap, ) class ActionTests(TestCase): """ Tests for L{Action}. """ def test_start(self): """ L{Action._start} logs an C{action_status="started"} message. 
""" logger = MemoryLogger() action = Action(logger, "unique", TaskLevel(level=[]), "sys:thename") action._start({"key": "value"}) assertContainsFields( self, logger.messages[0], { "task_uuid": "unique", "task_level": [1], "action_type": "sys:thename", "action_status": "started", "key": "value", }, ) def test_task_uuid(self): """ L{Action.task_uuid} return the task's UUID. """ logger = MemoryLogger() action = Action(logger, "unique", TaskLevel(level=[]), "sys:thename") self.assertEqual(action.task_uuid, "unique") def test_startMessageSerialization(self): """ The start message logged by L{Action._start} is created with the appropriate start message L{eliot._validation._MessageSerializer}. """ serializers = ActionType( "sys:thename", [Field("key", lambda x: x, "")], [], "" )._serializers class Logger(list): def write(self, msg, serializer): self.append(serializer) logger = Logger() action = Action( logger, "unique", TaskLevel(level=[]), "sys:thename", serializers ) action._start({"key": "value"}) self.assertIs(logger[0], serializers.start) def test_child(self): """ L{Action.child} returns a new L{Action} with the given logger, system and name, and a task_uuid taken from the parent L{Action}. """ logger = MemoryLogger() action = Action(logger, "unique", TaskLevel(level=[]), "sys:thename") logger2 = MemoryLogger() child = action.child(logger2, "newsystem:newname") self.assertEqual( [child._logger, child._identification, child._task_level], [ logger2, {"task_uuid": "unique", "action_type": "newsystem:newname"}, TaskLevel(level=[1]), ], ) def test_childLevel(self): """ Each call to L{Action.child} increments the new sub-level set on the child. 
""" logger = MemoryLogger() action = Action(logger, "unique", TaskLevel(level=[]), "sys:thename") child1 = action.child(logger, "newsystem:newname") child2 = action.child(logger, "newsystem:newname") child1_1 = child1.child(logger, "newsystem:other") self.assertEqual(child1._task_level, TaskLevel(level=[1])) self.assertEqual(child2._task_level, TaskLevel(level=[2])) self.assertEqual(child1_1._task_level, TaskLevel(level=[1, 1])) def test_childSerializers(self): """ L{Action.child} returns a new L{Action} with the serializers passed to it, rather than the parent's. """ logger = MemoryLogger() serializers = object() action = Action( logger, "unique", TaskLevel(level=[]), "sys:thename", serializers ) childSerializers = object() child = action.child(logger, "newsystem:newname", childSerializers) self.assertIs(child._serializers, childSerializers) def test_run(self): """ L{Action.run} runs the given function with given arguments, returning its result. """ action = Action(None, "", TaskLevel(level=[]), "") def f(*args, **kwargs): return args, kwargs result = action.run(f, 1, 2, x=3) self.assertEqual(result, ((1, 2), {"x": 3})) def test_runContext(self): """ L{Action.run} runs the given function with the action set as the current action. """ result = [] action = Action(None, "", TaskLevel(level=[]), "") action.run(lambda: result.append(current_action())) self.assertEqual(result, [action]) def test_per_thread_context(self): """Different threads have different contexts.""" in_thread = [] def run_in_thread(): action = Action(None, "", TaskLevel(level=[]), "") with action.context(): time.sleep(0.5) in_thread.append(current_action()) thread = Thread(target=run_in_thread) thread.start() time.sleep(0.2) self.assertEqual(current_action(), None) thread.join() self.assertIsInstance(in_thread[0], Action) def test_runContextUnsetOnReturn(self): """ L{Action.run} unsets the action once the given function returns. 
""" action = Action(None, "", TaskLevel(level=[]), "") action.run(lambda: None) self.assertIs(current_action(), None) def test_runContextUnsetOnRaise(self): """ L{Action.run} unsets the action once the given function raises an exception. """ action = Action(None, "", TaskLevel(level=[]), "") self.assertRaises(ZeroDivisionError, action.run, lambda: 1 / 0) self.assertIs(current_action(), None) def test_withSetsContext(self): """ L{Action.__enter__} sets the action as the current action. """ action = Action(MemoryLogger(), "", TaskLevel(level=[]), "") with action: self.assertIs(current_action(), action) def test_withUnsetOnReturn(self): """ L{Action.__exit__} unsets the action on successful block finish. """ action = Action(MemoryLogger(), "", TaskLevel(level=[]), "") with action: pass self.assertIs(current_action(), None) def test_withUnsetOnRaise(self): """ L{Action.__exit__} unsets the action if the block raises an exception. """ action = Action(MemoryLogger(), "", TaskLevel(level=[]), "") try: with action: 1 / 0 except ZeroDivisionError: pass else: self.fail("no exception") self.assertIs(current_action(), None) def test_withContextSetsContext(self): """ L{Action.context().__enter__} sets the action as the current action. """ action = Action(MemoryLogger(), "", TaskLevel(level=[]), "") with action.context(): self.assertIs(current_action(), action) def test_withContextReturnsaction(self): """ L{Action.context().__enter__} returns the action. """ action = Action(MemoryLogger(), "", TaskLevel(level=[]), "") with action.context() as action2: self.assertIs(action, action2) def test_withContextUnsetOnReturn(self): """ L{Action.context().__exit__} unsets the action on successful block finish. """ action = Action(MemoryLogger(), "", TaskLevel(level=[]), "") with action.context(): pass self.assertIs(current_action(), None) def test_withContextNoLogging(self): """ L{Action.context().__exit__} does not log any messages. 
""" logger = MemoryLogger() action = Action(logger, "", TaskLevel(level=[]), "") with action.context(): pass self.assertFalse(logger.messages) def test_withContextUnsetOnRaise(self): """ L{Action.conext().__exit__} unsets the action if the block raises an exception. """ action = Action(MemoryLogger(), "", TaskLevel(level=[]), "") try: with action.context(): 1 / 0 except ZeroDivisionError: pass else: self.fail("no exception") self.assertIs(current_action(), None) def test_finish(self): """ L{Action.finish} with no exception logs an C{action_status="succeeded"} message. """ logger = MemoryLogger() action = Action(logger, "unique", TaskLevel(level=[]), "sys:thename") action.finish() assertContainsFields( self, logger.messages[0], { "task_uuid": "unique", "task_level": [1], "action_type": "sys:thename", "action_status": "succeeded", }, ) def test_successfulFinishSerializer(self): """ L{Action.finish} with no exception passes the success L{eliot._validation._MessageSerializer} to the message it creates. """ serializers = ActionType( "sys:thename", [], [Field("key", lambda x: x, "")], "" )._serializers class Logger(list): def write(self, msg, serializer): self.append(serializer) logger = Logger() action = Action( logger, "unique", TaskLevel(level=[]), "sys:thename", serializers ) action.finish() self.assertIs(logger[0], serializers.success) def test_failureFinishSerializer(self): """ L{Action.finish} with an exception passes the failure L{eliot._validation._MessageSerializer} to the message it creates. """ serializers = ActionType( "sys:thename", [], [Field("key", lambda x: x, "")], "" )._serializers class Logger(list): def write(self, msg, serializer): self.append(serializer) logger = Logger() action = Action( logger, "unique", TaskLevel(level=[]), "sys:thename", serializers ) action.finish(Exception()) self.assertIs(logger[0], serializers.failure) def test_startFieldsNotInFinish(self): """ L{Action.finish} logs a message without the fields from L{Action._start}. 
""" logger = MemoryLogger() action = Action(logger, "unique", TaskLevel(level=[]), "sys:thename") action._start({"key": "value"}) action.finish() self.assertNotIn("key", logger.messages[1]) def test_finishWithBadException(self): """ L{Action.finish} still logs a message if the given exception raises another exception when called with C{unicode()}. """ logger = MemoryLogger() action = Action(logger, "unique", TaskLevel(level=[]), "sys:thename") class BadException(Exception): def __str__(self): raise TypeError() action.finish(BadException()) self.assertEqual( logger.messages[0]["reason"], "eliot: unknown, unicode() raised exception" ) def test_withLogsSuccessfulFinishMessage(self): """ L{Action.__exit__} logs an action finish message on a successful block finish. """ logger = MemoryLogger() action = Action(logger, "uuid", TaskLevel(level=[1]), "sys:me") with action: pass # Start message is only created if we use the action()/task() utility # functions, the intended public APIs. self.assertEqual(len(logger.messages), 1) assertContainsFields( self, logger.messages[0], { "task_uuid": "uuid", "task_level": [1, 1], "action_type": "sys:me", "action_status": "succeeded", }, ) def test_withLogsExceptionMessage(self): """ L{Action.__exit__} logs an action finish message on an exception raised from the block. """ logger = MemoryLogger() action = Action(logger, "uuid", TaskLevel(level=[1]), "sys:me") exception = RuntimeError("because") try: with action: raise exception except RuntimeError: pass else: self.fail("no exception") self.assertEqual(len(logger.messages), 1) assertContainsFields( self, logger.messages[0], { "task_uuid": "uuid", "task_level": [1, 1], "action_type": "sys:me", "action_status": "failed", "reason": "because", "exception": "%s.RuntimeError" % (RuntimeError.__module__,), }, ) def test_withReturnValue(self): """ L{Action.__enter__} returns the action itself. 
""" logger = MemoryLogger() action = Action(logger, "uuid", TaskLevel(level=[1]), "sys:me") with action as act: self.assertIs(action, act) def test_addSuccessFields(self): """ On a successful finish, L{Action.__exit__} adds fields from L{Action.addSuccessFields} to the result message. """ logger = MemoryLogger() action = Action(logger, "uuid", TaskLevel(level=[1]), "sys:me") with action as act: act.addSuccessFields(x=1, y=2) act.addSuccessFields(z=3) assertContainsFields(self, logger.messages[0], {"x": 1, "y": 2, "z": 3}) def test_nextTaskLevel(self): """ Each call to L{Action._nextTaskLevel()} increments a counter. """ action = Action(MemoryLogger(), "uuid", TaskLevel(level=[1]), "sys:me") self.assertEqual( [action._nextTaskLevel() for i in range(5)], [ TaskLevel(level=level) for level in ([1, 1], [1, 2], [1, 3], [1, 4], [1, 5]) ], ) def test_multipleFinishCalls(self): """ If L{Action.finish} is called, subsequent calls to L{Action.finish} have no effect. """ logger = MemoryLogger() action = Action(logger, "uuid", TaskLevel(level=[1]), "sys:me") with action as act: act.finish() act.finish(Exception()) act.finish() # Only initial finish message is logged: self.assertEqual(len(logger.messages), 1) class StartActionAndTaskTests(TestCase): """ Tests for L{start_action} and L{startTask}. """ def test_startTaskNewAction(self): """ L{startTask} creates a new top-level L{Action}. """ logger = MemoryLogger() action = startTask(logger, "sys:do") self.assertIsInstance(action, Action) self.assertEqual(action._task_level, TaskLevel(level=[])) def test_start_task_default_action_type(self): """ L{start_task} sets a default C{action_type} if none is set. """ logger = MemoryLogger() startTask(logger) assertContainsFields(self, logger.messages[0], {"action_type": ""}) def test_startTaskSerializers(self): """ If serializers are passed to L{startTask} they are attached to the resulting L{Action}. 
""" logger = MemoryLogger() serializers = _ActionSerializers(start=None, success=None, failure=None) action = startTask(logger, "sys:do", serializers) self.assertIs(action._serializers, serializers) def test_startActionSerializers(self): """ If serializers are passed to L{start_action} they are attached to the resulting L{Action}. """ logger = MemoryLogger() serializers = _ActionSerializers(start=None, success=None, failure=None) action = start_action(logger, "sys:do", serializers) self.assertIs(action._serializers, serializers) def test_startTaskNewUUID(self): """ L{startTask} creates an L{Action} with its own C{task_uuid}. """ logger = MemoryLogger() action = startTask(logger, "sys:do") action2 = startTask(logger, "sys:do") self.assertNotEqual( action._identification["task_uuid"], action2._identification["task_uuid"] ) def test_startTaskLogsStart(self): """ L{startTask} logs a start message for the newly created L{Action}. """ logger = MemoryLogger() action = startTask(logger, "sys:do", key="value") assertContainsFields( self, logger.messages[0], { "task_uuid": action._identification["task_uuid"], "task_level": [1], "action_type": "sys:do", "action_status": "started", "key": "value", }, ) def test_start_action_default_action_type(self): """ L{start_action} sets a default C{action_type} if none is set. """ logger = MemoryLogger() start_action(logger) assertContainsFields(self, logger.messages[0], {"action_type": ""}) def test_startActionNoParent(self): """ L{start_action} when C{current_action()} is C{None} creates a top-level L{Action}. """ logger = MemoryLogger() action = start_action(logger, "sys:do") self.assertIsInstance(action, Action) self.assertEqual(action._task_level, TaskLevel(level=[])) def test_startActionNoParentLogStart(self): """ L{start_action} when C{current_action()} is C{None} logs a start message. 
""" logger = MemoryLogger() action = start_action(logger, "sys:do", key="value") assertContainsFields( self, logger.messages[0], { "task_uuid": action._identification["task_uuid"], "task_level": [1], "action_type": "sys:do", "action_status": "started", "key": "value", }, ) def test_startActionWithParent(self): """ L{start_action} uses the C{current_action()} as parent for a new L{Action}. """ logger = MemoryLogger() parent = Action(logger, "uuid", TaskLevel(level=[2]), "other:thing") with parent: action = start_action(logger, "sys:do") self.assertIsInstance(action, Action) self.assertEqual(action._identification["task_uuid"], "uuid") self.assertEqual(action._task_level, TaskLevel(level=[2, 1])) def test_startActionWithParentLogStart(self): """ L{start_action} when C{current_action()} is an L{Action} logs a start message. """ logger = MemoryLogger() parent = Action(logger, "uuid", TaskLevel(level=[]), "other:thing") with parent: start_action(logger, "sys:do", key="value") assertContainsFields( self, logger.messages[0], { "task_uuid": "uuid", "task_level": [1, 1], "action_type": "sys:do", "action_status": "started", "key": "value", }, ) def test_startTaskNoLogger(self): """ When no logger is given L{startTask} logs to the default ``Logger``. """ messages = [] add_destination(messages.append) self.addCleanup(remove_destination, messages.append) action = startTask(action_type="sys:do", key="value") assertContainsFields( self, messages[0], { "task_uuid": action._identification["task_uuid"], "task_level": [1], "action_type": "sys:do", "action_status": "started", "key": "value", }, ) def test_startActionNoLogger(self): """ When no logger is given L{start_action} logs to the default ``Logger``. 
""" messages = [] add_destination(messages.append) self.addCleanup(remove_destination, messages.append) action = start_action(action_type="sys:do", key="value") assertContainsFields( self, messages[0], { "task_uuid": action._identification["task_uuid"], "task_level": [1], "action_type": "sys:do", "action_status": "started", "key": "value", }, ) class PEP8Tests(TestCase): """ Tests for PEP 8 method compatibility. """ def test_add_success_fields(self): """ L{Action.addSuccessFields} is the same as L{Action.add_success_fields}. """ self.assertEqual(Action.addSuccessFields, Action.add_success_fields) def test_serialize_task_id(self): """ L{Action.serialize_task_id} is the same as L{Action.serializeTaskId}. """ self.assertEqual(Action.serialize_task_id, Action.serializeTaskId) def test_continue_task(self): """ L{Action.continue_task} is the same as L{Action.continueTask}. """ self.assertEqual(Action.continue_task, Action.continueTask) class SerializationTests(TestCase): """ Tests for L{Action} serialization and deserialization. """ def test_serializeTaskId(self): """ L{Action.serialize_task_id} result is composed of the task UUID and an incremented task level. """ action = Action(None, "uniq123", TaskLevel(level=[1, 2]), "mytype") self.assertEqual( [ action._nextTaskLevel(), action.serialize_task_id(), action._nextTaskLevel(), ], [TaskLevel(level=[1, 2, 1]), b"uniq123@/1/2/2", TaskLevel(level=[1, 2, 3])], ) def test_continueTaskReturnsAction(self): """ L{Action.continue_task} returns an L{Action} whose C{task_level} and C{task_uuid} are derived from those in the given serialized task identifier. 
""" originalAction = Action(None, "uniq456", TaskLevel(level=[3, 4]), "mytype") taskId = originalAction.serializeTaskId() newAction = Action.continue_task(MemoryLogger(), taskId) self.assertEqual( [newAction.__class__, newAction._identification, newAction._task_level], [ Action, {"task_uuid": "uniq456", "action_type": "eliot:remote_task"}, TaskLevel(level=[3, 4, 1]), ], ) def test_continueTaskUnicode(self): """ L{Action.continue_task} can take a Unicode task identifier. """ original_action = Action(None, "uniq790", TaskLevel(level=[3, 4]), "mytype") task_id = unicode(original_action.serialize_task_id(), "utf-8") new_action = Action.continue_task(MemoryLogger(), task_id) self.assertEqual(new_action._identification["task_uuid"], "uniq790") def test_continueTaskStartsAction(self): """ L{Action.continue_task} starts the L{Action} it creates. """ originalAction = Action(None, "uniq456", TaskLevel(level=[3, 4]), "mytype") taskId = originalAction.serializeTaskId() logger = MemoryLogger() Action.continue_task(logger, taskId) assertContainsFields( self, logger.messages[0], { "task_uuid": "uniq456", "task_level": [3, 4, 1, 1], "action_type": "eliot:remote_task", "action_status": "started", }, ) def test_continueTaskNoLogger(self): """ L{Action.continue_task} can be called without a logger. """ originalAction = Action(None, "uniq456", TaskLevel(level=[3, 4]), "mytype") taskId = originalAction.serializeTaskId() messages = [] add_destination(messages.append) self.addCleanup(remove_destination, messages.append) Action.continue_task(task_id=taskId) assertContainsFields( self, messages[-1], { "task_uuid": "uniq456", "task_level": [3, 4, 1, 1], "action_type": "eliot:remote_task", "action_status": "started", }, ) def test_continueTaskRequiredTaskId(self): """ L{Action.continue_task} requires a C{task_id} to be passed in. """ self.assertRaises(RuntimeError, Action.continue_task) class TaskLevelTests(TestCase): """ Tests for L{TaskLevel}. 
""" def assert_fully_less_than(self, x, y): """ Assert that x < y according to all the comparison operators. """ self.assertTrue( all( [ # lt x < y, not y < x, # le x <= y, not y <= x, # gt y > x, not x > y, # ge y >= x, not x >= y, # eq not x == y, not y == x, # ne x != y, y != x, ] ) ) def test_equality(self): """ L{TaskChild} correctly implements equality and hashing. """ a = TaskLevel(level=[1, 2]) a2 = TaskLevel(level=[1, 2]) b = TaskLevel(level=[2, 999]) self.assertTrue( all( [ a == a2, a2 == a, a != b, b != a, not b == a, not a == b, not a == 1, a != 1, hash(a) == hash(a2), hash(b) != hash(a), ] ) ) def test_as_list(self): """ L{TaskChild.as_list} returns the level. """ self.assertEqual(TaskLevel(level=[1, 2, 3]).as_list(), [1, 2, 3]) @given(lists(task_level_indexes)) def test_parent_of_child(self, base_task_level): """ L{TaskLevel.child} returns the first child of the task. """ base_task = TaskLevel(level=base_task_level) child_task = base_task.child() self.assertEqual(base_task, child_task.parent()) @given(task_level_lists) def test_child_greater_than_parent(self, task_level): """ L{TaskLevel.child} returns a child that is greater than its parent. """ task = TaskLevel(level=task_level) self.assert_fully_less_than(task, task.child()) @given(task_level_lists) def test_next_sibling_greater(self, task_level): """ L{TaskLevel.next_sibling} returns a greater task level. """ task = TaskLevel(level=task_level) self.assert_fully_less_than(task, task.next_sibling()) @given(task_level_lists) def test_next_sibling(self, task_level): """ L{TaskLevel.next_sibling} returns the next sibling of a task. """ task = TaskLevel(level=task_level) sibling = task.next_sibling() self.assertEqual( sibling, TaskLevel(level=task_level[:-1] + [task_level[-1] + 1]) ) def test_parent_of_root(self): """ L{TaskLevel.parent} of the root task level is C{None}. 
""" self.assertIs(TaskLevel(level=[]).parent(), None) def test_toString(self): """ L{TaskLevel.toString} serializes the object to a Unicode string. """ root = TaskLevel(level=[]) child2_1 = root.child().next_sibling().child() self.assertEqual([root.toString(), child2_1.toString()], ["/", "/2/1"]) def test_fromString(self): """ L{TaskLevel.fromString} deserializes the output of L{TaskLevel.toString}. """ self.assertEqual( [TaskLevel.fromString("/"), TaskLevel.fromString("/2/1")], [TaskLevel(level=[]), TaskLevel(level=[2, 1])], ) def test_from_string(self): """ L{TaskLevel.from_string} is the same as as L{TaskLevel.fromString}. """ self.assertEqual(TaskLevel.from_string, TaskLevel.fromString) def test_to_string(self): """ L{TaskLevel.to_string} is the same as as L{TaskLevel.toString}. """ self.assertEqual(TaskLevel.to_string, TaskLevel.toString) class WrittenActionTests(testtools.TestCase): """ Tests for L{WrittenAction}. """ @given(start_action_messages) def test_from_single_start_message(self, message): """ A L{WrittenAction} can be constructed from a single "start" message. Such an action inherits the C{action_type} of the start message, has no C{end_time}, and has a C{status} of C{STARTED_STATUS}. """ action = WrittenAction.from_messages(message) self.assertThat( action, MatchesStructure.byEquality( status=STARTED_STATUS, action_type=message.contents[ACTION_TYPE_FIELD], task_uuid=message.task_uuid, task_level=message.task_level.parent(), start_time=message.timestamp, children=pvector([]), end_time=None, reason=None, exception=None, ), ) @given(start_action_messages, message_dicts, integers(min_value=2)) def test_from_single_end_message(self, start_message, end_message_dict, n): """ A L{WrittenAction} can be constructed from a single "end" message. Such an action inherits the C{action_type} and C{task_level} of the end message, has no C{start_time}, and has a C{status} matching that of the end message. 
""" end_message = written_from_pmap( union( end_message_dict, { ACTION_STATUS_FIELD: SUCCEEDED_STATUS, ACTION_TYPE_FIELD: start_message.contents[ACTION_TYPE_FIELD], TASK_UUID_FIELD: start_message.task_uuid, TASK_LEVEL_FIELD: sibling_task_level(start_message, n), }, ) ) action = WrittenAction.from_messages(end_message=end_message) self.assertThat( action, MatchesStructure.byEquality( status=SUCCEEDED_STATUS, action_type=end_message.contents[ACTION_TYPE_FIELD], task_uuid=end_message.task_uuid, task_level=end_message.task_level.parent(), start_time=None, children=pvector([]), end_time=end_message.timestamp, reason=None, exception=None, ), ) @given(message_dicts) def test_from_single_child_message(self, message_dict): """ A L{WrittenAction} can be constructed from a single child message. Such an action inherits the C{task_level} of the message, has no C{start_time}, C{status}, C{task_type} or C{end_time}. """ message = written_from_pmap(message_dict) action = WrittenAction.from_messages(children=[message]) self.assertThat( action, MatchesStructure.byEquality( status=None, action_type=None, task_uuid=message.task_uuid, task_level=message.task_level.parent(), start_time=None, children=pvector([message]), end_time=None, reason=None, exception=None, ), ) @given(start_action_messages, message_dicts, integers(min_value=2)) def test_different_task_uuid(self, start_message, end_message_dict, n): """ By definition, an action is either a top-level task or takes place within such a task. If we try to assemble actions from messages with differing task UUIDs, we raise an error. 
""" assume(start_message.task_uuid != end_message_dict["task_uuid"]) action_type = start_message.as_dict()[ACTION_TYPE_FIELD] end_message = written_from_pmap( union( end_message_dict.set(ACTION_TYPE_FIELD, action_type), { ACTION_STATUS_FIELD: SUCCEEDED_STATUS, TASK_LEVEL_FIELD: sibling_task_level(start_message, n), }, ) ) self.assertRaises( WrongTask, WrittenAction.from_messages, start_message, end_message=end_message, ) @given(message_dicts) def test_invalid_start_message_missing_status(self, message_dict): """ A start message must have an C{ACTION_STATUS_FIELD} with the value C{STARTED_STATUS}, otherwise it's not a start message. If we receive such a message, raise an error. This test handles the case where the status field is not present. """ assume(ACTION_STATUS_FIELD not in message_dict) message = written_from_pmap(message_dict) self.assertRaises(InvalidStartMessage, WrittenAction.from_messages, message) @given( message_dict=start_action_message_dicts, status=(just(FAILED_STATUS) | just(SUCCEEDED_STATUS) | text()), ) def test_invalid_start_message_wrong_status(self, message_dict, status): """ A start message must have an C{ACTION_STATUS_FIELD} with the value C{STARTED_STATUS}, otherwise it's not a start message. If we receive such a message, raise an error. This test handles the case where the status field is present, but is not C{STARTED_STATUS}. """ message = written_from_pmap(message_dict.update({ACTION_STATUS_FIELD: status})) self.assertRaises(InvalidStartMessage, WrittenAction.from_messages, message) @given(start_action_message_dicts, integers(min_value=2)) def test_invalid_task_level_in_start_message(self, start_message_dict, i): """ All messages in an action have a task level. The first message in an action must have a task level ending in C{1}, indicating that it's the first message. If we try to start an action with a message that has a task level that does not end in C{1}, raise an error. 
""" new_level = start_message_dict[TASK_LEVEL_FIELD].append(i) message_dict = start_message_dict.set(TASK_LEVEL_FIELD, new_level) message = written_from_pmap(message_dict) self.assertRaises(InvalidStartMessage, WrittenAction.from_messages, message) @given(start_action_messages, message_dicts, text(), integers(min_value=1)) def test_action_type_mismatch(self, start_message, end_message_dict, end_type, n): """ The end message of an action must have the same C{ACTION_TYPE_FIELD} as the start message of an action. If we try to end an action with a message that has a different type, we raise an error. """ assume(end_type != start_message.contents[ACTION_TYPE_FIELD]) end_message = written_from_pmap( union( end_message_dict, { ACTION_STATUS_FIELD: SUCCEEDED_STATUS, ACTION_TYPE_FIELD: end_type, TASK_UUID_FIELD: start_message.task_uuid, TASK_LEVEL_FIELD: sibling_task_level(start_message, n), }, ) ) self.assertRaises( WrongActionType, WrittenAction.from_messages, start_message, end_message=end_message, ) @given(start_action_messages, message_dicts, integers(min_value=2)) def test_successful_end(self, start_message, end_message_dict, n): """ A L{WrittenAction} can be constructed with just a start message and an end message: in this case, an end message that indicates the action was successful. Such an action inherits the C{end_time} from the end message, and has a C{status} of C{SUCCEEDED_STATUS}. 
""" end_message = written_from_pmap( union( end_message_dict, { ACTION_STATUS_FIELD: SUCCEEDED_STATUS, ACTION_TYPE_FIELD: start_message.contents[ACTION_TYPE_FIELD], TASK_UUID_FIELD: start_message.task_uuid, TASK_LEVEL_FIELD: sibling_task_level(start_message, n), }, ) ) action = WrittenAction.from_messages(start_message, end_message=end_message) self.assertThat( action, MatchesStructure.byEquality( action_type=start_message.contents[ACTION_TYPE_FIELD], status=SUCCEEDED_STATUS, task_uuid=start_message.task_uuid, task_level=start_message.task_level.parent(), start_time=start_message.timestamp, children=pvector([]), end_time=end_message.timestamp, reason=None, exception=None, ), ) @given(start_action_messages, message_dicts, text(), text(), integers(min_value=2)) def test_failed_end(self, start_message, end_message_dict, exception, reason, n): """ A L{WrittenAction} can be constructed with just a start message and an end message: in this case, an end message that indicates that the action failed. Such an action inherits the C{end_time} from the end message, has a C{status} of C{FAILED_STATUS}, and an C{exception} and C{reason} that match the raised exception. 
""" end_message = written_from_pmap( union( end_message_dict, { ACTION_STATUS_FIELD: FAILED_STATUS, ACTION_TYPE_FIELD: start_message.contents[ACTION_TYPE_FIELD], TASK_UUID_FIELD: start_message.task_uuid, TASK_LEVEL_FIELD: sibling_task_level(start_message, n), EXCEPTION_FIELD: exception, REASON_FIELD: reason, }, ) ) action = WrittenAction.from_messages(start_message, end_message=end_message) self.assertThat( action, MatchesStructure.byEquality( action_type=start_message.contents[ACTION_TYPE_FIELD], status=FAILED_STATUS, task_uuid=start_message.task_uuid, task_level=start_message.task_level.parent(), start_time=start_message.timestamp, children=pvector([]), end_time=end_message.timestamp, reason=reason, exception=exception, ), ) @given(start_action_messages, message_dicts, integers(min_value=2)) def test_end_has_no_status(self, start_message, end_message_dict, n): """ If we try to end a L{WrittenAction} with a message that lacks an C{ACTION_STATUS_FIELD}, we raise an error, because it's not a valid end message. """ assume(ACTION_STATUS_FIELD not in end_message_dict) end_message = written_from_pmap( union( end_message_dict, { ACTION_TYPE_FIELD: start_message.contents[ACTION_TYPE_FIELD], TASK_UUID_FIELD: start_message.task_uuid, TASK_LEVEL_FIELD: sibling_task_level(start_message, n), }, ) ) self.assertRaises( InvalidStatus, WrittenAction.from_messages, start_message, end_message=end_message, ) # This test is slow, and when run under coverage on pypy on Travis won't # make the default of 5 examples. 1 is enough. @given(start_action_messages, lists(written_messages | written_actions)) @settings(suppress_health_check=[HealthCheck.too_slow]) def test_children(self, start_message, child_messages): """ We can construct a L{WrittenAction} with child messages. These messages can be either L{WrittenAction}s or L{WrittenMessage}s. They are available in the C{children} field. 
""" messages = [ reparent_action( start_message.task_uuid, TaskLevel(level=sibling_task_level(start_message, i)), message, ) for (i, message) in enumerate(child_messages, 2) ] action = WrittenAction.from_messages(start_message, messages) def task_level(m): return m.task_level self.assertEqual(sorted(messages, key=task_level), action.children) @given(start_action_messages, message_dicts) def test_wrong_task_uuid(self, start_message, child_message): """ All child messages of an action must have the same C{task_uuid} as the action. """ assume(child_message[TASK_UUID_FIELD] != start_message.task_uuid) message = written_from_pmap(child_message) self.assertRaises( WrongTask, WrittenAction.from_messages, start_message, v(message) ) @given(start_action_messages, message_dicts) def test_wrong_task_level(self, start_message, child_message): """ All child messages of an action must have a task level that is a direct child of the action's task level. """ assume( not start_message.task_level.is_sibling_of( TaskLevel(level=child_message[TASK_LEVEL_FIELD]) ) ) message = written_from_pmap( child_message.update({TASK_UUID_FIELD: start_message.task_uuid}) ) self.assertRaises( WrongTaskLevel, WrittenAction.from_messages, start_message, v(message) ) @given(start_action_messages, message_dicts, message_dicts, integers(min_value=2)) def test_duplicate_task_level(self, start_message, child1, child2, index): """ If we try to add a child to an action that has a task level that's the same as the task level of an existing child, we raise an error. 
""" parent_level = start_message.task_level.parent().level messages = [ written_from_pmap( union( child_message, { TASK_UUID_FIELD: start_message.task_uuid, TASK_LEVEL_FIELD: parent_level.append(index), }, ) ) for child_message in [child1, child2] ] assume(messages[0] != messages[1]) self.assertRaises( DuplicateChild, WrittenAction.from_messages, start_message, messages ) def make_error_extraction_tests(get_messages): """ Create a test case class for testing extraction of fields from exceptions. @param get_messages: Callable that takes an exception instance, returns all message dictionaries generated by logging it. @return: ``TestCase`` subclass. """ class ErrorFieldExtraction(TestCase): """ Tests for extracting fields from exceptions in failed actions. """ def test_matching_class(self): """ If an exception fails an action and the exact type has registered extractor, extract errors using it. """ class MyException(Exception): pass register_exception_extractor(MyException, lambda e: {"key": e.args[0]}) exception = MyException("a value") [message] = get_messages(exception) assertContainsFields(self, message, {"key": "a value"}) def test_subclass_falls_back_to_parent(self): """ If an exception fails an action and the exact type has not been registered but the error is a subclass of a registered class, extract errors using it. """ class MyException(Exception): pass class SubException(MyException): pass register_exception_extractor(MyException, lambda e: {"key": e.args[0]}) [message] = get_messages(SubException("the value")) assertContainsFields(self, message, {"key": "the value"}) def test_subclass_matches_first(self): """ If both a superclass and base class have registered extractors, the more specific one is used. 
""" class MyException(Exception): pass class SubException(MyException): pass class SubSubException(SubException): pass register_exception_extractor(MyException, lambda e: {"parent": e.args[0]}) register_exception_extractor(SubException, lambda e: {"child": e.args[0]}) [message] = get_messages(SubSubException("the value")) assertContainsFields(self, message, {"child": "the value"}) def test_error_in_extracter(self): """ If an error occurs in extraction, log the message as usual just without the extra fields, and an additional traceback. """ class MyException(Exception): pass def extract(e): return e.nosuchattribute register_exception_extractor(MyException, extract) messages = get_failed_action_messages(MyException()) assertContainsFields( self, messages[1], {"action_type": "sys:me", "action_status": "failed"} ) assertContainsFields(self, messages[0], {"message_type": "eliot:traceback"}) self.assertIn("nosuchattribute", str(messages[0]["reason"])) def test_environmenterror(self): """ ``EnvironmentError`` has a registered extractor that extracts the errno. """ [message] = get_messages(EnvironmentError(12, "oh noes")) assertContainsFields(self, message, {"errno": 12}) return ErrorFieldExtraction def get_failed_action_messages(exception): """ Fail an action using the given exception. :return: Logged dictionaries from the exception failing an action. """ action_type = ActionType("sys:me", [], []) logger = MemoryLogger() action = action_type.as_task(logger=logger) try: with action: raise exception except exception.__class__: pass logger.validate() return logger.messages[1:] class FailedActionExtractionTests( make_error_extraction_tests(get_failed_action_messages) ): """ Tests for error extraction in failed actions. """ def test_regular_fields(self): """ The normal failed action fields are still present when error extraction is used. 
""" class MyException(Exception): pass register_exception_extractor(MyException, lambda e: {"key": e.args[0]}) exception = MyException("because") messages = get_failed_action_messages(exception) assertContainsFields( self, messages[0], { "task_level": [2], "action_type": "sys:me", "action_status": "failed", "reason": "because", "exception": "eliot.tests.test_action.MyException", }, ) class PreserveContextTests(TestCase): """ Tests for L{preserve_context}. """ def add(self, x, y): """ Add two inputs. """ Message.log(message_type="child") return x + y def test_no_context(self): """ If C{preserve_context} is run outside an action context it just returns the same function. """ wrapped = preserve_context(self.add) self.assertEqual(wrapped(2, 3), 5) def test_with_context_calls_underlying(self): """ If run inside an Eliot context, the result of C{preserve_context} is the result of calling the underlying function. """ with start_action(action_type="parent"): wrapped = preserve_context(self.add) self.assertEqual(wrapped(3, y=4), 7) @capture_logging(None) def test_with_context_preserves_context(self, logger): """ If run inside an Eliot context, the result of C{preserve_context} runs the wrapped function within a C{eliot:task} which is a child of the original action. """ with start_action(action_type="parent"): wrapped = preserve_context(lambda: self.add(3, 4)) thread = Thread(target=wrapped) thread.start() thread.join() [tree] = Parser.parse_stream(logger.messages) root = tree.root() self.assertEqual( ( root.action_type, root.children[0].action_type, root.children[0].children[0].contents[MESSAGE_TYPE_FIELD], ), ("parent", "eliot:remote_task", "child"), ) def test_callable_only_once(self): """ The result of C{preserve_context} can only be called once. 
""" with start_action(action_type="parent"): wrapped = preserve_context(self.add) wrapped(1, 2) self.assertRaises(TooManyCalls, wrapped, 3, 4) @log_call def for_pickling(): pass class LogCallTests(TestCase): """Tests for log_call decorator.""" def assert_logged(self, logger, action_type, expected_params, expected_result): """Assert that an action of given structure was logged.""" if six.PY2: # On Python 2 we don't include the module or class: action_type = action_type.split(".")[-1] [tree] = Parser.parse_stream(logger.messages) root = tree.root() self.assertEqual(root.action_type, action_type) message = dict(root.start_message.contents) for field in [ACTION_STATUS_FIELD, ACTION_TYPE_FIELD]: message.pop(field) self.assertEqual(message, expected_params) self.assertEqual(root.end_message.contents["result"], expected_result) self.assertEqual(root.status, SUCCEEDED_STATUS) @capture_logging(None) def test_no_args_return(self, logger): """ C{@log_call} with no arguments logs return result, arguments, and has action type based on the action name. 
""" @log_call def myfunc(x, y): return 4 myfunc(2, 3) self.assert_logged(logger, self.id() + "..myfunc", {"x": 2, "y": 3}, 4) @capture_logging(None) def test_exception(self, logger): """C{@log_call} with an exception logs a failed action.""" @log_call def myfunc(x, y): 1 / 0 with self.assertRaises(ZeroDivisionError): myfunc(2, 4) [tree] = Parser.parse_stream(logger.messages) root = tree.root() self.assertIn("ZeroDivisionError", root.end_message.contents["exception"]) self.assertEqual(root.status, FAILED_STATUS) @capture_logging(None) def test_action_type(self, logger): """C{@log_call} can take an action type.""" @log_call(action_type="myaction") def myfunc(x, y): return 4 myfunc(2, 3) self.assert_logged(logger, "myaction", {"x": 2, "y": 3}, 4) @capture_logging(None) def test_default_argument_given(self, logger): """C{@log_call} logs default arguments that were passed in.""" @log_call def myfunc(x, y=1): return 4 myfunc(2, y=5) self.assert_logged(logger, self.id() + "..myfunc", {"x": 2, "y": 5}, 4) @capture_logging(None) def test_default_argument_missing(self, logger): """C{@log_call} logs default arguments that weren't passed in.""" @log_call def myfunc(x, y=1): return 6 myfunc(2) self.assert_logged(logger, self.id() + "..myfunc", {"x": 2, "y": 1}, 6) @capture_logging(None) def test_star_args_kwargs(self, logger): """C{@log_call} logs star args and kwargs.""" @log_call def myfunc(x, *y, **z): return 6 myfunc(2, 3, 4, a=1, b=2) self.assert_logged( logger, self.id() + "..myfunc", {"x": 2, "y": (3, 4), "z": {"a": 1, "b": 2}}, 6, ) @capture_logging(None) def test_whitelist_args(self, logger): """C{@log_call} only includes whitelisted arguments.""" @log_call(include_args=("x", "z")) def myfunc(x, y, z): return 6 myfunc(2, 3, 4) self.assert_logged(logger, self.id() + "..myfunc", {"x": 2, "z": 4}, 6) @skipIf(six.PY2, "Didn't bother implementing safety check on Python 2") def test_wrong_whitelist_args(self): """If C{include_args} doesn't match function, raise an 
exception.""" with self.assertRaises(ValueError): @log_call(include_args=["a", "x", "y"]) def f(x, y): pass @capture_logging(None) def test_no_result(self, logger): """C{@log_call} can omit logging the result.""" @log_call(include_result=False) def myfunc(x, y): return 6 myfunc(2, 3) [tree] = Parser.parse_stream(logger.messages) root = tree.root() self.assertNotIn("result", root.end_message.contents) self.assertEqual(root.status, SUCCEEDED_STATUS) def test_pickleable(self): """Functions decorated with C{log_call} are pickleable. This is necessary for e.g. Dask usage. """ self.assertIs(for_pickling, pickle.loads(pickle.dumps(for_pickling))) @capture_logging(None) def test_methods(self, logger): """self is not logged.""" class C(object): @log_call def f(self, x): pass C().f(2) self.assert_logged(logger, self.id() + "..C.f", {"x": 2}, None) class IndividualMessageLogTests(TestCase): """Action.log() tests.""" def test_log_creates_new_dictionary(self): """ L{Action.log} creates a new dictionary on each call. This is important because we might mutate the dictionary in ``Logger.write``. """ messages = [] add_destination(messages.append) self.addCleanup(remove_destination, messages.append) with start_action(action_type="x") as action: action.log("mymessage", key=4) action.log(message_type="mymessage2", key=5) self.assertEqual(messages[1]["key"], 4) self.assertEqual(messages[2]["key"], 5) self.assertEqual(messages[1]["message_type"], "mymessage") self.assertEqual(messages[2]["message_type"], "mymessage2") @patch("time.time") def test_log_adds_timestamp(self, time_func): """ L{Action.log} adds a C{"timestamp"} field to the dictionary written to the logger, with the current time in seconds since the epoch. 
""" messages = [] add_destination(messages.append) self.addCleanup(remove_destination, messages.append) time_func.return_value = timestamp = 1387299889.153187625 with start_action(action_type="x") as action: action.log("mymessage", key=4) self.assertEqual(messages[1]["timestamp"], timestamp) def test_part_of_action(self): """ L{Action.log} adds the identification fields from the given L{Action} to the dictionary written to the logger. """ messages = [] add_destination(messages.append) self.addCleanup(remove_destination, messages.append) action = Action(None, "unique", TaskLevel(level=[37, 4]), "sys:thename") action.log("me", key=2) written = messages[0] del written["timestamp"] self.assertEqual( written, { "task_uuid": "unique", "task_level": [37, 4, 1], "key": 2, "message_type": "me", }, ) eliot-1.11.0/eliot/tests/test_api.py0000664000175000017500000000502113470775105021051 0ustar itamarstitamarst00000000000000""" Tests for the public API exposed by L{eliot}. """ from __future__ import unicode_literals from unittest import TestCase from .._output import Logger import eliot class PublicAPITests(TestCase): """ Tests for the public API. """ def test_addDestination(self): """ L{eliot.addDestination} adds destinations to the L{Destinations} attached to L{Logger}. """ o = object() eliot.addDestination(o) self.addCleanup(eliot.removeDestination, o) self.assertIn(o, Logger._destinations._destinations) def test_removeDestination(self): """ L{eliot.addDestination} removes destinations from the L{Destinations} attached to L{Logger}. """ self.assertEqual(eliot.removeDestination, Logger._destinations.remove) def test_addGlobalFields(self): """ L{eliot.addGlobalFields} calls the corresponding method on the L{Destinations} attached to L{Logger}. """ self.assertEqual(eliot.addGlobalFields, Logger._destinations.addGlobalFields) class PEP8Tests(TestCase): """ Tests for the PEP 8 variant of the the public API. 
""" def test_add_destination(self): """ L{eliot.addDestionation} is the same as L{eliot.add_destination}. """ self.assertIs(eliot.add_destination, eliot.addDestination) def test_remove_destination(self): """ L{eliot.removeDestionation} is the same as L{eliot.remove_destination}. """ self.assertIs(eliot.remove_destination, eliot.removeDestination) def test_add_global_fields(self): """ L{eliot.add_global_fields} is the same as L{eliot.addGlobalFields}. """ self.assertIs(eliot.add_global_fields, eliot.addGlobalFields) def test_write_traceback(self): """ L{eliot.writeTraceback} is the same as L{eliot.write_traceback}. """ self.assertIs(eliot.write_traceback, eliot.writeTraceback) def test_write_failure(self): """ L{eliot.writeFailure} is the same as L{eliot.write_failure}. """ self.assertIs(eliot.write_failure, eliot.writeFailure) def test_start_task(self): """ L{eliot.startTask} is the same as L{eliot.start_task}. """ self.assertIs(eliot.start_task, eliot.startTask) def test_start_action(self): """ L{eliot.startAction} is the same as L{eliot.start_action}. """ self.assertIs(eliot.start_action, eliot.startAction) eliot-1.11.0/eliot/tests/test_prettyprint.py0000664000175000017500000002131213573001140022666 0ustar itamarstitamarst00000000000000""" Tests for C{eliot.prettyprint}. """ from unittest import TestCase from subprocess import check_output, Popen, PIPE from collections import OrderedDict from datetime import datetime from pyrsistent import pmap from .._bytesjson import dumps from ..prettyprint import pretty_format, compact_format, REQUIRED_FIELDS SIMPLE_MESSAGE = { "timestamp": 1443193754, "task_uuid": "8c668cde-235b-4872-af4e-caea524bd1c0", "message_type": "messagey", "task_level": [1, 2], "keys": [123, 456], } UNTYPED_MESSAGE = { "timestamp": 1443193754, "task_uuid": "8c668cde-235b-4872-af4e-caea524bd1c0", "task_level": [1], "key": 1234, "abc": "def", } class FormattingTests(TestCase): """ Tests for L{pretty_format}. 
""" def test_message(self): """ A typed message is printed as expected. """ self.assertEqual( pretty_format(SIMPLE_MESSAGE), """\ 8c668cde-235b-4872-af4e-caea524bd1c0 -> /1/2 2015-09-25T15:09:14Z message_type: 'messagey' keys: [123, 456] """, ) def test_untyped_message(self): """ A message with no type is printed as expected. """ self.assertEqual( pretty_format(UNTYPED_MESSAGE), """\ 8c668cde-235b-4872-af4e-caea524bd1c0 -> /1 2015-09-25T15:09:14Z abc: 'def' key: 1234 """, ) def test_action(self): """ An action message is printed as expected. """ message = { "task_uuid": "8bc6ded2-446c-4b6d-abbc-4f21f1c9a7d8", "place": "Statue #1", "task_level": [2, 2, 2, 1], "action_type": "visited", "timestamp": 1443193958.0, "action_status": "started", } self.assertEqual( pretty_format(message), """\ 8bc6ded2-446c-4b6d-abbc-4f21f1c9a7d8 -> /2/2/2/1 2015-09-25T15:12:38Z action_type: 'visited' action_status: 'started' place: 'Statue #1' """, ) def test_multi_line(self): """ Multiple line values are indented nicely. """ message = { "timestamp": 1443193754, "task_uuid": "8c668cde-235b-4872-af4e-caea524bd1c0", "task_level": [1], "key": "hello\nthere\nmonkeys!\n", "more": "stuff", } self.assertEqual( pretty_format(message), """\ 8c668cde-235b-4872-af4e-caea524bd1c0 -> /1 2015-09-25T15:09:14Z key: 'hello | there | monkeys! | ' more: 'stuff' """, ) def test_tabs(self): """ Tabs are formatted as tabs, not quoted. """ message = { "timestamp": 1443193754, "task_uuid": "8c668cde-235b-4872-af4e-caea524bd1c0", "task_level": [1], "key": "hello\tmonkeys!", } self.assertEqual( pretty_format(message), """\ 8c668cde-235b-4872-af4e-caea524bd1c0 -> /1 2015-09-25T15:09:14Z key: 'hello monkeys!' """, ) def test_structured(self): """ Structured field values (e.g. a dictionary) are formatted in a helpful manner. 
""" message = { "timestamp": 1443193754, "task_uuid": "8c668cde-235b-4872-af4e-caea524bd1c0", "task_level": [1], "key": {"value": 123, "another": [1, 2, {"more": "data"}]}, } self.assertEqual( pretty_format(message), """\ 8c668cde-235b-4872-af4e-caea524bd1c0 -> /1 2015-09-25T15:09:14Z key: {'another': [1, 2, {'more': 'data'}], | 'value': 123} """, ) def test_microsecond(self): """ Microsecond timestamps are rendered in the output. """ message = { "timestamp": 1443193754.123455, "task_uuid": "8c668cde-235b-4872-af4e-caea524bd1c0", "task_level": [1], } self.assertEqual( pretty_format(message), """\ 8c668cde-235b-4872-af4e-caea524bd1c0 -> /1 2015-09-25T15:09:14.123455Z """, ) def test_compact(self): """ The compact mode does everything on a single line, including dictionaries and multi-line messages. """ message = { "timestamp": 1443193754, "task_uuid": "8c668cde-235b-4872-af4e-caea524bd1c0", "task_level": [1], "key": OrderedDict([("value", 123), ("another", [1, 2, {"more": "data"}])]), "multiline": "hello\n\tthere!\nabc", } self.assertEqual( compact_format(message), r'8c668cde-235b-4872-af4e-caea524bd1c0/1 2015-09-25T15:09:14Z key={"value":123,"another":[1,2,{"more":"data"}]} multiline="hello\n\tthere!\nabc"', ) def test_local(self): """ Timestamps can be generated in local timezone. """ message = { "timestamp": 1443193754, "task_uuid": "8c668cde-235b-4872-af4e-caea524bd1c0", "task_level": [1], } expected = datetime.fromtimestamp(1443193754).isoformat(sep="T") self.assertIn(expected, pretty_format(message, True)) self.assertIn(expected, compact_format(message, True)) class CommandLineTests(TestCase): """ Tests for the command-line tool. """ def test_help(self): """ C{--help} prints out the help text and exits. """ result = check_output(["eliot-prettyprint", "--help"]) self.assertIn(b"Convert Eliot messages into more readable", result) def write_and_read(self, lines, extra_args=()): """ Write the given lines to the command-line on stdin, return stdout. 
@param lines: Sequences of lines to write, as bytes, and lacking new lines. @return: Unicode-decoded result of subprocess stdout. """ process = Popen( [b"eliot-prettyprint"] + list(extra_args), stdin=PIPE, stdout=PIPE ) process.stdin.write(b"".join(line + b"\n" for line in lines)) process.stdin.close() result = process.stdout.read().decode("utf-8") process.stdout.close() return result def test_output(self): """ Lacking command-line arguments the process reads JSON lines from stdin and writes out a pretty-printed version. """ messages = [SIMPLE_MESSAGE, UNTYPED_MESSAGE, SIMPLE_MESSAGE] stdout = self.write_and_read(map(dumps, messages)) self.assertEqual( stdout, "".join(pretty_format(message) + "\n" for message in messages) ) def test_compact_output(self): """ In compact mode, the process reads JSON lines from stdin and writes out a pretty-printed compact version. """ messages = [SIMPLE_MESSAGE, UNTYPED_MESSAGE, SIMPLE_MESSAGE] stdout = self.write_and_read(map(dumps, messages), [b"--compact"]) self.assertEqual( stdout, "".join(compact_format(message) + "\n" for message in messages) ) def test_local_timezone(self): """ Local timezones are used if --local-timezone is given. """ message = { "timestamp": 1443193754, "task_uuid": "8c668cde-235b-4872-af4e-caea524bd1c0", "task_level": [1], } expected = datetime.fromtimestamp(1443193754).isoformat(sep="T") stdout = self.write_and_read( [dumps(message)], [b"--compact", b"--local-timezone"] ) self.assertIn(expected, stdout) stdout = self.write_and_read( [dumps(message)], [b"--compact", b"--local-timezone"] ) self.assertIn(expected, stdout) def test_not_json_message(self): """ Non-JSON lines are not formatted. """ not_json = b"NOT JSON!!" 
lines = [dumps(SIMPLE_MESSAGE), not_json, dumps(UNTYPED_MESSAGE)] stdout = self.write_and_read(lines) self.assertEqual( stdout, "{}\nNot JSON: {}\n\n{}\n".format( pretty_format(SIMPLE_MESSAGE), str(not_json), pretty_format(UNTYPED_MESSAGE), ), ) def test_missing_required_field(self): """ Non-Eliot JSON messages are not formatted. """ base = pmap(SIMPLE_MESSAGE) messages = [dumps(dict(base.remove(field))) for field in REQUIRED_FIELDS] + [ dumps(SIMPLE_MESSAGE) ] stdout = self.write_and_read(messages) self.assertEqual( stdout, "{}{}\n".format( "".join( "Not an Eliot message: {}\n\n".format(msg) for msg in messages[:-1] ), pretty_format(SIMPLE_MESSAGE), ), ) eliot-1.11.0/eliot/tests/test_dask.py0000664000175000017500000000660013470775105021226 0ustar itamarstitamarst00000000000000"""Tests for eliot.dask.""" from unittest import TestCase, skipUnless from ..testing import capture_logging, LoggedAction, LoggedMessage from .. import start_action, Message try: import dask from dask.bag import from_sequence except ImportError: dask = None else: from ..dask import compute_with_trace, _RunWithEliotContext, _add_logging @skipUnless(dask, "Dask not available.") class DaskTests(TestCase): """Tests for end-to-end functionality.""" def setUp(self): dask.config.set(scheduler="threading") def test_compute(self): """compute_with_trace() runs the same logic as compute().""" bag = from_sequence([1, 2, 3]) bag = bag.map(lambda x: x * 7).map(lambda x: x * 4) bag = bag.fold(lambda x, y: x + y) self.assertEqual(dask.compute(bag), compute_with_trace(bag)) @capture_logging(None) def test_logging(self, logger): """compute_with_trace() preserves Eliot context.""" def mult(x): Message.log(message_type="mult") return x * 4 def summer(x, y): Message.log(message_type="finally") return x + y bag = from_sequence([1, 2]) bag = bag.map(mult).fold(summer) with start_action(action_type="act1"): compute_with_trace(bag) [logged_action] = LoggedAction.ofType(logger.messages, "act1") self.assertEqual( 
logged_action.type_tree(), { "act1": [ { "dask:compute": [ {"eliot:remote_task": ["dask:task", "mult"]}, {"eliot:remote_task": ["dask:task", "mult"]}, {"eliot:remote_task": ["dask:task", "finally"]}, ] } ] }, ) # Make sure dependencies are tracked: mult1_msg, mult2_msg, final_msg = LoggedMessage.ofType( logger.messages, "dask:task" ) self.assertEqual( sorted(final_msg.message["dependencies"]), sorted([mult1_msg.message["key"], mult2_msg.message["key"]]), ) # Make sure dependencies are logically earlier in the logs: self.assertTrue( mult1_msg.message["task_level"] < final_msg.message["task_level"] ) self.assertTrue( mult2_msg.message["task_level"] < final_msg.message["task_level"] ) @skipUnless(dask, "Dask not available.") class AddLoggingTests(TestCase): """Tests for _add_logging().""" def test_add_logging_to_full_graph(self): """_add_logging() recreates Dask graph with wrappers.""" bag = from_sequence([1, 2, 3]) bag = bag.map(lambda x: x * 7).map(lambda x: x * 4) bag = bag.fold(lambda x, y: x + y) graph = bag.__dask_graph__() # Add logging: with start_action(action_type="bleh"): logging_added = _add_logging(graph) # Ensure resulting graph hasn't changed substantively: logging_removed = {} for key, value in logging_added.items(): if callable(value[0]): func, args = value[0], value[1:] self.assertIsInstance(func, _RunWithEliotContext) value = (func.func,) + args logging_removed[key] = value self.assertEqual(logging_removed, graph) eliot-1.11.0/eliot/tests/test_stdlib.py0000664000175000017500000000431513470775105021566 0ustar itamarstitamarst00000000000000"""Tests for standard library logging integration.""" from unittest import TestCase import logging import traceback from ..testing import assertContainsFields, capture_logging from ..stdlib import EliotHandler from .test_traceback import assert_expected_traceback class StdlibTests(TestCase): """Tests for stdlib integration.""" @capture_logging(None) def test_handler(self, logger): """The EliotHandler routes messages 
to Eliot.""" stdlib_logger = logging.getLogger("eliot-test") stdlib_logger.setLevel(logging.DEBUG) handler = EliotHandler() stdlib_logger.addHandler(handler) stdlib_logger.info("hello") stdlib_logger.warning("ono") message = logger.messages[0] assertContainsFields( self, message, { "message_type": "eliot:stdlib", "log_level": "INFO", "message": "hello", "logger": "eliot-test", }, ) message = logger.messages[1] assertContainsFields( self, message, { "message_type": "eliot:stdlib", "log_level": "WARNING", "message": "ono", "logger": "eliot-test", }, ) @capture_logging(None) def test_traceback(self, logger): """The EliotHandler routes tracebacks to Eliot.""" stdlib_logger = logging.getLogger("eliot-test2") stdlib_logger.setLevel(logging.DEBUG) handler = EliotHandler() stdlib_logger.addHandler(handler) try: raise RuntimeError() except Exception as e: exception = e expected_traceback = traceback.format_exc() stdlib_logger.exception("ono") message = logger.messages[0] assertContainsFields( self, message, { "message_type": "eliot:stdlib", "log_level": "ERROR", "message": "ono", "logger": "eliot-test2", }, ) assert_expected_traceback( self, logger, logger.messages[1], exception, expected_traceback ) eliot-1.11.0/eliot/tests/test_twisted.py0000664000175000017500000006173413573001140021761 0ustar itamarstitamarst00000000000000""" Tests for L{eliot.twisted}. """ from __future__ import absolute_import, unicode_literals, print_function import sys from functools import wraps try: from twisted.internet.defer import Deferred, succeed, fail, returnValue from twisted.trial.unittest import TestCase from twisted.python.failure import Failure from twisted.logger import globalLogPublisher except ImportError: # Make tests not run at all. 
TestCase = object else: # Make sure we always import this if Twisted is available, so broken # logwriter.py causes a failure: from ..twisted import ( DeferredContext, AlreadyFinished, _passthrough, redirectLogsForTrial, _RedirectLogsForTrial, TwistedDestination, inline_callbacks, ) from .test_generators import assert_expected_action_tree from .._action import start_action, current_action, Action, TaskLevel from .._output import MemoryLogger, Logger from .._message import Message from ..testing import assertContainsFields, capture_logging from .. import removeDestination, addDestination from .._traceback import write_traceback from .common import FakeSys class PassthroughTests(TestCase): """ Tests for L{_passthrough}. """ def test_passthrough(self): """ L{_passthrough} returns the passed-in value. """ obj = object() self.assertIs(obj, _passthrough(obj)) def withActionContext(f): """ Decorator that calls a function with an action context. @param f: A function. """ logger = MemoryLogger() action = start_action(logger, "test") @wraps(f) def test(self): with action.context(): return f(self) return test class DeferredContextTests(TestCase): """ Tests for L{DeferredContext}. """ def test_requireContext(self): """ L{DeferredContext} raises a L{RuntimeError} if it is called without an action context. """ self.assertRaises(RuntimeError, DeferredContext, Deferred()) @withActionContext def test_result(self): """ The passed-in L{Deferred} is available as the L{DeferredContext}'s C{result} attribute. """ result = Deferred() context = DeferredContext(result) self.assertIs(context.result, result) @withActionContext def test_addCallbacksCallbackToDeferred(self): """ L{DeferredContext.addCallbacks} passes the given callback and its corresponding arguments to the wrapped L{Deferred}'s C{addCallbacks}. 
""" called = [] def f(value, x, y): called.append((value, x, y)) result = Deferred() context = DeferredContext(result) context.addCallbacks(f, lambda x: None, (1,), {"y": 2}) result.callback(0) self.assertEqual(called, [(0, 1, 2)]) @withActionContext def test_addCallbacksErrbackToDeferred(self): """ L{DeferredContext.addCallbacks} passes the given errback and its corresponding arguments to the wrapped L{Deferred}'s C{addCallbacks}. """ called = [] def f(value, x, y): value.trap(RuntimeError) called.append((x, y)) result = Deferred() context = DeferredContext(result) context.addCallbacks(lambda x: None, f, None, None, (1,), {"y": 2}) result.errback(RuntimeError()) self.assertEqual(called, [(1, 2)]) @withActionContext def test_addCallbacksWithOnlyCallback(self): """ L{DeferredContext.addCallbacks} can be called with a single argument, a callback function, and passes it to the wrapped L{Deferred}'s C{addCallbacks}. """ called = [] def f(value): called.append(value) result = Deferred() context = DeferredContext(result) context.addCallbacks(f) result.callback(0) self.assertEqual(called, [0]) @withActionContext def test_addCallbacksWithOnlyCallbackErrorCase(self): """ L{DeferredContext.addCallbacks} can be called with a single argument, a callback function, and passes a pass-through errback to the wrapped L{Deferred}'s C{addCallbacks}. """ called = [] def f(value): called.append(value) class ExpectedException(Exception): pass result = Deferred() context = DeferredContext(result) context.addCallbacks(f) result.errback(Failure(ExpectedException())) self.assertEqual(called, []) # The assertion is inside `failureResultOf`. self.failureResultOf(result, ExpectedException) @withActionContext def test_addCallbacksReturnSelf(self): """ L{DeferredContext.addCallbacks} returns the L{DeferredContext}. 
""" result = Deferred() context = DeferredContext(result) self.assertIs(context, context.addCallbacks(lambda x: None, lambda x: None)) def test_addCallbacksCallbackContext(self): """ L{DeferedContext.addCallbacks} adds a callback that runs in context of action that the L{DeferredContext} was created with. """ logger = MemoryLogger() action1 = start_action(logger, "test") action2 = start_action(logger, "test") context = [] d = succeed(None) with action1.context(): d = DeferredContext(d) with action2.context(): d.addCallbacks(lambda x: context.append(current_action()), lambda x: x) self.assertEqual(context, [action1]) def test_addCallbacksErrbackContext(self): """ L{DeferedContext.addCallbacks} adds an errback that runs in context of action that the L{DeferredContext} was created with. """ logger = MemoryLogger() action1 = start_action(logger, "test") action2 = start_action(logger, "test") context = [] d = fail(RuntimeError()) with action1.context(): d = DeferredContext(d) with action2.context(): d.addCallbacks(lambda x: x, lambda x: context.append(current_action())) self.assertEqual(context, [action1]) @withActionContext def test_addCallbacksCallbackResult(self): """ A callback added with DeferredContext.addCallbacks has its result passed on to the next callback. """ d = succeed(0) d = DeferredContext(d) d.addCallbacks(lambda x: [x, 1], lambda x: x) self.assertEqual(self.successResultOf(d.result), [0, 1]) @withActionContext def test_addCallbacksErrbackResult(self): """ An errback added with DeferredContext.addCallbacks has its result passed on to the next callback. """ exception = ZeroDivisionError() d = fail(exception) d = DeferredContext(d) d.addCallbacks(lambda x: x, lambda x: [x.value, 1]) self.assertEqual(self.successResultOf(d.result), [exception, 1]) def test_addActionFinishNoImmediateLogging(self): """ L{DeferredContext.addActionFinish} does not log anything if the L{Deferred} hasn't fired yet. 
""" d = Deferred() logger = MemoryLogger() action = Action(logger, "uuid", TaskLevel(level=[1]), "sys:me") with action.context(): DeferredContext(d).addActionFinish() self.assertFalse(logger.messages) def test_addActionFinishSuccess(self): """ When the L{Deferred} referred to by L{DeferredContext.addActionFinish} fires successfully, a finish message is logged. """ d = Deferred() logger = MemoryLogger() action = Action(logger, "uuid", TaskLevel(level=[1]), "sys:me") with action.context(): DeferredContext(d).addActionFinish() d.callback("result") assertContainsFields( self, logger.messages[0], { "task_uuid": "uuid", "task_level": [1, 1], "action_type": "sys:me", "action_status": "succeeded", }, ) def test_addActionFinishSuccessPassThrough(self): """ L{DeferredContext.addActionFinish} passes through a successful result unchanged. """ d = Deferred() logger = MemoryLogger() action = Action(logger, "uuid", TaskLevel(level=[1]), "sys:me") with action.context(): DeferredContext(d).addActionFinish() d.callback("result") result = [] d.addCallback(result.append) self.assertEqual(result, ["result"]) def test_addActionFinishFailure(self): """ When the L{Deferred} referred to in L{DeferredContext.addActionFinish} fires with an exception, a finish message is logged. """ d = Deferred() logger = MemoryLogger() action = Action(logger, "uuid", TaskLevel(level=[1]), "sys:me") with action.context(): DeferredContext(d).addActionFinish() exception = RuntimeError("because") d.errback(exception) assertContainsFields( self, logger.messages[0], { "task_uuid": "uuid", "task_level": [1, 1], "action_type": "sys:me", "action_status": "failed", "reason": "because", "exception": "%s.RuntimeError" % (RuntimeError.__module__,), }, ) d.addErrback(lambda _: None) # don't let Failure go to Twisted logs def test_addActionFinishFailurePassThrough(self): """ L{DeferredContext.addActionFinish} passes through a failed result unchanged. 
""" d = Deferred() logger = MemoryLogger() action = Action(logger, "uuid", TaskLevel(level=[1]), "sys:me") with action.context(): DeferredContext(d).addActionFinish() failure = Failure(RuntimeError()) d.errback(failure) result = [] d.addErrback(result.append) self.assertEqual(result, [failure]) @withActionContext def test_addActionFinishRaisesAfterAddActionFinish(self): """ After L{DeferredContext.addActionFinish} is called, additional calls to L{DeferredContext.addActionFinish} result in a L{AlreadyFinished} exception. """ d = DeferredContext(Deferred()) d.addActionFinish() self.assertRaises(AlreadyFinished, d.addActionFinish) @withActionContext def test_addCallbacksRaisesAfterAddActionFinish(self): """ After L{DeferredContext.addActionFinish} is called, additional calls to L{DeferredContext.addCallbacks} result in a L{AlreadyFinished} exception. """ d = DeferredContext(Deferred()) d.addActionFinish() self.assertRaises(AlreadyFinished, d.addCallbacks, lambda x: x, lambda x: x) @withActionContext def test_addActionFinishResult(self): """ L{DeferredContext.addActionFinish} returns the L{Deferred}. """ d = Deferred() self.assertIs(d, DeferredContext(d).addActionFinish()) # Having made sure DeferredContext.addCallbacks does the right thing # regarding action contexts, for addCallback/addErrback/addBoth we only # need to ensure that they call DeferredContext.addCallbacks. @withActionContext def test_addCallbackCallsAddCallbacks(self): """ L{DeferredContext.addCallback} passes its arguments on to L{DeferredContext.addCallbacks}. 
""" result = Deferred() context = DeferredContext(result) called = [] def addCallbacks( callback, errback, callbackArgs=None, callbackKeywords=None, errbackArgs=None, errbackKeywords=None, ): called.append( ( callback, errback, callbackArgs, callbackKeywords, errbackArgs, errbackKeywords, ) ) context.addCallbacks = addCallbacks def f(x, y, z): return None context.addCallback(f, 2, z=3) self.assertEqual(called, [(f, _passthrough, (2,), {"z": 3}, None, None)]) @withActionContext def test_addCallbackReturnsSelf(self): """ L{DeferredContext.addCallback} returns the L{DeferredContext}. """ result = Deferred() context = DeferredContext(result) self.assertIs(context, context.addCallback(lambda x: None)) @withActionContext def test_addErrbackCallsAddCallbacks(self): """ L{DeferredContext.addErrback} passes its arguments on to L{DeferredContext.addCallbacks}. """ result = Deferred() context = DeferredContext(result) called = [] def addCallbacks( callback, errback, callbackArgs=None, callbackKeywords=None, errbackArgs=None, errbackKeywords=None, ): called.append( ( callback, errback, callbackArgs, callbackKeywords, errbackArgs, errbackKeywords, ) ) context.addCallbacks = addCallbacks def f(x, y, z): pass context.addErrback(f, 2, z=3) self.assertEqual(called, [(_passthrough, f, None, None, (2,), {"z": 3})]) @withActionContext def test_addErrbackReturnsSelf(self): """ L{DeferredContext.addErrback} returns the L{DeferredContext}. """ result = Deferred() context = DeferredContext(result) self.assertIs(context, context.addErrback(lambda x: None)) @withActionContext def test_addBothCallsAddCallbacks(self): """ L{DeferredContext.addBoth} passes its arguments on to L{DeferredContext.addCallbacks}. 
""" result = Deferred() context = DeferredContext(result) called = [] def addCallbacks( callback, errback, callbackArgs=None, callbackKeywords=None, errbackArgs=None, errbackKeywords=None, ): called.append( ( callback, errback, callbackArgs, callbackKeywords, errbackArgs, errbackKeywords, ) ) context.addCallbacks = addCallbacks def f(x, y, z): return None context.addBoth(f, 2, z=3) self.assertEqual(called, [(f, f, (2,), {"z": 3}, (2,), {"z": 3})]) @withActionContext def test_addBothReturnsSelf(self): """ L{DeferredContext.addBoth} returns the L{DeferredContext}. """ result = Deferred() context = DeferredContext(result) self.assertIs(context, context.addBoth(lambda x: None)) class RedirectLogsForTrialTests(TestCase): """ Tests for L{redirectLogsForTrial}. """ def assertDestinationAdded(self, programPath): """ Assert that when running under the given program a new destination is added by L{redirectLogsForTrial}. @param programPath: A path to a program. @type programPath: L{str} """ destination = _RedirectLogsForTrial(FakeSys([programPath], b""))() self.assertIsInstance(destination, TwistedDestination) # If this was not added as destination, removing it will raise an # exception: try: removeDestination(destination) except ValueError: self.fail("Destination was not added.") def test_withTrial(self): """ When C{sys.argv[0]} is C{"trial"} a new destination is added by L{redirectLogsForTrial}. """ self.assertDestinationAdded("trial") def test_withAbsoluteTrialPath(self): """ When C{sys.argv[0]} is an absolute path ending with C{"trial"} a new destination is added by L{redirectLogsForTrial}. """ self.assertDestinationAdded("/usr/bin/trial") def test_withRelativeTrialPath(self): """ When C{sys.argv[0]} is a relative path ending with C{"trial"} a new destination is added by L{redirectLogsForTrial}. """ self.assertDestinationAdded("./trial") def test_withoutTrialNoDestination(self): """ When C{sys.argv[0]} is not C{"trial"} no destination is added by L{redirectLogsForTrial}. 
""" originalDestinations = Logger._destinations._destinations[:] _RedirectLogsForTrial(FakeSys(["myprogram.py"], b""))() self.assertEqual(Logger._destinations._destinations, originalDestinations) def test_trialAsPathNoDestination(self): """ When C{sys.argv[0]} has C{"trial"} as directory name but not program name no destination is added by L{redirectLogsForTrial}. """ originalDestinations = Logger._destinations._destinations[:] _RedirectLogsForTrial(FakeSys(["./trial/myprogram.py"], b""))() self.assertEqual(Logger._destinations._destinations, originalDestinations) def test_withoutTrialResult(self): """ When not running under I{trial} L{None} is returned. """ self.assertIs(None, _RedirectLogsForTrial(FakeSys(["myprogram.py"], b""))()) def test_noDuplicateAdds(self): """ If a destination has already been added, calling L{redirectLogsForTrial} a second time does not add another destination. """ redirect = _RedirectLogsForTrial(FakeSys(["trial"], b"")) destination = redirect() self.addCleanup(removeDestination, destination) originalDestinations = Logger._destinations._destinations[:] redirect() self.assertEqual(Logger._destinations._destinations, originalDestinations) def test_noDuplicateAddsResult(self): """ If a destination has already been added, calling L{redirectLogsForTrial} a second time returns L{None}. """ redirect = _RedirectLogsForTrial(FakeSys(["trial"], b"")) destination = redirect() self.addCleanup(removeDestination, destination) result = redirect() self.assertIs(result, None) def test_publicAPI(self): """ L{redirectLogsForTrial} is an instance of L{_RedirectLogsForTrial}. """ self.assertIsInstance(redirectLogsForTrial, _RedirectLogsForTrial) def test_defaults(self): """ By default L{redirectLogsForTrial} looks at L{sys.argv}. """ self.assertEqual(redirectLogsForTrial._sys, sys) class TwistedDestinationTests(TestCase): """ Tests for L{TwistedDestination}. """ def redirect_to_twisted(self): """ Redirect Eliot logs to Twisted. 
@return: L{list} of L{dict} - the log messages written to Twisted will eventually be appended to this list. """ written = [] def got_event(event): if event.get("log_namespace") == "eliot": written.append((event["log_level"].name, event["eliot"])) globalLogPublisher.addObserver(got_event) self.addCleanup(globalLogPublisher.removeObserver, got_event) destination = TwistedDestination() addDestination(destination) self.addCleanup(removeDestination, destination) return written def redirect_to_list(self): """ Redirect Eliot logs to a list. @return: L{list} that will have eventually have the written Eliot messages added to it. """ written = [] destination = written.append addDestination(destination) self.addCleanup(removeDestination, destination) return written def test_normalMessages(self): """ Regular eliot messages are pretty-printed to the given L{LogPublisher}. """ writtenToTwisted = self.redirect_to_twisted() written = self.redirect_to_list() logger = Logger() Message.new(x=123, y=456).write(logger) self.assertEqual(writtenToTwisted, [("info", written[0])]) def test_tracebackMessages(self): """ Traceback eliot messages are written to the given L{LogPublisher} with the traceback formatted for easier reading. """ writtenToTwisted = self.redirect_to_twisted() written = self.redirect_to_list() logger = Logger() def raiser(): raise RuntimeError("because") try: raiser() except Exception: write_traceback(logger) self.assertEqual(writtenToTwisted, [("critical", written[0])]) class InlineCallbacksTests(TestCase): """Tests for C{inline_callbacks}.""" # Get our custom assertion failure messages *and* the standard ones. 
longMessage = True def _a_b_test(self, logger, g): """A yield was done in between messages a and b inside C{inline_callbacks}.""" with start_action(action_type="the-action"): self.assertIs(None, self.successResultOf(g())) assert_expected_action_tree(self, logger, "the-action", ["a", "yielded", "b"]) @capture_logging(None) def test_yield_none(self, logger): def g(): Message.log(message_type="a") yield Message.log(message_type="b") g = inline_callbacks(g, debug=True) self._a_b_test(logger, g) @capture_logging(None) def test_yield_fired_deferred(self, logger): def g(): Message.log(message_type="a") yield succeed(None) Message.log(message_type="b") g = inline_callbacks(g, debug=True) self._a_b_test(logger, g) @capture_logging(None) def test_yield_unfired_deferred(self, logger): waiting = Deferred() def g(): Message.log(message_type="a") yield waiting Message.log(message_type="b") g = inline_callbacks(g, debug=True) with start_action(action_type="the-action"): d = g() self.assertNoResult(waiting) waiting.callback(None) self.assertIs(None, self.successResultOf(d)) assert_expected_action_tree(self, logger, "the-action", ["a", "yielded", "b"]) @capture_logging(None) def test_returnValue(self, logger): result = object() @inline_callbacks def g(): if False: yield returnValue(result) with start_action(action_type="the-action"): d = g() self.assertIs(result, self.successResultOf(d)) assert_expected_action_tree(self, logger, "the-action", []) @capture_logging(None) def test_returnValue_in_action(self, logger): result = object() @inline_callbacks def g(): if False: yield with start_action(action_type="g"): returnValue(result) with start_action(action_type="the-action"): d = g() self.assertIs(result, self.successResultOf(d)) assert_expected_action_tree(self, logger, "the-action", [{"g": []}]) @capture_logging(None) def test_nested_returnValue(self, logger): result = object() another = object() def g(): d = h() # Run h through to the end but ignore its result. 
yield d # Give back _our_ result. returnValue(result) g = inline_callbacks(g, debug=True) def h(): yield returnValue(another) h = inline_callbacks(h, debug=True) with start_action(action_type="the-action"): d = g() self.assertIs(result, self.successResultOf(d)) assert_expected_action_tree(self, logger, "the-action", ["yielded", "yielded"]) @capture_logging(None) def test_async_returnValue(self, logger): result = object() waiting = Deferred() @inline_callbacks def g(): yield waiting returnValue(result) with start_action(action_type="the-action"): d = g() waiting.callback(None) self.assertIs(result, self.successResultOf(d)) @capture_logging(None) def test_nested_async_returnValue(self, logger): result = object() another = object() waiting = Deferred() @inline_callbacks def g(): yield h() returnValue(result) @inline_callbacks def h(): yield waiting returnValue(another) with start_action(action_type="the-action"): d = g() waiting.callback(None) self.assertIs(result, self.successResultOf(d)) eliot-1.11.0/eliot/tests/test_filter.py0000664000175000017500000000705713470775105021600 0ustar itamarstitamarst00000000000000""" Tests for L{eliot.filter}. """ from __future__ import unicode_literals import sys from unittest import TestCase from datetime import datetime from io import BytesIO import inspect from .common import FakeSys from .. import _bytesjson as json from ..filter import EliotFilter, main, USAGE class EliotFilterTests(TestCase): """ Tests for L{EliotFilter}. """ def test_expression(self): """ For each item in the incoming sequence L{EliotFilter.run} calls L{EliotFilter._evaluate} with the item decoded from JSON, and writes the result to the output file as JSON. 
""" f = BytesIO() efilter = EliotFilter("J", [b'"abcd"', b"[1, 2]"], f) efilter._evaluate = lambda expr: {"x": len(expr), "orig": expr} self.assertEqual(f.getvalue(), b"") efilter.run() self.assertEqual( f.getvalue(), json.dumps({"x": 4, "orig": "abcd"}) + b"\n" + json.dumps({"x": 2, "orig": [1, 2]}) + b"\n", ) def evaluateExpression(self, expr, message): """ Render a single message with the given expression using L{EliotFilter._evaluate}. """ efilter = EliotFilter(expr, [], BytesIO()) return efilter._evaluate(message) def test_J(self): """ The expression has access to the decoded JSON message as C{J} in its locals. """ result = self.evaluateExpression("J['a']", {"a": 123}) self.assertEqual(result, 123) def test_otherLocals(self): """ The expression has access to L{datetime} and L{timedelta} in its built-ins. """ result = self.evaluateExpression( "isinstance(datetime.utcnow() - datetime.utcnow(), timedelta)", {} ) self.assertEqual(result, True) def test_datetimeSerialization(self): """ Any L{datetime} in results will be serialized using L{datetime.isoformat}. """ dt = datetime(2012, 12, 31) f = BytesIO() EliotFilter("datetime(2012, 12, 31)", ["{}"], f).run() expected = json.dumps(dt.isoformat()) + b"\n" self.assertEqual(f.getvalue(), expected) def test_SKIP(self): """ A result of C{SKIP} indicates nothing should be output. """ f = BytesIO() EliotFilter("SKIP", [b'{"a": 123}'], f).run() self.assertEqual(f.getvalue(), b"") class MainTests(TestCase): """ Test cases for L{main}. """ def test_default(self): """ By default L{main} uses information from L{sys}. """ self.assertEqual(inspect.getargspec(main).defaults, (sys,)) def test_stdinOut(self): """ L{main} reads from the C{stdin} attribute of the given C{sys} equivalent, and writes rendered expressions to the C{stdout} attribute. """ sys = FakeSys(["eliotfilter", "J[0]"], b"[1, 2]\n[4, 5]\n") main(sys) self.assertEqual(sys.stdout.getvalue(), b"1\n4\n") def test_success(self): """ A successful run returns C{0}. 
""" sys = FakeSys(["eliotfilter", "J[0]"], b"[1, 2]\n[4, 5]\n") result = main(sys) self.assertEqual(result, 0) def test_noArguments(self): """ If given no arguments, usage documentation is printed to stderr and C{1} is returned. """ sys = FakeSys(["eliotfilter"], b"") result = main(sys) self.assertEqual(sys.stderr.getvalue(), USAGE) self.assertEqual(result, 1) eliot-1.11.0/eliot/tests/test_tai64n.py0000664000175000017500000000350413470775105021411 0ustar itamarstitamarst00000000000000""" Tests for L{eliot.tai64n}. """ from __future__ import unicode_literals import errno import time import subprocess from unittest import TestCase, SkipTest from ..tai64n import encode, decode class CodecTests(TestCase): """ Tests for L{encode} and L{decode}. """ def test_encode(self): """ L{encode} encodes timestamps in TAI64N format. """ t = 1387299889.153187625 self.assertEqual(encode(t), "@4000000052b0843b092174b9") def test_decode(self): """ L{decode} decodes timestamps from TAI64N format. """ t = time.time() self.assertAlmostEqual(t, decode(encode(t)), 9) class FunctionalTests(TestCase): """ Functional tests for L{encode}. """ def test_encode(self): """ The daemontools tai64nlocal tool can correctly decode timestamps output by L{encode}. """ try: process = subprocess.Popen( ["tai64nlocal"], bufsize=4096, stdin=subprocess.PIPE, stdout=subprocess.PIPE, ) except OSError as e: if e.errno == errno.ENOENT: raise SkipTest("This test requires the daemontools package") else: raise # Because of limitations of the time libraries tai64nlocal uses we # apparently can't verify beyond this level of accuracy. 
def assert_expected_action_tree(
    testcase, logger, expected_action_type, expected_type_tree
):
    """
    Assert that the logger recorded an action of the given type whose
    children match the given type tree.

    @see: L{assert_generator_logs_action_tree}
    """
    action = assertHasAction(testcase, logger, expected_action_type, True)
    detail = "Logger had messages:\n{}".format(pformat(logger.messages, indent=4))
    testcase.assertEqual(
        {expected_action_type: expected_type_tree}, action.type_tree(), detail
    )
@type expected_action_type: L{unicode} @param expected_type_tree: The types of actions and messages which should be logged beneath the expected action. The structure of this value matches the structure returned by L{LoggedAction.type_tree}. @type expected_type_tree: L{list} """ list(eliot_friendly_generator_function(generator_function)()) assert_expected_action_tree( testcase, logger, expected_action_type, expected_type_tree ) class EliotFriendlyGeneratorFunctionTests(TestCase): """ Tests for L{eliot_friendly_generator_function}. """ # Get our custom assertion failure messages *and* the standard ones. longMessage = True @capture_logging(None) def test_yield_none(self, logger): @eliot_friendly_generator_function def g(): Message.log(message_type="hello") yield Message.log(message_type="goodbye") g.debug = True # output yielded messages with start_action(action_type="the-action"): list(g()) assert_expected_action_tree( self, logger, "the-action", ["hello", "yielded", "goodbye"] ) @capture_logging(None) def test_yield_value(self, logger): expected = object() @eliot_friendly_generator_function def g(): Message.log(message_type="hello") yield expected Message.log(message_type="goodbye") g.debug = True # output yielded messages with start_action(action_type="the-action"): self.assertEqual([expected], list(g())) assert_expected_action_tree( self, logger, "the-action", ["hello", "yielded", "goodbye"] ) @capture_logging(None) def test_yield_inside_another_action(self, logger): @eliot_friendly_generator_function def g(): Message.log(message_type="a") with start_action(action_type="confounding-factor"): Message.log(message_type="b") yield None Message.log(message_type="c") Message.log(message_type="d") g.debug = True # output yielded messages with start_action(action_type="the-action"): list(g()) assert_expected_action_tree( self, logger, "the-action", ["a", {"confounding-factor": ["b", "yielded", "c"]}, "d"], ) @capture_logging(None) def 
    @capture_logging(None)
    def test_generator_and_non_generator(self, logger):
        """
        Messages logged by the caller between calls to C{next()} on the
        wrapped generator are interleaved correctly with the generator's
        own messages: each resumption continues in the right context.
        """
        @eliot_friendly_generator_function
        def g():
            Message.log(message_type="a")
            yield
            with start_action(action_type="action-a"):
                Message.log(message_type="b")
                yield
                Message.log(message_type="c")

            Message.log(message_type="d")
            yield

        g.debug = True  # output yielded messages
        with start_action(action_type="the-action"):
            generator = g()
            # Drive the generator by hand, logging caller-side messages
            # ("0", "1", "2") between resumptions.
            next(generator)
            Message.log(message_type="0")
            next(generator)
            Message.log(message_type="1")
            next(generator)
            Message.log(message_type="2")
            self.assertRaises(StopIteration, lambda: next(generator))

        # Caller messages land at the top level; generator messages keep
        # their own (possibly nested) action placement.
        assert_expected_action_tree(
            self,
            logger,
            "the-action",
            [
                "a",
                "yielded",
                "0",
                {"action-a": ["b", "yielded", "c"]},
                "1",
                "d",
                "yielded",
                "2",
            ],
        )
{"1": ["1-b", "yielded", "1-c"]}, "2-a", {"2": ["2-b", "yielded", "2-c"]}, "1-d", "2-d", ], ) @capture_logging(None) def test_close_generator(self, logger): @eliot_friendly_generator_function def g(): Message.log(message_type="a") try: yield Message.log(message_type="b") finally: Message.log(message_type="c") g.debug = True # output yielded messages with start_action(action_type="the-action"): gen = g() next(gen) gen.close() assert_expected_action_tree(self, logger, "the-action", ["a", "yielded", "c"]) @capture_logging(None) def test_nested_generators(self, logger): @eliot_friendly_generator_function def g(recurse): with start_action(action_type="a-recurse={}".format(recurse)): Message.log(message_type="m-recurse={}".format(recurse)) if recurse: set(g(False)) else: yield g.debug = True # output yielded messages with start_action(action_type="the-action"): set(g(True)) assert_expected_action_tree( self, logger, "the-action", [ { "a-recurse=True": [ "m-recurse=True", {"a-recurse=False": ["m-recurse=False", "yielded"]}, ] } ], ) eliot-1.11.0/eliot/tests/test_journald.py0000664000175000017500000001733313470775105022127 0ustar itamarstitamarst00000000000000""" Tests for L{eliot.journald}. """ from os import getpid, strerror from unittest import skipUnless, TestCase from subprocess import check_output, CalledProcessError, STDOUT from errno import EINVAL from sys import argv from uuid import uuid4 from time import sleep from six import text_type as unicode from .._bytesjson import loads from .._output import MemoryLogger from .._message import TASK_UUID_FIELD from .. import start_action, Message, write_traceback try: from ..journald import sd_journal_send, JournaldDestination except ImportError: sd_journal_send = None def _journald_available(): """ :return: Boolean indicating whether journald is available to use. 
""" if sd_journal_send is None: return False try: check_output(["journalctl", "-b", "-n1"], stderr=STDOUT) except (OSError, CalledProcessError): return False return True def last_journald_message(): """ @return: Last journald message from this process as a dictionary in journald JSON format. """ # It may take a little for messages to actually reach journald, so we # write out marker message and wait until it arrives. We can then be # sure the message right before it is the one we want. marker = unicode(uuid4()) sd_journal_send(MESSAGE=marker.encode("ascii")) for i in range(500): messages = check_output( [ b"journalctl", b"-a", b"-o", b"json", b"-n2", b"_PID=" + str(getpid()).encode("ascii"), ] ) messages = [loads(m) for m in messages.splitlines()] if len(messages) == 2 and messages[1]["MESSAGE"] == marker: return messages[0] sleep(0.01) raise RuntimeError("Message never arrived?!") class SdJournaldSendTests(TestCase): """ Functional tests for L{sd_journal_send}. """ @skipUnless( _journald_available(), "journald unavailable or inactive on this machine." ) def setUp(self): pass def assert_roundtrip(self, value): """ Write a value as a C{MESSAGE} field, assert it is output. @param value: Value to write as unicode. """ sd_journal_send(MESSAGE=value) result = last_journald_message() self.assertEqual(value, result["MESSAGE"].encode("utf-8")) def test_message(self): """ L{sd_journal_send} can write a C{MESSAGE} field. """ self.assert_roundtrip(b"hello") def test_percent(self): """ L{sd_journal_send} can write a C{MESSAGE} field with a percent. Underlying C API calls does printf formatting so this is a plausible failure mode. """ self.assert_roundtrip(b"hello%world") def test_large(self): """ L{sd_journal_send} can write a C{MESSAGE} field with a large message. """ self.assert_roundtrip(b"hello world" * 20000) def test_multiple_fields(self): """ L{sd_journal_send} can send multiple fields. 
""" sd_journal_send(MESSAGE=b"hello", BONUS_FIELD=b"world") result = last_journald_message() self.assertEqual( (b"hello", b"world"), (result["MESSAGE"].encode("ascii"), result["BONUS_FIELD"].encode("ascii")), ) def test_error(self): """ L{sd_journal_send} raises an error when it gets a non-0 result from the underlying API. """ with self.assertRaises(IOError) as context: sd_journal_send(**{"": b"123"}) exc = context.exception self.assertEqual((exc.errno, exc.strerror), (EINVAL, strerror(EINVAL))) class JournaldDestinationTests(TestCase): """ Tests for L{JournaldDestination}. """ @skipUnless( _journald_available(), "journald unavailable or inactive on this machine." ) def setUp(self): self.destination = JournaldDestination() self.logger = MemoryLogger() def test_json(self): """ The message is stored as JSON in the MESSAGE field. """ Message.new(hello="world", key=123).write(self.logger) message = self.logger.messages[0] self.destination(message) self.assertEqual(loads(last_journald_message()["MESSAGE"]), message) def assert_field_for(self, message, field_name, field_value): """ If the given message is logged by Eliot, the given journald field has the expected value. @param message: Dictionary to log. @param field_name: Journald field name to check. @param field_value: Expected value for the field. """ self.destination(message) self.assertEqual(last_journald_message()[field_name], field_value) def test_action_type(self): """ The C{action_type} is stored in the ELIOT_TYPE field. """ action_type = "test:type" start_action(self.logger, action_type=action_type) self.assert_field_for(self.logger.messages[0], "ELIOT_TYPE", action_type) def test_message_type(self): """ The C{message_type} is stored in the ELIOT_TYPE field. 
""" message_type = "test:type:message" Message.new(message_type=message_type).write(self.logger) self.assert_field_for(self.logger.messages[0], "ELIOT_TYPE", message_type) def test_no_type(self): """ An empty string is stored in ELIOT_TYPE if no type is known. """ Message.new().write(self.logger) self.assert_field_for(self.logger.messages[0], "ELIOT_TYPE", "") def test_uuid(self): """ The task UUID is stored in the ELIOT_TASK field. """ start_action(self.logger, action_type="xxx") self.assert_field_for( self.logger.messages[0], "ELIOT_TASK", self.logger.messages[0][TASK_UUID_FIELD], ) def test_info_priorities(self): """ Untyped messages, action start, successful action end, random typed message all get priority 6 ("info"). """ with start_action(self.logger, action_type="xxx"): Message.new(message_type="msg").write(self.logger) Message.new(x=123).write(self.logger) priorities = [] for message in self.logger.messages: self.destination(message) priorities.append(last_journald_message()["PRIORITY"]) self.assertEqual(priorities, ["6", "6", "6", "6"]) def test_error_priority(self): """ A failed action gets priority 3 ("error"). """ try: with start_action(self.logger, action_type="xxx"): raise ZeroDivisionError() except ZeroDivisionError: pass self.assert_field_for(self.logger.messages[-1], "PRIORITY", "3") def test_critical_priority(self): """ A traceback gets priority 2 ("critical"). """ try: raise ZeroDivisionError() except ZeroDivisionError: write_traceback(logger=self.logger) self.assert_field_for(self.logger.serialize()[-1], "PRIORITY", "2") def test_identifier(self): """ C{SYSLOG_IDENTIFIER} defaults to C{os.path.basename(sys.argv[0])}. """ identifier = "/usr/bin/testing123" try: original = argv[0] argv[0] = identifier # Recreate JournaldDestination with the newly set argv[0]. 
class SerializerTests(TestCase):
    """
    Tests for the standard serializer helpers.
    """

    def test_timestamp(self):
        """
        L{timestamp} renders a UTC L{datetime} as a Unicode string.
        """
        when = datetime(2012, 9, 28, 14, 53, 6, 123456)
        self.assertEqual("2012-09-28T14:53:06.123456Z", timestamp(when))

    def test_identity(self):
        """
        L{identity} hands back exactly the object it was given.
        """
        marker = object()
        self.assertIs(identity(marker), marker)

    def test_md5hex(self):
        """
        L{md5hex} returns the hex digest of the MD5 checksum of its input.
        """
        payload = b"01234456789"
        expected = md5(payload).hexdigest()
        self.assertEqual(md5hex(payload), expected)
""" loop = asyncio.get_event_loop() futures = [asyncio.ensure_future(f()) for f in async_functions] async def wait_for_futures(): for future in futures: await future loop.run_until_complete(wait_for_futures()) class CoroutineTests(TestCase): """ Tests for coroutines. """ @capture_logging(None) def test_multiple_coroutines_contexts(self, logger): """ Each top-level coroutine has its own Eliot logging context. """ async def waiting_coro(): with start_action(action_type="waiting"): await asyncio.sleep(0.5) run_coroutines(waiting_coro, standalone_coro) trees = Parser.parse_stream(logger.messages) self.assertEqual( sorted([(t.root().action_type, t.root().children) for t in trees]), [("standalone", []), ("waiting", [])], ) @capture_logging(None) def test_await_inherits_coroutine_contexts(self, logger): """ awaited coroutines inherit the logging context. """ run_coroutines(calling_coro) [tree] = Parser.parse_stream(logger.messages) root = tree.root() [child] = root.children self.assertEqual( (root.action_type, child.action_type, child.children), ("calling", "standalone", []), ) @capture_logging(None) def test_interleaved_coroutines(self, logger): """ start_action() started in one coroutine doesn't impact another in a different coroutine. """ async def coro_sleep(delay, action_type): with start_action(action_type=action_type): await asyncio.sleep(delay) async def main(): with start_action(action_type="main"): f1 = asyncio.ensure_future(coro_sleep(1, "a")) f2 = asyncio.ensure_future(coro_sleep(0.5, "b")) await f1 await f2 run_coroutines(main) [tree] = list(Parser.parse_stream(logger.messages)) root = tree.root() self.assertEqual(root.action_type, "main") self.assertEqual(sorted([c.action_type for c in root.children]), ["a", "b"]) eliot-1.11.0/eliot/tests/__init__.py0000664000175000017500000000032513470775105021002 0ustar itamarstitamarst00000000000000""" Tests for the eliot package. 
""" # Increase hypothesis deadline so we don't time out on PyPy: from hypothesis import settings settings.register_profile("eliot", deadline=1000) settings.load_profile("eliot") eliot-1.11.0/eliot/_util.py0000664000175000017500000000351713470775105017223 0ustar itamarstitamarst00000000000000""" Utilities that don't go anywhere else. """ from __future__ import unicode_literals import sys from types import ModuleType from six import exec_, text_type as unicode, PY3 def safeunicode(o): """ Like C{unicode()}, but catches and swallows any raised exceptions. @param o: An object of some sort. @return: C{unicode(o)}, or an error message if that failed. @rtype: C{unicode} """ try: return unicode(o) except: # Not much we can do about this... return "eliot: unknown, unicode() raised exception" def saferepr(o): """ Like C{unicode(repr())}, but catches and swallows any raised exceptions. @param o: An object of some sort. @return: C{unicode(repr(o))}, or an error message if that failed. @rtype: C{unicode} """ try: return unicode(repr(o)) except: # Not much we can do about this... return "eliot: unknown, unicode() raised exception" def load_module(name, original_module): """ Load a copy of a module, distinct from what you'd get if you imported it directly. @param str name: The name of the new module. @param original_module: The original module we're recreating. @return: A new, distinct module. 
""" module = ModuleType(name) if PY3: import importlib.util spec = importlib.util.find_spec(original_module.__name__) source = spec.loader.get_code(original_module.__name__) else: if getattr(sys, "frozen", False): raise NotImplementedError("Can't load modules on Python 2 with PyInstaller") path = original_module.__file__ if path.endswith(".pyc") or path.endswith(".pyo"): path = path[:-1] with open(path) as f: source = f.read() exec_(source, module.__dict__, module.__dict__) return module eliot-1.11.0/eliot/_message.py0000664000175000017500000001316313573001140017651 0ustar itamarstitamarst00000000000000""" Log messages and related utilities. """ from __future__ import unicode_literals import time from warnings import warn from six import text_type as unicode from pyrsistent import PClass, pmap_field MESSAGE_TYPE_FIELD = "message_type" TASK_UUID_FIELD = "task_uuid" TASK_LEVEL_FIELD = "task_level" TIMESTAMP_FIELD = "timestamp" EXCEPTION_FIELD = "exception" REASON_FIELD = "reason" class Message(object): """ A log message. Messages are basically dictionaries, mapping "fields" to "values". Field names should not start with C{'_'}, as those are reserved for system use (e.g. C{"_id"} is used by Elasticsearch for unique message identifiers and may be auto-populated by logstash). """ # Overrideable for testing purposes: _time = time.time @classmethod def new(_class, _serializer=None, **fields): """ Create a new L{Message}. The keyword arguments will become the initial contents of the L{Message}. @param _serializer: A positional argument, either C{None} or a L{eliot._validation._MessageSerializer} with which a L{eliot.ILogger} may choose to serialize the message. If you're using L{eliot.MessageType} this will be populated for you. 
@return: The new L{Message} """ warn( "Message.new() is deprecated since 1.11.0, " "use eliot.log_message() instead.", DeprecationWarning, stacklevel=2, ) return _class(fields, _serializer) @classmethod def log(_class, **fields): """ Write a new L{Message} to the default L{Logger}. The keyword arguments will become contents of the L{Message}. """ warn( "Message.log() is deprecated since 1.11.0, " "use Action.log() or eliot.log_message() instead.", DeprecationWarning, stacklevel=2, ) _class(fields).write() def __init__(self, contents, serializer=None): """ You can also use L{Message.new} to create L{Message} objects. @param contents: The contents of this L{Message}, a C{dict} whose keys must be C{unicode}, or text that has been UTF-8 encoded to C{bytes}. @param serializer: Either C{None}, or L{eliot._validation._MessageSerializer} with which a L{eliot.Logger} may choose to serialize the message. If you're using L{eliot.MessageType} this will be populated for you. """ self._contents = contents.copy() self._serializer = serializer def bind(self, **fields): """ Return a new L{Message} with this message's contents plus the additional given bindings. """ contents = self._contents.copy() contents.update(fields) return Message(contents, self._serializer) def contents(self): """ Return a copy of L{Message} contents. """ return self._contents.copy() def _timestamp(self): """ Return the current time. """ return self._time() def write(self, logger=None, action=None): """ Write the message to the given logger. This will additionally include a timestamp, the action context if any, and any other fields. Byte field names will be converted to Unicode. @type logger: L{eliot.ILogger} or C{None} indicating the default one. @param action: The L{Action} which is the context for this message. If C{None}, the L{Action} will be deduced from the current call stack. 
""" fields = dict(self._contents) if "message_type" not in fields: fields["message_type"] = "" if self._serializer is not None: fields["__eliot_serializer__"] = self._serializer if action is None: fields["__eliot_logger__"] = logger log_message(**fields) else: action.log(**fields) class WrittenMessage(PClass): """ A L{Message} that has been logged. @ivar _logged_dict: The originally logged dictionary. """ _logged_dict = pmap_field((str, unicode), object) @property def timestamp(self): """ The Unix timestamp of when the message was logged. """ return self._logged_dict[TIMESTAMP_FIELD] @property def task_uuid(self): """ The UUID of the task in which the message was logged. """ return self._logged_dict[TASK_UUID_FIELD] @property def task_level(self): """ The L{TaskLevel} of this message appears within the task. """ return TaskLevel(level=self._logged_dict[TASK_LEVEL_FIELD]) @property def contents(self): """ A C{PMap}, the message contents without Eliot metadata. """ return ( self._logged_dict.discard(TIMESTAMP_FIELD) .discard(TASK_UUID_FIELD) .discard(TASK_LEVEL_FIELD) ) @classmethod def from_dict(cls, logged_dictionary): """ Reconstruct a L{WrittenMessage} from a logged dictionary. @param logged_dictionary: A C{PMap} representing a parsed log entry. @return: A L{WrittenMessage} for that dictionary. """ return cls(_logged_dict=logged_dictionary) def as_dict(self): """ Return the dictionary that was used to write this message. @return: A C{dict}, as might be logged by Eliot. """ return self._logged_dict # Import at end to deal with circular imports: from ._action import log_message, TaskLevel eliot-1.11.0/eliot/journald.py0000664000175000017500000000511513470775105017721 0ustar itamarstitamarst00000000000000""" journald support for Eliot. 
""" from cffi import FFI from os import strerror from sys import argv from os.path import basename from ._bytesjson import dumps from ._message import TASK_UUID_FIELD, MESSAGE_TYPE_FIELD from ._action import ACTION_TYPE_FIELD, ACTION_STATUS_FIELD, FAILED_STATUS _ffi = FFI() _ffi.cdef( """ int sd_journal_send(const char *format, ...); """ ) try: try: _journald = _ffi.dlopen("libsystemd.so.0") except OSError: # Older versions of systemd have separate library: _journald = _ffi.dlopen("libsystemd-journal.so.0") except OSError as e: raise ImportError("Failed to load journald: " + str(e)) def sd_journal_send(**kwargs): """ Send a message to the journald log. @param kwargs: Mapping between field names to values, both as bytes. @raise IOError: If the operation failed. """ # The function uses printf formatting, so we need to quote # percentages. fields = [ _ffi.new("char[]", key.encode("ascii") + b"=" + value.replace(b"%", b"%%")) for key, value in kwargs.items() ] fields.append(_ffi.NULL) result = _journald.sd_journal_send(*fields) if result != 0: raise IOError(-result, strerror(-result)) class JournaldDestination(object): """ A logging destination that writes to journald. The message will be logged as JSON, with an additional field C{ELIOT_TASK} storing the C{task_uuid} and C{ELIOT_TYPE} storing the C{message_type} or C{action_type}. Messages for failed actions will get priority 3 ("error"), and traceback messages will get priority 2 ("critical"). All other messages will get priority 1 ("info"). """ def __init__(self): self._identifier = basename(argv[0]).encode("utf-8") def __call__(self, message): """ Write the given message to journald. @param message: Dictionary passed from a C{Logger}. 
""" eliot_type = "" priority = b"6" if ACTION_TYPE_FIELD in message: eliot_type = message[ACTION_TYPE_FIELD] if message[ACTION_STATUS_FIELD] == FAILED_STATUS: priority = b"3" elif MESSAGE_TYPE_FIELD in message: eliot_type = message[MESSAGE_TYPE_FIELD] if eliot_type == "eliot:traceback": priority = b"2" sd_journal_send( MESSAGE=dumps(message), ELIOT_TASK=message[TASK_UUID_FIELD].encode("utf-8"), ELIOT_TYPE=eliot_type.encode("utf-8"), SYSLOG_IDENTIFIER=self._identifier, PRIORITY=priority, ) eliot-1.11.0/eliot/_version.py0000664000175000017500000000103013573001162017704 0ustar itamarstitamarst00000000000000 # This file was generated by 'versioneer.py' (0.18) from # revision-control system data, or from the parent directory name of an # unpacked source archive. Distribution tarballs contain a pre-generated copy # of this file. from __future__ import absolute_import import json version_json = ''' { "date": "2019-12-07T14:22:41-0500", "dirty": false, "error": null, "full-revisionid": "4ca0fa7519321aceec860e982123a5c448a9debd", "version": "1.11.0" } ''' # END VERSION_JSON def get_versions(): return json.loads(version_json) eliot-1.11.0/eliot/dask.py0000664000175000017500000001026213573001140017005 0ustar itamarstitamarst00000000000000"""Support for Eliot tracing with Dask computations.""" from pyrsistent import PClass, field from dask import compute, optimize from dask.core import toposort, get_dependencies from . import start_action, current_action, Action class _RunWithEliotContext(PClass): """ Run a callable within an Eliot context. @ivar task_id: The serialized Eliot task ID. @ivar func: The function that Dask wants to run. @ivar key: The key in the Dask graph. @ivar dependencies: The keys in the Dask graph this depends on. 
""" task_id = field(type=str) func = field() # callable key = field(type=str) dependencies = field() # Pretend to be underlying callable for purposes of equality; necessary for # optimizer to be happy: def __eq__(self, other): return self.func == other def __ne__(self, other): return self.func != other def __hash__(self): return hash(self.func) def __call__(self, *args, **kwargs): with Action.continue_task(task_id=self.task_id) as action: action.log( message_type="dask:task", key=self.key, dependencies=self.dependencies ) return self.func(*args, **kwargs) def compute_with_trace(*args): """Do Dask compute(), but with added Eliot tracing. Dask is a graph of tasks, but Eliot logs trees. So we need to emulate a graph using a tree. We do this by making Eliot action for each task, but having it list the tasks it depends on. We use the following algorithm: 1. Create a top-level action. 2. For each entry in the dask graph, create a child with serialize_task_id. Do this in likely order of execution, so that if B depends on A the task level of B is higher than the task Ievel of A. 3. Replace each function with a wrapper that uses the corresponding task ID (with Action.continue_task), and while it's at it also records which other things this function depends on. Known issues: 1. Retries will confuse Eliot. Probably need different distributed-tree mechanism within Eliot to solve that. """ # 1. Create top-level Eliot Action: with start_action(action_type="dask:compute"): # In order to reduce logging verbosity, add logging to the already # optimized graph: optimized = optimize(*args, optimizations=[_add_logging]) return compute(*optimized, optimize_graph=False) def _add_logging(dsk, ignore=None): """ Add logging to a Dask graph. @param dsk: The Dask graph. @return: New Dask graph. """ ctx = current_action() result = {} # Use topological sort to ensure Eliot actions are in logical order of # execution in Dask: keys = toposort(dsk) # Give each key a string name. 
def md5hex(data):
    """
    Return hex MD5 of the input bytes.

    @param data: Some C{bytes}.

    @return: Hex-encoded MD5 of the data.
    """
    digest = md5()
    digest.update(data)
    return digest.hexdigest()
@see: U{http://cr.yp.to/libtai/tai64.html}.
"""

from __future__ import unicode_literals

import struct
from binascii import b2a_hex, a2b_hex

# Big-endian: 64-bit seconds followed by 32-bit nanoseconds.
_STRUCTURE = b">QI"
# TAI64 places the epoch at 2**62 seconds; the extra 10 accounts for leap
# seconds accumulated between TAI and UTC.
_OFFSET = (2 ** 62) + 10  # last 10 are leap seconds


def encode(timestamp):
    """
    Convert seconds since epoch to TAI64N string.

    @param timestamp: Seconds since UTC Unix epoch as C{float}.

    @return: TAI64N-encoded time, as C{unicode}.
    """
    seconds = int(timestamp)
    # Truncate (not round) the fractional part to whole nanoseconds:
    nanoseconds = int((timestamp - seconds) * 1000000000)
    seconds = seconds + _OFFSET
    encoded = b2a_hex(struct.pack(_STRUCTURE, seconds, nanoseconds))
    return "@" + encoded.decode("ascii")


def decode(tai64n):
    """
    Convert TAI64N string to seconds since epoch.

    Note that dates before 2013 may not decode accurately due to leap second
    issues.  If you need correct decoding for earlier dates you can try the
    tai64n package available from PyPI
    (U{https://pypi.python.org/pypi/tai64n}).

    @param tai64n: TAI64N-encoded time, as C{unicode}.

    @return: Seconds since UTC Unix epoch as C{float}.
    """
    # Skip the leading "@" marker, then reverse the encoding steps:
    seconds, nanoseconds = struct.unpack(_STRUCTURE, a2b_hex(tai64n[1:]))
    seconds -= _OFFSET
    return seconds + (nanoseconds / 1000000000.0)
eliot-1.11.0/eliot/logwriter.py0000664000175000017500000000773013470775105020116 0ustar itamarstitamarst00000000000000"""
A log destination for use by Twisted applications.

Runs in a thread, so that we don't do blocking I/O in the event loop thread.
"""

from __future__ import unicode_literals, absolute_import

import threading
import select
from warnings import warn

from twisted.application.service import Service
from twisted.internet.threads import deferToThreadPool

# Prefer poll() where available; select() has a low file-descriptor limit.
if getattr(select, "poll", None):
    from twisted.internet.pollreactor import PollReactor as Reactor
else:
    from twisted.internet.selectreactor import SelectReactor as Reactor

from .
import addDestination, removeDestination from ._output import FileDestination class ThreadedWriter(Service): """ An non-blocking Eliot log destination that wraps a blocking destination, writing log messages to the latter in a managed thread. Unfortunately Python's Queue is not reentrant (http://bugs.python.org/issue14976) and neither is RLock (http://bugs.python.org/issue13697). In order to queue items in a thread we therefore rely on the self-pipe trick, and the easiest way to do that is by running another reactor in the thread. @ivar _reactor: A private reactor running in a thread which will do the log writes. @ivar _thread: C{None}, or a L{threading.Thread} running the private reactor. """ name = "Eliot Log Writer" def __init__(self, destination, reactor): """ @param destination: The underlying destination for log files. This will be called from a non-reactor thread. @param reactor: The main reactor. """ self._destination = destination self._reactor = Reactor() # Ick. See https://twistedmatrix.com/trac/ticket/6982 for real solution. self._reactor._registerAsIOThread = False self._mainReactor = reactor self._thread = None def startService(self): """ Start the writer thread. """ Service.startService(self) self._thread = threading.Thread(target=self._writer) self._thread.start() addDestination(self) def stopService(self): """ Stop the writer thread, wait for it to finish. """ Service.stopService(self) removeDestination(self) self._reactor.callFromThread(self._reactor.stop) return deferToThreadPool( self._mainReactor, self._mainReactor.getThreadPool(), self._thread.join ) def __call__(self, data): """ Add the data to the queue, to be serialized to JSON and written by the writer thread with a newline added. @param data: C{bytes} to write to disk. """ self._reactor.callFromThread(self._destination, data) def _writer(self): """ The function run by the writer thread. 
""" self._reactor.run(installSignalHandlers=False) class ThreadedFileWriter(ThreadedWriter): """ ``ThreadedWriter`` that takes a log file and writes to it using a ``FileDestination``. This exists for backwards compatibility purpose. The recommended API is ``ThreadedWriter``. """ def __init__(self, logFile, reactor): """ @param logFile: A C{file}-like object that is at the end of its existing contents (e.g. opened with append mode) and accepts bytes. @type logFile: C{file}, or any file-like object with C{write}, C{flush} and C{close} methods e.g. a L{twisted.python.logfile.LogFile} if you want log rotation. @param reactor: The main reactor. """ warn( "ThreadedFileWriter is deprecated since 0.9.0. " "Use ThreadedWriter instead.", DeprecationWarning, stacklevel=2, ) self._logFile = logFile ThreadedWriter.__init__(self, FileDestination(file=logFile), reactor) def stopService(self): d = ThreadedWriter.stopService(self) d.addCallback(lambda _: self._logFile.close()) return d eliot-1.11.0/eliot/twisted.py0000664000175000017500000002006313470775105017565 0ustar itamarstitamarst00000000000000""" APIs for using Eliot from Twisted. """ from __future__ import absolute_import, unicode_literals import os import sys from twisted.logger import Logger as TwistedLogger from twisted.python.failure import Failure from twisted.internet.defer import inlineCallbacks from ._action import current_action from . import addDestination from ._generators import eliot_friendly_generator_function __all__ = [ "AlreadyFinished", "DeferredContext", "redirectLogsForTrial", "inline_callbacks", ] def _passthrough(result): return result class AlreadyFinished(Exception): """ L{DeferredContext.addCallbacks} or similar method was called after L{DeferredContext.addActionFinish}. This indicates a programming bug, e.g. forgetting to unwrap the underlying L{Deferred} when passing on to some other piece of code that doesn't care about the action context. 
""" class DeferredContext(object): """ A L{Deferred} equivalent of L{eliot.Action.context} and L{eliot.action.finish}. Makes a L{Deferred}'s callbacks run in a L{eliot.Action}'s context, and allows indicating which callbacks to wait for before the action is finished. The action to use will be taken from the call context. @ivar result: The wrapped L{Deferred}. """ def __init__(self, deferred): """ @param deferred: L{twisted.internet.defer.Deferred} to wrap. """ self.result = deferred self._action = current_action() self._finishAdded = False if self._action is None: raise RuntimeError( "DeferredContext() should only be created in the context of " "an eliot.Action." ) def addCallbacks( self, callback, errback=None, callbackArgs=None, callbackKeywords=None, errbackArgs=None, errbackKeywords=None, ): """ Add a pair of callbacks that will be run in the context of an eliot action. @return: C{self} @rtype: L{DeferredContext} @raises AlreadyFinished: L{DeferredContext.addActionFinish} has been called. This indicates a programmer error. """ if self._finishAdded: raise AlreadyFinished() if errback is None: errback = _passthrough def callbackWithContext(*args, **kwargs): return self._action.run(callback, *args, **kwargs) def errbackWithContext(*args, **kwargs): return self._action.run(errback, *args, **kwargs) self.result.addCallbacks( callbackWithContext, errbackWithContext, callbackArgs, callbackKeywords, errbackArgs, errbackKeywords, ) return self def addCallback(self, callback, *args, **kw): """ Add a success callback that will be run in the context of an eliot action. @return: C{self} @rtype: L{DeferredContext} @raises AlreadyFinished: L{DeferredContext.addActionFinish} has been called. This indicates a programmer error. """ return self.addCallbacks( callback, _passthrough, callbackArgs=args, callbackKeywords=kw ) def addErrback(self, errback, *args, **kw): """ Add a failure callback that will be run in the context of an eliot action. 
@return: C{self} @rtype: L{DeferredContext} @raises AlreadyFinished: L{DeferredContext.addActionFinish} has been called. This indicates a programmer error. """ return self.addCallbacks( _passthrough, errback, errbackArgs=args, errbackKeywords=kw ) def addBoth(self, callback, *args, **kw): """ Add a single callback as both success and failure callbacks. @return: C{self} @rtype: L{DeferredContext} @raises AlreadyFinished: L{DeferredContext.addActionFinish} has been called. This indicates a programmer error. """ return self.addCallbacks(callback, callback, args, kw, args, kw) def addActionFinish(self): """ Indicates all callbacks that should run within the action's context have been added, and that the action should therefore finish once those callbacks have fired. @return: The wrapped L{Deferred}. @raises AlreadyFinished: L{DeferredContext.addActionFinish} has been called previously. This indicates a programmer error. """ if self._finishAdded: raise AlreadyFinished() self._finishAdded = True def done(result): if isinstance(result, Failure): exception = result.value else: exception = None self._action.finish(exception) return result self.result.addBoth(done) return self.result class TwistedDestination(object): """ An Eliot logging destination that forwards logs to Twisted's logging. Do not use if you're also redirecting Twisted's logs to Eliot, since then you'll have an infinite loop. """ def __init__(self): self._logger = TwistedLogger(namespace="eliot") def __call__(self, message): """ Log an Eliot message to Twisted's log. @param message: A rendered Eliot message. @type message: L{dict} """ if message.get("message_type") == "eliot:traceback": method = self._logger.critical else: method = self._logger.info method(format="Eliot message: {eliot}", eliot=message) class _RedirectLogsForTrial(object): """ When called inside a I{trial} process redirect Eliot log messages to Twisted's logging system, otherwise do nothing. 
This allows reading Eliot logs output by running unit tests with I{trial} in its normal log location: C{_trial_temp/test.log}. The way you use it is by calling it a module level in some module that will be loaded by trial, typically the top-level C{__init__.py} of your package. This function can usually be safely called in all programs since it will have no side-effects if used outside of trial. The only exception is you are redirecting Twisted logs to Eliot; you should make sure not call this function in that case so as to prevent infinite loops. In addition, calling the function multiple times has the same effect as calling it once. (This is not thread-safe at the moment, so in theory multiple threads calling this might result in multiple destinatios being added - see https://github.com/itamarst/eliot/issues/78). Currently this works by checking if C{sys.argv[0]} is called C{trial}; the ideal mechanism would require https://twistedmatrix.com/trac/ticket/6939 to be fixed, but probably there are better solutions even without that - https://github.com/itamarst/eliot/issues/76 covers those. @ivar _sys: An object similar to, and typically identical to, Python's L{sys} module. @ivar _redirected: L{True} if trial logs have been redirected once already. """ def __init__(self, sys): self._sys = sys self._redirected = False def __call__(self): """ Do the redirect if necessary. @return: The destination added to Eliot if any, otherwise L{None}. """ if os.path.basename(self._sys.argv[0]) == "trial" and not self._redirected: self._redirected = True destination = TwistedDestination() addDestination(destination) return destination redirectLogsForTrial = _RedirectLogsForTrial(sys) def inline_callbacks(original, debug=False): """ Decorate a function like ``inlineCallbacks`` would but in a more Eliot-friendly way. Use it just like ``inlineCallbacks`` but where you want Eliot action contexts to Do The Right Thing inside the decorated function. 
""" f = eliot_friendly_generator_function(original) if debug: f.debug = True return inlineCallbacks(f) eliot-1.11.0/eliot/json.py0000664000175000017500000000202513470775105017051 0ustar itamarstitamarst00000000000000"""Custom JSON encoding support.""" from __future__ import absolute_import import json import sys class EliotJSONEncoder(json.JSONEncoder): """JSON encoder with additional functionality. In particular, supports NumPy types. """ def default(self, o): numpy = sys.modules.get("numpy", None) if numpy is not None: if isinstance(o, numpy.floating): return float(o) if isinstance(o, numpy.integer): return int(o) if isinstance(o, (numpy.bool, numpy.bool_)): return bool(o) if isinstance(o, numpy.ndarray): if o.size > 10000: # Too big to want to log as-is, log a summary: return { "array_start": o.flat[:10000].tolist(), "original_shape": o.shape, } else: return o.tolist() return json.JSONEncoder.default(self, o) __all__ = ["EliotJSONEncoder"] eliot-1.11.0/eliot/_generators.py0000664000175000017500000001333513573001140020377 0ustar itamarstitamarst00000000000000""" Support for maintaining an action context across generator suspension. """ from __future__ import unicode_literals, absolute_import from sys import exc_info from functools import wraps from contextlib import contextmanager from contextvars import copy_context from weakref import WeakKeyDictionary from . 
import log_message class _GeneratorContext(object): """Generator sub-context for C{_ExecutionContext}.""" def __init__(self, execution_context): self._execution_context = execution_context self._contexts = WeakKeyDictionary() self._current_generator = None def init_stack(self, generator): """Create a new stack for the given generator.""" self._contexts[generator] = copy_context() @contextmanager def in_generator(self, generator): """Context manager: set the given generator as the current generator.""" previous_generator = self._current_generator try: self._current_generator = generator yield finally: self._current_generator = previous_generator class GeneratorSupportNotEnabled(Exception): """ An attempt was made to use a decorated generator without first turning on the generator context manager. """ def eliot_friendly_generator_function(original): """ Decorate a generator function so that the Eliot action context is preserved across ``yield`` expressions. """ @wraps(original) def wrapper(*a, **kw): # Keep track of whether the next value to deliver to the generator is # a non-exception or an exception. ok = True # Keep track of the next value to deliver to the generator. value_in = None # Create the generator with a call to the generator function. This # happens with whatever Eliot action context happens to be active, # which is fine and correct and also irrelevant because no code in the # generator function can run until we call send or throw on it. gen = original(*a, **kw) # Initialize the per-generator context to a copy of the current context. context = copy_context() while True: try: # Whichever way we invoke the generator, we will do it # with the Eliot action context stack we've saved for it. # Then the context manager will re-save it and restore the # "outside" stack for us. 
# # Regarding the support of Twisted's inlineCallbacks-like # functionality (see eliot.twisted.inline_callbacks): # # The invocation may raise the inlineCallbacks internal # control flow exception _DefGen_Return. It is not wrong to # just let that propagate upwards here but inlineCallbacks # does think it is wrong. The behavior triggers a # DeprecationWarning to try to get us to fix our code. We # could explicitly handle and re-raise the _DefGen_Return but # only at the expense of depending on a private Twisted API. # For now, I'm opting to try to encourage Twisted to fix the # situation (or at least not worsen it): # https://twistedmatrix.com/trac/ticket/9590 # # Alternatively, _DefGen_Return is only required on Python 2. # When Python 2 support is dropped, this concern can be # eliminated by always using `return value` instead of # `returnValue(value)` (and adding the necessary logic to the # StopIteration handler below). def go(): if ok: value_out = gen.send(value_in) else: value_out = gen.throw(*value_in) # We have obtained a value from the generator. In # giving it to us, it has given up control. Note this # fact here. Importantly, this is within the # generator's action context so that we get a good # indication of where the yield occurred. # # This is noisy, enable only for debugging: if wrapper.debug: log_message(message_type="yielded") return value_out value_out = context.run(go) except StopIteration: # When the generator raises this, it is signaling # completion. Leave the loop. break else: try: # Pass the generator's result along to whoever is # driving. Capture the result as the next value to # send inward. value_in = yield value_out except: # Or capture the exception if that's the flavor of the # next value. This could possibly include GeneratorExit # which turns out to be just fine because throwing it into # the inner generator effectively propagates the close # (and with the right context!) just as you would want. 
# True, the GeneratorExit does get re-throwing out of the # gen.throw call and hits _the_generator_context's # contextmanager. But @contextmanager extremely # conveniently eats it for us! Thanks, @contextmanager! ok = False value_in = exc_info() else: ok = True wrapper.debug = False return wrapper eliot-1.11.0/eliot/testing.py0000664000175000017500000003475513515376117017574 0ustar itamarstitamarst00000000000000""" Utilities to aid unit testing L{eliot} and code that uses it. """ from __future__ import unicode_literals from unittest import SkipTest from functools import wraps from pyrsistent import PClass, field from six import text_type from ._action import ( ACTION_STATUS_FIELD, ACTION_TYPE_FIELD, STARTED_STATUS, FAILED_STATUS, SUCCEEDED_STATUS, ) from ._message import MESSAGE_TYPE_FIELD, TASK_LEVEL_FIELD, TASK_UUID_FIELD from ._output import MemoryLogger from . import _output COMPLETED_STATUSES = (FAILED_STATUS, SUCCEEDED_STATUS) def issuperset(a, b): """ Use L{assertContainsFields} instead. @type a: C{dict} @type b: C{dict} @return: Boolean indicating whether C{a} has all key/value pairs that C{b} does. """ aItems = a.items() return all(pair in aItems for pair in b.items()) def assertContainsFields(test, message, fields): """ Assert that the given message contains the given fields. @param test: L{unittest.TestCase} being run. @param message: C{dict}, the message we are checking. @param fields: C{dict}, the fields we expect the message to have. @raises AssertionError: If the message doesn't contain the fields. """ messageSubset = dict( [(key, value) for key, value in message.items() if key in fields] ) test.assertEqual(messageSubset, fields) class LoggedAction(PClass): """ An action whose start and finish messages have been logged. @ivar startMessage: A C{dict}, the start message contents. Also available as C{start_message}. @ivar endMessage: A C{dict}, the end message contents (in both success and failure cases). Also available as C{end_message}. 
@ivar children: A C{list} of direct child L{LoggedMessage} and L{LoggedAction} instances. """ startMessage = field(mandatory=True) endMessage = field(mandatory=True) children = field(mandatory=True) def __new__(cls, startMessage, endMessage, children): return PClass.__new__( cls, startMessage=startMessage, endMessage=endMessage, children=children ) @property def start_message(self): return self.startMessage @property def end_message(self): return self.endMessage @classmethod def fromMessages(klass, uuid, level, messages): """ Given a task uuid and level (identifying an action) and a list of dictionaries, create a L{LoggedAction}. All child messages and actions will be added as L{LoggedAction} or L{LoggedMessage} children. Note that some descendant messages may be missing if you end up logging to two or more different ILogger providers. @param uuid: The uuid of the task (C{unicode}). @param level: The C{task_level} of the action's start message, e.g. C{"/1/2/1"}. @param messages: A list of message C{dict}s. @return: L{LoggedAction} constructed from start and finish messages for this specific action. @raises: L{ValueError} if one or both of the action's messages cannot be found. """ startMessage = None endMessage = None children = [] levelPrefix = level[:-1] for message in messages: if message[TASK_UUID_FIELD] != uuid: # Different task altogether: continue messageLevel = message[TASK_LEVEL_FIELD] if messageLevel[:-1] == levelPrefix: status = message.get(ACTION_STATUS_FIELD) if status == STARTED_STATUS: startMessage = message elif status in COMPLETED_STATUSES: endMessage = message else: # Presumably a message in this action: children.append(LoggedMessage(message)) elif ( len(messageLevel) == len(levelPrefix) + 2 and messageLevel[:-2] == levelPrefix and messageLevel[-1] == 1 ): # If start message level is [1], [1, 2, 1] implies first # message of a direct child. 
child = klass.fromMessages(uuid, message[TASK_LEVEL_FIELD], messages) children.append(child) if startMessage is None: raise ValueError("Missing start message") if endMessage is None: raise ValueError( "Missing end message of type " + message.get(ACTION_TYPE_FIELD, "unknown") ) return klass(startMessage, endMessage, children) # PEP 8 variant: from_messages = fromMessages @classmethod def of_type(klass, messages, actionType): """ Find all L{LoggedAction} of the specified type. @param messages: A list of message C{dict}s. @param actionType: A L{eliot.ActionType}, the type of the actions to find, or the type as a C{str}. @return: A C{list} of L{LoggedAction}. """ if not isinstance(actionType, text_type): actionType = actionType.action_type result = [] for message in messages: if ( message.get(ACTION_TYPE_FIELD) == actionType and message[ACTION_STATUS_FIELD] == STARTED_STATUS ): result.append( klass.fromMessages( message[TASK_UUID_FIELD], message[TASK_LEVEL_FIELD], messages ) ) return result # Backwards compat: ofType = of_type def descendants(self): """ Find all descendant L{LoggedAction} or L{LoggedMessage} of this instance. @return: An iterable of L{LoggedAction} and L{LoggedMessage} instances. """ for child in self.children: yield child if isinstance(child, LoggedAction): for descendant in child.descendants(): yield descendant @property def succeeded(self): """ Indicate whether this action succeeded. @return: C{bool} indicating whether the action succeeded. """ return self.endMessage[ACTION_STATUS_FIELD] == SUCCEEDED_STATUS def type_tree(self): """Return dictionary of all child action and message types. Actions become dictionaries that look like C{{: [, ]}} @return: C{dict} where key is action type, and value is list of child types: either strings for messages, or dicts for actions. 
""" children = [] for child in self.children: if isinstance(child, LoggedAction): children.append(child.type_tree()) else: children.append(child.message[MESSAGE_TYPE_FIELD]) return {self.startMessage[ACTION_TYPE_FIELD]: children} class LoggedMessage(PClass): """ A message that has been logged. @ivar message: A C{dict}, the message contents. """ message = field(mandatory=True) def __new__(cls, message): return PClass.__new__(cls, message=message) @classmethod def of_type(klass, messages, messageType): """ Find all L{LoggedMessage} of the specified type. @param messages: A list of message C{dict}s. @param messageType: A L{eliot.MessageType}, the type of the messages to find, or the type as a L{str}. @return: A C{list} of L{LoggedMessage}. """ result = [] if not isinstance(messageType, text_type): messageType = messageType.message_type for message in messages: if message.get(MESSAGE_TYPE_FIELD) == messageType: result.append(klass(message)) return result # Backwards compat: ofType = of_type class UnflushedTracebacks(Exception): """ The L{MemoryLogger} had some tracebacks logged which were not flushed. This means either your code has a bug and logged an unexpected traceback. If you expected the traceback then you will need to flush it using L{MemoryLogger.flushTracebacks}. """ def check_for_errors(logger): """ Raise exception if logger has unflushed tracebacks or validation errors. @param logger: A L{MemoryLogger}. @raise L{UnflushedTracebacks}: If any tracebacks were unflushed. """ # Check for unexpected tracebacks first, since that indicates business # logic errors: if logger.tracebackMessages: raise UnflushedTracebacks(logger.tracebackMessages) # If those are fine, validate the logging: logger.validate() def swap_logger(logger): """Swap out the global logging sink. @param logger: An C{ILogger}. @return: The current C{ILogger}. 
""" previous_logger = _output._DEFAULT_LOGGER _output._DEFAULT_LOGGER = logger return previous_logger def validateLogging(assertion, *assertionArgs, **assertionKwargs): """ Decorator factory for L{unittest.TestCase} methods to add logging validation. 1. The decorated test method gets a C{logger} keyword argument, a L{MemoryLogger}. 2. All messages logged to this logger will be validated at the end of the test. 3. Any unflushed logged tracebacks will cause the test to fail. For example: from unittest import TestCase from eliot.testing import assertContainsFields, validateLogging class MyTests(TestCase): def assertFooLogging(self, logger): assertContainsFields(self, logger.messages[0], {"key": 123}) @param assertion: A callable that will be called with the L{unittest.TestCase} instance, the logger and C{assertionArgs} and C{assertionKwargs} once the actual test has run, allowing for extra logging-related assertions on the effects of the test. Use L{None} if you want the cleanup assertions registered but no custom assertions. @param assertionArgs: Additional positional arguments to pass to C{assertion}. @param assertionKwargs: Additional keyword arguments to pass to C{assertion}. """ def decorator(function): @wraps(function) def wrapper(self, *args, **kwargs): skipped = False kwargs["logger"] = logger = MemoryLogger() self.addCleanup(check_for_errors, logger) # TestCase runs cleanups in reverse order, and we want this to # run *before* tracebacks are checked: if assertion is not None: self.addCleanup( lambda: skipped or assertion(self, logger, *assertionArgs, **assertionKwargs) ) try: return function(self, *args, **kwargs) except SkipTest: skipped = True raise return wrapper return decorator # PEP 8 variant: validate_logging = validateLogging def capture_logging(assertion, *assertionArgs, **assertionKwargs): """ Capture and validate all logging that doesn't specify a L{Logger}. See L{validate_logging} for details on the rest of its behavior. 
""" def decorator(function): @validate_logging(assertion, *assertionArgs, **assertionKwargs) @wraps(function) def wrapper(self, *args, **kwargs): logger = kwargs["logger"] previous_logger = swap_logger(logger) def cleanup(): swap_logger(previous_logger) self.addCleanup(cleanup) return function(self, *args, **kwargs) return wrapper return decorator def assertHasMessage(testCase, logger, messageType, fields=None): """ Assert that the given logger has a message of the given type, and the first message found of this type has the given fields. This can be used as the assertion function passed to L{validateLogging} or as part of a unit test. @param testCase: L{unittest.TestCase} instance. @param logger: L{eliot.MemoryLogger} whose messages will be checked. @param messageType: L{eliot.MessageType} indicating which message we're looking for. @param fields: The first message of the given type found must have a superset of the given C{dict} as its fields. If C{None} then fields are not checked. @return: The first found L{LoggedMessage} of the given type, if field validation succeeded. @raises AssertionError: No message was found, or the fields were not superset of given fields. """ if fields is None: fields = {} messages = LoggedMessage.ofType(logger.messages, messageType) testCase.assertTrue(messages, "No messages of type %s" % (messageType,)) loggedMessage = messages[0] assertContainsFields(testCase, loggedMessage.message, fields) return loggedMessage def assertHasAction( testCase, logger, actionType, succeeded, startFields=None, endFields=None ): """ Assert that the given logger has an action of the given type, and the first action found of this type has the given fields and success status. This can be used as the assertion function passed to L{validateLogging} or as part of a unit test. @param testCase: L{unittest.TestCase} instance. @param logger: L{eliot.MemoryLogger} whose messages will be checked. 
@param actionType: L{eliot.ActionType} or C{str} indicating which message we're looking for. @param succeeded: Expected success status of the action, a C{bool}. @param startFields: The first action of the given type found must have a superset of the given C{dict} as its start fields. If C{None} then fields are not checked. @param endFields: The first action of the given type found must have a superset of the given C{dict} as its end fields. If C{None} then fields are not checked. @return: The first found L{LoggedAction} of the given type, if field validation succeeded. @raises AssertionError: No action was found, or the fields were not superset of given fields. """ if startFields is None: startFields = {} if endFields is None: endFields = {} actions = LoggedAction.ofType(logger.messages, actionType) testCase.assertTrue(actions, "No actions of type %s" % (actionType,)) action = actions[0] testCase.assertEqual(action.succeeded, succeeded) assertContainsFields(testCase, action.startMessage, startFields) assertContainsFields(testCase, action.endMessage, endFields) return action eliot-1.11.0/eliot/prettyprint.py0000664000175000017500000001170413573001140020471 0ustar itamarstitamarst00000000000000""" API and command-line support for human-readable Eliot messages. """ import pprint import argparse from datetime import datetime from sys import stdin, stdout from collections import OrderedDict from json import dumps from ._bytesjson import loads from ._message import ( TIMESTAMP_FIELD, TASK_UUID_FIELD, TASK_LEVEL_FIELD, MESSAGE_TYPE_FIELD, ) from ._action import ACTION_TYPE_FIELD, ACTION_STATUS_FIELD # Ensure binary stdin, since we expect specifically UTF-8 encoded # messages, not platform-encoding messages. stdin = stdin.buffer # Fields that all Eliot messages are expected to have: REQUIRED_FIELDS = {TASK_LEVEL_FIELD, TASK_UUID_FIELD, TIMESTAMP_FIELD} # Fields that get treated specially when formatting. 
_skip_fields = { TIMESTAMP_FIELD, TASK_UUID_FIELD, TASK_LEVEL_FIELD, MESSAGE_TYPE_FIELD, ACTION_TYPE_FIELD, ACTION_STATUS_FIELD, } # First fields to render: _first_fields = [ACTION_TYPE_FIELD, MESSAGE_TYPE_FIELD, ACTION_STATUS_FIELD] def _render_timestamp(message: dict, local_timezone: bool) -> str: """Convert a message's timestamp to a string.""" # If we were returning or storing the datetime we'd want to use an # explicit timezone instead of a naive datetime, but since we're # just using it for formatting we needn't bother. if local_timezone: dt = datetime.fromtimestamp(message[TIMESTAMP_FIELD]) else: dt = datetime.utcfromtimestamp(message[TIMESTAMP_FIELD]) result = dt.isoformat(sep="T") if not local_timezone: result += "Z" return result def pretty_format(message: dict, local_timezone: bool = False) -> str: """ Convert a message dictionary into a human-readable string. @param message: Message to parse, as dictionary. @return: Unicode string. """ def add_field(previous, key, value): value = ( pprint.pformat(value, width=40).replace("\\n", "\n ").replace("\\t", "\t") ) # Reindent second line and later to match up with first line's # indentation: lines = value.split("\n") # indent lines are " | " indent = "{}| ".format(" " * (2 + len(key))) value = "\n".join([lines[0]] + [indent + l for l in lines[1:]]) return " %s: %s\n" % (key, value) remaining = "" for field in _first_fields: if field in message: remaining += add_field(remaining, field, message[field]) for (key, value) in sorted(message.items()): if key not in _skip_fields: remaining += add_field(remaining, key, value) level = "/" + "/".join(map(str, message[TASK_LEVEL_FIELD])) return "%s -> %s\n%s\n%s" % ( message[TASK_UUID_FIELD], level, _render_timestamp(message, local_timezone), remaining, ) def compact_format(message: dict, local_timezone: bool = False) -> str: """Format an Eliot message into a single line. The message is presumed to be JSON-serializable. 
""" ordered_message = OrderedDict() for field in _first_fields: if field in message: ordered_message[field] = message[field] for (key, value) in sorted(message.items()): if key not in _skip_fields: ordered_message[key] = value # drop { and } from JSON: rendered = " ".join( "{}={}".format(key, dumps(value, separators=(",", ":"))) for (key, value) in ordered_message.items() ) return "%s%s %s %s" % ( message[TASK_UUID_FIELD], "/" + "/".join(map(str, message[TASK_LEVEL_FIELD])), _render_timestamp(message, local_timezone), rendered, ) _CLI_HELP = """\ Convert Eliot messages into more readable format. Reads JSON lines from stdin, write out pretty-printed results on stdout. """ def _main(): """ Command-line program that reads in JSON from stdin and writes out pretty-printed messages to stdout. """ parser = argparse.ArgumentParser( description=_CLI_HELP, usage="cat messages | %(prog)s [options]" ) parser.add_argument( "-c", "--compact", action="store_true", dest="compact", help="Compact format, one message per line.", ) parser.add_argument( "-l", "--local-timezone", action="store_true", dest="local_timezone", help="Use local timezone instead of UTC.", ) args = parser.parse_args() if args.compact: formatter = compact_format else: formatter = pretty_format for line in stdin: try: message = loads(line) except ValueError: stdout.write("Not JSON: {}\n\n".format(line.rstrip(b"\n"))) continue if REQUIRED_FIELDS - set(message.keys()): stdout.write("Not an Eliot message: {}\n\n".format(line.rstrip(b"\n"))) continue result = formatter(message, args.local_timezone) + "\n" stdout.write(result) __all__ = ["pretty_format", "compact_format"] eliot-1.11.0/eliot/_output.py0000664000175000017500000003525613573001140017574 0ustar itamarstitamarst00000000000000""" Implementation of hooks and APIs for outputting log messages. 
""" import sys import traceback import inspect import json as pyjson from threading import Lock from functools import wraps from io import IOBase from pyrsistent import PClass, field from . import _bytesjson as bytesjson from zope.interface import Interface, implementer from ._traceback import write_traceback, TRACEBACK_MESSAGE from ._message import EXCEPTION_FIELD, MESSAGE_TYPE_FIELD, REASON_FIELD from ._util import saferepr, safeunicode from .json import EliotJSONEncoder from ._validation import ValidationError class _DestinationsSendError(Exception): """ An error occured sending to one or more destinations. @ivar errors: A list of tuples output from C{sys.exc_info()}. """ def __init__(self, errors): self.errors = errors Exception.__init__(self, errors) class BufferingDestination(object): """ Buffer messages in memory. """ def __init__(self): self.messages = [] def __call__(self, message): self.messages.append(message) while len(self.messages) > 1000: self.messages.pop(0) class Destinations(object): """ Manage a list of destinations for message dictionaries. The global instance of this class is where L{Logger} instances will send written messages. """ def __init__(self): self._destinations = [BufferingDestination()] self._any_added = False self._globalFields = {} def addGlobalFields(self, **fields): """ Add fields that will be included in all messages sent through this destination. @param fields: Keyword arguments mapping field names to values. """ self._globalFields.update(fields) def send(self, message): """ Deliver a message to all destinations. The passed in message might be mutated. @param message: A message dictionary that can be serialized to JSON. @type message: L{dict} """ message.update(self._globalFields) errors = [] for dest in self._destinations: try: dest(message) except: errors.append(sys.exc_info()) if errors: raise _DestinationsSendError(errors) def add(self, *destinations): """ Adds new destinations. 
A destination should never ever throw an exception. Seriously. A destination should not mutate the dictionary it is given. @param destinations: A list of callables that takes message dictionaries. """ buffered_messages = None if not self._any_added: # These are first set of messages added, so we need to clear # BufferingDestination: self._any_added = True buffered_messages = self._destinations[0].messages self._destinations = [] self._destinations.extend(destinations) if buffered_messages: # Re-deliver buffered messages: for message in buffered_messages: self.send(message) def remove(self, destination): """ Remove an existing destination. @param destination: A destination previously added with C{self.add}. @raises ValueError: If the destination is unknown. """ self._destinations.remove(destination) class ILogger(Interface): """ Write out message dictionaries to some destination. """ def write(dictionary, serializer=None): """ Write a dictionary to the appropriate destination. @note: This method is thread-safe. @param serializer: Either C{None}, or a L{eliot._validation._MessageSerializer} which can be used to validate this message. @param dictionary: The message to write out. The given dictionary will not be mutated. @type dictionary: C{dict} """ @implementer(ILogger) class Logger(object): """ Write out messages to the globally configured destination(s). You will typically want to create one of these for every chunk of code whose messages you want to unit test in isolation, e.g. a class. The tests can then replace a specific L{Logger} with a L{MemoryLogger}. """ _destinations = Destinations() _log_tracebacks = True def _safeUnicodeDictionary(self, dictionary): """ Serialize a dictionary to a unicode string no matter what it contains. The resulting dictionary will loosely follow Python syntax but it is not expected to actually be a lossless encoding in all cases. @param dictionary: A L{dict} to serialize. 
@return: A L{unicode} string representing the input dictionary as faithfully as can be done without putting in too much effort. """ try: return str( dict( (saferepr(key), saferepr(value)) for (key, value) in dictionary.items() ) ) except: return saferepr(dictionary) def write(self, dictionary, serializer=None): """ Serialize the dictionary, and write it to C{self._destinations}. """ dictionary = dictionary.copy() try: if serializer is not None: serializer.serialize(dictionary) except: write_traceback(self) from ._action import log_message log_message( "eliot:serialization_failure", message=self._safeUnicodeDictionary(dictionary), __eliot_logger__=self, ) return try: self._destinations.send(dictionary) except _DestinationsSendError as e: from ._action import log_message if self._log_tracebacks: for (exc_type, exception, exc_traceback) in e.errors: # Can't use same Logger as serialization errors because # if destination continues to error out we will get # infinite recursion. So instead we have to manually # construct a Logger that won't retry. logger = Logger() logger._log_tracebacks = False logger._destinations = self._destinations msg = { MESSAGE_TYPE_FIELD: "eliot:destination_failure", REASON_FIELD: safeunicode(exception), EXCEPTION_FIELD: exc_type.__module__ + "." + exc_type.__name__, "message": self._safeUnicodeDictionary(dictionary), "__eliot_logger__": logger, } log_message(**msg) else: # Nothing we can do here, raising exception to caller will # break business logic, better to have that continue to # work even if logging isn't. pass def exclusively(f): """ Decorate a function to make it thread-safe by serializing invocations using a per-instance lock. """ @wraps(f) def exclusively_f(self, *a, **kw): with self._lock: return f(self, *a, **kw) return exclusively_f @implementer(ILogger) class MemoryLogger(object): """ Store written messages in memory. 
When unit testing you don't want to create this directly but rather use the L{eliot.testing.validateLogging} decorator on a test method, which will provide additional testing integration. @ivar messages: A C{list} of the dictionaries passed to L{MemoryLogger.write}. Do not mutate this list. @ivar serializers: A C{list} of the serializers passed to L{MemoryLogger.write}, each corresponding to a message L{MemoryLogger.messages}. Do not mutate this list. @ivar tracebackMessages: A C{list} of messages written to this logger for tracebacks using L{eliot.write_traceback} or L{eliot.writeFailure}. Do not mutate this list. """ def __init__(self): self._lock = Lock() self.reset() @exclusively def flushTracebacks(self, exceptionType): """ Flush all logged tracebacks whose exception is of the given type. This means they are expected tracebacks and should not cause the test to fail. @param exceptionType: A subclass of L{Exception}. @return: C{list} of flushed messages. """ result = [] remaining = [] for message in self.tracebackMessages: if isinstance(message[REASON_FIELD], exceptionType): result.append(message) else: remaining.append(message) self.tracebackMessages = remaining return result # PEP 8 variant: flush_tracebacks = flushTracebacks @exclusively def write(self, dictionary, serializer=None): """ Add the dictionary to list of messages. """ # Validate copy of the dictionary, to ensure what we store isn't # mutated. try: self._validate_message(dictionary.copy(), serializer) except Exception as e: # Skip irrelevant frames that don't help pinpoint the problem: from . 
import _output, _message, _action skip_filenames = [_output.__file__, _message.__file__, _action.__file__] for frame in inspect.stack(): if frame[1] not in skip_filenames: break self._failed_validations.append( "{}: {}".format(e, "".join(traceback.format_stack(frame[0]))) ) self.messages.append(dictionary) self.serializers.append(serializer) if serializer is TRACEBACK_MESSAGE._serializer: self.tracebackMessages.append(dictionary) def _validate_message(self, dictionary, serializer): """Validate an individual message. As a side-effect, the message is replaced with its serialized contents. @param dictionary: A message C{dict} to be validated. Might be mutated by the serializer! @param serializer: C{None} or a serializer. @raises TypeError: If a field name is not unicode, or the dictionary fails to serialize to JSON. @raises eliot.ValidationError: If serializer was given and validation failed. """ if serializer is not None: serializer.validate(dictionary) for key in dictionary: if not isinstance(key, str): if isinstance(key, bytes): key.decode("utf-8") else: raise TypeError(dictionary, "%r is not unicode" % (key,)) if serializer is not None: serializer.serialize(dictionary) try: bytesjson.dumps(dictionary) pyjson.dumps(dictionary) except Exception as e: raise TypeError("Message %s doesn't encode to JSON: %s" % (dictionary, e)) @exclusively def validate(self): """ Validate all written messages. Does minimal validation of types, and for messages with corresponding serializers use those to do additional validation. As a side-effect, the messages are replaced with their serialized contents. @raises TypeError: If a field name is not unicode, or the dictionary fails to serialize to JSON. @raises eliot.ValidationError: If serializer was given and validation failed. 
""" for dictionary, serializer in zip(self.messages, self.serializers): try: self._validate_message(dictionary, serializer) except (TypeError, ValidationError) as e: # We already figured out which messages failed validation # earlier. This just lets us figure out which exception type to # raise. raise e.__class__("\n\n".join(self._failed_validations)) @exclusively def serialize(self): """ Serialize all written messages. This is the Field-based serialization, not JSON. @return: A C{list} of C{dict}, the serialized messages. """ result = [] for dictionary, serializer in zip(self.messages, self.serializers): dictionary = dictionary.copy() serializer.serialize(dictionary) result.append(dictionary) return result @exclusively def reset(self): """ Clear all logged messages. Any logged tracebacks will also be cleared, and will therefore not cause a test failure. This is useful to ensure a logger is in a known state before testing logging of a specific code path. """ self.messages = [] self.serializers = [] self.tracebackMessages = [] self._failed_validations = [] class FileDestination(PClass): """ Callable that writes JSON messages to a file. On Python 3 the file may support either C{bytes} or C{unicode}. On Python 2 only C{bytes} are supported since that is what all files expect in practice. @ivar file: The file to which messages will be written. @ivar _dumps: Function that serializes an object to JSON. @ivar _linebreak: C{"\n"} as either bytes or unicode. 
""" file = field(mandatory=True) encoder = field(mandatory=True) _dumps = field(mandatory=True) _linebreak = field(mandatory=True) def __new__(cls, file, encoder=EliotJSONEncoder): if isinstance(file, IOBase) and not file.writable(): raise RuntimeError("Given file {} is not writeable.") unicodeFile = False try: file.write(b"") except TypeError: unicodeFile = True if unicodeFile: # On Python 3 native json module outputs unicode: _dumps = pyjson.dumps _linebreak = "\n" else: _dumps = bytesjson.dumps _linebreak = b"\n" return PClass.__new__( cls, file=file, _dumps=_dumps, _linebreak=_linebreak, encoder=encoder ) def __call__(self, message): """ @param message: A message dictionary. """ self.file.write(self._dumps(message, cls=self.encoder) + self._linebreak) self.file.flush() def to_file(output_file, encoder=EliotJSONEncoder): """ Add a destination that writes a JSON message per line to the given file. @param output_file: A file-like object. """ Logger._destinations.add(FileDestination(file=output_file, encoder=encoder)) # The default Logger, used when none is specified: _DEFAULT_LOGGER = Logger() eliot-1.11.0/eliot/stdlib.py0000664000175000017500000000111613573001140017342 0ustar itamarstitamarst00000000000000"""Integration with the standard library ``logging`` package.""" from logging import Handler from ._action import log_message from ._traceback import write_traceback class EliotHandler(Handler): """A C{logging.Handler} that routes log messages to Eliot.""" def emit(self, record): log_message( message_type="eliot:stdlib", log_level=record.levelname, logger=record.name, message=record.getMessage(), ) if record.exc_info: write_traceback(exc_info=record.exc_info) __all__ = ["EliotHandler"] eliot-1.11.0/eliot/filter.py0000664000175000017500000000660013470775105017370 0ustar itamarstitamarst00000000000000""" Command line program for filtering line-based Eliot logs. 
""" from __future__ import unicode_literals, absolute_import if __name__ == "__main__": import eliot.filter eliot.filter.main() import sys from datetime import datetime, timedelta from json import JSONEncoder from ._bytesjson import dumps, loads class _DatetimeJSONEncoder(JSONEncoder): """ JSON encoder that supports L{datetime}. """ def default(self, o): if isinstance(o, datetime): return o.isoformat() return JSONEncoder.default(self, o) class EliotFilter(object): """ Filter Eliot log lines using a Python expression. @ivar code: A Python code object, the compiled filter expression. """ _SKIP = object() def __init__(self, expr, incoming, output): """ @param expr: A Python expression that will be called for each log message. @type expr: L{str} @param incoming: An iterable of L{bytes}, each of which is a serialized Eliot message. @param output: A file to which output should be written. @type output: L{file} or a file-like object. """ self.code = compile(expr, "", "eval") self.incoming = incoming self.output = output def run(self): """ For each incoming message, decode the JSON, evaluate expression, encode as JSON and write that to the output file. """ for line in self.incoming: message = loads(line) result = self._evaluate(message) if result is self._SKIP: continue self.output.write(dumps(result, cls=_DatetimeJSONEncoder) + b"\n") def _evaluate(self, message): """ Evaluate the expression with the given Python object in its locals. @param message: A decoded JSON input. @return: The resulting object. """ return eval( self.code, globals(), { "J": message, "timedelta": timedelta, "datetime": datetime, "SKIP": self._SKIP, }, ) USAGE = b"""\ Usage: cat eliot.log | python -m eliot.filter Read JSON-expression per line from stdin, and filter it using a Python expression . The expression will have a local `J` containing decoded JSON. `datetime` and `timedelta` from Python's `datetime` module are also available as locals, containing the corresponding classes. 
`SKIP` is also available, if it's the expression result that indicates nothing should be output. The output will be written to stdout using JSON serialization. `datetime` objects will be serialized to ISO format. Examples: - Pass through the messages unchanged: $ cat eliot.log | python -m eliot.filter J - Retrieve a specific field from a specific message type, dropping messages of other types: $ cat eliot.log | python -m eliot.filter \\ "J['field'] if J.get('message_type') == 'my:message' else SKIP" """ def main(sys=sys): """ Run the program. Accept arguments from L{sys.argv}, read from L{sys.stdin}, write to L{sys.stdout}. @param sys: An object with same interface and defaulting to the L{sys} module. """ if len(sys.argv) != 2: sys.stderr.write(USAGE) return 1 EliotFilter(sys.argv[1], sys.stdin, sys.stdout).run() return 0 eliot-1.11.0/eliot/_bytesjson.py0000664000175000017500000000247313470775105020266 0ustar itamarstitamarst00000000000000""" Python 2/3 JSON encoding/decoding, emulating Python 2's json module. Python 3 json module doesn't support decoding bytes or encoding. Rather than adding isinstance checks in main code path which would slow down Python 2, instead we write our encoder that can support those. """ from __future__ import absolute_import import json as pyjson import warnings from six import PY2 def _loads(s): """ Support decoding bytes. """ if isinstance(s, bytes): s = s.decode("utf-8") return pyjson.loads(s) def _dumps(obj, cls=pyjson.JSONEncoder): """ Encode to bytes, and presume bytes in inputs are UTF-8 encoded strings. """ class WithBytes(cls): """ JSON encoder that supports L{bytes}. 
""" def default(self, o): if isinstance(o, bytes): warnings.warn( "Eliot will soon stop supporting encoding bytes in JSON" " on Python 3", DeprecationWarning, ) return o.decode("utf-8") return cls.default(self, o) return pyjson.dumps(obj, cls=WithBytes).encode("utf-8") if PY2: # No need for the above on Python 2 loads, dumps = pyjson.loads, pyjson.dumps else: loads, dumps = _loads, _dumps __all__ = ["loads", "dumps"] eliot-1.11.0/eliot/_traceback.py0000664000175000017500000000746613470775105020174 0ustar itamarstitamarst00000000000000""" Logging of tracebacks and L{twisted.python.failure.Failure} instances, as well as common utilities for handling exception logging. """ from __future__ import unicode_literals import traceback import sys from ._message import EXCEPTION_FIELD, REASON_FIELD from ._util import safeunicode, load_module from ._validation import MessageType, Field from ._errors import _error_extraction TRACEBACK_MESSAGE = MessageType( "eliot:traceback", [ Field(REASON_FIELD, safeunicode, "The exception's value."), Field("traceback", safeunicode, "The traceback."), Field( EXCEPTION_FIELD, lambda typ: "%s.%s" % (typ.__module__, typ.__name__), "The exception type's FQPN.", ), ], "An unexpected exception indicating a bug.", ) # The fields here are actually subset of what you might get in practice, # due to exception extraction, so we hackily modify the serializer: TRACEBACK_MESSAGE._serializer.allow_additional_fields = True def _writeTracebackMessage(logger, typ, exception, traceback): """ Write a traceback to the log. @param typ: The class of the exception. @param exception: The L{Exception} instance. @param traceback: The traceback, a C{str}. """ msg = TRACEBACK_MESSAGE(reason=exception, traceback=traceback, exception=typ) msg = msg.bind(**_error_extraction.get_fields_for_exception(logger, exception)) msg.write(logger) # The default Python standard library traceback.py formatting functions # involving reading source from disk. 
This is a potential performance hit # since disk I/O can block. We therefore format the tracebacks with in-memory # information only. # # Unfortunately, the easiest way to do this is... exciting. def _get_traceback_no_io(): """ Return a version of L{traceback} that doesn't do I/O. """ try: module = load_module(str("_traceback_no_io"), traceback) except NotImplementedError: # Can't fix the I/O problem, oh well: return traceback class FakeLineCache(object): def checkcache(self, *args, **kwargs): None def getline(self, *args, **kwargs): return "" def lazycache(self, *args, **kwargs): return None module.linecache = FakeLineCache() return module _traceback_no_io = _get_traceback_no_io() def write_traceback(logger=None, exc_info=None): """ Write the latest traceback to the log. This should be used inside an C{except} block. For example: try: dostuff() except: write_traceback(logger) Or you can pass the result of C{sys.exc_info()} to the C{exc_info} parameter. """ if exc_info is None: exc_info = sys.exc_info() typ, exception, tb = exc_info traceback = "".join(_traceback_no_io.format_exception(typ, exception, tb)) _writeTracebackMessage(logger, typ, exception, traceback) def writeFailure(failure, logger=None): """ Write a L{twisted.python.failure.Failure} to the log. This is for situations where you got an unexpected exception and want to log a traceback. For example, if you have C{Deferred} that might error, you'll want to wrap it with a L{eliot.twisted.DeferredContext} and then add C{writeFailure} as the error handler to get the traceback logged: d = DeferredContext(dostuff()) d.addCallback(process) # Final error handler. d.addErrback(writeFailure) @param failure: L{Failure} to write to the log. @type logger: L{eliot.ILogger}. Will be deprecated at some point, so just ignore it. @return: None """ # Failure.getBriefTraceback does not include source code, so does not do # I/O. 
_writeTracebackMessage( logger, failure.value.__class__, failure.value, failure.getBriefTraceback() ) eliot-1.11.0/eliot/parse.py0000664000175000017500000001403013470775105017211 0ustar itamarstitamarst00000000000000""" Parse a stream of serialized messages into a forest of ``WrittenAction`` and ``WrittenMessage`` objects. """ from __future__ import unicode_literals from six import text_type as unicode from pyrsistent import PClass, pmap_field, pset_field, discard from ._message import WrittenMessage, TASK_UUID_FIELD from ._action import ( TaskLevel, WrittenAction, ACTION_STATUS_FIELD, STARTED_STATUS, ACTION_TYPE_FIELD, ) class Task(PClass): """ A tree of actions with the same task UUID. """ _nodes = pmap_field(TaskLevel, (WrittenAction, WrittenMessage)) _completed = pset_field(TaskLevel) _root_level = TaskLevel(level=[]) def root(self): """ @return: The root L{WrittenAction}. """ return self._nodes[self._root_level] def is_complete(self): """ @return bool: True only if all messages in the task tree have been added to it. """ return self._root_level in self._completed def _insert_action(self, node): """ Add a L{WrittenAction} to the tree. Parent actions will be created as necessary. @param child: A L{WrittenAction} to add to the tree. @return: Updated L{Task}. """ task = self if ( node.end_message and node.start_message and (len(node.children) == node.end_message.task_level.level[-1] - 2) ): # Possibly this action is complete, make sure all sub-actions # are complete: completed = True for child in node.children: if ( isinstance(child, WrittenAction) and child.task_level not in self._completed ): completed = False break if completed: task = task.transform(["_completed"], lambda s: s.add(node.task_level)) task = task.transform(["_nodes", node.task_level], node) return task._ensure_node_parents(node) def _ensure_node_parents(self, child): """ Ensure the node (WrittenAction/WrittenMessage) is referenced by parent nodes. Parent actions will be created as necessary. 
@param child: A L{WrittenMessage} or L{WrittenAction} which is being added to the tree. @return: Updated L{Task}. """ task_level = child.task_level if task_level.parent() is None: return self parent = self._nodes.get(task_level.parent()) if parent is None: parent = WrittenAction( task_level=task_level.parent(), task_uuid=child.task_uuid ) parent = parent._add_child(child) return self._insert_action(parent) def add(self, message_dict): """ Update the L{Task} with a dictionary containing a serialized Eliot message. @param message_dict: Dictionary whose task UUID matches this one. @return: Updated L{Task}. """ is_action = message_dict.get(ACTION_TYPE_FIELD) is not None written_message = WrittenMessage.from_dict(message_dict) if is_action: action_level = written_message.task_level.parent() action = self._nodes.get(action_level) if action is None: action = WrittenAction( task_level=action_level, task_uuid=message_dict[TASK_UUID_FIELD] ) if message_dict[ACTION_STATUS_FIELD] == STARTED_STATUS: # Either newly created MissingAction, or one created by # previously added descendant of the action. action = action._start(written_message) else: action = action._end(written_message) return self._insert_action(action) else: # Special case where there is no action: if written_message.task_level.level == [1]: return self.transform( ["_nodes", self._root_level], written_message, ["_completed"], lambda s: s.add(self._root_level), ) else: return self._ensure_node_parents(written_message) class Parser(PClass): """ Parse serialized Eliot messages into L{Task} instances. @ivar _tasks: Map from UUID to corresponding L{Task}. """ _tasks = pmap_field(unicode, Task) def add(self, message_dict): """ Update the L{Parser} with a dictionary containing a serialized Eliot message. @param message_dict: Dictionary of serialized Eliot message. @return: Tuple of (list of completed L{Task} instances, updated L{Parser}). 
""" uuid = message_dict[TASK_UUID_FIELD] if uuid in self._tasks: task = self._tasks[uuid] else: task = Task() task = task.add(message_dict) if task.is_complete(): parser = self.transform(["_tasks", uuid], discard) return [task], parser else: parser = self.transform(["_tasks", uuid], task) return [], parser def incomplete_tasks(self): """ @return: List of L{Task} that are not yet complete. """ return list(self._tasks.values()) @classmethod def parse_stream(cls, iterable): """ Parse a stream of messages into a stream of L{Task} instances. :param iterable: An iterable of serialized Eliot message dictionaries. :return: An iterable of parsed L{Task} instances. Remaining incomplete L{Task} will be returned when the input stream is exhausted. """ parser = Parser() for message_dict in iterable: completed, parser = parser.add(message_dict) for task in completed: yield task for task in parser.incomplete_tasks(): yield task __all__ = ["Parser", "Task", "TaskLevel", "WrittenMessage", "WrittenAction"] eliot-1.11.0/eliot/_action.py0000664000175000017500000007446013573001140017511 0ustar itamarstitamarst00000000000000""" Support for actions and tasks. Actions have a beginning and an eventual end, and can be nested. Tasks are top-level actions. 
""" from __future__ import unicode_literals, absolute_import import threading from uuid import uuid4 from contextlib import contextmanager from functools import partial from inspect import getcallargs from contextvars import ContextVar from pyrsistent import field, PClass, optional, pmap_field, pvector from boltons.funcutils import wraps from six import text_type as unicode, PY3 from ._message import ( WrittenMessage, EXCEPTION_FIELD, REASON_FIELD, TASK_UUID_FIELD, MESSAGE_TYPE_FIELD, ) from ._util import safeunicode from ._errors import _error_extraction ACTION_STATUS_FIELD = "action_status" ACTION_TYPE_FIELD = "action_type" STARTED_STATUS = "started" SUCCEEDED_STATUS = "succeeded" FAILED_STATUS = "failed" VALID_STATUSES = (STARTED_STATUS, SUCCEEDED_STATUS, FAILED_STATUS) _ACTION_CONTEXT = ContextVar("eliot.action") from ._message import TIMESTAMP_FIELD, TASK_LEVEL_FIELD def current_action(): """ @return: The current C{Action} in context, or C{None} if none were set. """ return _ACTION_CONTEXT.get(None) class TaskLevel(object): """ The location of a message within the tree of actions of a task. @ivar level: A pvector of integers. Each item indicates a child relationship, and the value indicates message count. E.g. C{[2, 3]} indicates this is the third message within an action which is the second item in the task. """ def __init__(self, level): self._level = level def as_list(self): """Return the current level. @return: List of integers. 
""" return self._level[:] # Backwards compatibility: @property def level(self): return pvector(self._level) def __lt__(self, other): return self._level < other._level def __le__(self, other): return self._level <= other._level def __gt__(self, other): return self._level > other._level def __ge__(self, other): return self._level >= other._level def __eq__(self, other): if other.__class__ != TaskLevel: return False return self._level == other._level def __ne__(self, other): if other.__class__ != TaskLevel: return True return self._level != other._level def __hash__(self): return hash(tuple(self._level)) @classmethod def fromString(cls, string): """ Convert a serialized Unicode string to a L{TaskLevel}. @param string: Output of L{TaskLevel.toString}. @return: L{TaskLevel} parsed from the string. """ return cls(level=[int(i) for i in string.split("/") if i]) def toString(self): """ Convert to a Unicode string, for serialization purposes. @return: L{unicode} representation of the L{TaskLevel}. """ return "/" + "/".join(map(unicode, self._level)) def next_sibling(self): """ Return the next L{TaskLevel}, that is a task at the same level as this one, but one after. @return: L{TaskLevel} which follows this one. """ new_level = self._level[:] new_level[-1] += 1 return TaskLevel(level=new_level) def child(self): """ Return a child of this L{TaskLevel}. @return: L{TaskLevel} which is the first child of this one. """ new_level = self._level[:] new_level.append(1) return TaskLevel(level=new_level) def parent(self): """ Return the parent of this L{TaskLevel}, or C{None} if it doesn't have one. @return: L{TaskLevel} which is the parent of this one. """ if not self._level: return None return TaskLevel(level=self._level[:-1]) def is_sibling_of(self, task_level): """ Is this task a sibling of C{task_level}? 
""" return self.parent() == task_level.parent() # PEP 8 compatibility: from_string = fromString to_string = toString _TASK_ID_NOT_SUPPLIED = object() import time class Action(object): """ Part of a nested heirarchy of ongoing actions. An action has a start and an end; a message is logged for each. Actions should only be used from a single thread, by implication the thread where they were created. @ivar _identification: Fields identifying this action. @ivar _successFields: Fields to be included in successful finish message. @ivar _finished: L{True} if the L{Action} has finished, otherwise L{False}. """ def __init__(self, logger, task_uuid, task_level, action_type, serializers=None): """ Initialize the L{Action} and log the start message. You probably do not want to use this API directly: use L{start_action} or L{startTask} instead. @param logger: The L{eliot.ILogger} to which to write messages. @param task_uuid: The uuid of the top-level task, e.g. C{"123525"}. @param task_level: The action's level in the task. @type task_level: L{TaskLevel} @param action_type: The type of the action, e.g. C{"yourapp:subsystem:dosomething"}. @param serializers: Either a L{eliot._validation._ActionSerializers} instance or C{None}. In the latter case no validation or serialization will be done for messages generated by the L{Action}. """ self._successFields = {} self._logger = _output._DEFAULT_LOGGER if (logger is None) else logger self._task_level = task_level self._last_child = None self._identification = { TASK_UUID_FIELD: task_uuid, ACTION_TYPE_FIELD: action_type, } self._serializers = serializers self._finished = False @property def task_uuid(self): """ @return str: the current action's task UUID. """ return self._identification[TASK_UUID_FIELD] def serialize_task_id(self): """ Create a unique identifier for the current location within the task. The format is C{b"@"}. @return: L{bytes} encoding the current location within the task. 
""" return "{}@{}".format( self._identification[TASK_UUID_FIELD], self._nextTaskLevel().toString() ).encode("ascii") @classmethod def continue_task(cls, logger=None, task_id=_TASK_ID_NOT_SUPPLIED): """ Start a new action which is part of a serialized task. @param logger: The L{eliot.ILogger} to which to write messages, or C{None} if the default one should be used. @param task_id: A serialized task identifier, the output of L{Action.serialize_task_id}, either ASCII-encoded bytes or unicode string. Required. @return: The new L{Action} instance. """ if task_id is _TASK_ID_NOT_SUPPLIED: raise RuntimeError("You must supply a task_id keyword argument.") if isinstance(task_id, bytes): task_id = task_id.decode("ascii") uuid, task_level = task_id.split("@") action = cls( logger, uuid, TaskLevel.fromString(task_level), "eliot:remote_task" ) action._start({}) return action # Backwards-compat variants: serializeTaskId = serialize_task_id continueTask = continue_task def _nextTaskLevel(self): """ Return the next C{task_level} for messages within this action. Called whenever a message is logged within the context of an action. @return: The message's C{task_level}. """ if not self._last_child: self._last_child = self._task_level.child() else: self._last_child = self._last_child.next_sibling() return self._last_child def _start(self, fields): """ Log the start message. The action identification fields, and any additional given fields, will be logged. In general you shouldn't call this yourself, instead using a C{with} block or L{Action.finish}. """ fields[ACTION_STATUS_FIELD] = STARTED_STATUS fields[TIMESTAMP_FIELD] = time.time() fields.update(self._identification) fields[TASK_LEVEL_FIELD] = self._nextTaskLevel().as_list() if self._serializers is None: serializer = None else: serializer = self._serializers.start self._logger.write(fields, serializer) def finish(self, exception=None): """ Log the finish message. 
The action identification fields, and any additional given fields, will be logged. In general you shouldn't call this yourself, instead using a C{with} block or L{Action.finish}. @param exception: C{None}, in which case the fields added with L{Action.addSuccessFields} are used. Or an L{Exception}, in which case an C{"exception"} field is added with the given L{Exception} type and C{"reason"} with its contents. """ if self._finished: return self._finished = True serializer = None if exception is None: fields = self._successFields fields[ACTION_STATUS_FIELD] = SUCCEEDED_STATUS if self._serializers is not None: serializer = self._serializers.success else: fields = _error_extraction.get_fields_for_exception(self._logger, exception) fields[EXCEPTION_FIELD] = "%s.%s" % ( exception.__class__.__module__, exception.__class__.__name__, ) fields[REASON_FIELD] = safeunicode(exception) fields[ACTION_STATUS_FIELD] = FAILED_STATUS if self._serializers is not None: serializer = self._serializers.failure fields[TIMESTAMP_FIELD] = time.time() fields.update(self._identification) fields[TASK_LEVEL_FIELD] = self._nextTaskLevel().as_list() self._logger.write(fields, serializer) def child(self, logger, action_type, serializers=None): """ Create a child L{Action}. Rather than calling this directly, you can use L{start_action} to create child L{Action} using the execution context. @param logger: The L{eliot.ILogger} to which to write messages. @param action_type: The type of this action, e.g. C{"yourapp:subsystem:dosomething"}. @param serializers: Either a L{eliot._validation._ActionSerializers} instance or C{None}. In the latter case no validation or serialization will be done for messages generated by the L{Action}. """ newLevel = self._nextTaskLevel() return self.__class__( logger, self._identification[TASK_UUID_FIELD], newLevel, action_type, serializers, ) def run(self, f, *args, **kwargs): """ Run the given function with this L{Action} as its execution context. 
""" parent = _ACTION_CONTEXT.set(self) try: return f(*args, **kwargs) finally: _ACTION_CONTEXT.reset(parent) def addSuccessFields(self, **fields): """ Add fields to be included in the result message when the action finishes successfully. @param fields: Additional fields to add to the result message. """ self._successFields.update(fields) # PEP 8 variant: add_success_fields = addSuccessFields @contextmanager def context(self): """ Create a context manager that ensures code runs within action's context. The action does NOT finish when the context is exited. """ parent = _ACTION_CONTEXT.set(self) try: yield self finally: _ACTION_CONTEXT.reset(parent) # Python context manager implementation: def __enter__(self): """ Push this action onto the execution context. """ self._parent_token = _ACTION_CONTEXT.set(self) return self def __exit__(self, type, exception, traceback): """ Pop this action off the execution context, log finish message. """ _ACTION_CONTEXT.reset(self._parent_token) self._parent_token = None self.finish(exception) ## Message logging def log(self, message_type, **fields): """Log individual message.""" fields[TIMESTAMP_FIELD] = time.time() fields[TASK_UUID_FIELD] = self._identification[TASK_UUID_FIELD] fields[TASK_LEVEL_FIELD] = self._nextTaskLevel().as_list() fields[MESSAGE_TYPE_FIELD] = message_type self._logger.write(fields, fields.pop("__eliot_serializer__", None)) class WrongTask(Exception): """ Tried to add a message to an action, but the message was from another task. """ def __init__(self, action, message): Exception.__init__( self, "Tried to add {} to {}. Expected task_uuid = {}, got {}".format( message, action, action.task_uuid, message.task_uuid ), ) class WrongTaskLevel(Exception): """ Tried to add a message to an action, but the task level of the message indicated that it was not a direct child. 
""" def __init__(self, action, message): Exception.__init__( self, "Tried to add {} to {}, but {} is not a sibling of {}".format( message, action, message.task_level, action.task_level ), ) class WrongActionType(Exception): """ Tried to end a message with a different action_type than the beginning. """ def __init__(self, action, message): error_msg = "Tried to end {} with {}. Expected action_type = {}, got {}" Exception.__init__( self, error_msg.format( action, message, action.action_type, message.contents.get(ACTION_TYPE_FIELD, ""), ), ) class InvalidStatus(Exception): """ Tried to end a message with an invalid status. """ def __init__(self, action, message): error_msg = "Tried to end {} with {}. Expected status {} or {}, got {}" Exception.__init__( self, error_msg.format( action, message, SUCCEEDED_STATUS, FAILED_STATUS, message.contents.get(ACTION_STATUS_FIELD, ""), ), ) class DuplicateChild(Exception): """ Tried to add a child to an action that already had a child at that task level. """ def __init__(self, action, message): Exception.__init__( self, "Tried to add {} to {}, but already had child at {}".format( message, action, message.task_level ), ) class InvalidStartMessage(Exception): """ Tried to start an action with an invalid message. """ def __init__(self, message, reason): Exception.__init__(self, "Invalid start message {}: {}".format(message, reason)) @classmethod def wrong_status(cls, message): return cls(message, 'must have status "STARTED"') @classmethod def wrong_task_level(cls, message): return cls(message, "first message must have task level ending in 1") class WrittenAction(PClass): """ An Action that has been logged. This class is intended to provide a definition within Eliot of what an action actually is, and a means of constructing actions that are known to be valid. @ivar WrittenMessage start_message: A start message whose task UUID and level match this action, or C{None} if it is not yet set on the action. 
@ivar WrittenMessage end_message: An end message hose task UUID and level match this action. Can be C{None} if the action is unfinished. @ivar TaskLevel task_level: The action's task level, e.g. if start message has level C{[2, 3, 1]} it will be C{TaskLevel(level=[2, 3])}. @ivar UUID task_uuid: The UUID of the task to which this action belongs. @ivar _children: A L{pmap} from L{TaskLevel} to the L{WrittenAction} and L{WrittenMessage} objects that make up this action. """ start_message = field(type=optional(WrittenMessage), mandatory=True, initial=None) end_message = field(type=optional(WrittenMessage), mandatory=True, initial=None) task_level = field(type=TaskLevel, mandatory=True) task_uuid = field(type=unicode, mandatory=True, factory=unicode) # Pyrsistent doesn't support pmap_field with recursive types. _children = pmap_field(TaskLevel, object) @classmethod def from_messages(cls, start_message=None, children=pvector(), end_message=None): """ Create a C{WrittenAction} from C{WrittenMessage}s and other C{WrittenAction}s. @param WrittenMessage start_message: A message that has C{ACTION_STATUS_FIELD}, C{ACTION_TYPE_FIELD}, and a C{task_level} that ends in C{1}, or C{None} if unavailable. @param children: An iterable of C{WrittenMessage} and C{WrittenAction} @param WrittenMessage end_message: A message that has the same C{action_type} as this action. @raise WrongTask: If C{end_message} has a C{task_uuid} that differs from C{start_message.task_uuid}. @raise WrongTaskLevel: If any child message or C{end_message} has a C{task_level} that means it is not a direct child. @raise WrongActionType: If C{end_message} has an C{ACTION_TYPE_FIELD} that differs from the C{ACTION_TYPE_FIELD} of C{start_message}. @raise InvalidStatus: If C{end_message} doesn't have an C{action_status}, or has one that is not C{SUCCEEDED_STATUS} or C{FAILED_STATUS}. 
@raise InvalidStartMessage: If C{start_message} does not have a C{ACTION_STATUS_FIELD} of C{STARTED_STATUS}, or if it has a C{task_level} indicating that it is not the first message of an action. @return: A new C{WrittenAction}. """ actual_message = [ message for message in [start_message, end_message] + list(children) if message ][0] action = cls( task_level=actual_message.task_level.parent(), task_uuid=actual_message.task_uuid, ) if start_message: action = action._start(start_message) for child in children: if action._children.get(child.task_level, child) != child: raise DuplicateChild(action, child) action = action._add_child(child) if end_message: action = action._end(end_message) return action @property def action_type(self): """ The type of this action, e.g. C{"yourapp:subsystem:dosomething"}. """ if self.start_message: return self.start_message.contents[ACTION_TYPE_FIELD] elif self.end_message: return self.end_message.contents[ACTION_TYPE_FIELD] else: return None @property def status(self): """ One of C{STARTED_STATUS}, C{SUCCEEDED_STATUS}, C{FAILED_STATUS} or C{None}. """ message = self.end_message if self.end_message else self.start_message if message: return message.contents[ACTION_STATUS_FIELD] else: return None @property def start_time(self): """ The Unix timestamp of when the action started, or C{None} if there has been no start message added so far. """ if self.start_message: return self.start_message.timestamp @property def end_time(self): """ The Unix timestamp of when the action ended, or C{None} if there has been no end message. """ if self.end_message: return self.end_message.timestamp @property def exception(self): """ If the action failed, the name of the exception that was raised to cause it to fail. If the action succeeded, or hasn't finished yet, then C{None}. """ if self.end_message: return self.end_message.contents.get(EXCEPTION_FIELD, None) @property def reason(self): """ The reason the action failed. 
If the action succeeded, or hasn't finished yet, then C{None}. """ if self.end_message: return self.end_message.contents.get(REASON_FIELD, None) @property def children(self): """ The list of child messages and actions sorted by task level, excluding the start and end messages. """ return pvector(sorted(self._children.values(), key=lambda m: m.task_level)) def _validate_message(self, message): """ Is C{message} a valid direct child of this action? @param message: Either a C{WrittenAction} or a C{WrittenMessage}. @raise WrongTask: If C{message} has a C{task_uuid} that differs from the action's C{task_uuid}. @raise WrongTaskLevel: If C{message} has a C{task_level} that means it's not a direct child. """ if message.task_uuid != self.task_uuid: raise WrongTask(self, message) if not message.task_level.parent() == self.task_level: raise WrongTaskLevel(self, message) def _add_child(self, message): """ Return a new action with C{message} added as a child. Assumes C{message} is not an end message. @param message: Either a C{WrittenAction} or a C{WrittenMessage}. @raise WrongTask: If C{message} has a C{task_uuid} that differs from the action's C{task_uuid}. @raise WrongTaskLevel: If C{message} has a C{task_level} that means it's not a direct child. @return: A new C{WrittenAction}. """ self._validate_message(message) level = message.task_level return self.transform(("_children", level), message) def _start(self, start_message): """ Start this action given its start message. @param WrittenMessage start_message: A start message that has the same level as this action. @raise InvalidStartMessage: If C{start_message} does not have a C{ACTION_STATUS_FIELD} of C{STARTED_STATUS}, or if it has a C{task_level} indicating that it is not the first message of an action. 
""" if start_message.contents.get(ACTION_STATUS_FIELD, None) != STARTED_STATUS: raise InvalidStartMessage.wrong_status(start_message) if start_message.task_level.level[-1] != 1: raise InvalidStartMessage.wrong_task_level(start_message) return self.set(start_message=start_message) def _end(self, end_message): """ End this action with C{end_message}. Assumes that the action has not already been ended. @param WrittenMessage end_message: An end message that has the same level as this action. @raise WrongTask: If C{end_message} has a C{task_uuid} that differs from the action's C{task_uuid}. @raise WrongTaskLevel: If C{end_message} has a C{task_level} that means it's not a direct child. @raise InvalidStatus: If C{end_message} doesn't have an C{action_status}, or has one that is not C{SUCCEEDED_STATUS} or C{FAILED_STATUS}. @return: A new, completed C{WrittenAction}. """ action_type = end_message.contents.get(ACTION_TYPE_FIELD, None) if self.action_type not in (None, action_type): raise WrongActionType(self, end_message) self._validate_message(end_message) status = end_message.contents.get(ACTION_STATUS_FIELD, None) if status not in (FAILED_STATUS, SUCCEEDED_STATUS): raise InvalidStatus(self, end_message) return self.set(end_message=end_message) def start_action(logger=None, action_type="", _serializers=None, **fields): """ Create a child L{Action}, figuring out the parent L{Action} from execution context, and log the start message. You can use the result as a Python context manager, or use the L{Action.finish} API to explicitly finish it. 
def startTask(logger=None, action_type="", _serializers=None, **fields):
    """
    Like L{start_action}, but creates a new top-level L{Action} with no
    parent.

    @param logger: The L{eliot.ILogger} to which to write messages, or
        C{None} to use the default one.

    @param action_type: The type of this action,
        e.g. C{"yourapp:subsystem:dosomething"}.

    @param _serializers: Either a L{eliot._validation._ActionSerializers}
        instance or C{None}. In the latter case no validation or
        serialization will be done for messages generated by the L{Action}.

    @param fields: Additional fields to add to the start message.

    @return: A new L{Action}.
    """
    # A fresh UUID makes this a brand-new task; the empty level list marks
    # it as the root of its action tree.
    task = Action(
        logger, unicode(uuid4()), TaskLevel(level=[]), action_type, _serializers
    )
    task._start(fields)
    return task
def preserve_context(f):
    """
    Package up the given function with the current Eliot context, and then
    restore context and call given function when the resulting callable is
    run. This allows continuing the action context within a different thread.

    The result should only be used once, since it relies on
    L{Action.serialize_task_id} whose results should only be deserialized
    once.

    @param f: A callable.

    @return: One-time use callable that calls given function in context of a
        child of current Eliot action.
    """
    action = current_action()
    if action is None:
        # No context to preserve; hand back the function untouched.
        return f
    serialized_id = action.serialize_task_id()
    # Non-blocking lock acquisition doubles as an atomic "already called?"
    # flag that is safe across threads:
    once = threading.Lock()

    def restore_eliot_context(*args, **kwargs):
        if not once.acquire(False):
            raise TooManyCalls(f)
        with Action.continue_task(task_id=serialized_id):
            return f(*args, **kwargs)

    return restore_eliot_context
""" if wrapped_function is None: return partial( log_call, action_type=action_type, include_args=include_args, include_result=include_result, ) if action_type is None: if PY3: action_type = "{}.{}".format( wrapped_function.__module__, wrapped_function.__qualname__ ) else: action_type = wrapped_function.__name__ if PY3 and include_args is not None: from inspect import signature sig = signature(wrapped_function) if set(include_args) - set(sig.parameters): raise ValueError( ( "include_args ({}) lists arguments not in the " "wrapped function" ).format(include_args) ) @wraps(wrapped_function) def logging_wrapper(*args, **kwargs): callargs = getcallargs(wrapped_function, *args, **kwargs) # Remove self is it's included: if "self" in callargs: callargs.pop("self") # Filter arguments to log, if necessary: if include_args is not None: callargs = {k: callargs[k] for k in include_args} with start_action(action_type=action_type, **callargs) as ctx: result = wrapped_function(*args, **kwargs) if include_result: ctx.add_success_fields(result=result) return result return logging_wrapper def log_message(message_type, **fields): """Log a message in the context of the current action. If there is no current action, a new UUID will be generated. """ # Loggers will hopefully go away... logger = fields.pop("__eliot_logger__", None) action = current_action() if action is None: action = Action(logger, str(uuid4()), TaskLevel(level=[]), "") action.log(message_type, **fields) from . import _output eliot-1.11.0/eliot/_errors.py0000664000175000017500000000353413470775105017561 0ustar itamarstitamarst00000000000000""" Error-handling utility code. """ from __future__ import unicode_literals from inspect import getmro class ErrorExtraction(object): """ Extract fields from exceptions for failed-action messages. @ivar registry: Map exception class to function that extracts fields. 
""" def __init__(self): self.registry = {} def register_exception_extractor(self, exception_class, extractor): """ Register a function that converts exceptions to fields. @param exception_class: Class to register for. @param extractor: Single-argument callable that takes an exception of the given class (or a subclass) and returns a dictionary, fields to include in a failed action message. """ self.registry[exception_class] = extractor def get_fields_for_exception(self, logger, exception): """ Given an exception instance, return fields to add to the failed action message. @param logger: ``ILogger`` currently being used. @param exception: An exception instance. @return: Dictionary with fields to include. """ for klass in getmro(exception.__class__): if klass in self.registry: extractor = self.registry[klass] try: return extractor(exception) except: from ._traceback import write_traceback write_traceback(logger) return {} return {} _error_extraction = ErrorExtraction() register_exception_extractor = _error_extraction.register_exception_extractor get_fields_for_exception = _error_extraction.get_fields_for_exception # Default handler for OSError and IOError by registered EnvironmentError: register_exception_extractor(EnvironmentError, lambda e: {"errno": e.errno}) eliot-1.11.0/eliot/__init__.py0000664000175000017500000000640013573001140017621 0ustar itamarstitamarst00000000000000""" Eliot: Logging for Complex & Distributed Systems. """ from warnings import warn from sys import version_info # Enable asyncio contextvars support in Python 3.5/3.6: if version_info < (3, 7): # On Python 3.5.2 and earlier, some of the necessary attributes aren't exposed: if version_info < (3, 5, 3): raise RuntimeError( "This version of Eliot doesn't work on Python 3.5.2 or earlier. " "Either upgrade to Python 3.5.3 or later (on Ubuntu 16.04 " "you can use https://launchpad.net/~deadsnakes/+archive/ubuntu/ppa " "to get Python 3.6), or pin Eliot to version 1.7." 
def use_asyncio_context():
    """
    Deprecated no-op kept for backwards compatibility.

    Eliot 1.8.0 and later set up asyncio context support automatically, so
    calling this is no longer necessary.
    """
    message = "This function is no longer as needed as of Eliot 1.8.0."
    warn(message, category=DeprecationWarning, stacklevel=2)
"register_exception_extractor", "current_action", "use_asyncio_context", "ValidationError", # PEP 8 variants: "write_traceback", "write_failure", "start_action", "start_task", "add_destination", "add_destinations", "remove_destination", "add_global_fields", "to_file", "log_call", "log_message", "__version__", # Backwards compat for eliot-tree: "_parse", ] __version__ = get_versions()["version"] del get_versions eliot-1.11.0/examples/0000775000175000017500000000000013573001162016216 5ustar itamarstitamarst00000000000000eliot-1.11.0/examples/cross_process_client.py0000664000175000017500000000150213460352650023021 0ustar itamarstitamarst00000000000000""" Cross-process log tracing: HTTP client. """ from __future__ import unicode_literals import sys import requests from eliot import to_file, start_action, add_global_fields add_global_fields(process="client") to_file(sys.stdout) def remote_divide(x, y): with start_action(action_type="http_request", x=x, y=y) as action: task_id = action.serialize_task_id() response = requests.get( "http://localhost:5000/?x={}&y={}".format(x, y), headers={"x-eliot-task-id": task_id}) response.raise_for_status() # ensure this is a successful response result = float(response.text) action.add_success_fields(result=result) return result if __name__ == '__main__': with start_action(action_type="main"): remote_divide(int(sys.argv[1]), int(sys.argv[2])) eliot-1.11.0/examples/logfile.py0000664000175000017500000000153713573001140020213 0ustar itamarstitamarst00000000000000""" Output an Eliot message to a log file using the threaded log writer. """ from __future__ import unicode_literals, print_function from twisted.internet.task import react from eliot.logwriter import ThreadedWriter from eliot import log_message, FileDestination def main(reactor): print("Logging to example-eliot.log...") logWriter = ThreadedWriter( FileDestination(file=open("example-eliot.log", "ab")), reactor) # Manually start the service, which will add it as a # destination. 
def divide(x, y):
    """
    Divide x by y inside a logged "divide" action.

    The operands are logged when the action starts and the quotient is
    attached as a success field when it finishes.
    """
    with start_action(action_type="divide", x=x, y=y) as action:
        quotient = x / y
        action.add_success_fields(result=quotient)
        return quotient
""" from __future__ import print_function from eliot import log_message, start_action, add_destinations from eliot.journald import JournaldDestination add_destinations(JournaldDestination()) def divide(a, b): with start_action(action_type="divide", a=a, b=b): return a / b print(divide(10, 2)) log_message(message_type="inbetween") print(divide(10, 0)) eliot-1.11.0/examples/trio_say.py0000664000175000017500000000066113460352650020432 0ustar itamarstitamarst00000000000000from eliot import start_action, to_file import trio to_file(open("trio.log", "w")) async def say(message, delay): with start_action(action_type="say", message=message): await trio.sleep(delay) async def main(): with start_action(action_type="main"): async with trio.open_nursery() as nursery: nursery.start_soon(say, "hello", 1) nursery.start_soon(say, "world", 2) trio.run(main) eliot-1.11.0/examples/cross_thread.py0000664000175000017500000000125713460352650021263 0ustar itamarstitamarst00000000000000#!/usr/bin/env python """ Example of an Eliot action context spanning multiple threads. """ from __future__ import unicode_literals from threading import Thread from sys import stdout from eliot import to_file, preserve_context, start_action to_file(stdout) def add_in_thread(x, y): with start_action(action_type="in_thread", x=x, y=y) as context: context.add_success_fields(result=x+y) with start_action(action_type="main_thread"): # Preserve Eliot context and restore in new thread: thread = Thread(target=preserve_context(add_in_thread), kwargs={"x": 3, "y": 4}) thread.start() # Wait for the thread to exit: thread.join() eliot-1.11.0/examples/stdlib.py0000664000175000017500000000205013460352650020054 0ustar itamarstitamarst00000000000000""" Example of routing standard library logging to Eliot. The assumption is you have legacy logging using stdlib, and are switching over to Eliot. 
""" import logging import sys from eliot.stdlib import EliotHandler from eliot import start_action, to_file # A Logger left over from before switch to Eliot LEGACY_LOGGER = logging.Logger("mypackage") def do_a_thing(i): with start_action(action_type="mypackage:do_a_thing"): # run your business logic.... if i == 3: LEGACY_LOGGER.error("The number 3 is a bad number, don't use it.") raise ValueError("I hate the number 3") def main(): with start_action(action_type="mypackage:main"): for i in [1, 3]: try: do_a_thing(i) except ValueError: LEGACY_LOGGER.info("Number {} was rejected.".format(i)) if __name__ == '__main__': # Hook up stdlib logging to Eliot: LEGACY_LOGGER.addHandler(EliotHandler()) # Write Eliot logs to stdout: to_file(sys.stdout) # Run the code: main() eliot-1.11.0/examples/asyncio_linkcheck.py0000664000175000017500000000134713460352650022263 0ustar itamarstitamarst00000000000000import asyncio import aiohttp from eliot import start_action, to_file to_file(open("linkcheck.log", "w")) async def check_links(urls): session = aiohttp.ClientSession() with start_action(action_type="check_links", urls=urls): for url in urls: try: with start_action(action_type="download", url=url): async with session.get(url) as response: response.raise_for_status() except Exception as e: raise ValueError(str(e)) try: loop = asyncio.get_event_loop() loop.run_until_complete( check_links(["http://eliot.readthedocs.io", "http://nosuchurl"]) ) except ValueError: print("Not all links were valid.") eliot-1.11.0/examples/linkcheck.py0000664000175000017500000000112213460352650020525 0ustar itamarstitamarst00000000000000import requests from eliot import start_action, to_file to_file(open("linkcheck.log", "w")) def check_links(urls): with start_action(action_type="check_links", urls=urls): for url in urls: try: with start_action(action_type="download", url=url): response = requests.get(url) response.raise_for_status() except Exception as e: raise ValueError(str(e)) try: 
check_links(["http://eliot.readthedocs.io", "http://nosuchurl"]) except ValueError: print("Not all links were valid.") eliot-1.11.0/examples/dask_eliot.py0000664000175000017500000000210713460352650020714 0ustar itamarstitamarst00000000000000from os import getpid from dask.bag import from_sequence import dask.config from dask.distributed import Client from eliot import log_call, to_file from eliot.dask import compute_with_trace @log_call def multiply(x, y=7): return x * y @log_call def add(x, y): return x + y @log_call def main_computation(): bag = from_sequence([1, 2, 3]) bag = bag.map(multiply).fold(add) return compute_with_trace(bag)[0] # instead of dask.compute(bag) def _start_logging(): # Name log file based on PID, so different processes so stomp on each # others' logfiles: to_file(open("{}.log".format(getpid()), "a")) def main(): # Setup logging on the main process: _start_logging() # Start three worker processes on the local machine: client = Client(n_workers=3, threads_per_worker=1) # Setup Eliot logging on each worker process: client.run(_start_logging) # Run the Dask computation in the worker processes: result = main_computation() print("Result:", result) if __name__ == '__main__': import dask_eliot dask_eliot.main() eliot-1.11.0/examples/rometrip_actions.py0000664000175000017500000000140513573001140022145 0ustar itamarstitamarst00000000000000from sys import stdout from eliot import start_action, to_file to_file(stdout) class Place(object): def __init__(self, name, contained=()): self.name = name self.contained = contained def visited(self, people): # No need to repetitively log people, since caller will: with start_action(action_type="visited", place=self.name): for thing in self.contained: thing.visited(people) def honeymoon(family, destination): with start_action(action_type="honeymoon", people=family): destination.visited(family) honeymoon(["Mrs. Casaubon", "Mr. 
Casaubon"], Place("Rome, Italy", [Place("Vatican Museum", [Place("Statue #1"), Place("Statue #2")])])) eliot-1.11.0/examples/stdout.py0000664000175000017500000000057213573001140020112 0ustar itamarstitamarst00000000000000""" Output a few Eliot message to standard out. """ from __future__ import unicode_literals import sys import time from eliot import log_message, to_file to_file(sys.stdout) def main(): log_message(message_type="test", value="hello", another=1) time.sleep(0.2) log_message(message_type="test", value="goodbye", another=2) if __name__ == '__main__': main() eliot-1.11.0/setup.cfg0000664000175000017500000000040213573001162016215 0ustar itamarstitamarst00000000000000[bdist_wheel] universal = 1 [metadata] license_file = LICENSE [versioneer] vcs = git style = pep440 versionfile_source = eliot/_version.py versionfile_build = eliot/_version.py tag_prefix = parentdir_prefix = eliot- [egg_info] tag_build = tag_date = 0 eliot-1.11.0/versioneer.py0000664000175000017500000020605113460352650017145 0ustar itamarstitamarst00000000000000 # Version: 0.18 """The Versioneer - like a rocketeer, but for versions. The Versioneer ============== * like a rocketeer, but for versions! * https://github.com/warner/python-versioneer * Brian Warner * License: Public Domain * Compatible With: python2.6, 2.7, 3.2, 3.3, 3.4, 3.5, 3.6, and pypy * [![Latest Version] (https://pypip.in/version/versioneer/badge.svg?style=flat) ](https://pypi.python.org/pypi/versioneer/) * [![Build Status] (https://travis-ci.org/warner/python-versioneer.png?branch=master) ](https://travis-ci.org/warner/python-versioneer) This is a tool for managing a recorded version number in distutils-based python projects. The goal is to remove the tedious and error-prone "update the embedded version string" step from your release process. Making a new release should be as easy as recording a new tag in your version-control system, and maybe making new tarballs. 
## Quick Install * `pip install versioneer` to somewhere to your $PATH * add a `[versioneer]` section to your setup.cfg (see below) * run `versioneer install` in your source tree, commit the results ## Version Identifiers Source trees come from a variety of places: * a version-control system checkout (mostly used by developers) * a nightly tarball, produced by build automation * a snapshot tarball, produced by a web-based VCS browser, like github's "tarball from tag" feature * a release tarball, produced by "setup.py sdist", distributed through PyPI Within each source tree, the version identifier (either a string or a number, this tool is format-agnostic) can come from a variety of places: * ask the VCS tool itself, e.g. "git describe" (for checkouts), which knows about recent "tags" and an absolute revision-id * the name of the directory into which the tarball was unpacked * an expanded VCS keyword ($Id$, etc) * a `_version.py` created by some earlier build step For released software, the version identifier is closely related to a VCS tag. Some projects use tag names that include more than just the version string (e.g. "myproject-1.2" instead of just "1.2"), in which case the tool needs to strip the tag prefix to extract the version identifier. For unreleased software (between tags), the version identifier should provide enough information to help developers recreate the same tree, while also giving them an idea of roughly how old the tree is (after version 1.2, before version 1.3). Many VCS systems can report a description that captures this, for example `git describe --tags --dirty --always` reports things like "0.7-1-g574ab98-dirty" to indicate that the checkout is one revision past the 0.7 tag, has a unique revision id of "574ab98", and is "dirty" (it has uncommitted changes. 
The version identifier is used for multiple purposes: * to allow the module to self-identify its version: `myproject.__version__` * to choose a name and prefix for a 'setup.py sdist' tarball ## Theory of Operation Versioneer works by adding a special `_version.py` file into your source tree, where your `__init__.py` can import it. This `_version.py` knows how to dynamically ask the VCS tool for version information at import time. `_version.py` also contains `$Revision$` markers, and the installation process marks `_version.py` to have this marker rewritten with a tag name during the `git archive` command. As a result, generated tarballs will contain enough information to get the proper version. To allow `setup.py` to compute a version too, a `versioneer.py` is added to the top level of your source tree, next to `setup.py` and the `setup.cfg` that configures it. This overrides several distutils/setuptools commands to compute the version when invoked, and changes `setup.py build` and `setup.py sdist` to replace `_version.py` with a small static file that contains just the generated version data. ## Installation See [INSTALL.md](./INSTALL.md) for detailed installation instructions. ## Version-String Flavors Code which uses Versioneer can learn about its version string at runtime by importing `_version` from your main `__init__.py` file and running the `get_versions()` function. From the "outside" (e.g. in `setup.py`), you can import the top-level `versioneer.py` and run `get_versions()`. Both functions return a dictionary with different flavors of version information: * `['version']`: A condensed version string, rendered using the selected style. This is the most commonly used value for the project's version string. The default "pep440" style yields strings like `0.11`, `0.11+2.g1076c97`, or `0.11+2.g1076c97.dirty`. See the "Styles" section below for alternative styles. * `['full-revisionid']`: detailed revision identifier. 
For Git, this is the full SHA1 commit id, e.g. "1076c978a8d3cfc70f408fe5974aa6c092c949ac". * `['date']`: Date and time of the latest `HEAD` commit. For Git, it is the commit date in ISO 8601 format. This will be None if the date is not available. * `['dirty']`: a boolean, True if the tree has uncommitted changes. Note that this is only accurate if run in a VCS checkout, otherwise it is likely to be False or None * `['error']`: if the version string could not be computed, this will be set to a string describing the problem, otherwise it will be None. It may be useful to throw an exception in setup.py if this is set, to avoid e.g. creating tarballs with a version string of "unknown". Some variants are more useful than others. Including `full-revisionid` in a bug report should allow developers to reconstruct the exact code being tested (or indicate the presence of local changes that should be shared with the developers). `version` is suitable for display in an "about" box or a CLI `--version` output: it can be easily compared against release notes and lists of bugs fixed in various releases. The installer adds the following text to your `__init__.py` to place a basic version in `YOURPROJECT.__version__`: from ._version import get_versions __version__ = get_versions()['version'] del get_versions ## Styles The setup.cfg `style=` configuration controls how the VCS information is rendered into a version string. The default style, "pep440", produces a PEP440-compliant string, equal to the un-prefixed tag name for actual releases, and containing an additional "local version" section with more detail for in-between builds. For Git, this is TAG[+DISTANCE.gHEX[.dirty]] , using information from `git describe --tags --dirty --always`. For example "0.11+2.g1076c97.dirty" indicates that the tree is like the "1076c97" commit but has uncommitted changes (".dirty"), and that this commit is two revisions ("+2") beyond the "0.11" tag. 
For released software (exactly equal to a known tag), the identifier will only contain the stripped tag, e.g. "0.11". Other styles are available. See [details.md](details.md) in the Versioneer source tree for descriptions. ## Debugging Versioneer tries to avoid fatal errors: if something goes wrong, it will tend to return a version of "0+unknown". To investigate the problem, run `setup.py version`, which will run the version-lookup code in a verbose mode, and will display the full contents of `get_versions()` (including the `error` string, which may help identify what went wrong). ## Known Limitations Some situations are known to cause problems for Versioneer. This details the most significant ones. More can be found on Github [issues page](https://github.com/warner/python-versioneer/issues). ### Subprojects Versioneer has limited support for source trees in which `setup.py` is not in the root directory (e.g. `setup.py` and `.git/` are *not* siblings). The are two common reasons why `setup.py` might not be in the root: * Source trees which contain multiple subprojects, such as [Buildbot](https://github.com/buildbot/buildbot), which contains both "master" and "slave" subprojects, each with their own `setup.py`, `setup.cfg`, and `tox.ini`. Projects like these produce multiple PyPI distributions (and upload multiple independently-installable tarballs). * Source trees whose main purpose is to contain a C library, but which also provide bindings to Python (and perhaps other langauges) in subdirectories. Versioneer will look for `.git` in parent directories, and most operations should get the right version string. However `pip` and `setuptools` have bugs and implementation details which frequently cause `pip install .` from a subproject directory to fail to find a correct version string (so it usually defaults to `0+unknown`). `pip install --editable .` should work correctly. `setup.py install` might work too. 
Pip-8.1.1 is known to have this problem, but hopefully it will get fixed in some later version. [Bug #38](https://github.com/warner/python-versioneer/issues/38) is tracking this issue. The discussion in [PR #61](https://github.com/warner/python-versioneer/pull/61) describes the issue from the Versioneer side in more detail. [pip PR#3176](https://github.com/pypa/pip/pull/3176) and [pip PR#3615](https://github.com/pypa/pip/pull/3615) contain work to improve pip to let Versioneer work correctly. Versioneer-0.16 and earlier only looked for a `.git` directory next to the `setup.cfg`, so subprojects were completely unsupported with those releases. ### Editable installs with setuptools <= 18.5 `setup.py develop` and `pip install --editable .` allow you to install a project into a virtualenv once, then continue editing the source code (and test) without re-installing after every change. "Entry-point scripts" (`setup(entry_points={"console_scripts": ..})`) are a convenient way to specify executable scripts that should be installed along with the python package. These both work as expected when using modern setuptools. When using setuptools-18.5 or earlier, however, certain operations will cause `pkg_resources.DistributionNotFound` errors when running the entrypoint script, which must be resolved by re-installing the package. This happens when the install happens with one version, then the egg_info data is regenerated while a different version is checked out. Many setup.py commands cause egg_info to be rebuilt (including `sdist`, `wheel`, and installing into a different virtualenv), so this can be surprising. [Bug #83](https://github.com/warner/python-versioneer/issues/83) describes this one, but upgrading to a newer version of setuptools should probably resolve it. ### Unicode version strings While Versioneer works (and is continually tested) with both Python 2 and Python 3, it is not entirely consistent with bytes-vs-unicode distinctions. 
Newer releases probably generate unicode version strings on py2. It's not clear that this is wrong, but it may be surprising for applications when then write these strings to a network connection or include them in bytes-oriented APIs like cryptographic checksums. [Bug #71](https://github.com/warner/python-versioneer/issues/71) investigates this question. ## Updating Versioneer To upgrade your project to a new release of Versioneer, do the following: * install the new Versioneer (`pip install -U versioneer` or equivalent) * edit `setup.cfg`, if necessary, to include any new configuration settings indicated by the release notes. See [UPGRADING](./UPGRADING.md) for details. * re-run `versioneer install` in your source tree, to replace `SRC/_version.py` * commit any changed files ## Future Directions This tool is designed to make it easily extended to other version-control systems: all VCS-specific components are in separate directories like src/git/ . The top-level `versioneer.py` script is assembled from these components by running make-versioneer.py . In the future, make-versioneer.py will take a VCS name as an argument, and will construct a version of `versioneer.py` that is specific to the given VCS. It might also take the configuration arguments that are currently provided manually during installation by editing setup.py . Alternatively, it might go the other direction and include code from all supported VCS systems, reducing the number of intermediate scripts. ## License To make Versioneer easier to embed, all its code is dedicated to the public domain. The `_version.py` that it creates is also in the public domain. Specifically, both are released under the Creative Commons "Public Domain Dedication" license (CC0-1.0), as described in https://creativecommons.org/publicdomain/zero/1.0/ . 
from __future__ import print_function

try:
    import configparser
except ImportError:
    import ConfigParser as configparser
import errno
import json
import os
import re
import subprocess
import sys


class VersioneerConfig:
    """Container for Versioneer configuration parameters."""


def get_root():
    """Get the project root directory.

    We require that all commands are run from the project root, i.e. the
    directory that contains setup.py, setup.cfg, and versioneer.py .
    """
    root = os.path.realpath(os.path.abspath(os.getcwd()))
    setup_py = os.path.join(root, "setup.py")
    versioneer_py = os.path.join(root, "versioneer.py")
    if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
        # allow 'python path/to/setup.py COMMAND'
        root = os.path.dirname(os.path.realpath(os.path.abspath(sys.argv[0])))
        setup_py = os.path.join(root, "setup.py")
        versioneer_py = os.path.join(root, "versioneer.py")
    if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
        # Message wording fixed: original said "unable to *run* the
        # project root directory".
        err = ("Versioneer was unable to find the project root directory. "
               "Versioneer requires setup.py to be executed from "
               "its immediate directory (like 'python setup.py COMMAND'), "
               "or in a way that lets it use sys.argv[0] to find the root "
               "(like 'python path/to/setup.py COMMAND').")
        raise VersioneerBadRootError(err)
    try:
        # Certain runtime workflows (setup.py install/develop in a setuptools
        # tree) execute all dependencies in a single python process, so
        # "versioneer" may be imported multiple times, and python's shared
        # module-import table will cache the first one. So we can't use
        # os.path.dirname(__file__), as that will find whichever
        # versioneer.py was first imported, even in later projects.
        me = os.path.realpath(os.path.abspath(__file__))
        me_dir = os.path.normcase(os.path.splitext(me)[0])
        vsr_dir = os.path.normcase(os.path.splitext(versioneer_py)[0])
        if me_dir != vsr_dir:
            print("Warning: build in %s is using versioneer.py from %s"
                  % (os.path.dirname(me), versioneer_py))
    except NameError:
        pass
    return root


def get_config_from_root(root):
    """Read the project setup.cfg file to determine Versioneer config.

    Returns a populated VersioneerConfig; missing optional keys become
    None (or "" for style).
    """
    # This might raise EnvironmentError (if setup.cfg is missing), or
    # configparser.NoSectionError (if it lacks a [versioneer] section), or
    # configparser.NoOptionError (if it lacks "VCS="). See the docstring at
    # the top of versioneer.py for instructions on writing your setup.cfg .
    setup_cfg = os.path.join(root, "setup.cfg")
    # SafeConfigParser and readfp() were deprecated aliases and were removed
    # in Python 3.12; ConfigParser/read_file() are the supported (and, on
    # Python 3, behaviorally identical) equivalents.
    parser = configparser.ConfigParser()
    with open(setup_cfg, "r") as f:
        parser.read_file(f)
    VCS = parser.get("versioneer", "VCS")  # mandatory

    def get(parser, name):
        # Missing optional keys are simply treated as unset.
        if parser.has_option("versioneer", name):
            return parser.get("versioneer", name)
        return None
    cfg = VersioneerConfig()
    cfg.VCS = VCS
    cfg.style = get(parser, "style") or ""
    cfg.versionfile_source = get(parser, "versionfile_source")
    cfg.versionfile_build = get(parser, "versionfile_build")
    cfg.tag_prefix = get(parser, "tag_prefix")
    # A quoted empty prefix in setup.cfg means "no prefix".
    if cfg.tag_prefix in ("''", '""'):
        cfg.tag_prefix = ""
    cfg.parentdir_prefix = get(parser, "parentdir_prefix")
    cfg.verbose = get(parser, "verbose")
    return cfg


class NotThisMethod(Exception):
    """Exception raised if a method is not valid for the current scenario."""


# these dictionaries contain VCS-specific tools
LONG_VERSION_PY = {}
HANDLERS = {}


def register_vcs_handler(vcs, method):  # decorator
    """Decorator to mark a method as the handler for a particular VCS."""
    def decorate(f):
        """Store f in HANDLERS[vcs][method]."""
        if vcs not in HANDLERS:
            HANDLERS[vcs] = {}
        HANDLERS[vcs][method] = f
        return f
    return decorate


def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
                env=None):
    """Call the given command(s), trying each name in `commands` in turn.

    Returns (stdout, returncode). (None, None) means no command could be
    started; (None, returncode) means it ran but exited non-zero.
    """
    assert isinstance(commands, list)
    p = None
    for c in commands:
        try:
            dispcmd = str([c] + args)
            # remember shell=False, so use git.cmd on windows, not just git
            p = subprocess.Popen([c] + args, cwd=cwd, env=env,
                                 stdout=subprocess.PIPE,
                                 stderr=(subprocess.PIPE if hide_stderr
                                         else None))
            break
        except EnvironmentError:
            e = sys.exc_info()[1]
            if e.errno == errno.ENOENT:
                # Not found under this name; try the next candidate.
                continue
            if verbose:
                print("unable to run %s" % dispcmd)
                print(e)
            return None, None
    else:
        if verbose:
            print("unable to find command, tried %s" % (commands,))
        return None, None
    stdout = p.communicate()[0].strip()
    if sys.version_info[0] >= 3:
        stdout = stdout.decode()
    if p.returncode != 0:
        if verbose:
            print("unable to run %s (error)" % dispcmd)
            print("stdout was %s" % stdout)
        return None, p.returncode
    return stdout, p.returncode
git_refnames = "%(DOLLAR)sFormat:%%d%(DOLLAR)s" git_full = "%(DOLLAR)sFormat:%%H%(DOLLAR)s" git_date = "%(DOLLAR)sFormat:%%ci%(DOLLAR)s" keywords = {"refnames": git_refnames, "full": git_full, "date": git_date} return keywords class VersioneerConfig: """Container for Versioneer configuration parameters.""" def get_config(): """Create, populate and return the VersioneerConfig() object.""" # these strings are filled in when 'setup.py versioneer' creates # _version.py cfg = VersioneerConfig() cfg.VCS = "git" cfg.style = "%(STYLE)s" cfg.tag_prefix = "%(TAG_PREFIX)s" cfg.parentdir_prefix = "%(PARENTDIR_PREFIX)s" cfg.versionfile_source = "%(VERSIONFILE_SOURCE)s" cfg.verbose = False return cfg class NotThisMethod(Exception): """Exception raised if a method is not valid for the current scenario.""" LONG_VERSION_PY = {} HANDLERS = {} def register_vcs_handler(vcs, method): # decorator """Decorator to mark a method as the handler for a particular VCS.""" def decorate(f): """Store f in HANDLERS[vcs][method].""" if vcs not in HANDLERS: HANDLERS[vcs] = {} HANDLERS[vcs][method] = f return f return decorate def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, env=None): """Call the given command(s).""" assert isinstance(commands, list) p = None for c in commands: try: dispcmd = str([c] + args) # remember shell=False, so use git.cmd on windows, not just git p = subprocess.Popen([c] + args, cwd=cwd, env=env, stdout=subprocess.PIPE, stderr=(subprocess.PIPE if hide_stderr else None)) break except EnvironmentError: e = sys.exc_info()[1] if e.errno == errno.ENOENT: continue if verbose: print("unable to run %%s" %% dispcmd) print(e) return None, None else: if verbose: print("unable to find command, tried %%s" %% (commands,)) return None, None stdout = p.communicate()[0].strip() if sys.version_info[0] >= 3: stdout = stdout.decode() if p.returncode != 0: if verbose: print("unable to run %%s (error)" %% dispcmd) print("stdout was %%s" %% stdout) return None, 
p.returncode return stdout, p.returncode def versions_from_parentdir(parentdir_prefix, root, verbose): """Try to determine the version from the parent directory name. Source tarballs conventionally unpack into a directory that includes both the project name and a version string. We will also support searching up two directory levels for an appropriately named parent directory """ rootdirs = [] for i in range(3): dirname = os.path.basename(root) if dirname.startswith(parentdir_prefix): return {"version": dirname[len(parentdir_prefix):], "full-revisionid": None, "dirty": False, "error": None, "date": None} else: rootdirs.append(root) root = os.path.dirname(root) # up a level if verbose: print("Tried directories %%s but none started with prefix %%s" %% (str(rootdirs), parentdir_prefix)) raise NotThisMethod("rootdir doesn't start with parentdir_prefix") @register_vcs_handler("git", "get_keywords") def git_get_keywords(versionfile_abs): """Extract version information from the given file.""" # the code embedded in _version.py can just fetch the value of these # keywords. When used from setup.py, we don't want to import _version.py, # so we do it with a regexp instead. This function is not used from # _version.py. 
keywords = {} try: f = open(versionfile_abs, "r") for line in f.readlines(): if line.strip().startswith("git_refnames ="): mo = re.search(r'=\s*"(.*)"', line) if mo: keywords["refnames"] = mo.group(1) if line.strip().startswith("git_full ="): mo = re.search(r'=\s*"(.*)"', line) if mo: keywords["full"] = mo.group(1) if line.strip().startswith("git_date ="): mo = re.search(r'=\s*"(.*)"', line) if mo: keywords["date"] = mo.group(1) f.close() except EnvironmentError: pass return keywords @register_vcs_handler("git", "keywords") def git_versions_from_keywords(keywords, tag_prefix, verbose): """Get version information from git keywords.""" if not keywords: raise NotThisMethod("no keywords at all, weird") date = keywords.get("date") if date is not None: # git-2.2.0 added "%%cI", which expands to an ISO-8601 -compliant # datestamp. However we prefer "%%ci" (which expands to an "ISO-8601 # -like" string, which we must then edit to make compliant), because # it's been around since git-1.5.3, and it's too difficult to # discover which version we're using, or to work around using an # older one. date = date.strip().replace(" ", "T", 1).replace(" ", "", 1) refnames = keywords["refnames"].strip() if refnames.startswith("$Format"): if verbose: print("keywords are unexpanded, not using") raise NotThisMethod("unexpanded keywords, not a git-archive tarball") refs = set([r.strip() for r in refnames.strip("()").split(",")]) # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of # just "foo-1.0". If we see a "tag: " prefix, prefer those. TAG = "tag: " tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)]) if not tags: # Either we're using git < 1.8.3, or there really are no tags. We use # a heuristic: assume all version tags have a digit. The old git %%d # expansion behaves like git log --decorate=short and strips out the # refs/heads/ and refs/tags/ prefixes that would let us distinguish # between branches and tags. 
By ignoring refnames without digits, we # filter out many common branch names like "release" and # "stabilization", as well as "HEAD" and "master". tags = set([r for r in refs if re.search(r'\d', r)]) if verbose: print("discarding '%%s', no digits" %% ",".join(refs - tags)) if verbose: print("likely tags: %%s" %% ",".join(sorted(tags))) for ref in sorted(tags): # sorting will prefer e.g. "2.0" over "2.0rc1" if ref.startswith(tag_prefix): r = ref[len(tag_prefix):] if verbose: print("picking %%s" %% r) return {"version": r, "full-revisionid": keywords["full"].strip(), "dirty": False, "error": None, "date": date} # no suitable tags, so version is "0+unknown", but full hex is still there if verbose: print("no suitable tags, using unknown + full revision id") return {"version": "0+unknown", "full-revisionid": keywords["full"].strip(), "dirty": False, "error": "no suitable tags", "date": None} @register_vcs_handler("git", "pieces_from_vcs") def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): """Get version from 'git describe' in the root of the source tree. This only gets called if the git-archive 'subst' keywords were *not* expanded, and _version.py hasn't already been rewritten with a short version string, meaning we're inside a checked out source tree. 
""" GITS = ["git"] if sys.platform == "win32": GITS = ["git.cmd", "git.exe"] out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root, hide_stderr=True) if rc != 0: if verbose: print("Directory %%s not under git control" %% root) raise NotThisMethod("'git rev-parse --git-dir' returned error") # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty] # if there isn't one, this yields HEX[-dirty] (no NUM) describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty", "--always", "--long", "--match", "%%s*" %% tag_prefix], cwd=root) # --long was added in git-1.5.5 if describe_out is None: raise NotThisMethod("'git describe' failed") describe_out = describe_out.strip() full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root) if full_out is None: raise NotThisMethod("'git rev-parse' failed") full_out = full_out.strip() pieces = {} pieces["long"] = full_out pieces["short"] = full_out[:7] # maybe improved later pieces["error"] = None # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty] # TAG might have hyphens. git_describe = describe_out # look for -dirty suffix dirty = git_describe.endswith("-dirty") pieces["dirty"] = dirty if dirty: git_describe = git_describe[:git_describe.rindex("-dirty")] # now we have TAG-NUM-gHEX or HEX if "-" in git_describe: # TAG-NUM-gHEX mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe) if not mo: # unparseable. Maybe git-describe is misbehaving? 
pieces["error"] = ("unable to parse git-describe output: '%%s'" %% describe_out) return pieces # tag full_tag = mo.group(1) if not full_tag.startswith(tag_prefix): if verbose: fmt = "tag '%%s' doesn't start with prefix '%%s'" print(fmt %% (full_tag, tag_prefix)) pieces["error"] = ("tag '%%s' doesn't start with prefix '%%s'" %% (full_tag, tag_prefix)) return pieces pieces["closest-tag"] = full_tag[len(tag_prefix):] # distance: number of commits since tag pieces["distance"] = int(mo.group(2)) # commit: short hex revision ID pieces["short"] = mo.group(3) else: # HEX: no tags pieces["closest-tag"] = None count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"], cwd=root) pieces["distance"] = int(count_out) # total number of commits # commit date: see ISO-8601 comment in git_versions_from_keywords() date = run_command(GITS, ["show", "-s", "--format=%%ci", "HEAD"], cwd=root)[0].strip() pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1) return pieces def plus_or_dot(pieces): """Return a + if we don't already have one, else return a .""" if "+" in pieces.get("closest-tag", ""): return "." return "+" def render_pep440(pieces): """Build up version string, with post-release "local version identifier". Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty Exceptions: 1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: rendered += plus_or_dot(pieces) rendered += "%%d.g%%s" %% (pieces["distance"], pieces["short"]) if pieces["dirty"]: rendered += ".dirty" else: # exception #1 rendered = "0+untagged.%%d.g%%s" %% (pieces["distance"], pieces["short"]) if pieces["dirty"]: rendered += ".dirty" return rendered def render_pep440_pre(pieces): """TAG[.post.devDISTANCE] -- No -dirty. Exceptions: 1: no tags. 
0.post.devDISTANCE """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"]: rendered += ".post.dev%%d" %% pieces["distance"] else: # exception #1 rendered = "0.post.dev%%d" %% pieces["distance"] return rendered def render_pep440_post(pieces): """TAG[.postDISTANCE[.dev0]+gHEX] . The ".dev0" means dirty. Note that .dev0 sorts backwards (a dirty tree will appear "older" than the corresponding clean one), but you shouldn't be releasing software with -dirty anyways. Exceptions: 1: no tags. 0.postDISTANCE[.dev0] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: rendered += ".post%%d" %% pieces["distance"] if pieces["dirty"]: rendered += ".dev0" rendered += plus_or_dot(pieces) rendered += "g%%s" %% pieces["short"] else: # exception #1 rendered = "0.post%%d" %% pieces["distance"] if pieces["dirty"]: rendered += ".dev0" rendered += "+g%%s" %% pieces["short"] return rendered def render_pep440_old(pieces): """TAG[.postDISTANCE[.dev0]] . The ".dev0" means dirty. Eexceptions: 1: no tags. 0.postDISTANCE[.dev0] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: rendered += ".post%%d" %% pieces["distance"] if pieces["dirty"]: rendered += ".dev0" else: # exception #1 rendered = "0.post%%d" %% pieces["distance"] if pieces["dirty"]: rendered += ".dev0" return rendered def render_git_describe(pieces): """TAG[-DISTANCE-gHEX][-dirty]. Like 'git describe --tags --dirty --always'. Exceptions: 1: no tags. HEX[-dirty] (note: no 'g' prefix) """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"]: rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"]) else: # exception #1 rendered = pieces["short"] if pieces["dirty"]: rendered += "-dirty" return rendered def render_git_describe_long(pieces): """TAG-DISTANCE-gHEX[-dirty]. Like 'git describe --tags --dirty --always -long'. The distance/hash is unconditional. 
Exceptions: 1: no tags. HEX[-dirty] (note: no 'g' prefix) """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"]) else: # exception #1 rendered = pieces["short"] if pieces["dirty"]: rendered += "-dirty" return rendered def render(pieces, style): """Render the given version pieces into the requested style.""" if pieces["error"]: return {"version": "unknown", "full-revisionid": pieces.get("long"), "dirty": None, "error": pieces["error"], "date": None} if not style or style == "default": style = "pep440" # the default if style == "pep440": rendered = render_pep440(pieces) elif style == "pep440-pre": rendered = render_pep440_pre(pieces) elif style == "pep440-post": rendered = render_pep440_post(pieces) elif style == "pep440-old": rendered = render_pep440_old(pieces) elif style == "git-describe": rendered = render_git_describe(pieces) elif style == "git-describe-long": rendered = render_git_describe_long(pieces) else: raise ValueError("unknown style '%%s'" %% style) return {"version": rendered, "full-revisionid": pieces["long"], "dirty": pieces["dirty"], "error": None, "date": pieces.get("date")} def get_versions(): """Get version information or return default if unable to do so.""" # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have # __file__, we can work backwards from there to the root. Some # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which # case we can only use expanded keywords. cfg = get_config() verbose = cfg.verbose try: return git_versions_from_keywords(get_keywords(), cfg.tag_prefix, verbose) except NotThisMethod: pass try: root = os.path.realpath(__file__) # versionfile_source is the relative path from the top of the source # tree (where the .git directory might live) to this file. Invert # this to find the root from __file__. 
for i in cfg.versionfile_source.split('/'): root = os.path.dirname(root) except NameError: return {"version": "0+unknown", "full-revisionid": None, "dirty": None, "error": "unable to find root of source tree", "date": None} try: pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose) return render(pieces, cfg.style) except NotThisMethod: pass try: if cfg.parentdir_prefix: return versions_from_parentdir(cfg.parentdir_prefix, root, verbose) except NotThisMethod: pass return {"version": "0+unknown", "full-revisionid": None, "dirty": None, "error": "unable to compute version", "date": None} ''' @register_vcs_handler("git", "get_keywords") def git_get_keywords(versionfile_abs): """Extract version information from the given file.""" # the code embedded in _version.py can just fetch the value of these # keywords. When used from setup.py, we don't want to import _version.py, # so we do it with a regexp instead. This function is not used from # _version.py. keywords = {} try: f = open(versionfile_abs, "r") for line in f.readlines(): if line.strip().startswith("git_refnames ="): mo = re.search(r'=\s*"(.*)"', line) if mo: keywords["refnames"] = mo.group(1) if line.strip().startswith("git_full ="): mo = re.search(r'=\s*"(.*)"', line) if mo: keywords["full"] = mo.group(1) if line.strip().startswith("git_date ="): mo = re.search(r'=\s*"(.*)"', line) if mo: keywords["date"] = mo.group(1) f.close() except EnvironmentError: pass return keywords @register_vcs_handler("git", "keywords") def git_versions_from_keywords(keywords, tag_prefix, verbose): """Get version information from git keywords.""" if not keywords: raise NotThisMethod("no keywords at all, weird") date = keywords.get("date") if date is not None: # git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant # datestamp. 
However we prefer "%ci" (which expands to an "ISO-8601 # -like" string, which we must then edit to make compliant), because # it's been around since git-1.5.3, and it's too difficult to # discover which version we're using, or to work around using an # older one. date = date.strip().replace(" ", "T", 1).replace(" ", "", 1) refnames = keywords["refnames"].strip() if refnames.startswith("$Format"): if verbose: print("keywords are unexpanded, not using") raise NotThisMethod("unexpanded keywords, not a git-archive tarball") refs = set([r.strip() for r in refnames.strip("()").split(",")]) # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of # just "foo-1.0". If we see a "tag: " prefix, prefer those. TAG = "tag: " tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)]) if not tags: # Either we're using git < 1.8.3, or there really are no tags. We use # a heuristic: assume all version tags have a digit. The old git %d # expansion behaves like git log --decorate=short and strips out the # refs/heads/ and refs/tags/ prefixes that would let us distinguish # between branches and tags. By ignoring refnames without digits, we # filter out many common branch names like "release" and # "stabilization", as well as "HEAD" and "master". tags = set([r for r in refs if re.search(r'\d', r)]) if verbose: print("discarding '%s', no digits" % ",".join(refs - tags)) if verbose: print("likely tags: %s" % ",".join(sorted(tags))) for ref in sorted(tags): # sorting will prefer e.g. 
"2.0" over "2.0rc1" if ref.startswith(tag_prefix): r = ref[len(tag_prefix):] if verbose: print("picking %s" % r) return {"version": r, "full-revisionid": keywords["full"].strip(), "dirty": False, "error": None, "date": date} # no suitable tags, so version is "0+unknown", but full hex is still there if verbose: print("no suitable tags, using unknown + full revision id") return {"version": "0+unknown", "full-revisionid": keywords["full"].strip(), "dirty": False, "error": "no suitable tags", "date": None} @register_vcs_handler("git", "pieces_from_vcs") def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): """Get version from 'git describe' in the root of the source tree. This only gets called if the git-archive 'subst' keywords were *not* expanded, and _version.py hasn't already been rewritten with a short version string, meaning we're inside a checked out source tree. """ GITS = ["git"] if sys.platform == "win32": GITS = ["git.cmd", "git.exe"] out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root, hide_stderr=True) if rc != 0: if verbose: print("Directory %s not under git control" % root) raise NotThisMethod("'git rev-parse --git-dir' returned error") # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty] # if there isn't one, this yields HEX[-dirty] (no NUM) describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty", "--always", "--long", "--match", "%s*" % tag_prefix], cwd=root) # --long was added in git-1.5.5 if describe_out is None: raise NotThisMethod("'git describe' failed") describe_out = describe_out.strip() full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root) if full_out is None: raise NotThisMethod("'git rev-parse' failed") full_out = full_out.strip() pieces = {} pieces["long"] = full_out pieces["short"] = full_out[:7] # maybe improved later pieces["error"] = None # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty] # TAG might have hyphens. 
git_describe = describe_out # look for -dirty suffix dirty = git_describe.endswith("-dirty") pieces["dirty"] = dirty if dirty: git_describe = git_describe[:git_describe.rindex("-dirty")] # now we have TAG-NUM-gHEX or HEX if "-" in git_describe: # TAG-NUM-gHEX mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe) if not mo: # unparseable. Maybe git-describe is misbehaving? pieces["error"] = ("unable to parse git-describe output: '%s'" % describe_out) return pieces # tag full_tag = mo.group(1) if not full_tag.startswith(tag_prefix): if verbose: fmt = "tag '%s' doesn't start with prefix '%s'" print(fmt % (full_tag, tag_prefix)) pieces["error"] = ("tag '%s' doesn't start with prefix '%s'" % (full_tag, tag_prefix)) return pieces pieces["closest-tag"] = full_tag[len(tag_prefix):] # distance: number of commits since tag pieces["distance"] = int(mo.group(2)) # commit: short hex revision ID pieces["short"] = mo.group(3) else: # HEX: no tags pieces["closest-tag"] = None count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"], cwd=root) pieces["distance"] = int(count_out) # total number of commits # commit date: see ISO-8601 comment in git_versions_from_keywords() date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root)[0].strip() pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1) return pieces def do_vcs_install(manifest_in, versionfile_source, ipy): """Git-specific installation logic for Versioneer. For Git, this means creating/changing .gitattributes to mark _version.py for export-subst keyword substitution. 
""" GITS = ["git"] if sys.platform == "win32": GITS = ["git.cmd", "git.exe"] files = [manifest_in, versionfile_source] if ipy: files.append(ipy) try: me = __file__ if me.endswith(".pyc") or me.endswith(".pyo"): me = os.path.splitext(me)[0] + ".py" versioneer_file = os.path.relpath(me) except NameError: versioneer_file = "versioneer.py" files.append(versioneer_file) present = False try: f = open(".gitattributes", "r") for line in f.readlines(): if line.strip().startswith(versionfile_source): if "export-subst" in line.strip().split()[1:]: present = True f.close() except EnvironmentError: pass if not present: f = open(".gitattributes", "a+") f.write("%s export-subst\n" % versionfile_source) f.close() files.append(".gitattributes") run_command(GITS, ["add", "--"] + files) def versions_from_parentdir(parentdir_prefix, root, verbose): """Try to determine the version from the parent directory name. Source tarballs conventionally unpack into a directory that includes both the project name and a version string. We will also support searching up two directory levels for an appropriately named parent directory """ rootdirs = [] for i in range(3): dirname = os.path.basename(root) if dirname.startswith(parentdir_prefix): return {"version": dirname[len(parentdir_prefix):], "full-revisionid": None, "dirty": False, "error": None, "date": None} else: rootdirs.append(root) root = os.path.dirname(root) # up a level if verbose: print("Tried directories %s but none started with prefix %s" % (str(rootdirs), parentdir_prefix)) raise NotThisMethod("rootdir doesn't start with parentdir_prefix") SHORT_VERSION_PY = """ # This file was generated by 'versioneer.py' (0.18) from # revision-control system data, or from the parent directory name of an # unpacked source archive. Distribution tarballs contain a pre-generated copy # of this file. 
from __future__ import absolute_import import json version_json = ''' %s ''' # END VERSION_JSON def get_versions(): return json.loads(version_json) """ def versions_from_file(filename): """Try to determine the version from _version.py if present.""" try: with open(filename) as f: contents = f.read() except EnvironmentError: raise NotThisMethod("unable to read _version.py") mo = re.search(r"version_json = '''\n(.*)''' # END VERSION_JSON", contents, re.M | re.S) if not mo: mo = re.search(r"version_json = '''\r\n(.*)''' # END VERSION_JSON", contents, re.M | re.S) if not mo: raise NotThisMethod("no version_json in _version.py") return json.loads(mo.group(1)) def write_to_version_file(filename, versions): """Write the given version number to the given _version.py file.""" os.unlink(filename) contents = json.dumps(versions, sort_keys=True, indent=1, separators=(",", ": ")) with open(filename, "w") as f: f.write(SHORT_VERSION_PY % contents) print("set %s to '%s'" % (filename, versions["version"])) def plus_or_dot(pieces): """Return a + if we don't already have one, else return a .""" if "+" in pieces.get("closest-tag", ""): return "." return "+" def render_pep440(pieces): """Build up version string, with post-release "local version identifier". Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty Exceptions: 1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: rendered += plus_or_dot(pieces) rendered += "%d.g%s" % (pieces["distance"], pieces["short"]) if pieces["dirty"]: rendered += ".dirty" else: # exception #1 rendered = "0+untagged.%d.g%s" % (pieces["distance"], pieces["short"]) if pieces["dirty"]: rendered += ".dirty" return rendered def render_pep440_pre(pieces): """TAG[.post.devDISTANCE] -- No -dirty. Exceptions: 1: no tags. 
0.post.devDISTANCE """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"]: rendered += ".post.dev%d" % pieces["distance"] else: # exception #1 rendered = "0.post.dev%d" % pieces["distance"] return rendered def render_pep440_post(pieces): """TAG[.postDISTANCE[.dev0]+gHEX] . The ".dev0" means dirty. Note that .dev0 sorts backwards (a dirty tree will appear "older" than the corresponding clean one), but you shouldn't be releasing software with -dirty anyways. Exceptions: 1: no tags. 0.postDISTANCE[.dev0] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: rendered += ".post%d" % pieces["distance"] if pieces["dirty"]: rendered += ".dev0" rendered += plus_or_dot(pieces) rendered += "g%s" % pieces["short"] else: # exception #1 rendered = "0.post%d" % pieces["distance"] if pieces["dirty"]: rendered += ".dev0" rendered += "+g%s" % pieces["short"] return rendered def render_pep440_old(pieces): """TAG[.postDISTANCE[.dev0]] . The ".dev0" means dirty. Eexceptions: 1: no tags. 0.postDISTANCE[.dev0] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: rendered += ".post%d" % pieces["distance"] if pieces["dirty"]: rendered += ".dev0" else: # exception #1 rendered = "0.post%d" % pieces["distance"] if pieces["dirty"]: rendered += ".dev0" return rendered def render_git_describe(pieces): """TAG[-DISTANCE-gHEX][-dirty]. Like 'git describe --tags --dirty --always'. Exceptions: 1: no tags. HEX[-dirty] (note: no 'g' prefix) """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"]: rendered += "-%d-g%s" % (pieces["distance"], pieces["short"]) else: # exception #1 rendered = pieces["short"] if pieces["dirty"]: rendered += "-dirty" return rendered def render_git_describe_long(pieces): """TAG-DISTANCE-gHEX[-dirty]. Like 'git describe --tags --dirty --always -long'. The distance/hash is unconditional. Exceptions: 1: no tags. 
HEX[-dirty] (note: no 'g' prefix) """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] rendered += "-%d-g%s" % (pieces["distance"], pieces["short"]) else: # exception #1 rendered = pieces["short"] if pieces["dirty"]: rendered += "-dirty" return rendered def render(pieces, style): """Render the given version pieces into the requested style.""" if pieces["error"]: return {"version": "unknown", "full-revisionid": pieces.get("long"), "dirty": None, "error": pieces["error"], "date": None} if not style or style == "default": style = "pep440" # the default if style == "pep440": rendered = render_pep440(pieces) elif style == "pep440-pre": rendered = render_pep440_pre(pieces) elif style == "pep440-post": rendered = render_pep440_post(pieces) elif style == "pep440-old": rendered = render_pep440_old(pieces) elif style == "git-describe": rendered = render_git_describe(pieces) elif style == "git-describe-long": rendered = render_git_describe_long(pieces) else: raise ValueError("unknown style '%s'" % style) return {"version": rendered, "full-revisionid": pieces["long"], "dirty": pieces["dirty"], "error": None, "date": pieces.get("date")} class VersioneerBadRootError(Exception): """The project root directory is unknown or missing key files.""" def get_versions(verbose=False): """Get the project version from whatever source is available. Returns dict with two keys: 'version' and 'full'. 
""" if "versioneer" in sys.modules: # see the discussion in cmdclass.py:get_cmdclass() del sys.modules["versioneer"] root = get_root() cfg = get_config_from_root(root) assert cfg.VCS is not None, "please set [versioneer]VCS= in setup.cfg" handlers = HANDLERS.get(cfg.VCS) assert handlers, "unrecognized VCS '%s'" % cfg.VCS verbose = verbose or cfg.verbose assert cfg.versionfile_source is not None, \ "please set versioneer.versionfile_source" assert cfg.tag_prefix is not None, "please set versioneer.tag_prefix" versionfile_abs = os.path.join(root, cfg.versionfile_source) # extract version from first of: _version.py, VCS command (e.g. 'git # describe'), parentdir. This is meant to work for developers using a # source checkout, for users of a tarball created by 'setup.py sdist', # and for users of a tarball/zipball created by 'git archive' or github's # download-from-tag feature or the equivalent in other VCSes. get_keywords_f = handlers.get("get_keywords") from_keywords_f = handlers.get("keywords") if get_keywords_f and from_keywords_f: try: keywords = get_keywords_f(versionfile_abs) ver = from_keywords_f(keywords, cfg.tag_prefix, verbose) if verbose: print("got version from expanded keyword %s" % ver) return ver except NotThisMethod: pass try: ver = versions_from_file(versionfile_abs) if verbose: print("got version from file %s %s" % (versionfile_abs, ver)) return ver except NotThisMethod: pass from_vcs_f = handlers.get("pieces_from_vcs") if from_vcs_f: try: pieces = from_vcs_f(cfg.tag_prefix, root, verbose) ver = render(pieces, cfg.style) if verbose: print("got version from VCS %s" % ver) return ver except NotThisMethod: pass try: if cfg.parentdir_prefix: ver = versions_from_parentdir(cfg.parentdir_prefix, root, verbose) if verbose: print("got version from parentdir %s" % ver) return ver except NotThisMethod: pass if verbose: print("unable to compute version") return {"version": "0+unknown", "full-revisionid": None, "dirty": None, "error": "unable to compute 
version", "date": None} def get_version(): """Get the short version string for this project.""" return get_versions()["version"] def get_cmdclass(): """Get the custom setuptools/distutils subclasses used by Versioneer.""" if "versioneer" in sys.modules: del sys.modules["versioneer"] # this fixes the "python setup.py develop" case (also 'install' and # 'easy_install .'), in which subdependencies of the main project are # built (using setup.py bdist_egg) in the same python process. Assume # a main project A and a dependency B, which use different versions # of Versioneer. A's setup.py imports A's Versioneer, leaving it in # sys.modules by the time B's setup.py is executed, causing B to run # with the wrong versioneer. Setuptools wraps the sub-dep builds in a # sandbox that restores sys.modules to it's pre-build state, so the # parent is protected against the child's "import versioneer". By # removing ourselves from sys.modules here, before the child build # happens, we protect the child from the parent's versioneer too. # Also see https://github.com/warner/python-versioneer/issues/52 cmds = {} # we add "version" to both distutils and setuptools from distutils.core import Command class cmd_version(Command): description = "report generated version string" user_options = [] boolean_options = [] def initialize_options(self): pass def finalize_options(self): pass def run(self): vers = get_versions(verbose=True) print("Version: %s" % vers["version"]) print(" full-revisionid: %s" % vers.get("full-revisionid")) print(" dirty: %s" % vers.get("dirty")) print(" date: %s" % vers.get("date")) if vers["error"]: print(" error: %s" % vers["error"]) cmds["version"] = cmd_version # we override "build_py" in both distutils and setuptools # # most invocation pathways end up running build_py: # distutils/build -> build_py # distutils/install -> distutils/build ->.. # setuptools/bdist_wheel -> distutils/install ->.. 
# setuptools/bdist_egg -> distutils/install_lib -> build_py # setuptools/install -> bdist_egg ->.. # setuptools/develop -> ? # pip install: # copies source tree to a tempdir before running egg_info/etc # if .git isn't copied too, 'git describe' will fail # then does setup.py bdist_wheel, or sometimes setup.py install # setup.py egg_info -> ? # we override different "build_py" commands for both environments if "setuptools" in sys.modules: from setuptools.command.build_py import build_py as _build_py else: from distutils.command.build_py import build_py as _build_py class cmd_build_py(_build_py): def run(self): root = get_root() cfg = get_config_from_root(root) versions = get_versions() _build_py.run(self) # now locate _version.py in the new build/ directory and replace # it with an updated value if cfg.versionfile_build: target_versionfile = os.path.join(self.build_lib, cfg.versionfile_build) print("UPDATING %s" % target_versionfile) write_to_version_file(target_versionfile, versions) cmds["build_py"] = cmd_build_py if "cx_Freeze" in sys.modules: # cx_freeze enabled? from cx_Freeze.dist import build_exe as _build_exe # nczeczulin reports that py2exe won't like the pep440-style string # as FILEVERSION, but it can be used for PRODUCTVERSION, e.g. # setup(console=[{ # "version": versioneer.get_version().split("+", 1)[0], # FILEVERSION # "product_version": versioneer.get_version(), # ... 
class cmd_build_exe(_build_exe): def run(self): root = get_root() cfg = get_config_from_root(root) versions = get_versions() target_versionfile = cfg.versionfile_source print("UPDATING %s" % target_versionfile) write_to_version_file(target_versionfile, versions) _build_exe.run(self) os.unlink(target_versionfile) with open(cfg.versionfile_source, "w") as f: LONG = LONG_VERSION_PY[cfg.VCS] f.write(LONG % {"DOLLAR": "$", "STYLE": cfg.style, "TAG_PREFIX": cfg.tag_prefix, "PARENTDIR_PREFIX": cfg.parentdir_prefix, "VERSIONFILE_SOURCE": cfg.versionfile_source, }) cmds["build_exe"] = cmd_build_exe del cmds["build_py"] if 'py2exe' in sys.modules: # py2exe enabled? try: from py2exe.distutils_buildexe import py2exe as _py2exe # py3 except ImportError: from py2exe.build_exe import py2exe as _py2exe # py2 class cmd_py2exe(_py2exe): def run(self): root = get_root() cfg = get_config_from_root(root) versions = get_versions() target_versionfile = cfg.versionfile_source print("UPDATING %s" % target_versionfile) write_to_version_file(target_versionfile, versions) _py2exe.run(self) os.unlink(target_versionfile) with open(cfg.versionfile_source, "w") as f: LONG = LONG_VERSION_PY[cfg.VCS] f.write(LONG % {"DOLLAR": "$", "STYLE": cfg.style, "TAG_PREFIX": cfg.tag_prefix, "PARENTDIR_PREFIX": cfg.parentdir_prefix, "VERSIONFILE_SOURCE": cfg.versionfile_source, }) cmds["py2exe"] = cmd_py2exe # we override different "sdist" commands for both environments if "setuptools" in sys.modules: from setuptools.command.sdist import sdist as _sdist else: from distutils.command.sdist import sdist as _sdist class cmd_sdist(_sdist): def run(self): versions = get_versions() self._versioneer_generated_versions = versions # unless we update this, the command will keep using the old # version self.distribution.metadata.version = versions["version"] return _sdist.run(self) def make_release_tree(self, base_dir, files): root = get_root() cfg = get_config_from_root(root) _sdist.make_release_tree(self, base_dir, 
files) # now locate _version.py in the new base_dir directory # (remembering that it may be a hardlink) and replace it with an # updated value target_versionfile = os.path.join(base_dir, cfg.versionfile_source) print("UPDATING %s" % target_versionfile) write_to_version_file(target_versionfile, self._versioneer_generated_versions) cmds["sdist"] = cmd_sdist return cmds CONFIG_ERROR = """ setup.cfg is missing the necessary Versioneer configuration. You need a section like: [versioneer] VCS = git style = pep440 versionfile_source = src/myproject/_version.py versionfile_build = myproject/_version.py tag_prefix = parentdir_prefix = myproject- You will also need to edit your setup.py to use the results: import versioneer setup(version=versioneer.get_version(), cmdclass=versioneer.get_cmdclass(), ...) Please read the docstring in ./versioneer.py for configuration instructions, edit setup.cfg, and re-run the installer or 'python versioneer.py setup'. """ SAMPLE_CONFIG = """ # See the docstring in versioneer.py for instructions. Note that you must # re-run 'versioneer.py setup' after changing this section, and commit the # resulting files. 
[versioneer] #VCS = git #style = pep440 #versionfile_source = #versionfile_build = #tag_prefix = #parentdir_prefix = """ INIT_PY_SNIPPET = """ from ._version import get_versions __version__ = get_versions()['version'] del get_versions """ def do_setup(): """Main VCS-independent setup function for installing Versioneer.""" root = get_root() try: cfg = get_config_from_root(root) except (EnvironmentError, configparser.NoSectionError, configparser.NoOptionError) as e: if isinstance(e, (EnvironmentError, configparser.NoSectionError)): print("Adding sample versioneer config to setup.cfg", file=sys.stderr) with open(os.path.join(root, "setup.cfg"), "a") as f: f.write(SAMPLE_CONFIG) print(CONFIG_ERROR, file=sys.stderr) return 1 print(" creating %s" % cfg.versionfile_source) with open(cfg.versionfile_source, "w") as f: LONG = LONG_VERSION_PY[cfg.VCS] f.write(LONG % {"DOLLAR": "$", "STYLE": cfg.style, "TAG_PREFIX": cfg.tag_prefix, "PARENTDIR_PREFIX": cfg.parentdir_prefix, "VERSIONFILE_SOURCE": cfg.versionfile_source, }) ipy = os.path.join(os.path.dirname(cfg.versionfile_source), "__init__.py") if os.path.exists(ipy): try: with open(ipy, "r") as f: old = f.read() except EnvironmentError: old = "" if INIT_PY_SNIPPET not in old: print(" appending to %s" % ipy) with open(ipy, "a") as f: f.write(INIT_PY_SNIPPET) else: print(" %s unmodified" % ipy) else: print(" %s doesn't exist, ok" % ipy) ipy = None # Make sure both the top-level "versioneer.py" and versionfile_source # (PKG/_version.py, used by runtime code) are in MANIFEST.in, so # they'll be copied into source distributions. Pip won't be able to # install the package without this. 
manifest_in = os.path.join(root, "MANIFEST.in") simple_includes = set() try: with open(manifest_in, "r") as f: for line in f: if line.startswith("include "): for include in line.split()[1:]: simple_includes.add(include) except EnvironmentError: pass # That doesn't cover everything MANIFEST.in can do # (http://docs.python.org/2/distutils/sourcedist.html#commands), so # it might give some false negatives. Appending redundant 'include' # lines is safe, though. if "versioneer.py" not in simple_includes: print(" appending 'versioneer.py' to MANIFEST.in") with open(manifest_in, "a") as f: f.write("include versioneer.py\n") else: print(" 'versioneer.py' already in MANIFEST.in") if cfg.versionfile_source not in simple_includes: print(" appending versionfile_source ('%s') to MANIFEST.in" % cfg.versionfile_source) with open(manifest_in, "a") as f: f.write("include %s\n" % cfg.versionfile_source) else: print(" versionfile_source already in MANIFEST.in") # Make VCS-specific changes. For git, this means creating/changing # .gitattributes to mark _version.py for export-subst keyword # substitution. do_vcs_install(manifest_in, cfg.versionfile_source, ipy) return 0 def scan_setup_py(): """Validate the contents of setup.py against Versioneer's expectations.""" found = set() setters = False errors = 0 with open("setup.py", "r") as f: for line in f.readlines(): if "import versioneer" in line: found.add("import") if "versioneer.get_cmdclass()" in line: found.add("cmdclass") if "versioneer.get_version()" in line: found.add("get_version") if "versioneer.VCS" in line: setters = True if "versioneer.versionfile_source" in line: setters = True if len(found) != 3: print("") print("Your setup.py appears to be missing some important items") print("(but I might be wrong). 
Please make sure it has something") print("roughly like the following:") print("") print(" import versioneer") print(" setup( version=versioneer.get_version(),") print(" cmdclass=versioneer.get_cmdclass(), ...)") print("") errors += 1 if setters: print("You should remove lines like 'versioneer.VCS = ' and") print("'versioneer.versionfile_source = ' . This configuration") print("now lives in setup.cfg, and should be removed from setup.py") print("") errors += 1 return errors if __name__ == "__main__": cmd = sys.argv[1] if cmd == "setup": errors = do_setup() errors += scan_setup_py() if errors: sys.exit(1) eliot-1.11.0/docs/0000775000175000017500000000000013573001162015330 5ustar itamarstitamarst00000000000000eliot-1.11.0/docs/Makefile0000664000175000017500000001515713460352650017007 0ustar itamarstitamarst00000000000000# Makefile for Sphinx documentation # # You can set these variables from the command line. SPHINXOPTS = SPHINXBUILD = sphinx-build PAPER = BUILDDIR = build # User-friendly check for sphinx-build ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1) $(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/) endif # Internal variables. 
PAPEROPT_a4 = -D latex_paper_size=a4 PAPEROPT_letter = -D latex_paper_size=letter ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source # the i18n builder cannot share the environment and doctrees with the others I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source .PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext help: @echo "Please use \`make ' where is one of" @echo " html to make standalone HTML files" @echo " dirhtml to make HTML files named index.html in directories" @echo " singlehtml to make a single large HTML file" @echo " pickle to make pickle files" @echo " json to make JSON files" @echo " htmlhelp to make HTML files and a HTML help project" @echo " qthelp to make HTML files and a qthelp project" @echo " devhelp to make HTML files and a Devhelp project" @echo " epub to make an epub" @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" @echo " latexpdf to make LaTeX files and run them through pdflatex" @echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx" @echo " text to make text files" @echo " man to make manual pages" @echo " texinfo to make Texinfo files" @echo " info to make Texinfo files and run them through makeinfo" @echo " gettext to make PO message catalogs" @echo " changes to make an overview of all changed/added/deprecated items" @echo " xml to make Docutils-native XML files" @echo " pseudoxml to make pseudoxml-XML files for display purposes" @echo " linkcheck to check all external links for integrity" @echo " doctest to run all doctests embedded in the documentation (if enabled)" clean: rm -rf $(BUILDDIR)/* html: $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." dirhtml: $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml @echo @echo "Build finished. 
The HTML pages are in $(BUILDDIR)/dirhtml." singlehtml: $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml @echo @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." pickle: $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle @echo @echo "Build finished; now you can process the pickle files." json: $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json @echo @echo "Build finished; now you can process the JSON files." htmlhelp: $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp @echo @echo "Build finished; now you can run HTML Help Workshop with the" \ ".hhp project file in $(BUILDDIR)/htmlhelp." qthelp: $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp @echo @echo "Build finished; now you can run "qcollectiongenerator" with the" \ ".qhcp project file in $(BUILDDIR)/qthelp, like this:" @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/Eliot.qhcp" @echo "To view the help file:" @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/Eliot.qhc" devhelp: $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp @echo @echo "Build finished." @echo "To view the help file:" @echo "# mkdir -p $$HOME/.local/share/devhelp/Eliot" @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/Eliot" @echo "# devhelp" epub: $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub @echo @echo "Build finished. The epub file is in $(BUILDDIR)/epub." latex: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." @echo "Run \`make' in that directory to run these through (pdf)latex" \ "(use \`make latexpdf' here to do that automatically)." latexpdf: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo "Running LaTeX files through pdflatex..." $(MAKE) -C $(BUILDDIR)/latex all-pdf @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." 
latexpdfja: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo "Running LaTeX files through platex and dvipdfmx..." $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." text: $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text @echo @echo "Build finished. The text files are in $(BUILDDIR)/text." man: $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man @echo @echo "Build finished. The manual pages are in $(BUILDDIR)/man." texinfo: $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo @echo @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." @echo "Run \`make' in that directory to run these through makeinfo" \ "(use \`make info' here to do that automatically)." info: $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo @echo "Running Texinfo files through makeinfo..." make -C $(BUILDDIR)/texinfo info @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." gettext: $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale @echo @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." changes: $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes @echo @echo "The overview file is in $(BUILDDIR)/changes." linkcheck: $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck @echo @echo "Link check complete; look for any errors in the above output " \ "or in $(BUILDDIR)/linkcheck/output.txt." doctest: $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest @echo "Testing of doctests in the sources finished, look at the " \ "results in $(BUILDDIR)/doctest/output.txt." xml: $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml @echo @echo "Build finished. The XML files are in $(BUILDDIR)/xml." pseudoxml: $(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml @echo @echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml." 
eliot-1.11.0/docs/source/0000775000175000017500000000000013573001162016630 5ustar itamarstitamarst00000000000000eliot-1.11.0/docs/source/index.rst0000664000175000017500000000650213573001140020470 0ustar itamarstitamarst00000000000000Eliot: Logging that tells you *why* it happened ================================================ Python's built-in ``logging`` and other similar systems output a stream of factoids: they're interesting, but you can't really tell what's going on. * Why is your application slow? * What caused this code path to be chosen? * Why did this error happen? Standard logging can't answer these questions. But with a better model you could understand what and why things happened in your application. You could pinpoint performance bottlenecks, you could understand what happened when, who called what. That is what Eliot does. ``eliot`` is a Python logging system that outputs causal chains of **actions**: actions can spawn other actions, and eventually they either **succeed or fail**. The resulting logs tell you the story of what your software did: what happened, and what caused it. Eliot supports a range of use cases and 3rd party libraries: * Logging within a single process. * Causal tracing across a distributed system. * Scientific computing, with :doc:`built-in support for NumPy and Dask `. * :doc:`Asyncio and Trio coroutines ` and the :doc:`Twisted networking framework `. Eliot is only used to generate your logs; you might still need tools like Logstash and ElasticSearch to aggregate and store logs if you are using multiple processes across multiple machines. * **Start here:** :doc:`Quickstart documentation ` * Need help or have any questions? `File an issue `_. * Eliot is licensed under the `Apache 2.0 license `_, and the source code is `available on GitHub `_. * Eliot supports Python 3.8, 3.7, 3.6, and 3.5, as well as PyPy3. Python 2.7 is in legacy support mode (see :ref:`python2` for details). 
* **Commercial support** is available from `Python⇒Speed `_. * Read on for the full documentation. Media ----- `PyCon 2019 talk: Logging for Scientific Computing `_ (also available in a `prose version `_). `Podcast.__init__ episode 133 `_ covers Eliot: .. raw:: html Testimonials ------------ "Eliot has made tracking down causes of failure (in complex external integrations and internal uses) tremendously easier. Our errors are logged to Sentry with the Eliot task UUID. That means we can go from a Sentry notification to a high-level trace of operations—with important metadata at each operation—in a few seconds. We immediately know which user did what in which part of the system." —Jonathan Jacobs Documentation ------------- .. toctree:: :maxdepth: 2 :titlesonly: quickstart introduction news generating/index outputting/index reading/index scientific-computing python2 development eliot-1.11.0/docs/source/scientific-computing.rst0000664000175000017500000001713113470775105023523 0ustar itamarstitamarst00000000000000Scientific Computing with Eliot =============================== When it takes hours or days to run your computation, it can take a long time before you notice something has gone wrong, so your feedback cycle for fixes can be very slow. If you want to solve problems quickly—whether it's inconsistent results, crashes, or slowness—you need to understand what was going on in your process as it was running: you need logging. Eliot is an ideal logging library for these cases: * It provides structured logging, instead of prose, so you can see inputs, outputs, and intermediate results of your calculations. * It gives you a trace of what happened, including causality: instead of just knowing that ``f()`` was called, you can distinguish between calls to ``f()`` from different code paths. * It supports scientific libraries: NumPy and Dask. 
By default, Eliot will automatically serialize NumPy integers, floats, arrays, and bools to JSON (see :ref:`custom_json` for details). At PyCon 2019 Itamar Turner-Trauring gave talk about logging for scientific computing, in part using Eliot—you can `watch the video `_ or `read a prose version `_. .. _large_numpy_arrays: Logging large arrays -------------------- Logging large arrays is a problem: it will take a lot of CPU, and it's no fun discovering that your batch process was slow because you mistakenly logged an array with 30 million integers every time you called a core function. So how do you deal with logging large arrays? 1. **Log a summary (default behavior):** By default, if you log an array with size > 10,000, Eliot will only log the first 10,000 values, along with the shape. 2. **Omit the array:** You can also just choose not to log the array at all. With ``log_call`` you can use the ``include_args`` parameter to ensure the array isn't logged (see :ref:`log_call decorator`). With ``start_action`` you can just not pass it in. 3. **Manual transformation:** If you're using ``start_action`` you can also manually modify the array yourself before passing it in. For example, you could write it to some sort of temporary storage, and then log the path to that file. Or you could summarize it some other way than the default. .. _dask_usage: Using Dask ---------- If you're using the `Dask `_ distributed computing framework, you can automatically use Eliot to trace computations across multiple processes or even machines. This is mostly useful for Dask's Bag and Delayed support, but can also be used with arrays and dataframes. In order to do this you will need to: * Ensure all worker processes write the Eliot logs to disk (if you're using the ``multiprocessing`` or ``distributed`` backends). * If you're using multiple worker machines, aggregate all log files into a single place, so you can more easily analyze them with e.g. `eliot-tree `_. 
* Replace ``dask.compute()`` with ``eliot.dask.compute_with_trace()``. In the following example, you can see how this works for a Dask run using ``distributed``, the recommended Dask scheduler. We'll be using multiple worker processes, but only use a single machine: .. literalinclude:: ../../examples/dask_eliot.py In the output you can see how the various Dask tasks depend on each other, and the full trace of the computation: .. code-block:: shell-session $ python examples/dask_eliot.py Result: 42 $ ls *.log 7254.log 7269.log 7271.log 7273.log $ eliot-tree *.log ca126b8a-c611-447e-aaa7-f61701e2a371 └── main_computation/1 ⇒ started 2019-01-01 17:27:13 ⧖ 0.047s ├── dask:compute/2/1 ⇒ started 2019-01-01 17:27:13 ⧖ 0.029s │ ├── eliot:remote_task/2/8/1 ⇒ started 2019-01-01 17:27:13 ⧖ 0.001s │ │ ├── dask:task/2/8/2 2019-01-01 17:27:13 │ │ │ ├── dependencies: │ │ │ │ └── 0: map-multiply-75feec3a197bf253863e330f3483d3ac-0 │ │ │ └── key: reduce-part-71950de8264334e8cea3cc79d1c2e639-0 │ │ ├── multiply/2/8/3/1 ⇒ started 2019-01-01 17:27:13 ⧖ 0.000s │ │ │ ├── x: 1 │ │ │ ├── y: 7 │ │ │ └── multiply/2/8/3/2 ⇒ succeeded 2019-01-01 17:27:13 │ │ │ └── result: 7 │ │ └── eliot:remote_task/2/8/4 ⇒ succeeded 2019-01-01 17:27:13 │ ├── eliot:remote_task/2/9/1 ⇒ started 2019-01-01 17:27:13 ⧖ 0.001s │ │ ├── dask:task/2/9/2 2019-01-01 17:27:13 │ │ │ ├── dependencies: │ │ │ │ └── 0: map-multiply-75feec3a197bf253863e330f3483d3ac-1 │ │ │ └── key: reduce-part-71950de8264334e8cea3cc79d1c2e639-1 │ │ ├── multiply/2/9/3/1 ⇒ started 2019-01-01 17:27:13 ⧖ 0.000s │ │ │ ├── x: 2 │ │ │ ├── y: 7 │ │ │ └── multiply/2/9/3/2 ⇒ succeeded 2019-01-01 17:27:13 │ │ │ └── result: 14 │ │ └── eliot:remote_task/2/9/4 ⇒ succeeded 2019-01-01 17:27:13 │ ├── eliot:remote_task/2/10/1 ⇒ started 2019-01-01 17:27:13 ⧖ 0.001s │ │ ├── dask:task/2/10/2 2019-01-01 17:27:13 │ │ │ ├── dependencies: │ │ │ │ └── 0: map-multiply-75feec3a197bf253863e330f3483d3ac-2 │ │ │ └── key: reduce-part-71950de8264334e8cea3cc79d1c2e639-2 │ │ ├── 
multiply/2/10/3/1 ⇒ started 2019-01-01 17:27:13 ⧖ 0.000s │ │ │ ├── x: 3 │ │ │ ├── y: 7 │ │ │ └── multiply/2/10/3/2 ⇒ succeeded 2019-01-01 17:27:13 │ │ │ └── result: 21 │ │ └── eliot:remote_task/2/10/4 ⇒ succeeded 2019-01-01 17:27:13 │ ├── eliot:remote_task/2/11/1 ⇒ started 2019-01-01 17:27:13 ⧖ 0.001s │ │ ├── dask:task/2/11/2 2019-01-01 17:27:13 │ │ │ ├── dependencies: │ │ │ │ ├── 0: reduce-part-71950de8264334e8cea3cc79d1c2e639-0 │ │ │ │ ├── 1: reduce-part-71950de8264334e8cea3cc79d1c2e639-1 │ │ │ │ └── 2: reduce-part-71950de8264334e8cea3cc79d1c2e639-2 │ │ │ └── key: reduce-aggregate-71950de8264334e8cea3cc79d1c2e639 │ │ ├── add/2/11/3/1 ⇒ started 2019-01-01 17:27:13 ⧖ 0.000s │ │ │ ├── x: 7 │ │ │ ├── y: 14 │ │ │ └── add/2/11/3/2 ⇒ succeeded 2019-01-01 17:27:13 │ │ │ └── result: 21 │ │ ├── add/2/11/4/1 ⇒ started 2019-01-01 17:27:13 ⧖ 0.000s │ │ │ ├── x: 21 │ │ │ ├── y: 21 │ │ │ └── add/2/11/4/2 ⇒ succeeded 2019-01-01 17:27:13 │ │ │ └── result: 42 │ │ └── eliot:remote_task/2/11/5 ⇒ succeeded 2019-01-01 17:27:13 │ └── dask:compute/2/12 ⇒ succeeded 2019-01-01 17:27:13 └── main_computation/3 ⇒ succeeded 2019-01-01 17:27:13 └── result: 42 .. warning:: Retries within Dask will result in confusing log messages; this will eventually be fixed in a future release. eliot-1.11.0/docs/source/introduction.rst0000664000175000017500000000775313573001140022113 0ustar itamarstitamarst00000000000000Why Eliot? ========== .. epigraph:: Suppose we turn from outside estimates of a man, to wonder, with keener interest, what is the report of his own consciousness about his doings or capacity: with what hindrances he is carrying on his daily labors; what fading of hopes, or what deeper fixity of self-delusion the years are marking off within him; and with what spirit he wrestles against universal pressure, which will one day be too heavy for him, and bring his heart to its final pause. 
— George Eliot, *Middlemarch* The log messages generated by a piece of software ought to tell a story: what, where, when, even why and how if you’re lucky. But most logging systems omit the all-important *why*. You know that some things happened, but not how they relate to each other. The problem: What caused this to happen? ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Most log messages in your program are going to involve actions: Not long after that dinner-party she had become Mrs. Casaubon, and was on her way to Rome. A marriage has a beginning and eventually an end. The end may be successful, presuming “until death do us part” is a form of success, or a failure. The same is true of all actions, much like function calls in Python are started and eventually return a result or throw an exception. Actions may of course span multiple function calls or extended periods of time. Actions also generate other actions: a marriage leads to a trip to Rome, the trip to Rome might lead to a visit to the Vatican Museum, and so on. Other unrelated actions are occurring at the same time, resulting in a forest of actions, with root actions that grow a tree of child actions. You might want to trace an action from beginning to end, e.g. to measure how long it took to run. You might want to know what high-level action caused a particular unexpected low-level action. You might want to know what actions a specific entity was involved with. None of these are possible in most logging systems since they have no concept of actions. The solution: Eliot ^^^^^^^^^^^^^^^^^^^ Eliot is designed to solve these problems: the basic logging abstraction is the action. An “action” is something with a start and an end; the end can be successful or it can fail due to an exception. Log messages, as well as log actions, know the log action whose context they are running in. The result is a tree of actions. 
In the following example we have one top-level action (the honeymoon), which leads to other action (travel): .. literalinclude:: ../../examples/rometrip_actions.py Actions provide a Python context manager. When the action starts, a start message is logged. If the block finishes successfully a success message is logged for the action; if an exception is thrown a failure message is logged for the action with the exception type and contents. By default the messages are machine-parseable JSON, but for human consumption a visualization is better. Here’s how the log messages generated by the new code look, as summarized by the `eliot-tree `_ tool: .. code-block:: console f9dcc74f-ecda-4543-9e9a-1bb062d199f0 +-- honeymoon@1/started |-- people: ['Mrs. Casaubon', 'Mr. Casaubon'] +-- visited@2,1/started |-- place: Rome, Italy +-- visited@2,2,1/started |-- place: Vatican Museum +-- visited@2,2,2,1/started |-- place: Statue #1 +-- visited@2,2,2,2/succeeded +-- visited@2,2,3,1/started |-- place: Statue #2 +-- visited@2,2,3,2/succeeded +-- visited@2,2,4/succeeded +-- visited@2,3/succeeded +-- honeymoon@3/succeeded No longer isolated fragments of meaning, our log messages are now a story. Log events have context, you can tell where they came from and what they led to without guesswork. Was looking at a statue the result of the honeymoon? It most definitely was. eliot-1.11.0/docs/source/reading/0000775000175000017500000000000013573001162020241 5ustar itamarstitamarst00000000000000eliot-1.11.0/docs/source/reading/index.rst0000664000175000017500000000007613460352650022113 0ustar itamarstitamarst00000000000000Reading Logs ============ .. toctree:: reading fields eliot-1.11.0/docs/source/reading/fields.rst0000664000175000017500000000403113573001140022233 0ustar itamarstitamarst00000000000000Message Fields in Depth ======================= Structure --------- Eliot messages are typically serialized to JSON objects. Fields therefore must have ``str`` as their name. 
Message values must be supported by JSON: ``int``, ``float``, ``None``, ``str``, ``dict`` or ``list``. The latter two can only be composed of other supported types. Built-in Fields --------------- A number of fields are reserved by Eliot's built-in message structure and should not be added to messages you create. All messages contain ``task_uuid`` and ``task_level`` fields. Each message is uniquely identified by the combined values in these fields. For more information see the :ref:`actions and tasks ` documentation. In addition, the following field will also be present: * ``timestamp``: Number of seconds since Unix epoch as a ``float`` (the output of ``time.time()``). Since system time may move backwards and resolution may not be high enough this cannot be relied on for message ordering. Every logged message will have either ``message_type`` or ``action_type`` fields depending whether they originated as a standalone message or as the start or end of an action. Present in regular messages: * ``message_type``: The type of the message, e.g. ``"yourapp:yoursubsystem:yourmessage"``. Present in action messages: * ``action_type``: The type of the action, e.g. ``"yourapp:yoursubsystem:youraction"``. * ``action_status``: One of ``"started"``, ``"succeeded"`` or ``"failed"``. The following fields can be added to your messages, but should preserve the same meaning: * ``exception``: The fully qualified Python name (i.e. import path) of an exception type, e.g. ``"yourpackage.yourmodule.YourException"``. * ``reason``: A prose string explaining why something happened. Avoid usage if possible, better to use structured data. * ``traceback``: A string with a traceback. User-Created Fields ------------------- It is recommended, but not necessary (and perhaps impossible across organizations) that fields with the same name have the same semantic content. 
eliot-1.11.0/docs/source/reading/reading.rst0000664000175000017500000000362713573001140022410 0ustar itamarstitamarst00000000000000Reading and Filtering Eliot Logs ================================ Eliot includes a command-line tool that makes it easier to read JSON-formatted Eliot messages: .. code-block:: shell-session $ python examples/stdout.py | eliot-prettyprint af79ef5c-280c-4b9f-9652-e14deb85d52d@/1 2015-09-25T19:41:37.850208Z another: 1 value: hello 0572701c-e791-48e8-9dd2-1fb3bf06826f@/1 2015-09-25T19:41:38.050767Z another: 2 value: goodbye Run ``eliot-prettyprint --help`` to see the various formatting options; you can for example use a more compact one-message-per-line format. Additionally, the **highly recommended** third-party `eliot-tree`_ tool renders JSON-formatted Eliot messages into a tree visualizing the tasks' actions. Filtering logs -------------- Eliot logs are structured, and by default stored in one JSON object per line. That means you can filter them in multiple ways: 1. Line-oriented tools like grep. You can grep for a particular task's UUIDs, or for a particular message type (e.g. tracebacks). 2. JSON-based filtering tools. `jq`_ allows you to filter a stream of JSON messages. 3. `eliot-tree`_ has some filtering and searching support built-in. For example, here's how you'd extract a particular field with `jq`_: .. code-block:: shell-session $ python examples/stdout.py | jq '.value' "hello" "goodbye" .. _eliot-tree: https://github.com/jonathanj/eliottree .. _jq: https://stedolan.github.io/jq/ .. _parsing_logs: Parsing Logs ------------ Eliot also includes a parser for parsing logs into Python objects: .. 
code-block:: python import json from eliot.parse import Parser def load_messages(logfile_path): for line in open(logfile_path): yield json.loads(line) def parse(logfile_path): for task in Parser.parse_stream(load_messages(logfile_path)): print("Root action type is", task.root().action_type) eliot-1.11.0/docs/source/development.rst0000664000175000017500000000075313470775105021724 0ustar itamarstitamarst00000000000000Contributing to Eliot ^^^^^^^^^^^^^^^^^^^^^ To run the full test suite, the Daemontools package should be installed. All modules should have the ``from __future__ import unicode_literals`` statement, to ensure Unicode is used by default. Coding standard is PEP8, with the only exception being camel case methods for the Twisted-related modules. Some camel case methods remain for backwards compatibility reasons with the old coding standard. You should use ``black`` to format the code. eliot-1.11.0/docs/source/generating/0000775000175000017500000000000013573001162020753 5ustar itamarstitamarst00000000000000eliot-1.11.0/docs/source/generating/actions.rst0000664000175000017500000002102113573001140023135 0ustar itamarstitamarst00000000000000Actions and Tasks ================= Actions: A Start and a Finish ----------------------------- A higher-level construct than messages is the concept of an action. An action can be started, and then finishes either successfully or with some sort of an exception. Success in this case simply means no exception was thrown; the result of an action may be a successful response saying "this did not work". Log messages are emitted for action start and finish. Actions are also nested; one action can be the parent of another. An action's parent is deduced from the Python call stack and context managers like ``Action.context()``. Log messages will also note the action they are part of if they can deduce it from the call stack. 
The result of all this is that you can trace the operation of your code as it logs various actions, and see a narrative of what happened and what caused it to happen. Logging Actions --------------- Here's a basic example of logging an action: .. code-block:: python from eliot import start_action with start_action(action_type=u"store_data"): x = get_data() store_data(x) This will log an action start message and if the block finishes successfully an action success message. If an exception is thrown by the block then an action failure message will be logged along with the exception type and reason as additional fields. Each action thus results in two messages being logged: at the start and finish of the action. No traceback will be logged so if you want a traceback you will need to do so explicitly. Notice that the action has a name, with a subsystem prefix. Again, this should be a logical name. Note that all code called within this block is within the context of this action. While running the block of code within the ``with`` statement new actions created with ``start_action`` will get the top-level ``start_action`` as their parent. .. _log_call decorator: Logging Functions ----------------- If you want to log the inputs and results of a function, you can use the ``log_call`` decorator: .. code-block:: python from eliot import log_call @log_call def calculate(x, y): return x * y This will log an action of type ``calculate`` with arguments ``x`` and ``y``, as well as logging the result. You can also customize the output: .. code-block:: python from eliot import log_call @log_call(action_type="CALC", include_args=["x"], include_result=False) def calculate(x, y): return x * y This changes the action type to ``CALC``, logs only the ``x`` argument, and doesn't log the result. Tasks: Top-level Actions ------------------------ A top-level action with no parent is called a task, the root cause of all its child actions. E.g. 
a web server receiving a new HTTP request would create a task for that new request. Log messages emitted from Eliot are therefore logically structured as a forest: trees of actions with tasks at the root. If you want to ignore the context and create a top-level task you can use the ``eliot.start_task`` API. .. _task fields: From Actions to Messages ------------------------ While the logical structure of log messages is a forest of actions, the actual output is effectively a list of dictionaries (e.g. a series of JSON messages written to a file). To bridge the gap between the two structures each output message contains special fields expressing the logical relationship between it and other messages: * ``task_uuid``: The unique identifier of the task (top-level action) the message is part of. * ``task_level``: The specific location of this message within the task's tree of actions. For example, ``[3, 2, 4]`` indicates the message is the 4th child of the 2nd child of the 3rd child of the task. Consider the following code sample: .. code-block:: python from eliot import start_action, start_task with start_task(action_type="parent") as action: action.log(message_type="info", x=1) with start_action(action_type="child") as action: action.log(message_type="info", x=2) raise RuntimeError("ono") All these messages will share the same UUID in their ``task_uuid`` field, since they are all part of the same high-level task. If you sort the resulting messages by their ``task_level`` you will get the tree of messages: .. 
code:: task_level=[1] action_type="parent" action_status="started" task_level=[2] message_type="info" x=1 task_level=[3, 1] action_type="child" action_status="started" task_level=[3, 2] message_type="info" x=2 task_level=[3, 3] action_type="child" action_status="succeeded" task_level=[4] action_type="parent" action_status="failed" exception="exceptions.RuntimeError" reason="ono" Action Fields ------------- You can add fields to both the start message and the success message of an action. .. code-block:: python from eliot import start_action with start_action(action_type=u"yourapp:subsystem:frob", # Fields added to start message only: key=123, foo=u"bar") as action: x = _beep(123) result = frobinate(x) # Fields added to success message only: action.add_success_fields(result=result) If you want to include some extra information in case of failures beyond the exception you can always log a regular message with that information. Since the message will be recorded inside the context of the action its information will be clearly tied to the result of the action by the person (or code!) reading the logs later on. Using Generators ---------------- Generators (functions with ``yield``) and context managers (``with X:``) don't mix well in Python. So if you're going to use ``with start_action()`` in a generator, just make sure it doesn't wrap a ``yield`` and you'll be fine. Here's what you SHOULD NOT DO: .. code-block:: python def generator(): with start_action(action_type="x"): # BAD! DO NOT yield inside a start_action() block: yield make_result() Here's what you can do instead: .. code-block:: python def generator(): with start_action(action_type="x"): result = make_result() # This is GOOD, no yield inside the start_action() block: yield result Non-Finishing Contexts ---------------------- Sometimes you want to have the action be the context for other messages but not finish automatically when the block finishes. You can do so with ``Action.context()``. 
You can explicitly finish an action by calling ``eliot.Action.finish``. If called with an exception it indicates the action finished unsuccessfully. If called with no arguments it indicates that the action finished successfully. .. code-block:: python from eliot import start_action action = start_action(action_type=u"yourapp:subsystem:frob") try: with action.context(): x = _beep() with action.context(): frobinate(x) # Action still isn't finished, need to do so explicitly. except FrobError as e: action.finish(e) else: action.finish() The ``context()`` method returns the ``Action``: .. code-block:: python from eliot import start_action with start_action(action_type=u"your_type").context() as action: # do some stuff... action.finish() You shouldn't log within an action's context after it has been finished: .. code-block:: python from eliot import start_action with start_action(action_type=u"message_late").context() as action: action.log(message_type=u"ok") # finish the action: action.finish() # Don't do this! This message is being added to a finished action! action.log(message_type=u"late") As an alternative to ``with``, you can also explicitly run a function within the action context: .. code-block:: python from eliot import start_action action = start_action(action_type=u"yourapp:subsystem:frob") # Call do_something(x=1) in context of action, return its result: result = action.run(do_something, x=1) Getting the Current Action -------------------------- Sometimes it can be useful to get the current action. For example, you might want to record the current task UUID for future reference, in a bug report for example. You might also want to pass around the ``Action`` explicitly, rather than relying on the implicit context. You can get the current ``Action`` by calling ``eliot.current_action()``. For example: .. 
code-block:: python from eliot import current_action def get_current_uuid(): return current_action().task_uuid eliot-1.11.0/docs/source/generating/index.rst0000664000175000017500000000024013573001140022604 0ustar itamarstitamarst00000000000000Generating Logs =============== .. toctree:: actions messages errors loglevels migrating threads testing types asyncio twisted eliot-1.11.0/docs/source/generating/loglevels.rst0000664000175000017500000000215013460352650023505 0ustar itamarstitamarst00000000000000Log Levels ========== Eliot does not have a native set of logging levels, as some systems do. It does distinguish between normal log messages and errors—failed actions and tracebacks can both be considered as errors. However, you can add log levels yourself. Generating messages with log levels ----------------------------------- All you need to do to add a log level is just add an appropriate field to your logging, for example: .. code-block:: python from eliot import start_action with start_action(action_type=u"store_data", log_level="INFO"): x = get_data() store_data(x) Choosing log levels ------------------- In an excellent `article by Daniel Lebroro `_, he explains that he chose the logging levels "for test environment", "for production environment", "investigate tomorrow", and "wake me in the middle of the night". These seem rather more informative and useful than "INFO" or "WARN". If you are implementing a service you will be running, consider choosing log levels that are meaningful on an organizational level. eliot-1.11.0/docs/source/generating/testing.rst0000664000175000017500000002355413573001140023167 0ustar itamarstitamarst00000000000000Unit Testing Your Logging ========================= Now that you've got some code emitting log messages (or even better, before you've written the code) you can write unit tests to verify it. Given good test coverage all code branches should already be covered by tests unrelated to logging. 
Logging can be considered just another aspect of testing those code branches. Rather than recreating all those tests as separate functions Eliot provides a decorator that allows adding logging assertions to existing tests. Linting your logs ----------------- Decorating a test function with ``eliot.testing.capture_logging`` validation will ensure that: 1. You haven't logged anything that isn't JSON serializable. 2. There are no unexpected tracebacks, indicating a bug somewhere in your code. .. code-block:: python from eliot.testing import capture_logging class MyTest(unittest.TestCase): @capture_logging(None) def test_mytest(self, logger): call_my_function() Making assertions about the logs -------------------------------- You can also ensure the correct messages were logged. .. code-block:: python from eliot import log_message class UserRegistration(object): def __init__(self): self.db = {} def register(self, username, password, age): self.db[username] = (password, age) log_message(message_type="user_registration", username=username, password=password, age=age) Here's how we'd test it: .. code-block:: python from unittest import TestCase from eliot import MemoryLogger from eliot.testing import assertContainsFields, capture_logging from myapp.registration import UserRegistration class LoggingTests(TestCase): def assertRegistrationLogging(self, logger): """ Logging assertions for test_registration. """ self.assertEqual(len(logger.messages), 1) msg = logger.messages[0] assertContainsFields(self, msg, {u"username": u"john", u"password": u"password", u"age": 12}) @capture_logging(assertRegistrationLogging) def test_registration(self, logger): """ Registration adds entries to the in-memory database. 
""" registry = UserRegistration() registry.register(u"john", u"password", 12) self.assertEqual(registry.db[u"john"], (u"passsword", 12)) Testing Tracebacks ------------------ Tests decorated with ``@capture_logging`` will fail if there are any tracebacks logged (using ``write_traceback`` or ``writeFailure``) on the theory that these are unexpected errors indicating a bug. If you expected a particular traceback to be logged you can call ``MemoryLogger.flush_tracebacks``, after which it will no longer cause a test failure. The result will be a list of traceback message dictionaries for the particular exception. .. code-block:: python from unittest import TestCase from eliot.testing import capture_logging class MyTests(TestCase): def assertMythingBadPathLogging(self, logger): messages = logger.flush_tracebacks(OSError) self.assertEqual(len(messages), 1) @capture_logging(assertMythingBadPathLogging) def test_mythingBadPath(self, logger): mything = MyThing() # Trigger an error that will cause a OSError traceback to be logged: self.assertFalse(mything.load("/nonexistent/path")) Testing Message and Action Structure ------------------------------------ Eliot provides utilities for making assertions about the structure of individual messages and actions. The simplest method is using the ``assertHasMessage`` utility function which asserts that a message of a given message type has the given fields: .. code-block:: python from eliot.testing import assertHasMessage, capture_logging class LoggingTests(TestCase): @capture_logging(assertHasMessage, "user_registration", {u"username": u"john", u"password": u"password", u"age": 12}) def test_registration(self, logger): """ Registration adds entries to the in-memory database. """ registry = UserRegistration() registry.register(u"john", u"password", 12) self.assertEqual(registry.db[u"john"], (u"passsword", 12)) ``assertHasMessage`` returns the found message and can therefore be used within more complex assertions. 
``assertHasAction`` provides similar functionality for actions (see example below). More generally, ``eliot.testing.LoggedAction`` and ``eliot.testing.LoggedMessage`` are utility classes to aid such testing. ``LoggedMessage.of_type`` lets you find all messages of a specific message type. A ``LoggedMessage`` has an attribute ``message`` which contains the logged message dictionary. For example, we could rewrite the registration logging test above like so: .. code-block:: python from eliot.testing import LoggedMessage, capture_logging class LoggingTests(TestCase): def assertRegistrationLogging(self, logger): """ Logging assertions for test_registration. """ logged = LoggedMessage.of_type(logger.messages, "user_registration")[0] assertContainsFields(self, logged.message, {u"username": u"john", u"password": u"password", u"age": 12}) @capture_logging(assertRegistrationLogging) def test_registration(self, logger): """ Registration adds entries to the in-memory database. """ registry = UserRegistration() registry.register(u"john", u"password", 12) self.assertEqual(registry.db[u"john"], (u"password", 12)) Similarly, ``LoggedAction.of_type`` finds all logged actions of a specific action type. A ``LoggedAction`` instance has ``start_message`` and ``end_message`` containing the respective message dictionaries, and a ``children`` attribute containing a list of child ``LoggedAction`` and ``LoggedMessage``. That is, a ``LoggedAction`` knows about the messages logged within its context. ``LoggedAction`` also has a utility method ``descendants()`` that returns an iterable of all its descendants. We can thus assert that a particular message (or action) was logged within the context of another action. For example, let's say we have some code like this: .. 
code-block:: python from eliot import start_action, Message class Search: def search(self, servers, database, key): with start_action(action_type="log_search", database=database, key=key): for server in servers: Message.log(message_type="log_check", server=server) if server.check(database, key): return True return False We want to assert that the "log_check" message was written in the context of the "log_search" action. The test would look like this: .. code-block:: python from eliot.testing import LoggedAction, LoggedMessage, capture_logging import searcher class LoggingTests(TestCase): @capture_logging(None) def test_logging(self, logger): searcher = Search() servers = [buildServer(), buildServer()] searcher.search(servers, "users", "theuser") action = LoggedAction.of_type(logger.messages, "log_search")[0] messages = LoggedMessage.of_type(logger.messages, "log_check") # The action start message had the appropriate fields: assertContainsFields(self, action.start_message, {"database": "users", "key": "theuser"}) # Messages were logged in the context of the action self.assertEqual(action.children, messages) # Each message had the respective server set. self.assertEqual(servers, [msg.message["server"] for msg in messages]) Or we can simplify further by using ``assertHasMessage`` and ``assertHasAction``: .. code-block:: python from eliot.testing import LoggedAction, LoggedMessage, capture_logging import searcher class LoggingTests(TestCase): @capture_logging(None) def test_logging(self, logger): searcher = Search() servers = [buildServer(), buildServer()] searcher.search(servers, "users", "theuser") action = assertHasAction(self, logger, "log_search", succeeded=True, startFields={"database": "users", "key": "theuser"}) # Messages were logged in the context of the action messages = LoggedMessage.of_type(logger.messages, "log_check") self.assertEqual(action.children, messages) # Each message had the respective server set. 
self.assertEqual(servers, [msg.message["server"] for msg in messages]) Custom testing setup -------------------- In some cases ``@capture_logging`` may not do what you want. You can achieve the same effect, but with more control, with some lower-level APIs: .. code-block:: python from eliot import MemoryLogger from eliot.testing import swap_logger, check_for_errors def custom_capture_logging(): # Replace default logging setup with a testing logger: test_logger = MemoryLogger() original_logger = swap_logger(test_logger) try: run_some_code() finally: # Restore original logging setup: swap_logger(original_logger) # Validate log messages, check for tracebacks: check_for_errors(test_logger) eliot-1.11.0/docs/source/generating/types.rst0000664000175000017500000002121013460352650022653 0ustar itamarstitamarst00000000000000Using Types to Structure Messages and Actions ============================================= .. _type system: Why Typing? ----------- So far we've been creating messages and actions in an unstructured manner. This means it's harder to support Python objects that aren't built-in and to validate message structure. Moreover there's no documentation of what fields messages and action messages expect. To improve this we introduce an optional API for creating actions and standalone messages: ``ActionType`` and ``MessageType``. Here's an example demonstrating how we create a message type, bind some values and then log the message: .. 
code-block:: python from eliot import Field, MessageType class Coordinate(object): def __init__(self, x, y): self.x = x self.y = y # This field takes a complex type that will be stored in a single Field, # so we pass in a serializer function that converts it to a list with two # ints: _LOCATION = Field(u"location", lambda loc: [loc.x, loc.y], u"The location.") # These fields are just basic supported types, in this case int and unicode # respectively: _COUNT = Field.for_types(u"count", [int], u"The number of items to deliver.") _NAME = Field.for_types(u"name", [unicode], u"The name of the delivery person.") # This is a type definition for a message. It is used to hook up # serialization of field values, and for message validation in unit tests: LOG_DELIVERY_SCHEDULED = MessageType( u"pizzadelivery:schedule", [_LOCATION, _COUNT, _NAME], u"A pizza delivery has been scheduled.") def deliver_pizzas(deliveries): person = get_free_delivery_person() for location, count in deliveries: delivery_database.insert(person, location, count) LOG_DELIVERY_SCHEDULED.log( name=person.name, count=count, location=location) Fields ------ A ``Field`` instance is used to validate fields of messages, and to serialize rich types to the built-in supported types. It is created with the name of the field, a serialization function that converts the input to an output and a description. The serialization function must return a result that is JSON-encodable. You can also pass in an extra validation function. If you pass this function in it will be called with values that are being validated; if it raises ``eliot.ValidationError`` that value will fail validation. A couple of utility functions allow creating specific types of ``Field`` instances. ``Field.for_value`` returns a ``Field`` that only can have a single value. 
More generally useful, ``Field.for_types`` returns a ``Field`` that can only be one of certain specific types: some subset of ``unicode``, ``bytes``, ``int``, ``float``, ``bool``, ``list`` and ``dict`` as well as ``None`` which technically isn't a class. As always, ``bytes`` must only contain UTF-8 encoded Unicode. .. code-block:: python from eliot import Field def userToUsername(user): """ Extract username from a User object. """ return user.username USERNAME = Field(u"username", userToUsername, u"The name of the user.") # Validation is useful for unit tests and catching bugs; it's not used in # the actual logging code path. We therefore don't bother catching things # we'd do in e.g. web form validation. def _validateAge(value): if value is not None and value < 0: raise ValidationError("Field 'age' must be positive:", value) AGE = Field.for_types(u"age", [int, None], u"The age of the user, might be None if unknown", _validateAge) Message Types ------------- Now that you have some fields you can create a custom ``MessageType``. This takes a message name which will be put in the ``message_type`` field of resulting messages. It also takes a list of ``Field`` instances and a description. .. code-block:: python from eliot import MessageType, Field USERNAME = Field.for_types("username", [str]) AGE = Field.for_types("age", [int]) LOG_USER_REGISTRATION = MessageType(u"yourapp:authentication:registration", [USERNAME, AGE], u"We've just registered a new user.") Since this syntax is rather verbose a utility function called ``fields`` is provided which creates a ``list`` of ``Field`` instances for you, with support to specifying the types of the fields. The equivalent to the code above is: .. code-block:: python from eliot import MessageType, fields LOG_USER_REGISTRATION = MessageType(u"yourapp:authentication:registration", fields(username=str, age=int)) Or you can even use existing ``Field`` instances with ``fields``: .. 
code-block:: python from eliot import MessageType, Field, fields USERNAME = Field.for_types("username", [str]) LOG_USER_REGISTRATION = MessageType(u"yourapp:authentication:registration", fields(USERNAME, age=int)) Given a ``MessageType`` you can create a ``Message`` instance with the ``message_type`` field pre-populated by calling the type. You can then use it the way you would normally use ``Message``, e.g. ``bind()`` or ``write()``. You can also just call ``MessageType.log()`` to write out a message directly: .. code-block:: python # Simple version: LOG_USER_REGISTRATION.log(username=user, age=193) # Equivalent more complex API: LOG_USER_REGISTRATION(username=user).bind(age=193).write() A ``Message`` created from a ``MessageType`` will automatically use the ``MessageType`` ``Field`` instances to serialize its fields. Keep in mind that no validation is done when messages are created. Instead, validation is intended to be done in your unit tests. If you're not unit testing all your log messages you're doing it wrong. Luckily, Eliot makes it pretty easy to test logging as we'll see in a bit. Action Types ------------ Similarly to ``MessageType`` you can also create types for actions. Unlike a ``MessageType`` you need two sets of fields: one for action start, one for success. .. code-block:: python from eliot import ActionType, fields LOG_USER_SIGNIN = ActionType(u"yourapp:authentication:signin", # Start message fields: fields(username=str), # Success message fields: fields(status=int), # Description: u"A user is attempting to sign in.") Calling the resulting instance is equivalent to ``start_action``. For ``start_task`` you can call ``LOG_USER_SIGNIN.as_task``. .. code-block:: python def signin(user, password): with LOG_USER_SIGNIN(username=user) as action: status = user.authenticate(password) action.add_success_fields(status=status) return status Again, as with ``MessageType``, field values will be serialized using the ``Field`` definitions in the ``ActionType``. 
Serialization Errors -------------------- While validation of field values typically only happens when unit testing, serialization must run in the normal logging code path. Eliot tries to very hard never to raise exceptions from the log writing code path so as not to prevent actual code from running. If a message fails to serialize then a ``eliot:traceback`` message will be logged, along with a ``eliot:serialization_failure`` message with an attempt at showing the message that failed to serialize. .. code-block:: json {"exception": "exceptions.ValueError", "timestamp": "2013-11-22T14:16:51.386745Z", "traceback": "Traceback (most recent call last):\n ... ValueError: invalid literal for int() with base 10: 'hello'\n", "system": "eliot:output", "reason": "invalid literal for int() with base 10: 'hello'", "message_type": "eliot:traceback"} {"timestamp": "2013-11-22T14:16:51.386827Z", "message": "{u\"u'message_type'\": u\"'test'\", u\"u'field'\": u\"'hello'\", u\"u'timestamp'\": u\"'2013-11-22T14:16:51.386634Z'\"}", "message_type": "eliot:serialization_failure"} Testing ------- The ``eliot.testing.assertHasAction`` and ``assertHasMessage`` APIs accept ``ActionType`` and ``MessageType`` instances, not just the ``action_type`` and ``message_type`` strings. Any function decorated with ``@capture_logging`` will additionally validate messages that were created using ``ActionType`` and ``MessageType`` using the applicable ``Field`` definitions. This will ensure you've logged all the necessary fields, no additional fields, and used the correct types. eliot-1.11.0/docs/source/generating/twisted.rst0000664000175000017500000001433213573001140023167 0ustar itamarstitamarst00000000000000Using Eliot with Twisted ======================== Eliot provides a variety of APIs to support integration with the `Twisted`_ networking framework. .. _Twisted: https://twistedmatrix.com .. 
_ThreadedWriter: Non-blocking Destinations ------------------------- ``eliot.logwriter.ThreadedWriter`` is a logging destination that wraps a blocking destination and writes to it in a non-reactor thread. This is useful because it keeps the Twisted reactor from blocking, e.g. if you're writing to a log file and the hard drive is overloaded. ``ThreadedWriter`` is a Twisted ``Service`` and starting it will call ``add_destinations`` for you and stopping it will call ``remove_destination``; there is no need to call those directly. .. literalinclude:: ../../../examples/logfile.py If you want log rotation you can pass in an ``eliot.FileDestination`` wrapping one of the classes from `twisted.python.logfile`_ as the destination file. .. _twisted.python.logfile: https://twistedmatrix.com/documents/current/api/twisted.python.logfile.html ``twisted.logger`` integration ------------------------------ If you wish you can direct Eliot logs to Twisted's logging subsystem, if that is the primary logging system you're using. .. code-block:: python from eliot import add_destinations from eliot.twisted import TwistedDestination add_destinations(TwistedDestination()) Trial Integration ----------------- If you're using Twisted's ``trial`` program to run your tests you can redirect your Eliot logs to Twisted's logs by calling ``eliot.twisted.redirectLogsForTrial()``. This function will automatically detect whether or not it is running under ``trial``. If it is then you will be able to read your Eliot logs in ``_trial_temp/test.log``, where ``trial`` writes out logs by default. If it is not running under ``trial`` it will not do anything. In addition calling it multiple times has the same effect as calling it once. The way you use it is by putting it in your package's ``__init__.py``: it will do the right thing and only redirect if you're using ``trial``. 
Take care if you are separately redirecting Twisted logs to Eliot; you should make sure not to call ``redirectLogsForTrial`` in that case so as to prevent infinite loops. Logging Failures ---------------- ``eliot.writeFailure`` is the equivalent of ``eliot.write_traceback``, only for ``Failure`` instances: .. code-block:: python from eliot import writeFailure class YourClass(object): def run(self): d = dosomething() d.addErrback(writeFailure) Actions and inlineCallbacks --------------------------- Eliot provides a decorator that is compatible with Twisted's ``inlineCallbacks`` but which also behaves well with Eliot's actions. Simply substitute ``eliot.twisted.inline_callbacks`` for ``twisted.internet.defer.inlineCallbacks`` in your code. To understand why, consider the following example: .. code-block:: python from eliot import start_action from twisted.internet.defer import inlineCallbacks @inlineCallbacks # don't use this in real code, use eliot.twisted.inline_callbacks def go(): with start_action(action_type=u"yourapp:subsystem:frob") as action: d = some_deferred_api() x = yield d action.log(message_type=u"some-report", x=x) The action started by this generator remains active as ``yield d`` gives up control to the ``inlineCallbacks`` controller. The next bit of code to run will be considered to be a child of ``action``. Since that code may be any arbitrary code that happens to get scheduled, this is certainly wrong. Additionally, when the ``inlineCallbacks`` controller resumes the generator, it will most likely do so with no active action at all. This means that the log message following the yield will be recorded with no parent action, also certainly wrong. These problems are solved by using ``eliot.twisted.inline_callbacks`` instead of ``twisted.internet.defer.inlineCallbacks``. The behavior of the two decorators is identical except that Eliot's version will preserve the generator's action context and contain it within the generator. 
This extends the ``inlineCallbacks`` illusion of "synchronous" code to Eliot actions. Actions and Deferreds --------------------- An additional set of APIs is available to help log actions when using Deferreds. To understand why, consider the following example: .. code-block:: python from eliot import start_action def go(): action = start_action(action_type=u"yourapp:subsystem:frob") with action: d = Deferred() d.addCallback(gotResult, x=1) return d This has two problems. First, ``gotResult`` is not going to run in the context of the action. Second, the action finishes once the ``with`` block finishes, i.e. before ``gotResult`` runs. If we want ``gotResult`` to be run in the context of the action and to delay the action finish we need to do some extra work, and manually wrapping all callbacks would be tedious. To solve this problem you can use the ``eliot.twisted.DeferredContext`` class. It grabs the action context when it is first created and provides the same API as ``Deferred`` (``addCallbacks`` and friends), with the difference that added callbacks run in the context of the action. When all callbacks have been added you can indicate that the action should finish after those callbacks have run by calling ``DeferredContext.addActionFinish``. As you would expect, if the ``Deferred`` fires with a regular result that will result in success message. If the ``Deferred`` fires with an errback that will result in failure message. Finally, you can unwrap the ``DeferredContext`` and access the wrapped ``Deferred`` by accessing its ``result`` attribute. .. 
code-block:: python from eliot import start_action from eliot.twisted import DeferredContext def go(): with start_action(action_type=u"your_type").context() as action: d = DeferredContext(Deferred()) # gotResult(result, x=1) will be called in the context of the action: d.addCallback(gotResult, x=1) # After gotResult finishes, finish the action: d.addActionFinish() # Return the underlying Deferred: return d.result eliot-1.11.0/docs/source/generating/messages.rst0000664000175000017500000000207213573001140023311 0ustar itamarstitamarst00000000000000.. _messages: Messages ======== Sometimes you don't want to generate actions. sometimes you just want an individual isolated message, the way traditional logging systems work. Here's how to do that. When you have an action ----------------------- If you already have an action object, you can log a message in that action's context: .. code-block:: python from eliot import start_action class YourClass(object): def run(self): with start_action(action_type="myaction") as ctx: ctx.log(message_type="mymessage", key="abc", key2=4) If you don't have an action --------------------------- If you don't have a reference to an action, or you're worried the function will sometimes be called outside the context of any action at all, you can use ``log_message``: .. code-block:: python from eliot import log_message def run(x): log_message(message_type="in_run", xfield=x) The main downside to using this function is that it's a little slower, since it needs to handle the case where there is no action in context. 
eliot-1.11.0/docs/source/generating/errors.rst0000664000175000017500000000345213460352650023033 0ustar itamarstitamarst00000000000000Errors and Exceptions ===================== Exceptions and Tracebacks ------------------------- If you are using actions you don't need to do anything special to log exceptions: if an exception is thrown within the context of an action and not caught, the action will be marked as failed and the exception will be logged with it. If you get a completely unexpected exception you may wish to log a traceback to aid debugging: .. code-block:: python from eliot import write_traceback class YourClass(object): def run(self): try: dosomething() except: write_traceback() You can also pass in the output of ``sys.exc_info()``: .. code-block:: python import sys from eliot import write_traceback write_traceback(exc_info=sys.exc_info()) .. _extract errors: Custom Exception Logging ------------------------ By default both failed actions and tracebacks log the class and string-representation of the logged exception. You can add additional fields to these messages by registering a callable that converts exceptions into fields. If no extraction function is registered for a class Eliot will look for registered functions for the exception's base classes. For example, the following registration means all failed actions that fail with a ``MyException`` will have a ``code`` field in the action end message, as will tracebacks logged with this exception: .. code-block:: python class MyException(Exception): def __init__(self, code): self.code = code from eliot import register_exception_extractor register_exception_extractor(MyException, lambda e: {"code": e.code}) By default Eliot will automatically extract fields from ``OSError``, ``IOError`` and other subclasses of Python's ``EnvironmentError``. eliot-1.11.0/docs/source/generating/asyncio.rst0000664000175000017500000000600713470775105023170 0ustar itamarstitamarst00000000000000.. 
_asyncio_coroutine: Asyncio/Trio Coroutine Support ============================== As of Eliot 1.8, ``asyncio`` and ``trio`` coroutines have appropriate context propogation for Eliot, automatically. Asyncio -------- On Python 3.7 or later, no particular care is needed. For Python 3.5 and 3.6 you will need to import either ``eliot`` (or the backport package ``aiocontextvars``) before you create your first event loop. Here's an example using ``aiohttp``: .. literalinclude:: ../../../examples/asyncio_linkcheck.py And the resulting logs: .. code-block:: shell-session $ eliot-tree linkcheck.log 0a9a5e1b-330c-4251-b7db-fd3161403443 └── check_links/1 ⇒ started 2019-04-06 19:49:16 ⧖ 0.535s ├── urls: │ ├── 0: http://eliot.readthedocs.io │ └── 1: http://nosuchurl ├── download/2/1 ⇒ started 2019-04-06 19:49:16 ⧖ 0.527s │ ├── url: http://eliot.readthedocs.io │ └── download/2/2 ⇒ succeeded 2019-04-06 19:49:16 ├── download/3/1 ⇒ started 2019-04-06 19:49:16 ⧖ 0.007s │ ├── url: http://nosuchurl │ └── download/3/2 ⇒ failed 2019-04-06 19:49:16 │ ├── errno: -2 │ ├── exception: aiohttp.client_exceptions.ClientConnectorError │ └── reason: Cannot connect to host nosuchurl:80 ssl:None [Name or service not known] └── check_links/4 ⇒ failed 2019-04-06 19:49:16 ├── exception: builtins.ValueError └── reason: Cannot connect to host nosuchurl:80 ssl:None [Name or service not known] Trio ---- Here's an example of using Trio—we put the action outside the nursery so that it finishes only when the nursery shuts down. .. literalinclude:: ../../../examples/trio_say.py And the resulting logs: .. 
code-block:: shell-session $ eliot-tree trio.log 93a4de27-8c95-498b-a188-f0e91482ad10 └── main/1 ⇒ started 2019-04-10 21:07:20 ⧖ 2.003s ├── say/2/1 ⇒ started 2019-04-10 21:07:20 ⧖ 2.002s │ ├── message: world │ └── say/2/2 ⇒ succeeded 2019-04-10 21:07:22 ├── say/3/1 ⇒ started 2019-04-10 21:07:20 ⧖ 1.001s │ ├── message: hello │ └── say/3/2 ⇒ succeeded 2019-04-10 21:07:21 └── main/4 ⇒ succeeded 2019-04-10 21:07:22 If you put the ``start_action`` *inside* the nursery context manager: 1. The two ``say`` calls will be scheduled, but not started. 2. The parent action will end. 3. Only then will the child actions be created. The result is somewhat confusing output. Trying to improve this situation is covered in `issue #401 `_. eliot-1.11.0/docs/source/generating/threads.rst0000664000175000017500000001337413460352650023155 0ustar itamarstitamarst00000000000000Spanning Processes and Threads ============================== Introduction ------------ In many applications we are interested in tasks that exist in more than just a single thread or in a single process. For example, one server may send a request to another server over a network and we would like to trace the combined operation across both servers' logs. To make this as easy as possible Eliot supports serializing task identifiers for transfer over the network (or between threads), allowing tasks to span multiple processes. .. _cross thread tasks: Cross-Thread Tasks ------------------ To trace actions across threads Eliot provides the ``eliot.preserve_context`` API. It takes a callable that is about to be passed to a thread constructor and preserves the current Eliot context, returning a new callable. This new callable should only be used, in the thread where it will run; it will restore the Eliot context and run the original function inside of it. For example: .. literalinclude:: ../../../examples/cross_thread.py Here's what the result is when run: .. 
code-block:: shell-session $ python examples/cross_thread.py | eliot-tree 11a85c42-a13f-491c-ad44-c48b2efad0e3 +-- main_thread@1/started +-- eliot:remote_task@2,1/started +-- in_thread@2,2,1/started |-- x: 3 `-- y: 4 +-- in_thread@2,2,2/succeeded |-- result: 7 +-- eliot:remote_task@2,3/succeeded +-- main_thread@3/succeeded .. _cross process tasks: Cross-Process Tasks ------------------- ``eliot.Action.serialize_task_id()`` can be used to create some ``bytes`` identifying a particular location within a task. ``eliot.Action.continue_task()`` converts a serialized task identifier into an ``eliot.Action`` and then starts the ``Action``. The process which created the task serializes the task identifier and sends it over the network to the process which will continue the task. This second process deserializes the identifier and uses it as a context for its own messages. In the following example the task identifier is added as a header to a HTTP request: .. literalinclude:: ../../../examples/cross_process_client.py The server that receives the request then extracts the identifier: .. literalinclude:: ../../../examples/cross_process_server.py Tracing logs across multiple processes makes debugging problems dramatically easier. For example, let's run the following: .. code-block:: shell-session $ python examples/cross_process_server.py > server.log $ python examples/cross_process_client.py 5 0 > client.log Here are the resulting combined logs, as visualized by `eliot-tree`_. The reason the client received a 500 error code is completely obvious in these logs: .. 
code-block:: shell-session $ cat client.log server.log | eliot-tree 1e0be9be-ae56-49ef-9bce-60e850a7db09 +-- main@1/started |-- process: client +-- http_request@2,1/started |-- process: client |-- x: 3 `-- y: 0 +-- eliot:remote_task@2,2,1/started |-- process: server +-- divide@2,2,2,1/started |-- process: server |-- x: 3 `-- y: 0 +-- divide@2,2,2,2/failed |-- exception: exceptions.ZeroDivisionError |-- process: server |-- reason: integer division or modulo by zero +-- eliot:remote_task@2,2,3/failed |-- exception: exceptions.ZeroDivisionError |-- process: server |-- reason: integer division or modulo by zero +-- http_request@2,3/failed |-- exception: requests.exceptions.HTTPError |-- process: client |-- reason: 500 Server Error: INTERNAL SERVER ERROR +-- main@3/failed |-- exception: requests.exceptions.HTTPError |-- process: client |-- reason: 500 Server Error: INTERNAL SERVER ERROR .. _eliot-tree: https://warehouse.python.org/project/eliot-tree/ Cross-Thread Tasks ------------------ ``eliot.Action`` objects should only be used on the thread that created them. If you want your task to span multiple threads use the API described above. Ensuring Message Uniqueness --------------------------- Serialized task identifiers should be used at most once. For example, every time a remote operation is retried a new call to ``serialize_task_id()`` should be made to create a new identifier. Otherwise there is a chance that you will end up with messages that have duplicate identification (i.e. two messages with matching ``task_uuid`` and ``task_level`` values), making it more difficult to trace causality. If this is not possible you may wish to start a new Eliot task upon receiving a remote request, while still making sure to log the serialized remote task identifier. The inclusion of the remote task identifier will allow manual or automated reconstruction of the cross-process relationship between the original and new tasks. 
Another alternative in some cases is to rely on unique process or thread identity to distinguish between the log messages. For example if the same serialized task identifier is sent to multiple processes, log messages within the task can still have a unique identity if a process identifier is included with each message. Logging Output for Multiple Processes ------------------------------------- If logs are being combined from multiple processes an identifier indicating the originating process should be included in log messages. This can be done a number of ways, e.g.: * Have your destination add another field to the output. * Rely on Logstash, or whatever your logging pipeline tool is, to add a field when shipping the logs to your centralized log store. eliot-1.11.0/docs/source/generating/migrating.rst0000664000175000017500000000604013460352650023474 0ustar itamarstitamarst00000000000000Integrating and Migrating Existing Logging ========================================== If you have an existing code base, you likely have existing log messages. This document will explain how to migrate and integrate existing logging into your new Eliot log setup. In particular, this will focus on the Python standard library ``logging`` package, but the same principles apply to other logging libraries. .. _migrating: Route existing logs to Eliot ---------------------------- Eliot includes a ``logging.Handler`` that can take standard library log messages and route them into Eliot. These log messages will *automatically* appear in the correct place in the action tree! Once you add actions to your code these log messages will automatically benefit from Eliot's causal information. To begin with, however, we'll just add routing of log messages to Eliot: .. code-block:: python # Add Eliot Handler to root Logger. You may wish to only route specific # Loggers to Eliot. 
import logging from eliot.stdlib import EliotHandler logging.getLogger().addHandler(EliotHandler()) Add actions at entry points and other key points ------------------------------------------------ Simply by adding a few key actions—the entry points to the code, as well as key sub-actions—you can start getting value from Eliot's functionality while still getting information from your existing logs. You can leave existing log messages in place, replacing them with Eliot logging opportunistically; they will still be included in your output. .. literalinclude:: ../../../examples/stdlib.py The stdlib logging messages will be included in the correct part of the tree: .. code-block:: shell-session $ python examples/stdlib.py | eliot-tree 3f465ee3-7fa9-40e2-8b20-9c0595612a8b └── mypackage:main/1 ⇒ started ├── timestamp: 2018-07-15 16:50:39.230467 ├── mypackage:do_a_thing/2/1 ⇒ started │ ├── timestamp: 2018-07-15 16:50:39.230709 │ └── mypackage:do_a_thing/2/2 ⇒ succeeded │ └── timestamp: 2018-07-15 16:50:39.230836 ├── mypackage:do_a_thing/3/1 ⇒ started │ ├── timestamp: 2018-07-15 16:50:39.230980 │ ├── eliot:stdlib/3/2 │ │ ├── log_level: ERROR │ │ ├── logger: mypackage │ │ ├── message: The number 3 is a bad number, don't use it. │ │ └── timestamp: 2018-07-15 16:50:39.231157 │ └── mypackage:do_a_thing/3/3 ⇒ failed │ ├── exception: builtins.ValueError │ ├── reason: I hate the number 3 │ └── timestamp: 2018-07-15 16:50:39.231364 ├── eliot:stdlib/4 │ ├── log_level: INFO │ ├── logger: mypackage │ ├── message: Number 3 was rejected. │ └── timestamp: 2018-07-15 16:50:39.231515 └── mypackage:main/5 ⇒ succeeded └── timestamp: 2018-07-15 16:50:39.231641 eliot-1.11.0/docs/source/quickstart.rst0000664000175000017500000001126713460352650021571 0ustar itamarstitamarst00000000000000Quickstart ========== Let's see how easy it is to use Eliot. Installing Eliot ---------------- To install Eliot and the other tools we'll use in this example, run the following in your shell: .. 
code-block:: shell-session $ pip install eliot eliot-tree requests You can also install it using Conda: .. code-block:: shell-session $ conda install -c conda-forge eliot eliot-tree requests This will install: 1. Eliot itself. 2. `eliot-tree `_, a tool that lets you visualize Eliot logs easily. 3. ``requests``, a HTTP client library we'll use in the example code below. You don't need it for real Eliot usage, though. Our example program ------------------- We're going to add logging code to the following script, which checks if a list of links are valid URLs: .. code-block:: python import requests def check_links(urls): for url in urls: try: response = requests.get(url) response.raise_for_status() except Exception as e: raise ValueError(str(e)) try: check_links(["http://eliot.readthedocs.io", "http://nosuchurl"]) except ValueError: print("Not all links were valid.") Adding Eliot logging -------------------- To add logging to this program, we do two things: 1. Tell Eliot to log messages to file called "linkcheck.log" by using ``eliot.to_file()``. 2. Create two actions using ``eliot.start_action()``. Actions succeed when the ``eliot.start_action()`` context manager finishes successfully, and fail when an exception is raised. .. literalinclude:: ../../examples/linkcheck.py :emphasize-lines: 2,3,7,10 Running the code ---------------- Let's run the code: .. code-block:: shell-session $ python linkcheck.py Not all the links were valid. We can see the resulting log file is composed of JSON messages, one per line: .. code-block:: shell-session $ cat linkcheck.log {"action_status": "started", "task_uuid": "b1cb58cf-2c2f-45c0-92b2-838ac00b20cc", "task_level": [1], "timestamp": 1509136967.2066844, "action_type": "check_links", "urls": ["http://eliot.readthedocs.io", "http://nosuchurl"]} ... So far these logs seem similar to the output of regular logging systems: individual isolated messages. 
But unlike those logging systems, Eliot produces logs that can be reconstructed into a tree, for example using the ``eliot-tree`` utility: .. code-block:: shell-session :emphasize-lines: 3,8,13,16-19,21-23 $ eliot-tree linkcheck.log b1cb58cf-2c2f-45c0-92b2-838ac00b20cc └── check_links/1 ⇒ started ├── timestamp: 2017-10-27 20:42:47.206684 ├── urls: │ ├── 0: http://eliot.readthedocs.io │ └── 1: http://nosuchurl ├── download/2/1 ⇒ started │ ├── timestamp: 2017-10-27 20:42:47.206933 │ ├── url: http://eliot.readthedocs.io │ └── download/2/2 ⇒ succeeded │ └── timestamp: 2017-10-27 20:42:47.439203 ├── download/3/1 ⇒ started │ ├── timestamp: 2017-10-27 20:42:47.439412 │ ├── url: http://nosuchurl │ └── download/3/2 ⇒ failed │ ├── errno: None │ ├── exception: requests.exceptions.ConnectionError │ ├── reason: HTTPConnectionPool(host='nosuchurl', port=80): Max retries exceeded with url: / (Caused by NewConnec… │ └── timestamp: 2017-10-27 20:42:47.457133 └── check_links/4 ⇒ failed ├── exception: builtins.ValueError ├── reason: HTTPConnectionPool(host='nosuchurl', port=80): Max retries exceeded with url: / (Caused by NewConnec… └── timestamp: 2017-10-27 20:42:47.457332 Notice how: 1. Eliot tells you which actions succeeded and which failed. 2. Failed actions record their exceptions. 3. You can see just from the logs that the ``check_links`` action caused the ``download`` action. Next steps ---------- You can learn more by reading the rest of the documentation, including: * The :doc:`motivation behind Eliot `. * How to generate :doc:`actions `, :doc:`standalone messages `, and :doc:`handle errors `. * How to integrate or migrate your :doc:`existing stdlib logging messages `. * How to output logs :doc:`to a file or elsewhere `. * Using :doc:`asyncio or Trio coroutines `, :doc:`threads and processes `, or :doc:`Twisted `. * Using Eliot for :doc:`scientific computing `. 
eliot-1.11.0/docs/source/python2.rst0000664000175000017500000000135713460352650021001 0ustar itamarstitamarst00000000000000.. _python2: Python 2.7 Support ================== The last version of Eliot to support Python 2.7 was release 1.7. If you are using Eliot with Python 2, keep the following in mind: * I will provide critical bug fixes for Python 2 until March 2020. I will accept patches for critical bug fixes after that (or you can `pay for my services `_ to do additional work). * Make sure you use an up-to-date ``setuptools`` and ``pip``; in theory this should result in only downloading versions of the package that support Python 2. * For extra safety, you can pin Eliot in ``setup.py`` or ``requirements.txt`` by setting: ``eliot < 1.8``. * Critical bug fixes for Python 2 will be released as 1.7.1, 1.7.2, etc.. eliot-1.11.0/docs/source/conf.py0000664000175000017500000002053513460352650020142 0ustar itamarstitamarst00000000000000# -*- coding: utf-8 -*- # # Eliot documentation build configuration file, created by # sphinx-quickstart on Mon Apr 14 12:04:03 2014. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys import os # Make sure local eliot is used when importing: sys.path.insert(0, os.path.abspath(os.path.join('..', '..'))) # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. #sys.path.insert(0, os.path.abspath('.')) # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. 
#needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = u'Eliot' copyright = u'2014-2018, ClusterHQ and Itamar Turner-Trauring' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. import eliot version = eliot.__version__ # Versioneer adds .dirty suffix to version if checkout is dirty, and # therefore ReadTheDocs somehow ends up with this in its version, so strip # it out. if version.endswith(".dirty"): version = version[:-len(".dirty")] # The full version, including alpha/beta/rc tags. release = version # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = [] # The reST default role (used for this markup: `text`) to use for all # documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). 
#add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. #keep_warnings = False # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'sphinx_rtd_theme' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. #html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. 
#html_extra_path = [] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. #html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'Eliotdoc' # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). #'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). #'pointsize': '10pt', # Additional stuff for the LaTeX preamble. #'preamble': '', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). 
latex_documents = [ ('index', 'Eliot.tex', u'Eliot Documentation', u'Itamar Turner-Trauring', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. #latex_show_urls = False # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_domain_indices = True # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'eliot', u'Eliot Documentation', [u'Itamar Turner-Trauring'], 1) ] # If true, show URL addresses after external links. #man_show_urls = False # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ('index', 'Eliot', u'Eliot Documentation', u'Itamar Turner-Trauring', 'Eliot', 'One line description of project.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. #texinfo_appendices = [] # If false, no module index is generated. #texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. #texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. #texinfo_no_detailmenu = False eliot-1.11.0/docs/source/news.rst0000664000175000017500000003355413573001140020344 0ustar itamarstitamarst00000000000000What's New ========== 1.11.0 ^^^^^^ Features: * ``Message.log()`` has been replaced by top-level function ``log_message()``. 
Or if you're in the context of action ``ctx``, you can call ``ctx.log()``. See :ref:`messages` for details. * Python 3.8 is now supported. * The ``eliot-prettyprint`` command line tool now supports a more compact format by using the ``--compact`` argument. * The ``eliot-prettyprint`` command line tool now supports outputting in local timezones using the ``--local-timezone`` argument. 1.10.0 ^^^^^^ Bug fixes: * ``@eliot.testing.capture_logging`` now passes ``*args`` and ``**kwargs`` to the wrapped function, as one would expect. Fixes #420. Thanks to Jean-Paul Calderone for the bug report. * Eliot works with Dask 2.0. Thanks to Dan Myung for the bug report. 1.9.0 ^^^^^ Deprecation: * Python versions older than 3.5.3, e.g. the 3.5.2 on Ubuntu Xenial, don't work with Eliot, so added a more informative error message explaining that. Fixes #418. Thanks to Richard van der Hoff for the bug report. Features: * If you call ``to_file()/FileDestination()`` with a non-writable file, an exception will be raised. This prevents logging from being silently swallowed when the program runs. Fixes #403. * PyPy3 is now officially supported. Changes: * If you log a NumPy array whose size > 10000, only a subset will logged. This is to ensure logging giant arrays by mistake doesn't impact your software's performance. If you want to customize logging of large arrays, see :ref:`large_numpy_arrays`. Fixes #410. 1.8.0 ^^^^^ Features: * Eliot now supports Trio coroutines, as well as other frameworks that utilize Python 3.7's ``contextvars`` (Python 3.5 and 3.6 are also supported, using backport packages). Deprecation: * ``eliot.use_asyncio_context()`` is no longer necessary. On Python 3.5 and 3.6, however, you should make sure to import ``eliot`` (or ``aiocontextvars``) before you start your first event loop. Changes: * Python 2.7 is now in legacy support mode; the last major Eliot release supporting it is 1.7.0. See :ref:`python2` for details. * Python 3.4 is no longer supported. 
1.7.0 ^^^^^ Documentation: * Eliot has an API for testing that your logs were output correctly. Until now, however, the documentation was overly focused on requiring usage of types, which are optional, so it has been rewritten to be more generic: :doc:`read more about the testing API here`. Features: * Generating messages is much faster. * Eliot now works with PyInstaller. Thanks to Jean-Paul Calderone for the bug report. Fixes issue #386. * The testing infrastructure now has slightly more informative error messages. Thanks to Jean-Paul Calderone for the bug report. Fixes issue #373. * Added lower-level testing infrastructure—``eliot.testing.swap_logger`` and ``eliot.testing.check_for_errors``—which is useful for cases when the ``@capture_logging`` decorator is insufficient. For example, test methods that are async, or return Twisted ``Deferred``. See the :doc:`testing documentation` for details. Thanks to Jean-Paul Calderone for the feature request. Fixes #364. * ``eliot.ValidationError``, as raised by e.g. ``capture_logging``, is now part of the public API. Fixed issue #146. Twisted-related features: * New decorator, ``@eliot.twisted.inline_callbacks`` , which is like Twisted's ``inlineCallbacks`` but which also manages the Eliot context. Thanks to Jean-Paul Calderone for the fix. Fixed issue #259. * ``eliot.twisted.DeferredContext.addCallbacks`` now supports omitting the errback, for compatibility with Twisted's ``Deferred``. Thanks to Jean-Paul Calderone for the fix. Fixed issue #366. Bug fixes: * Fixed bug in the ``asyncio`` coroutine support where only the thread where ``use_asyncio_context()`` was called supported coroutine-specific contexts. Fixes issue #388. * ``ILogger.write`` is now explicitly thread-safe. The ``MemoryLogger`` (as used by tests) implementation of this method which was previously not thread-safe is now thread-safe. Thanks to Jean-Paul Calderone for the patch. Fixes issue #382. 
1.6.0 ^^^^^ Deprecation: * Python 2 is still supported, but will be dropped in one of the next releases. See :ref:`python2`. Features: * NumPy integers, floats, bools and arrays are now automatically serialized to JSON, via a new default JSON encoder (``eliot.json.EliotJSONEncoder``). * Dask support: replace ``dask.compute()`` with ``eliot.dask.compute_with_trace()`` to automatically preserve Eliot context for ``Bag`` and ``Delayed`` Dask computations. See :ref:`dask_usage` for details. * New decorator, ``@eliot.log_call``, which automatically creates an action that starts when function is called and ends when it returns. See :ref:`log_call decorator`. * A parser library for parsing serialized Eliot JSON messages into a tree of Python objects. See :ref:`parsing_logs` for details. Testing features: * ``eliot.testing.LoggedAction`` has a new method, ``type_tree()``, that returns the tree of action and message types. This allows for easier testing of action structure. * ``eliot.testing.LoggedAction.of_type`` now accepts the type as a string, not just an ``eliot.ActionType`` instance. Similarly, ``LoggedMessage.of_type`` also accepts the type as a string. 1.5.0 ^^^^^ Bug fixes: * The standard library ``logging`` bridge now logs tracebacks, not just messages. Features: * You can now pass in an explicit traceback tuple to ``write_traceback``. Changes: * The deprecated ``system`` argument to ``write_traceback`` and ``writeFailure`` has been removed. 1.4.0 ^^^^^ Features: * Added support for routing standard library logging into Eliot; see :ref:`migrating` for details. * Added support for Python 3.7. Output format changes: * All messages now have either ``message_type`` or ``action_type`` fields. Documentation: * Documented how to add log levels, and how to filter Eliot logs. * Logstash configuration is closer to modern version's options, though still untested. * Explained how to integrate/migrate existing logging with Eliot. 
1.3.0 ^^^^^ Features: * The default JSON output format now supports custom JSON encoders. See :ref:`custom_json` for details. Thanks to Jonathan Jacobs for feedback. Bug fixes: * ``MemoryLogger.validate()`` now gives more informative errors if JSON encoding fails. Thanks to Jean-Paul Calderone for the bug report. Deprecations: * On Python 3, the JSON encoder used by ``to_file`` and ``FileDestination`` would accept ``bytes``... sometimes. This is deprecated, and will cease to work in a future release of Eliot (on Python 3, it will continue to work on Python 2). If you wish to include ``bytes`` in JSON logging, convert it to a string in the log-generating code, use Eliot's type system, or use a custom JSON encoder. 1.2.0 ^^^^^ Features: * Eliot now does the right thing for ``asyncio`` coroutines in Python 3.5 and later. See :ref:`asyncio_coroutine` for details. Thanks to x0zzz for the bug report. Misc: * ``Action.continue_task`` can now accept text task IDs (``str`` in Python 3, ``unicode`` in Python 2). 1.1.0 ^^^^^ Features: * Messages are no longer lost if they are logged before any destinations are added. In particular, messages will be buffered in memory until the first set of destinations are added, at which point those messages will be delivered. Thanks to Jean-Paul Calderone for the feature request. * ``eliot.add_destinations`` replaces ``eliot.add_destination``, and accepts multiple Destinations at once. * ``eliot.twisted.TwistedDestination`` allows redirecting Eliot logs to ``twisted.logger``. Thanks to Glyph Lefkowitz for the feature request. Misc: * Coding standard switched to PEP-8. * Dropped support for Python 3.3. * Dropped support for versions of Twisted older than 15.2 (or whenever it was that ``twisted.logger`` was introduced). * Dropped support for ``ujson``. 1.0.0 ^^^^^ Eliot is stable, and has been for a while, so switching to v1.0. 
Features: * New API: ``MessageType.log()``, the equivalent of ``Message.log()``, allows you to quickly create a new typed log message and write it out. * New APIs: ``eliot.current_action()`` returns the current ``Action``, and ``Action.task_uuid`` is the task's UUID. * You can now do ``with YOUR_ACTION().context() as action:``, i.e. ``Action.context()`` context manager returns the ``Action`` instance. * ``ActionType.as_task`` no longer requires a logger argument, matching the other APIs where passing in a logger is optional. 0.12.0 ^^^^^^ Features: * Python 3.6 support. Misc: * Made test suite pass again with latest Hypothesis release. 0.11.0 ^^^^^^ Features: * Eliot tasks can now more easily :ref:`span multiple threads ` using the new ``eliot.preserve_context`` API. * ``eliot-prettyprint`` command line tool now pretty prints field values in a more informative manner. Bug fixes: * ``eliot-prettyprint`` now handles unparseable lines by skipping formatting them rather than exiting. 0.10.1 ^^^^^^ Bug fixes: * Fixed regression in 0.10.0: fix validation of failed actions and tracebacks with extracted additional fields. 0.10.0 ^^^^^^ Features: * ``register_exception_extractor`` allows for more useful :ref:`logging of failed actions and tracebacks` by extracting additional fields from exceptions. * Python 3.5 support. Bug fixes: * Journald support works on Python 3. 0.9.0 ^^^^^ Features: * Native :ref:`journald support`. * ``eliot-prettyprint`` is a command-line tool that formats JSON Eliot messages into a more human-friendly format. * ``eliot.logwriter.ThreadedWriter`` is a Twisted non-blocking wrapper for any blocking destination. 0.8.0 ^^^^^ Features: * ``Message.log`` will log a new message, combining the existing ``Message.new`` and ``Message.write``. * ``write_traceback`` and ``writeFailure`` no longer require a ``Logger``; they now default to using the global one. 
* The logs written with ``redirectLogsForTrial`` are now written in JSON format, rather than with ``pformat``. Bug fixes: * ``FileDestination`` will now call ``flush()`` on the given file object after writing the log message. Previously log messages would not end up being written out until the file buffer filled up. * Each ``Message`` logged outside the context of an action now gets a unique ``task_id``. 0.7.0 ^^^^^ * Creating your own ``Logger`` instances is no longer necessary; all relevant APIs now default to using a global one. A new testing decorator (``eliot.testing.capture_logging``) was added to capture global logging. * Support positional ``Field``-instance arguments to ``fields()`` to make combining existing field types and simple fields more convenient. Contributed by Jonathan Jacobs. * ``write_traceback`` and ``writeFailure`` no longer require a ``system`` argument, as the combination of traceback and action context should suffice to discover the origin of the problem. This is a minor change to output format as the field is also omitted from the resulting ``eliot:traceback`` messages. * The ``validate_logging`` testing utility now skips validation when the decorated test method raises ``SkipTest``. * Exceptions in destinations are now handled better: instead of being dropped silently an attempt is made to log a message about the problem. If that also fails then the exception is dropped. 0.6.0 ^^^^^ .. warning:: Incompatible output format change! In previous versions the ordering of messages and actions was ambiguous and could not be deduced from out-of-order logs, and even where it was possible sorting correctly was difficult. To fix this the ``action_counter`` field was removed and now all messages can be uniquely located within a specific task by the values in an :ref:`improved task_level field `. Features: * Eliot tasks can now :ref:`span multiple processes and threads `, allowing for easy tracing of actions in complex and distributed applications. 
* :ref:`eliot.add_global_fields ` allows adding fields with specific values to all Eliot messages logged by your program. This can be used to e.g. distinguish between log messages from different processes by including relevant identifying information. Bug fixes: * On Python 3 files that accept unicode (e.g. ``sys.stdout``) should now work. 0.5.0 ^^^^^ Features: * Added support for Python 3.4. * Most public methods and functions now have underscore-based equivalents to the camel case versions, e.g. ``eliot.write_traceback`` and ``eliot.writeTraceback``, for use in PEP 8 styled programs. Twisted-facing APIs and pyunit assertions do not provide these additional APIs, as camel-case is the native idiom. * ``eliot.to_file`` outputs log messages to a file. * Documented how to load Eliot logging into ElasticSearch via Logstash. * Documentation has been significantly reorganized. 0.4.0 ^^^^^ Note that this is the last release that will make incompatible API changes without interim deprecation warnings. Incompatible changes from 0.3.0: * ``Logger`` no longer does JSON serialization; it's up to destinations to decide how to serialize the dictionaries they receive. * Timestamps are no longer encoded in TAI64N format; they are now provided as seconds since the Unix epoch. * ``ActionType`` no longer supports defining additional failure fields, and therefore accepts one argument less. * ``Action.runCallback`` and ``Action.finishAfter`` have been removed, as they are replaced by ``DeferredContext`` (see below). Features: * Added a simpler API (``fields()``) for defining fields for ``ActionType`` and ``MessageType``. * Added support for Python 3.3. * Actions can now be explicitly finished using a public API: ``Action.finish()``. * ``Action.context()`` context manager allows setting an action context without finishing the action when exiting the block. * Added a new API for Twisted ``Deferred`` support: ``eliot.twisted.DeferredContext``. 
* ``eliot.twisted.redirectLogsForTrial`` will redirect Eliot logs to Twisted's logs when running under the ``trial`` test runner. eliot-1.11.0/docs/source/outputting/0000775000175000017500000000000013573001162021052 5ustar itamarstitamarst00000000000000eliot-1.11.0/docs/source/outputting/index.rst0000664000175000017500000000012713460352650022721 0ustar itamarstitamarst00000000000000Outputting Logs =============== .. toctree:: output journald elasticsearch eliot-1.11.0/docs/source/outputting/elasticsearch.rst0000664000175000017500000000312113460352650024421 0ustar itamarstitamarst00000000000000Using Logstash and ElasticSearch to Process Eliot Logs ====================================================== .. note:: Logstash, Elasticsearch and Kibana change frequently. These instructions might not be quite accurate. `ElasticSearch`_ is a search and analytics engine which can be used to store Eliot logging output. The logs can then be browsed by humans using the `Kibana`_ web UI, or on the command-line using the `logstash-cli`_ tool. Automated systems can access the logs using the ElasticSearch query API. `Logstash`_ is a log processing tool that can be used to load Eliot log files into ElasticSearch. The combination of ElasticSearch, Logstash, and Kibana is sometimes referred to as ELK. .. _logstash-cli: https://github.com/jedi4ever/logstash-cli .. _Logstash: http://logstash.net/ .. _ElasticSearch: http://elasticsearch.org .. _Kibana: http://www.elasticsearch.org/overview/kibana/ Example Logstash Configuration ------------------------------ Assuming each Eliot message is written out as a JSON message on its own line (which is the case for ``eliot.to_file()`` and ``eliot.logwriter.ThreadedFileWriter``), the following Logstash configuration will load these log messages into an in-process ElasticSearch database: :download:`logstash_standalone.conf` .. literalinclude:: logstash_standalone.conf We can then pipe JSON messages from Eliot into ElasticSearch using Logstash: .. 
code-block:: console $ python examples/stdout.py | logstash web -- agent --config logstash_standalone.conf You can then use the Kibana UI to search and browse the logs by visiting http://localhost:9292/. eliot-1.11.0/docs/source/outputting/logstash_standalone.conf0000664000175000017500000000123613460352650025765 0ustar itamarstitamarst00000000000000input { stdin { codec => json_lines { charset => "UTF-8" } } } filter { date { # Parse Eliot timestamp filed into the special @timestamp field Logstash # expects: match => [ "timestamp", "UNIX" ] target => ["@timestamp"] } } output { # Stdout output for debugging: stdout { codec => rubydebug } elasticsearch { # We make the document id unique (for a specific index/mapping type pair) by # using the relevant Eliot fields. This means replaying messages will not # result in duplicates, as long as the replayed messages end up in the same # index. document_id => "%{task_uuid}_%{task_level}" } } eliot-1.11.0/docs/source/outputting/output.rst0000664000175000017500000000624213460352650023156 0ustar itamarstitamarst00000000000000Configuring Logging Output ========================== You can register "destinations" to handle logging output; a destination is a callable that takes a message dictionary. For example, if we want to just print each new message: .. code-block:: python import json, sys from eliot import add_destinations def stdout(message): print(message) add_destinations(stdout) Before destinations are added ----------------------------- Up to a 1000 messages will be buffered in memory until the first set of destinations are added, at which point those messages will be delivered to newly added set of destinations. This ensures that no messages will be lost if logging happens during configuration but before a destination is added. Outputting JSON to a file ------------------------- Since JSON is a common output format, Eliot provides a utility class that logs to a file, ``eliot.FileDestination(file=yourfile)``. 
Each Eliot message will be encoded in JSON and written on a new line. As a short hand you can call ``eliot.to_file``, which will create the destination and then add it automatically. For example: .. code-block:: python from eliot import to_file to_file(open("eliot.log", "ab")) .. note:: This destination is blocking: if writing to a file takes a long time your code will not be able to proceed until writing is done. If you're using Twisted you can wrap a ``eliot.FileDestination`` with a non-blocking :ref:`eliot.logwriter.ThreadedWriter`. This allows you to log to a file without blocking the Twisted ``reactor``. .. _custom_json: Customizing JSON Encoding ------------------------- If you're using Eliot's JSON output you may wish to customize encoding. By default Eliot uses ``eliot.json.EliotJSONEncoder`` (a subclass of ``json.JSONEncoder``) to encode objects. You can customize encoding by passing a custom subclass to either ``eliot.FileDestination`` or ``eliot.to_file``: .. code-block:: python from eliot.json import EliotJSONEncoder from eliot import to_file class MyClass: def __init__(self, x): self.x = x class MyEncoder(EliotJSONEncoder): def default(self, obj): if isinstance(obj, MyClass): return {"x": obj.x} return EliotJSONEncoder.default(self, obj) to_file(open("eliot.log", "ab"), encoder=MyEncoder) For more details on JSON encoding see the Python `JSON documentation `_. .. _add_global_fields: Adding Fields to All Messages ----------------------------- Sometimes you want to add a field to all messages output by your process, regardless of destination. For example if you're aggregating logs from multiple processes into a central location you might want to include a field ``process_id`` that records the name and process id of your process in every log message. Use the ``eliot.add_global_fields`` API to do so, e.g.: .. 
code-block:: python import os, sys from eliot import add_global_fields add_global_fields(process_id="%s:%d" % (sys.argv[0], os.getpid())) You should call ``add_global_fields`` before ``add_destinations`` to ensure all messages get the global fields. eliot-1.11.0/docs/source/outputting/journald.rst0000664000175000017500000000477713460352650023447 0ustar itamarstitamarst00000000000000.. _journald: Journald ======== ``journald`` is the native logging system on Linux operating systems that use ``systemd`` with support for structured, indexed log storage. Eliot provides native ``journald`` support, with the following features: * The default message field (``MESSAGE``) stores the Eliot message as JSON. * Failed actions get priority 3 ("err") and tracebacks get priority 2 ("crit"). * The ``ELIOT_TASK`` field stores the task UUID. * The ``ELIOT_TYPE`` field stores the message or action type if available. * The ``SYSLOG_IDENTIFIER`` stores ``sys.argv[0]``. Installation ------------ Journald requires additional libraries that are not installed by default by Eliot. You can install them by running: .. code-block:: shell-session $ pip install eliot[journald] Generating logs --------------- The following example demonstrates how to enable ``journald`` output. .. literalinclude:: ../../../examples/journald.py Querying logs ------------- The ``journalctl`` utility can be used to extract logs from ``journald``. Useful options include ``--all`` which keeps long fields from being truncated and ``--output cat`` which only outputs the body of the ``MESSAGE`` field, i.e. the JSON-serialized Eliot message. Let's generate some logs: .. code-block:: shell-session $ python journald.py We can find all messages with a specific type: .. code-block:: shell-session $ sudo journalctl --all --output cat ELIOT_TYPE=inbetween | eliot-prettyprint 32ab1286-c356-439d-86f8-085fec3b65d0 -> /1 2015-09-23 21:26:37.972403Z message_type: inbetween We can filter to those that indicate errors: .. 
code-block:: shell-session $ sudo journalctl --all --output cat --priority=err ELIOT_TYPE=divide | eliot-prettyprint ce64eb77-bb7f-4e69-83f8-07d7cdaffaca -> /2 2015-09-23 21:26:37.972945Z action_type: divide action_status: failed exception: exceptions.ZeroDivisionError reason: integer division or modulo by zero We can also search by task UUID, in which case ``eliot-tree`` can also be used to process the output: .. code-block:: shell-session $ sudo journalctl --all --output cat ELIOT_TASK=ce64eb77-bb7f-4e69-83f8-07d7cdaffaca | eliot-tree ce64eb77-bb7f-4e69-83f8-07d7cdaffaca +-- divide@1/started |-- a: 10 |-- b: 0 `-- timestamp: 2015-09-23 17:26:37.972716 +-- divide@2/failed |-- exception: exceptions.ZeroDivisionError |-- reason: integer division or modulo by zero `-- timestamp: 2015-09-23 17:26:37.972945 eliot-1.11.0/docs/make.bat0000664000175000017500000001506413460352650016751 0ustar itamarstitamarst00000000000000@ECHO OFF REM Command file for Sphinx documentation if "%SPHINXBUILD%" == "" ( set SPHINXBUILD=sphinx-build ) set BUILDDIR=build set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% source set I18NSPHINXOPTS=%SPHINXOPTS% source if NOT "%PAPER%" == "" ( set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS% set I18NSPHINXOPTS=-D latex_paper_size=%PAPER% %I18NSPHINXOPTS% ) if "%1" == "" goto help if "%1" == "help" ( :help echo.Please use `make ^` where ^ is one of echo. html to make standalone HTML files echo. dirhtml to make HTML files named index.html in directories echo. singlehtml to make a single large HTML file echo. pickle to make pickle files echo. json to make JSON files echo. htmlhelp to make HTML files and a HTML help project echo. qthelp to make HTML files and a qthelp project echo. devhelp to make HTML files and a Devhelp project echo. epub to make an epub echo. latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter echo. text to make text files echo. man to make manual pages echo. texinfo to make Texinfo files echo. 
gettext to make PO message catalogs echo. changes to make an overview over all changed/added/deprecated items echo. xml to make Docutils-native XML files echo. pseudoxml to make pseudoxml-XML files for display purposes echo. linkcheck to check all external links for integrity echo. doctest to run all doctests embedded in the documentation if enabled goto end ) if "%1" == "clean" ( for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i del /q /s %BUILDDIR%\* goto end ) %SPHINXBUILD% 2> nul if errorlevel 9009 ( echo. echo.The 'sphinx-build' command was not found. Make sure you have Sphinx echo.installed, then set the SPHINXBUILD environment variable to point echo.to the full path of the 'sphinx-build' executable. Alternatively you echo.may add the Sphinx directory to PATH. echo. echo.If you don't have Sphinx installed, grab it from echo.http://sphinx-doc.org/ exit /b 1 ) if "%1" == "html" ( %SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html if errorlevel 1 exit /b 1 echo. echo.Build finished. The HTML pages are in %BUILDDIR%/html. goto end ) if "%1" == "dirhtml" ( %SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml if errorlevel 1 exit /b 1 echo. echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml. goto end ) if "%1" == "singlehtml" ( %SPHINXBUILD% -b singlehtml %ALLSPHINXOPTS% %BUILDDIR%/singlehtml if errorlevel 1 exit /b 1 echo. echo.Build finished. The HTML pages are in %BUILDDIR%/singlehtml. goto end ) if "%1" == "pickle" ( %SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle if errorlevel 1 exit /b 1 echo. echo.Build finished; now you can process the pickle files. goto end ) if "%1" == "json" ( %SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json if errorlevel 1 exit /b 1 echo. echo.Build finished; now you can process the JSON files. goto end ) if "%1" == "htmlhelp" ( %SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp if errorlevel 1 exit /b 1 echo. 
echo.Build finished; now you can run HTML Help Workshop with the ^ .hhp project file in %BUILDDIR%/htmlhelp. goto end ) if "%1" == "qthelp" ( %SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp if errorlevel 1 exit /b 1 echo. echo.Build finished; now you can run "qcollectiongenerator" with the ^ .qhcp project file in %BUILDDIR%/qthelp, like this: echo.^> qcollectiongenerator %BUILDDIR%\qthelp\Eliot.qhcp echo.To view the help file: echo.^> assistant -collectionFile %BUILDDIR%\qthelp\Eliot.ghc goto end ) if "%1" == "devhelp" ( %SPHINXBUILD% -b devhelp %ALLSPHINXOPTS% %BUILDDIR%/devhelp if errorlevel 1 exit /b 1 echo. echo.Build finished. goto end ) if "%1" == "epub" ( %SPHINXBUILD% -b epub %ALLSPHINXOPTS% %BUILDDIR%/epub if errorlevel 1 exit /b 1 echo. echo.Build finished. The epub file is in %BUILDDIR%/epub. goto end ) if "%1" == "latex" ( %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex if errorlevel 1 exit /b 1 echo. echo.Build finished; the LaTeX files are in %BUILDDIR%/latex. goto end ) if "%1" == "latexpdf" ( %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex cd %BUILDDIR%/latex make all-pdf cd %BUILDDIR%/.. echo. echo.Build finished; the PDF files are in %BUILDDIR%/latex. goto end ) if "%1" == "latexpdfja" ( %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex cd %BUILDDIR%/latex make all-pdf-ja cd %BUILDDIR%/.. echo. echo.Build finished; the PDF files are in %BUILDDIR%/latex. goto end ) if "%1" == "text" ( %SPHINXBUILD% -b text %ALLSPHINXOPTS% %BUILDDIR%/text if errorlevel 1 exit /b 1 echo. echo.Build finished. The text files are in %BUILDDIR%/text. goto end ) if "%1" == "man" ( %SPHINXBUILD% -b man %ALLSPHINXOPTS% %BUILDDIR%/man if errorlevel 1 exit /b 1 echo. echo.Build finished. The manual pages are in %BUILDDIR%/man. goto end ) if "%1" == "texinfo" ( %SPHINXBUILD% -b texinfo %ALLSPHINXOPTS% %BUILDDIR%/texinfo if errorlevel 1 exit /b 1 echo. echo.Build finished. The Texinfo files are in %BUILDDIR%/texinfo. 
goto end ) if "%1" == "gettext" ( %SPHINXBUILD% -b gettext %I18NSPHINXOPTS% %BUILDDIR%/locale if errorlevel 1 exit /b 1 echo. echo.Build finished. The message catalogs are in %BUILDDIR%/locale. goto end ) if "%1" == "changes" ( %SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes if errorlevel 1 exit /b 1 echo. echo.The overview file is in %BUILDDIR%/changes. goto end ) if "%1" == "linkcheck" ( %SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck if errorlevel 1 exit /b 1 echo. echo.Link check complete; look for any errors in the above output ^ or in %BUILDDIR%/linkcheck/output.txt. goto end ) if "%1" == "doctest" ( %SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest if errorlevel 1 exit /b 1 echo. echo.Testing of doctests in the sources finished, look at the ^ results in %BUILDDIR%/doctest/output.txt. goto end ) if "%1" == "xml" ( %SPHINXBUILD% -b xml %ALLSPHINXOPTS% %BUILDDIR%/xml if errorlevel 1 exit /b 1 echo. echo.Build finished. The XML files are in %BUILDDIR%/xml. goto end ) if "%1" == "pseudoxml" ( %SPHINXBUILD% -b pseudoxml %ALLSPHINXOPTS% %BUILDDIR%/pseudoxml if errorlevel 1 exit /b 1 echo. echo.Build finished. The pseudo-XML files are in %BUILDDIR%/pseudoxml. 
goto end ) :end eliot-1.11.0/eliot.egg-info/0000775000175000017500000000000013573001162017206 5ustar itamarstitamarst00000000000000eliot-1.11.0/eliot.egg-info/requires.txt0000664000175000017500000000042113573001162021603 0ustar itamarstitamarst00000000000000six zope.interface pyrsistent>=0.11.8 boltons>=19.0.1 [:python_version < "3.7" and python_version > "2.7"] aiocontextvars [dev] setuptools>=40 twine>=1.12.1 coverage sphinx sphinx_rtd_theme flake8 black [journald] cffi>=1.1.2 [test] hypothesis>=1.14.0 testtools pytest eliot-1.11.0/eliot.egg-info/PKG-INFO0000664000175000017500000001036613573001162020311 0ustar itamarstitamarst00000000000000Metadata-Version: 2.1 Name: eliot Version: 1.11.0 Summary: Logging library that tells you why it happened Home-page: https://github.com/itamarst/eliot/ Maintainer: Itamar Turner-Trauring Maintainer-email: itamar@itamarst.org License: Apache 2.0 Description: Eliot: Logging that tells you *why* it happened ================================================ .. image:: https://travis-ci.org/itamarst/eliot.png?branch=master :target: http://travis-ci.org/itamarst/eliot :alt: Build Status Python's built-in ``logging`` and other similar systems output a stream of factoids: they're interesting, but you can't really tell what's going on. * Why is your application slow? * What caused this code path to be chosen? * Why did this error happen? Standard logging can't answer these questions. But with a better model you could understand what and why things happened in your application. You could pinpoint performance bottlenecks, you could understand what happened when, who called what. That is what Eliot does. ``eliot`` is a Python logging system that outputs causal chains of **actions**: actions can spawn other actions, and eventually they either **succeed or fail**. The resulting logs tell you the story of what your software did: what happened, and what caused it. 
Eliot supports a range of use cases and 3rd party libraries: * Logging within a single process. * Causal tracing across a distributed system. * Scientific computing, with `built-in support for NumPy and Dask `_. * `Asyncio and Trio coroutines `_ and the `Twisted networking framework `_. Eliot is only used to generate your logs; you will might need tools like Logstash and ElasticSearch to aggregate and store logs if you are using multiple processes across multiple machines. Eliot supports Python 3.5, 3.6, 3.7, and 3.8, as well as PyPy3. It is maintained by Itamar Turner-Trauring, and released under the Apache 2.0 License. Python 2.7 is in legacy support mode, with the last release supported being 1.7; see `here `_ for details. * `Read the documentation `_. * Download from `PyPI`_ or `conda-forge `_. * Need help or have any questions? `File an issue `_ on GitHub. * **Commercial support** is available from `Python⇒Speed `_. Testimonials ------------ "Eliot has made tracking down causes of failure (in complex external integrations and internal uses) tremendously easier. Our errors are logged to Sentry with the Eliot task UUID. That means we can go from a Sentry notification to a high-level trace of operations—with important metadata at each operation—in a few seconds. We immediately know which user did what in which part of the system." —Jonathan Jacobs .. _Github: https://github.com/itamarst/eliot .. 
_PyPI: https://pypi.python.org/pypi/eliot Keywords: logging Platform: UNKNOWN Classifier: Intended Audience :: Developers Classifier: License :: OSI Approved :: Apache Software License Classifier: Operating System :: OS Independent Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 3.5 Classifier: Programming Language :: Python :: 3.6 Classifier: Programming Language :: Python :: 3.7 Classifier: Programming Language :: Python :: 3.8 Classifier: Programming Language :: Python :: Implementation :: CPython Classifier: Programming Language :: Python :: Implementation :: PyPy Classifier: Topic :: System :: Logging Requires-Python: >=3.5.3 Provides-Extra: test Provides-Extra: journald Provides-Extra: dev eliot-1.11.0/eliot.egg-info/dependency_links.txt0000664000175000017500000000000113573001162023254 0ustar itamarstitamarst00000000000000 eliot-1.11.0/eliot.egg-info/entry_points.txt0000664000175000017500000000007713573001162022510 0ustar itamarstitamarst00000000000000[console_scripts] eliot-prettyprint = eliot.prettyprint:_main eliot-1.11.0/eliot.egg-info/SOURCES.txt0000664000175000017500000000504013573001162021071 0ustar itamarstitamarst00000000000000LICENSE MANIFEST.in README.rst setup.cfg setup.py versioneer.py benchmarks/logwriter.py benchmarks/serialization.py docs/Makefile docs/make.bat docs/source/conf.py docs/source/development.rst docs/source/index.rst docs/source/introduction.rst docs/source/news.rst docs/source/python2.rst docs/source/quickstart.rst docs/source/scientific-computing.rst docs/source/generating/actions.rst docs/source/generating/asyncio.rst docs/source/generating/errors.rst docs/source/generating/index.rst docs/source/generating/loglevels.rst docs/source/generating/messages.rst docs/source/generating/migrating.rst docs/source/generating/testing.rst docs/source/generating/threads.rst docs/source/generating/twisted.rst docs/source/generating/types.rst 
docs/source/outputting/elasticsearch.rst docs/source/outputting/index.rst docs/source/outputting/journald.rst docs/source/outputting/logstash_standalone.conf docs/source/outputting/output.rst docs/source/reading/fields.rst docs/source/reading/index.rst docs/source/reading/reading.rst eliot/__init__.py eliot/_action.py eliot/_bytesjson.py eliot/_errors.py eliot/_generators.py eliot/_message.py eliot/_output.py eliot/_traceback.py eliot/_util.py eliot/_validation.py eliot/_version.py eliot/dask.py eliot/filter.py eliot/journald.py eliot/json.py eliot/logwriter.py eliot/parse.py eliot/prettyprint.py eliot/serializers.py eliot/stdlib.py eliot/tai64n.py eliot/testing.py eliot/twisted.py eliot.egg-info/PKG-INFO eliot.egg-info/SOURCES.txt eliot.egg-info/dependency_links.txt eliot.egg-info/entry_points.txt eliot.egg-info/requires.txt eliot.egg-info/top_level.txt eliot/tests/__init__.py eliot/tests/common.py eliot/tests/strategies.py eliot/tests/test_action.py eliot/tests/test_api.py eliot/tests/test_coroutines.py eliot/tests/test_dask.py eliot/tests/test_filter.py eliot/tests/test_generators.py eliot/tests/test_journald.py eliot/tests/test_json.py eliot/tests/test_logwriter.py eliot/tests/test_message.py eliot/tests/test_output.py eliot/tests/test_parse.py eliot/tests/test_prettyprint.py eliot/tests/test_pyinstaller.py eliot/tests/test_serializers.py eliot/tests/test_stdlib.py eliot/tests/test_tai64n.py eliot/tests/test_testing.py eliot/tests/test_traceback.py eliot/tests/test_twisted.py eliot/tests/test_util.py eliot/tests/test_validation.py examples/asyncio_linkcheck.py examples/cross_process_client.py examples/cross_process_server.py examples/cross_thread.py examples/dask_eliot.py examples/journald.py examples/linkcheck.py examples/logfile.py examples/rometrip_actions.py examples/stdlib.py examples/stdout.py examples/trio_say.pyeliot-1.11.0/eliot.egg-info/top_level.txt0000664000175000017500000000000613573001162021734 0ustar itamarstitamarst00000000000000eliot 
eliot-1.11.0/setup.py0000664000175000017500000000504513573001140016112 0ustar itamarstitamarst00000000000000from setuptools import setup import versioneer def read(path): """ Read the contents of a file. """ with open(path) as f: return f.read() setup( classifiers=[ "Intended Audience :: Developers", "License :: OSI Approved :: Apache Software License", "Operating System :: OS Independent", "Programming Language :: Python", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.5", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: Implementation :: CPython", "Programming Language :: Python :: Implementation :: PyPy", "Topic :: System :: Logging", ], name="eliot", version=versioneer.get_version(), cmdclass=versioneer.get_cmdclass(), description="Logging library that tells you why it happened", python_requires=">=3.5.3", install_requires=[ # Python 3 compatibility: "six", # Internal code documentation: "zope.interface", # Persistent objects for Python: "pyrsistent >= 0.11.8", # version with multi-type pvector/pmap_field # Better decorators, with version that works better with type annotations: "boltons >= 19.0.1", # Backwards compatibility for Python 3.5 and 3.6: 'aiocontextvars;python_version<"3.7" and python_version>"2.7"', ], extras_require={ "journald": [ # We use cffi to talk to the journald API: "cffi >= 1.1.2" # significant API changes in older releases ], "test": [ # Bug-seeking missile: "hypothesis >= 1.14.0", # Tasteful testing for Python: "testtools", "pytest", ], "dev": [ # Ensure we can do python_requires correctly: "setuptools >= 40", # For uploading releases: "twine >= 1.12.1", # Allows us to measure code coverage: "coverage", "sphinx", "sphinx_rtd_theme", "flake8", "black", ], }, entry_points={"console_scripts": ["eliot-prettyprint = eliot.prettyprint:_main"]}, keywords="logging", license="Apache 2.0", packages=["eliot", 
"eliot.tests"], url="https://github.com/itamarst/eliot/", maintainer="Itamar Turner-Trauring", maintainer_email="itamar@itamarst.org", long_description=read("README.rst"), )