lttnganalyses-0.4.3/0000775000175000017500000000000012667421106016063 5ustar mjeansonmjeanson00000000000000lttnganalyses-0.4.3/lttnganalyses/0000775000175000017500000000000012667421106020753 5ustar mjeansonmjeanson00000000000000lttnganalyses-0.4.3/lttnganalyses/_version.py0000664000175000017500000000072712667421106023157 0ustar mjeansonmjeanson00000000000000 # This file was generated by 'versioneer.py' (0.15) from # revision-control system data, or from the parent directory name of an # unpacked source archive. Distribution tarballs contain a pre-generated copy # of this file. import json import sys version_json = ''' { "dirty": false, "error": null, "full-revisionid": "6ec799a0912c32612a0651bf094ec68cec493327", "version": "0.4.3" } ''' # END VERSION_JSON def get_versions(): return json.loads(version_json) lttnganalyses-0.4.3/lttnganalyses/common/0000775000175000017500000000000012667421106022243 5ustar mjeansonmjeanson00000000000000lttnganalyses-0.4.3/lttnganalyses/common/version_utils.py0000664000175000017500000000402312665072151025521 0ustar mjeansonmjeanson00000000000000# The MIT License (MIT) # # Copyright (C) 2015 - Antoine Busque # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. from functools import total_ordering @total_ordering class Version: def __init__(self, major, minor, patch, extra=None): self.major = major self.minor = minor self.patch = patch self.extra = extra def __lt__(self, other): if self.major < other.major: return True if self.major > other.major: return False if self.minor < other.minor: return True if self.minor > other.minor: return False return self.patch < other.patch def __eq__(self, other): return ( self.major == other.major and self.minor == other.minor and self.patch == other.patch ) def __repr__(self): version_str = '{}.{}.{}'.format(self.major, self.minor, self.patch) if self.extra: version_str += self.extra return version_str lttnganalyses-0.4.3/lttnganalyses/common/__init__.py0000664000175000017500000000217012665072151024354 0ustar mjeansonmjeanson00000000000000# The MIT License (MIT) # # Copyright (C) 2015 - Antoine Busque # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. lttnganalyses-0.4.3/lttnganalyses/common/format_utils.py0000664000175000017500000000636612665072151025340 0ustar mjeansonmjeanson00000000000000# The MIT License (MIT) # # Copyright (C) 2016 - Antoine Busque # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. import math def format_size(size, binary_prefix=True): """Convert an integral number of bytes to a human-readable string Args: size (int): a non-negative number of bytes binary_prefix (bool, optional): whether to use binary units prefixes, over SI prefixes. 
default: True Returns: The formatted string comprised of the size and units Raises: ValueError: if size < 0 """ if size < 0: raise ValueError('Cannot format negative size') if binary_prefix: base = 1024 units = [' B', 'KiB', 'MiB', 'GiB','TiB', 'PiB', 'EiB', 'ZiB', 'YiB'] else: base = 1000 units = [' B', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB'] if size == 0: exponent = 0 else: exponent = int(math.log(size, base)) if exponent >= len(units): # Don't try and use a unit above YiB/YB exponent = len(units) - 1 size /= math.pow(base, exponent) unit = units[exponent] if exponent == 0: # Don't display fractions of a byte format_str = '{:0.0f} {}' else: format_str = '{:0.2f} {}' return format_str.format(size, unit) def format_prio_list(prio_list): """Format a list of prios into a string of unique prios with count Args: prio_list: a list of PrioEvent objects Returns: The formatted string containing the unique priorities and their count if they occurred more than once. """ prio_count = {} prio_str = None for prio_event in prio_list: prio = prio_event.prio if prio not in prio_count: prio_count[prio] = 0 prio_count[prio] += 1 for prio in sorted(prio_count.keys()): count = prio_count[prio] if count > 1: count_str = ' ({} times)'.format(count) else: count_str = '' if prio_str is None: prio_str = '[{}{}'.format(prio, count_str) else: prio_str += ', {}{}'.format(prio, count_str) if prio_str is None: prio_str = '[]' else: prio_str += ']' return prio_str lttnganalyses-0.4.3/lttnganalyses/__init__.py0000664000175000017500000000015012553274232023060 0ustar mjeansonmjeanson00000000000000"""TODO""" from ._version import get_versions __version__ = get_versions()['version'] del get_versions lttnganalyses-0.4.3/lttnganalyses/linuxautomaton/0000775000175000017500000000000012667421106024042 5ustar mjeansonmjeanson00000000000000lttnganalyses-0.4.3/lttnganalyses/linuxautomaton/mem.py0000664000175000017500000000601312665072151025172 0ustar mjeansonmjeanson00000000000000# The MIT License 
(MIT) # # Copyright (C) 2015 - Julien Desfossez # 2015 - Antoine Busque # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. from . 
import sp class MemStateProvider(sp.StateProvider): def __init__(self, state): cbs = { 'mm_page_alloc': self._process_mm_page_alloc, 'kmem_mm_page_alloc': self._process_mm_page_alloc, 'mm_page_free': self._process_mm_page_free, 'kmem_mm_page_free': self._process_mm_page_free, } super().__init__(state, cbs) def _get_current_proc(self, event): cpu_id = event['cpu_id'] if cpu_id not in self._state.cpus: return None cpu = self._state.cpus[cpu_id] if cpu.current_tid is None: return None return self._state.tids[cpu.current_tid] def _process_mm_page_alloc(self, event): self._state.mm.page_count += 1 # Increment the number of pages allocated during the execution # of all currently syscall io requests for process in self._state.tids.values(): if process.current_syscall is None: continue if process.current_syscall.io_rq: process.current_syscall.io_rq.pages_allocated += 1 current_process = self._get_current_proc(event) if current_process is None: return self._state.send_notification_cb('tid_page_alloc', proc=current_process, cpu_id=event['cpu_id']) def _process_mm_page_free(self, event): if self._state.mm.page_count == 0: return self._state.mm.page_count -= 1 current_process = self._get_current_proc(event) if current_process is None: return self._state.send_notification_cb('tid_page_free', proc=current_process, cpu_id=event['cpu_id']) lttnganalyses-0.4.3/lttnganalyses/linuxautomaton/syscalls.py0000664000175000017500000000533412665072151026256 0ustar mjeansonmjeanson00000000000000# The MIT License (MIT) # # Copyright (C) 2015 - Julien Desfossez # 2015 - Antoine Busque # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, 
subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. from . import sp, sv class SyscallsStateProvider(sp.StateProvider): def __init__(self, state): cbs = { 'syscall_entry': self._process_syscall_entry, 'syscall_exit': self._process_syscall_exit } super().__init__(state, cbs) def _process_syscall_entry(self, event): cpu_id = event['cpu_id'] if cpu_id not in self._state.cpus: return cpu = self._state.cpus[cpu_id] if cpu.current_tid is None: return proc = self._state.tids[cpu.current_tid] proc.current_syscall = sv.SyscallEvent.new_from_entry(event) def _process_syscall_exit(self, event): cpu_id = event['cpu_id'] if cpu_id not in self._state.cpus: return cpu = self._state.cpus[cpu_id] if cpu.current_tid is None: return proc = self._state.tids[cpu.current_tid] current_syscall = proc.current_syscall if current_syscall is None: return current_syscall.process_exit(event) self._state.send_notification_cb('syscall_exit', proc=proc, event=event, cpu_id=cpu_id) # If it's an IO Syscall, the IO state provider will take care of # clearing the current syscall, so only clear here if it's not if current_syscall.name not in sv.SyscallConsts.IO_SYSCALLS: self._state.tids[cpu.current_tid].current_syscall = None lttnganalyses-0.4.3/lttnganalyses/linuxautomaton/automaton.py0000664000175000017500000000557012665072151026432 0ustar mjeansonmjeanson00000000000000# The MIT License (MIT) # # Copyright (C) 2015 
- Julien Desfossez # 2015 - Antoine Busque # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. from .sched import SchedStateProvider from .mem import MemStateProvider from .irq import IrqStateProvider from .syscalls import SyscallsStateProvider from .io import IoStateProvider from .statedump import StatedumpStateProvider from .block import BlockStateProvider from .net import NetStateProvider from .sv import MemoryManagement class State: def __init__(self): self.cpus = {} self.tids = {} self.disks = {} self.mm = MemoryManagement() self._notification_cbs = {} # State changes can be handled differently depending on # version of tracer used, so keep track of it. 
self._tracer_version = None def register_notification_cbs(self, cbs): for name in cbs: if name not in self._notification_cbs: self._notification_cbs[name] = [] self._notification_cbs[name].append(cbs[name]) def send_notification_cb(self, name, **kwargs): if name in self._notification_cbs: for cb in self._notification_cbs[name]: cb(**kwargs) class Automaton: def __init__(self): self._state = State() self._state_providers = [ SchedStateProvider(self._state), MemStateProvider(self._state), IrqStateProvider(self._state), SyscallsStateProvider(self._state), IoStateProvider(self._state), StatedumpStateProvider(self._state), BlockStateProvider(self._state), NetStateProvider(self._state) ] def process_event(self, ev): for sp in self._state_providers: sp.process_event(ev) @property def state(self): return self._state lttnganalyses-0.4.3/lttnganalyses/linuxautomaton/sv.py0000664000175000017500000003612312665072151025051 0ustar mjeansonmjeanson00000000000000# The MIT License (MIT) # # Copyright (C) 2015 - Julien Desfossez # 2015 - Antoine Busque # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. import socket from . import common class StateVariable: pass class Process(): def __init__(self, tid=None, pid=None, comm='', prio=None): self.tid = tid self.pid = pid self.comm = comm self.prio = prio # indexed by fd self.fds = {} self.current_syscall = None # the process scheduled before this one self.prev_tid = None self.last_wakeup = None self.last_waker = None class CPU(): def __init__(self, cpu_id): self.cpu_id = cpu_id self.current_tid = None self.current_hard_irq = None # softirqs use a dict because multiple ones can be raised before # handling. They are indexed by vec, and each entry is a list, # ordered chronologically self.current_softirqs = {} class MemoryManagement(): def __init__(self): self.page_count = 0 class SyscallEvent(): def __init__(self, name, begin_ts): self.name = name self.begin_ts = begin_ts self.end_ts = None self.ret = None self.duration = None # Only applicable to I/O syscalls self.io_rq = None def process_exit(self, event): self.end_ts = event.timestamp # On certain architectures (notably arm32), lttng-modules # versions prior to 2.8 would erroneously trace certain # syscalls (e.g. mmap2) without their return value. In this # case, get() will simply set self.ret to None. These syscalls # with a None return value should simply be ignored down the # line. 
self.ret = event.get('ret') self.duration = self.end_ts - self.begin_ts @classmethod def new_from_entry(cls, event): name = common.get_syscall_name(event) return cls(name, event.timestamp) class Disk(): def __init__(self): # pending block IO Requests, indexed by sector self.pending_requests = {} class FDType(): unknown = 0 disk = 1 net = 2 # not 100% sure they are network FDs (assumed when net_dev_xmit is # called during a write syscall and the type in unknown). maybe_net = 3 @staticmethod def get_fd_type(name, family): if name in SyscallConsts.NET_OPEN_SYSCALLS: if family in SyscallConsts.INET_FAMILIES: return FDType.net if family in SyscallConsts.DISK_FAMILIES: return FDType.disk if name in SyscallConsts.DISK_OPEN_SYSCALLS: return FDType.disk return FDType.unknown class FD(): def __init__(self, fd, filename='unknown', fd_type=FDType.unknown, cloexec=False, family=None): self.fd = fd self.filename = filename self.fd_type = fd_type self.cloexec = cloexec self.family = family @classmethod def new_from_fd(cls, fd): return cls(fd.fd, fd.filename, fd.fd_type, fd.cloexec, fd.family) @classmethod def new_from_open_rq(cls, io_rq): return cls(io_rq.fd, io_rq.filename, io_rq.fd_type, io_rq.cloexec, io_rq.family) class IRQ(): def __init__(self, id, cpu_id, begin_ts=None): self.id = id self.cpu_id = cpu_id self.begin_ts = begin_ts self.end_ts = None @property def duration(self): if not self.end_ts or not self.begin_ts: return None return self.end_ts - self.begin_ts class HardIRQ(IRQ): def __init__(self, id, cpu_id, begin_ts): super().__init__(id, cpu_id, begin_ts) self.ret = None @classmethod def new_from_irq_handler_entry(cls, event): id = event['irq'] cpu_id = event['cpu_id'] begin_ts = event.timestamp return cls(id, cpu_id, begin_ts) class SoftIRQ(IRQ): def __init__(self, id, cpu_id, raise_ts=None, begin_ts=None): super().__init__(id, cpu_id, begin_ts) self.raise_ts = raise_ts @classmethod def new_from_softirq_raise(cls, event): id = event['vec'] cpu_id = event['cpu_id'] 
raise_ts = event.timestamp return cls(id, cpu_id, raise_ts) @classmethod def new_from_softirq_entry(cls, event): id = event['vec'] cpu_id = event['cpu_id'] begin_ts = event.timestamp return cls(id, cpu_id, begin_ts=begin_ts) class IORequest(): # I/O operations OP_OPEN = 1 OP_READ = 2 OP_WRITE = 3 OP_CLOSE = 4 OP_SYNC = 5 # Operation used for requests that both read and write, # e.g. splice and sendfile OP_READ_WRITE = 6 def __init__(self, begin_ts, size, tid, operation): self.begin_ts = begin_ts self.end_ts = None self.duration = None # request size in bytes self.size = size self.operation = operation # tid of process that triggered the rq self.tid = tid # Error number if request failed self.errno = None @staticmethod def is_equivalent_operation(left_op, right_op): """Predicate used to compare equivalence of IO_OPERATION. This method is employed because OP_READ_WRITE behaves like a set containing both OP_READ and OP_WRITE and is therefore equivalent to these operations as well as itself """ if left_op == IORequest.OP_READ_WRITE: return right_op in [IORequest.OP_READ, IORequest.OP_WRITE, IORequest.OP_READ_WRITE] if left_op == IORequest.OP_READ: return right_op in [IORequest.OP_READ, IORequest.OP_READ_WRITE] if left_op == IORequest.OP_WRITE: return right_op in [IORequest.OP_WRITE, IORequest.OP_READ_WRITE] return left_op == right_op class SyscallIORequest(IORequest): def __init__(self, begin_ts, size, tid, operation, syscall_name): super().__init__(begin_ts, None, tid, operation) self.fd = None self.syscall_name = syscall_name # Number of pages alloc'd/freed/written to disk during the rq self.pages_allocated = 0 self.pages_freed = 0 self.pages_written = 0 # Whether kswapd was forced to wakeup during the rq self.woke_kswapd = False def update_from_exit(self, event): self.end_ts = event.timestamp self.duration = self.end_ts - self.begin_ts if event['ret'] < 0: self.errno = -event['ret'] class OpenIORequest(SyscallIORequest): def __init__(self, begin_ts, tid, 
syscall_name, filename, fd_type): super().__init__(begin_ts, None, tid, IORequest.OP_OPEN, syscall_name) # FD set on syscall exit self.fd = None self.filename = filename self.fd_type = fd_type self.family = None self.cloexec = False def update_from_exit(self, event): super().update_from_exit(event) if event['ret'] >= 0: self.fd = event['ret'] @classmethod def new_from_disk_open(cls, event, tid): begin_ts = event.timestamp name = common.get_syscall_name(event) filename = event['filename'] req = cls(begin_ts, tid, name, filename, FDType.disk) req.cloexec = event['flags'] & common.O_CLOEXEC == common.O_CLOEXEC return req @classmethod def new_from_accept(cls, event, tid): # Handle both accept and accept4 begin_ts = event.timestamp name = common.get_syscall_name(event) req = cls(begin_ts, tid, name, 'socket', FDType.net) if 'family' in event: req.family = event['family'] # Set filename to ip:port if INET socket if req.family == socket.AF_INET: req.filename = '%s:%d' % (common.get_v4_addr_str( event['v4addr']), event['sport']) return req @classmethod def new_from_socket(cls, event, tid): begin_ts = event.timestamp req = cls(begin_ts, tid, 'socket', 'socket', FDType.net) if 'family' in event: req.family = event['family'] return req @classmethod def new_from_old_fd(cls, event, tid, old_fd): begin_ts = event.timestamp name = common.get_syscall_name(event) if old_fd is None: filename = 'unknown' fd_type = FDType.unknown else: filename = old_fd.filename fd_type = old_fd.fd_type return cls(begin_ts, tid, name, filename, fd_type) class CloseIORequest(SyscallIORequest): def __init__(self, begin_ts, tid, fd): super().__init__(begin_ts, None, tid, IORequest.OP_CLOSE, 'close') self.fd = fd class ReadWriteIORequest(SyscallIORequest): def __init__(self, begin_ts, size, tid, operation, syscall_name): super().__init__(begin_ts, size, tid, operation, syscall_name) # The size returned on syscall exit, in bytes. 
May differ from # the size initially requested self.returned_size = None # Unused if fd is set self.fd_in = None self.fd_out = None def update_from_exit(self, event): super().update_from_exit(event) ret = event['ret'] if ret >= 0: self.returned_size = ret # Set the size to the returned one if none was set at # entry, as with recvmsg or sendmsg if self.size is None: self.size = ret @classmethod def new_from_splice(cls, event, tid): begin_ts = event.timestamp size = event['len'] req = cls(begin_ts, size, tid, IORequest.OP_READ_WRITE, 'splice') req.fd_in = event['fd_in'] req.fd_out = event['fd_out'] return req @classmethod def new_from_sendfile64(cls, event, tid): begin_ts = event.timestamp size = event['count'] req = cls(begin_ts, size, tid, IORequest.OP_READ_WRITE, 'sendfile64') req.fd_in = event['in_fd'] req.fd_out = event['out_fd'] return req @classmethod def new_from_fd_event(cls, event, tid, size_key): begin_ts = event.timestamp # Some events, like recvmsg or sendmsg, only have size info on return if size_key is not None: size = event[size_key] else: size = None syscall_name = common.get_syscall_name(event) if syscall_name in SyscallConsts.READ_SYSCALLS: operation = IORequest.OP_READ else: operation = IORequest.OP_WRITE req = cls(begin_ts, size, tid, operation, syscall_name) req.fd = event['fd'] return req class SyncIORequest(SyscallIORequest): def __init__(self, begin_ts, size, tid, syscall_name): super().__init__(begin_ts, size, tid, IORequest.OP_SYNC, syscall_name) @classmethod def new_from_sync(cls, event, tid): begin_ts = event.timestamp size = None return cls(begin_ts, size, tid, 'sync') @classmethod def new_from_fsync(cls, event, tid): # Also handle fdatasync begin_ts = event.timestamp size = None syscall_name = common.get_syscall_name(event) req = cls(begin_ts, size, tid, syscall_name) req.fd = event['fd'] return req @classmethod def new_from_sync_file_range(cls, event, tid): begin_ts = event.timestamp size = event['nbytes'] req = cls(begin_ts, size, 
tid, 'sync_file_range') req.fd = event['fd'] return req class BlockIORequest(IORequest): # Logical sector size in bytes, according to the kernel SECTOR_SIZE = 512 def __init__(self, begin_ts, tid, operation, dev, sector, nr_sector): size = nr_sector * BlockIORequest.SECTOR_SIZE super().__init__(begin_ts, size, tid, operation) self.dev = dev self.sector = sector self.nr_sector = nr_sector def update_from_rq_complete(self, event): self.end_ts = event.timestamp self.duration = self.end_ts - self.begin_ts @classmethod def new_from_rq_issue(cls, event): begin_ts = event.timestamp dev = event['dev'] sector = event['sector'] nr_sector = event['nr_sector'] tid = event['tid'] # An even rwbs indicates read operation, odd indicates write if event['rwbs'] % 2 == 0: operation = IORequest.OP_READ else: operation = IORequest.OP_WRITE return cls(begin_ts, tid, operation, dev, sector, nr_sector) class BlockRemapRequest(): def __init__(self, dev, sector, old_dev, old_sector): self.dev = dev self.sector = sector self.old_dev = old_dev self.old_sector = old_sector class SyscallConsts(): # TODO: decouple socket/family logic from this class INET_FAMILIES = [socket.AF_INET, socket.AF_INET6] DISK_FAMILIES = [socket.AF_UNIX] # list nof syscalls that open a FD on disk (in the exit_syscall event) DISK_OPEN_SYSCALLS = ['open', 'openat'] # list of syscalls that open a FD on the network # (in the exit_syscall event) NET_OPEN_SYSCALLS = ['socket'] # list of syscalls that can duplicate a FD DUP_OPEN_SYSCALLS = ['fcntl', 'dup', 'dup2', 'dup3'] SYNC_SYSCALLS = ['sync', 'sync_file_range', 'fsync', 'fdatasync'] # merge the 3 open lists OPEN_SYSCALLS = DISK_OPEN_SYSCALLS + NET_OPEN_SYSCALLS + DUP_OPEN_SYSCALLS # list of syscalls that close a FD (in the 'fd =' field) CLOSE_SYSCALLS = ['close'] # list of syscall that read on a FD, value in the exit_syscall following READ_SYSCALLS = ['read', 'recvmsg', 'recvfrom', 'readv', 'pread', 'pread64', 'preadv'] # list of syscall that write on a FD, value in the 
exit_syscall following WRITE_SYSCALLS = ['write', 'sendmsg', 'sendto', 'writev', 'pwrite', 'pwrite64', 'pwritev'] # list of syscalls that both read and write on two FDs READ_WRITE_SYSCALLS = ['splice', 'sendfile64'] # All I/O related syscalls IO_SYSCALLS = OPEN_SYSCALLS + CLOSE_SYSCALLS + READ_SYSCALLS + \ WRITE_SYSCALLS + SYNC_SYSCALLS + READ_WRITE_SYSCALLS lttnganalyses-0.4.3/lttnganalyses/linuxautomaton/statedump.py0000664000175000017500000001134612665072151026427 0ustar mjeansonmjeanson00000000000000# The MIT License (MIT) # # Copyright (C) 2015 - Julien Desfossez # 2015 - Antoine Busque # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. from . 
import sp, sv, common class StatedumpStateProvider(sp.StateProvider): def __init__(self, state): cbs = { 'lttng_statedump_process_state': self._process_lttng_statedump_process_state, 'lttng_statedump_file_descriptor': self._process_lttng_statedump_file_descriptor } super().__init__(state, cbs) def _process_lttng_statedump_process_state(self, event): tid = event['tid'] pid = event['pid'] name = event['name'] # prio is not in the payload for LTTng-modules < 2.8. Using # get() will set it to None if the key is not found prio = event.get('prio') if tid not in self._state.tids: self._state.tids[tid] = sv.Process(tid=tid) proc = self._state.tids[tid] # Even if the process got created earlier, some info might be # missing, add it now. proc.pid = pid proc.comm = name # However don't override the prio value if we already got the # information from sched_* events. if proc.prio is None: proc.prio = prio if pid != tid: # create the parent if pid not in self._state.tids: # FIXME: why is the parent's name set to that of the # child? does that make sense? # tid == pid for the parent process self._state.tids[pid] = sv.Process(tid=pid, pid=pid, comm=name) parent = self._state.tids[pid] # If the thread had opened FDs, they need to be assigned # to the parent. 
StatedumpStateProvider._assign_fds_to_parent(proc, parent) self._state.send_notification_cb('create_parent_proc', proc=proc, parent_proc=parent) def _process_lttng_statedump_file_descriptor(self, event): pid = event['pid'] fd = event['fd'] filename = event['filename'] cloexec = event['flags'] & common.O_CLOEXEC == common.O_CLOEXEC if pid not in self._state.tids: self._state.tids[pid] = sv.Process(tid=pid, pid=pid) proc = self._state.tids[pid] if fd not in proc.fds: proc.fds[fd] = sv.FD(fd, filename, sv.FDType.unknown, cloexec) self._state.send_notification_cb('create_fd', fd=fd, parent_proc=proc, timestamp=event.timestamp, cpu_id=event['cpu_id']) else: # just fix the filename proc.fds[fd].filename = filename self._state.send_notification_cb('update_fd', fd=fd, parent_proc=proc, timestamp=event.timestamp, cpu_id=event['cpu_id']) @staticmethod def _assign_fds_to_parent(proc, parent): if proc.fds: toremove = [] for fd in proc.fds: if fd not in parent.fds: parent.fds[fd] = proc.fds[fd] else: # best effort to fix the filename if not parent.fds[fd].filename: parent.fds[fd].filename = proc.fds[fd].filename toremove.append(fd) for fd in toremove: del proc.fds[fd] lttnganalyses-0.4.3/lttnganalyses/linuxautomaton/__init__.py0000664000175000017500000000217512665072151026160 0ustar mjeansonmjeanson00000000000000# The MIT License (MIT) # # Copyright (C) 2015 - Julien Desfossez # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. 
# # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. lttnganalyses-0.4.3/lttnganalyses/linuxautomaton/irq.py0000664000175000017500000001102712665072151025210 0ustar mjeansonmjeanson00000000000000# The MIT License (MIT) # # Copyright (C) 2015 - Julien Desfossez # 2015 - Antoine Busque # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. from . 
from . import sp, sv


class IrqStateProvider(sp.StateProvider):
    """Tracks hard-IRQ and soft-IRQ activity per CPU.

    Hard IRQs: one in-flight handler per CPU (``cpu.current_hard_irq``).
    Soft IRQs: a per-CPU, per-vector FIFO of raised-but-not-finished
    requests (``cpu.current_softirqs[vec]``), since a raise can occur
    while the same vector is already being serviced.
    """

    def __init__(self, state):
        # Map tracepoint names to handler methods; dispatch is done by
        # the sp.StateProvider base class.
        cbs = {
            'irq_handler_entry': self._process_irq_handler_entry,
            'irq_handler_exit': self._process_irq_handler_exit,
            'softirq_raise': self._process_softirq_raise,
            'softirq_entry': self._process_softirq_entry,
            'softirq_exit': self._process_softirq_exit
        }

        super().__init__(state, cbs)

    def _get_cpu(self, cpu_id):
        """Return the CPU state object for cpu_id, creating it lazily."""
        if cpu_id not in self._state.cpus:
            self._state.cpus[cpu_id] = sv.CPU(cpu_id)

        return self._state.cpus[cpu_id]

    # Hard IRQs
    def _process_irq_handler_entry(self, event):
        """Open a new hard-IRQ record on the event's CPU."""
        cpu = self._get_cpu(event['cpu_id'])
        irq = sv.HardIRQ.new_from_irq_handler_entry(event)
        cpu.current_hard_irq = irq

        self._state.send_notification_cb('irq_handler_entry',
                                         id=irq.id,
                                         irq_name=event['name'])

    def _process_irq_handler_exit(self, event):
        """Close the CPU's current hard-IRQ record and notify listeners.

        An exit whose irq number does not match the open record (or with
        no open record at all, e.g. a trace that starts mid-handler) is
        discarded and the per-CPU state is reset.
        """
        cpu = self._get_cpu(event['cpu_id'])
        if cpu.current_hard_irq is None or \
           cpu.current_hard_irq.id != event['irq']:
            cpu.current_hard_irq = None
            return

        cpu.current_hard_irq.end_ts = event.timestamp
        cpu.current_hard_irq.ret = event['ret']

        self._state.send_notification_cb('irq_handler_exit',
                                         hard_irq=cpu.current_hard_irq)
        cpu.current_hard_irq = None

    # SoftIRQs
    def _process_softirq_raise(self, event):
        """Queue a raised soft-IRQ for this CPU/vector."""
        cpu = self._get_cpu(event['cpu_id'])
        vec = event['vec']

        if vec not in cpu.current_softirqs:
            cpu.current_softirqs[vec] = []

        # Don't append a SoftIRQ object if one has already been raised,
        # because they are level-triggered. The only exception to this
        # is if the first SoftIRQ object already had a begin_ts which
        # means this raise was triggered after its entry, and will be
        # handled in the following softirq_entry
        if cpu.current_softirqs[vec] and \
           cpu.current_softirqs[vec][0].begin_ts is None:
            return

        irq = sv.SoftIRQ.new_from_softirq_raise(event)
        cpu.current_softirqs[vec].append(irq)

    def _process_softirq_entry(self, event):
        """Stamp begin_ts on the oldest queued soft-IRQ of this vector.

        NOTE(review): this indexes ``cpu.current_softirqs[vec]``
        directly; if an entry arrives for a vector that was never
        raised on this CPU, that lookup raises KeyError unless
        ``sv.CPU.current_softirqs`` is a defaultdict — confirm in sv.
        """
        cpu = self._get_cpu(event['cpu_id'])
        vec = event['vec']

        if cpu.current_softirqs[vec]:
            cpu.current_softirqs[vec][0].begin_ts = event.timestamp
        else:
            # SoftIRQ entry without a corresponding raise
            irq = sv.SoftIRQ.new_from_softirq_entry(event)
            cpu.current_softirqs[vec].append(irq)

    def _process_softirq_exit(self, event):
        """Complete the oldest queued soft-IRQ, notify, and dequeue it."""
        cpu = self._get_cpu(event['cpu_id'])
        vec = event['vec']
        # List of enqueued softirqs for the current cpu/vec
        # combination. None if vec is not found in the dictionary.
        current_softirqs = cpu.current_softirqs.get(vec)

        # Ignore the exit if either vec was not in the cpu's dict or
        # if its irq list was empty (i.e. no matching raise).
        if not current_softirqs:
            return

        current_softirqs[0].end_ts = event.timestamp
        self._state.send_notification_cb('softirq_exit',
                                         softirq=current_softirqs[0])
        del current_softirqs[0]
from . import sp, sv
from ..common import version_utils


class SchedStateProvider(sp.StateProvider):
    """Tracks scheduler activity: which tid runs on each CPU, task
    wakeup/waker chains, priority changes, and process lifecycle
    (fork/exec) including FD-table inheritance on fork."""

    # The priority offset for sched_wak* events was fixed in
    # lttng-modules 2.7.1 upwards
    PRIO_OFFSET_FIX_VERSION = version_utils.Version(2, 7, 1)

    def __init__(self, state):
        # Tracepoint-name -> handler dispatch table; the base class
        # routes events to these callbacks.
        cbs = {
            'sched_switch': self._process_sched_switch,
            'sched_migrate_task': self._process_sched_migrate_task,
            'sched_wakeup': self._process_sched_wakeup,
            'sched_wakeup_new': self._process_sched_wakeup,
            'sched_waking': self._process_sched_wakeup,
            'sched_process_fork': self._process_sched_process_fork,
            'sched_process_exec': self._process_sched_process_exec,
            'sched_pi_setprio': self._process_sched_pi_setprio,
        }

        super().__init__(state, cbs)

    def _sched_switch_per_cpu(self, cpu_id, next_tid):
        """Record the tid now running on cpu_id (None for swapper)."""
        if cpu_id not in self._state.cpus:
            self._state.cpus[cpu_id] = sv.CPU(cpu_id)

        cpu = self._state.cpus[cpu_id]

        # exclude swapper process
        if next_tid == 0:
            cpu.current_tid = None
        else:
            cpu.current_tid = next_tid

    def _create_proc(self, tid):
        """Ensure a Process object exists for tid in the global tid map."""
        if tid not in self._state.tids:
            if tid == 0:
                # special case for the swapper
                self._state.tids[tid] = sv.Process(tid=tid, pid=0)
            else:
                self._state.tids[tid] = sv.Process(tid=tid)

    def _sched_switch_per_tid(self, next_tid, next_comm, prev_tid):
        """Update per-task state for a context switch."""
        # Instantiate processes if new
        self._create_proc(prev_tid)
        self._create_proc(next_tid)
        next_proc = self._state.tids[next_tid]
        next_proc.comm = next_comm
        next_proc.prev_tid = prev_tid

    def _check_prio_changed(self, timestamp, tid, prio):
        """Emit a prio_changed notification if tid's priority moved."""
        # Ignore swapper
        if tid == 0:
            return

        proc = self._state.tids[tid]

        if proc.prio != prio:
            proc.prio = prio
            self._state.send_notification_cb(
                'prio_changed', timestamp=timestamp, tid=tid, prio=prio)

    def _process_sched_switch(self, event):
        """Handle a context switch: update CPU/task state, resolve the
        waker of the incoming task, notify, then clear wakeup state."""
        timestamp = event.timestamp
        cpu_id = event['cpu_id']
        next_tid = event['next_tid']
        next_comm = event['next_comm']
        next_prio = event['next_prio']
        prev_tid = event['prev_tid']
        prev_prio = event['prev_prio']

        self._sched_switch_per_cpu(cpu_id, next_tid)
        self._sched_switch_per_tid(next_tid, next_comm, prev_tid)
        self._check_prio_changed(timestamp, prev_tid, prev_prio)
        self._check_prio_changed(timestamp, next_tid, next_prio)

        wakee_proc = self._state.tids[next_tid]
        waker_proc = None
        if wakee_proc.last_waker is not None:
            waker_proc = self._state.tids[wakee_proc.last_waker]

        cb_data = {
            'timestamp': timestamp,
            'cpu_id': cpu_id,
            'prev_tid': prev_tid,
            'next_tid': next_tid,
            'next_comm': next_comm,
            'wakee_proc': wakee_proc,
            'waker_proc': waker_proc,
        }

        self._state.send_notification_cb('sched_switch_per_cpu', **cb_data)
        self._state.send_notification_cb('sched_switch_per_tid', **cb_data)

        # Wakeup info is consumed by this switch; reset it so a later
        # switch does not reuse a stale waker.
        wakee_proc.last_wakeup = None
        wakee_proc.last_waker = None

    def _process_sched_migrate_task(self, event):
        """Handle a task migrating between CPUs; creates the Process
        lazily if it was never seen before."""
        tid = event['tid']
        prio = event['prio']

        if tid not in self._state.tids:
            proc = sv.Process()
            proc.tid = tid
            proc.comm = event['comm']
            self._state.tids[tid] = proc
        else:
            proc = self._state.tids[tid]

        self._state.send_notification_cb(
            'sched_migrate_task', proc=proc, cpu_id=event['cpu_id'])
        self._check_prio_changed(event.timestamp, tid, prio)

    def _process_sched_wakeup(self, event):
        """Handle sched_wakeup/sched_wakeup_new/sched_waking: record the
        first wakeup timestamp and the waking tid for latency analysis."""
        target_cpu = event['target_cpu']
        current_cpu = event['cpu_id']
        prio = event['prio']
        tid = event['tid']

        # Older tracers report sched_wak* priorities offset by 100;
        # compensate so prio values are comparable across versions.
        if self._state.tracer_version < self.PRIO_OFFSET_FIX_VERSION:
            prio -= 100

        if target_cpu not in self._state.cpus:
            self._state.cpus[target_cpu] = sv.CPU(target_cpu)

        if current_cpu not in self._state.cpus:
            self._state.cpus[current_cpu] = sv.CPU(current_cpu)

        # If the TID is already executing on a CPU, ignore this wakeup
        for cpu_id in self._state.cpus:
            cpu = self._state.cpus[cpu_id]
            if cpu.current_tid == tid:
                return

        if tid not in self._state.tids:
            proc = sv.Process()
            proc.tid = tid
            self._state.tids[tid] = proc

        self._check_prio_changed(event.timestamp, tid, prio)

        # A process can be woken up multiple times, only record
        # the first one
        if self._state.tids[tid].last_wakeup is None:
            self._state.tids[tid].last_wakeup = event.timestamp
            if self._state.cpus[current_cpu].current_tid is not None:
                self._state.tids[tid].last_waker = \
                    self._state.cpus[current_cpu].current_tid

    def _process_sched_process_fork(self, event):
        """Handle fork: create the child Process and clone the parent's
        FD table into it (emitting create_fd for each inherited FD)."""
        child_tid = event['child_tid']
        child_pid = event['child_pid']
        child_comm = event['child_comm']
        parent_pid = event['parent_pid']
        # NOTE(review): parent_tid is read from the 'parent_pid' field,
        # not 'parent_tid'. FD lookup below uses parent_pid (the FD
        # table lives on the process), so this may be deliberate — but
        # confirm against the sched_process_fork event layout.
        parent_tid = event['parent_pid']
        parent_comm = event['parent_comm']

        if parent_tid not in self._state.tids:
            self._state.tids[parent_tid] = sv.Process(
                parent_tid, parent_pid, parent_comm)
        else:
            self._state.tids[parent_tid].pid = parent_pid
            self._state.tids[parent_tid].comm = parent_comm

        parent_proc = self._state.tids[parent_pid]
        child_proc = sv.Process(child_tid, child_pid, child_comm)

        for fd in parent_proc.fds:
            old_fd = parent_proc.fds[fd]
            child_proc.fds[fd] = sv.FD.new_from_fd(old_fd)
            # Note: the parent_proc key in the notification function
            # refers to the parent of the FD, which in this case is
            # the child_proc created by the fork
            self._state.send_notification_cb(
                'create_fd', fd=fd, parent_proc=child_proc,
                timestamp=event.timestamp, cpu_id=event['cpu_id'])

        self._state.tids[child_tid] = child_proc

    def _process_sched_process_exec(self, event):
        """Handle exec: refresh comm and close all close-on-exec FDs."""
        tid = event['tid']

        if tid not in self._state.tids:
            proc = sv.Process()
            proc.tid = tid
            self._state.tids[tid] = proc
        else:
            proc = self._state.tids[tid]

        # Use LTTng procname context if available
        if 'procname' in event:
            proc.comm = event['procname']

        # Collect first, then delete: can't mutate the dict while
        # iterating over it.
        toremove = []
        for fd in proc.fds:
            if proc.fds[fd].cloexec:
                toremove.append(fd)

        for fd in toremove:
            self._state.send_notification_cb(
                'close_fd', fd=fd, parent_proc=proc,
                timestamp=event.timestamp, cpu_id=event['cpu_id'])
            del proc.fds[fd]

    def _process_sched_pi_setprio(self, event):
        """Handle a priority-inheritance boost/restore."""
        timestamp = event.timestamp
        newprio = event['newprio']
        tid = event['tid']
        self._check_prio_changed(timestamp, tid, newprio)
Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. import socket from babeltrace import CTFScope from . 
import socket

from babeltrace import CTFScope

from . import sp, sv, common


class IoStateProvider(sp.StateProvider):
    """Tracks I/O activity: per-process FD tables and in-flight I/O
    requests, built from syscall entry/exit pairs plus mm/writeback
    tracepoints that attribute page activity to the current syscall."""

    def __init__(self, state):
        cbs = {
            'syscall_entry': self._process_syscall_entry,
            'syscall_exit': self._process_syscall_exit,
            'syscall_entry_connect': self._process_connect,
            'writeback_pages_written': self._process_writeback_pages_written,
            'mm_vmscan_wakeup_kswapd': self._process_mm_vmscan_wakeup_kswapd,
            'mm_page_free': self._process_mm_page_free
        }

        super().__init__(state, cbs)

    def _process_syscall_entry(self, event):
        """Dispatch an I/O syscall entry to the matching _track_* helper."""
        # Only handle IO Syscalls
        name = common.get_syscall_name(event)
        if name not in sv.SyscallConsts.IO_SYSCALLS:
            return

        cpu_id = event['cpu_id']
        if cpu_id not in self._state.cpus:
            return

        cpu = self._state.cpus[cpu_id]
        if cpu.current_tid is None:
            return

        proc = self._state.tids[cpu.current_tid]

        # check if we can fix the pid from a context
        self._fix_context_pid(event, proc)

        if name in sv.SyscallConsts.OPEN_SYSCALLS:
            self._track_open(event, name, proc)
        elif name in sv.SyscallConsts.CLOSE_SYSCALLS:
            self._track_close(event, name, proc)
        elif name in sv.SyscallConsts.READ_SYSCALLS or \
                name in sv.SyscallConsts.WRITE_SYSCALLS:
            self._track_read_write(event, name, proc)
        elif name in sv.SyscallConsts.SYNC_SYSCALLS:
            self._track_sync(event, name, proc)

    def _process_syscall_exit(self, event):
        """Finalize the I/O request opened by the matching syscall entry."""
        cpu_id = event['cpu_id']
        if cpu_id not in self._state.cpus:
            return

        cpu = self._state.cpus[cpu_id]
        if cpu.current_tid is None:
            return

        proc = self._state.tids[cpu.current_tid]
        current_syscall = proc.current_syscall
        if current_syscall is None:
            return

        name = current_syscall.name
        if name not in sv.SyscallConsts.IO_SYSCALLS:
            return

        self._track_io_rq_exit(event, proc)

        # The syscall is done; clear it so later events are not
        # attributed to it.
        proc.current_syscall = None

    def _process_connect(self, event):
        """On connect(), label an existing socket FD with 'addr:port'."""
        cpu_id = event['cpu_id']
        if cpu_id not in self._state.cpus:
            return

        cpu = self._state.cpus[cpu_id]
        if cpu.current_tid is None:
            return

        proc = self._state.tids[cpu.current_tid]
        parent_proc = self._get_parent_proc(proc)

        # FIXME: handle on syscall_exit_connect only when succesful
        if 'family' in event and event['family'] == socket.AF_INET:
            fd = event['fd']
            if fd in parent_proc.fds:
                parent_proc.fds[fd].filename = (
                    '%s:%d' % (common.get_v4_addr_str(event['v4addr']),
                               event['dport']))

    def _process_writeback_pages_written(self, event):
        """Attribute written-back pages to every CPU's current I/O syscall."""
        for cpu in self._state.cpus.values():
            if cpu.current_tid is None:
                continue

            current_syscall = self._state.tids[cpu.current_tid].current_syscall
            if current_syscall is None:
                continue

            if current_syscall.io_rq:
                current_syscall.io_rq.pages_written += event['pages']

    def _process_mm_vmscan_wakeup_kswapd(self, event):
        """Flag the current I/O request as having woken kswapd."""
        cpu_id = event['cpu_id']
        if cpu_id not in self._state.cpus:
            return

        cpu = self._state.cpus[cpu_id]
        if cpu.current_tid is None:
            return

        current_syscall = self._state.tids[cpu.current_tid].current_syscall
        if current_syscall is None:
            return

        if current_syscall.io_rq:
            current_syscall.io_rq.woke_kswapd = True

    def _process_mm_page_free(self, event):
        """Count freed pages against I/O requests that woke kswapd."""
        for cpu in self._state.cpus.values():
            if cpu.current_tid is None:
                continue

            proc = self._state.tids[cpu.current_tid]

            # if the current process is kswapd0, we need to
            # attribute the page freed to the process that
            # woke it up.
            if proc.comm == 'kswapd0' and proc.prev_tid > 0:
                proc = self._state.tids[proc.prev_tid]

            current_syscall = proc.current_syscall
            if current_syscall is None:
                continue

            if current_syscall.io_rq and current_syscall.io_rq.woke_kswapd:
                current_syscall.io_rq.pages_freed += 1

    def _track_open(self, event, name, proc):
        """Create the OpenIORequest matching an FD-creating syscall."""
        current_syscall = proc.current_syscall
        if name in sv.SyscallConsts.DISK_OPEN_SYSCALLS:
            current_syscall.io_rq = sv.OpenIORequest.new_from_disk_open(
                event, proc.tid)
        elif name in ['accept', 'accept4']:
            current_syscall.io_rq = sv.OpenIORequest.new_from_accept(
                event, proc.tid)
        elif name == 'socket':
            current_syscall.io_rq = sv.OpenIORequest.new_from_socket(
                event, proc.tid)
        elif name in sv.SyscallConsts.DUP_OPEN_SYSCALLS:
            self._track_dup(event, name, proc)

    def _track_dup(self, event, name, proc):
        """Handle dup/dup2/dup3/fcntl(F_DUPFD): duplicate an existing FD."""
        current_syscall = proc.current_syscall

        # If the process that triggered the io_rq is a thread,
        # its FDs are that of the parent process
        parent_proc = self._get_parent_proc(proc)
        fds = parent_proc.fds

        if name == 'dup':
            oldfd = event['fildes']
        elif name in ['dup2', 'dup3']:
            oldfd = event['oldfd']
            newfd = event['newfd']
            # dup2/dup3 implicitly close newfd if it was open.
            if newfd in fds:
                self._close_fd(parent_proc, newfd, event.timestamp,
                               event['cpu_id'])
        elif name == 'fcntl':
            # Only handle if cmd == F_DUPFD (0)
            if event['cmd'] != 0:
                return

            oldfd = event['fd']

        old_file = None
        if oldfd in fds:
            old_file = fds[oldfd]

        current_syscall.io_rq = sv.OpenIORequest.new_from_old_fd(
            event, proc.tid, old_file)

        if name == 'dup3':
            cloexec = event['flags'] & common.O_CLOEXEC == common.O_CLOEXEC
            current_syscall.io_rq.cloexec = cloexec

    def _track_close(self, event, name, proc):
        """Create the CloseIORequest for a close() entry."""
        proc.current_syscall.io_rq = sv.CloseIORequest(
            event.timestamp, proc.tid, event['fd'])

    def _track_read_write(self, event, name, proc):
        """Create a ReadWriteIORequest; size_key picks the event field
        holding the request size, which varies per syscall."""
        current_syscall = proc.current_syscall

        if name == 'splice':
            current_syscall.io_rq = sv.ReadWriteIORequest.new_from_splice(
                event, proc.tid)
            return
        elif name == 'sendfile64':
            current_syscall.io_rq = sv.ReadWriteIORequest.new_from_sendfile64(
                event, proc.tid)
            return

        if name in ['writev', 'pwritev', 'readv', 'preadv']:
            size_key = 'vlen'
        elif name == 'recvfrom':
            size_key = 'size'
        elif name == 'sendto':
            size_key = 'len'
        elif name in ['recvmsg', 'sendmsg']:
            size_key = None
        else:
            size_key = 'count'

        current_syscall.io_rq = sv.ReadWriteIORequest.new_from_fd_event(
            event, proc.tid, size_key)

    def _track_sync(self, event, name, proc):
        """Create the SyncIORequest matching a sync-family syscall."""
        current_syscall = proc.current_syscall

        if name == 'sync':
            current_syscall.io_rq = sv.SyncIORequest.new_from_sync(
                event, proc.tid)
        elif name in ['fsync', 'fdatasync']:
            current_syscall.io_rq = sv.SyncIORequest.new_from_fsync(
                event, proc.tid)
        elif name == 'sync_file_range':
            current_syscall.io_rq = sv.SyncIORequest.new_from_sync_file_range(
                event, proc.tid)

    def _track_io_rq_exit(self, event, proc):
        """Complete the pending io_rq at syscall exit; create/close FDs
        depending on the request type and return value."""
        ret = event['ret']
        cpu_id = event['cpu_id']
        io_rq = proc.current_syscall.io_rq
        # io_rq can be None in the case of fcntl when cmd is not
        # F_DUPFD, in which case we disregard the syscall as it did
        # not open any FD
        if io_rq is None:
            return

        io_rq.update_from_exit(event)

        if ret >= 0:
            self._create_fd(proc, io_rq, cpu_id)

        parent_proc = self._get_parent_proc(proc)
        self._state.send_notification_cb('io_rq_exit',
                                         io_rq=io_rq,
                                         proc=proc,
                                         parent_proc=parent_proc,
                                         cpu_id=cpu_id)

        if isinstance(io_rq, sv.CloseIORequest) and ret == 0:
            self._close_fd(proc, io_rq.fd, io_rq.end_ts, cpu_id)

    def _create_fd(self, proc, io_rq, cpu_id):
        """Register new FD(s) created by io_rq on the FD-owning process
        (read/write requests may create fd_in/fd_out, e.g. splice)."""
        parent_proc = self._get_parent_proc(proc)

        if io_rq.fd is not None and io_rq.fd not in parent_proc.fds:
            if isinstance(io_rq, sv.OpenIORequest):
                parent_proc.fds[io_rq.fd] = sv.FD.new_from_open_rq(io_rq)
            else:
                parent_proc.fds[io_rq.fd] = sv.FD(io_rq.fd)

            self._state.send_notification_cb('create_fd',
                                             fd=io_rq.fd,
                                             parent_proc=parent_proc,
                                             timestamp=io_rq.end_ts,
                                             cpu_id=cpu_id)
        elif isinstance(io_rq, sv.ReadWriteIORequest):
            if io_rq.fd_in is not None and io_rq.fd_in not in parent_proc.fds:
                parent_proc.fds[io_rq.fd_in] = sv.FD(io_rq.fd_in)
                self._state.send_notification_cb('create_fd',
                                                 fd=io_rq.fd_in,
                                                 parent_proc=parent_proc,
                                                 timestamp=io_rq.end_ts,
                                                 cpu_id=cpu_id)

            if io_rq.fd_out is not None and \
               io_rq.fd_out not in parent_proc.fds:
                parent_proc.fds[io_rq.fd_out] = sv.FD(io_rq.fd_out)
                self._state.send_notification_cb('create_fd',
                                                 fd=io_rq.fd_out,
                                                 parent_proc=parent_proc,
                                                 timestamp=io_rq.end_ts,
                                                 cpu_id=cpu_id)

    def _close_fd(self, proc, fd, timestamp, cpu_id):
        """Notify listeners and drop fd from the FD-owning process."""
        parent_proc = self._get_parent_proc(proc)
        self._state.send_notification_cb('close_fd',
                                         fd=fd,
                                         parent_proc=parent_proc,
                                         timestamp=timestamp,
                                         cpu_id=cpu_id)
        del parent_proc.fds[fd]

    def _get_parent_proc(self, proc):
        """Return the process owning proc's FD table (the thread-group
        leader for threads, proc itself otherwise)."""
        if proc.pid is not None and proc.tid != proc.pid:
            parent_proc = self._state.tids[proc.pid]
        else:
            parent_proc = proc

        return parent_proc

    def _fix_context_pid(self, event, proc):
        """If the trace carries a 'pid' stream context, use it to learn
        proc's real pid and materialize its parent process."""
        for context in event.field_list_with_scope(
                CTFScope.STREAM_EVENT_CONTEXT):
            if context != 'pid':
                continue
            # make sure the 'pid' field is not also in the event
            # payload, otherwise we might clash
            for context in event.field_list_with_scope(
                    CTFScope.EVENT_FIELDS):
                if context == 'pid':
                    return

            if proc.pid is None:
                proc.pid = event['pid']

            if event['pid'] != proc.tid:
                proc.pid = event['pid']
                parent_proc = sv.Process(proc.pid, proc.pid, proc.comm,
                                         proc.prio)
                self._state.tids[parent_proc.pid] = parent_proc
class StateProvider:
    """Base class for event-driven state providers.

    A provider is built from a dict mapping tracepoint names to
    callbacks; process_event() routes each event accordingly. Syscall
    events are special-cased: any entry/exit event name (old 'sys_*' /
    'exit_syscall' or new 'syscall_entry_*' / 'syscall_exit_*' schemes)
    is funnelled to the generic 'syscall_entry' / 'syscall_exit'
    callbacks when the subclass registered them.
    """

    def __init__(self, state, cbs):
        self._state = state
        self._cbs = cbs

    def process_event(self, ev):
        """Dispatch ev to its registered callback, if any."""
        name = ev.name
        callback = self._cbs.get(name)

        if callback is None:
            # Fall back to the generic syscall handlers for any
            # syscall-shaped event name.
            if name.startswith(('sys_', 'syscall_entry_')):
                callback = self._cbs.get('syscall_entry')
            elif name.startswith(('exit_syscall', 'syscall_exit_')):
                callback = self._cbs.get('syscall_exit')

        if callback is not None:
            callback(ev)
from . import sp, sv


class NetStateProvider(sp.StateProvider):
    """Tracks network device activity (transmit and receive) and tags
    FDs written to during a transmit as probably-network FDs."""

    def __init__(self, state):
        callbacks = {
            'net_dev_xmit': self._process_net_dev_xmit,
            'netif_receive_skb': self._process_netif_receive_skb,
        }

        super().__init__(state, callbacks)

    def _process_net_dev_xmit(self, event):
        """Notify listeners of a transmit, then mark the FD of the
        in-flight write syscall (if any) as a likely network FD."""
        cpu_id = event['cpu_id']
        self._state.send_notification_cb('net_dev_xmit',
                                         iface_name=event['name'],
                                         sent_bytes=event['len'],
                                         cpu_id=cpu_id)

        cpu = self._state.cpus.get(cpu_id)
        if cpu is None or cpu.current_tid is None:
            return

        proc = self._state.tids[cpu.current_tid]
        syscall = proc.current_syscall
        if syscall is None:
            return

        # Threads share the FD table of their thread-group leader.
        if proc.pid is not None and proc.pid != proc.tid:
            proc = self._state.tids[proc.pid]

        if syscall.name not in sv.SyscallConsts.WRITE_SYSCALLS:
            return

        # TODO: find a way to set fd_type on the write rq to allow
        # setting FD Type if FD hasn't yet been created
        fd = syscall.io_rq.fd
        fd_obj = proc.fds.get(fd)
        if fd_obj is not None and fd_obj.fd_type == sv.FDType.unknown:
            fd_obj.fd_type = sv.FDType.maybe_net

    def _process_netif_receive_skb(self, event):
        """Notify listeners that a packet was received on an interface."""
        self._state.send_notification_cb('netif_receive_skb',
                                         iface_name=event['name'],
                                         recv_bytes=event['len'],
                                         cpu_id=event['cpu_id'])
import re
import time
import datetime
import socket
import struct

NSEC_PER_SEC = 1000000000
NSEC_PER_MSEC = 1000000
NSEC_PER_USEC = 1000

BYTES_PER_TIB = 1099511627776
BYTES_PER_GIB = 1073741824
BYTES_PER_MIB = 1048576
BYTES_PER_KIB = 1024

O_CLOEXEC = 0o2000000


def get_syscall_name(event):
    """Return the syscall name of event with its tracer prefix stripped.

    Handles both the old 'sys_*' and the new 'syscall_entry_*' event
    naming schemes.
    """
    name = event.name

    if name.startswith('sys_'):
        # Strip first 4 because sys_ is 4 chars long
        return name[4:]

    # Name begins with syscall_entry_ (14 chars long)
    return name[14:]


def is_multi_day_trace_collection(handles):
    """Return True if any trace in handles spans more than one local day."""
    time_begin = None

    for handle in handles.values():
        if time_begin is None:
            time_begin = time.localtime(handle.timestamp_begin / NSEC_PER_SEC)
            year_begin = time_begin.tm_year
            month_begin = time_begin.tm_mon
            day_begin = time_begin.tm_mday

        time_end = time.localtime(handle.timestamp_end / NSEC_PER_SEC)
        year_end = time_end.tm_year
        month_end = time_end.tm_mon
        day_end = time_end.tm_mday

        if year_begin != year_end:
            return True
        elif month_begin != month_end:
            return True
        elif day_begin != day_end:
            return True

    return False


def trace_collection_date(handles):
    """Return (year, month, day) of the collection's first handle, or
    None for a multi-day collection."""
    if is_multi_day_trace_collection(handles):
        return None

    for handle in handles.values():
        trace_time = time.localtime(handle.timestamp_begin / NSEC_PER_SEC)
        year = trace_time.tm_year
        month = trace_time.tm_mon
        day = trace_time.tm_mday
        return (year, month, day)


def extract_timerange(handles, timerange, gmt):
    """Parse a '[begin,end]' string into a pair of ns timestamps.

    Returns (None, None) when timerange does not have the expected
    bracketed form; either element may be None if its date fails to
    parse (see date_to_epoch_nsec).
    """
    # Fix: named groups restored — the '<begin>'/'<end>' parts had been
    # lost, making the pattern invalid and group('begin') unreachable.
    pattern = re.compile(r'^\[(?P<begin>.*),(?P<end>.*)\]$')
    match = pattern.match(timerange)
    if not match:
        return None, None

    begin_str = match.group('begin').strip()
    end_str = match.group('end').strip()
    begin = date_to_epoch_nsec(handles, begin_str, gmt)
    end = date_to_epoch_nsec(handles, end_str, gmt)

    return (begin, end)


def date_to_epoch_nsec(handles, date, gmt):
    """Convert a date string to a nanosecond epoch timestamp.

    Accepted formats: full date-time with or without nanoseconds,
    time-of-day only (resolved against the trace collection date,
    single-day traces only), or a raw nanosecond integer. Returns None
    if date cannot be parsed. handles is only consulted for the
    time-of-day forms; gmt shifts the result by the local UTC offset.
    """
    # Fix: named groups ((?P<year>...) etc.) restored in all patterns;
    # they had been stripped to '(?P...', which raises re.error and
    # breaks the .group('year')/... calls below.
    # match 2014-12-12 17:29:43.802588035 or 2014-12-12T17:29:43.802588035
    pattern1 = re.compile(r'^(?P<year>\d{4})-(?P<mon>[01]\d)-'
                          r'(?P<day>[0-3]\d)[\sTt]'
                          r'(?P<hour>\d{2}):(?P<min>\d{2}):(?P<sec>\d{2})\.'
                          r'(?P<nsec>\d{9})$')
    # match 2014-12-12 17:29:43 or 2014-12-12T17:29:43
    pattern2 = re.compile(r'^(?P<year>\d{4})-(?P<mon>[01]\d)-'
                          r'(?P<day>[0-3]\d)[\sTt]'
                          r'(?P<hour>\d{2}):(?P<min>\d{2}):(?P<sec>\d{2})$')
    # match 17:29:43.802588035
    pattern3 = re.compile(r'^(?P<hour>\d{2}):(?P<min>\d{2}):(?P<sec>\d{2})\.'
                          r'(?P<nsec>\d{9})$')
    # match 17:29:43
    pattern4 = re.compile(r'^(?P<hour>\d{2}):(?P<min>\d{2}):(?P<sec>\d{2})$')
    # match 93847238974923874
    pattern5 = re.compile(r'^\d+$')

    match = pattern1.match(date)
    if match:
        year = match.group('year')
        month = match.group('mon')
        day = match.group('day')
        hour = match.group('hour')
        minute = match.group('min')
        sec = match.group('sec')
        nsec = match.group('nsec')
    elif pattern2.match(date):
        match = pattern2.match(date)
        year = match.group('year')
        month = match.group('mon')
        day = match.group('day')
        hour = match.group('hour')
        minute = match.group('min')
        sec = match.group('sec')
        nsec = 0
    elif pattern3.match(date):
        collection_date = trace_collection_date(handles)
        if collection_date is None:
            print("Use the format 'yyyy-mm-dd hh:mm:ss[.nnnnnnnnn]' "
                  "for multi-day traces")
            return None

        (year, month, day) = collection_date
        match = pattern3.match(date)
        hour = match.group('hour')
        minute = match.group('min')
        sec = match.group('sec')
        nsec = match.group('nsec')
    elif pattern4.match(date):
        collection_date = trace_collection_date(handles)
        if collection_date is None:
            print("Use the format 'yyyy-mm-dd hh:mm:ss[.nnnnnnnnn]' "
                  "for multi-day traces")
            return None

        (year, month, day) = collection_date
        match = pattern4.match(date)
        hour = match.group('hour')
        minute = match.group('min')
        sec = match.group('sec')
        nsec = 0
    elif pattern5.match(date):
        # Raw nanosecond timestamp
        return int(date)
    else:
        return None

    date_time = datetime.datetime(int(year), int(month), int(day),
                                  int(hour), int(minute), int(sec))
    if gmt:
        date_time = date_time + datetime.timedelta(seconds=time.timezone)

    return int(date_time.timestamp()) * NSEC_PER_SEC + int(nsec)


def ns_to_asctime(ns):
    """Format a ns timestamp as a local-time asctime string."""
    return time.asctime(time.localtime(ns / NSEC_PER_SEC))


def ns_to_hour(ns):
    """Format a ns timestamp as local 'HH:MM:SS'."""
    date = time.localtime(ns / NSEC_PER_SEC)
    return '%02d:%02d:%02d' % (date.tm_hour, date.tm_min, date.tm_sec)


def ns_to_hour_nsec(ns, multi_day=False, gmt=False):
    """Format a ns timestamp with nanosecond precision.

    multi_day prefixes the date; gmt formats in UTC instead of local
    time.
    """
    if gmt:
        date = time.gmtime(ns / NSEC_PER_SEC)
    else:
        date = time.localtime(ns / NSEC_PER_SEC)

    if multi_day:
        return ('%04d-%02d-%02d %02d:%02d:%02d.%09d' %
                (date.tm_year, date.tm_mon, date.tm_mday, date.tm_hour,
                 date.tm_min, date.tm_sec, ns % NSEC_PER_SEC))
    else:
        return ('%02d:%02d:%02d.%09d' %
                (date.tm_hour, date.tm_min, date.tm_sec, ns % NSEC_PER_SEC))


def ns_to_sec(ns):
    """Format a ns duration as 'seconds.nanoseconds'."""
    return '%lu.%09u' % (ns / NSEC_PER_SEC, ns % NSEC_PER_SEC)


def ns_to_day(ns):
    """Format a ns timestamp as local 'YYYY-MM-DD'."""
    date = time.localtime(ns / NSEC_PER_SEC)
    return '%04d-%02d-%02d' % (date.tm_year, date.tm_mon, date.tm_mday)


def sec_to_hour(ns):
    """Format a seconds timestamp as local 'HH:MM:SS'."""
    date = time.localtime(ns)
    return '%02d:%02d:%02d' % (date.tm_hour, date.tm_min, date.tm_sec)


def sec_to_nsec(sec):
    """Convert seconds to nanoseconds."""
    return sec * NSEC_PER_SEC


def seq_to_ipv4(ip):
    """Format a 4-element byte sequence as dotted-quad IPv4."""
    return '{}.{}.{}.{}'.format(ip[0], ip[1], ip[2], ip[3])


def int_to_ipv4(ip):
    """Format a 32-bit big-endian integer as dotted-quad IPv4."""
    return socket.inet_ntoa(struct.pack('!I', ip))


def size_str_to_bytes(size_str):
    """Convert a human-readable size ('1.5M', '2KB', '42') to bytes.

    Raises ValueError on an unparsable number or unknown unit; no unit
    means bytes.
    """
    try:
        units_index = next(i for i, c in enumerate(size_str) if c.isalpha())
    except StopIteration:
        # no units found
        units_index = None

    if units_index is not None:
        size = size_str[:units_index]
        units = size_str[units_index:]
    else:
        size = size_str
        units = None

    try:
        size = float(size)
    except ValueError:
        raise ValueError('invalid size: {}'.format(size))

    # no units defaults to bytes
    if units is not None:
        if units in ['t', 'T', 'tB', 'TB']:
            size *= BYTES_PER_TIB
        elif units in ['g', 'G', 'gB', 'GB']:
            size *= BYTES_PER_GIB
        elif units in ['m', 'M', 'mB', 'MB']:
            size *= BYTES_PER_MIB
        elif units in ['k', 'K', 'kB', 'KB']:
            size *= BYTES_PER_KIB
        elif units == 'B':
            # bytes is already the target unit
            pass
        else:
            raise ValueError('unrecognised units: {}'.format(units))

    size = int(size)

    return size


def duration_str_to_ns(duration_str):
    """Convert a human-readable duration ('1.5s', '2ms', '3us', '4ns',
    or a bare number of seconds) to nanoseconds.

    Raises ValueError on an unparsable number or unknown unit.
    """
    try:
        units_index = next(i for i, c in enumerate(duration_str)
                           if c.isalpha())
    except StopIteration:
        # no units found
        units_index = None

    if units_index is not None:
        duration = duration_str[:units_index]
        units = duration_str[units_index:].lower()
    else:
        duration = duration_str
        units = None

    try:
        duration = float(duration)
    except ValueError:
        raise ValueError('invalid duration: {}'.format(duration))

    if units is not None:
        if units == 's':
            duration *= NSEC_PER_SEC
        elif units == 'ms':
            duration *= NSEC_PER_MSEC
        elif units in ['us', 'µs']:
            duration *= NSEC_PER_USEC
        elif units == 'ns':
            # ns is already the target unit
            pass
        else:
            raise ValueError('unrecognised units: {}'.format(units))
    else:
        # no units defaults to seconds
        duration *= NSEC_PER_SEC

    duration = int(duration)

    return duration


def get_v4_addr_str(ip):
    # depending on the version of lttng-modules, the v4addr is a
    # string (< 2.6) or sequence (>= 2.6)
    try:
        return seq_to_ipv4(ip)
    except TypeError:
        return int_to_ipv4(ip)
IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. from . import sp, sv class BlockStateProvider(sp.StateProvider): def __init__(self, state): cbs = { 'block_rq_complete': self._process_block_rq_complete, 'block_rq_issue': self._process_block_rq_issue, 'block_bio_remap': self._process_block_bio_remap, 'block_bio_backmerge': self._process_block_bio_backmerge, } super().__init__(state, cbs) self._remap_requests = [] def _process_block_bio_remap(self, event): dev = event['dev'] sector = event['sector'] old_dev = event['old_dev'] old_sector = event['old_sector'] for req in self._remap_requests: if req.dev == old_dev and req.sector == old_sector: req.dev = dev req.sector = sector return req = sv.BlockRemapRequest(dev, sector, old_dev, old_sector) self._remap_requests.append(req) # For backmerge requests, just remove the request from the # _remap_requests queue, because we rely later on the nr_sector # which has all the info we need def _process_block_bio_backmerge(self, event): dev = event['dev'] sector = event['sector'] for remap_req in self._remap_requests: if remap_req.dev == dev and remap_req.sector == sector: self._remap_requests.remove(remap_req) def _process_block_rq_issue(self, event): dev = event['dev'] sector = event['sector'] nr_sector = event['nr_sector'] if nr_sector == 0: return req = sv.BlockIORequest.new_from_rq_issue(event) for remap_req in self._remap_requests: if remap_req.dev == dev and remap_req.sector == sector: dev = remap_req.old_dev break if dev not in self._state.disks: self._state.disks[dev] = sv.Disk() self._state.disks[dev].pending_requests[sector] = req def _process_block_rq_complete(self, event): dev = event['dev'] sector = event['sector'] nr_sector = event['nr_sector'] if nr_sector == 0: return for remap_req in 
self._remap_requests: if remap_req.dev == dev and remap_req.sector == sector: dev = remap_req.old_dev self._remap_requests.remove(remap_req) break if dev not in self._state.disks: self._state.disks[dev] = sv.Disk() disk = self._state.disks[dev] # Ignore rq_complete without matching rq_issue if sector not in disk.pending_requests: return req = disk.pending_requests[sector] # Ignore rq_complete if nr_sector does not match rq_issue's if req.nr_sector != nr_sector: return req.update_from_rq_complete(event) if req.tid in self._state.tids.keys(): proc = self._state.tids[req.tid] else: proc = None self._state.send_notification_cb('block_rq_complete', req=req, proc=proc, cpu_id=event['cpu_id']) del disk.pending_requests[sector] lttnganalyses-0.4.3/lttnganalyses/cli/0000775000175000017500000000000012667421106021522 5ustar mjeansonmjeanson00000000000000lttnganalyses-0.4.3/lttnganalyses/cli/mi.py0000664000175000017500000002705212665072151022507 0ustar mjeansonmjeanson00000000000000# The MIT License (MIT) # # Copyright (C) 2015 - Philippe Proulx # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. from collections import namedtuple class Tags: CPU = 'cpu' MEMORY = 'memory' INTERRUPT = 'interrupt' SCHED = 'sched' SYSCALL = 'syscall' IO = 'io' TOP = 'top' STATS = 'stats' FREQ = 'freq' LOG = 'log' class ColumnDescription: def __init__(self, key, title, do_class, unit=None): self._key = key self._title = title self._do_class = do_class self._unit = unit @property def key(self): return self._key def to_native_object(self): obj = { 'title': self._title, 'class': self._do_class, } if self._unit: obj['unit'] = self._unit return obj class TableClass: def __init__(self, name, title, column_descriptions_tuples=None, inherit=None): if column_descriptions_tuples is None: column_descriptions_tuples = [] self._inherit = inherit self._name = name self._title = title self._column_descriptions = [] for column_descr_tuple in column_descriptions_tuples: key = column_descr_tuple[0] title = column_descr_tuple[1] do_type = column_descr_tuple[2] unit = None if len(column_descr_tuple) > 3: unit = column_descr_tuple[3] column_descr = ColumnDescription(key, title, do_type.CLASS, unit) self._column_descriptions.append(column_descr) @property def name(self): return self._name @property def title(self): return self._title def to_native_object(self): obj = {} column_descrs = self._column_descriptions native_column_descrs = [c.to_native_object() for c in column_descrs] if self._inherit is not None: obj['inherit'] = self._inherit if self._title is not None: obj['title'] = self._title if native_column_descrs: obj['column-descriptions'] = native_column_descrs return obj def get_column_named_tuple(self): keys = [cd.key for cd in self._column_descriptions] return namedtuple('Column', keys) class ResultTable: def __init__(self, 
table_class, begin, end, subtitle=None): self._table_class = table_class self._column_named_tuple = table_class.get_column_named_tuple() self._subtitle = subtitle self._timerange = TimeRange(begin, end) self._rows = [] @property def table_class(self): return self._table_class @property def timerange(self): return self._timerange @property def title(self): return self._table_class.title @property def subtitle(self): return self._subtitle def append_row(self, **kwargs): row = self._column_named_tuple(**kwargs) self._rows.append(row) def append_row_tuple(self, row_tuple): self._rows.append(row_tuple) @property def rows(self): return self._rows def to_native_object(self): obj = { 'class': self._table_class.name, 'time-range': self._timerange.to_native_object(), } row_objs = [] if self._table_class.name: if self._subtitle is not None: full_title = '{} [{}]'.format(self.title, self._subtitle) table_class = TableClass(None, full_title, inherit=self._table_class.name) self._table_class = table_class if self._table_class.name is None: obj['class'] = self._table_class.to_native_object() for row in self._rows: row_obj = [] for cell in row: row_obj.append(cell.to_native_object()) row_objs.append(row_obj) obj['data'] = row_objs return obj class _DataObject: def to_native_object(self): raise NotImplementedError def __eq__(self, other): # ensure we're comparing the same type first if not isinstance(other, self.__class__): return False # call specific equality method return self._eq(other) def _eq(self, other): raise NotImplementedError class _UnstructuredDataObject(_DataObject): def __init__(self, value): self._value = value @property def value(self): return self._value def to_native_object(self): return self._value def __str__(self): return str(self._value) def _eq(self, other): return self._value == other._value class _StructuredDataObject(_DataObject): def to_native_object(self): base = {'class': self.CLASS} base.update(self._to_native_object()) return base def 
_to_native_object(self): raise NotImplementedError class Boolean(_UnstructuredDataObject): CLASS = 'bool' class Integer(_UnstructuredDataObject): CLASS = 'int' class Float(_UnstructuredDataObject): CLASS = 'float' class String(_UnstructuredDataObject): CLASS = 'string' class Empty(_DataObject): def to_native_object(self): return None def _eq(self, other): return True class Unknown(_StructuredDataObject): CLASS = 'unknown' def _to_native_object(self): return {} def _eq(self, other): return True def __str__(self): return '?' class _SimpleValue(_StructuredDataObject): def __init__(self, value): self._value = value @property def value(self): return self._value def _to_native_object(self): return {'value': self._value} def __str__(self): return str(self._value) def _eq(self, other): return self._value == other._value class _SimpleName(_StructuredDataObject): def __init__(self, name): self._name = name @property def name(self): return self._name def _to_native_object(self): return {'name': self._name} def __str__(self): return self._name def _eq(self, other): return self._name == other._name class Ratio(_SimpleValue): CLASS = 'ratio' @classmethod def from_percentage(cls, value): return cls(value / 100) def to_percentage(self): return self._value * 100 class Timestamp(_SimpleValue): CLASS = 'timestamp' class Duration(_SimpleValue): CLASS = 'duration' @classmethod def from_ms(cls, ms): return cls(ms * 1000000) @classmethod def from_us(cls, us): return cls(us * 1000) def to_ms(self): return self._value / 1000000 def to_us(self): return self._value / 1000 class Size(_SimpleValue): CLASS = 'size' class Bitrate(_SimpleValue): CLASS = 'bitrate' @classmethod def from_size_duration(cls, size, duration): return cls(size * 8 / duration) class TimeRange(_StructuredDataObject): CLASS = 'time-range' def __init__(self, begin, end): self._begin = begin self._end = end @property def begin(self): return self._begin @property def end(self): return self._end def _to_native_object(self): 
return {'begin': self._begin, 'end': self._end} def _eq(self, other): return (self._begin, self._end) == (other._begin, other._end) class Syscall(_SimpleName): CLASS = 'syscall' class Process(_StructuredDataObject): CLASS = 'process' def __init__(self, name=None, pid=None, tid=None): self._name = name self._pid = pid self._tid = tid @property def name(self): return self._name @property def pid(self): return self._pid @property def tid(self): return self._tid def _to_native_object(self): ret_dict = {} if self._name is not None: ret_dict['name'] = self._name if self._pid is not None: ret_dict['pid'] = self._pid if self._tid is not None: ret_dict['tid'] = self._tid return ret_dict def _eq(self, other): self_tuple = (self._name, self._pid, self._tid) other_tuple = (other._name, other._pid, other._tid) return self_tuple == other_tuple class Path(_StructuredDataObject): CLASS = 'path' def __init__(self, path): self._path = path @property def path(self): return self._path def _to_native_object(self): return {'path': self._path} def _eq(self, other): return self._path == other._path class Fd(_StructuredDataObject): CLASS = 'fd' def __init__(self, fd): self._fd = fd @property def fd(self): return self._fd def _to_native_object(self): return {'fd': self._fd} def _eq(self, other): return self._fd == other._fd class Irq(_StructuredDataObject): CLASS = 'irq' def __init__(self, is_hard, nr, name=None): self._is_hard = is_hard self._nr = nr self._name = name @property def is_hard(self): return self._is_hard @property def nr(self): return self._nr @property def name(self): return self._name def _to_native_object(self): obj = {'hard': self._is_hard, 'nr': self._nr} if self._name is not None: obj['name'] = self._name return obj def _eq(self, other): self_tuple = (self._is_hard, self._nr, self._name) other_tuple = (other._is_hard, other._nr, other._name) return self_tuple == other_tuple class Cpu(_StructuredDataObject): CLASS = 'cpu' def __init__(self, cpu_id): self._id = cpu_id 
@property def id(self): return self._id def _to_native_object(self): return {'id': self._id} def _eq(self, other): return self._id == other._id class Disk(_SimpleName): CLASS = 'disk' class Partition(_SimpleName): CLASS = 'part' class NetIf(_SimpleName): CLASS = 'netif' def get_metadata(version, title, description, authors, url, tags, table_classes): t_classes = {t.name: t.to_native_object() for t in table_classes} return { 'version': { 'major': version.major, 'minor': version.minor, 'patch': version.patch, 'extra': version.extra }, 'title': title, 'description': description, 'authors': authors, 'url': url, 'tags': tags, 'table-classes': t_classes, } lttnganalyses-0.4.3/lttnganalyses/cli/__init__.py0000664000175000017500000000217512665072151023640 0ustar mjeansonmjeanson00000000000000# The MIT License (MIT) # # Copyright (C) 2015 - Julien Desfossez # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. 
lttnganalyses-0.4.3/lttnganalyses/cli/termgraph.py0000664000175000017500000001465012665072151024073 0ustar mjeansonmjeanson00000000000000# The MIT License (MIT) # # Copyright (C) 2016 - Antoine Busque # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. 
from collections import namedtuple GraphDatum = namedtuple('GraphDatum', ['value', 'value_str']) BarGraphDatum = namedtuple('BarGraphDatum', ['value', 'value_str', 'label']) FreqGraphDatum = namedtuple( 'FreqGraphDatum', ['value', 'value_str', 'lower_bound'] ) class Graph(): MAX_GRAPH_WIDTH = 80 BAR_CHAR = '█' HR_CHAR = '#' def __init__(self, data, get_value, get_value_str, title, unit): self._data = data self._get_value = get_value self._title = title self._unit = unit self._max_value = 0 self._max_value_len = 0 if get_value_str is not None: self._get_value_str_cb = get_value_str else: self._get_value_str_cb = Graph._get_value_str_default def _transform_data(self, data): graph_data = [] for datum in data: graph_datum = self._get_graph_datum(datum) if graph_datum.value > self._max_value: self._max_value = graph_datum.value if len(graph_datum.value_str) > self._max_value_len: self._max_value_len = len(graph_datum.value_str) graph_data.append(graph_datum) return graph_data def _get_value_str(self, value): return self._get_value_str_cb(value) def _get_graph_datum(self, datum): value = self._get_value(datum) value_str = self._get_value_str(value) return GraphDatum(value, value_str) def _print_header(self): if self._title: print(self._title) def _print_separator(self): print(self.HR_CHAR * self.MAX_GRAPH_WIDTH) def _print_body(self): raise NotImplementedError() def print_graph(self): if not self._data: return self._print_header() self._print_separator() self._print_body() print() @staticmethod def _get_value_str_default(value): if isinstance(value, float): value_str = '{:0.02f}'.format(value) else: value_str = str(value) return value_str class BarGraph(Graph): def __init__(self, data, get_value, get_label, get_value_str=None, title=None, label_header=None, unit=None): super().__init__(data, get_value, get_value_str, title, unit) self._get_label = get_label self._label_header = label_header self._data = self._transform_data(self._data) def _get_graph_datum(self, datum): 
value = self._get_value(datum) value_str = self._get_value_str(value) label = self._get_label(datum) return BarGraphDatum(value, value_str, label) def _get_value_str(self, value): value_str = super()._get_value_str(value) if self._unit: value_str += ' ' + self._unit return value_str def _get_graph_header(self): if not self._label_header: return self._title title_len = len(self._title) space_width = (self.MAX_GRAPH_WIDTH - title_len) + \ 1 + self._max_value_len + 1 return self._title + ' ' * space_width + self._label_header def _print_header(self): header = self._get_graph_header() print(header) def _get_bar_str(self, datum): if self._max_value == 0: bar_width = 0 else: bar_width = int(self.MAX_GRAPH_WIDTH * datum.value / self._max_value) space_width = self.MAX_GRAPH_WIDTH - bar_width bar_str = self.BAR_CHAR * bar_width + ' ' * space_width return bar_str def _print_body(self): for datum in self._data: bar_str = self._get_bar_str(datum) value_padding = ' ' * (self._max_value_len - len(datum.value_str)) print(bar_str, value_padding + datum.value_str, datum.label) class FreqGraph(Graph): LOWER_BOUND_WIDTH = 8 def __init__(self, data, get_value, get_lower_bound, get_value_str=None, title=None, unit=None): super().__init__(data, get_value, get_value_str, title, unit) self._get_lower_bound = get_lower_bound self._data = self._transform_data(self._data) def _get_graph_datum(self, datum): value = self._get_value(datum) value_str = self._get_value_str(value) lower_bound = self._get_lower_bound(datum) return FreqGraphDatum(value, value_str, lower_bound) def _print_header(self): header = self._title if self._unit: header += ' ({})'.format(self._unit) print(header) def _get_bar_str(self, datum): max_width = self.MAX_GRAPH_WIDTH - self.LOWER_BOUND_WIDTH if self._max_value == 0: bar_width = 0 else: bar_width = int(max_width * datum.value / self._max_value) space_width = max_width - bar_width bar_str = self.BAR_CHAR * bar_width + ' ' * space_width return bar_str def 
_print_body(self): for datum in self._data: bound_str = FreqGraph._get_bound_str(datum) bar_str = self._get_bar_str(datum) value_padding = ' ' * (self._max_value_len - len(datum.value_str)) print(bound_str, bar_str, value_padding + datum.value_str) @staticmethod def _get_bound_str(datum): return '{:>7.03f}'.format(datum.lower_bound) lttnganalyses-0.4.3/lttnganalyses/cli/memtop.py0000664000175000017500000001636612665072151023411 0ustar mjeansonmjeanson00000000000000# The MIT License (MIT) # # Copyright (C) 2015 - Julien Desfossez # 2015 - Antoine Busque # 2015 - Philippe Proulx # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. import operator from .command import Command from ..core import memtop from . import mi from . 
import termgraph class Memtop(Command): _DESC = """The memtop command.""" _ANALYSIS_CLASS = memtop.Memtop _MI_TITLE = 'Top memory usage' _MI_DESCRIPTION = 'Per-TID top allocated/freed memory' _MI_TAGS = [mi.Tags.MEMORY, mi.Tags.TOP] _MI_TABLE_CLASS_ALLOCD = 'allocd' _MI_TABLE_CLASS_FREED = 'freed' _MI_TABLE_CLASS_TOTAL = 'total' _MI_TABLE_CLASS_SUMMARY = 'summary' _MI_TABLE_CLASSES = [ ( _MI_TABLE_CLASS_ALLOCD, 'Per-TID top allocated memory', [ ('process', 'Process', mi.Process), ('pages', 'Allocated pages', mi.Integer, 'pages'), ] ), ( _MI_TABLE_CLASS_FREED, 'Per-TID top freed memory', [ ('process', 'Process', mi.Process), ('pages', 'Freed pages', mi.Integer, 'pages'), ] ), ( _MI_TABLE_CLASS_TOTAL, 'Total allocated/freed memory', [ ('allocd', 'Total allocated pages', mi.Integer, 'pages'), ('freed', 'Total freed pages', mi.Integer, 'pages'), ] ), ( _MI_TABLE_CLASS_SUMMARY, 'Memory usage - summary', [ ('time_range', 'Time range', mi.TimeRange), ('allocd', 'Total allocated pages', mi.Integer, 'pages'), ('freed', 'Total freed pages', mi.Integer, 'pages'), ] ), ] def _analysis_tick(self, begin_ns, end_ns): allocd_table = self._get_per_tid_allocd_result_table(begin_ns, end_ns) freed_table = self._get_per_tid_freed_result_table(begin_ns, end_ns) total_table = self._get_total_result_table(begin_ns, end_ns) if self._mi_mode: self._mi_append_result_table(allocd_table) self._mi_append_result_table(freed_table) self._mi_append_result_table(total_table) else: self._print_date(begin_ns, end_ns) self._print_per_tid_allocd(allocd_table) self._print_per_tid_freed(freed_table) self._print_total(total_table) def _create_summary_result_tables(self): total_tables = self._mi_get_result_tables(self._MI_TABLE_CLASS_TOTAL) begin = total_tables[0].timerange.begin end = total_tables[-1].timerange.end summary_table = \ self._mi_create_result_table(self._MI_TABLE_CLASS_SUMMARY, begin, end) for total_table in total_tables: total_allocd = total_table.rows[0].allocd total_freed = 
total_table.rows[0].freed summary_table.append_row( time_range=total_table.timerange, allocd=total_allocd, freed=total_freed, ) self._mi_clear_result_tables() self._mi_append_result_table(summary_table) def _get_per_tid_attr_result_table(self, table_class, attr, begin_ns, end_ns): result_table = self._mi_create_result_table(table_class, begin_ns, end_ns) count = 0 for tid in sorted(self._analysis.tids.values(), key=operator.attrgetter(attr), reverse=True): result_table.append_row( process=mi.Process(tid.comm, tid=tid.tid), pages=mi.Integer(getattr(tid, attr)), ) count += 1 if self._args.limit > 0 and count >= self._args.limit: break return result_table def _get_per_tid_allocd_result_table(self, begin_ns, end_ns): return self._get_per_tid_attr_result_table(self._MI_TABLE_CLASS_ALLOCD, 'allocated_pages', begin_ns, end_ns) def _get_per_tid_freed_result_table(self, begin_ns, end_ns): return self._get_per_tid_attr_result_table(self._MI_TABLE_CLASS_FREED, 'freed_pages', begin_ns, end_ns) def _get_total_result_table(self, begin_ns, end_ns): result_table = self._mi_create_result_table(self._MI_TABLE_CLASS_TOTAL, begin_ns, end_ns) alloc = 0 freed = 0 for tid in self._analysis.tids.values(): alloc += tid.allocated_pages freed += tid.freed_pages result_table.append_row( allocd=mi.Integer(alloc), freed=mi.Integer(freed), ) return result_table def _print_per_tid_result(self, result_table, title): graph = termgraph.BarGraph( title=title, unit='pages', get_value=lambda row: row.pages.value, get_label=lambda row: '%s (%d)' % (row.process.name, row.process.tid), label_header='Process', data=result_table.rows ) graph.print_graph() def _print_per_tid_allocd(self, result_table): self._print_per_tid_result(result_table, 'Per-TID Memory Allocations') def _print_per_tid_freed(self, result_table): self._print_per_tid_result(result_table, 'Per-TID Memory Deallocations') def _print_total(self, result_table): alloc = result_table.rows[0].allocd.value freed = result_table.rows[0].freed.value 
print('\nTotal memory usage:\n- %d pages allocated\n- %d pages freed' % (alloc, freed)) def _add_arguments(self, ap): Command._add_proc_filter_args(ap) Command._add_top_args(ap) def _run(mi_mode): memtopcmd = Memtop(mi_mode=mi_mode) memtopcmd.run() # entry point (human) def run(): _run(mi_mode=False) # entry point (MI) def run_mi(): _run(mi_mode=True) lttnganalyses-0.4.3/lttnganalyses/cli/irq.py0000664000175000017500000006244312665072151022700 0ustar mjeansonmjeanson00000000000000# The MIT License (MIT) # # Copyright (C) 2015 - Julien Desfossez # 2015 - Antoine Busque # 2015 - Philippe Proulx # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. import itertools import math import statistics import sys from . import mi from . 
import termgraph from .command import Command from ..core import irq as core_irq from ..linuxautomaton import common, sv class IrqAnalysisCommand(Command): _DESC = """The irq command.""" _ANALYSIS_CLASS = core_irq.IrqAnalysis _MI_TITLE = 'System interrupt analysis' _MI_DESCRIPTION = 'Interrupt frequency distribution, statistics, and log' _MI_TAGS = [mi.Tags.INTERRUPT, mi.Tags.STATS, mi.Tags.FREQ, mi.Tags.LOG] _MI_TABLE_CLASS_LOG = 'log' _MI_TABLE_CLASS_HARD_STATS = 'hard-stats' _MI_TABLE_CLASS_SOFT_STATS = 'soft-stats' _MI_TABLE_CLASS_FREQ = 'freq' _MI_TABLE_CLASS_SUMMARY = 'summary' _MI_TABLE_CLASSES = [ ( _MI_TABLE_CLASS_LOG, 'Interrupt log', [ ('time_range', 'Time range', mi.TimeRange), ('raised_ts', 'Raised timestamp', mi.Timestamp), ('cpu', 'CPU', mi.Cpu), ('irq', 'Interrupt', mi.Irq), ] ), ( _MI_TABLE_CLASS_HARD_STATS, 'Hardware interrupt statistics', [ ('irq', 'Interrupt', mi.Irq), ('count', 'Interrupt count', mi.Integer, 'interrupts'), ('min_duration', 'Minimum duration', mi.Duration), ('avg_duration', 'Average duration', mi.Duration), ('max_duration', 'Maximum duration', mi.Duration), ('stdev_duration', 'Interrupt duration standard deviation', mi.Duration), ] ), ( _MI_TABLE_CLASS_SOFT_STATS, 'Hardware interrupt statistics', [ ('irq', 'Interrupt', mi.Irq), ('count', 'Interrupt count', mi.Integer, 'interrupts'), ('min_duration', 'Minimum duration', mi.Duration), ('avg_duration', 'Average duration', mi.Duration), ('max_duration', 'Maximum duration', mi.Duration), ('stdev_duration', 'Interrupt duration standard deviation', mi.Duration), ('raise_count', 'Interrupt raise count', mi.Integer, 'interrupt raises'), ('min_latency', 'Minimum raise latency', mi.Duration), ('avg_latency', 'Average raise latency', mi.Duration), ('max_latency', 'Maximum raise latency', mi.Duration), ('stdev_latency', 'Interrupt raise latency standard deviation', mi.Duration), ] ), ( _MI_TABLE_CLASS_FREQ, 'Interrupt handler duration frequency distribution', [ ('duration_lower', 'Duration 
(lower bound)', mi.Duration), ('duration_upper', 'Duration (upper bound)', mi.Duration), ('count', 'Interrupt count', mi.Integer, 'interrupts'), ] ), ( _MI_TABLE_CLASS_SUMMARY, 'Interrupt statistics - summary', [ ('time_range', 'Time range', mi.TimeRange), ('count', 'Total interrupt count', mi.Integer, 'interrupts'), ] ), ] def _analysis_tick(self, begin_ns, end_ns): log_table = None hard_stats_table = None soft_stats_table = None freq_tables = None if self._args.log: log_table = self._get_log_result_table(begin_ns, end_ns) if self._args.stats or self._args.freq: hard_stats_table, soft_stats_table, freq_tables = \ self._get_stats_freq_result_tables(begin_ns, end_ns) if self._mi_mode: self._mi_append_result_table(log_table) self._mi_append_result_table(hard_stats_table) self._mi_append_result_table(soft_stats_table) if self._args.freq_series: freq_tables = [self._get_freq_series_table(freq_tables)] self._mi_append_result_tables(freq_tables) else: self._print_date(begin_ns, end_ns) if hard_stats_table or soft_stats_table or freq_tables: self._print_stats_freq(hard_stats_table, soft_stats_table, freq_tables) if log_table: print() if log_table: self._print_log(log_table) def _create_summary_result_tables(self): if not self._args.stats: self._mi_clear_result_tables() return hard_stats_tables = \ self._mi_get_result_tables(self._MI_TABLE_CLASS_HARD_STATS) soft_stats_tables = \ self._mi_get_result_tables(self._MI_TABLE_CLASS_SOFT_STATS) assert len(hard_stats_tables) == len(soft_stats_tables) begin = hard_stats_tables[0].timerange.begin end = hard_stats_tables[-1].timerange.end summary_table = \ self._mi_create_result_table(self._MI_TABLE_CLASS_SUMMARY, begin, end) for hs_table, ss_table in zip(hard_stats_tables, soft_stats_tables): assert hs_table.timerange == ss_table.timerange for row in itertools.chain(hs_table.rows, ss_table.rows): summary_table.append_row( time_range=hs_table.timerange, count=row.count, ) self._mi_clear_result_tables() 
self._mi_append_result_table(summary_table) def _get_log_result_table(self, begin_ns, end_ns): result_table = self._mi_create_result_table(self._MI_TABLE_CLASS_LOG, begin_ns, end_ns) for irq in self._analysis.irq_list: if not self._filter_irq(irq): continue if type(irq) is sv.HardIRQ: is_hard = True raised_ts_do = mi.Empty() name = self._analysis.hard_irq_stats[irq.id].name else: is_hard = False if irq.raise_ts is None: raised_ts_do = mi.Unknown() else: raised_ts_do = mi.Timestamp(irq.raise_ts) name = self._analysis.softirq_stats[irq.id].name result_table.append_row( time_range=mi.TimeRange(irq.begin_ts, irq.end_ts), raised_ts=raised_ts_do, cpu=mi.Cpu(irq.cpu_id), irq=mi.Irq(is_hard, irq.id, name), ) return result_table def _get_common_stats_result_table_row(self, is_hard, irq_nr, irq_stats): stdev = self._compute_duration_stdev(irq_stats) if math.isnan(stdev): stdev = mi.Unknown() else: stdev = mi.Duration(stdev) return ( mi.Irq(is_hard, irq_nr, irq_stats.name), mi.Integer(irq_stats.count), mi.Duration(irq_stats.min_duration), mi.Duration(irq_stats.total_duration / irq_stats.count), mi.Duration(irq_stats.max_duration), stdev, ) def _append_hard_stats_result_table_row(self, irq_nr, irq_stats, hard_stats_table): common_row = self._get_common_stats_result_table_row(True, irq_nr, irq_stats) hard_stats_table.append_row( irq=common_row[0], count=common_row[1], min_duration=common_row[2], avg_duration=common_row[3], max_duration=common_row[4], stdev_duration=common_row[5], ) def _append_soft_stats_result_table_row(self, irq_nr, irq_stats, soft_stats_table): common_row = self._get_common_stats_result_table_row(False, irq_nr, irq_stats) if irq_stats.raise_count == 0: min_latency = mi.Unknown() avg_latency = mi.Unknown() max_latency = mi.Unknown() stdev_latency = mi.Unknown() else: min_latency = mi.Duration(irq_stats.min_raise_latency) avg_latency = irq_stats.total_raise_latency / irq_stats.raise_count avg_latency = mi.Duration(avg_latency) max_latency = 
mi.Duration(irq_stats.max_raise_latency) stdev = self._compute_raise_latency_stdev(irq_stats) if math.isnan(stdev): stdev_latency = mi.Unknown() else: stdev_latency = mi.Duration(stdev) soft_stats_table.append_row( irq=common_row[0], count=common_row[1], min_duration=common_row[2], avg_duration=common_row[3], max_duration=common_row[4], stdev_duration=common_row[5], raise_count=mi.Integer(irq_stats.raise_count), min_latency=min_latency, avg_latency=avg_latency, max_latency=max_latency, stdev_latency=stdev_latency, ) def _fill_freq_result_table(self, irq_stats, freq_table): # The number of bins for the histogram resolution = self._args.freq_resolution if self._args.min is not None: min_duration = self._args.min else: min_duration = irq_stats.min_duration if self._args.max is not None: max_duration = self._args.max else: max_duration = irq_stats.max_duration # ns to µs min_duration /= 1000 max_duration /= 1000 # histogram's step if self._args.freq_uniform: # TODO: perform only one time durations = [irq.duration for irq in self._analysis.irq_list] min_duration, max_duration, step = \ self._get_uniform_freq_values(durations) else: step = (max_duration - min_duration) / resolution if step == 0: return buckets = [] counts = [] for i in range(resolution): buckets.append(i * step) counts.append(0) for irq in irq_stats.irq_list: duration = irq.duration / 1000 index = int((duration - min_duration) / step) if index >= resolution: # special case for max value: put in last bucket (includes # its upper bound) if duration == max_duration: counts[index - 1] += 1 continue counts[index] += 1 for index, count in enumerate(counts): lower_bound = index * step + min_duration upper_bound = (index + 1) * step + min_duration freq_table.append_row( duration_lower=mi.Duration.from_us(lower_bound), duration_upper=mi.Duration.from_us(upper_bound), count=mi.Integer(count), ) def _fill_stats_freq_result_tables(self, begin_ns, end_ns, is_hard, analysis_stats, filter_list, hard_stats_table, 
soft_stats_table, freq_tables): for id in sorted(analysis_stats): if filter_list and str(id) not in filter_list: continue irq_stats = analysis_stats[id] if irq_stats.count == 0: continue if self._args.stats: if is_hard: append_row_fn = self._append_hard_stats_result_table_row table = hard_stats_table else: append_row_fn = self._append_soft_stats_result_table_row table = soft_stats_table append_row_fn(id, irq_stats, table) if self._args.freq: subtitle = '{} ({})'.format(irq_stats.name, id) freq_table = \ self._mi_create_result_table(self._MI_TABLE_CLASS_FREQ, begin_ns, end_ns, subtitle) self._fill_freq_result_table(irq_stats, freq_table) # it is possible that the frequency distribution result # table is empty; we need to keep it any way because # there's a 1-to-1 association between the statistics # row indexes (if available) and the frequency table # indexes freq_tables.append(freq_table) def _get_freq_series_table(self, freq_tables): if not freq_tables: return column_infos = [ ('duration_lower', 'Duration (lower bound)', mi.Duration), ('duration_upper', 'Duration (upper bound)', mi.Duration), ] for index, freq_table in enumerate(freq_tables): column_infos.append(( 'irq{}'.format(index), freq_table.subtitle, mi.Integer, 'interrupts' )) title = 'Interrupt handlers duration frequency distributions' table_class = mi.TableClass(None, title, column_infos) begin = freq_tables[0].timerange.begin end = freq_tables[0].timerange.end result_table = mi.ResultTable(table_class, begin, end) for row_index, freq0_row in enumerate(freq_tables[0].rows): row_tuple = [ freq0_row.duration_lower, freq0_row.duration_upper, ] for freq_table in freq_tables: freq_row = freq_table.rows[row_index] row_tuple.append(freq_row.count) result_table.append_row_tuple(tuple(row_tuple)) return result_table def _get_stats_freq_result_tables(self, begin_ns, end_ns): def fill_stats_freq_result_tables(is_hard, stats, filter_list): self._fill_stats_freq_result_tables(begin_ns, end_ns, is_hard, stats, 
filter_list, hard_stats_table, soft_stats_table, freq_tables) hard_stats_table = \ self._mi_create_result_table(self._MI_TABLE_CLASS_HARD_STATS, begin_ns, end_ns) soft_stats_table = \ self._mi_create_result_table(self._MI_TABLE_CLASS_SOFT_STATS, begin_ns, end_ns) freq_tables = [] if self._args.irq_filter_list is not None or \ self._args.softirq_filter_list is None: fill_stats_freq_result_tables(True, self._analysis.hard_irq_stats, self._args.irq_filter_list) if self._args.softirq_filter_list is not None or \ self._args.irq_filter_list is None: fill_stats_freq_result_tables(False, self._analysis.softirq_stats, self._args.softirq_filter_list) return hard_stats_table, soft_stats_table, freq_tables def _ns_to_hour_nsec(self, ts): return common.ns_to_hour_nsec(ts, self._args.multi_day, self._args.gmt) def _print_log(self, result_table): fmt = '[{:<18}, {:<18}] {:>15} {:>4} {:<9} {:>4} {:<22}' title_fmt = '{:<20} {:<19} {:>15} {:>4} {:<9} {:>4} {:<22}' print(title_fmt.format('Begin', 'End', 'Duration (us)', 'CPU', 'Type', '#', 'Name')) for row in result_table.rows: timerange = row.time_range begin_ts = timerange.begin end_ts = timerange.end if type(row.raised_ts) is mi.Timestamp: raised_fmt = ' (raised at %s)' raised_ts = \ raised_fmt % self._ns_to_hour_nsec(row.raised_ts.value) else: raised_ts = '' cpu_id = row.cpu.id irq_do = row.irq if irq_do.is_hard: irqtype = 'IRQ' else: irqtype = 'SoftIRQ' print(fmt.format(self._ns_to_hour_nsec(begin_ts), self._ns_to_hour_nsec(end_ts), '%0.03f' % ((end_ts - begin_ts) / 1000), '%d' % cpu_id, irqtype, irq_do.nr, irq_do.name + raised_ts)) def _validate_transform_args(self, args): args.irq_filter_list = None args.softirq_filter_list = None if args.irq: args.irq_filter_list = args.irq.split(',') if args.softirq: args.softirq_filter_list = args.softirq.split(',') def _compute_duration_stdev(self, irq_stats_item): if irq_stats_item.count < 2: return float('nan') durations = [] for irq in irq_stats_item.irq_list: 
durations.append(irq.end_ts - irq.begin_ts) return statistics.stdev(durations) def _compute_raise_latency_stdev(self, irq_stats_item): if irq_stats_item.raise_count < 2: return float('nan') raise_latencies = [] for irq in irq_stats_item.irq_list: if irq.raise_ts is None: continue raise_latencies.append(irq.begin_ts - irq.raise_ts) return statistics.stdev(raise_latencies) def _print_frequency_distribution(self, freq_table): title_fmt = 'Handler duration frequency distribution {}' graph = termgraph.FreqGraph( data=freq_table.rows, get_value=lambda row: row.count.value, get_lower_bound=lambda row: row.duration_lower.to_us(), title=title_fmt.format(freq_table.subtitle), unit='µs' ) graph.print_graph() def _filter_irq(self, irq): if type(irq) is sv.HardIRQ: if self._args.irq_filter_list: return str(irq.id) in self._args.irq_filter_list if self._args.softirq_filter_list: return False else: # SoftIRQ if self._args.softirq_filter_list: return str(irq.id) in self._args.softirq_filter_list if self._args.irq_filter_list: return False return True def _print_hard_irq_stats_row(self, row): output_str = self._get_duration_stats_str(row) print(output_str) def _print_soft_irq_stats_row(self, row): output_str = self._get_duration_stats_str(row) if row.raise_count.value != 0: output_str += self._get_raise_latency_str(row) print(output_str) def _get_duration_stats_str(self, row): format_str = '{:<3} {:<18} {:>5} {:>12} {:>12} {:>12} {:>12} {:<2}' irq_do = row.irq count = row.count.value min_duration = row.min_duration.to_us() avg_duration = row.avg_duration.to_us() max_duration = row.max_duration.to_us() if type(row.stdev_duration) is mi.Unknown: duration_stdev_str = '?' 
else: duration_stdev_str = '%0.03f' % row.stdev_duration.to_us() output_str = format_str.format('%d:' % irq_do.nr, '<%s>' % irq_do.name, '%d' % count, '%0.03f' % min_duration, '%0.03f' % avg_duration, '%0.03f' % max_duration, '%s' % duration_stdev_str, ' |') return output_str def _get_raise_latency_str(self, row): format_str = ' {:>6} {:>12} {:>12} {:>12} {:>12}' raise_count = row.raise_count.value min_raise_latency = row.min_latency.to_us() avg_raise_latency = row.avg_latency.to_us() max_raise_latency = row.max_latency.to_us() if type(row.stdev_latency) is mi.Unknown: raise_latency_stdev_str = '?' else: raise_latency_stdev_str = '%0.03f' % row.stdev_latency.to_us() output_str = format_str.format(raise_count, '%0.03f' % min_raise_latency, '%0.03f' % avg_raise_latency, '%0.03f' % max_raise_latency, '%s' % raise_latency_stdev_str) return output_str def _print_stats_freq(self, hard_stats_table, soft_stats_table, freq_tables): hard_header_format = '{:<52} {:<12}\n' \ '{:<22} {:<14} {:<12} {:<12} {:<10} {:<12}\n' hard_header = hard_header_format.format( 'Hard IRQ', 'Duration (us)', '', 'count', 'min', 'avg', 'max', 'stdev' ) hard_header += ('-' * 82 + '|') soft_header_format = '{:<52} {:<52} {:<12}\n' \ '{:<22} {:<14} {:<12} {:<12} {:<10} {:<4} ' \ '{:<3} {:<14} {:<12} {:<12} {:<10} {:<12}\n' soft_header = soft_header_format.format( 'Soft IRQ', 'Duration (us)', 'Raise latency (us)', '', 'count', 'min', 'avg', 'max', 'stdev', ' |', 'count', 'min', 'avg', 'max', 'stdev' ) soft_header += '-' * 82 + '|' + '-' * 60 if hard_stats_table.rows or soft_stats_table.rows: stats_rows = itertools.chain(hard_stats_table.rows, soft_stats_table.rows) if freq_tables: for stats_row, freq_table in zip(stats_rows, freq_tables): irq = stats_row.irq if irq.is_hard: print(hard_header) self._print_hard_irq_stats_row(stats_row) else: print(soft_header) self._print_soft_irq_stats_row(stats_row) # frequency table might be empty: do not print if freq_table.rows: print() 
self._print_frequency_distribution(freq_table) print() else: hard_header_printed = False soft_header_printed = False for stats_row in stats_rows: irq = stats_row.irq if irq.is_hard: if not hard_header_printed: print(hard_header) hard_header_printed = True self._print_hard_irq_stats_row(stats_row) else: if not soft_header_printed: if hard_header_printed: print() print(soft_header) soft_header_printed = True self._print_soft_irq_stats_row(stats_row) return for freq_table in freq_tables: # frequency table might be empty: do not print if freq_table.rows: print() self._print_frequency_distribution(freq_table) def _add_arguments(self, ap): Command._add_min_max_args(ap) Command._add_freq_args( ap, help='Output the frequency distribution of handler durations') Command._add_log_args( ap, help='Output the IRQs in chronological order') Command._add_stats_args(ap, help='Output IRQ statistics') ap.add_argument('--irq', type=str, default=None, help='Output results only for the list of IRQ') ap.add_argument('--softirq', type=str, default=None, help='Output results only for the list of SoftIRQ') def _run(mi_mode): irqcmd = IrqAnalysisCommand(mi_mode=mi_mode) irqcmd.run() def _runstats(mi_mode): sys.argv.insert(1, '--stats') _run(mi_mode) def _runlog(mi_mode): sys.argv.insert(1, '--log') _run(mi_mode) def _runfreq(mi_mode): sys.argv.insert(1, '--freq') _run(mi_mode) def runstats(): _runstats(mi_mode=False) def runlog(): _runlog(mi_mode=False) def runfreq(): _runfreq(mi_mode=False) def runstats_mi(): _runstats(mi_mode=True) def runlog_mi(): _runlog(mi_mode=True) def runfreq_mi(): _runfreq(mi_mode=True) lttnganalyses-0.4.3/lttnganalyses/cli/sched.py0000664000175000017500000007606012665072151023173 0ustar mjeansonmjeanson00000000000000# The MIT License (MIT) # # Copyright (C) 2015 - Julien Desfossez # 2015 - Antoine Busque # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # 
in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. import sys import math import operator import statistics import collections from . import mi from . import termgraph from ..core import sched from .command import Command from ..common import format_utils from ..linuxautomaton import common _SchedStats = collections.namedtuple('_SchedStats', [ 'count', 'min', 'max', 'stdev', 'total', ]) class SchedAnalysisCommand(Command): _DESC = """The sched command.""" _ANALYSIS_CLASS = sched.SchedAnalysis _MI_TITLE = 'Scheduling latencies analysis' _MI_DESCRIPTION = \ 'Scheduling latencies frequency distribution, statistics, top, and log' _MI_TAGS = [mi.Tags.SCHED, mi.Tags.STATS, mi.Tags.FREQ, mi.Tags.TOP, mi.Tags.LOG] _MI_TABLE_CLASS_LOG = 'log' _MI_TABLE_CLASS_TOP = 'top' _MI_TABLE_CLASS_TOTAL_STATS = 'total_stats' _MI_TABLE_CLASS_PER_TID_STATS = 'per_tid_stats' _MI_TABLE_CLASS_PER_PRIO_STATS = 'per_prio_stats' _MI_TABLE_CLASS_FREQ = 'freq' # _MI_TABLE_CLASS_SUMMARY = 'summary' _MI_TABLE_CLASSES = [ ( _MI_TABLE_CLASS_LOG, 'Scheduling log', [ ('wakeup_ts', 'Wakeup timestamp', mi.Timestamp), ('switch_ts', 'Switch timestamp', mi.Timestamp), ('latency', 
'Scheduling latency', mi.Duration), ('prio', 'Priority', mi.Integer), ('target_cpu', 'Target CPU', mi.Integer), ('wakee_proc', 'Wakee process', mi.Process), ('waker_proc', 'Waker process', mi.Process), ] ), ( _MI_TABLE_CLASS_TOP, 'Scheduling top', [ ('wakeup_ts', 'Wakeup timestamp', mi.Timestamp), ('switch_ts', 'Switch timestamp', mi.Timestamp), ('latency', 'Scheduling latency', mi.Duration), ('prio', 'Priority', mi.Integer), ('target_cpu', 'Target CPU', mi.Integer), ('wakee_proc', 'Wakee process', mi.Process), ('waker_proc', 'Waker process', mi.Process), ] ), ( _MI_TABLE_CLASS_TOTAL_STATS, 'Scheduling latency stats (total)', [ ('count', 'Scheduling count', mi.Integer, 'schedulings'), ('min_latency', 'Minimum latency', mi.Duration), ('avg_latency', 'Average latency', mi.Duration), ('max_latency', 'Maximum latency', mi.Duration), ('stdev_latency', 'Scheduling latency standard deviation', mi.Duration), ] ), ( _MI_TABLE_CLASS_PER_TID_STATS, 'Scheduling latency stats (per-TID)', [ ('process', 'Wakee process', mi.Process), ('count', 'Scheduling count', mi.Integer, 'schedulings'), ('min_latency', 'Minimum latency', mi.Duration), ('avg_latency', 'Average latency', mi.Duration), ('max_latency', 'Maximum latency', mi.Duration), ('stdev_latency', 'Scheduling latency standard deviation', mi.Duration), ('prio_list', 'Chronological priorities', mi.String), ] ), ( _MI_TABLE_CLASS_PER_PRIO_STATS, 'Scheduling latency stats (per-prio)', [ ('prio', 'Priority', mi.Integer), ('count', 'Scheduling count', mi.Integer, 'schedulings'), ('min_latency', 'Minimum latency', mi.Duration), ('avg_latency', 'Average latency', mi.Duration), ('max_latency', 'Maximum latency', mi.Duration), ('stdev_latency', 'Scheduling latency standard deviation', mi.Duration), ] ), ( _MI_TABLE_CLASS_FREQ, 'Scheduling latency frequency distribution', [ ('duration_lower', 'Duration (lower bound)', mi.Duration), ('duration_upper', 'Duration (upper bound)', mi.Duration), ('count', 'Scheduling count', mi.Integer, 
'schedulings'), ] ), ] def _analysis_tick(self, begin_ns, end_ns): log_table = None top_table = None total_stats_table = None per_tid_stats_table = None per_prio_stats_table = None total_freq_tables = None per_tid_freq_tables = None per_prio_freq_tables = None if self._args.log: log_table = self._get_log_result_table(begin_ns, end_ns) if self._args.top: top_table = self._get_top_result_table(begin_ns, end_ns) if self._args.stats: if self._args.total: total_stats_table = self._get_total_stats_result_table( begin_ns, end_ns) if self._args.per_tid: per_tid_stats_table = self._get_per_tid_stats_result_table( begin_ns, end_ns) if self._args.per_prio: per_prio_stats_table = self._get_per_prio_stats_result_table( begin_ns, end_ns) if self._args.freq: if self._args.total: total_freq_tables = self._get_total_freq_result_tables( begin_ns, end_ns) if self._args.per_tid: per_tid_freq_tables = self._get_per_tid_freq_result_tables( begin_ns, end_ns) if self._args.per_prio: per_prio_freq_tables = self._get_per_prio_freq_result_tables( begin_ns, end_ns) if self._mi_mode: if log_table: self._mi_append_result_table(log_table) if top_table: self._mi_append_result_table(top_table) if total_stats_table and total_stats_table.rows: self._mi_append_result_table(total_stats_table) if per_tid_stats_table and per_tid_stats_table.rows: self._mi_append_result_table(per_tid_stats_table) if per_prio_stats_table and per_prio_stats_table.rows: self._mi_append_result_table(per_prio_stats_table) if self._args.freq_series: if total_freq_tables: self._mi_append_result_tables(total_freq_tables) if per_tid_freq_tables: per_tid_freq_tables = [ self._get_per_tid_freq_series_table( per_tid_freq_tables) ] self._mi_append_result_tables(per_tid_freq_tables) if per_prio_freq_tables: per_prio_freq_tables = [ self._get_per_prio_freq_series_table( per_prio_freq_tables) ] self._mi_append_result_tables(per_prio_freq_tables) else: self._print_date(begin_ns, end_ns) if self._args.stats: if total_stats_table: 
self._print_total_stats(total_stats_table) if per_tid_stats_table: self._print_per_tid_stats(per_tid_stats_table) if per_prio_stats_table: self._print_per_prio_stats(per_prio_stats_table) if self._args.freq: if total_freq_tables: self._print_freq(total_freq_tables) if per_tid_freq_tables: self._print_freq(per_tid_freq_tables) if per_prio_freq_tables: self._print_freq(per_prio_freq_tables) if log_table: self._print_sched_events(log_table) if top_table: self._print_sched_events(top_table) def _get_total_sched_lists_stats(self): total_list = self._analysis.sched_list stdev = self._compute_sched_latency_stdev(total_list) total_stats = _SchedStats( count=self._analysis.count, min=self._analysis.min_latency, max=self._analysis.max_latency, stdev=stdev, total=self._analysis.total_latency ) return [total_list], total_stats def _get_tid_sched_lists_stats(self): tid_sched_lists = {} tid_stats = {} for sched_event in self._analysis.sched_list: tid = sched_event.wakee_proc.tid if tid not in tid_sched_lists: tid_sched_lists[tid] = [] tid_sched_lists[tid].append(sched_event) for tid in tid_sched_lists: sched_list = tid_sched_lists[tid] if not sched_list: continue stdev = self._compute_sched_latency_stdev(sched_list) latencies = [sched.latency for sched in sched_list] count = len(latencies) min_latency = min(latencies) max_latency = max(latencies) total_latency = sum(latencies) tid_stats[tid] = _SchedStats( count=count, min=min_latency, max=max_latency, stdev=stdev, total=total_latency, ) return tid_sched_lists, tid_stats def _get_prio_sched_lists_stats(self): prio_sched_lists = {} prio_stats = {} for sched_event in self._analysis.sched_list: if sched_event.prio not in prio_sched_lists: prio_sched_lists[sched_event.prio] = [] prio_sched_lists[sched_event.prio].append(sched_event) for prio in prio_sched_lists: sched_list = prio_sched_lists[prio] if not sched_list: continue stdev = self._compute_sched_latency_stdev(sched_list) latencies = [sched.latency for sched in sched_list] 
count = len(latencies) min_latency = min(latencies) max_latency = max(latencies) total_latency = sum(latencies) prio_stats[prio] = _SchedStats( count=count, min=min_latency, max=max_latency, stdev=stdev, total=total_latency, ) return prio_sched_lists, prio_stats def _get_log_result_table(self, begin_ns, end_ns): result_table = self._mi_create_result_table(self._MI_TABLE_CLASS_LOG, begin_ns, end_ns) for sched_event in self._analysis.sched_list: wakee_proc = mi.Process(sched_event.wakee_proc.comm, sched_event.wakee_proc.pid, sched_event.wakee_proc.tid) if sched_event.waker_proc: waker_proc = mi.Process(sched_event.waker_proc.comm, sched_event.waker_proc.pid, sched_event.waker_proc.tid) else: waker_proc = mi.Empty() result_table.append_row( wakeup_ts=mi.Timestamp(sched_event.wakeup_ts), switch_ts=mi.Timestamp(sched_event.switch_ts), latency=mi.Duration(sched_event.latency), prio=mi.Integer(sched_event.prio), target_cpu=mi.Integer(sched_event.target_cpu), wakee_proc=wakee_proc, waker_proc=waker_proc, ) return result_table def _get_top_result_table(self, begin_ns, end_ns): result_table = self._mi_create_result_table( self._MI_TABLE_CLASS_TOP, begin_ns, end_ns) top_events = sorted(self._analysis.sched_list, key=operator.attrgetter('latency'), reverse=True) top_events = top_events[:self._args.limit] for sched_event in top_events: wakee_proc = mi.Process(sched_event.wakee_proc.comm, sched_event.wakee_proc.pid, sched_event.wakee_proc.tid) if sched_event.waker_proc: waker_proc = mi.Process(sched_event.waker_proc.comm, sched_event.waker_proc.pid, sched_event.waker_proc.tid) else: waker_proc = mi.Empty() result_table.append_row( wakeup_ts=mi.Timestamp(sched_event.wakeup_ts), switch_ts=mi.Timestamp(sched_event.switch_ts), latency=mi.Duration(sched_event.latency), prio=mi.Integer(sched_event.prio), target_cpu=mi.Integer(sched_event.target_cpu), wakee_proc=wakee_proc, waker_proc=waker_proc, ) return result_table def _get_total_stats_result_table(self, begin_ns, end_ns): 
stats_table = \ self._mi_create_result_table(self._MI_TABLE_CLASS_TOTAL_STATS, begin_ns, end_ns) stdev = self._compute_sched_latency_stdev(self._analysis.sched_list) if math.isnan(stdev): stdev = mi.Unknown() else: stdev = mi.Duration(stdev) stats_table.append_row( count=mi.Integer(self._analysis.count), min_latency=mi.Duration(self._analysis.min_latency), avg_latency=mi.Duration(self._analysis.total_latency / self._analysis.count), max_latency=mi.Duration(self._analysis.max_latency), stdev_latency=stdev, ) return stats_table def _get_per_tid_stats_result_table(self, begin_ns, end_ns): stats_table = \ self._mi_create_result_table(self._MI_TABLE_CLASS_PER_TID_STATS, begin_ns, end_ns) tid_stats_list = sorted(list(self._analysis.tids.values()), key=lambda proc: proc.comm.lower()) for tid_stats in tid_stats_list: if not tid_stats.sched_list: continue stdev = self._compute_sched_latency_stdev(tid_stats.sched_list) if math.isnan(stdev): stdev = mi.Unknown() else: stdev = mi.Duration(stdev) prio_list = format_utils.format_prio_list(tid_stats.prio_list) stats_table.append_row( process=mi.Process(tid=tid_stats.tid, name=tid_stats.comm), count=mi.Integer(tid_stats.count), min_latency=mi.Duration(tid_stats.min_latency), avg_latency=mi.Duration(tid_stats.total_latency / tid_stats.count), max_latency=mi.Duration(tid_stats.max_latency), stdev_latency=stdev, prio_list=mi.String(prio_list), ) return stats_table def _get_per_prio_stats_result_table(self, begin_ns, end_ns): stats_table = \ self._mi_create_result_table(self._MI_TABLE_CLASS_PER_PRIO_STATS, begin_ns, end_ns) _, prio_stats = self._get_prio_sched_lists_stats() for prio in sorted(prio_stats): stats = prio_stats[prio] stdev = stats.stdev if math.isnan(stdev): stdev = mi.Unknown() else: stdev = mi.Duration(stdev) count = stats.count min_latency = stats.min max_latency = stats.max total_latency = stats.total stats_table.append_row( prio=mi.Integer(prio), count=mi.Integer(count), min_latency=mi.Duration(min_latency), 
avg_latency=mi.Duration(total_latency / count), max_latency=mi.Duration(max_latency), stdev_latency=stdev, ) return stats_table def _get_per_tid_freq_series_table(self, freq_tables): if not freq_tables: return column_infos = [ ('duration_lower', 'Duration (lower bound)', mi.Duration), ('duration_upper', 'Duration (upper bound)', mi.Duration), ] for index, freq_table in enumerate(freq_tables): column_infos.append(( 'tid{}'.format(index), freq_table.subtitle, mi.Integer, 'schedulings' )) title = 'Scheduling latencies frequency distributions' table_class = mi.TableClass(None, title, column_infos) begin = freq_tables[0].timerange.begin end = freq_tables[0].timerange.end result_table = mi.ResultTable(table_class, begin, end) for row_index, freq0_row in enumerate(freq_tables[0].rows): row_tuple = [ freq0_row.duration_lower, freq0_row.duration_upper, ] for freq_table in freq_tables: freq_row = freq_table.rows[row_index] row_tuple.append(freq_row.count) result_table.append_row_tuple(tuple(row_tuple)) return result_table def _get_per_prio_freq_series_table(self, freq_tables): if not freq_tables: return column_infos = [ ('duration_lower', 'Duration (lower bound)', mi.Duration), ('duration_upper', 'Duration (upper bound)', mi.Duration), ] for index, freq_table in enumerate(freq_tables): column_infos.append(( 'prio{}'.format(index), freq_table.subtitle, mi.Integer, 'schedulings' )) title = 'Scheduling latencies frequency distributions' table_class = mi.TableClass(None, title, column_infos) begin = freq_tables[0].timerange.begin end = freq_tables[0].timerange.end result_table = mi.ResultTable(table_class, begin, end) for row_index, freq0_row in enumerate(freq_tables[0].rows): row_tuple = [ freq0_row.duration_lower, freq0_row.duration_upper, ] for freq_table in freq_tables: freq_row = freq_table.rows[row_index] row_tuple.append(freq_row.count) result_table.append_row_tuple(tuple(row_tuple)) return result_table def _fill_freq_result_table(self, sched_list, stats, min_duration, 
max_duration, step, freq_table): # The number of bins for the histogram resolution = self._args.freq_resolution if not self._args.freq_uniform: if self._args.min is not None: min_duration = self._args.min else: min_duration = stats.min if self._args.max is not None: max_duration = self._args.max else: max_duration = stats.max # ns to µs min_duration /= 1000 max_duration /= 1000 step = (max_duration - min_duration) / resolution if step == 0: return buckets = [] counts = [] for i in range(resolution): buckets.append(i * step) counts.append(0) for sched_event in sched_list: duration = sched_event.latency / 1000 index = int((duration - min_duration) / step) if index >= resolution: # special case for max value: put in last bucket (includes # its upper bound) if duration == max_duration: counts[index - 1] += 1 continue counts[index] += 1 for index, count in enumerate(counts): lower_bound = index * step + min_duration upper_bound = (index + 1) * step + min_duration freq_table.append_row( duration_lower=mi.Duration.from_us(lower_bound), duration_upper=mi.Duration.from_us(upper_bound), count=mi.Integer(count), ) def _get_total_freq_result_tables(self, begin_ns, end_ns): freq_tables = [] sched_lists, sched_stats = self._get_total_sched_lists_stats() min_duration = None max_duration = None step = None if self._args.freq_uniform: latencies = [] for sched_list in sched_lists: latencies += [sched.latency for sched in sched_list] min_duration, max_duration, step = \ self._get_uniform_freq_values(latencies) for sched_list in sched_lists: freq_table = \ self._mi_create_result_table(self._MI_TABLE_CLASS_FREQ, begin_ns, end_ns) self._fill_freq_result_table(sched_list, sched_stats, min_duration, max_duration, step, freq_table) freq_tables.append(freq_table) return freq_tables def _get_per_tid_freq_result_tables(self, begin_ns, end_ns): freq_tables = [] tid_sched_lists, tid_stats = self._get_tid_sched_lists_stats() min_duration = None max_duration = None step = None if 
self._args.freq_uniform: latencies = [] for sched_list in tid_sched_lists.values(): latencies += [sched.latency for sched in sched_list] min_duration, max_duration, step = \ self._get_uniform_freq_values(latencies) for tid in sorted(tid_sched_lists): sched_list = tid_sched_lists[tid] stats = tid_stats[tid] subtitle = 'TID: {}'.format(tid) freq_table = \ self._mi_create_result_table(self._MI_TABLE_CLASS_FREQ, begin_ns, end_ns, subtitle) self._fill_freq_result_table(sched_list, stats, min_duration, max_duration, step, freq_table) freq_tables.append(freq_table) return freq_tables def _get_per_prio_freq_result_tables(self, begin_ns, end_ns): freq_tables = [] prio_sched_lists, prio_stats = self._get_prio_sched_lists_stats() min_duration = None max_duration = None step = None if self._args.freq_uniform: latencies = [] for sched_list in prio_sched_lists.values(): latencies += [sched.latency for sched in sched_list] min_duration, max_duration, step = \ self._get_uniform_freq_values(latencies) for prio in sorted(prio_sched_lists): sched_list = prio_sched_lists[prio] stats = prio_stats[prio] subtitle = 'Priority: {}'.format(prio) freq_table = \ self._mi_create_result_table(self._MI_TABLE_CLASS_FREQ, begin_ns, end_ns, subtitle) self._fill_freq_result_table(sched_list, stats, min_duration, max_duration, step, freq_table) freq_tables.append(freq_table) return freq_tables def _compute_sched_latency_stdev(self, sched_events): if len(sched_events) < 2: return float('nan') sched_latencies = [] for sched_event in sched_events: sched_latencies.append(sched_event.latency) return statistics.stdev(sched_latencies) def _ns_to_hour_nsec(self, ts): return common.ns_to_hour_nsec(ts, self._args.multi_day, self._args.gmt) def _print_sched_events(self, result_table): fmt = '[{:<18}, {:<18}] {:>15} {:>10} {:>3} {:<25} {:<25}' title_fmt = '{:<20} {:<19} {:>15} {:>10} {:>3} {:<25} {:<25}' print() print(result_table.title) print(title_fmt.format('Wakeup', 'Switch', 'Latency (us)', 'Priority', 
'CPU', 'Wakee', 'Waker')) for row in result_table.rows: wakeup_ts = row.wakeup_ts.value switch_ts = row.switch_ts.value latency = row.latency.value prio = row.prio.value target_cpu = row.target_cpu.value wakee_proc = row.wakee_proc waker_proc = row.waker_proc wakee_str = '%s (%d)' % (wakee_proc.name, wakee_proc.tid) if isinstance(waker_proc, mi.Empty): waker_str = 'Unknown (N/A)' else: waker_str = '%s (%d)' % (waker_proc.name, waker_proc.tid) print(fmt.format(self._ns_to_hour_nsec(wakeup_ts), self._ns_to_hour_nsec(switch_ts), '%0.03f' % (latency / 1000), prio, target_cpu, wakee_str, waker_str)) def _print_total_stats(self, stats_table): row_format = '{:<12} {:<12} {:<12} {:<12} {:<12}' header = row_format.format( 'Count', 'Min', 'Avg', 'Max', 'Stdev' ) if stats_table.rows: print() print(stats_table.title + ' (us)') print(header) for row in stats_table.rows: if type(row.stdev_latency) is mi.Unknown: stdev_str = '?' else: stdev_str = '%0.03f' % row.stdev_latency.to_us() row_str = row_format.format( '%d' % row.count.value, '%0.03f' % row.min_latency.to_us(), '%0.03f' % row.avg_latency.to_us(), '%0.03f' % row.max_latency.to_us(), '%s' % stdev_str, ) print(row_str) def _print_per_tid_stats(self, stats_table): row_format = '{:<25} {:>8} {:>12} {:>12} {:>12} {:>12} {}' header = row_format.format( 'Process', 'Count', 'Min', 'Avg', 'Max', 'Stdev', 'Priorities' ) if stats_table.rows: print() print(stats_table.title + ' (us)') print(header) for row in stats_table.rows: if type(row.stdev_latency) is mi.Unknown: stdev_str = '?' 
else: stdev_str = '%0.03f' % row.stdev_latency.to_us() proc = row.process proc_str = '%s (%d)' % (proc.name, proc.tid) row_str = row_format.format( '%s' % proc_str, '%d' % row.count.value, '%0.03f' % row.min_latency.to_us(), '%0.03f' % row.avg_latency.to_us(), '%0.03f' % row.max_latency.to_us(), '%s' % stdev_str, '%s' % row.prio_list.value, ) print(row_str) def _print_per_prio_stats(self, stats_table): row_format = '{:>4} {:>8} {:>12} {:>12} {:>12} {:>12}' header = row_format.format( 'Prio', 'Count', 'Min', 'Avg', 'Max', 'Stdev' ) if stats_table.rows: print() print(stats_table.title + ' (us)') print(header) for row in stats_table.rows: if type(row.stdev_latency) is mi.Unknown: stdev_str = '?' else: stdev_str = '%0.03f' % row.stdev_latency.to_us() row_str = row_format.format( '%d' % row.prio.value, '%d' % row.count.value, '%0.03f' % row.min_latency.to_us(), '%0.03f' % row.avg_latency.to_us(), '%0.03f' % row.max_latency.to_us(), '%s' % stdev_str, ) print(row_str) def _print_frequency_distribution(self, freq_table): title_fmt = 'Scheduling latency frequency distribution - {}' graph = termgraph.FreqGraph( data=freq_table.rows, get_value=lambda row: row.count.value, get_lower_bound=lambda row: row.duration_lower.to_us(), title=title_fmt.format(freq_table.subtitle), unit='µs' ) graph.print_graph() def _print_freq(self, freq_tables): for freq_table in freq_tables: self._print_frequency_distribution(freq_table) def _validate_transform_args(self, args): # If neither --total nor --per-prio are specified, default # to --per-tid if not (args.total or args.per_prio): args.per_tid = True def _add_arguments(self, ap): Command._add_min_max_args(ap) Command._add_proc_filter_args(ap) Command._add_freq_args( ap, help='Output the frequency distribution of sched switch ' 'latencies') Command._add_top_args(ap, help='Output the top sched switch latencies') Command._add_log_args( ap, help='Output the sched switches in chronological order') Command._add_stats_args(ap, help='Output sched 
switch statistics') ap.add_argument('--total', action='store_true', help='Group all results (applies to stats and freq)') ap.add_argument('--per-tid', action='store_true', help='Group results per-TID (applies to stats and ' 'freq) (default)') ap.add_argument('--per-prio', action='store_true', help='Group results per-prio (applies to stats and ' 'freq)') def _run(mi_mode): schedcmd = SchedAnalysisCommand(mi_mode=mi_mode) schedcmd.run() def _runstats(mi_mode): sys.argv.insert(1, '--stats') _run(mi_mode) def _runlog(mi_mode): sys.argv.insert(1, '--log') _run(mi_mode) def _runtop(mi_mode): sys.argv.insert(1, '--top') _run(mi_mode) def _runfreq(mi_mode): sys.argv.insert(1, '--freq') _run(mi_mode) def runstats(): _runstats(mi_mode=False) def runlog(): _runlog(mi_mode=False) def runtop(): _runtop(mi_mode=False) def runfreq(): _runfreq(mi_mode=False) def runstats_mi(): _runstats(mi_mode=True) def runlog_mi(): _runlog(mi_mode=True) def runtop_mi(): _runtop(mi_mode=True) def runfreq_mi(): _runfreq(mi_mode=True) lttnganalyses-0.4.3/lttnganalyses/cli/io.py0000664000175000017500000012704712665072151022516 0ustar mjeansonmjeanson00000000000000# The MIT License (MIT) # # Copyright (C) 2015 - Julien Desfossez # 2015 - Antoine Busque # 2015 - Philippe Proulx # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. 
# # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. import collections import operator import statistics import sys from . import mi from . import termgraph from ..core import io from ..common import format_utils from .command import Command from ..linuxautomaton import common _UsageTables = collections.namedtuple('_UsageTables', [ 'per_proc_read', 'per_proc_write', 'per_file_read', 'per_file_write', 'per_proc_block_read', 'per_proc_block_write', 'per_disk_sector', 'per_disk_request', 'per_disk_rtps', 'per_netif_recv', 'per_netif_send', ]) class IoAnalysisCommand(Command): _DESC = """The I/O command.""" _ANALYSIS_CLASS = io.IoAnalysis _MI_TITLE = 'I/O analysis' _MI_DESCRIPTION = 'System call/disk latency statistics, system call ' + \ 'latency distribution, system call top latencies, ' + \ 'I/O usage top, and I/O operations log' _MI_TAGS = [ mi.Tags.IO, mi.Tags.SYSCALL, mi.Tags.STATS, mi.Tags.FREQ, mi.Tags.LOG, mi.Tags.TOP, ] _MI_TABLE_CLASS_SYSCALL_LATENCY_STATS = 'syscall-latency-stats' _MI_TABLE_CLASS_PART_LATENCY_STATS = 'disk-latency-stats' _MI_TABLE_CLASS_FREQ = 'freq' _MI_TABLE_CLASS_TOP_SYSCALL = 'top-syscall' _MI_TABLE_CLASS_LOG = 'log' _MI_TABLE_CLASS_PER_PROCESS_TOP = 'per-process-top' _MI_TABLE_CLASS_PER_FILE_TOP = 'per-file-top' _MI_TABLE_CLASS_PER_PROCESS_TOP_BLOCK = 'per-process-top-block' _MI_TABLE_CLASS_PER_DISK_TOP_SECTOR = 'per-disk-top-sector' _MI_TABLE_CLASS_PER_DISK_TOP_REQUEST = 'per-disk-top-request' _MI_TABLE_CLASS_PER_DISK_TOP_RTPS = 'per-disk-top-rps' _MI_TABLE_CLASS_PER_NETIF_TOP = 'per-netif-top' _MI_TABLE_CLASSES = [ ( 
_MI_TABLE_CLASS_SYSCALL_LATENCY_STATS, 'System call latency statistics', [ ('obj', 'System call category', mi.String), ('count', 'Call count', mi.Integer, 'calls'), ('min_latency', 'Minimum call latency', mi.Duration), ('avg_latency', 'Average call latency', mi.Duration), ('max_latency', 'Maximum call latency', mi.Duration), ('stdev_latency', 'System call latency standard deviation', mi.Duration), ] ), ( _MI_TABLE_CLASS_PART_LATENCY_STATS, 'Partition latency statistics', [ ('obj', 'Partition', mi.Disk), ('count', 'Access count', mi.Integer, 'accesses'), ('min_latency', 'Minimum access latency', mi.Duration), ('avg_latency', 'Average access latency', mi.Duration), ('max_latency', 'Maximum access latency', mi.Duration), ('stdev_latency', 'System access latency standard deviation', mi.Duration), ] ), ( _MI_TABLE_CLASS_FREQ, 'I/O request latency distribution', [ ('latency_lower', 'Latency (lower bound)', mi.Duration), ('latency_upper', 'Latency (upper bound)', mi.Duration), ('count', 'Request count', mi.Integer, 'requests'), ] ), ( _MI_TABLE_CLASS_TOP_SYSCALL, 'Top system call latencies', [ ('time_range', 'Call time range', mi.TimeRange), ('out_of_range', 'System call out of range?', mi.Boolean), ('duration', 'Call duration', mi.Duration), ('syscall', 'System call', mi.Syscall), ('size', 'Read/write size', mi.Size), ('process', 'Process', mi.Process), ('path', 'File path', mi.Path), ('fd', 'File descriptor', mi.Fd), ] ), ( _MI_TABLE_CLASS_LOG, 'I/O operations log', [ ('time_range', 'Call time range', mi.TimeRange), ('out_of_range', 'System call out of range?', mi.Boolean), ('duration', 'Call duration', mi.Duration), ('syscall', 'System call', mi.Syscall), ('size', 'Read/write size', mi.Size), ('process', 'Process', mi.Process), ('path', 'File path', mi.Path), ('fd', 'File descriptor', mi.Fd), ] ), ( _MI_TABLE_CLASS_PER_PROCESS_TOP, 'Per-process top I/O operations', [ ('process', 'Process', mi.Process), ('size', 'Total operations size', mi.Size), ('disk_size', 'Disk 
operations size', mi.Size), ('net_size', 'Network operations size', mi.Size), ('unknown_size', 'Unknown operations size', mi.Size), ] ), ( _MI_TABLE_CLASS_PER_FILE_TOP, 'Per-file top I/O operations', [ ('path', 'File path/info', mi.Path), ('size', 'Operations size', mi.Size), ('fd_owners', 'File descriptor owners', mi.String), ] ), ( _MI_TABLE_CLASS_PER_PROCESS_TOP_BLOCK, 'Per-process top block I/O operations', [ ('process', 'Process', mi.Process), ('size', 'Operations size', mi.Size), ] ), ( _MI_TABLE_CLASS_PER_DISK_TOP_SECTOR, 'Per-disk top sector I/O operations', [ ('disk', 'Disk', mi.Disk), ('count', 'Sector count', mi.Integer, 'sectors'), ] ), ( _MI_TABLE_CLASS_PER_DISK_TOP_REQUEST, 'Per-disk top I/O requests', [ ('disk', 'Disk', mi.Disk), ('count', 'Request count', mi.Integer, 'I/O requests'), ] ), ( _MI_TABLE_CLASS_PER_DISK_TOP_RTPS, 'Per-disk top I/O request time/sector', [ ('disk', 'Disk', mi.Disk), ('rtps', 'Request time/sector', mi.Duration), ] ), ( _MI_TABLE_CLASS_PER_NETIF_TOP, 'Per-network interface top I/O operations', [ ('netif', 'Network interface', mi.NetIf), ('size', 'Operations size', mi.Size), ] ), ] _LATENCY_STATS_FORMAT = '{:<14} {:>14} {:>14} {:>14} {:>14} {:>14}' _SECTION_SEPARATOR_STRING = '-' * 89 def _analysis_tick(self, begin_ns, end_ns): syscall_latency_stats_table = None disk_latency_stats_table = None freq_tables = None top_tables = None log_table = None usage_tables = None if self._args.stats: syscall_latency_stats_table, disk_latency_stats_table = \ self._get_latency_stats_result_tables(begin_ns, end_ns) if self._args.freq: freq_tables = self._get_freq_result_tables(begin_ns, end_ns) if self._args.usage: usage_tables = self._get_usage_result_tables(begin_ns, end_ns) if self._args.top: top_tables = self._get_top_result_tables(begin_ns, end_ns) if self._args.log: log_table = self._get_log_result_table(begin_ns, end_ns) if self._mi_mode: self._mi_append_result_tables([ log_table, syscall_latency_stats_table, disk_latency_stats_table, 
]) self._mi_append_result_tables(top_tables) self._mi_append_result_tables(usage_tables) self._mi_append_result_tables(freq_tables) else: self._print_date(begin_ns, end_ns) if self._args.usage: self._print_usage(usage_tables) if self._args.stats: self._print_latency_stats(syscall_latency_stats_table, disk_latency_stats_table) if self._args.top: self._print_top(top_tables) if self._args.freq: self._print_freq(freq_tables) if self._args.log: self._print_log(log_table) def _create_summary_result_tables(self): # TODO: create a summary table here self._mi_clear_result_tables() # Filter predicates def _filter_size(self, size): if size is None: return True if self._args.maxsize is not None and size > self._args.maxsize: return False if self._args.minsize is not None and size < self._args.minsize: return False return True def _filter_latency(self, duration): if self._args.max is not None and duration > self._args.max: return False if self._args.min is not None and duration < self._args.min: return False return True def _filter_time_range(self, begin, end): # Note: we only want to return False only when a request has # ended and is completely outside the timerange (i.e. begun # after the end of the time range). 
return not (self._args.begin and self._args.end and end and begin > self._args.end) def _filter_io_request(self, io_rq): return self._filter_size(io_rq.size) and \ self._filter_latency(io_rq.duration) and \ self._filter_time_range(io_rq.begin_ts, io_rq.end_ts) def _is_io_rq_out_of_range(self, io_rq): return self._args.begin and io_rq.begin_ts < self._args.begin or \ self._args.end and io_rq.end_ts > self._args.end def _append_per_proc_read_usage_row(self, proc_stats, result_table): result_table.append_row( process=mi.Process(proc_stats.comm, pid=proc_stats.pid, tid=proc_stats.tid), size=mi.Size(proc_stats.total_read), disk_size=mi.Size(proc_stats.disk_io.read), net_size=mi.Size(proc_stats.net_io.read), unknown_size=mi.Size(proc_stats.unk_io.read), ) return True def _append_per_proc_write_usage_row(self, proc_stats, result_table): result_table.append_row( process=mi.Process(proc_stats.comm, pid=proc_stats.pid, tid=proc_stats.tid), size=mi.Size(proc_stats.total_write), disk_size=mi.Size(proc_stats.disk_io.write), net_size=mi.Size(proc_stats.net_io.write), unknown_size=mi.Size(proc_stats.unk_io.write), ) return True def _append_per_proc_block_read_usage_row(self, proc_stats, result_table): if proc_stats.block_io.read == 0: return False if proc_stats.comm: proc_name = proc_stats.comm else: proc_name = None result_table.append_row( process=mi.Process(proc_name, pid=proc_stats.pid, tid=proc_stats.tid), size=mi.Size(proc_stats.block_io.read), ) return True def _append_per_proc_block_write_usage_row(self, proc_stats, result_table): if proc_stats.block_io.write == 0: return False if proc_stats.comm: proc_name = proc_stats.comm else: proc_name = None result_table.append_row( process=mi.Process(proc_name, pid=proc_stats.pid, tid=proc_stats.tid), size=mi.Size(proc_stats.block_io.write), ) return True def _append_disk_sector_usage_row(self, disk_stats, result_table): if disk_stats.total_rq_sectors == 0: return None result_table.append_row( disk=mi.Disk(disk_stats.disk_name), 
count=mi.Integer(disk_stats.total_rq_sectors), ) return True def _append_disk_request_usage_row(self, disk_stats, result_table): if disk_stats.rq_count == 0: return False result_table.append_row( disk=mi.Disk(disk_stats.disk_name), count=mi.Integer(disk_stats.rq_count), ) return True def _append_disk_rtps_usage_row(self, disk_stats, result_table): if disk_stats.rq_count == 0: return False avg_latency = (disk_stats.total_rq_duration / disk_stats.rq_count) result_table.append_row( disk=mi.Disk(disk_stats.disk_name), rtps=mi.Duration(avg_latency), ) return True def _append_netif_recv_usage_row(self, netif_stats, result_table): result_table.append_row( netif=mi.NetIf(netif_stats.name), size=mi.Size(netif_stats.recv_bytes) ) return True def _append_netif_send_usage_row(self, netif_stats, result_table): result_table.append_row( netif=mi.NetIf(netif_stats.name), size=mi.Size(netif_stats.sent_bytes) ) return True def _get_file_stats_fd_owners_str(self, file_stats): fd_by_pid_str = '' for pid, fd in file_stats.fd_by_pid.items(): comm = self._analysis.tids[pid].comm fd_by_pid_str += 'fd %d in %s (%s) ' % (fd, comm, pid) return fd_by_pid_str def _append_file_read_usage_row(self, file_stats, result_table): if file_stats.io.read == 0: return False fd_owners = self._get_file_stats_fd_owners_str(file_stats) result_table.append_row( path=mi.Path(file_stats.filename), size=mi.Size(file_stats.io.read), fd_owners=mi.String(fd_owners), ) return True def _append_file_write_usage_row(self, file_stats, result_table): if file_stats.io.write == 0: return False fd_owners = self._get_file_stats_fd_owners_str(file_stats) result_table.append_row( path=mi.Path(file_stats.filename), size=mi.Size(file_stats.io.write), fd_owners=mi.String(fd_owners), ) return True def _fill_usage_result_table(self, input_list, append_row_cb, result_table): count = 0 limit = self._args.limit for elem in input_list: if append_row_cb(elem, result_table): count += 1 if limit is not None and count >= limit: break def 
_fill_per_process_read_usage_result_table(self, result_table): input_list = sorted(self._analysis.tids.values(), key=operator.attrgetter('total_read'), reverse=True) self._fill_usage_result_table(input_list, self._append_per_proc_read_usage_row, result_table) def _fill_per_process_write_usage_result_table(self, result_table): input_list = sorted(self._analysis.tids.values(), key=operator.attrgetter('total_write'), reverse=True) self._fill_usage_result_table(input_list, self._append_per_proc_write_usage_row, result_table) def _fill_per_process_block_read_usage_result_table(self, result_table): input_list = sorted(self._analysis.tids.values(), key=operator.attrgetter('block_io.read'), reverse=True) self._fill_usage_result_table( input_list, self._append_per_proc_block_read_usage_row, result_table) def _fill_per_process_block_write_usage_result_table(self, result_table): input_list = sorted(self._analysis.tids.values(), key=operator.attrgetter('block_io.write'), reverse=True) self._fill_usage_result_table( input_list, self._append_per_proc_block_write_usage_row, result_table) def _fill_disk_sector_usage_result_table(self, result_table): input_list = sorted(self._analysis.disks.values(), key=operator.attrgetter('total_rq_sectors'), reverse=True) self._fill_usage_result_table(input_list, self._append_disk_sector_usage_row, result_table) def _fill_disk_request_usage_result_table(self, result_table): input_list = sorted(self._analysis.disks.values(), key=operator.attrgetter('rq_count'), reverse=True) self._fill_usage_result_table(input_list, self._append_disk_request_usage_row, result_table) def _fill_disk_rtps_usage_result_table(self, result_table): input_list = self._analysis.disks.values() self._fill_usage_result_table(input_list, self._append_disk_rtps_usage_row, result_table) def _fill_netif_recv_usage_result_table(self, result_table): input_list = sorted(self._analysis.ifaces.values(), key=operator.attrgetter('recv_bytes'), reverse=True) 
self._fill_usage_result_table(input_list, self._append_netif_recv_usage_row, result_table) def _fill_netif_send_usage_result_table(self, result_table): input_list = sorted(self._analysis.ifaces.values(), key=operator.attrgetter('sent_bytes'), reverse=True) self._fill_usage_result_table(input_list, self._append_netif_send_usage_row, result_table) def _fill_file_read_usage_result_table(self, files, result_table): input_list = sorted(files.values(), key=lambda file_stats: file_stats.io.read, reverse=True) self._fill_usage_result_table(input_list, self._append_file_read_usage_row, result_table) def _fill_file_write_usage_result_table(self, files, result_table): input_list = sorted(files.values(), key=lambda file_stats: file_stats.io.write, reverse=True) self._fill_usage_result_table(input_list, self._append_file_write_usage_row, result_table) def _fill_file_usage_result_tables(self, read_table, write_table): files = self._analysis.get_files_stats() self._fill_file_read_usage_result_table(files, read_table) self._fill_file_write_usage_result_table(files, write_table) def _get_usage_result_tables(self, begin, end): # create result tables per_proc_read_table = self._mi_create_result_table( self._MI_TABLE_CLASS_PER_PROCESS_TOP, begin, end, 'read') per_proc_write_table = self._mi_create_result_table( self._MI_TABLE_CLASS_PER_PROCESS_TOP, begin, end, 'written') per_file_read_table = self._mi_create_result_table( self._MI_TABLE_CLASS_PER_FILE_TOP, begin, end, 'read') per_file_write_table = self._mi_create_result_table( self._MI_TABLE_CLASS_PER_FILE_TOP, begin, end, 'written') per_proc_block_read_table = self._mi_create_result_table( self._MI_TABLE_CLASS_PER_PROCESS_TOP_BLOCK, begin, end, 'read') per_proc_block_write_table = self._mi_create_result_table( self._MI_TABLE_CLASS_PER_PROCESS_TOP_BLOCK, begin, end, 'written') per_disk_sector_table = self._mi_create_result_table( self._MI_TABLE_CLASS_PER_DISK_TOP_SECTOR, begin, end) per_disk_request_table = 
self._mi_create_result_table( self._MI_TABLE_CLASS_PER_DISK_TOP_REQUEST, begin, end) per_disk_rtps_table = self._mi_create_result_table( self._MI_TABLE_CLASS_PER_DISK_TOP_RTPS, begin, end) per_netif_recv_table = self._mi_create_result_table( self._MI_TABLE_CLASS_PER_NETIF_TOP, begin, end, 'received') per_netif_send_table = self._mi_create_result_table( self._MI_TABLE_CLASS_PER_NETIF_TOP, begin, end, 'sent') # fill result tables self._fill_per_process_read_usage_result_table(per_proc_read_table) self._fill_per_process_write_usage_result_table(per_proc_write_table) self._fill_file_usage_result_tables(per_file_read_table, per_file_write_table) self._fill_per_process_block_read_usage_result_table( per_proc_block_read_table) self._fill_per_process_block_write_usage_result_table( per_proc_block_write_table) self._fill_disk_sector_usage_result_table(per_disk_sector_table) self._fill_disk_request_usage_result_table(per_disk_request_table) self._fill_disk_rtps_usage_result_table(per_disk_rtps_table) self._fill_netif_recv_usage_result_table(per_netif_recv_table) self._fill_netif_send_usage_result_table(per_netif_send_table) return _UsageTables( per_proc_read=per_proc_read_table, per_proc_write=per_proc_write_table, per_file_read=per_file_read_table, per_file_write=per_file_write_table, per_proc_block_read=per_proc_block_read_table, per_proc_block_write=per_proc_block_write_table, per_disk_sector=per_disk_sector_table, per_disk_request=per_disk_request_table, per_disk_rtps=per_disk_rtps_table, per_netif_recv=per_netif_recv_table, per_netif_send=per_netif_send_table, ) def _print_per_proc_io(self, result_table, title): header_format = '{:<25} {:<10} {:<10} {:<10}' label_header = header_format.format( 'Process', 'Disk', 'Net', 'Unknown' ) def get_label(row): label_format = '{:<25} {:>10} {:>10} {:>10}' if row.process.pid is None: pid_str = 'unknown (tid=%d)' % (row.process.tid) else: pid_str = str(row.process.pid) label = label_format.format( '%s (%s)' % (row.process.name, 
pid_str), format_utils.format_size(row.disk_size.value), format_utils.format_size(row.net_size.value), format_utils.format_size(row.unknown_size.value) ) return label graph = termgraph.BarGraph( title='Per-process I/O ' + title, label_header=label_header, get_value=lambda row: row.size.value, get_value_str=format_utils.format_size, get_label=get_label, data=result_table.rows ) graph.print_graph() def _print_per_proc_block_io(self, result_table, title): def get_label(row): proc_name = row.process.name if not proc_name: proc_name = 'unknown' if row.process.pid is None: pid_str = 'unknown (tid={})'.format(row.process.tid) else: pid_str = str(row.process.pid) return '{} (pid={})'.format(proc_name, pid_str) graph = termgraph.BarGraph( title='Block I/O ' + title, label_header='Process', get_value=lambda row: row.size.value, get_value_str=format_utils.format_size, get_label=get_label, data=result_table.rows ) graph.print_graph() def _print_per_disk_sector(self, result_table): graph = termgraph.BarGraph( title='Disk Requests Sector Count', label_header='Disk', unit='sectors', get_value=lambda row: row.count.value, get_label=lambda row: row.disk.name, data=result_table.rows ) graph.print_graph() def _print_per_disk_request(self, result_table): graph = termgraph.BarGraph( title='Disk Request Count', label_header='Disk', unit='requests', get_value=lambda row: row.count.value, get_label=lambda row: row.disk.name, data=result_table.rows ) graph.print_graph() def _print_per_disk_rtps(self, result_table): graph = termgraph.BarGraph( title='Disk Request Average Latency', label_header='Disk', unit='ms', get_value=lambda row: row.rtps.value / common.NSEC_PER_MSEC, get_label=lambda row: row.disk.name, data=result_table.rows ) graph.print_graph() def _print_per_netif_io(self, result_table, title): graph = termgraph.BarGraph( title='Network ' + title + ' Bytes', label_header='Interface', get_value=lambda row: row.size.value, get_value_str=format_utils.format_size, get_label=lambda row: 
row.netif.name, data=result_table.rows ) graph.print_graph() def _print_per_file_io(self, result_table, title): # FIXME add option to show FD owners # FIXME why are read and write values the same? graph = termgraph.BarGraph( title='Per-file I/O ' + title, label_header='Path', get_value=lambda row: row.size.value, get_value_str=format_utils.format_size, get_label=lambda row: row.path.path, data=result_table.rows ) graph.print_graph() def _print_usage(self, usage_tables): self._print_per_proc_io(usage_tables.per_proc_read, 'Read') self._print_per_proc_io(usage_tables.per_proc_write, 'Write') self._print_per_file_io(usage_tables.per_file_read, 'Read') self._print_per_file_io(usage_tables.per_file_write, 'Write') self._print_per_proc_block_io(usage_tables.per_proc_block_read, 'Read') self._print_per_proc_block_io( usage_tables.per_proc_block_write, 'Write' ) self._print_per_disk_sector(usage_tables.per_disk_sector) self._print_per_disk_request(usage_tables.per_disk_request) self._print_per_disk_rtps(usage_tables.per_disk_rtps) self._print_per_netif_io(usage_tables.per_netif_recv, 'Received') self._print_per_netif_io(usage_tables.per_netif_send, 'Sent') def _fill_freq_result_table(self, duration_list, result_table): if not duration_list: return # The number of bins for the histogram resolution = self._args.freq_resolution min_duration = min(duration_list) max_duration = max(duration_list) # ns to µs min_duration /= 1000 max_duration /= 1000 step = (max_duration - min_duration) / resolution if step == 0: return buckets = [] values = [] for i in range(resolution): buckets.append(i * step) values.append(0) for duration in duration_list: duration /= 1000 index = min(int((duration - min_duration) / step), resolution - 1) values[index] += 1 for index, value in enumerate(values): result_table.append_row( latency_lower=mi.Duration.from_us(index * step + min_duration), latency_upper=mi.Duration.from_us((index + 1) * step + min_duration), count=mi.Integer(value), ) def 
_get_disk_freq_result_tables(self, begin, end): result_tables = [] for disk in self._analysis.disks.values(): rq_durations = [rq.duration for rq in disk.rq_list if self._filter_io_request(rq)] subtitle = 'disk: {}'.format(disk.disk_name) result_table = \ self._mi_create_result_table(self._MI_TABLE_CLASS_FREQ, begin, end, subtitle) self._fill_freq_result_table(rq_durations, result_table) result_tables.append(result_table) return result_tables def _get_syscall_freq_result_tables(self, begin, end): open_table = \ self._mi_create_result_table(self._MI_TABLE_CLASS_FREQ, begin, end, 'open') read_table = \ self._mi_create_result_table(self._MI_TABLE_CLASS_FREQ, begin, end, 'read') write_table = \ self._mi_create_result_table(self._MI_TABLE_CLASS_FREQ, begin, end, 'write') sync_table = \ self._mi_create_result_table(self._MI_TABLE_CLASS_FREQ, begin, end, 'sync') self._fill_freq_result_table([io_rq.duration for io_rq in self._analysis.open_io_requests if self._filter_io_request(io_rq)], open_table) self._fill_freq_result_table([io_rq.duration for io_rq in self._analysis.read_io_requests if self._filter_io_request(io_rq)], read_table) self._fill_freq_result_table([io_rq.duration for io_rq in self._analysis.write_io_requests if self._filter_io_request(io_rq)], write_table) self._fill_freq_result_table([io_rq.duration for io_rq in self._analysis.sync_io_requests if self._filter_io_request(io_rq)], sync_table) return [open_table, read_table, write_table, sync_table] def _get_freq_result_tables(self, begin, end): syscall_tables = self._get_syscall_freq_result_tables(begin, end) disk_tables = self._get_disk_freq_result_tables(begin, end) return syscall_tables + disk_tables def _print_one_freq(self, result_table): graph = termgraph.FreqGraph( data=result_table.rows, get_value=lambda row: row.count.value, get_lower_bound=lambda row: row.latency_lower.to_us(), title='{} {}'.format(result_table.title, result_table.subtitle), unit='µs' ) graph.print_graph() def _print_freq(self, 
freq_tables): for freq_table in freq_tables: self._print_one_freq(freq_table) def _append_log_row(self, io_rq, result_table): if io_rq.size is None: size = mi.Empty() else: size = mi.Size(io_rq.size) tid = io_rq.tid proc_stats = self._analysis.tids[tid] proc_name = proc_stats.comm # TODO: handle fd_in/fd_out for RW type operations if io_rq.fd is None: path = mi.Empty() fd = mi.Empty() else: fd = mi.Fd(io_rq.fd) parent_proc = proc_stats if parent_proc.pid is not None: parent_proc = self._analysis.tids[parent_proc.pid] fd_stats = parent_proc.get_fd(io_rq.fd, io_rq.end_ts) if fd_stats is not None: path = mi.Path(fd_stats.filename) else: path = mi.Unknown() result_table.append_row( time_range=mi.TimeRange(io_rq.begin_ts, io_rq.end_ts), out_of_range=mi.Boolean(self._is_io_rq_out_of_range(io_rq)), duration=mi.Duration(io_rq.duration), syscall=mi.Syscall(io_rq.syscall_name), size=size, process=mi.Process(proc_name, tid=tid), path=path, fd=fd, ) def _fill_log_result_table(self, rq_list, sort_key, is_top, result_table): if not rq_list: return count = 0 for io_rq in sorted(rq_list, key=operator.attrgetter(sort_key), reverse=is_top): if is_top and count > self._args.limit: break self._append_log_row(io_rq, result_table) count += 1 def _fill_log_result_table_from_io_requests(self, io_requests, sort_key, is_top, result_table): io_requests = [io_rq for io_rq in io_requests if self._filter_io_request(io_rq)] self._fill_log_result_table(io_requests, sort_key, is_top, result_table) def _get_top_result_tables(self, begin, end): open_table = \ self._mi_create_result_table(self._MI_TABLE_CLASS_TOP_SYSCALL, begin, end, 'open') read_table = \ self._mi_create_result_table(self._MI_TABLE_CLASS_TOP_SYSCALL, begin, end, 'read') write_table = \ self._mi_create_result_table(self._MI_TABLE_CLASS_TOP_SYSCALL, begin, end, 'write') sync_table = \ self._mi_create_result_table(self._MI_TABLE_CLASS_TOP_SYSCALL, begin, end, 'sync') self._fill_log_result_table_from_io_requests( 
self._analysis.open_io_requests, 'duration', True, open_table) self._fill_log_result_table_from_io_requests( self._analysis.read_io_requests, 'duration', True, read_table) self._fill_log_result_table_from_io_requests( self._analysis.write_io_requests, 'duration', True, write_table) self._fill_log_result_table_from_io_requests( self._analysis.sync_io_requests, 'duration', True, sync_table) return [open_table, read_table, write_table, sync_table] def _print_log_row(self, row): fmt = '{:<40} {:<16} {:>16} {:>11} {:<24} {:<8} {:<14}' begin_time = common.ns_to_hour_nsec(row.time_range.begin, self._args.multi_day, self._args.gmt) end_time = common.ns_to_hour_nsec(row.time_range.end, self._args.multi_day, self._args.gmt) time_range_str = '[' + begin_time + ',' + end_time + ']' duration_str = '%0.03f' % row.duration.to_us() if type(row.size) is mi.Empty: size = 'N/A' else: size = format_utils.format_size(row.size.value) tid = row.process.tid proc_name = row.process.name if type(row.fd) is mi.Empty: file_str = 'N/A' else: if type(row.path) is mi.Unknown: path = 'unknown' else: path = row.path.path file_str = '%s (fd=%s)' % (path, row.fd.fd) if row.out_of_range.value: time_range_str += '*' duration_str += '*' else: time_range_str += ' ' duration_str += ' ' print(fmt.format(time_range_str, row.syscall.name, duration_str, size, proc_name, tid, file_str)) def _print_log(self, result_table): if not result_table.rows: return has_out_of_range_rq = False print() fmt = '{} {} (usec)' print(fmt.format(result_table.title, result_table.subtitle)) header_fmt = '{:<19} {:<20} {:<16} {:<23} {:<5} {:<24} {:<8} {:<14}' print(header_fmt.format( 'Begin', 'End', 'Name', 'Duration (usec)', 'Size', 'Proc', 'PID', 'Filename')) for row in result_table.rows: self._print_log_row(row) if not has_out_of_range_rq and row.out_of_range.value: has_out_of_range_rq = True if has_out_of_range_rq: print('*: Syscalls started and/or completed outside of the ' 'range specified') def _print_top(self, top_tables): 
for table in top_tables: self._print_log(table) def _get_log_result_table(self, begin, end): log_table = self._mi_create_result_table(self._MI_TABLE_CLASS_LOG, begin, end) self._fill_log_result_table_from_io_requests( self._analysis.io_requests, 'begin_ts', False, log_table) return log_table def _append_latency_stats_row(self, obj, rq_durations, result_table): rq_count = len(rq_durations) total_duration = sum(rq_durations) if len(rq_durations) > 0: min_duration = min(rq_durations) max_duration = max(rq_durations) else: min_duration = 0 max_duration = 0 if rq_count < 2: stdev = mi.Unknown() else: stdev = mi.Duration(statistics.stdev(rq_durations)) if rq_count > 0: avg = total_duration / rq_count else: avg = 0 result_table.append_row( obj=obj, count=mi.Integer(rq_count), min_latency=mi.Duration(min_duration), avg_latency=mi.Duration(avg), max_latency=mi.Duration(max_duration), stdev_latency=stdev, ) def _append_latency_stats_row_from_requests(self, obj, io_requests, result_table): rq_durations = [io_rq.duration for io_rq in io_requests if self._filter_io_request(io_rq)] self._append_latency_stats_row(obj, rq_durations, result_table) def _get_syscall_latency_stats_result_table(self, begin, end): result_table = self._mi_create_result_table( self._MI_TABLE_CLASS_SYSCALL_LATENCY_STATS, begin, end) append_fn = self._append_latency_stats_row_from_requests append_fn(mi.String('Open'), self._analysis.open_io_requests, result_table) append_fn(mi.String('Read'), self._analysis.read_io_requests, result_table) append_fn(mi.String('Write'), self._analysis.write_io_requests, result_table) append_fn(mi.String('Sync'), self._analysis.sync_io_requests, result_table) return result_table def _get_disk_latency_stats_result_table(self, begin, end): if not self._analysis.disks: return result_table = self._mi_create_result_table( self._MI_TABLE_CLASS_PART_LATENCY_STATS, begin, end) for disk in self._analysis.disks.values(): if disk.rq_count: rq_durations = [rq.duration for rq in 
disk.rq_list if self._filter_io_request(rq)] disk = mi.Disk(disk.disk_name) self._append_latency_stats_row(disk, rq_durations, result_table) return result_table def _get_latency_stats_result_tables(self, begin, end): syscall_tbl = self._get_syscall_latency_stats_result_table(begin, end) disk_tbl = self._get_disk_latency_stats_result_table(begin, end) return syscall_tbl, disk_tbl def _print_latency_stats_row(self, row): if type(row.stdev_latency) is mi.Unknown: stdev = '?' else: stdev = '%0.03f' % row.stdev_latency.to_us() avg = '%0.03f' % row.avg_latency.to_us() min_duration = '%0.03f' % row.min_latency.to_us() max_duration = '%0.03f' % row.max_latency.to_us() print(IoAnalysisCommand._LATENCY_STATS_FORMAT.format( str(row.obj), row.count.value, min_duration, avg, max_duration, stdev)) def _print_syscall_latency_stats(self, stats_table): print('\nSyscalls latency statistics (usec):') print(IoAnalysisCommand._LATENCY_STATS_FORMAT.format( 'Type', 'Count', 'Min', 'Average', 'Max', 'Stdev')) print(IoAnalysisCommand._SECTION_SEPARATOR_STRING) for row in stats_table.rows: self._print_latency_stats_row(row) def _print_disk_latency_stats(self, stats_table): if not stats_table.rows: return print('\nDisk latency statistics (usec):') print(IoAnalysisCommand._LATENCY_STATS_FORMAT.format( 'Name', 'Count', 'Min', 'Average', 'Max', 'Stdev')) print(IoAnalysisCommand._SECTION_SEPARATOR_STRING) for row in stats_table.rows: self._print_latency_stats_row(row) def _print_latency_stats(self, syscall_latency_stats_table, disk_latency_stats_table): self._print_syscall_latency_stats(syscall_latency_stats_table) self._print_disk_latency_stats(disk_latency_stats_table) def _add_arguments(self, ap): Command._add_proc_filter_args(ap) Command._add_min_max_args(ap) Command._add_log_args( ap, help='Output the I/O requests in chronological order') Command._add_top_args( ap, help='Output the top I/O latencies by category') Command._add_stats_args(ap, help='Output the I/O latency statistics') 
Command._add_freq_args( ap, help='Output the I/O latency frequency distribution') ap.add_argument('--usage', action='store_true', help='Output the I/O usage') ap.add_argument('--minsize', type=float, help='Filter out, I/O operations working with ' 'less that minsize bytes') ap.add_argument('--maxsize', type=float, help='Filter out, I/O operations working with ' 'more that maxsize bytes') def _run(mi_mode): iocmd = IoAnalysisCommand(mi_mode=mi_mode) iocmd.run() def _runstats(mi_mode): sys.argv.insert(1, '--stats') _run(mi_mode) def _runlog(mi_mode): sys.argv.insert(1, '--log') _run(mi_mode) def _runfreq(mi_mode): sys.argv.insert(1, '--freq') _run(mi_mode) def _runlatencytop(mi_mode): sys.argv.insert(1, '--top') _run(mi_mode) def _runusage(mi_mode): sys.argv.insert(1, '--usage') _run(mi_mode) def runstats(): _runstats(mi_mode=False) def runlog(): _runlog(mi_mode=False) def runfreq(): _runfreq(mi_mode=False) def runlatencytop(): _runlatencytop(mi_mode=False) def runusage(): _runusage(mi_mode=False) def runstats_mi(): _runstats(mi_mode=True) def runlog_mi(): _runlog(mi_mode=True) def runfreq_mi(): _runfreq(mi_mode=True) def runlatencytop_mi(): _runlatencytop(mi_mode=True) def runusage_mi(): _runusage(mi_mode=True) lttnganalyses-0.4.3/lttnganalyses/cli/command.py0000664000175000017500000004765012665072151023526 0ustar mjeansonmjeanson00000000000000# The MIT License (MIT) # # Copyright (C) 2015 - Julien Desfossez # 2015 - Philippe Proulx # 2015 - Antoine Busque # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be 
included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. import argparse import json import os import re import sys import subprocess from babeltrace import TraceCollection from . import mi from .. import _version from . import progressbar from .. import __version__ from ..common import version_utils from ..core import analysis from ..linuxautomaton import common from ..linuxautomaton import automaton class Command: _MI_BASE_TAGS = ['linux-kernel', 'lttng-analyses'] _MI_AUTHORS = [ 'Julien Desfossez', 'Antoine Busque', 'Philippe Proulx', ] _MI_URL = 'https://github.com/lttng/lttng-analyses' def __init__(self, mi_mode=False): self._analysis = None self._analysis_conf = None self._args = None self._handles = None self._traces = None self._ticks = 0 self._mi_mode = mi_mode self._create_automaton() self._mi_setup() @property def mi_mode(self): return self._mi_mode def run(self): try: self._parse_args() self._open_trace() self._create_analysis() self._run_analysis() self._close_trace() except KeyboardInterrupt: sys.exit(0) def _error(self, msg, exit_code=1): try: import termcolor msg = termcolor.colored(msg, 'red', attrs=['bold']) except ImportError: pass print(msg, file=sys.stderr) sys.exit(exit_code) def _gen_error(self, msg, exit_code=1): self._error('Error: {}'.format(msg), exit_code) def _cmdline_error(self, msg, exit_code=1): self._error('Command line error: {}'.format(msg), exit_code) def _print(self, msg): if not self._mi_mode: print(msg) def _mi_create_result_table(self, 
table_class_name, begin, end, subtitle=None): return mi.ResultTable(self._mi_table_classes[table_class_name], begin, end, subtitle) def _mi_setup(self): self._mi_table_classes = {} for tc_tuple in self._MI_TABLE_CLASSES: table_class = mi.TableClass(tc_tuple[0], tc_tuple[1], tc_tuple[2]) self._mi_table_classes[table_class.name] = table_class self._mi_clear_result_tables() def _mi_print_metadata(self): tags = self._MI_BASE_TAGS + self._MI_TAGS infos = mi.get_metadata(version=self._MI_VERSION, title=self._MI_TITLE, description=self._MI_DESCRIPTION, authors=self._MI_AUTHORS, url=self._MI_URL, tags=tags, table_classes=self._mi_table_classes.values()) print(json.dumps(infos)) def _mi_append_result_table(self, result_table): if not result_table or not result_table.rows: return tc_name = result_table.table_class.name self._mi_get_result_tables(tc_name).append(result_table) def _mi_append_result_tables(self, result_tables): if not result_tables: return for result_table in result_tables: self._mi_append_result_table(result_table) def _mi_clear_result_tables(self): self._result_tables = {} def _mi_get_result_tables(self, table_class_name): if table_class_name not in self._result_tables: self._result_tables[table_class_name] = [] return self._result_tables[table_class_name] def _mi_print(self): results = [] for result_tables in self._result_tables.values(): for result_table in result_tables: results.append(result_table.to_native_object()) obj = { 'results': results, } print(json.dumps(obj)) def _create_summary_result_tables(self): pass def _open_trace(self): traces = TraceCollection() handles = traces.add_traces_recursive(self._args.path, 'ctf') if handles == {}: self._gen_error('Failed to open ' + self._args.path, -1) self._handles = handles self._traces = traces self._process_date_args() self._read_tracer_version() if not self._args.skip_validation: self._check_lost_events() def _close_trace(self): for handle in self._handles.values(): self._traces.remove_trace(handle) def 
_read_tracer_version(self): kernel_path = None # remove the trailing / while self._args.path.endswith('/'): self._args.path = self._args.path[:-1] for root, _, _ in os.walk(self._args.path): if root.endswith('kernel'): kernel_path = root break if kernel_path is None: self._gen_error('Could not find kernel trace directory') try: ret, metadata = subprocess.getstatusoutput( 'babeltrace -o ctf-metadata "%s"' % kernel_path) except subprocess.CalledProcessError: self._gen_error('Cannot run babeltrace on the trace, cannot read' ' tracer version') # fallback to reading the text metadata if babeltrace failed to # output the CTF metadata if ret != 0: try: metadata = subprocess.getoutput( 'cat "%s"' % os.path.join(kernel_path, 'metadata')) except subprocess.CalledProcessError: self._gen_error('Cannot read the metadata of the trace, cannot' 'extract tracer version') major_match = re.search(r'tracer_major = "*(\d+)"*', metadata) minor_match = re.search(r'tracer_minor = "*(\d+)"*', metadata) patch_match = re.search(r'tracer_patchlevel = "*(\d+)"*', metadata) if not major_match or not minor_match or not patch_match: self._gen_error('Malformed metadata, cannot read tracer version') self.state.tracer_version = version_utils.Version( int(major_match.group(1)), int(minor_match.group(1)), int(patch_match.group(1)), ) def _check_lost_events(self): self._print('Checking the trace for lost events...') try: subprocess.check_output('babeltrace "%s"' % self._args.path, shell=True) except subprocess.CalledProcessError: self._gen_error('Cannot run babeltrace on the trace, cannot verify' ' if events were lost during the trace recording') def _pre_analysis(self): pass def _post_analysis(self): if not self._mi_mode: return if self._ticks > 1: self._create_summary_result_tables() self._mi_print() def _run_analysis(self): self._pre_analysis() progressbar.progressbar_setup(self) for event in self._traces.events: progressbar.progressbar_update(self) self._analysis.process_event(event) if 
self._analysis.ended: break self._automaton.process_event(event) progressbar.progressbar_finish(self) self._analysis.end() self._post_analysis() def _print_date(self, begin_ns, end_ns): date = 'Timerange: [%s, %s]' % ( common.ns_to_hour_nsec(begin_ns, gmt=self._args.gmt, multi_day=True), common.ns_to_hour_nsec(end_ns, gmt=self._args.gmt, multi_day=True)) self._print(date) def _get_uniform_freq_values(self, durations): if self._args.uniform_step is not None: return (self._args.uniform_min, self._args.uniform_max, self._args.uniform_step) if self._args.min is not None: self._args.uniform_min = self._args.min else: self._args.uniform_min = min(durations) if self._args.max is not None: self._args.uniform_max = self._args.max else: self._args.uniform_max = max(durations) # ns to µs self._args.uniform_min /= 1000 self._args.uniform_max /= 1000 self._args.uniform_step = ( (self._args.uniform_max - self._args.uniform_min) / self._args.freq_resolution ) return self._args.uniform_min, self._args.uniform_max, \ self._args.uniform_step def _validate_transform_common_args(self, args): refresh_period_ns = None if args.refresh is not None: try: refresh_period_ns = common.duration_str_to_ns(args.refresh) except ValueError as e: self._cmdline_error(str(e)) self._analysis_conf = analysis.AnalysisConfig() self._analysis_conf.refresh_period = refresh_period_ns self._analysis_conf.period_begin_ev_name = args.period_begin self._analysis_conf.period_end_ev_name = args.period_end self._analysis_conf.period_begin_key_fields = \ args.period_begin_key.split(',') if args.period_end_key: self._analysis_conf.period_end_key_fields = \ args.period_end_key.split(',') else: self._analysis_conf.period_end_key_fields = \ self._analysis_conf.period_begin_key_fields if args.period_key_value: self._analysis_conf.period_key_value = \ tuple(args.period_key_value.split(',')) if args.cpu: self._analysis_conf.cpu_list = args.cpu.split(',') self._analysis_conf.cpu_list = [int(cpu) for cpu in 
self._analysis_conf.cpu_list] # convert min/max args from µs to ns, if needed if hasattr(args, 'min') and args.min is not None: args.min *= 1000 self._analysis_conf.min_duration = args.min if hasattr(args, 'max') and args.max is not None: args.max *= 1000 self._analysis_conf.max_duration = args.max if hasattr(args, 'procname'): if args.procname: self._analysis_conf.proc_list = args.procname.split(',') if hasattr(args, 'tid'): if args.tid: self._analysis_conf.tid_list = args.tid.split(',') self._analysis_conf.tid_list = [int(tid) for tid in self._analysis_conf.tid_list] if hasattr(args, 'freq'): args.uniform_min = None args.uniform_max = None args.uniform_step = None if args.freq_series: # implies uniform buckets args.freq_uniform = True if self._mi_mode: # force no progress in MI mode args.no_progress = True # print MI metadata if required if args.metadata: self._mi_print_metadata() sys.exit(0) # validate path argument (required at this point) if not args.path: self._cmdline_error('Please specify a trace path') if type(args.path) is list: args.path = args.path[0] def _validate_transform_args(self, args): pass def _parse_args(self): ap = argparse.ArgumentParser(description=self._DESC) # common arguments ap.add_argument('-r', '--refresh', type=str, help='Refresh period, with optional units suffix ' '(default units: s)') ap.add_argument('--gmt', action='store_true', help='Manipulate timestamps based on GMT instead ' 'of local time') ap.add_argument('--skip-validation', action='store_true', help='Skip the trace validation') ap.add_argument('--begin', type=str, help='start time: ' 'hh:mm:ss[.nnnnnnnnn]') ap.add_argument('--end', type=str, help='end time: ' 'hh:mm:ss[.nnnnnnnnn]') ap.add_argument('--period-begin', type=str, help='Analysis period start marker event name') ap.add_argument('--period-end', type=str, help='Analysis period end marker event name ' '(requires --period-begin)') ap.add_argument('--period-begin-key', type=str, default='cpu_id', help='Optional, list 
of event field names used to ' 'match period markers (default: cpu_id)') ap.add_argument('--period-end-key', type=str, help='Optional, list of event field names used to ' 'match period marker. If none specified, use the same ' ' --period-begin-key') ap.add_argument('--period-key-value', type=str, help='Optional, define a fixed key value to which a' ' period must correspond to be considered.') ap.add_argument('--cpu', type=str, help='Filter the results only for this list of ' 'CPU IDs') ap.add_argument('--timerange', type=str, help='time range: ' '[begin,end]') ap.add_argument('-V', '--version', action='version', version='LTTng Analyses v' + __version__) # MI mode-dependent arguments if self._mi_mode: ap.add_argument('--metadata', action='store_true', help='Show analysis\'s metadata') ap.add_argument('path', metavar='', help='trace path', nargs='*') else: ap.add_argument('--no-progress', action='store_true', help='Don\'t display the progress bar') ap.add_argument('path', metavar='', help='trace path') # Used to add command-specific args self._add_arguments(ap) args = ap.parse_args() self._validate_transform_common_args(args) self._validate_transform_args(args) self._args = args @staticmethod def _add_proc_filter_args(ap): ap.add_argument('--procname', type=str, help='Filter the results only for this list of ' 'process names') ap.add_argument('--tid', type=str, help='Filter the results only for this list of TIDs') @staticmethod def _add_min_max_args(ap): ap.add_argument('--min', type=float, help='Filter out durations shorter than min usec') ap.add_argument('--max', type=float, help='Filter out durations longer than max usec') @staticmethod def _add_freq_args(ap, help=None): if not help: help = 'Output the frequency distribution' ap.add_argument('--freq', action='store_true', help=help) ap.add_argument('--freq-resolution', type=int, default=20, help='Frequency distribution resolution ' '(default 20)') ap.add_argument('--freq-uniform', action='store_true', help='Use a 
uniform resolution across distributions') ap.add_argument('--freq-series', action='store_true', help='Consolidate frequency distribution histogram ' 'as a single one') @staticmethod def _add_log_args(ap, help=None): if not help: help = 'Output the events in chronological order' ap.add_argument('--log', action='store_true', help=help) @staticmethod def _add_top_args(ap, help=None): if not help: help = 'Output the top results' ap.add_argument('--limit', type=int, default=10, help='Limit to top X (default = 10)') ap.add_argument('--top', action='store_true', help=help) @staticmethod def _add_stats_args(ap, help=None): if not help: help = 'Output statistics' ap.add_argument('--stats', action='store_true', help=help) def _add_arguments(self, ap): pass def _process_date_args(self): def date_to_epoch_nsec(date): ts = common.date_to_epoch_nsec(self._handles, date, self._args.gmt) if ts is None: self._cmdline_error('Invalid date format: "{}"'.format(date)) return ts self._args.multi_day = common.is_multi_day_trace_collection( self._handles) begin_ts = None end_ts = None if self._args.timerange: begin_ts, end_ts = common.extract_timerange(self._handles, self._args.timerange, self._args.gmt) if None in [begin_ts, end_ts]: self._cmdline_error( 'Invalid time format: "{}"'.format(self._args.timerange)) else: if self._args.begin: begin_ts = date_to_epoch_nsec(self._args.begin) if self._args.end: end_ts = date_to_epoch_nsec(self._args.end) # We have to check if timestamp_begin is None, which # it always is in older versions of babeltrace. 
In # that case, the test is simply skipped and an invalid # --end value will cause an empty analysis if self._traces.timestamp_begin is not None and \ end_ts < self._traces.timestamp_begin: self._cmdline_error( '--end timestamp before beginning of trace') self._analysis_conf.begin_ts = begin_ts self._analysis_conf.end_ts = end_ts def _create_analysis(self): notification_cbs = { analysis.Analysis.TICK_CB: self._analysis_tick_cb } self._analysis = self._ANALYSIS_CLASS(self.state, self._analysis_conf) self._analysis.register_notification_cbs(notification_cbs) def _create_automaton(self): self._automaton = automaton.Automaton() self.state = self._automaton.state def _analysis_tick_cb(self, **kwargs): begin_ns = kwargs['begin_ns'] end_ns = kwargs['end_ns'] self._analysis_tick(begin_ns, end_ns) self._ticks += 1 def _analysis_tick(self, begin_ns, end_ns): raise NotImplementedError() # create MI version _cmd_version = _version.get_versions()['version'] _version_match = re.match(r'(\d+)\.(\d+)\.(\d+)(.*)', _cmd_version) Command._MI_VERSION = version_utils.Version( int(_version_match.group(1)), int(_version_match.group(2)), int(_version_match.group(3)), _version_match.group(4), ) lttnganalyses-0.4.3/lttnganalyses/cli/progressbar.py0000664000175000017500000000530112665072151024424 0ustar mjeansonmjeanson00000000000000# The MIT License (MIT) # # Copyright (C) 2015 - Julien Desfossez # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. 
# # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. import os import sys try: from progressbar import ETA, Bar, Percentage, ProgressBar progressbar_available = True except ImportError: progressbar_available = False # approximation for the progress bar BYTES_PER_EVENT = 30 def get_folder_size(folder): total_size = os.path.getsize(folder) for item in os.listdir(folder): itempath = os.path.join(folder, item) if os.path.isfile(itempath): total_size += os.path.getsize(itempath) elif os.path.isdir(itempath): total_size += get_folder_size(itempath) return total_size def progressbar_setup(obj): if obj._args.no_progress: obj.pbar = None return if progressbar_available: size = get_folder_size(obj._args.path) widgets = ['Processing the trace: ', Percentage(), ' ', Bar(marker='#', left='[', right=']'), ' ', ETA(), ' '] # see docs for other options obj.pbar = ProgressBar(widgets=widgets, maxval=size/BYTES_PER_EVENT) obj.pbar.start() else: print('Warning: progressbar module not available, ' 'using --no-progress.', file=sys.stderr) obj._args.no_progress = True obj.pbar = None obj.event_count = 0 def progressbar_update(obj): if obj._args.no_progress or obj.pbar is None: return try: obj.pbar.update(obj.event_count) except ValueError: pass obj.event_count += 1 def progressbar_finish(obj): if obj._args.no_progress: return obj.pbar.finish() lttnganalyses-0.4.3/lttnganalyses/cli/syscallstats.py0000664000175000017500000002147312665072151024634 0ustar mjeansonmjeanson00000000000000# The MIT License (MIT) # # Copyright (C) 2015 - Julien Desfossez # 2015 - Antoine Busque # 
import errno
import operator
import statistics

from . import mi
from ..core import syscalls
from .command import Command


class SyscallsAnalysis(Command):
    """CLI command producing per-TID and global system call statistics."""

    _DESC = """The syscallstats command."""
    _ANALYSIS_CLASS = syscalls.SyscallsAnalysis
    _MI_TITLE = 'System call statistics'
    _MI_DESCRIPTION = 'Per-TID and global system call statistics'
    _MI_TAGS = [mi.Tags.SYSCALL, mi.Tags.STATS]
    _MI_TABLE_CLASS_PER_TID_STATS = 'per-tid'
    _MI_TABLE_CLASS_TOTAL = 'total'
    _MI_TABLE_CLASS_SUMMARY = 'summary'
    _MI_TABLE_CLASSES = [
        (
            _MI_TABLE_CLASS_PER_TID_STATS,
            'System call statistics', [
                ('syscall', 'System call', mi.Syscall),
                ('count', 'Call count', mi.Integer, 'calls'),
                ('min_duration', 'Minimum call duration', mi.Duration),
                ('avg_duration', 'Average call duration', mi.Duration),
                ('max_duration', 'Maximum call duration', mi.Duration),
                ('stdev_duration', 'Call duration standard deviation',
                 mi.Duration),
                ('return_values', 'Return values count', mi.String),
            ]
        ),
        (
            _MI_TABLE_CLASS_TOTAL,
            'Per-TID system call statistics', [
                ('process', 'Process', mi.Process),
                ('count', 'Total system call count', mi.Integer, 'calls'),
            ]
        ),
        (
            _MI_TABLE_CLASS_SUMMARY,
            'System call statistics - summary', [
                ('time_range', 'Time range', mi.TimeRange),
                ('process', 'Process', mi.Process),
                ('count', 'Total system call count', mi.Integer, 'calls'),
            ]
        ),
    ]

    def _analysis_tick(self, begin_ns, end_ns):
        """Emit the result tables for one analysis period."""
        total_table, per_tid_tables = self._get_result_tables(begin_ns,
                                                              end_ns)

        if self._mi_mode:
            self._mi_append_result_tables(per_tid_tables)
            self._mi_append_result_table(total_table)
        else:
            self._print_date(begin_ns, end_ns)
            self._print_results(total_table, per_tid_tables)

    def _post_analysis(self):
        if not self._mi_mode:
            return

        # With more than one period, collapse the totals into a summary.
        if len(self._mi_get_result_tables(self._MI_TABLE_CLASS_TOTAL)) > 1:
            self._create_summary_result_table()

        self._mi_print()

    def _create_summary_result_table(self):
        """Merge all per-period total tables into a single summary table."""
        total_tables = self._mi_get_result_tables(self._MI_TABLE_CLASS_TOTAL)
        begin = total_tables[0].timerange.begin
        end = total_tables[-1].timerange.end
        summary_table = \
            self._mi_create_result_table(self._MI_TABLE_CLASS_SUMMARY,
                                         begin, end)

        for total_table in total_tables:
            for row in total_table.rows:
                summary_table.append_row(
                    time_range=total_table.timerange,
                    process=row.process,
                    count=row.count,
                )

        self._mi_clear_result_tables()
        self._mi_append_result_table(summary_table)

    def _get_result_tables(self, begin_ns, end_ns):
        """Build the per-TID detail tables and the total table.

        Returns (total_table, per_tid_tables); processes are sorted by
        decreasing total system call count, syscalls by decreasing call
        count.
        """
        per_tid_tables = []
        total_table = self._mi_create_result_table(self._MI_TABLE_CLASS_TOTAL,
                                                   begin_ns, end_ns)

        for proc_stats in sorted(self._analysis.tids.values(),
                                 key=operator.attrgetter('total_syscalls'),
                                 reverse=True):
            if proc_stats.total_syscalls == 0:
                continue

            pid = proc_stats.pid
            if proc_stats.pid is None:
                pid = '?'

            subtitle = '%s (%s, TID: %d)' % (proc_stats.comm, pid,
                                             proc_stats.tid)
            result_table = \
                self._mi_create_result_table(
                    self._MI_TABLE_CLASS_PER_TID_STATS,
                    begin_ns, end_ns, subtitle)

            for syscall in sorted(proc_stats.syscalls.values(),
                                  key=operator.attrgetter('count'),
                                  reverse=True):
                durations = []
                return_count = {}

                for syscall_event in syscall.syscalls_list:
                    durations.append(syscall_event.duration)

                    if syscall_event.ret >= 0:
                        return_key = 'success'
                    else:
                        try:
                            return_key = errno.errorcode[-syscall_event.ret]
                        except KeyError:
                            return_key = str(syscall_event.ret)

                    # BUGFIX: the original set a new key to 1 and then
                    # unconditionally incremented it, so the first
                    # occurrence of each return value was counted twice.
                    return_count[return_key] = \
                        return_count.get(return_key, 0) + 1

                # statistics.stdev() needs at least two data points.
                if len(durations) > 2:
                    stdev = mi.Duration(statistics.stdev(durations))
                else:
                    stdev = mi.Unknown()

                result_table.append_row(
                    syscall=mi.Syscall(syscall.name),
                    count=mi.Integer(syscall.count),
                    min_duration=mi.Duration(syscall.min_duration),
                    avg_duration=mi.Duration(syscall.total_duration /
                                             syscall.count),
                    max_duration=mi.Duration(syscall.max_duration),
                    stdev_duration=stdev,
                    return_values=mi.String(str(return_count)),
                )

            per_tid_tables.append(result_table)
            total_table.append_row(
                process=mi.Process(proc_stats.comm, pid=proc_stats.pid,
                                   tid=proc_stats.tid),
                count=mi.Integer(proc_stats.total_syscalls),
            )

        return total_table, per_tid_tables

    def _print_results(self, total_table, per_tid_tables):
        """Print the human-readable per-TID statistics tables."""
        line_format = '{:<38} {:>14} {:>14} {:>14} {:>12} {:>10} {:<14}'

        print('Per-TID syscalls statistics (usec)')
        total_calls = 0

        for total_row, table in zip(total_table.rows, per_tid_tables):
            print(line_format.format(table.subtitle,
                                     'Count', 'Min', 'Average', 'Max',
                                     'Stdev', 'Return values'))

            # BUGFIX: hoisted out of the row loop so that the 'Total:'
            # line below cannot hit a NameError on an empty table.
            proc_total_calls = total_row.count.value

            for row in table.rows:
                syscall_name = row.syscall.name
                syscall_count = row.count.value
                min_duration = round(row.min_duration.to_us(), 3)
                avg_duration = round(row.avg_duration.to_us(), 3)
                max_duration = round(row.max_duration.to_us(), 3)

                if type(row.stdev_duration) is mi.Unknown:
                    stdev = '?'
                else:
                    stdev = round(row.stdev_duration.to_us(), 3)

                print(line_format.format(
                    ' - ' + syscall_name, syscall_count, min_duration,
                    avg_duration, max_duration, stdev,
                    row.return_values.value))

            print(line_format.format('Total:', proc_total_calls,
                                     '', '', '', '', ''))
            print('-' * 113)
            total_calls += proc_total_calls

        print('\nTotal syscalls: %d' % (total_calls))

    def _add_arguments(self, ap):
        Command._add_proc_filter_args(ap)


def _run(mi_mode):
    syscallscmd = SyscallsAnalysis(mi_mode=mi_mode)
    syscallscmd.run()


# entry point (human)
def run():
    _run(mi_mode=False)


# entry point (MI)
def run_mi():
    _run(mi_mode=True)
import operator

from ..common import format_utils
from .command import Command
from ..core import cputop
from . import mi
from . import termgraph


class Cputop(Command):
    """CLI command producing per-TID, per-CPU and total CPU usage."""

    _DESC = """The cputop command."""
    _ANALYSIS_CLASS = cputop.Cputop
    _MI_TITLE = 'Top CPU usage'
    _MI_DESCRIPTION = 'Per-TID, per-CPU, and total top CPU usage'
    _MI_TAGS = [mi.Tags.CPU, mi.Tags.TOP]
    _MI_TABLE_CLASS_PER_PROC = 'per-process'
    _MI_TABLE_CLASS_PER_CPU = 'per-cpu'
    _MI_TABLE_CLASS_TOTAL = 'total'
    _MI_TABLE_CLASS_SUMMARY = 'summary'
    _MI_TABLE_CLASSES = [
        (
            _MI_TABLE_CLASS_PER_PROC,
            'Per-TID top CPU usage', [
                ('process', 'Process', mi.Process),
                ('migrations', 'Migration count', mi.Integer, 'migrations'),
                ('prio_list', 'Chronological priorities', mi.String),
                ('usage', 'CPU usage', mi.Ratio),
            ]
        ),
        (
            _MI_TABLE_CLASS_PER_CPU,
            'Per-CPU top CPU usage', [
                ('cpu', 'CPU', mi.Cpu),
                ('usage', 'CPU usage', mi.Ratio),
            ]),
        (
            _MI_TABLE_CLASS_TOTAL,
            'Total CPU usage', [
                ('usage', 'CPU usage', mi.Ratio),
            ]
        ),
        (
            _MI_TABLE_CLASS_SUMMARY,
            'CPU usage - summary', [
                ('time_range', 'Time range', mi.TimeRange),
                ('usage', 'Total CPU usage', mi.Ratio),
            ]
        ),
    ]

    def _analysis_tick(self, begin_ns, end_ns):
        """Emit the result tables for one analysis period."""
        per_tid_table = self._get_per_tid_usage_result_table(begin_ns, end_ns)
        per_cpu_table = self._get_per_cpu_usage_result_table(begin_ns, end_ns)
        # May be None when no CPU was seen in the period.
        total_table = self._get_total_usage_result_table(begin_ns, end_ns)

        if self._mi_mode:
            self._mi_append_result_table(per_tid_table)
            self._mi_append_result_table(per_cpu_table)
            # BUGFIX: the non-MI path below guards against a None total
            # table, but the MI path appended it unconditionally.
            if total_table:
                self._mi_append_result_table(total_table)
        else:
            self._print_date(begin_ns, end_ns)
            self._print_per_tid_usage(per_tid_table)
            self._print_per_cpu_usage(per_cpu_table)
            if total_table:
                self._print_total_cpu_usage(total_table)

    def _create_summary_result_tables(self):
        """Collapse the per-period total tables into one summary table."""
        total_tables = self._mi_get_result_tables(self._MI_TABLE_CLASS_TOTAL)

        # No totals were produced (e.g. no CPU ever seen): nothing to do.
        if not total_tables:
            return

        begin = total_tables[0].timerange.begin
        end = total_tables[-1].timerange.end
        summary_table = \
            self._mi_create_result_table(self._MI_TABLE_CLASS_SUMMARY,
                                         begin, end)

        for total_table in total_tables:
            usage = total_table.rows[0].usage
            summary_table.append_row(
                time_range=total_table.timerange,
                usage=usage,
            )

        self._mi_clear_result_tables()
        self._mi_append_result_table(summary_table)

    def _get_per_tid_usage_result_table(self, begin_ns, end_ns):
        """Build the per-TID usage table, sorted by decreasing usage.

        Honors the --limit argument when it is positive.
        """
        result_table = \
            self._mi_create_result_table(self._MI_TABLE_CLASS_PER_PROC,
                                         begin_ns, end_ns)
        count = 0

        for tid in sorted(self._analysis.tids.values(),
                          key=operator.attrgetter('usage_percent'),
                          reverse=True):
            prio_list = format_utils.format_prio_list(tid.prio_list)

            result_table.append_row(
                process=mi.Process(tid.comm, tid=tid.tid),
                migrations=mi.Integer(tid.migrate_count),
                prio_list=mi.String(prio_list),
                usage=mi.Ratio.from_percentage(tid.usage_percent)
            )
            count += 1

            if self._args.limit > 0 and count >= self._args.limit:
                break

        return result_table

    def _get_per_cpu_usage_result_table(self, begin_ns, end_ns):
        """Build the per-CPU usage table, sorted by CPU id."""
        result_table = \
            self._mi_create_result_table(self._MI_TABLE_CLASS_PER_CPU,
                                         begin_ns, end_ns)

        for cpu in sorted(self._analysis.cpus.values(),
                          key=operator.attrgetter('cpu_id')):
            result_table.append_row(
                cpu=mi.Cpu(cpu.cpu_id),
                usage=mi.Ratio.from_percentage(cpu.usage_percent)
            )

        return result_table

    def _get_total_usage_result_table(self, begin_ns, end_ns):
        """Build the single-row total usage table.

        Returns None when no CPU was observed (callers must check).
        """
        result_table = \
            self._mi_create_result_table(self._MI_TABLE_CLASS_TOTAL,
                                         begin_ns, end_ns)

        cpu_count = len(self.state.cpus)
        usage_percent = 0

        if not cpu_count:
            return

        for cpu in sorted(self._analysis.cpus.values(),
                          key=operator.attrgetter('usage_percent'),
                          reverse=True):
            usage_percent += cpu.usage_percent

        # average per CPU
        usage_percent /= cpu_count
        result_table.append_row(
            usage=mi.Ratio.from_percentage(usage_percent),
        )

        return result_table

    def _print_per_tid_usage(self, result_table):
        """Print the per-TID usage table as a terminal bar graph."""
        row_format = '  {:<25} {:>10}  {}'
        label_header = row_format.format('Process', 'Migrations',
                                         'Priorities')

        def format_label(row):
            return row_format.format(
                '%s (%d)' % (row.process.name, row.process.tid),
                row.migrations.value,
                row.prio_list.value,
            )

        graph = termgraph.BarGraph(
            title='Per-TID Usage',
            unit='%',
            get_value=lambda row: row.usage.to_percentage(),
            get_label=format_label,
            label_header=label_header,
            data=result_table.rows
        )

        graph.print_graph()

    def _print_per_cpu_usage(self, result_table):
        """Print the per-CPU usage table as a terminal bar graph."""
        graph = termgraph.BarGraph(
            title='Per-CPU Usage',
            unit='%',
            get_value=lambda row: row.usage.to_percentage(),
            get_label=lambda row: 'CPU %d' % row.cpu.id,
            data=result_table.rows
        )

        graph.print_graph()

    def _print_total_cpu_usage(self, result_table):
        """Print the average total CPU usage line."""
        usage_percent = result_table.rows[0].usage.to_percentage()
        print('\nTotal CPU Usage: %0.02f%%\n' % usage_percent)

    def _add_arguments(self, ap):
        Command._add_proc_filter_args(ap)
        Command._add_top_args(ap)


def _run(mi_mode):
    cputopcmd = Cputop(mi_mode=mi_mode)
    cputopcmd.run()


def run():
    _run(mi_mode=False)


def run_mi():
    _run(mi_mode=True)
from . import stats
from .analysis import Analysis


class SyscallsAnalysis(Analysis):
    """Aggregates per-TID system call statistics from syscall_exit events."""

    def __init__(self, state, conf):
        notification_cbs = {
            'syscall_exit': self._process_syscall_exit
        }

        super().__init__(state, conf)
        self._state.register_notification_cbs(notification_cbs)

        # Per-TID statistics, keyed by TID.
        self.tids = {}
        # Grand total across every TID.
        self.total_syscalls = 0

    def reset(self):
        # FIXME why no reset?
        pass

    def _process_syscall_exit(self, **kwargs):
        """Account one completed system call against its process."""
        cpu_id = kwargs['cpu_id']
        proc = kwargs['proc']
        tid = proc.tid
        current_syscall = proc.current_syscall
        name = current_syscall.name

        if not self._filter_process(proc):
            return
        if not self._filter_cpu(cpu_id):
            return

        # Lazily create the per-TID and per-syscall stats entries.
        proc_stats = self.tids.get(tid)
        if proc_stats is None:
            proc_stats = ProcessSyscallStats.new_from_process(proc)
            self.tids[tid] = proc_stats

        syscall_stats = proc_stats.syscalls.get(name)
        if syscall_stats is None:
            syscall_stats = SyscallStats(name)
            proc_stats.syscalls[name] = syscall_stats

        syscall_stats.update_stats(current_syscall)
        proc_stats.total_syscalls += 1
        self.total_syscalls += 1


class ProcessSyscallStats(stats.Process):
    """System call statistics for one process (TID)."""

    def __init__(self, pid, tid, comm):
        super().__init__(pid, tid, comm)

        # Per-syscall statistics, indexed by syscall name.
        self.syscalls = {}
        self.total_syscalls = 0

    def reset(self):
        pass


class SyscallStats():
    """Duration statistics for a single system call name."""

    def __init__(self, name):
        self.name = name
        self.min_duration = None
        self.max_duration = None
        self.total_duration = 0
        # Every individual occurrence of this syscall.
        self.syscalls_list = []

    @property
    def count(self):
        """Number of recorded occurrences."""
        return len(self.syscalls_list)

    def update_stats(self, syscall):
        """Fold one syscall occurrence into the min/max/total stats."""
        duration = syscall.duration

        if self.min_duration is None or duration < self.min_duration:
            self.min_duration = duration
        if self.max_duration is None or duration > self.max_duration:
            self.max_duration = duration

        self.total_duration += duration
        self.syscalls_list.append(syscall)
permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. lttnganalyses-0.4.3/lttnganalyses/core/stats.py0000664000175000017500000000416512665072151023421 0ustar mjeansonmjeanson00000000000000# The MIT License (MIT) # # Copyright (C) 2015 - Antoine Busque # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. 
from collections import namedtuple

# One priority change: when it happened and the new priority value.
PrioEvent = namedtuple('PrioEvent', ['timestamp', 'prio'])


class Stats():
    """Abstract base for period-resettable statistics containers."""

    def reset(self):
        raise NotImplementedError()


class Process(Stats):
    """Identity and priority history for one process."""

    def __init__(self, pid, tid, comm):
        self.pid = pid
        self.tid = tid
        self.comm = comm
        # Chronological list of PrioEvent entries.
        self.prio_list = []

    @classmethod
    def new_from_process(cls, proc):
        """Alternate constructor copying the identity fields of *proc*."""
        return cls(proc.pid, proc.tid, proc.comm)

    def update_prio(self, timestamp, prio):
        """Record a priority change at *timestamp*."""
        self.prio_list.append(PrioEvent(timestamp, prio))

    def reset(self):
        if self.prio_list:
            # Keep only the most recent priority so the next period
            # starts from the current value.
            self.prio_list = [self.prio_list[-1]]


class IO(Stats):
    """Byte counters for read and write activity."""

    def __init__(self):
        # Number of bytes read or written
        self.read = 0
        self.write = 0

    def reset(self):
        self.read = 0
        self.write = 0

    def __iadd__(self, other):
        """Accumulate another IO counter pair into this one."""
        self.read += other.read
        self.write += other.write
        return self
from . import stats
from .analysis import Analysis


class Memtop(Analysis):
    """Counts pages allocated and freed per TID."""

    def __init__(self, state, conf):
        notification_cbs = {
            'tid_page_alloc': self._process_tid_page_alloc,
            'tid_page_free': self._process_tid_page_free
        }

        super().__init__(state, conf)
        self._state.register_notification_cbs(notification_cbs)

        # Per-TID page counters, keyed by TID.
        self.tids = {}

    def reset(self):
        for proc_stats in self.tids.values():
            proc_stats.reset()

    def _get_proc_stats(self, proc):
        # Lazily create the per-TID stats entry.
        proc_stats = self.tids.get(proc.tid)
        if proc_stats is None:
            proc_stats = ProcessMemStats.new_from_process(proc)
            self.tids[proc.tid] = proc_stats
        return proc_stats

    def _process_tid_page_alloc(self, **kwargs):
        """Count one page allocation for the emitting process."""
        cpu_id = kwargs['cpu_id']
        proc = kwargs['proc']

        if not self._filter_process(proc):
            return
        if not self._filter_cpu(cpu_id):
            return

        self._get_proc_stats(proc).allocated_pages += 1

    def _process_tid_page_free(self, **kwargs):
        """Count one page free for the emitting process."""
        cpu_id = kwargs['cpu_id']
        proc = kwargs['proc']

        if not self._filter_process(proc):
            return
        if not self._filter_cpu(cpu_id):
            return

        self._get_proc_stats(proc).freed_pages += 1


class ProcessMemStats(stats.Process):
    """Page allocation/free counters for one process."""

    def __init__(self, pid, tid, comm):
        super().__init__(pid, tid, comm)

        self.allocated_pages = 0
        self.freed_pages = 0

    def reset(self):
        self.allocated_pages = 0
        self.freed_pages = 0
from .analysis import Analysis


class IrqAnalysis(Analysis):
    """Collects duration statistics for hard and soft interrupts."""

    def __init__(self, state, conf):
        notification_cbs = {
            'irq_handler_entry': self._process_irq_handler_entry,
            'irq_handler_exit': self._process_irq_handler_exit,
            'softirq_exit': self._process_softirq_exit
        }

        super().__init__(state, conf)
        self._state.register_notification_cbs(notification_cbs)

        # Indexed by irq 'id' (irq or vec)
        self.hard_irq_stats = {}
        self.softirq_stats = {}
        # Log of individual interrupts
        self.irq_list = []

    def reset(self):
        """Clear the interrupt log and every per-IRQ stats entry."""
        self.irq_list = []
        for id in self.hard_irq_stats:
            self.hard_irq_stats[id].reset()
        for id in self.softirq_stats:
            self.softirq_stats[id].reset()

    def _irq_filtered_out(self, irq):
        # True when the interrupt must be ignored: wrong CPU, or its
        # duration falls outside the configured bounds.  Shared by the
        # hard- and soft-IRQ exit handlers.
        if not self._filter_cpu(irq.cpu_id):
            return True
        if self._conf.min_duration is not None and \
                irq.duration < self._conf.min_duration:
            return True
        if self._conf.max_duration is not None and \
                irq.duration > self._conf.max_duration:
            return True
        return False

    def _process_irq_handler_entry(self, **kwargs):
        """Record (or extend) the device name(s) bound to a hard IRQ id."""
        id = kwargs['id']
        name = kwargs['irq_name']
        if id not in self.hard_irq_stats:
            self.hard_irq_stats[id] = HardIrqStats(name)
        elif name not in self.hard_irq_stats[id].names:
            # The same IRQ line can be shared by several devices.
            self.hard_irq_stats[id].names.append(name)

    def _process_irq_handler_exit(self, **kwargs):
        """Account one completed hard IRQ."""
        irq = kwargs['hard_irq']

        if self._irq_filtered_out(irq):
            return

        self.irq_list.append(irq)
        if irq.id not in self.hard_irq_stats:
            self.hard_irq_stats[irq.id] = HardIrqStats()

        self.hard_irq_stats[irq.id].update_stats(irq)

    def _process_softirq_exit(self, **kwargs):
        """Account one completed softirq."""
        irq = kwargs['softirq']

        if self._irq_filtered_out(irq):
            return

        self.irq_list.append(irq)
        if irq.id not in self.softirq_stats:
            # BUGFIX: fall back to a generic name instead of raising
            # KeyError for softirq vectors absent from the
            # (kernel-version-dependent) names table.
            name = SoftIrqStats.names.get(irq.id, 'unknown')
            self.softirq_stats[irq.id] = SoftIrqStats(name)

        self.softirq_stats[irq.id].update_stats(irq)


class IrqStats():
    """Duration statistics for one interrupt id."""

    def __init__(self, name):
        self._name = name
        self.min_duration = None
        self.max_duration = None
        self.total_duration = 0
        # Every individual interrupt with this id.
        self.irq_list = []

    @property
    def name(self):
        return self._name

    @property
    def count(self):
        """Number of recorded interrupts."""
        return len(self.irq_list)

    def update_stats(self, irq):
        """Fold one interrupt into the min/max/total duration stats."""
        if self.min_duration is None or irq.duration < self.min_duration:
            self.min_duration = irq.duration

        if self.max_duration is None or irq.duration > self.max_duration:
            self.max_duration = irq.duration

        self.total_duration += irq.duration
        self.irq_list.append(irq)

    def reset(self):
        self.min_duration = None
        self.max_duration = None
        self.total_duration = 0
        self.irq_list = []


class HardIrqStats(IrqStats):
    """Hard IRQ statistics; one IRQ line may serve several devices."""

    NAMES_SEPARATOR = ', '

    def __init__(self, name='unknown'):
        super().__init__(name)
        self.names = [name]

    @property
    def name(self):
        return self.NAMES_SEPARATOR.join(self.names)


class SoftIrqStats(IrqStats):
    """Softirq statistics, including raise-to-entry latency."""

    # from include/linux/interrupt.h
    names = {0: 'HI_SOFTIRQ',
             1: 'TIMER_SOFTIRQ',
             2: 'NET_TX_SOFTIRQ',
             3: 'NET_RX_SOFTIRQ',
             4: 'BLOCK_SOFTIRQ',
             5: 'BLOCK_IOPOLL_SOFTIRQ',
             6: 'TASKLET_SOFTIRQ',
             7: 'SCHED_SOFTIRQ',
             8: 'HRTIMER_SOFTIRQ',
             9: 'RCU_SOFTIRQ'}

    def __init__(self, name):
        super().__init__(name)
        self.min_raise_latency = None
        self.max_raise_latency = None
        self.total_raise_latency = 0
        self.raise_count = 0

    def update_stats(self, irq):
        """Fold one softirq in; also track raise latency when available."""
        super().update_stats(irq)

        # Not every softirq has a matching softirq_raise event.
        if irq.raise_ts is None:
            return

        raise_latency = irq.begin_ts - irq.raise_ts
        if self.min_raise_latency is None or \
                raise_latency < self.min_raise_latency:
            self.min_raise_latency = raise_latency

        if self.max_raise_latency is None or \
                raise_latency > self.max_raise_latency:
            self.max_raise_latency = raise_latency

        self.total_raise_latency += raise_latency
        self.raise_count += 1

    def reset(self):
        super().reset()
        self.min_raise_latency = None
        self.max_raise_latency = None
        self.total_raise_latency = 0
        self.raise_count = 0
from . import stats
from .analysis import Analysis


class SchedAnalysis(Analysis):
    """Measures scheduling latency (wakeup to sched_switch) per TID."""

    def __init__(self, state, conf):
        notification_cbs = {
            'sched_switch_per_tid': self._process_sched_switch,
            'prio_changed': self._process_prio_changed,
        }

        super().__init__(state, conf)
        self._state.register_notification_cbs(notification_cbs)

        # Log of individual wake scheduling events
        self.sched_list = []
        # Scheduling latency stats indexed by TID
        self.tids = {}
        # Global latency stats
        self.min_latency = None
        self.max_latency = None
        self.total_latency = 0

    @property
    def count(self):
        """Number of recorded scheduling events."""
        return len(self.sched_list)

    def reset(self):
        self.sched_list = []
        self.min_latency = None
        self.max_latency = None
        self.total_latency = 0
        for proc_stats in self.tids.values():
            proc_stats.reset()

    def _process_sched_switch(self, **kwargs):
        """Account the wakeup-to-switch latency of one sched_switch."""
        cpu_id = kwargs['cpu_id']
        switch_ts = kwargs['timestamp']
        wakee_proc = kwargs['wakee_proc']
        waker_proc = kwargs['waker_proc']
        next_tid = kwargs['next_tid']
        wakeup_ts = wakee_proc.last_wakeup

        if not self._filter_process(wakee_proc):
            return
        if not self._filter_cpu(cpu_id):
            return
        # No latency can be computed without a recorded wakeup.
        if wakeup_ts is None:
            return

        latency = switch_ts - wakeup_ts
        min_duration = self._conf.min_duration
        max_duration = self._conf.max_duration
        if min_duration is not None and latency < min_duration:
            return
        if max_duration is not None and latency > max_duration:
            return

        # Lazily create the per-TID entries for the waker and the wakee.
        if waker_proc is not None and waker_proc.tid not in self.tids:
            self.tids[waker_proc.tid] = \
                ProcessSchedStats.new_from_process(waker_proc)
            self.tids[waker_proc.tid].update_prio(switch_ts,
                                                  waker_proc.prio)
        if next_tid not in self.tids:
            self.tids[next_tid] = \
                ProcessSchedStats.new_from_process(wakee_proc)
            self.tids[next_tid].update_prio(switch_ts, wakee_proc.prio)

        event = SchedEvent(wakeup_ts, switch_ts, wakee_proc,
                           waker_proc, cpu_id)
        self.tids[next_tid].update_stats(event)
        self._update_stats(event)

    def _process_prio_changed(self, **kwargs):
        """Record a priority change for an already-known TID."""
        tid = kwargs['tid']
        if tid not in self.tids:
            return

        self.tids[tid].update_prio(kwargs['timestamp'], kwargs['prio'])

    def _update_stats(self, sched_event):
        # Fold one event into the global min/max/total latency.
        latency = sched_event.latency

        if self.min_latency is None or latency < self.min_latency:
            self.min_latency = latency
        if self.max_latency is None or latency > self.max_latency:
            self.max_latency = latency

        self.total_latency += latency
        self.sched_list.append(sched_event)


class ProcessSchedStats(stats.Process):
    """Scheduling latency statistics for one process."""

    def __init__(self, pid, tid, comm):
        super().__init__(pid, tid, comm)

        self.min_latency = None
        self.max_latency = None
        self.total_latency = 0
        self.sched_list = []

    @property
    def count(self):
        """Number of recorded scheduling events for this process."""
        return len(self.sched_list)

    def update_stats(self, sched_event):
        """Fold one event into this process's latency statistics."""
        latency = sched_event.latency

        if self.min_latency is None or latency < self.min_latency:
            self.min_latency = latency
        if self.max_latency is None or latency > self.max_latency:
            self.max_latency = latency

        self.total_latency += latency
        self.sched_list.append(sched_event)

    def reset(self):
        super().reset()
        self.min_latency = None
        self.max_latency = None
        self.total_latency = 0
        self.sched_list = []


class SchedEvent():
    """One wakeup-to-switch scheduling event."""

    def __init__(self, wakeup_ts, switch_ts, wakee_proc, waker_proc,
                 target_cpu):
        self.wakeup_ts = wakeup_ts
        self.switch_ts = switch_ts
        self.wakee_proc = wakee_proc
        self.waker_proc = waker_proc
        self.prio = wakee_proc.prio
        self.target_cpu = target_cpu
        self.latency = switch_ts - wakeup_ts
# The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. from . import stats from .analysis import Analysis from ..linuxautomaton import sv class IoAnalysis(Analysis): def __init__(self, state, conf): notification_cbs = { 'net_dev_xmit': self._process_net_dev_xmit, 'netif_receive_skb': self._process_netif_receive_skb, 'block_rq_complete': self._process_block_rq_complete, 'io_rq_exit': self._process_io_rq_exit, 'create_fd': self._process_create_fd, 'close_fd': self._process_close_fd, 'update_fd': self._process_update_fd, 'create_parent_proc': self._process_create_parent_proc } event_cbs = { 'lttng_statedump_block_device': self._process_lttng_statedump_block_device } super().__init__(state, conf) self._state.register_notification_cbs(notification_cbs) self._register_cbs(event_cbs) self.disks = {} self.ifaces = {} self.tids = {} def process_event(self, ev): super().process_event(ev) self._process_event_cb(ev) def reset(self): for dev in self.disks: self.disks[dev].reset() for name in self.ifaces: self.ifaces[name].reset() for tid in self.tids: self.tids[tid].reset() @property def disk_io_requests(self): for disk in self.disks.values(): for io_rq in disk.rq_list: yield io_rq @property def io_requests(self): return self._get_io_requests() @property def open_io_requests(self): return self._get_io_requests(sv.IORequest.OP_OPEN) @property def read_io_requests(self): return self._get_io_requests(sv.IORequest.OP_READ) @property def 
write_io_requests(self): return self._get_io_requests(sv.IORequest.OP_WRITE) @property def close_io_requests(self): return self._get_io_requests(sv.IORequest.OP_CLOSE) @property def sync_io_requests(self): return self._get_io_requests(sv.IORequest.OP_SYNC) @property def read_write_io_requests(self): return self._get_io_requests(sv.IORequest.OP_READ_WRITE) def _get_io_requests(self, io_operation=None): """Create a generator of syscall io requests by operation Args: io_operation (IORequest.OP_*, optional): The operation of the io_requests to return. Return all IO requests if None. """ for proc in self.tids.values(): for io_rq in proc.rq_list: if isinstance(io_rq, sv.BlockIORequest): continue if io_operation is None or \ sv.IORequest.is_equivalent_operation(io_operation, io_rq.operation): yield io_rq def get_files_stats(self): files_stats = {} for proc_stats in self.tids.values(): for fd_list in proc_stats.fds.values(): for fd_stats in fd_list: filename = fd_stats.filename # Add process name to generic filenames to # distinguish them if FileStats.is_generic_name(filename): filename += ' (%s)' % proc_stats.comm if filename not in files_stats: files_stats[filename] = FileStats(filename) files_stats[filename].update_stats(fd_stats, proc_stats) return files_stats @staticmethod def _assign_fds_to_parent(proc, parent): if proc.fds: toremove = [] for fd in proc.fds: if fd not in parent.fds: parent.fds[fd] = proc.fds[fd] else: # best effort to fix the filename if not parent.get_fd(fd).filename: parent.get_fd(fd).filename = proc.get_fd(fd).filename toremove.append(fd) for fd in toremove: del proc.fds[fd] def _process_net_dev_xmit(self, **kwargs): name = kwargs['iface_name'] sent_bytes = kwargs['sent_bytes'] cpu = kwargs['cpu_id'] if not self._filter_cpu(cpu): return if name not in self.ifaces: self.ifaces[name] = IfaceStats(name) self.ifaces[name].sent_packets += 1 self.ifaces[name].sent_bytes += sent_bytes def _process_netif_receive_skb(self, **kwargs): name = 
kwargs['iface_name'] recv_bytes = kwargs['recv_bytes'] cpu = kwargs['cpu_id'] if not self._filter_cpu(cpu): return if name not in self.ifaces: self.ifaces[name] = IfaceStats(name) self.ifaces[name].recv_packets += 1 self.ifaces[name].recv_bytes += recv_bytes def _process_block_rq_complete(self, **kwargs): req = kwargs['req'] proc = kwargs['proc'] cpu = kwargs['cpu_id'] if not self._filter_process(proc): return if not self._filter_cpu(cpu): return if req.dev not in self.disks: self.disks[req.dev] = DiskStats(req.dev) self.disks[req.dev].update_stats(req) if proc is not None: if proc.tid not in self.tids: self.tids[proc.tid] = ProcessIOStats.new_from_process(proc) self.tids[proc.tid].update_block_stats(req) def _process_lttng_statedump_block_device(self, event): dev = event['dev'] disk_name = event['diskname'] if dev not in self.disks: self.disks[dev] = DiskStats(dev, disk_name) else: self.disks[dev].disk_name = disk_name def _process_io_rq_exit(self, **kwargs): proc = kwargs['proc'] parent_proc = kwargs['parent_proc'] io_rq = kwargs['io_rq'] cpu = kwargs['cpu_id'] if not self._filter_process(parent_proc): return if not self._filter_cpu(cpu): return if proc.tid not in self.tids: self.tids[proc.tid] = ProcessIOStats.new_from_process(proc) if parent_proc.tid not in self.tids: self.tids[parent_proc.tid] = ( ProcessIOStats.new_from_process(parent_proc)) proc_stats = self.tids[proc.tid] parent_stats = self.tids[parent_proc.tid] fd_types = {} if io_rq.errno is None: if io_rq.operation == sv.IORequest.OP_READ or \ io_rq.operation == sv.IORequest.OP_WRITE: fd_types['fd'] = parent_stats.get_fd(io_rq.fd).fd_type elif io_rq.operation == sv.IORequest.OP_READ_WRITE: fd_types['fd_in'] = parent_stats.get_fd(io_rq.fd_in).fd_type fd_types['fd_out'] = parent_stats.get_fd(io_rq.fd_out).fd_type proc_stats.update_io_stats(io_rq, fd_types) parent_stats.update_fd_stats(io_rq) # Check if the proc stats comm corresponds to the actual # process comm. It might be that it was missing so far. 
if proc_stats.comm != proc.comm: proc_stats.comm = proc.comm if parent_stats.comm != parent_proc.comm: parent_stats.comm = parent_proc.comm def _process_create_parent_proc(self, **kwargs): proc = kwargs['proc'] parent_proc = kwargs['parent_proc'] if not self._filter_process(parent_proc): return if proc.tid not in self.tids: self.tids[proc.tid] = ProcessIOStats.new_from_process(proc) if parent_proc.tid not in self.tids: self.tids[parent_proc.tid] = ( ProcessIOStats.new_from_process(parent_proc)) proc_stats = self.tids[proc.tid] parent_stats = self.tids[parent_proc.tid] proc_stats.pid = parent_stats.tid IoAnalysis._assign_fds_to_parent(proc_stats, parent_stats) def _process_create_fd(self, **kwargs): timestamp = kwargs['timestamp'] parent_proc = kwargs['parent_proc'] tid = parent_proc.tid cpu = kwargs['cpu_id'] fd = kwargs['fd'] if not self._filter_process(parent_proc): return if not self._filter_cpu(cpu): return if tid not in self.tids: self.tids[tid] = ProcessIOStats.new_from_process(parent_proc) parent_stats = self.tids[tid] if fd not in parent_stats.fds: parent_stats.fds[fd] = [] parent_stats.fds[fd].append(FDStats.new_from_fd(parent_proc.fds[fd], timestamp)) def _process_close_fd(self, **kwargs): timestamp = kwargs['timestamp'] parent_proc = kwargs['parent_proc'] tid = parent_proc.tid cpu = kwargs['cpu_id'] fd = kwargs['fd'] if not self._filter_process(parent_proc): return if not self._filter_cpu(cpu): return parent_stats = self.tids[tid] last_fd = parent_stats.get_fd(fd) last_fd.close_ts = timestamp def _process_update_fd(self, **kwargs): parent_proc = kwargs['parent_proc'] tid = parent_proc.tid fd = kwargs['fd'] new_filename = parent_proc.fds[fd].filename fd_list = self.tids[tid].fds[fd] fd_list[-1].filename = new_filename class DiskStats(): MINORBITS = 20 MINORMASK = ((1 << MINORBITS) - 1) def __init__(self, dev, disk_name=None): self.dev = dev if disk_name is not None: self.disk_name = disk_name else: self.disk_name = DiskStats._get_name_from_dev(dev) 
self.min_rq_duration = None self.max_rq_duration = None self.total_rq_sectors = 0 self.total_rq_duration = 0 self.rq_list = [] @property def rq_count(self): return len(self.rq_list) def update_stats(self, req): if self.min_rq_duration is None or req.duration < self.min_rq_duration: self.min_rq_duration = req.duration if self.max_rq_duration is None or req.duration > self.max_rq_duration: self.max_rq_duration = req.duration self.total_rq_sectors += req.nr_sector self.total_rq_duration += req.duration self.rq_list.append(req) def reset(self): self.min_rq_duration = None self.max_rq_duration = None self.total_rq_sectors = 0 self.total_rq_duration = 0 self.rq_list = [] @staticmethod def _get_name_from_dev(dev): # imported from include/linux/kdev_t.h major = dev >> DiskStats.MINORBITS minor = dev & DiskStats.MINORMASK return '(%d,%d)' % (major, minor) class IfaceStats(): def __init__(self, name): self.name = name self.recv_bytes = 0 self.recv_packets = 0 self.sent_bytes = 0 self.sent_packets = 0 def reset(self): self.recv_bytes = 0 self.recv_packets = 0 self.sent_bytes = 0 self.sent_packets = 0 class ProcessIOStats(stats.Process): def __init__(self, pid, tid, comm): super().__init__(pid, tid, comm) self.disk_io = stats.IO() self.net_io = stats.IO() self.unk_io = stats.IO() self.block_io = stats.IO() # FDStats objects, indexed by fd (fileno) self.fds = {} self.rq_list = [] @classmethod def new_from_process(cls, proc): return cls(proc.pid, proc.tid, proc.comm) # Total read/write does not account for block layer I/O @property def total_read(self): return self.disk_io.read + self.net_io.read + self.unk_io.read @property def total_write(self): return self.disk_io.write + self.net_io.write + self.unk_io.write def update_fd_stats(self, req): if req.errno is not None: return if req.fd is not None: self.get_fd(req.fd).update_stats(req) elif isinstance(req, sv.ReadWriteIORequest): if req.fd_in is not None: self.get_fd(req.fd_in).update_stats(req) if req.fd_out is not None: 
self.get_fd(req.fd_out).update_stats(req) def update_block_stats(self, req): self.rq_list.append(req) if req.operation is sv.IORequest.OP_READ: self.block_io.read += req.size elif req.operation is sv.IORequest.OP_WRITE: self.block_io.write += req.size def update_io_stats(self, req, fd_types): self.rq_list.append(req) if req.size is None or req.errno is not None: return if req.operation is sv.IORequest.OP_READ: self._update_read(req.returned_size, fd_types['fd']) elif req.operation is sv.IORequest.OP_WRITE: self._update_write(req.returned_size, fd_types['fd']) elif req.operation is sv.IORequest.OP_READ_WRITE: self._update_read(req.returned_size, fd_types['fd_in']) self._update_write(req.returned_size, fd_types['fd_out']) def _update_read(self, size, fd_type): if fd_type == sv.FDType.disk: self.disk_io.read += size elif fd_type == sv.FDType.net or fd_type == sv.FDType.maybe_net: self.net_io.read += size else: self.unk_io.read += size def _update_write(self, size, fd_type): if fd_type == sv.FDType.disk: self.disk_io.write += size elif fd_type == sv.FDType.net or fd_type == sv.FDType.maybe_net: self.net_io.write += size else: self.unk_io.write += size def _get_current_fd(self, fd): fd_stats = self.fds[fd][-1] if fd_stats.close_ts is not None: return None return fd_stats @staticmethod def _get_fd_by_timestamp(fd_list, timestamp): """Return the FDStats object whose lifetime contains timestamp This method performs a recursive binary search on the given fd_list argument, and will find the FDStats object for which the timestamp is contained between its open_ts and close_ts attributes. Args: fd_list (list): list of FDStats object, sorted chronologically by open_ts timestamp (int): timestamp in nanoseconds (ns) since unix epoch which should be contained in the FD's lifetime. Returns: The FDStats object whose lifetime contains the given timestamp, None if no such object exists. 
""" list_size = len(fd_list) if list_size == 0: return None midpoint = list_size // 2 fd_stats = fd_list[midpoint] # Handle case of currently open fd (i.e. no close_ts) if fd_stats.close_ts is None: if timestamp >= fd_stats.open_ts: return fd_stats else: if fd_stats.open_ts <= timestamp <= fd_stats.close_ts: return fd_stats else: if timestamp < fd_stats.open_ts: return ProcessIOStats._get_fd_by_timestamp( fd_list[:midpoint], timestamp) else: return ProcessIOStats._get_fd_by_timestamp( fd_list[midpoint + 1:], timestamp) def get_fd(self, fd, timestamp=None): if fd not in self.fds or not self.fds[fd]: return None if timestamp is None: fd_stats = self._get_current_fd(fd) else: fd_stats = ProcessIOStats._get_fd_by_timestamp(self.fds[fd], timestamp) return fd_stats def reset(self): self.disk_io.reset() self.net_io.reset() self.unk_io.reset() self.block_io.reset() self.rq_list = [] for fd in self.fds: fd_stats = self.get_fd(fd) if fd_stats is not None: fd_stats.reset() class FDStats(): def __init__(self, fd, filename, fd_type, cloexec, family, open_ts): self.fd = fd self.filename = filename self.fd_type = fd_type self.cloexec = cloexec self.family = family self.open_ts = open_ts self.close_ts = None self.io = stats.IO() # IO Requests that acted upon the FD self.rq_list = [] @classmethod def new_from_fd(cls, fd, open_ts): return cls(fd.fd, fd.filename, fd.fd_type, fd.cloexec, fd.family, open_ts) def update_stats(self, req): if req.operation is sv.IORequest.OP_READ: self.io.read += req.returned_size elif req.operation is sv.IORequest.OP_WRITE: self.io.write += req.returned_size elif req.operation is sv.IORequest.OP_READ_WRITE: if self.fd == req.fd_in: self.io.read += req.returned_size elif self.fd == req.fd_out: self.io.write += req.returned_size self.rq_list.append(req) def reset(self): self.io.reset() self.rq_list = [] class FileStats(): GENERIC_NAMES = ['pipe', 'socket', 'anon_inode', 'unknown'] def __init__(self, filename): self.filename = filename self.io = stats.IO() 
# Dict of file descriptors representing this file, indexed by # parent pid # FIXME this doesn't cover FD reuse cases self.fd_by_pid = {} def update_stats(self, fd_stats, proc_stats): self.io += fd_stats.io if proc_stats.pid is not None: pid = proc_stats.pid else: pid = proc_stats.tid if pid not in self.fd_by_pid: self.fd_by_pid[pid] = fd_stats.fd def reset(self): self.io.reset() @staticmethod def is_generic_name(filename): for generic_name in FileStats.GENERIC_NAMES: if filename.startswith(generic_name): return True return False lttnganalyses-0.4.3/lttnganalyses/core/analysis.py0000664000175000017500000001600112665072151024076 0ustar mjeansonmjeanson00000000000000# The MIT License (MIT) # # Copyright (C) 2015 - Julien Desfossez # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. 
class AnalysisConfig: def __init__(self): self.refresh_period = None self.period_begin_ev_name = None self.period_end_ev_name = None self.period_begin_key_fields = None self.period_end_key_fields = None self.period_key_value = None self.begin_ts = None self.end_ts = None self.min_duration = None self.max_duration = None self.proc_list = None self.tid_list = None self.cpu_list = None class Analysis: TICK_CB = 'tick' def __init__(self, state, conf): self._state = state self._conf = conf self._period_key = None self._period_start_ts = None self._last_event_ts = None self._notification_cbs = {} self._cbs = {} self.started = False self.ended = False def process_event(self, ev): self._last_event_ts = ev.timestamp if not self.started: if self._conf.begin_ts: self._check_analysis_begin(ev) if not self.started: return else: self._period_start_ts = ev.timestamp self.started = True self._check_analysis_end(ev) if self.ended: return # Prioritise period events over refresh period if self._conf.period_begin_ev_name is not None: self._handle_period_event(ev) elif self._conf.refresh_period is not None: self._check_refresh(ev) def reset(self): raise NotImplementedError() def end(self): if self._period_start_ts: self._end_period() def register_notification_cbs(self, cbs): for name in cbs: if name not in self._notification_cbs: self._notification_cbs[name] = [] self._notification_cbs[name].append(cbs[name]) def _send_notification_cb(self, name, **kwargs): if name in self._notification_cbs: for cb in self._notification_cbs[name]: cb(**kwargs) def _register_cbs(self, cbs): self._cbs = cbs def _process_event_cb(self, ev): name = ev.name if name in self._cbs: self._cbs[name](ev) elif 'syscall_entry' in self._cbs and \ (name.startswith('sys_') or name.startswith('syscall_entry_')): self._cbs['syscall_entry'](ev) elif 'syscall_exit' in self._cbs and \ (name.startswith('exit_syscall') or name.startswith('syscall_exit_')): self._cbs['syscall_exit'](ev) def _check_analysis_begin(self, ev): if 
self._conf.begin_ts and ev.timestamp >= self._conf.begin_ts: self.started = True self._period_start_ts = ev.timestamp self.reset() def _check_analysis_end(self, ev): if self._conf.end_ts and ev.timestamp > self._conf.end_ts: self.ended = True def _check_refresh(self, ev): if not self._period_start_ts: self._period_start_ts = ev.timestamp elif ev.timestamp >= (self._period_start_ts + self._conf.refresh_period): self._end_period() self._period_start_ts = ev.timestamp def _handle_period_event(self, ev): if ev.name != self._conf.period_begin_ev_name and \ ev.name != self._conf.period_end_ev_name: return if self._period_key: period_key = Analysis._get_period_event_key( ev, self._conf.period_end_key_fields) if not period_key: # There was an error caused by a missing field, ignore # this period event return if period_key == self._period_key: if self._conf.period_end_ev_name: if ev.name == self._conf.period_end_ev_name: self._end_period() self._period_key = None self._period_start_ts = None elif ev.name == self._conf.period_begin_ev_name: self._end_period() self._begin_period(period_key, ev.timestamp) elif ev.name == self._conf.period_begin_ev_name: period_key = Analysis._get_period_event_key( ev, self._conf.period_begin_key_fields) if not period_key: return if self._conf.period_key_value: # Must convert the period key to string for comparison str_period_key = tuple(map(str, period_key)) if self._conf.period_key_value != str_period_key: return self._begin_period(period_key, ev.timestamp) def _begin_period(self, period_key, timestamp): self._period_key = period_key self._period_start_ts = timestamp self.reset() def _end_period(self): self._end_period_cb() self._send_notification_cb(Analysis.TICK_CB, begin_ns=self._period_start_ts, end_ns=self._last_event_ts) def _end_period_cb(self): pass @staticmethod def _get_period_event_key(ev, key_fields): if not key_fields: return None key_values = [] for field in key_fields: try: key_values.append(ev[field]) except KeyError: # Error: 
missing field return None return tuple(key_values) def _filter_process(self, proc): if not proc: return True if self._conf.proc_list and proc.comm not in self._conf.proc_list: return False if self._conf.tid_list and proc.tid not in self._conf.tid_list: return False return True def _filter_cpu(self, cpu): return not (self._conf.cpu_list and cpu not in self._conf.cpu_list) lttnganalyses-0.4.3/lttnganalyses/core/cputop.py0000664000175000017500000001631012665072151023570 0ustar mjeansonmjeanson00000000000000# The MIT License (MIT) # # Copyright (C) 2015 - Julien Desfossez # 2015 - Antoine Busque # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. from . 
import stats from .analysis import Analysis class Cputop(Analysis): def __init__(self, state, conf): notification_cbs = { 'sched_migrate_task': self._process_sched_migrate_task, 'sched_switch_per_cpu': self._process_sched_switch_per_cpu, 'sched_switch_per_tid': self._process_sched_switch_per_tid, 'prio_changed': self._process_prio_changed, } super().__init__(state, conf) self._state.register_notification_cbs(notification_cbs) self._ev_count = 0 self.cpus = {} self.tids = {} def process_event(self, ev): super().process_event(ev) self._ev_count += 1 def reset(self): for cpu_stats in self.cpus.values(): cpu_stats.reset() if cpu_stats.current_task_start_ts is not None: cpu_stats.current_task_start_ts = self._last_event_ts for proc_stats in self.tids.values(): proc_stats.reset() if proc_stats.last_sched_ts is not None: proc_stats.last_sched_ts = self._last_event_ts def _end_period_cb(self): self._compute_stats() def _compute_stats(self): """Compute usage stats relative to a certain time range For each CPU and process tracked by the analysis, we set its usage_percent attribute, which represents the percentage of usage time for the given CPU or process relative to the full duration of the time range. Do note that we need to know the timestamps and not just the duration, because if a CPU or a process is currently busy, we use the end timestamp to add the partial results of the currently running task to the usage stats. 
""" duration = self._last_event_ts - self._period_start_ts for cpu_id in self.cpus: cpu = self.cpus[cpu_id] if cpu.current_task_start_ts is not None: cpu.total_usage_time += self._last_event_ts - \ cpu.current_task_start_ts cpu.compute_stats(duration) for tid in self.tids: proc = self.tids[tid] if proc.last_sched_ts is not None: proc.total_cpu_time += self._last_event_ts - \ proc.last_sched_ts proc.compute_stats(duration) def _process_sched_switch_per_cpu(self, **kwargs): timestamp = kwargs['timestamp'] cpu_id = kwargs['cpu_id'] wakee_proc = kwargs['wakee_proc'] if not self._filter_cpu(cpu_id): return if cpu_id not in self.cpus: self.cpus[cpu_id] = CpuUsageStats(cpu_id) cpu = self.cpus[cpu_id] if cpu.current_task_start_ts is not None: cpu.total_usage_time += timestamp - cpu.current_task_start_ts if not self._filter_process(wakee_proc): cpu.current_task_start_ts = None else: cpu.current_task_start_ts = timestamp def _process_sched_switch_per_tid(self, **kwargs): cpu_id = kwargs['cpu_id'] wakee_proc = kwargs['wakee_proc'] timestamp = kwargs['timestamp'] prev_tid = kwargs['prev_tid'] next_tid = kwargs['next_tid'] next_comm = kwargs['next_comm'] if not self._filter_cpu(cpu_id): return if prev_tid in self.tids: prev_proc = self.tids[prev_tid] if prev_proc.last_sched_ts is not None: prev_proc.total_cpu_time += timestamp - prev_proc.last_sched_ts prev_proc.last_sched_ts = None # Only filter on wakee_proc after finalizing the prev_proc # accounting if not self._filter_process(wakee_proc): return if next_tid not in self.tids: self.tids[next_tid] = ProcessCpuStats(None, next_tid, next_comm) self.tids[next_tid].update_prio(timestamp, wakee_proc.prio) next_proc = self.tids[next_tid] next_proc.last_sched_ts = timestamp def _process_sched_migrate_task(self, **kwargs): cpu_id = kwargs['cpu_id'] proc = kwargs['proc'] tid = proc.tid if not self._filter_process(proc): return if not self._filter_cpu(cpu_id): return if tid not in self.tids: self.tids[tid] = 
ProcessCpuStats.new_from_process(proc) self.tids[tid].migrate_count += 1 def _process_prio_changed(self, **kwargs): timestamp = kwargs['timestamp'] prio = kwargs['prio'] tid = kwargs['tid'] if tid not in self.tids: return self.tids[tid].update_prio(timestamp, prio) def _filter_process(self, proc): # Exclude swapper if proc.tid == 0: return False return super()._filter_process(proc) @property def event_count(self): return self._ev_count class CpuUsageStats(): def __init__(self, cpu_id): self.cpu_id = cpu_id # Usage time and start timestamp are in nanoseconds (ns) self.total_usage_time = 0 self.current_task_start_ts = None self.usage_percent = None def compute_stats(self, duration): if duration != 0: self.usage_percent = self.total_usage_time * 100 / duration else: self.usage_percent = 0 def reset(self): self.total_usage_time = 0 self.usage_percent = None class ProcessCpuStats(stats.Process): def __init__(self, pid, tid, comm): super().__init__(pid, tid, comm) # CPU Time and timestamp in nanoseconds (ns) self.total_cpu_time = 0 self.last_sched_ts = None self.migrate_count = 0 self.usage_percent = None def compute_stats(self, duration): if duration != 0: self.usage_percent = self.total_cpu_time * 100 / duration else: self.usage_percent = 0 def reset(self): super().reset() self.total_cpu_time = 0 self.migrate_count = 0 self.usage_percent = None lttnganalyses-0.4.3/MANIFEST.in0000664000175000017500000000072112665620661017626 0ustar mjeansonmjeanson00000000000000include versioneer.py recursive-include tests * include ChangeLog include LICENSE include mit-license.txt include lttng-cputop include lttng-iolatencyfreq include lttng-iolatencystats include lttng-iolatencytop include lttng-iolog include lttng-iousagetop include lttng-irqfreq include lttng-irqlog include lttng-irqstats include lttng-memtop include lttng-schedfreq include lttng-schedlog include lttng-schedstats include lttng-schedtop include lttng-syscallstats 
lttnganalyses-0.4.3/setup.cfg0000664000175000017500000000036012667421106017703 0ustar mjeansonmjeanson00000000000000[versioneer] vcs = git style = pep440 versionfile_source = lttnganalyses/_version.py versionfile_build = lttnganalyses/_version.py tag_prefix = v parentdir_prefix = lttnganalyses- [egg_info] tag_svn_revision = 0 tag_date = 0 tag_build = lttnganalyses-0.4.3/lttng-schedlog0000775000175000017500000000235112665072151020730 0ustar mjeansonmjeanson00000000000000#!/usr/bin/env python3 # # The MIT License (MIT) # # Copyright (C) 2015 - Julien Desfossez # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. 
from lttnganalyses.cli import sched if __name__ == '__main__': sched.runlog() lttnganalyses-0.4.3/PKG-INFO0000664000175000017500000012742212667421106017170 0ustar mjeansonmjeanson00000000000000Metadata-Version: 1.1 Name: lttnganalyses Version: 0.4.3 Summary: LTTng analyses Home-page: https://github.com/lttng/lttng-analyses Author: Julien Desfossez Author-email: jdesfossez@efficios.com License: MIT Description: ************** LTTng-analyses ************** This repository contains various scripts to extract monitoring data and metrics from LTTng kernel traces. As opposed to other diagnostic or monitoring solutions, this approach is designed to allow users to record their system's activity with a low overhead, wait for a problem to occur and then diagnose its cause offline. This solution allows the user to target hard to find problems and dig until the root cause is found. This README describes the implemented analyses as well as how to use them. |pypi| .. contents:: :local: :depth: 2 :backlinks: none ============ Requirements ============ * LTTng >= 2.5 * Babeltrace >= 1.2 (with python bindings built) * Python >= 3.4 ============ Installation ============ --------------- Release version --------------- On **Ubuntu** (12.04 and up) using the LTTng ppa: .. code-block:: bash apt-get install -y software-properties-common (or python-software-properties on 12.04) apt-add-repository -y ppa:lttng/ppa apt-get update apt-get -y install lttng-tools babeltrace lttng-modules-dkms python3-babeltrace python3-progressbar python3-lttnganalyses On **Debian Sid**: .. code-block:: bash apt-get -y install lttng-tools babeltrace lttng-modules-dkms python3-babeltrace python3-progressbar python3-lttnganalyses On other distributions: Please refer to the `LTTng documentation `_ to install LTTng and the `Babeltrace README `_ to install ``babeltrace`` with the python bindings. Optionally install the ``progressbar`` python module, and then: .. 
code-block:: bash pip3 install lttnganalyses ------------------- Development version ------------------- The **latest development version** can be installed directly from GitHub: .. code-block:: bash pip3 install --upgrade git+git://github.com/lttng/lttng-analyses.git ============== Trace creation ============== Here are the basic commands to create a trace, for more information on the LTTng setup, please refer to the `LTTng documentation Getting started guide `_. --------- Automatic --------- From the cloned git tree: .. code-block:: bash ./lttng-analyses-record ------ Manual ------ .. code-block:: bash lttng create lttng enable-channel -k bla --subbuf-size=4M lttng enable-event -k sched_switch,block_rq_complete,block_rq_issue,block_bio_remap,block_bio_backmerge,netif_receive_skb,net_dev_xmit,sched_process_fork,sched_process_exec,lttng_statedump_process_state,lttng_statedump_file_descriptor,lttng_statedump_block_device,writeback_pages_written,mm_vmscan_wakeup_kswapd,mm_page_free,mm_page_alloc,block_dirty_buffer,irq_handler_entry,irq_handler_exit,softirq_entry,softirq_exit,softirq_raise -c bla lttng enable-event -k --syscall -a -c bla lttng start ..do stuff... lttng stop lttng destroy ------ Remote ------ You can also create a trace on a server and send it to a remote host. The remote host only needs to run ``lttng-relayd -d`` and be reachable over the network. The only difference with the above commands is the tracing session's creation: .. 
code-block:: bash lttng create -U net:// ==================== Implemented analyses ==================== * CPU usage for the whole system * CPU usage per-process * Process CPU migration count * Memory usage per-process (as seen by the kernel) * Memory usage system-wide (as seen by the kernel) * I/O usage (syscalls, disk, network) * I/O operations log (with latency and usage) * I/O latency statistics (open, read, write, sync operations) * I/O latency frequency distribution * Interrupt handler duration statistics (count, min, max, average stdev) * Interrupt handler duration top * Interrupt handler duration log * Interrupt handler duration frequency distribution * SoftIRQ handler latency statistics * Syscalls usage statistics All of the analyses share the same code architecture making it possible to filter by timerange, process name, PID, min and max values using the same command-line options. Also note that reported timestamps can optionally be expressed in the GMT timezone to allow easy sharing between teams. The project's architecture makes it easy to add new analyses or to reuse the analysis backend in external tools which may then present the results in their own format (as opposed to text). ======== Examples ======== After having collected your trace, any script contained in this repository can be used to run an analysis. Read on for some examples! --- I/O --- ^^^^^^^^^^^^^^^^^ I/O latency stats ^^^^^^^^^^^^^^^^^ .. 
code-block:: bash $ ./lttng-iolatencystats mytrace/ Timerange: [2015-01-06 10:58:26.140545481, 2015-01-06 10:58:27.229358936] Syscalls latency statistics (usec): Type Count Min Average Max Stdev ----------------------------------------------------------------------------------------- Open 45 5.562 13.835 77.683 15.263 Read 109 0.316 5.774 62.569 9.277 Write 101 0.256 7.060 48.531 8.555 Sync 207 19.384 40.664 160.188 21.201 Disk latency statistics (usec): Name Count Min Average Max Stdev ----------------------------------------------------------------------------------------- dm-0 108 0.001 0.004 0.007 1.306 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ I/O latency frequency distribution ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ .. code-block:: bash $ ./lttng-iolatencyfreq mytrace/ Timerange: [2015-01-06 10:58:26.140545481, 2015-01-06 10:58:27.229358936] Open latency distribution (usec) ############################################################################### 5.562 ███████████████████████████████████████████████████████████████████ 25 9.168 ██████████ 4 12.774 █████████████████████ 8 16.380 ████████ 3 19.986 █████ 2 23.592 0 27.198 0 30.804 0 34.410 ██ 1 38.016 0 41.623 0 45.229 0 48.835 0 52.441 0 56.047 0 59.653 0 63.259 0 66.865 0 70.471 0 74.077 █████ 2 ^^^^^^^^^^^^^^^ I/O latency top ^^^^^^^^^^^^^^^ .. code-block:: bash $ ./lttng-iolatencytop analysis-20150115-120942/ --limit 3 --minsize 2 Checking the trace for lost events... 
Timerange: [2015-01-15 12:18:37.216484041, 2015-01-15 12:18:53.821580313] Top open syscall latencies (usec) Begin End Name Duration (usec) Size Proc PID Filename [12:18:50.432950815,12:18:50.870648568] open 437697.753 N/A apache2 31517 /var/lib/php5/sess_0ifir2hangm8ggaljdphl9o5b5 (fd=13) [12:18:52.946080165,12:18:52.946132278] open 52.113 N/A apache2 31588 /var/lib/php5/sess_mr9045p1k55vin1h0vg7rhgd63 (fd=13) [12:18:46.800846035,12:18:46.800874916] open 28.881 N/A apache2 31591 /var/lib/php5/sess_r7c12pccfvjtas15g3j69u14h0 (fd=13) [12:18:51.389797604,12:18:51.389824426] open 26.822 N/A apache2 31520 /var/lib/php5/sess_4sdb1rtjkhb78sabnoj8gpbl00 (fd=13) Top read syscall latencies (usec) Begin End Name Duration (usec) Size Proc PID Filename [12:18:37.256073107,12:18:37.256555967] read 482.860 7.00 B bash 10237 unknown (origin not found) (fd=3) [12:18:52.000209798,12:18:52.000252304] read 42.506 1.00 KB irqbalance 1337 /proc/interrupts (fd=3) [12:18:37.256559439,12:18:37.256601615] read 42.176 5.00 B bash 10237 unknown (origin not found) (fd=3) [12:18:42.000281918,12:18:42.000320016] read 38.098 1.00 KB irqbalance 1337 /proc/interrupts (fd=3) Top write syscall latencies (usec) Begin End Name Duration (usec) Size Proc PID Filename [12:18:49.913241516,12:18:49.915908862] write 2667.346 95.00 B apache2 31584 /var/log/apache2/access.log (fd=8) [12:18:37.472823631,12:18:37.472859836] writev 36.205 21.97 KB apache2 31544 unknown (origin not found) (fd=12) [12:18:37.991578372,12:18:37.991612724] writev 34.352 21.97 KB apache2 31589 unknown (origin not found) (fd=12) [12:18:39.547778549,12:18:39.547812515] writev 33.966 21.97 KB apache2 31584 unknown (origin not found) (fd=12) Top sync syscall latencies (usec) Begin End Name Duration (usec) Size Proc PID Filename [12:18:50.162776739,12:18:51.157522361] sync 994745.622 N/A sync 22791 None (fd=None) [12:18:37.227867532,12:18:37.232289687] sync_file_range 4422.155 N/A lttng-consumerd 19964 
/home/julien/lttng-traces/analysis-20150115-120942/kernel/metadata (fd=32) [12:18:37.238076585,12:18:37.239012027] sync_file_range 935.442 N/A lttng-consumerd 19964 /home/julien/lttng-traces/analysis-20150115-120942/kernel/metadata (fd=32) [12:18:37.220974711,12:18:37.221647124] sync_file_range 672.413 N/A lttng-consumerd 19964 /home/julien/lttng-traces/analysis-20150115-120942/kernel/metadata (fd=32) ^^^^^^^^^^^^^^^^^^ I/O operations log ^^^^^^^^^^^^^^^^^^ .. code-block:: bash $ ./lttng-iolog mytrace/ [10:58:26.221618530,10:58:26.221620659] write 2.129 8.00 B /usr/bin/x-term 11793 anon_inode:[eventfd] (fd=5) [10:58:26.221623609,10:58:26.221628055] read 4.446 50.00 B /usr/bin/x-term 11793 /dev/ptmx (fd=24) [10:58:26.221638929,10:58:26.221640008] write 1.079 8.00 B /usr/bin/x-term 11793 anon_inode:[eventfd] (fd=5) [10:58:26.221676232,10:58:26.221677385] read 1.153 8.00 B /usr/bin/x-term 11793 anon_inode:[eventfd] (fd=5) [10:58:26.223401804,10:58:26.223411683] open 9.879 N/A sleep 12420 /etc/ld.so.cache (fd=3) [10:58:26.223448060,10:58:26.223455577] open 7.517 N/A sleep 12420 /lib/x86_64-linux-gnu/libc.so.6 (fd=3) [10:58:26.223456522,10:58:26.223458898] read 2.376 832.00 B sleep 12420 /lib/x86_64-linux-gnu/libc.so.6 (fd=3) [10:58:26.223918068,10:58:26.223929316] open 11.248 N/A sleep 12420 (fd=3) [10:58:26.231881565,10:58:26.231895970] writev 14.405 16.00 B /usr/bin/x-term 11793 socket:[45650] (fd=4) [10:58:26.231979636,10:58:26.231988446] recvmsg 8.810 16.00 B Xorg 1827 socket:[47480] (fd=38) ^^^^^^^^^^^^^ I/O usage top ^^^^^^^^^^^^^ .. 
code-block:: bash $ ./lttng-iousagetop traces/pgread-writes Timerange: [2014-10-07 16:36:00.733214969, 2014-10-07 16:36:18.804584183] Per-process I/O Read ############################################################################### ██████████████████████████████████████████████████ 16.00 MB lttng-consumerd (2619) 0 B file 4.00 B net 16.00 MB unknown █████ 1.72 MB lttng-consumerd (2619) 0 B file 0 B net 1.72 MB unknown █ 398.13 KB postgres (4219) 121.05 KB file 277.07 KB net 8.00 B unknown 256.09 KB postgres (1348) 0 B file 255.97 KB net 117.00 B unknown 204.81 KB postgres (4218) 204.81 KB file 0 B net 0 B unknown 123.77 KB postgres (4220) 117.50 KB file 6.26 KB net 8.00 B unknown Per-process I/O Write ############################################################################### ██████████████████████████████████████████████████ 16.00 MB lttng-consumerd (2619) 0 B file 8.00 MB net 8.00 MB unknown ██████ 2.20 MB postgres (4219) 2.00 MB file 202.23 KB net 0 B unknown █████ 1.73 MB lttng-consumerd (2619) 0 B file 887.73 KB net 882.58 KB unknown ██ 726.33 KB postgres (1165) 8.00 KB file 6.33 KB net 712.00 KB unknown 158.69 KB postgres (1168) 158.69 KB file 0 B net 0 B unknown 80.66 KB postgres (1348) 0 B file 80.66 KB net 0 B unknown Files Read ############################################################################### ██████████████████████████████████████████████████ 8.00 MB anon_inode:[lttng_stream] (lttng-consumerd) 'fd 32 in lttng-consumerd (2619)' █████ 834.41 KB base/16384/pg_internal.init 'fd 7 in postgres (4219)', 'fd 7 in postgres (4220)', 'fd 7 in postgres (4221)', 'fd 7 in postgres (4222)', 'fd 7 in postgres (4223)', 'fd 7 in postgres (4224)', 'fd 7 in postgres (4225)', 'fd 7 in postgres (4226)' █ 256.09 KB socket:[8893] (postgres) 'fd 9 in postgres (1348)' █ 174.69 KB pg_stat_tmp/pgstat.stat 'fd 9 in postgres (4218)', 'fd 9 in postgres (1167)' 109.48 KB global/pg_internal.init 'fd 7 in postgres (4218)', 'fd 7 in postgres (4219)', 'fd 7 in postgres 
(4220)', 'fd 7 in postgres (4221)', 'fd 7 in postgres (4222)', 'fd 7 in postgres (4223)', 'fd 7 in postgres (4224)', 'fd 7 in postgres (4225)', 'fd 7 in postgres (4226)' 104.30 KB base/11951/pg_internal.init 'fd 7 in postgres (4218)' 12.85 KB socket (lttng-sessiond) 'fd 30 in lttng-sessiond (384)' 4.50 KB global/pg_filenode.map 'fd 7 in postgres (4218)', 'fd 7 in postgres (4219)', 'fd 7 in postgres (4220)', 'fd 7 in postgres (4221)', 'fd 7 in postgres (4222)', 'fd 7 in postgres (4223)', 'fd 7 in postgres (4224)', 'fd 7 in postgres (4225)', 'fd 7 in postgres (4226)' 4.16 KB socket (postgres) 'fd 9 in postgres (4226)' 4.00 KB /proc/interrupts 'fd 3 in irqbalance (1104)' Files Write ############################################################################### ██████████████████████████████████████████████████ 8.00 MB socket:[56371] (lttng-consumerd) 'fd 30 in lttng-consumerd (2619)' █████████████████████████████████████████████████ 8.00 MB pipe:[53306] (lttng-consumerd) 'fd 12 in lttng-consumerd (2619)' ██████████ 1.76 MB pg_xlog/00000001000000000000000B 'fd 31 in postgres (4219)' █████ 887.82 KB socket:[56369] (lttng-consumerd) 'fd 26 in lttng-consumerd (2619)' █████ 882.58 KB pipe:[53309] (lttng-consumerd) 'fd 18 in lttng-consumerd (2619)' 160.00 KB /var/lib/postgresql/9.1/main/base/16384/16602 'fd 14 in postgres (1165)' 158.69 KB pg_stat_tmp/pgstat.tmp 'fd 3 in postgres (1168)' 144.00 KB /var/lib/postgresql/9.1/main/base/16384/16613 'fd 12 in postgres (1165)' 88.00 KB /var/lib/postgresql/9.1/main/base/16384/16609 'fd 11 in postgres (1165)' 78.28 KB socket:[8893] (postgres) 'fd 9 in postgres (1348)' Block I/O Read ############################################################################### Block I/O Write ############################################################################### ██████████████████████████████████████████████████ 1.76 MB postgres (pid=4219) ████ 160.00 KB postgres (pid=1168) ██ 100.00 KB kworker/u8:0 (pid=1540) ██ 96.00 KB jbd2/vda1-8 
(pid=257) █ 40.00 KB postgres (pid=1166) 8.00 KB kworker/u9:0 (pid=4197) 4.00 KB kworker/u9:2 (pid=1381) Disk nr_sector ############################################################################### ███████████████████████████████████████████████████████████████████ 4416.00 sectors vda1 Disk nr_requests ############################################################################### ████████████████████████████████████████████████████████████████████ 177.00 requests vda1 Disk request time/sector ############################################################################### ██████████████████████████████████████████████████████████████████ 0.01 ms vda1 Network recv_bytes ############################################################################### ███████████████████████████████████████████████████████ 739.50 KB eth0 █████ 80.27 KB lo Network sent_bytes ############################################################################### ████████████████████████████████████████████████████████ 9.36 MB eth0 -------- Syscalls -------- ^^^^^^^^^^ Statistics ^^^^^^^^^^ .. 
code-block:: bash $ ./lttng-syscallstats mytrace/ Timerange: [2015-01-15 12:18:37.216484041, 2015-01-15 12:18:53.821580313] Per-TID syscalls statistics (usec) find (22785) Count Min Average Max Stdev Return values - getdents 14240 0.380 364.301 43372.450 1629.390 {'success': 14240} - close 14236 0.233 0.506 4.932 0.217 {'success': 14236} - fchdir 14231 0.252 0.407 5.769 0.117 {'success': 14231} - open 7123 0.779 2.321 12.697 0.936 {'success': 7119, 'ENOENT': 4} - newfstatat 7118 1.457 143.562 28103.532 1410.281 {'success': 7118} - openat 7118 1.525 2.411 9.107 0.771 {'success': 7118} - newfstat 7117 0.272 0.654 8.707 0.248 {'success': 7117} - write 573 0.298 0.715 8.584 0.391 {'success': 573} - brk 27 0.615 5.768 30.792 7.830 {'success': 27} - rt_sigaction 22 0.227 0.283 0.589 0.098 {'success': 22} - mmap 12 1.116 2.116 3.597 0.762 {'success': 12} - mprotect 6 1.185 2.235 3.923 1.148 {'success': 6} - read 5 0.925 2.101 6.300 2.351 {'success': 5} - ioctl 4 0.342 1.151 2.280 0.873 {'success': 2, 'ENOTTY': 2} - access 4 1.166 2.530 4.202 1.527 {'ENOENT': 4} - rt_sigprocmask 3 0.325 0.570 0.979 0.357 {'success': 3} - dup2 2 0.250 0.562 0.874 ? {'success': 2} - munmap 2 3.006 5.399 7.792 ? {'success': 2} - execve 1 7277.974 7277.974 7277.974 ? {'success': 1} - setpgid 1 0.945 0.945 0.945 ? {'success': 1} - fcntl 1 ? 0.000 0.000 ? {} - newuname 1 1.240 1.240 1.240 ? {'success': 1} Total: 71847 ----------------------------------------------------------------------------------------------------------------- apache2 (31517) Count Min Average Max Stdev Return values - fcntl 192 ? 0.000 0.000 ? 
{} - newfstat 156 0.237 0.484 1.102 0.222 {'success': 156} - read 144 0.307 1.602 16.307 1.698 {'success': 117, 'EAGAIN': 27} - access 96 0.705 1.580 3.364 0.670 {'success': 12, 'ENOENT': 84} - newlstat 84 0.459 0.738 1.456 0.186 {'success': 63, 'ENOENT': 21} - newstat 74 0.735 2.266 11.212 1.772 {'success': 50, 'ENOENT': 24} - lseek 72 0.317 0.522 0.915 0.112 {'success': 72} - close 39 0.471 0.615 0.867 0.069 {'success': 39} - open 36 2.219 12162.689 437697.753 72948.868 {'success': 36} - getcwd 28 0.287 0.701 1.331 0.277 {'success': 28} - poll 27 1.080 1139.669 2851.163 856.723 {'success': 27} - times 24 0.765 0.956 1.327 0.107 {'success': 24} - setitimer 24 0.499 5.848 16.668 4.041 {'success': 24} - write 24 5.467 6.784 16.827 2.459 {'success': 24} - writev 24 10.241 17.645 29.817 5.116 {'success': 24} - mmap 15 3.060 3.482 4.406 0.317 {'success': 15} - munmap 15 2.944 3.502 4.154 0.427 {'success': 15} - brk 12 0.738 4.579 13.795 4.437 {'success': 12} - chdir 12 0.989 1.600 2.353 0.385 {'success': 12} - flock 6 0.906 1.282 2.043 0.423 {'success': 6} - rt_sigaction 6 0.530 0.725 1.123 0.217 {'success': 6} - pwrite64 6 1.262 1.430 1.692 0.143 {'success': 6} - rt_sigprocmask 6 0.539 0.650 0.976 0.162 {'success': 6} - shutdown 3 7.323 8.487 10.281 1.576 {'success': 3} - getsockname 3 1.015 1.228 1.585 0.311 {'success': 3} - accept4 3 5174453.611 3450157.282 5176018.235 ? {'success': 2} Total: 1131 --- IRQ --- ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Handler duration and raise latency statistics ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ .. 
code-block:: bash $ ./lttng-irqstats mytrace/ Timerange: [2014-03-11 16:05:41.314824752, 2014-03-11 16:05:45.041994298] Hard IRQ Duration (us) count min avg max stdev ----------------------------------------------------------------------------------| 1: 30 10.901 45.500 64.510 18.447 | 42: 259 3.203 7.863 21.426 3.183 | 43: 2 3.859 3.976 4.093 0.165 | 44: 92 0.300 3.995 6.542 2.181 | Soft IRQ Duration (us) Raise latency (us) count min avg max stdev | count min avg max stdev ----------------------------------------------------------------------------------|------------------------------------------------------------ 1: 495 0.202 21.058 51.060 11.047 | 53 2.141 11.217 20.005 7.233 3: 14 0.133 9.177 32.774 10.483 | 14 0.763 3.703 10.902 3.448 4: 257 5.981 29.064 125.862 15.891 | 257 0.891 3.104 15.054 2.046 6: 26 0.309 1.198 1.748 0.329 | 26 9.636 39.222 51.430 11.246 7: 299 1.185 14.768 90.465 15.992 | 298 1.286 31.387 61.700 11.866 9: 338 0.592 3.387 13.745 1.356 | 147 2.480 29.299 64.453 14.286 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Handler duration frequency distribution ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ .. 
code-block:: bash $ ./lttng-irqfreq --timerange [16:05:42,16:05:45] --irq 44 --stats mytrace/ Timerange: [2014-03-11 16:05:42.042034570, 2014-03-11 16:05:44.998914297] Hard IRQ Duration (us) count min avg max stdev ----------------------------------------------------------------------------------| 44: 72 0.300 4.018 6.542 2.164 | Frequency distribution iwlwifi (44) ############################################################################### 0.300 █████ 1.00 0.612 ██████████████████████████████████████████████████████████████ 12.00 0.924 ████████████████████ 4.00 1.236 ██████████ 2.00 1.548 0.00 1.861 █████ 1.00 2.173 0.00 2.485 █████ 1.00 2.797 ██████████████████████████ 5.00 3.109 █████ 1.00 3.421 ███████████████ 3.00 3.733 0.00 4.045 █████ 1.00 4.357 █████ 1.00 4.669 ██████████ 2.00 4.981 ██████████ 2.00 5.294 █████████████████████████████████████████ 8.00 5.606 ████████████████████████████████████████████████████████████████████ 13.00 5.918 ██████████████████████████████████████████████████████████████ 12.00 6.230 ███████████████ 3.00 ------ Others ------ There are a lot of other scripts, we encourage you to try them and read the ``--help`` to see all the available options. ================ Work in progress ================ Track the page cache and extract the latencies associated with pages flush to disk. In order to do that, we rely on the assumption that the pages are flushed in a FIFO order. 
It might not be 100% accurate, but it already gives great results : An example here when saving a file in vim:: [19:57:51.173332284 - 19:57:51.177794657] vim (31517) syscall_entry_fsync(fd = 4 ) = 0, 4.462 ms 1 dirty page(s) were flushed (assuming FIFO): vim (31517): 1 pages - blabla : 1 pages 13 active dirty filesystem page(s) (known): redis-server (2092): 2 pages - /var/log/redis/redis-server.log : 2 pages vim (31517): 2 pages - .blabla.swp : 2 pages lttng-consumerd (6750): 9 pages - unknown (origin not found) : 9 pages An other example when running the 'sync' command:: [19:57:53.046840755 - 19:57:53.072809609] sync (31554) syscall_entry_sync(fd = ) = 0, 25.969 ms 23 dirty page(s) were flushed (assuming FIFO): redis-server (2092): 2 pages - /var/log/redis/redis-server.log : 2 pages vim (31517): 9 pages - /home/julien/.viminfo.tmp : 6 pages - .blabla.swp : 3 pages lttng-consumerd (6750): 12 pages - unknown (origin not found) : 12 pages PostgreSQL with 'sys_fdatasync':: [13:49:39.908599447 - 13:49:39.915930730] postgres (1137) sys_fdatasync(fd = 7 ) = 0, 7.331 ms 2 pages allocated during the period 88 dirty page(s) were flushed (assuming FIFO): postgres (1137): 88 pages - /var/lib/postgresql/9.1/main/pg_xlog/000000010000000000000008 : 88 pages 68 last dirtied filesystem page(s): postgres (2419): 68 pages - base/11951/18410 : 46 pages - base/11951/18407 : 10 pages - base/11951/18407_fsm : 6 pages - base/11951/18410_fsm : 6 pages Detecting a fight for the I/O between a huge write and postgresql:: [13:49:47.242730583 - 13:49:47.442835037] python (2353) sys_write(fd = 3 , count = 102395904) = 102395904, 200.104 ms 34760 pages allocated during the period woke up kswapd during the period 10046 pages written on disk freed 33753 pages from the cache during the period 1397 last dirtied filesystem page(s): python (2353): 1325 pages - /root/bla : 1325 pages postgres (2419): 72 pages - base/11951/18419 : 72 pages =========== Limitations =========== The main limitation of this 
project is the fact that it can be quite slow to process a large trace. This project is a work in progress and we focus on the problem-solving aspect. Therefore, features have been prioritized over performance for now. One other aspect is the fact that the state is not persistent; the trace has to be re-processed if another analysis script is to be used on the same trace. Some scripts belonging to the same category allow the combination of multiple analyses into a single pass (see ``--freq``, ``--log``, ``--usage``, ``--latencystats``, etc). We are planning to add a way to save the state and/or create an interactive environment to allow the user to run multiple analyses on the same trace without having to process the trace every time. ========== Conclusion ========== We hope you have fun trying this project and please remember it is a work in progress; feedback, bug reports and improvement ideas are always welcome! .. _pip: http://www.pip-installer.org/en/latest/index.html .. |pypi| image:: https://img.shields.io/pypi/v/lttnganalyses.svg?style=flat-square&label=latest%20version :target: https://pypi.python.org/pypi/lttnganalyses :alt: Latest version released on PyPi Keywords: lttng tracing Platform: UNKNOWN Classifier: Development Status :: 4 - Beta Classifier: Intended Audience :: Developers Classifier: Intended Audience :: System Administrators Classifier: Topic :: System :: Monitoring Classifier: License :: OSI Approved :: MIT License Classifier: Programming Language :: Python :: 3.4 lttnganalyses-0.4.3/mit-license.txt0000664000175000017500000000204112667420737021043 0ustar mjeansonmjeanson00000000000000Copyright (c) 2016 EfficiOS Inc. 
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. lttnganalyses-0.4.3/lttng-irqfreq0000775000175000017500000000235012553274232020610 0ustar mjeansonmjeanson00000000000000#!/usr/bin/env python3 # # The MIT License (MIT) # # Copyright (C) 2015 - Julien Desfossez # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. 
# # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. from lttnganalyses.cli import irq if __name__ == '__main__': irq.runfreq() lttnganalyses-0.4.3/lttng-analyses-record0000775000175000017500000001015712665072151022236 0ustar mjeansonmjeanson00000000000000#!/bin/bash # # The MIT License (MIT) # # Copyright (C) 2015 - Julien Desfossez # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. 
# Helper to setup a local LTTng tracing session with the appropriate # settings for the lttng analyses scripts SESSION_NAME="lttng-analysis-$RANDOM" destroy() { lttng destroy $SESSION_NAME >/dev/null echo "" echo "You can now launch the analyses scripts on /$TRACEPATH" exit 0 } if test "$1" = "-h" -o "$1" = "--help"; then echo "usage : $0" exit 0 fi pgrep -u root lttng-sessiond >/dev/null if test $? != 0; then echo "Starting lttng-sessiond as root (trying sudo, start manually if \ it fails)" sudo lttng-sessiond -d if test $? != 0; then exit 1 fi fi SUDO="" groups|grep tracing >/dev/null if test $? != 0; then echo "You are not a member of the tracing group, so you need root \ access, the script will try with sudo" SUDO="sudo" fi # check if lttng command if in the path # check if the user can execute the command (with sudo if not in tracing group) # check if lttng-modules is installed $SUDO lttng list -k | grep sched_switch >/dev/null if test $? != 0; then echo "Something went wrong executing \"$SUDO lttng list -k | grep sched_switch\", \ try to fix the problem manually and then start the script again" fi # if our random session name was already in use, add more randomness... $SUDO lttng list | grep $SESSION_NAME if test $? = 0; then SESSION_NAME="$SESSION_NAME-$RANDOM" fi $SUDO lttng list | grep $SESSION_NAME if test $? = 0; then echo "Cannot create a random session name, something must be wrong" exit 2 fi lttng create $SESSION_NAME >/tmp/lttngout [[ $? 
!= 0 ]] && exit 2 TRACEPATH=$(grep Traces /tmp/lttngout | cut -d'/' -f2-) rm /tmp/lttngout trap "destroy" SIGINT SIGTERM lttng enable-channel -k chan1 --subbuf-size=8M >/dev/null # events that always work lttng enable-event -s $SESSION_NAME -k sched_switch,sched_wakeup,sched_waking,block_rq_complete,block_rq_issue,block_bio_remap,block_bio_backmerge,netif_receive_skb,net_dev_xmit,sched_process_fork,sched_process_exec,lttng_statedump_process_state,lttng_statedump_file_descriptor,lttng_statedump_block_device,mm_vmscan_wakeup_kswapd,mm_page_free,mm_page_alloc,block_dirty_buffer,irq_handler_entry,irq_handler_exit,softirq_entry,softirq_exit,softirq_raise -c chan1 >/dev/null [[ $? != 0 ]] && echo "Warning: some events were not enabled, some analyses might not be complete" # events that might fail on specific kernels and that are not mandatory lttng enable-event -s $SESSION_NAME -k writeback_pages_written -c chan1 >/dev/null 2>&1 [[ $? != 0 ]] && echo "Warning: Optional event writeback_pages_written could not be enabled, everything will still work (experimental feature)" lttng enable-event -s $SESSION_NAME -k -c chan1 --syscall -a >/dev/null [[ $? != 0 ]] && exit 2 # if you want to add Perf counters, do something like that : #lttng add-context -s $SESSION_NAME -k -t perf:cache-misses -t perf:major-faults -t perf:branch-load-misses >/dev/null lttng start $SESSION_NAME >/dev/null [[ $? != 0 ]] && exit 2 echo -n "The trace is now recording, press ctrl+c to stop it " while true; do echo -n "." 
sleep 1 done destroy lttnganalyses-0.4.3/lttng-iousagetop0000775000175000017500000000234712553274232021324 0ustar mjeansonmjeanson00000000000000#!/usr/bin/env python3 # # The MIT License (MIT) # # Copyright (C) 2015 - Julien Desfossez # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. 
from lttnganalyses.cli import io

# Thin executable wrapper: all of the I/O usage analysis lives in
# lttnganalyses.cli.io; this script only dispatches to it.
if __name__ == '__main__':
    io.runusage()

# The MIT License (MIT)
#
# Copyright (C) 2016 - Julien Desfossez
#                      Antoine Busque
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from .analysis_test import AnalysisTest


class CpuTest(AnalysisTest):
    """Integration test for the lttng-cputop analysis.

    Synthesizes a small scheduling trace with three processes of known
    CPU usage (100%, ~20% and ~25%) and compares the analysis output
    against the recorded expected text.
    """

    def write_trace(self):
        writer = self.trace_writer

        # prog100pc-cpu5 runs for the whole trace on CPU 5: 100%
        writer.write_sched_switch(1000, 5, 'swapper/5', 0,
                                  'prog100pc-cpu5', 42)

        # prog20pc-cpu0 runs for 2s, alternating with swapper every 100ms
        writer.sched_switch_50pc(1100, 5000, 0, 100,
                                 'swapper/0', 0, 'prog20pc-cpu0', 30664)

        # prog25pc-cpu1 runs for 2.5s, alternating with swapper every 100ms
        writer.sched_switch_50pc(5100, 10000, 1, 100,
                                 'swapper/1', 0, 'prog25pc-cpu1', 30665)

        # switch prog100pc-cpu5 back out at the end of the trace
        writer.write_sched_switch(11000, 5, 'prog100pc-cpu5', 42,
                                  'swapper/5', 0)
        writer.flush()

    def test_cputop(self):
        test_name = 'cputop'
        expected = self.get_expected_output(test_name)
        result = self.get_cmd_output('lttng-cputop')

        self._assertMultiLineEqual(result, expected, test_name)

# The MIT License (MIT)
#
# Copyright (C) 2016 - Julien Desfossez
#                      Antoine Busque
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.

from .analysis_test import AnalysisTest


class IoTest(AnalysisTest):
    """Integration tests for the lttng-io* analyses.

    Builds a trace mixing syscall I/O, block I/O and network I/O across
    processes that are variously known or unknown at statedump time.
    """

    def write_trace(self):
        writer = self.trace_writer

        # app (99) is known at statedump
        writer.write_lttng_statedump_process_state(
            1000, 0, 99, 99, 99, 99, 98, 98, 'app', 0, 5, 0, 5, 0)

        # app2 (100) unknown at statedump has testfile, FD 3 defined at
        # statedump
        writer.write_lttng_statedump_file_descriptor(
            1001, 0, 100, 3, 0, 0, 'testfile')

        # app write 10 bytes to FD 4
        writer.write_sched_switch(1002, 0, 'swapper/0', 0, 'app', 99)
        writer.write_syscall_write(1004, 0, 1, 4, 0xabcd, 10, 10)

        # app2 reads 100 bytes in FD 3
        writer.write_sched_switch(1006, 0, 'app', 99, 'app2', 100)
        writer.write_syscall_read(1008, 0, 1, 3, 0xcafe, 100, 100)

        # app3 and its FD 3 are completely unknown at statedump, tries to
        # read 100 bytes from FD 3 but only gets 42
        writer.write_sched_switch(1010, 0, 'app2', 100, 'app3', 101)
        writer.write_syscall_read(1012, 0, 1, 3, 0xcafe, 100, 42)

        # block write
        writer.write_block_rq_issue(1015, 0, 264241152, 33, 10, 40, 99,
                                    0, 0, '', 'app')
        writer.write_block_rq_complete(1016, 0, 264241152, 33, 10, 0, 0,
                                       0, '')

        # block read
        writer.write_block_rq_issue(1017, 0, 8388608, 33, 20, 90, 101,
                                    1, 0, '', 'app3')
        writer.write_block_rq_complete(1018, 0, 8388608, 33, 20, 0, 1,
                                       0, '')

        # net xmit
        writer.write_net_dev_xmit(1020, 2, 0xff, 32, 100, 'wlan0')

        # net receive
        writer.write_netif_receive_skb(1021, 1, 0xff, 100, 'wlan1')
        writer.write_netif_receive_skb(1022, 1, 0xff, 200, 'wlan0')

        # syscall open
        writer.write_syscall_open(1023, 0, 1, 'test/open/file', 0, 0, 42)
        writer.flush()

    def test_iousagetop(self):
        test_name = 'iousagetop'
        expected = self.get_expected_output(test_name)
        result = self.get_cmd_output('lttng-iousagetop')

        self._assertMultiLineEqual(result, expected, test_name)

    def test_iolatencytop(self):
        test_name = 'iolatencytop'
        expected = self.get_expected_output(test_name)
        result = self.get_cmd_output('lttng-iolatencytop')

        self._assertMultiLineEqual(result, expected, test_name)
import sys
import argparse

from babeltrace import TraceCollection, CTFScope, CTFTypeId


def get_definition_type(field, event):
    """Print the add_field() line matching one event payload field.

    Only integers, fixed-size char arrays and strings are handled; any
    other babeltrace type produces a FIXME comment in the generated code
    so it can be completed by hand from the metadata.
    """
    if field.type == CTFTypeId.INTEGER:
        # Unsigned integers map to the generated uintN_type attributes.
        prefix = 'u' if field.signedness == 0 else ''
        print('        self.{}.add_field(self.{}int{}_type, "_{}")'.format(
            event.name, prefix, field.length, field.name))
    elif field.type == CTFTypeId.ARRAY:
        print('        self.{}.add_field(self.array{}_type, "_{}")'.format(
            event.name, field.length, field.name))
    elif field.type == CTFTypeId.STRING:
        print('        self.{}.add_field(self.string_type, "_{}")'.format(
            event.name, field.name))
    else:
        print('        # FIXME {}.{}: Unhandled type {}'.format(
            event.name, field.name, field.type))


def gen_define(event):
    """Print a define_<event>() method and return its payload field names."""
    fields = []
    print('    def define_%s(self):' % (event.name))
    print('        self.%s = CTFWriter.EventClass("%s")' % (
        event.name, event.name))
    for field in event.fields:
        if field.scope == CTFScope.EVENT_FIELDS:
            fields.append(field.name)
            get_definition_type(field, event)
    print('        self.add_event(self.%s)' % event.name)
    print('')

    return fields


def gen_write(event, fields):
    """Print a write_<event>() method taking one argument per field."""
    # Keep the historical behaviour of emitting "None" for an event with
    # no payload fields at all.
    f_list = ', '.join(fields) if fields else None

    print('    def write_%s(self, time_ms, cpu_id, %s):' % (
        event.name, f_list))
    print('        event = CTFWriter.Event(self.%s)' % (event.name))
    print('        self.clock.time = time_ms * 1000000')
    print('        self.set_int(event.payload("_cpu_id"), cpu_id)')
    for field in event.fields:
        if field.scope != CTFScope.EVENT_FIELDS:
            continue
        fname = field.name
        if field.type == CTFTypeId.INTEGER:
            print('        self.set_int(event.payload("_%s"), %s)' % (
                fname, fname))
        elif field.type == CTFTypeId.ARRAY:
            print('        self.set_char_array(event.payload("_%s"), '
                  '%s)' % (fname, fname))
        elif field.type == CTFTypeId.STRING:
            print('        self.set_string(event.payload("_%s"), %s)' % (
                fname, fname))
        else:
            print('        # FIXME %s.%s: Unhandled type %d' % (
                event.name, field.name, field.type))
    print('        self.stream.append_event(event)')
    print('        self.stream.flush()')
    print('')


def gen_parser(handle, args):
    """Emit define/write method pairs for every event in the trace."""
    for h in handle.values():
        for event in h.events:
            fields = gen_define(event)
            gen_write(event, fields)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='CTFWriter code generator')
    parser.add_argument('path', metavar="", help='Trace path')
    args = parser.parse_args()

    traces = TraceCollection()
    handle = traces.add_traces_recursive(args.path, "ctf")
    if handle is None:
        sys.exit(1)

    gen_parser(handle, args)

    for h in handle.values():
        traces.remove_trace(h)

# The MIT License (MIT)
#
# Copyright (C) 2016 - Julien Desfossez
#                      Antoine Busque
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import os
import shutil
import tempfile
from babeltrace import CTFWriter, CTFStringEncoding


class TraceWriter():
    """Generate a synthetic single-stream CTF kernel trace for the tests.

    The trace is written under a fresh temporary directory; call
    rm_trace() to delete it and flush() to finalize metadata/stream.
    """

    def __init__(self):
        self._trace_root = tempfile.mkdtemp()
        self.trace_path = os.path.join(self.trace_root, "kernel")
        self.create_writer()
        self.create_stream_class()
        self.define_base_types()
        self.define_events()
        self.create_stream()

    @property
    def trace_root(self):
        return self._trace_root

    def rm_trace(self):
        shutil.rmtree(self.trace_root)

    def flush(self):
        self.writer.flush_metadata()
        self.stream.flush()

    def create_writer(self):
        self.clock = CTFWriter.Clock("A_clock")
        self.clock.description = "Simple clock"
        self.writer = CTFWriter.Writer(self.trace_path)
        self.writer.add_clock(self.clock)
        self.writer.add_environment_field("Python_version",
                                          str(sys.version_info))
        self.writer.add_environment_field("tracer_major", 2)
        self.writer.add_environment_field("tracer_minor", 8)
        self.writer.add_environment_field("tracer_patchlevel", 0)

    def create_stream_class(self):
        self.stream_class = CTFWriter.StreamClass("test_stream")
        self.stream_class.clock = self.clock

    def define_base_types(self):
        # 8-bit UTF-8 char used as the element of comm/cmd arrays.
        self.char8_type = CTFWriter.IntegerFieldDeclaration(8)
        self.char8_type.signed = True
        self.char8_type.encoding = CTFStringEncoding.UTF8
        self.char8_type.alignment = 8

        # Signed and unsigned 16/32/64-bit integers, all byte-aligned.
        for width in (16, 32, 64):
            for is_signed, prefix in ((True, 'int'), (False, 'uint')):
                decl = CTFWriter.IntegerFieldDeclaration(width)
                decl.signed = is_signed
                decl.alignment = 8
                setattr(self, '{}{}_type'.format(prefix, width), decl)

        self.array16_type = CTFWriter.ArrayFieldDeclaration(self.char8_type,
                                                            16)
        self.string_type = CTFWriter.StringFieldDeclaration()

    def add_event(self, event):
        # Every event carries the CPU it was recorded on.
        event.add_field(self.uint32_type, "_cpu_id")
        self.stream_class.add_event_class(event)

    def _define_event(self, name, field_specs):
        """Create an EventClass with the given (type, name) fields."""
        event = CTFWriter.EventClass(name)
        for ftype, fname in field_specs:
            event.add_field(ftype, fname)
        self.add_event(event)
        return event

    def define_sched_switch(self):
        self.sched_switch = self._define_event("sched_switch", (
            (self.array16_type, "_prev_comm"),
            (self.int32_type, "_prev_tid"),
            (self.int32_type, "_prev_prio"),
            (self.int64_type, "_prev_state"),
            (self.array16_type, "_next_comm"),
            (self.int32_type, "_next_tid"),
            (self.int32_type, "_next_prio"),
        ))

    def define_softirq_raise(self):
        self.softirq_raise = self._define_event("softirq_raise", (
            (self.uint32_type, "_vec"),
        ))

    def define_softirq_entry(self):
        self.softirq_entry = self._define_event("softirq_entry", (
            (self.uint32_type, "_vec"),
        ))

    def define_softirq_exit(self):
        self.softirq_exit = self._define_event("softirq_exit", (
            (self.uint32_type, "_vec"),
        ))

    def define_irq_handler_entry(self):
        self.irq_handler_entry = self._define_event("irq_handler_entry", (
            (self.int32_type, "_irq"),
            (self.string_type, "_name"),
        ))

    def define_irq_handler_exit(self):
        self.irq_handler_exit = self._define_event("irq_handler_exit", (
            (self.int32_type, "_irq"),
            (self.int32_type, "_ret"),
        ))

    def define_syscall_entry_write(self):
        self.syscall_entry_write = self._define_event("syscall_entry_write", (
            (self.uint32_type, "_fd"),
            (self.uint64_type, "_buf"),
            (self.uint64_type, "_count"),
        ))

    def define_syscall_exit_write(self):
        self.syscall_exit_write = self._define_event("syscall_exit_write", (
            (self.int64_type, "_ret"),
        ))

    def define_syscall_entry_read(self):
        self.syscall_entry_read = self._define_event("syscall_entry_read", (
            (self.uint32_type, "_fd"),
            (self.uint64_type, "_count"),
        ))

    def define_syscall_exit_read(self):
        self.syscall_exit_read = self._define_event("syscall_exit_read", (
            (self.uint64_type, "_buf"),
            (self.int64_type, "_ret"),
        ))

    def define_syscall_entry_open(self):
        self.syscall_entry_open = self._define_event("syscall_entry_open", (
            (self.string_type, "_filename"),
            (self.int32_type, "_flags"),
            (self.uint16_type, "_mode"),
        ))

    def define_syscall_exit_open(self):
        self.syscall_exit_open = self._define_event("syscall_exit_open", (
            (self.int64_type, "_ret"),
        ))

    def define_lttng_statedump_process_state(self):
        self.lttng_statedump_process_state = self._define_event(
            "lttng_statedump_process_state", (
                (self.int32_type, "_tid"),
                (self.int32_type, "_vtid"),
                (self.int32_type, "_pid"),
                (self.int32_type, "_vpid"),
                (self.int32_type, "_ppid"),
                (self.int32_type, "_vppid"),
                (self.array16_type, "_name"),
                (self.int32_type, "_type"),
                (self.int32_type, "_mode"),
                (self.int32_type, "_submode"),
                (self.int32_type, "_status"),
                (self.int32_type, "_ns_level"),
            ))

    def define_lttng_statedump_file_descriptor(self):
        self.lttng_statedump_file_descriptor = self._define_event(
            "lttng_statedump_file_descriptor", (
                (self.int32_type, "_pid"),
                (self.int32_type, "_fd"),
                (self.uint32_type, "_flags"),
                (self.uint32_type, "_fmode"),
                (self.string_type, "_filename"),
            ))

    def define_sched_wakeup(self):
        # NOTE: the "_success" field is declared but never filled by
        # write_sched_wakeup(), matching the original writer's behaviour.
        self.sched_wakeup = self._define_event("sched_wakeup", (
            (self.array16_type, "_comm"),
            (self.int32_type, "_tid"),
            (self.int32_type, "_prio"),
            (self.int32_type, "_success"),
            (self.int32_type, "_target_cpu"),
        ))

    def define_sched_waking(self):
        self.sched_waking = self._define_event("sched_waking", (
            (self.array16_type, "_comm"),
            (self.int32_type, "_tid"),
            (self.int32_type, "_prio"),
            (self.int32_type, "_target_cpu"),
        ))

    def define_block_rq_complete(self):
        self.block_rq_complete = self._define_event("block_rq_complete", (
            (self.uint32_type, "_dev"),
            (self.uint64_type, "_sector"),
            (self.uint32_type, "_nr_sector"),
            (self.int32_type, "_errors"),
            (self.uint32_type, "_rwbs"),
            (self.uint64_type, "__cmd_length"),
            (self.array16_type, "_cmd"),
        ))

    def define_block_rq_issue(self):
        self.block_rq_issue = self._define_event("block_rq_issue", (
            (self.uint32_type, "_dev"),
            (self.uint64_type, "_sector"),
            (self.uint32_type, "_nr_sector"),
            (self.uint32_type, "_bytes"),
            (self.int32_type, "_tid"),
            (self.uint32_type, "_rwbs"),
            (self.uint64_type, "__cmd_length"),
            (self.array16_type, "_cmd"),
            (self.array16_type, "_comm"),
        ))

    def define_net_dev_xmit(self):
        self.net_dev_xmit = self._define_event("net_dev_xmit", (
            (self.uint64_type, "_skbaddr"),
            (self.int32_type, "_rc"),
            (self.uint32_type, "_len"),
            (self.string_type, "_name"),
        ))

    def define_netif_receive_skb(self):
        self.netif_receive_skb = self._define_event("netif_receive_skb", (
            (self.uint64_type, "_skbaddr"),
            (self.uint32_type, "_len"),
            (self.string_type, "_name"),
        ))

    def define_events(self):
        self.define_sched_switch()
        self.define_softirq_raise()
        self.define_softirq_entry()
        self.define_softirq_exit()
        self.define_irq_handler_entry()
        self.define_irq_handler_exit()
        self.define_syscall_entry_write()
        self.define_syscall_exit_write()
        self.define_syscall_entry_read()
        self.define_syscall_exit_read()
        self.define_syscall_entry_open()
        self.define_syscall_exit_open()
        self.define_lttng_statedump_process_state()
        self.define_lttng_statedump_file_descriptor()
        self.define_sched_wakeup()
        self.define_sched_waking()
        self.define_block_rq_complete()
        self.define_block_rq_issue()
        self.define_net_dev_xmit()
        self.define_netif_receive_skb()

    def create_stream(self):
        self.stream = self.writer.create_stream(self.stream_class)

    def set_char_array(self, event, string):
        # Fixed 16-byte field: truncate long values, NUL-pad short ones.
        if len(string) > 16:
            padded = string[0:16]
        else:
            padded = string + "\0" * (16 - len(string))
        for index, char in enumerate(padded):
            event.field(index).value = ord(char)

    def set_int(self, event, value):
        event.value = value

    def set_string(self, event, value):
        event.value = value

    def write_softirq_raise(self, time_ms, cpu_id, vec):
        event = CTFWriter.Event(self.softirq_raise)
        self.clock.time = time_ms * 1000000
        self.set_int(event.payload("_cpu_id"), cpu_id)
        self.set_int(event.payload("_vec"), vec)
        self.stream.append_event(event)
        self.stream.flush()

    def write_softirq_entry(self, time_ms, cpu_id, vec):
        event = CTFWriter.Event(self.softirq_entry)
        self.clock.time = time_ms * 1000000
        self.set_int(event.payload("_cpu_id"), cpu_id)
        self.set_int(event.payload("_vec"), vec)
        self.stream.append_event(event)
        self.stream.flush()

    def write_softirq_exit(self, time_ms, cpu_id, vec):
        event = CTFWriter.Event(self.softirq_exit)
        self.clock.time = time_ms * 1000000
        self.set_int(event.payload("_cpu_id"), cpu_id)
        self.set_int(event.payload("_vec"), vec)
        self.stream.append_event(event)
        self.stream.flush()

    def write_irq_handler_entry(self, time_ms, cpu_id, irq, name):
        event = CTFWriter.Event(self.irq_handler_entry)
        self.clock.time = time_ms * 1000000
        self.set_int(event.payload("_cpu_id"), cpu_id)
        self.set_int(event.payload("_irq"), irq)
        self.set_string(event.payload("_name"), name)
        self.stream.append_event(event)
        self.stream.flush()

    def write_irq_handler_exit(self, time_ms, cpu_id, irq, ret):
        event = CTFWriter.Event(self.irq_handler_exit)
        self.clock.time = time_ms * 1000000
        self.set_int(event.payload("_cpu_id"), cpu_id)
        self.set_int(event.payload("_irq"), irq)
        self.set_int(event.payload("_ret"), ret)
        self.stream.append_event(event)
        self.stream.flush()

    def write_syscall_write(self, time_ms, cpu_id, delay, fd, buf, count,
                            ret):
        # Entry at time_ms, matching exit `delay` ms later.
        event_entry = CTFWriter.Event(self.syscall_entry_write)
        self.clock.time = time_ms * 1000000
        self.set_int(event_entry.payload("_cpu_id"), cpu_id)
        self.set_int(event_entry.payload("_fd"), fd)
        self.set_int(event_entry.payload("_buf"), buf)
        self.set_int(event_entry.payload("_count"), count)
        self.stream.append_event(event_entry)

        event_exit = CTFWriter.Event(self.syscall_exit_write)
        self.clock.time = (time_ms + delay) * 1000000
        self.set_int(event_exit.payload("_cpu_id"), cpu_id)
        self.set_int(event_exit.payload("_ret"), ret)
        self.stream.append_event(event_exit)
        self.stream.flush()

    def write_syscall_read(self, time_ms, cpu_id, delay, fd, buf, count,
                           ret):
        # Entry at time_ms, matching exit `delay` ms later.
        event_entry = CTFWriter.Event(self.syscall_entry_read)
        self.clock.time = time_ms * 1000000
        self.set_int(event_entry.payload("_cpu_id"), cpu_id)
        self.set_int(event_entry.payload("_fd"), fd)
        self.set_int(event_entry.payload("_count"), count)
        self.stream.append_event(event_entry)

        event_exit = CTFWriter.Event(self.syscall_exit_read)
        self.clock.time = (time_ms + delay) * 1000000
        self.set_int(event_exit.payload("_cpu_id"), cpu_id)
        self.set_int(event_exit.payload("_buf"), buf)
        self.set_int(event_exit.payload("_ret"), ret)
        self.stream.append_event(event_exit)
        self.stream.flush()

    def write_syscall_open(self, time_ms, cpu_id, delay, filename, flags,
                           mode, ret):
        event = CTFWriter.Event(self.syscall_entry_open)
        self.clock.time = time_ms * 1000000
        self.set_int(event.payload("_cpu_id"), cpu_id)
        self.set_string(event.payload("_filename"), filename)
        self.set_int(event.payload("_flags"), flags)
        self.set_int(event.payload("_mode"), mode)
        self.stream.append_event(event)
        self.stream.flush()

        event = CTFWriter.Event(self.syscall_exit_open)
        self.clock.time = (time_ms + delay) * 1000000
        self.set_int(event.payload("_cpu_id"), cpu_id)
        self.set_int(event.payload("_ret"), ret)
        self.stream.append_event(event)
        self.stream.flush()

    def write_lttng_statedump_file_descriptor(self, time_ms, cpu_id, pid, fd,
                                              flags, fmode, filename):
        event = CTFWriter.Event(self.lttng_statedump_file_descriptor)
        self.clock.time = time_ms * 1000000
        self.set_int(event.payload("_cpu_id"), cpu_id)
        self.set_int(event.payload("_pid"), pid)
        self.set_int(event.payload("_fd"), fd)
        self.set_int(event.payload("_flags"), flags)
        self.set_int(event.payload("_fmode"), fmode)
        self.set_string(event.payload("_filename"), filename)
        self.stream.append_event(event)
        self.stream.flush()

    def write_lttng_statedump_process_state(self, time_ms, cpu_id, tid, vtid,
                                            pid, vpid, ppid, vppid, name,
                                            type, mode, submode, status,
                                            ns_level):
        event = CTFWriter.Event(self.lttng_statedump_process_state)
        self.clock.time = time_ms * 1000000
        self.set_int(event.payload("_cpu_id"), cpu_id)
        self.set_int(event.payload("_tid"), tid)
        self.set_int(event.payload("_vtid"), vtid)
        self.set_int(event.payload("_pid"), pid)
        self.set_int(event.payload("_vpid"), vpid)
        self.set_int(event.payload("_ppid"), ppid)
        self.set_int(event.payload("_vppid"), vppid)
        self.set_char_array(event.payload("_name"), name)
        self.set_int(event.payload("_type"), type)
        self.set_int(event.payload("_mode"), mode)
        self.set_int(event.payload("_submode"), submode)
        self.set_int(event.payload("_status"), status)
        self.set_int(event.payload("_ns_level"), ns_level)
        self.stream.append_event(event)
        self.stream.flush()

    def write_sched_wakeup(self, time_ms, cpu_id, comm, tid, prio,
                           target_cpu):
        event = CTFWriter.Event(self.sched_wakeup)
        self.clock.time = time_ms * 1000000
        self.set_int(event.payload("_cpu_id"), cpu_id)
        self.set_char_array(event.payload("_comm"), comm)
        self.set_int(event.payload("_tid"), tid)
        self.set_int(event.payload("_prio"), prio)
        self.set_int(event.payload("_target_cpu"), target_cpu)
        self.stream.append_event(event)
        self.stream.flush()

    def write_sched_waking(self, time_ms, cpu_id, comm, tid, prio,
                           target_cpu):
        event = CTFWriter.Event(self.sched_waking)
        self.clock.time = time_ms * 1000000
        self.set_int(event.payload("_cpu_id"), cpu_id)
        self.set_char_array(event.payload("_comm"), comm)
        self.set_int(event.payload("_tid"), tid)
        self.set_int(event.payload("_prio"), prio)
        self.set_int(event.payload("_target_cpu"), target_cpu)
        self.stream.append_event(event)
        self.stream.flush()

    def write_block_rq_complete(self, time_ms, cpu_id, dev, sector, nr_sector,
                                errors, rwbs, _cmd_length, cmd):
        event = CTFWriter.Event(self.block_rq_complete)
        self.clock.time = time_ms * 1000000
        self.set_int(event.payload("_cpu_id"), cpu_id)
        self.set_int(event.payload("_dev"), dev)
        self.set_int(event.payload("_sector"), sector)
        self.set_int(event.payload("_nr_sector"), nr_sector)
        self.set_int(event.payload("_errors"), errors)
        self.set_int(event.payload("_rwbs"), rwbs)
        self.set_int(event.payload("__cmd_length"), _cmd_length)
        self.set_char_array(event.payload("_cmd"), cmd)
        self.stream.append_event(event)
        self.stream.flush()

    def write_block_rq_issue(self, time_ms, cpu_id, dev, sector, nr_sector,
                             bytes, tid, rwbs, _cmd_length, cmd, comm):
        event = CTFWriter.Event(self.block_rq_issue)
        self.clock.time = time_ms * 1000000
        self.set_int(event.payload("_cpu_id"), cpu_id)
        self.set_int(event.payload("_dev"), dev)
        self.set_int(event.payload("_sector"), sector)
        self.set_int(event.payload("_nr_sector"), nr_sector)
        self.set_int(event.payload("_bytes"), bytes)
        self.set_int(event.payload("_tid"), tid)
        self.set_int(event.payload("_rwbs"), rwbs)
        self.set_int(event.payload("__cmd_length"), _cmd_length)
        self.set_char_array(event.payload("_cmd"), cmd)
        self.set_char_array(event.payload("_comm"), comm)
        self.stream.append_event(event)
        self.stream.flush()

    def write_net_dev_xmit(self, time_ms, cpu_id, skbaddr, rc, len, name):
        event = CTFWriter.Event(self.net_dev_xmit)
        self.clock.time = time_ms * 1000000
        self.set_int(event.payload("_cpu_id"), cpu_id)
        self.set_int(event.payload("_skbaddr"), skbaddr)
        self.set_int(event.payload("_rc"), rc)
        self.set_int(event.payload("_len"), len)
        self.set_string(event.payload("_name"), name)
        self.stream.append_event(event)
        self.stream.flush()

    def write_netif_receive_skb(self, time_ms, cpu_id, skbaddr, len, name):
        event = CTFWriter.Event(self.netif_receive_skb)
        self.clock.time = time_ms * 1000000
        self.set_int(event.payload("_cpu_id"), cpu_id)
        self.set_int(event.payload("_skbaddr"), skbaddr)
        self.set_int(event.payload("_len"), len)
        self.set_string(event.payload("_name"), name)
        self.stream.append_event(event)
        self.stream.flush()

    def write_sched_switch(self, time_ms, cpu_id, prev_comm, prev_tid,
                           next_comm, next_tid, prev_prio=20, prev_state=1,
                           next_prio=20):
        event = CTFWriter.Event(self.sched_switch)
        self.clock.time = time_ms * 1000000
        self.set_char_array(event.payload("_prev_comm"), prev_comm)
        self.set_int(event.payload("_prev_tid"), prev_tid)
        self.set_int(event.payload("_prev_prio"), prev_prio)
        self.set_int(event.payload("_prev_state"), prev_state)
        self.set_char_array(event.payload("_next_comm"), next_comm)
        self.set_int(event.payload("_next_tid"), next_tid)
        self.set_int(event.payload("_next_prio"), next_prio)
        self.set_int(event.payload("_cpu_id"), cpu_id)
        self.stream.append_event(event)
        self.stream.flush()

    def sched_switch_50pc(self, start_time_ms, end_time_ms, cpu_id, period,
                          comm1, tid1, comm2, tid2):
        """Alternate comm1/comm2 on cpu_id every `period` ms (50% each)."""
        now = start_time_ms
        while now < end_time_ms:
            self.write_sched_switch(now, cpu_id, comm1, tid1, comm2, tid2)
            now += period
            self.write_sched_switch(now, cpu_id, comm2, tid2, comm1, tid1)
            now += period

# The MIT License (MIT)
#
# Copyright (C) 2016 - Antoine Busque
#
# Permission is hereby granted, free of charge, to any
person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. lttnganalyses-0.4.3/tests/analysis_test.py0000664000175000017500000000570512667420737022501 0ustar mjeansonmjeanson00000000000000# The MIT License (MIT) # # Copyright (C) 2016 - Julien Desfossez # Antoine Busque # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. 
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.

import os
import subprocess
import unittest

from .trace_writer import TraceWriter


class AnalysisTest(unittest.TestCase):
    """Base class for end-to-end analysis tests.

    Subclasses override write_trace() to generate a synthetic trace with
    TraceWriter; each test then runs one of the lttng-* command-line
    analyses on it and compares the textual output against the files
    stored under tests/expected/.
    """

    COMMON_OPTIONS = '--no-progress --skip-validation --gmt'

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Cleared by save_test_result() on failure so the trace is kept
        # on disk for inspection.
        self.rm_trace = True

    def set_up_class(self):
        """Create the temporary trace before the test runs."""
        dirname = os.path.dirname(os.path.realpath(__file__))
        self.data_path = dirname + '/expected/'
        self.maxDiff = None
        self.trace_writer = TraceWriter()
        self.write_trace()

    def tear_down_class(self):
        """Delete the temporary trace unless a failure kept it around."""
        if self.rm_trace:
            self.trace_writer.rm_trace()

    def write_trace(self):
        # Subclasses must generate their trace here.
        raise NotImplementedError

    def run(self, result=None):
        """Run the test case, wrapping it with trace setup/teardown.

        Bug fix: unittest.TestCase.run() returns the TestResult it
        actually used (creating one when `result` is None). The previous
        code discarded that and returned the incoming parameter, which
        could be None; capture and return the real result instead.
        """
        self.set_up_class()
        result = super().run(result)
        self.tear_down_class()

        return result

    def get_expected_output(self, test_name):
        """Read the recorded expected output for `test_name`."""
        expected_path = os.path.join(self.data_path, test_name + '.txt')
        with open(expected_path, 'r') as expected_file:
            return expected_file.read()

    def get_cmd_output(self, exec_name, options=''):
        """Run one analysis executable on the trace and capture its output."""
        cmd_fmt = './{} {} {} {}'
        cmd = cmd_fmt.format(exec_name, self.COMMON_OPTIONS,
                             options, self.trace_writer.trace_root)

        return subprocess.getoutput(cmd)

    def save_test_result(self, result, test_name):
        """Persist a failing test's output next to the trace and keep both."""
        result_path = os.path.join(self.trace_writer.trace_root, test_name)
        with open(result_path, 'w') as result_file:
            result_file.write(result)
        self.rm_trace = False

    def _assertMultiLineEqual(self, result, expected, test_name):
        # On mismatch, save the actual output before re-raising so it can
        # be diffed against the expected file.
        try:
            self.assertMultiLineEqual(result, expected)
        except AssertionError:
            self.save_test_result(result, test_name)
            raise
lttnganalyses-0.4.3/tests/test_irq.py0000664000175000017500000001075112667420737021446 0ustar mjeansonmjeanson00000000000000# The MIT License (MIT) # # Copyright (C) 2016 - Julien Desfossez # Antoine Busque # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. 
from .analysis_test import AnalysisTest


class IrqTest(AnalysisTest):
    """Regression tests for the lttng-irqstats and lttng-irqlog analyses.

    write_trace() synthesizes a fixed sequence of hard-IRQ and soft-IRQ
    events; each test runs the corresponding CLI tool on that trace and
    compares its textual output with the reference files under
    tests/expected/ (irqstats.txt, irqlog.txt).
    """

    def write_trace(self):
        # Event arguments are (timestamp, cpu_id, vector[, extra]).
        # The exact values and ordering below are the fixture: they are
        # what the expected irqstats/irqlog outputs were generated from,
        # so they must not be reordered or changed.
        #
        # First, softirqs raised/entered/exited interleaved across CPUs
        # 1 and 3 (vectors 1=TIMER, 7=SCHED, 9=RCU) to exercise
        # raise-latency matching across CPUs.
        self.trace_writer.write_softirq_raise(1000, 1, 1)
        self.trace_writer.write_softirq_raise(1001, 3, 1)
        self.trace_writer.write_softirq_raise(1002, 1, 9)
        # Exit with no matching entry (CPU 0, vector 4): an in-progress
        # softirq at trace start, which the analysis must tolerate.
        self.trace_writer.write_softirq_exit(1003, 0, 4)
        self.trace_writer.write_softirq_raise(1004, 3, 9)
        self.trace_writer.write_softirq_raise(1005, 3, 7)
        self.trace_writer.write_softirq_entry(1006, 3, 1)
        self.trace_writer.write_softirq_entry(1007, 1, 1)
        self.trace_writer.write_softirq_exit(1008, 1, 1)
        self.trace_writer.write_softirq_exit(1009, 3, 1)
        self.trace_writer.write_softirq_entry(1010, 1, 9)
        self.trace_writer.write_softirq_entry(1011, 3, 7)
        self.trace_writer.write_softirq_exit(1012, 1, 9)
        self.trace_writer.write_softirq_exit(1013, 3, 7)
        self.trace_writer.write_softirq_entry(1014, 3, 9)
        self.trace_writer.write_softirq_exit(1015, 3, 9)
        # Then six identical hard-IRQ cycles on CPU 0 (irq 41 'ahci'),
        # each raising and servicing a BLOCK softirq (vector 4).
        self.trace_writer.write_irq_handler_entry(1016, 0, 41, 'ahci')
        self.trace_writer.write_softirq_raise(1017, 0, 4)
        self.trace_writer.write_irq_handler_exit(1018, 0, 41, 1)
        self.trace_writer.write_softirq_entry(1019, 0, 4)
        self.trace_writer.write_softirq_exit(1020, 0, 4)
        self.trace_writer.write_irq_handler_entry(1021, 0, 41, 'ahci')
        self.trace_writer.write_softirq_raise(1022, 0, 4)
        self.trace_writer.write_irq_handler_exit(1023, 0, 41, 1)
        self.trace_writer.write_softirq_entry(1024, 0, 4)
        self.trace_writer.write_softirq_exit(1025, 0, 4)
        self.trace_writer.write_irq_handler_entry(1026, 0, 41, 'ahci')
        self.trace_writer.write_softirq_raise(1027, 0, 4)
        self.trace_writer.write_irq_handler_exit(1028, 0, 41, 1)
        self.trace_writer.write_softirq_entry(1029, 0, 4)
        self.trace_writer.write_softirq_exit(1030, 0, 4)
        self.trace_writer.write_irq_handler_entry(1031, 0, 41, 'ahci')
        self.trace_writer.write_softirq_raise(1032, 0, 4)
        self.trace_writer.write_irq_handler_exit(1033, 0, 41, 1)
        self.trace_writer.write_softirq_entry(1034, 0, 4)
        self.trace_writer.write_softirq_exit(1035, 0, 4)
        self.trace_writer.write_irq_handler_entry(1036, 0, 41, 'ahci')
        self.trace_writer.write_softirq_raise(1037, 0, 4)
        self.trace_writer.write_irq_handler_exit(1038, 0, 41, 1)
        self.trace_writer.write_softirq_entry(1039, 0, 4)
        self.trace_writer.write_softirq_exit(1040, 0, 4)
        self.trace_writer.write_irq_handler_entry(1041, 0, 41, 'ahci')
        self.trace_writer.write_softirq_raise(1042, 0, 4)
        self.trace_writer.write_irq_handler_exit(1043, 0, 41, 1)
        self.trace_writer.write_softirq_entry(1044, 0, 4)
        self.trace_writer.write_softirq_exit(1045, 0, 4)
        self.trace_writer.flush()

    def test_irqstats(self):
        """lttng-irqstats output must match expected/irqstats.txt."""
        test_name = 'irqstats'
        expected = self.get_expected_output(test_name)
        result = self.get_cmd_output('lttng-irqstats')

        self._assertMultiLineEqual(result, expected, test_name)

    def test_irqlog(self):
        """lttng-irqlog output must match expected/irqlog.txt."""
        test_name = 'irqlog'
        expected = self.get_expected_output(test_name)
        result = self.get_cmd_output('lttng-irqlog')

        self._assertMultiLineEqual(result, expected, test_name)
################################################################################ ████████████████████████████████████████████████████████████████████████████████ 100 B testfile █████████████████████████████████ 42 B unknown (app3) Per-file I/O Write Path ################################################################################ ████████████████████████████████████████████████████████████████████████████████ 10 B unknown (app) Block I/O Read Process ################################################################################ ████████████████████████████████████████████████████████████████████████████████ 5.00 KiB app (pid=99) Block I/O Write Process ################################################################################ ████████████████████████████████████████████████████████████████████████████████ 10.00 KiB app3 (pid=unknown (tid=101)) Disk Requests Sector Count Disk ################################################################################ ████████████████████████████████████████████████████████████████████████████████ 20 sectors (8,0) ████████████████████████████████████████ 10 sectors (252,0) Disk Request Count Disk ################################################################################ ████████████████████████████████████████████████████████████████████████████████ 1 requests (252,0) ████████████████████████████████████████████████████████████████████████████████ 1 requests (8,0) Disk Request Average Latency Disk ################################################################################ ████████████████████████████████████████████████████████████████████████████████ 1.00 ms (252,0) ████████████████████████████████████████████████████████████████████████████████ 1.00 ms (8,0) Network Received Bytes Interface ################################################################################ ████████████████████████████████████████████████████████████████████████████████ 200 B wlan0 ████████████████████████████████████████ 
100 B wlan1 Network Sent Bytes Interface ################################################################################ ████████████████████████████████████████████████████████████████████████████████ 100 B wlan0 0 B wlan1 lttnganalyses-0.4.3/tests/expected/irqlog.txt0000664000175000017500000000414312667420737023077 0ustar mjeansonmjeanson00000000000000Timerange: [1970-01-01 00:00:01.000000000, 1970-01-01 00:00:01.045000000] Begin End Duration (us) CPU Type # Name [00:00:01.007000000, 00:00:01.008000000] 1000.000 1 SoftIRQ 1 TIMER_SOFTIRQ (raised at 00:00:01.000000000) [00:00:01.006000000, 00:00:01.009000000] 3000.000 3 SoftIRQ 1 TIMER_SOFTIRQ (raised at 00:00:01.001000000) [00:00:01.010000000, 00:00:01.012000000] 2000.000 1 SoftIRQ 9 RCU_SOFTIRQ (raised at 00:00:01.002000000) [00:00:01.011000000, 00:00:01.013000000] 2000.000 3 SoftIRQ 7 SCHED_SOFTIRQ (raised at 00:00:01.005000000) [00:00:01.014000000, 00:00:01.015000000] 1000.000 3 SoftIRQ 9 RCU_SOFTIRQ (raised at 00:00:01.004000000) [00:00:01.016000000, 00:00:01.018000000] 2000.000 0 IRQ 41 ahci [00:00:01.019000000, 00:00:01.020000000] 1000.000 0 SoftIRQ 4 BLOCK_SOFTIRQ (raised at 00:00:01.017000000) [00:00:01.021000000, 00:00:01.023000000] 2000.000 0 IRQ 41 ahci [00:00:01.024000000, 00:00:01.025000000] 1000.000 0 SoftIRQ 4 BLOCK_SOFTIRQ (raised at 00:00:01.022000000) [00:00:01.026000000, 00:00:01.028000000] 2000.000 0 IRQ 41 ahci [00:00:01.029000000, 00:00:01.030000000] 1000.000 0 SoftIRQ 4 BLOCK_SOFTIRQ (raised at 00:00:01.027000000) [00:00:01.031000000, 00:00:01.033000000] 2000.000 0 IRQ 41 ahci [00:00:01.034000000, 00:00:01.035000000] 1000.000 0 SoftIRQ 4 BLOCK_SOFTIRQ (raised at 00:00:01.032000000) [00:00:01.036000000, 00:00:01.038000000] 2000.000 0 IRQ 41 ahci [00:00:01.039000000, 00:00:01.040000000] 1000.000 0 SoftIRQ 4 BLOCK_SOFTIRQ (raised at 00:00:01.037000000) [00:00:01.041000000, 00:00:01.043000000] 2000.000 0 IRQ 41 ahci [00:00:01.044000000, 00:00:01.045000000] 1000.000 0 SoftIRQ 4 BLOCK_SOFTIRQ 
(raised at 00:00:01.042000000)lttnganalyses-0.4.3/tests/expected/cputop.txt0000664000175000017500000000305312667420737023113 0ustar mjeansonmjeanson00000000000000Timerange: [1970-01-01 00:00:01.000000000, 1970-01-01 00:00:11.000000000] Per-TID Usage Process Migrations Priorities ################################################################################ ████████████████████████████████████████████████████████████████████████████████ 100.00 % prog100pc-cpu5 (42) 0 [20] ████████████████████ 25.00 % prog25pc-cpu1 (30665) 0 [20] ████████████████ 20.00 % prog20pc-cpu0 (30664) 0 [20] Per-CPU Usage ################################################################################ ████████████████ 20.00 % CPU 0 ████████████████████ 25.00 % CPU 1 ████████████████████████████████████████████████████████████████████████████████ 100.00 % CPU 5 Total CPU Usage: 48.33% lttnganalyses-0.4.3/tests/expected/irqstats.txt0000664000175000017500000000255412667420737023460 0ustar mjeansonmjeanson00000000000000Timerange: [1970-01-01 00:00:01.000000000, 1970-01-01 00:00:01.045000000] Hard IRQ Duration (us) count min avg max stdev ----------------------------------------------------------------------------------| 41: 6 2000.000 2000.000 2000.000 0.000 | Soft IRQ Duration (us) Raise latency (us) count min avg max stdev | count min avg max stdev ----------------------------------------------------------------------------------|------------------------------------------------------------ 1: 2 1000.000 2000.000 3000.000 1414.214 | 2 5000.000 6000.000 7000.000 1414.214 4: 6 1000.000 1000.000 1000.000 0.000 | 6 2000.000 2000.000 2000.000 0.000 7: 1 2000.000 2000.000 2000.000 ? | 1 6000.000 6000.000 6000.000 ? 
9: 2 1000.000 1500.000 2000.000 707.107 | 2 8000.000 9000.000 10000.000 1414.214lttnganalyses-0.4.3/tests/expected/iolatencytop.txt0000664000175000017500000000220712667420737024313 0ustar mjeansonmjeanson00000000000000Timerange: [1970-01-01 00:00:01.000000000, 1970-01-01 00:00:01.024000000] Top system call latencies open (usec) Begin End Name Duration (usec) Size Proc PID Filename [00:00:01.023000000,00:00:01.024000000] open 1000.000 N/A app3 101 test/open/file (fd=42) Top system call latencies read (usec) Begin End Name Duration (usec) Size Proc PID Filename [00:00:01.008000000,00:00:01.009000000] read 1000.000 100 B app2 100 testfile (fd=3) [00:00:01.012000000,00:00:01.013000000] read 1000.000 42 B app3 101 unknown (fd=3) Top system call latencies write (usec) Begin End Name Duration (usec) Size Proc PID Filename [00:00:01.004000000,00:00:01.005000000] write 1000.000 10 B app 99 unknown (fd=4)lttnganalyses-0.4.3/lttng-syscallstats0000775000175000017500000000236612553274232021677 0ustar mjeansonmjeanson00000000000000#!/usr/bin/env python3 # # The MIT License (MIT) # # Copyright (C) 2015 - Julien Desfossez # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
# IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.

from lttnganalyses.cli import syscallstats


def _main():
    # Thin launcher: all of the analysis logic lives in
    # lttnganalyses.cli.syscallstats; this script only dispatches to it.
    syscallstats.run()


if __name__ == '__main__':
    _main()
from lttnganalyses.cli import irq


def _main():
    # Thin launcher for the hard/soft IRQ statistics analysis; the
    # implementation lives in lttnganalyses.cli.irq.
    irq.runstats()


if __name__ == '__main__':
    _main()
from lttnganalyses.cli import io


def _main():
    # Thin launcher for the I/O latency "top" analysis; the
    # implementation lives in lttnganalyses.cli.io.
    io.runlatencytop()


if __name__ == '__main__':
    _main()
"""LTTnganalyses setup script""" from setuptools import setup import versioneer import sys if sys.version_info[0:2] < (3, 4): raise RuntimeError("Python version >= 3.4 required.") if 'install' in sys.argv: try: __import__('babeltrace') except ImportError: print('lttnganalysescli needs the babeltrace package.\n \ See https://www.efficios.com/babeltrace for more info.\n', file=sys.stderr) sys.exit(1) def read_file(filename): """Read all contents of ``filename``.""" with open(filename, encoding='utf-8') as source: return source.read() setup( name='lttnganalyses', version=versioneer.get_version(), cmdclass=versioneer.get_cmdclass(), description='LTTng analyses', long_description=read_file('README.rst'), url='https://github.com/lttng/lttng-analyses', author='Julien Desfossez', author_email='jdesfossez@efficios.com', license='MIT', classifiers=[ 'Development Status :: 4 - Beta', 'Intended Audience :: Developers', 'Intended Audience :: System Administrators', 'Topic :: System :: Monitoring', 'License :: OSI Approved :: MIT License', 'Programming Language :: Python :: 3.4', ], keywords='lttng tracing', packages=[ 'lttnganalyses', 'lttnganalyses.common', 'lttnganalyses.core', 'lttnganalyses.cli', 'lttnganalyses.linuxautomaton' ], entry_points={ 'console_scripts': [ # human-readable output 'lttng-cputop = lttnganalyses.cli.cputop:run', 'lttng-iolatencyfreq = lttnganalyses.cli.io:runfreq', 'lttng-iolatencystats = lttnganalyses.cli.io:runstats', 'lttng-iolatencytop = lttnganalyses.cli.io:runlatencytop', 'lttng-iolog = lttnganalyses.cli.io:runlog', 'lttng-iousagetop = lttnganalyses.cli.io:runusage', 'lttng-irqfreq = lttnganalyses.cli.irq:runfreq', 'lttng-irqlog = lttnganalyses.cli.irq:runlog', 'lttng-irqstats = lttnganalyses.cli.irq:runstats', 'lttng-memtop = lttnganalyses.cli.memtop:run', 'lttng-syscallstats = lttnganalyses.cli.syscallstats:run', 'lttng-schedlog = lttnganalyses.cli.sched:runlog', 'lttng-schedtop = lttnganalyses.cli.sched:runtop', 'lttng-schedstats = 
lttnganalyses.cli.sched:runstats', 'lttng-schedfreq = lttnganalyses.cli.sched:runfreq', # MI mode 'lttng-cputop-mi = lttnganalyses.cli.cputop:run_mi', 'lttng-memtop-mi = lttnganalyses.cli.memtop:run_mi', 'lttng-syscallstats-mi = lttnganalyses.cli.syscallstats:run_mi', 'lttng-irqfreq-mi = lttnganalyses.cli.irq:runfreq_mi', 'lttng-irqlog-mi = lttnganalyses.cli.irq:runlog_mi', 'lttng-irqstats-mi = lttnganalyses.cli.irq:runstats_mi', 'lttng-iolatencyfreq-mi = lttnganalyses.cli.io:runfreq_mi', 'lttng-iolatencystats-mi = lttnganalyses.cli.io:runstats_mi', 'lttng-iolatencytop-mi = lttnganalyses.cli.io:runlatencytop_mi', 'lttng-iolog-mi = lttnganalyses.cli.io:runlog_mi', 'lttng-iousagetop-mi = lttnganalyses.cli.io:runusage_mi', 'lttng-schedlog-mi = lttnganalyses.cli.sched:runlog_mi', 'lttng-schedtop-mi = lttnganalyses.cli.sched:runtop_mi', 'lttng-schedstats-mi = lttnganalyses.cli.sched:runstats_mi', 'lttng-schedfreq-mi = lttnganalyses.cli.sched:runfreq_mi', ], }, scripts=[ 'lttng-analyses-record', 'lttng-track-process' ], extras_require={ 'progressbar': ["progressbar"] }, test_suite='tests', ) lttnganalyses-0.4.3/lttng-iolog0000775000175000017500000000234512665072151020254 0ustar mjeansonmjeanson00000000000000#!/usr/bin/env python3 # # The MIT License (MIT) # # Copyright (C) 2015 - Julien Desfossez # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. 
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.

from lttnganalyses.cli import io


def _main():
    # Thin launcher for the I/O log analysis; the implementation lives
    # in lttnganalyses.cli.io.
    io.runlog()


if __name__ == '__main__':
    _main()
from lttnganalyses.cli import memtop


def _main():
    # Thin launcher for the per-process memory usage analysis; the
    # implementation lives in lttnganalyses.cli.memtop.
    memtop.run()


if __name__ == '__main__':
    _main()
stats * Fix FileStats reset() function * Move _filter_process method to base command class * Make _arg_pid_list list of ints instead of strings * Refactor iotop per file analysis and output * Refactor iotop output methods * Add _print_ascii_graph method to simplify output of graphs * Rename filter predicates to indicate visibility * Remove deprecated breakcb in IO command * Remove unused _compute_stats method from commands * Rename IO command for consistency with other commands * Track FDs chronologically in IO analysis * Add timestamp to create/close FD notifications * Remove dead code from IO cli * Reset FD in IO Analysis * Add support for pwrite* and pread* I/O syscalls * Implement syscall I/O analysis * Move returned_size attribute from SyscallIORequest into ReadWriteIORequest * Send create process and fd notification on statedump events * Send fd create and close notifications on sched events * Fix: send create_fd notification for open io requests * Add OP_READ_WRITE IO operation type for syscalls which both read and write * Use a single method to track io request exits * Refactor/rewrite IO state provider * Refactor syscall analysis to use new SyscallEvent class * Refactor NetStateProvider to use new SyscallEvent and io rq objects * Refactor MemStateProvider to use new SyscallEvent and io rq objects * Remove pending_syscalls array from State class * Refactor statedump provider to track only state and not analysis related attributes * Don't set deprecated parent_pid on FD object * Use SyscallEvent objects in syscall state provider * Remove Syscalls_stats class * Remove analysis related attributes from FD class, add factory to create from open rq * Add get_fd_type method to retrieve fd type from syscall name * Add more IORequest classes, and io_rq attr to SyscallEvent * Set SyscallEvent name using get_syscall_name method * Remove analysis related attributes from Process state class * Add more dup open syscalls, remove generic filenames from SyscallConsts * Fix 
get_syscall_name string indexing * Move IO syscalls handling into separate provider * Strip prefixes from syscall names for brevity * Merge branch 'master' into decouple-io * Merge pull request #20 from abusque/linting * Rename state to _state in providers for consistency * Rename irq start/stop timestamps to begin/end for consistency * Refactor IO Requests mechanism and (block I/O) analysis * Track network usage in IO analysis * Separate syscalls and io analyses * Use del instead of pop when possible with fds and remove unused attributes * Move date args processing to command, more linting * Linting: rename p* to pattern * Linting of common.py and related code * Fix: make the regex strings raw strings * fix for unknown pid in io.py * Fix syscallstats command description method names * Add IO analysis separate from syscalls * Merge pull request #19 from jdesfossez/dev * Fix: process the sched_switch for the swapper * Fix: handle the case of missing PID * Merge pull request #18 from abusque/decouple-cputop * Revert accidental partial commit of syscalls.py * Fix: remove deprecated last_sched attribute from Process class * Fix: remove deprecated cpu_ns attribute from Process class * Refactor cputop cli to work with new analysis module * Implement cputop analysis module * Fix: assign boolean instead of integer values for CLOEXEC * Add class method to duplicate FD objects * Remove non-state related attributes from process and cpu classes * Refactor sched state provider to track current state only * Remove deprecated perf context tracking in sched * Fix: set cloexec on fd from flags on statedump * remove old code (pre 0.1) that was kept as reference for the refactoring * Merge pull request #17 from abusque/decouple-memtop * Minor: fix pep8 style issues * Decouple mem analysis from current state * Rename notification callback methods to reflect public accessibility * Add print date method to base command class * Add reset method to Analysis classes * Merge pull request 
#16 from abusque/decouple-modules * Style: correct pep8 errors * Fix: set cpu id in constructor * Minor: add comment in irq state provider to clarify execptional softirq creation * Style: rename method in memtop for consistency * Fix tracking of softirq_raises and corresponding entries * Fix: don't print raise_ts multiple times in irq log * Simplify irq cli args transform * Refactor IrqAnalysisCommand to work with rewritten analysis * Add reset method to IrqStats * Keep irq list by id and count irq raises * Simplify filter_irq function in CLI * Track CPU id in interrupt objects * Rename irq analysis cli module to IrqAnalysisCommand to avoid ambiguity * Implement filtering by duration for IrqAnalysis * Update copyright info for modified files * Implement initial IrqStats system for analysis * fix: title * new tool to filter a trace based on TID/Procname with follow-child support * Style: replace double quotes by single quotes in lttnganalysescli * Style: replace double quotes by single quotes in lttnganalyses * Style: replace double quotes by single quotes in linuxautomaton * Implement notification for communication from automaton to analyses * Remove superfluous clear_screen string in irq _print_stats * Refactor IRQ state provider and related classes * Remove unused final argument in _print_results in cli * Fix: don't count freed pages twice in memtop, reorganize printing code * Fix: display unkwown for pname/pid in block read/write when we don't have the info * Fix: check that current_tid is not None instead of -1 * Initialize self.state in Command module when creating automaton * Pythonify tests for empty or uninitialized structures and arguments * Use None instead of -1 or 0 for default argument values * Add callback registration to analysis module * Replace usage of -1 as default/invalid value by None * Clean-up mem and sched state providers and related modules. 
* Replace integer logic by boolean value * fix: missing sync in i/o syscalls list * handle sys_accept4 * Merge pull request #15 from abusque/deduplication * Clean-up: dead code removal in linuxautomaton modules * Remove deprecated ret_strings from syscalls.py * Merge pull request #14 from abusque/email-fix * Fix: correct typo in author email address * Remove redundant IOCategory code * Merge pull request #13 from abusque/chrono_fds * Move track chrono fd code into method of Process class * Track files from statedump in chrono_fds * Fix: use event.timestamp instead of event[timestamp_begin] * Track files opened before start of trace in chrono_fds * Track chronological fd metadata * fix override syscall name * test override syscall name for epoll_ctl * show tid value * fix: handle unknown syscall return codes * fix: handle unknown syscall return codes * don't fail if some events are not available lttnganalyses-0.4.3/lttng-track-process0000775000175000017500000175470112553274232021736 0ustar mjeansonmjeanson00000000000000#!/usr/bin/env python3 # # Follow the execution of one or more processes throughout a LTTng trace and # print a textual output similar to Babeltrace. # When using the --procname option, the program tries to find the associated # TID as soon as possible. # The "follow-child" option only works for children started with fork after the # beginning of the trace. # # When invoked without filtering arguments, all the events are displayed and an # additionnal field at the beginning of the line shows the current TID allowing # to easily grep/search in the text dump. # # To handle more events (including UST events), follow the comments below, most # of this file has been autogenerated with parser_generator.py # # Note: unbelievably slow (140x slower than babeltrace), blame python and a lot # of string comparisons, but still much faster than a brain and eyes. 
# # # The MIT License (MIT) # # Copyright (C) 2015 - Julien Desfossez # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. 
import sys import time import argparse NSEC_PER_SEC = 1000000000 try: from babeltrace import TraceCollection except ImportError: # quick fix for debian-based distros sys.path.append("/usr/local/lib/python%d.%d/site-packages" % (sys.version_info.major, sys.version_info.minor)) from babeltrace import TraceCollection class TraceParser: def __init__(self, trace, arg_proc_list, arg_tid_list, arg_follow_child): self.trace = trace self.event_count = {} self.arg_proc_list = arg_proc_list self.arg_tid_list = arg_tid_list self.arg_follow_child = arg_follow_child self.per_cpu_current = {} def ns_to_hour_nsec(self, ns): d = time.localtime(ns/NSEC_PER_SEC) return "%02d:%02d:%02d.%09d" % (d.tm_hour, d.tm_min, d.tm_sec, ns % NSEC_PER_SEC) def check_procname(self, name, tid): if self.arg_proc_list is None: return if name in self.arg_proc_list: if self.arg_tid_list is None: self.arg_tid_list = [] if not tid in self.arg_tid_list: self.arg_tid_list.append(int(tid)) def tid_check(self, tid): if self.arg_tid_list is not None and tid in self.arg_tid_list: return True return False def filter_event(self, event): # no filtering if self.arg_tid_list is None and self.arg_proc_list is None: return True # we don't know yet the PID we are interested in (match procname - pid) if self.arg_tid_list is None: return False cpu_id = event["cpu_id"] if not cpu_id in self.per_cpu_current.keys(): return False return self.tid_check(self.per_cpu_current[cpu_id]) def get_tid_str(self, event): cpu_id = event["cpu_id"] if not cpu_id in self.per_cpu_current.keys(): tid = "?" 
else: tid = self.per_cpu_current[cpu_id] return "[{:>6}]".format(tid) def print_filter(self, event, string): if event.name.startswith("lttng_statedump"): if "tid" in event.keys(): if not self.tid_check(event["tid"]): return elif "pid" in event.keys(): if not self.tid_check(event["pid"]): return else: return elif not self.filter_event(event): return print(self.get_tid_str(event), string) def handle_special_events(self, event): # events that need some mangling/processing cpu_id = event["cpu_id"] if event.name == "sched_switch": timestamp = event.timestamp cpu_id = event["cpu_id"] prev_comm = event["prev_comm"] prev_tid = event["prev_tid"] prev_prio = event["prev_prio"] prev_state = event["prev_state"] next_comm = event["next_comm"] next_tid = event["next_tid"] next_prio = event["next_prio"] if cpu_id not in self.per_cpu_current.keys(): self.per_cpu_current[cpu_id] = next_tid else: self.per_cpu_current[cpu_id] = next_tid # we want to see the scheduling out if self.tid_check(prev_tid): print(self.get_tid_str(event), "[%s] %s: { cpu_id = %s }, { prev_comm = " "%s, prev_tid = %s, prev_prio = %s, prev_state = %s, " "next_comm = %s, next_tid = %s, next_prio = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, prev_comm, prev_tid, prev_prio, prev_state, next_comm, next_tid, next_prio,)) elif event.name == "sched_process_exec": tid = event["tid"] filename = event["filename"] name = filename.split("/")[-1] self.check_procname(name, tid) elif event.name == "syscall_entry_execve": if not cpu_id in self.per_cpu_current.keys(): return tid = self.per_cpu_current[cpu_id] filename = event["filename"] name = filename.split("/")[-1] self.check_procname(name, tid) elif event.name == "sched_process_fork" and self.arg_follow_child: pt = event["parent_tid"] pc = event["parent_comm"] ct = event["child_tid"] cc = event["child_comm"] if self.tid_check(pt) and ct not in self.arg_tid_list: self.arg_tid_list.append(ct) if self.arg_proc_list is not None and pc in self.arg_proc_list: 
self.arg_tid_list.append(ct) def parse(self): # iterate over all the events for event in self.trace.events: method_name = "handle_%s" % event.name.replace(":", "_").replace("+", "_") # call the function to handle each event individually if "comm" in event.keys() and "tid" in event.keys(): self.check_procname(event["comm"], event["tid"]) elif "name" in event.keys() and "tid" in event.keys(): self.check_procname(event["name"], event["tid"]) elif "next_comm" in event.keys() and "next_tid" in event.keys(): self.check_procname(event["next_comm"], event["next_tid"]) elif "prev_comm" in event.keys() and "prev_tid" in event.keys(): self.check_procname(event["prev_comm"], event["prev_tid"]) elif "parent_comm" in event.keys() and "parent_tid" in event.keys(): self.check_procname(event["parent_comm"], event["parent_tid"]) elif "child_comm" in event.keys() and "child_tid" in event.keys(): self.check_procname(event["child_comm"], event["child_tid"]) self.handle_special_events(event) if hasattr(TraceParser, method_name): func = getattr(TraceParser, method_name) func(self, event) # everything between here and the end of the class has been generated # with parser_generator.py on a trace with all kernel events enabled # and transformed with: # :%s/print("\[%s\]/self.print_filter(event, "[%s]/g # :%s/self.event_count\[event.name\] += 1\n// # :%s/ self.print_filter/ self.print_filter/g def handle_compat_syscall_exit_setns(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_compat_syscall_exit_sendmmsg(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_compat_syscall_exit_syncfs(self, event): timestamp = event.timestamp cpu_id = 
event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_compat_syscall_exit_clock_adjtime(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] utx = event["utx"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s, utx = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret, utx,)) def handle_compat_syscall_exit_prlimit64(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] old_rlim = event["old_rlim"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s, old_rlim = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret, old_rlim,)) def handle_compat_syscall_exit_fanotify_init(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_compat_syscall_exit_recvmmsg(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] mmsg = event["mmsg"] timeout = event["timeout"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s, mmsg = %s, timeout = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret, mmsg, timeout,)) def handle_compat_syscall_exit_perf_event_open(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_compat_syscall_exit_rt_tgsigqueueinfo(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_compat_syscall_exit_pwritev(self, event): timestamp = event.timestamp cpu_id = 
event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_compat_syscall_exit_preadv(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] vec = event["vec"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s, vec = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret, vec,)) def handle_compat_syscall_exit_inotify_init1(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_compat_syscall_exit_pipe2(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] fildes = event["fildes"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s, fildes = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret, fildes,)) def handle_compat_syscall_exit_dup3(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_compat_syscall_exit_epoll_create1(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_compat_syscall_exit_eventfd2(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_compat_syscall_exit_signalfd4(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % 
(self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_compat_syscall_exit_timerfd_gettime(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] otmr = event["otmr"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s, otmr = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret, otmr,)) def handle_compat_syscall_exit_timerfd_settime(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] otmr = event["otmr"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s, otmr = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret, otmr,)) def handle_compat_syscall_exit_eventfd(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_compat_syscall_exit_timerfd_create(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_compat_syscall_exit_signalfd(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_compat_syscall_exit_utimensat(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_compat_syscall_exit_epoll_pwait(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] events = event["events"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s, events = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret, events,)) def 
handle_compat_syscall_exit_getcpu(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] cpup = event["cpup"] nodep = event["nodep"] tcache = event["tcache"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s, cpup = %s, nodep = %s, tcache = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret, cpup, nodep, tcache,)) def handle_compat_syscall_exit_vmsplice(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_compat_syscall_exit_tee(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_compat_syscall_exit_splice(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_compat_syscall_exit_get_robust_list(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] head_ptr = event["head_ptr"] len_ptr = event["len_ptr"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s, head_ptr = %s, len_ptr = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret, head_ptr, len_ptr,)) def handle_compat_syscall_exit_set_robust_list(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_compat_syscall_exit_unshare(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, 
cpu_id, ret,)) def handle_compat_syscall_exit_ppoll(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] ufds = event["ufds"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s, ufds = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret, ufds,)) def handle_compat_syscall_exit_pselect6(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] inp = event["inp"] outp = event["outp"] exp = event["exp"] tsp = event["tsp"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s, inp = %s, outp = %s, exp = %s, tsp = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret, inp, outp, exp, tsp,)) def handle_compat_syscall_exit_faccessat(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_compat_syscall_exit_fchmodat(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_compat_syscall_exit_readlinkat(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] buf = event["buf"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s, buf = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret, buf,)) def handle_compat_syscall_exit_symlinkat(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_compat_syscall_exit_linkat(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, 
cpu_id, ret,)) def handle_compat_syscall_exit_renameat(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_compat_syscall_exit_unlinkat(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_compat_syscall_exit_fstatat64(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] dfd = event["dfd"] filename = event["filename"] statbuf = event["statbuf"] flag = event["flag"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s, dfd = %s, filename = %s, statbuf = %s, flag = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret, dfd, filename, statbuf, flag,)) def handle_compat_syscall_exit_futimesat(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_compat_syscall_exit_fchownat(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_compat_syscall_exit_mknodat(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_compat_syscall_exit_mkdirat(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def 
handle_compat_syscall_exit_openat(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_compat_syscall_exit_inotify_rm_watch(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_compat_syscall_exit_inotify_add_watch(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_compat_syscall_exit_inotify_init(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_compat_syscall_exit_ioprio_get(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_compat_syscall_exit_ioprio_set(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_compat_syscall_exit_keyctl(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] arg2 = event["arg2"] arg3 = event["arg3"] arg4 = event["arg4"] arg5 = event["arg5"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s, arg2 = %s, arg3 = %s, arg4 = %s, arg5 = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret, arg2, arg3, arg4, arg5,)) def 
handle_compat_syscall_exit_request_key(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_compat_syscall_exit_add_key(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_compat_syscall_exit_waitid(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] infop = event["infop"] ru = event["ru"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s, infop = %s, ru = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret, infop, ru,)) def handle_compat_syscall_exit_kexec_load(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_compat_syscall_exit_mq_getsetattr(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] u_omqstat = event["u_omqstat"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s, u_omqstat = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret, u_omqstat,)) def handle_compat_syscall_exit_mq_notify(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_compat_syscall_exit_mq_timedreceive(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] u_msg_ptr = event["u_msg_ptr"] u_msg_prio = event["u_msg_prio"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s, u_msg_ptr = %s, u_msg_prio = %s }" % (self.ns_to_hour_nsec(timestamp), 
event.name, cpu_id, ret, u_msg_ptr, u_msg_prio,)) def handle_compat_syscall_exit_mq_timedsend(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_compat_syscall_exit_mq_unlink(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_compat_syscall_exit_mq_open(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_compat_syscall_exit_utimes(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_compat_syscall_exit_tgkill(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_compat_syscall_exit_fstatfs64(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] fd = event["fd"] sz = event["sz"] buf = event["buf"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s, fd = %s, sz = %s, buf = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret, fd, sz, buf,)) def handle_compat_syscall_exit_statfs64(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] pathname = event["pathname"] sz = event["sz"] buf = event["buf"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s, pathname = %s, sz = %s, buf = %s }" % 
(self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret, pathname, sz, buf,)) def handle_compat_syscall_exit_clock_nanosleep(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] rmtp = event["rmtp"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s, rmtp = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret, rmtp,)) def handle_compat_syscall_exit_clock_getres(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] tp = event["tp"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s, tp = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret, tp,)) def handle_compat_syscall_exit_clock_gettime(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] tp = event["tp"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s, tp = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret, tp,)) def handle_compat_syscall_exit_clock_settime(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_compat_syscall_exit_timer_delete(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_compat_syscall_exit_timer_getoverrun(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_compat_syscall_exit_timer_gettime(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] setting = event["setting"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s, setting = %s }" % 
(self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret, setting,)) def handle_compat_syscall_exit_timer_settime(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] old_setting = event["old_setting"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s, old_setting = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret, old_setting,)) def handle_compat_syscall_exit_timer_create(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] created_timer_id = event["created_timer_id"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s, created_timer_id = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret, created_timer_id,)) def handle_compat_syscall_exit_set_tid_address(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_compat_syscall_exit_remap_file_pages(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_compat_syscall_exit_epoll_wait(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] events = event["events"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s, events = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret, events,)) def handle_compat_syscall_exit_epoll_ctl(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_compat_syscall_exit_epoll_create(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id 
= %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_compat_syscall_exit_exit_group(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_compat_syscall_exit_io_cancel(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] result = event["result"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s, result = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret, result,)) def handle_compat_syscall_exit_io_submit(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_compat_syscall_exit_io_getevents(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] events = event["events"] timeout = event["timeout"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s, events = %s, timeout = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret, events, timeout,)) def handle_compat_syscall_exit_io_destroy(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_compat_syscall_exit_io_setup(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_compat_syscall_exit_sched_getaffinity(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] user_mask_ptr = event["user_mask_ptr"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret 
= %s, user_mask_ptr = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret, user_mask_ptr,)) def handle_compat_syscall_exit_sched_setaffinity(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_compat_syscall_exit_futex(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] uaddr = event["uaddr"] uaddr2 = event["uaddr2"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s, uaddr = %s, uaddr2 = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret, uaddr, uaddr2,)) def handle_compat_syscall_exit_sendfile64(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] offset = event["offset"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s, offset = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret, offset,)) def handle_compat_syscall_exit_tkill(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_compat_syscall_exit_fremovexattr(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_compat_syscall_exit_lremovexattr(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_compat_syscall_exit_removexattr(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % 
(self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_compat_syscall_exit_flistxattr(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] list = event["list"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s, list = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret, list,)) def handle_compat_syscall_exit_llistxattr(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] list = event["list"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s, list = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret, list,)) def handle_compat_syscall_exit_listxattr(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] list = event["list"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s, list = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret, list,)) def handle_compat_syscall_exit_fgetxattr(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] value = event["value"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s, value = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret, value,)) def handle_compat_syscall_exit_lgetxattr(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] value = event["value"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s, value = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret, value,)) def handle_compat_syscall_exit_getxattr(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] value = event["value"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s, value = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret, value,)) def handle_compat_syscall_exit_fsetxattr(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: 
{ cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_compat_syscall_exit_lsetxattr(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_compat_syscall_exit_setxattr(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_compat_syscall_exit_gettid(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_compat_syscall_exit_fcntl64(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] fd = event["fd"] cmd = event["cmd"] arg = event["arg"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s, fd = %s, cmd = %s, arg = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret, fd, cmd, arg,)) def handle_compat_syscall_exit_getdents64(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] dirent = event["dirent"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s, dirent = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret, dirent,)) def handle_compat_syscall_exit_madvise(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_compat_syscall_exit_mincore(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] vec = event["vec"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s, vec = %s }" % 
(self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret, vec,)) def handle_compat_syscall_exit_pivot_root(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_compat_syscall_exit_setfsgid(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_compat_syscall_exit_setfsuid(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_compat_syscall_exit_setgid(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_compat_syscall_exit_setuid(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_compat_syscall_exit_chown(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_compat_syscall_exit_getresgid(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] rgid = event["rgid"] egid = event["egid"] sgid = event["sgid"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s, rgid = %s, egid = %s, sgid = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret, rgid, egid, sgid,)) def 
handle_compat_syscall_exit_setresgid(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_compat_syscall_exit_getresuid(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] ruid = event["ruid"] euid = event["euid"] suid = event["suid"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s, ruid = %s, euid = %s, suid = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret, ruid, euid, suid,)) def handle_compat_syscall_exit_setresuid(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_compat_syscall_exit_fchown(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_compat_syscall_exit_setgroups(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_compat_syscall_exit_getgroups(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] grouplist = event["grouplist"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s, grouplist = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret, grouplist,)) def handle_compat_syscall_exit_setregid(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def 
handle_compat_syscall_exit_setreuid(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_compat_syscall_exit_getegid(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_compat_syscall_exit_geteuid(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_compat_syscall_exit_getgid(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_compat_syscall_exit_getuid(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_compat_syscall_exit_lchown(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_compat_syscall_exit_fstat64(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] fd = event["fd"] statbuf = event["statbuf"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s, fd = %s, statbuf = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret, fd, statbuf,)) def handle_compat_syscall_exit_lstat64(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] filename = 
event["filename"] statbuf = event["statbuf"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s, filename = %s, statbuf = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret, filename, statbuf,)) def handle_compat_syscall_exit_stat64(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] filename = event["filename"] statbuf = event["statbuf"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s, filename = %s, statbuf = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret, filename, statbuf,)) def handle_compat_syscall_exit_mmap_pgoff(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] addr = event["addr"] len = event["len"] prot = event["prot"] flags = event["flags"] fd = event["fd"] pgoff = event["pgoff"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s, addr = %s, len = %s, prot = %s, flags = %s, fd = %s, pgoff = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret, addr, len, prot, flags, fd, pgoff,)) def handle_compat_syscall_exit_getrlimit(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] rlim = event["rlim"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s, rlim = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret, rlim,)) def handle_compat_syscall_exit_sendfile(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] out_fd = event["out_fd"] in_fd = event["in_fd"] offset = event["offset"] count = event["count"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s, out_fd = %s, in_fd = %s, offset = %s, count = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret, out_fd, in_fd, offset, count,)) def handle_compat_syscall_exit_getcwd(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] buf = event["buf"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s, buf = 
%s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret, buf,)) def handle_compat_syscall_exit_chown16(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] filename = event["filename"] user = event["user"] group = event["group"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s, filename = %s, user = %s, group = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret, filename, user, group,)) def handle_compat_syscall_exit_rt_sigsuspend(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_compat_syscall_exit_rt_sigqueueinfo(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_compat_syscall_exit_rt_sigtimedwait(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] uthese = event["uthese"] uinfo = event["uinfo"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s, uthese = %s, uinfo = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret, uthese, uinfo,)) def handle_compat_syscall_exit_rt_sigpending(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] set = event["set"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s, set = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret, set,)) def handle_compat_syscall_exit_rt_sigprocmask(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] oset = event["oset"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s, oset = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret, oset,)) def handle_compat_syscall_exit_rt_sigaction(self, event): timestamp = 
event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] oact = event["oact"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s, oact = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret, oact,)) def handle_compat_syscall_exit_prctl(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] arg2 = event["arg2"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s, arg2 = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret, arg2,)) def handle_compat_syscall_exit_getresgid16(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] rgid = event["rgid"] egid = event["egid"] sgid = event["sgid"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s, rgid = %s, egid = %s, sgid = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret, rgid, egid, sgid,)) def handle_compat_syscall_exit_setresgid16(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] rgid = event["rgid"] egid = event["egid"] sgid = event["sgid"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s, rgid = %s, egid = %s, sgid = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret, rgid, egid, sgid,)) def handle_compat_syscall_exit_poll(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] ufds = event["ufds"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s, ufds = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret, ufds,)) def handle_compat_syscall_exit_getresuid16(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] ruid = event["ruid"] euid = event["euid"] suid = event["suid"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s, ruid = %s, euid = %s, suid = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret, ruid, euid, suid,)) def handle_compat_syscall_exit_setresuid16(self, event): timestamp = 
event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] ruid = event["ruid"] euid = event["euid"] suid = event["suid"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s, ruid = %s, euid = %s, suid = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret, ruid, euid, suid,)) def handle_compat_syscall_exit_mremap(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_compat_syscall_exit_nanosleep(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] rmtp = event["rmtp"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s, rmtp = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret, rmtp,)) def handle_compat_syscall_exit_sched_rr_get_interval(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] interval = event["interval"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s, interval = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret, interval,)) def handle_compat_syscall_exit_sched_get_priority_min(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_compat_syscall_exit_sched_get_priority_max(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_compat_syscall_exit_sched_yield(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def 
handle_compat_syscall_exit_sched_getscheduler(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_compat_syscall_exit_sched_setscheduler(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_compat_syscall_exit_sched_getparam(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] param = event["param"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s, param = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret, param,)) def handle_compat_syscall_exit_sched_setparam(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_compat_syscall_exit_munlockall(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_compat_syscall_exit_mlockall(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_compat_syscall_exit_munlock(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_compat_syscall_exit_mlock(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] 
self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_compat_syscall_exit_sysctl(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] args = event["args"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s, args = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret, args,)) def handle_compat_syscall_exit_fdatasync(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_compat_syscall_exit_getsid(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_compat_syscall_exit_writev(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] vec = event["vec"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s, vec = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret, vec,)) def handle_compat_syscall_exit_readv(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] vec = event["vec"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s, vec = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret, vec,)) def handle_compat_syscall_exit_msync(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_compat_syscall_exit_flock(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, 
ret,)) def handle_compat_syscall_exit_select(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] inp = event["inp"] outp = event["outp"] exp = event["exp"] tvp = event["tvp"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s, inp = %s, outp = %s, exp = %s, tvp = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret, inp, outp, exp, tvp,)) def handle_compat_syscall_exit_getdents(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] dirent = event["dirent"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s, dirent = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret, dirent,)) def handle_compat_syscall_exit_llseek(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] fd = event["fd"] offset_high = event["offset_high"] offset_low = event["offset_low"] result = event["result"] origin = event["origin"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s, fd = %s, offset_high = %s, offset_low = %s, result = %s, origin = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret, fd, offset_high, offset_low, result, origin,)) def handle_compat_syscall_exit_setfsgid16(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] gid = event["gid"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s, gid = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret, gid,)) def handle_compat_syscall_exit_setfsuid16(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] uid = event["uid"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s, uid = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret, uid,)) def handle_compat_syscall_exit_personality(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % 
(self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_compat_syscall_exit_sysfs(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_compat_syscall_exit_bdflush(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] func = event["func"] data = event["data"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s, func = %s, data = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret, func, data,)) def handle_compat_syscall_exit_fchdir(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_compat_syscall_exit_getpgid(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_compat_syscall_exit_quotactl(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] addr = event["addr"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s, addr = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret, addr,)) def handle_compat_syscall_exit_delete_module(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_compat_syscall_exit_init_module(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def 
handle_compat_syscall_exit_sigprocmask(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] how = event["how"] nset = event["nset"] oset = event["oset"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s, how = %s, nset = %s, oset = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret, how, nset, oset,)) def handle_compat_syscall_exit_mprotect(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_compat_syscall_exit_adjtimex(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] txc_p = event["txc_p"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s, txc_p = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret, txc_p,)) def handle_compat_syscall_exit_newuname(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] name = event["name"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s, name = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret, name,)) def handle_compat_syscall_exit_setdomainname(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_compat_syscall_exit_clone(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_compat_syscall_exit_fsync(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def 
handle_compat_syscall_exit_ipc(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] call = event["call"] first = event["first"] second = event["second"] third = event["third"] ptr = event["ptr"] fifth = event["fifth"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s, call = %s, first = %s, second = %s, third = %s, ptr = %s, fifth = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret, call, first, second, third, ptr, fifth,)) def handle_compat_syscall_exit_sysinfo(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] info = event["info"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s, info = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret, info,)) def handle_compat_syscall_exit_swapoff(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_compat_syscall_exit_wait4(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] stat_addr = event["stat_addr"] ru = event["ru"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s, stat_addr = %s, ru = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret, stat_addr, ru,)) def handle_compat_syscall_exit_vhangup(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_compat_syscall_exit_uname(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] name = event["name"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s, name = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret, name,)) def handle_compat_syscall_exit_newfstat(self, event): timestamp = 
event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] statbuf = event["statbuf"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s, statbuf = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret, statbuf,)) def handle_compat_syscall_exit_newlstat(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] statbuf = event["statbuf"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s, statbuf = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret, statbuf,)) def handle_compat_syscall_exit_newstat(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] statbuf = event["statbuf"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s, statbuf = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret, statbuf,)) def handle_compat_syscall_exit_getitimer(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] value = event["value"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s, value = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret, value,)) def handle_compat_syscall_exit_setitimer(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] ovalue = event["ovalue"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s, ovalue = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret, ovalue,)) def handle_compat_syscall_exit_syslog(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] buf = event["buf"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s, buf = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret, buf,)) def handle_compat_syscall_exit_socketcall(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] call = event["call"] args = event["args"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s, call = %s, args = %s }" % 
# NOTE(review): this region was recovered from a whitespace-mangled dump in
# which many one-per-syscall handler methods had been collapsed onto a few
# physical lines.  The handlers below restate the same behavior in valid
# Python, deduplicated through a single private formatting helper; every
# public handler name and every emitted string is preserved exactly.
# Two truncated fragments at the edges of the recovered region (the tail of
# the handler preceding this span, and the opening of
# handle_compat_syscall_entry_inotify_add_watch) could not be reconstructed
# from what is visible here and were dropped rather than guessed at.

def _print_compat_syscall(self, event, fields):
    """Format and print one compat syscall entry/exit event.

    event:  babeltrace-style event exposing .timestamp, .name and
            dict-style access to its payload fields.
    fields: ordered names of the payload fields to render after the
            cpu_id group, as "name = value" pairs.
    """
    payload = ', '.join('%s = %s' % (field, event[field]) for field in fields)
    self.print_filter(event, '[%s] %s: { cpu_id = %s }, { %s }' %
                      (self.ns_to_hour_nsec(event.timestamp), event.name,
                       event['cpu_id'], payload))

# --- compat syscall exit handlers (payload always starts with 'ret') ---

def handle_compat_syscall_exit_fstatfs(self, event):
    self._print_compat_syscall(event, ('ret', 'buf'))

def handle_compat_syscall_exit_statfs(self, event):
    self._print_compat_syscall(event, ('ret', 'buf'))

def handle_compat_syscall_exit_setpriority(self, event):
    self._print_compat_syscall(event, ('ret',))

def handle_compat_syscall_exit_getpriority(self, event):
    self._print_compat_syscall(event, ('ret',))

def handle_compat_syscall_exit_fchown16(self, event):
    self._print_compat_syscall(event, ('ret', 'fd', 'user', 'group'))

def handle_compat_syscall_exit_fchmod(self, event):
    self._print_compat_syscall(event, ('ret',))

def handle_compat_syscall_exit_ftruncate(self, event):
    self._print_compat_syscall(event, ('ret',))

def handle_compat_syscall_exit_truncate(self, event):
    self._print_compat_syscall(event, ('ret',))

def handle_compat_syscall_exit_munmap(self, event):
    self._print_compat_syscall(event, ('ret',))

def handle_compat_syscall_exit_old_mmap(self, event):
    self._print_compat_syscall(event, ('ret', 'arg'))

def handle_compat_syscall_exit_old_readdir(self, event):
    self._print_compat_syscall(event, ('ret', 'fd', 'dirent', 'count'))

def handle_compat_syscall_exit_reboot(self, event):
    self._print_compat_syscall(event, ('ret',))

def handle_compat_syscall_exit_swapon(self, event):
    self._print_compat_syscall(event, ('ret',))

def handle_compat_syscall_exit_uselib(self, event):
    self._print_compat_syscall(event, ('ret', 'library'))

def handle_compat_syscall_exit_readlink(self, event):
    self._print_compat_syscall(event, ('ret', 'buf'))

def handle_compat_syscall_exit_lstat(self, event):
    self._print_compat_syscall(event, ('ret', 'filename', 'statbuf'))

def handle_compat_syscall_exit_symlink(self, event):
    self._print_compat_syscall(event, ('ret',))

def handle_compat_syscall_exit_old_select(self, event):
    self._print_compat_syscall(event, ('ret', 'arg'))

def handle_compat_syscall_exit_setgroups16(self, event):
    self._print_compat_syscall(event, ('ret', 'gidsetsize', 'grouplist'))

def handle_compat_syscall_exit_getgroups16(self, event):
    self._print_compat_syscall(event, ('ret', 'gidsetsize', 'grouplist'))

def handle_compat_syscall_exit_settimeofday(self, event):
    self._print_compat_syscall(event, ('ret',))

def handle_compat_syscall_exit_gettimeofday(self, event):
    self._print_compat_syscall(event, ('ret', 'tv', 'tz'))

def handle_compat_syscall_exit_getrusage(self, event):
    self._print_compat_syscall(event, ('ret', 'ru'))

def handle_compat_syscall_exit_old_getrlimit(self, event):
    self._print_compat_syscall(event, ('ret', 'resource', 'rlim'))

def handle_compat_syscall_exit_setrlimit(self, event):
    self._print_compat_syscall(event, ('ret',))

def handle_compat_syscall_exit_sethostname(self, event):
    self._print_compat_syscall(event, ('ret',))

def handle_compat_syscall_exit_sigpending(self, event):
    self._print_compat_syscall(event, ('ret', 'set'))

def handle_compat_syscall_exit_setregid16(self, event):
    self._print_compat_syscall(event, ('ret', 'rgid', 'egid'))

def handle_compat_syscall_exit_setreuid16(self, event):
    self._print_compat_syscall(event, ('ret', 'ruid', 'euid'))

def handle_compat_syscall_exit_ssetmask(self, event):
    self._print_compat_syscall(event, ('ret', 'newmask'))

def handle_compat_syscall_exit_sgetmask(self, event):
    self._print_compat_syscall(event, ('ret',))

def handle_compat_syscall_exit_setsid(self, event):
    self._print_compat_syscall(event, ('ret',))

def handle_compat_syscall_exit_getpgrp(self, event):
    self._print_compat_syscall(event, ('ret',))

def handle_compat_syscall_exit_getppid(self, event):
    self._print_compat_syscall(event, ('ret',))

def handle_compat_syscall_exit_dup2(self, event):
    self._print_compat_syscall(event, ('ret',))

def handle_compat_syscall_exit_ustat(self, event):
    self._print_compat_syscall(event, ('ret', 'ubuf'))

def handle_compat_syscall_exit_chroot(self, event):
    self._print_compat_syscall(event, ('ret',))

def handle_compat_syscall_exit_umask(self, event):
    self._print_compat_syscall(event, ('ret',))

def handle_compat_syscall_exit_olduname(self, event):
    self._print_compat_syscall(event, ('ret', 'name'))

def handle_compat_syscall_exit_setpgid(self, event):
    self._print_compat_syscall(event, ('ret',))

def handle_compat_syscall_exit_fcntl(self, event):
    self._print_compat_syscall(event, ('ret', 'arg'))

def handle_compat_syscall_exit_ioctl(self, event):
    self._print_compat_syscall(event, ('ret', 'arg'))

def handle_compat_syscall_exit_umount(self, event):
    self._print_compat_syscall(event, ('ret',))

def handle_compat_syscall_exit_acct(self, event):
    self._print_compat_syscall(event, ('ret',))

def handle_compat_syscall_exit_getegid16(self, event):
    self._print_compat_syscall(event, ('ret',))

def handle_compat_syscall_exit_geteuid16(self, event):
    self._print_compat_syscall(event, ('ret',))

def handle_compat_syscall_exit_signal(self, event):
    self._print_compat_syscall(event, ('ret', 'sig', 'handler'))

def handle_compat_syscall_exit_getgid16(self, event):
    self._print_compat_syscall(event, ('ret',))

def handle_compat_syscall_exit_setgid16(self, event):
    self._print_compat_syscall(event, ('ret', 'gid'))

def handle_compat_syscall_exit_brk(self, event):
    self._print_compat_syscall(event, ('ret',))

def handle_compat_syscall_exit_times(self, event):
    self._print_compat_syscall(event, ('ret', 'tbuf'))

def handle_compat_syscall_exit_pipe(self, event):
    self._print_compat_syscall(event, ('ret', 'fildes'))

def handle_compat_syscall_exit_dup(self, event):
    self._print_compat_syscall(event, ('ret',))

def handle_compat_syscall_exit_rmdir(self, event):
    self._print_compat_syscall(event, ('ret',))

def handle_compat_syscall_exit_mkdir(self, event):
    self._print_compat_syscall(event, ('ret',))

def handle_compat_syscall_exit_rename(self, event):
    self._print_compat_syscall(event, ('ret',))

def handle_compat_syscall_exit_kill(self, event):
    self._print_compat_syscall(event, ('ret',))

def handle_compat_syscall_exit_sync(self, event):
    self._print_compat_syscall(event, ('ret',))

def handle_compat_syscall_exit_nice(self, event):
    self._print_compat_syscall(event, ('ret', 'increment'))

def handle_compat_syscall_exit_access(self, event):
    self._print_compat_syscall(event, ('ret',))

def handle_compat_syscall_exit_utime(self, event):
    self._print_compat_syscall(event, ('ret',))

def handle_compat_syscall_exit_pause(self, event):
    self._print_compat_syscall(event, ('ret',))

def handle_compat_syscall_exit_fstat(self, event):
    self._print_compat_syscall(event, ('ret', 'fd', 'statbuf'))

def handle_compat_syscall_exit_alarm(self, event):
    self._print_compat_syscall(event, ('ret',))

def handle_compat_syscall_exit_ptrace(self, event):
    self._print_compat_syscall(event, ('ret', 'addr', 'data'))

def handle_compat_syscall_exit_stime(self, event):
    self._print_compat_syscall(event, ('ret', 'tptr'))

def handle_compat_syscall_exit_getuid16(self, event):
    self._print_compat_syscall(event, ('ret',))

def handle_compat_syscall_exit_setuid16(self, event):
    self._print_compat_syscall(event, ('ret', 'uid'))

def handle_compat_syscall_exit_oldumount(self, event):
    self._print_compat_syscall(event, ('ret', 'name'))

def handle_compat_syscall_exit_mount(self, event):
    self._print_compat_syscall(event, ('ret',))

def handle_compat_syscall_exit_getpid(self, event):
    self._print_compat_syscall(event, ('ret',))

def handle_compat_syscall_exit_lseek(self, event):
    self._print_compat_syscall(event, ('ret',))

def handle_compat_syscall_exit_stat(self, event):
    self._print_compat_syscall(event, ('ret', 'filename', 'statbuf'))

def handle_compat_syscall_exit_lchown16(self, event):
    self._print_compat_syscall(event, ('ret', 'filename', 'user', 'group'))

def handle_compat_syscall_exit_chmod(self, event):
    self._print_compat_syscall(event, ('ret',))

def handle_compat_syscall_exit_mknod(self, event):
    self._print_compat_syscall(event, ('ret',))

def handle_compat_syscall_exit_time(self, event):
    self._print_compat_syscall(event, ('ret', 'tloc'))

def handle_compat_syscall_exit_chdir(self, event):
    self._print_compat_syscall(event, ('ret',))

def handle_compat_syscall_exit_execve(self, event):
    self._print_compat_syscall(event, ('ret',))

def handle_compat_syscall_exit_unlink(self, event):
    self._print_compat_syscall(event, ('ret',))

def handle_compat_syscall_exit_link(self, event):
    self._print_compat_syscall(event, ('ret',))

def handle_compat_syscall_exit_creat(self, event):
    self._print_compat_syscall(event, ('ret',))

def handle_compat_syscall_exit_waitpid(self, event):
    self._print_compat_syscall(event, ('ret', 'pid', 'stat_addr', 'options'))

def handle_compat_syscall_exit_close(self, event):
    self._print_compat_syscall(event, ('ret',))

def handle_compat_syscall_exit_open(self, event):
    self._print_compat_syscall(event, ('ret',))

def handle_compat_syscall_exit_write(self, event):
    self._print_compat_syscall(event, ('ret',))

def handle_compat_syscall_exit_read(self, event):
    self._print_compat_syscall(event, ('ret', 'buf'))

def handle_compat_syscall_exit_exit(self, event):
    self._print_compat_syscall(event, ('ret',))

def handle_compat_syscall_exit_restart_syscall(self, event):
    self._print_compat_syscall(event, ('ret',))

# --- compat syscall entry handlers ---

def handle_compat_syscall_entry_setns(self, event):
    self._print_compat_syscall(event, ('fd', 'nstype'))

def handle_compat_syscall_entry_sendmmsg(self, event):
    self._print_compat_syscall(event, ('fd', 'mmsg', 'vlen', 'flags'))

def handle_compat_syscall_entry_syncfs(self, event):
    self._print_compat_syscall(event, ('fd',))

def handle_compat_syscall_entry_clock_adjtime(self, event):
    self._print_compat_syscall(event, ('which_clock', 'utx'))

def handle_compat_syscall_entry_prlimit64(self, event):
    self._print_compat_syscall(event, ('pid', 'resource', 'new_rlim'))

def handle_compat_syscall_entry_fanotify_init(self, event):
    self._print_compat_syscall(event, ('flags', 'event_f_flags'))

def handle_compat_syscall_entry_recvmmsg(self, event):
    self._print_compat_syscall(event, ('fd', 'vlen', 'flags', 'timeout'))

def handle_compat_syscall_entry_perf_event_open(self, event):
    self._print_compat_syscall(
        event, ('attr_uptr', 'pid', 'cpu', 'group_fd', 'flags'))

def handle_compat_syscall_entry_rt_tgsigqueueinfo(self, event):
    self._print_compat_syscall(event, ('tgid', 'pid', 'sig', 'uinfo'))

def handle_compat_syscall_entry_pwritev(self, event):
    self._print_compat_syscall(event, ('fd', 'vec', 'vlen', 'pos_l', 'pos_h'))

def handle_compat_syscall_entry_preadv(self, event):
    self._print_compat_syscall(event, ('fd', 'vlen', 'pos_l', 'pos_h'))

def handle_compat_syscall_entry_inotify_init1(self, event):
    self._print_compat_syscall(event, ('flags',))

def handle_compat_syscall_entry_pipe2(self, event):
    self._print_compat_syscall(event, ('flags',))

def handle_compat_syscall_entry_dup3(self, event):
    self._print_compat_syscall(event, ('oldfd', 'newfd', 'flags'))

def handle_compat_syscall_entry_epoll_create1(self, event):
    self._print_compat_syscall(event, ('flags',))

def handle_compat_syscall_entry_eventfd2(self, event):
    self._print_compat_syscall(event, ('count', 'flags'))

def handle_compat_syscall_entry_signalfd4(self, event):
    self._print_compat_syscall(
        event, ('ufd', 'user_mask', 'sizemask', 'flags'))

def handle_compat_syscall_entry_timerfd_gettime(self, event):
    self._print_compat_syscall(event, ('ufd',))

def handle_compat_syscall_entry_timerfd_settime(self, event):
    self._print_compat_syscall(event, ('ufd', 'flags', 'utmr'))

def handle_compat_syscall_entry_eventfd(self, event):
    self._print_compat_syscall(event, ('count',))

def handle_compat_syscall_entry_timerfd_create(self, event):
    self._print_compat_syscall(event, ('clockid', 'flags'))

def handle_compat_syscall_entry_signalfd(self, event):
    self._print_compat_syscall(event, ('ufd', 'user_mask', 'sizemask'))

def handle_compat_syscall_entry_utimensat(self, event):
    self._print_compat_syscall(event, ('dfd', 'filename', 'utimes', 'flags'))

def handle_compat_syscall_entry_epoll_pwait(self, event):
    self._print_compat_syscall(
        event, ('epfd', 'maxevents', 'timeout', 'sigmask', 'sigsetsize'))

def handle_compat_syscall_entry_getcpu(self, event):
    self._print_compat_syscall(event, ('tcache',))

def handle_compat_syscall_entry_vmsplice(self, event):
    self._print_compat_syscall(event, ('fd', 'iov', 'nr_segs', 'flags'))

def handle_compat_syscall_entry_tee(self, event):
    self._print_compat_syscall(event, ('fdin', 'fdout', 'len', 'flags'))

def handle_compat_syscall_entry_splice(self, event):
    self._print_compat_syscall(
        event, ('fd_in', 'off_in', 'fd_out', 'off_out', 'len', 'flags'))

def handle_compat_syscall_entry_get_robust_list(self, event):
    self._print_compat_syscall(event, ('pid',))

def handle_compat_syscall_entry_set_robust_list(self, event):
    self._print_compat_syscall(event, ('head', 'len'))

def handle_compat_syscall_entry_unshare(self, event):
    self._print_compat_syscall(event, ('unshare_flags',))

def handle_compat_syscall_entry_ppoll(self, event):
    self._print_compat_syscall(
        event, ('ufds', 'nfds', 'tsp', 'sigmask', 'sigsetsize'))

def handle_compat_syscall_entry_pselect6(self, event):
    self._print_compat_syscall(
        event, ('n', 'inp', 'outp', 'exp', 'tsp', 'sig'))

def handle_compat_syscall_entry_faccessat(self, event):
    self._print_compat_syscall(event, ('dfd', 'filename', 'mode'))

def handle_compat_syscall_entry_fchmodat(self, event):
    self._print_compat_syscall(event, ('dfd', 'filename', 'mode'))

def handle_compat_syscall_entry_readlinkat(self, event):
    self._print_compat_syscall(event, ('dfd', 'pathname', 'bufsiz'))

def handle_compat_syscall_entry_symlinkat(self, event):
    self._print_compat_syscall(event, ('oldname', 'newdfd', 'newname'))

def handle_compat_syscall_entry_linkat(self, event):
    self._print_compat_syscall(
        event, ('olddfd', 'oldname', 'newdfd', 'newname', 'flags'))

def handle_compat_syscall_entry_renameat(self, event):
    self._print_compat_syscall(
        event, ('olddfd', 'oldname', 'newdfd', 'newname'))

def handle_compat_syscall_entry_unlinkat(self, event):
    self._print_compat_syscall(event, ('dfd', 'pathname', 'flag'))

def handle_compat_syscall_entry_fstatat64(self, event):
    self._print_compat_syscall(event, ('dfd', 'filename', 'statbuf', 'flag'))

def handle_compat_syscall_entry_futimesat(self, event):
    self._print_compat_syscall(event, ('dfd', 'filename', 'utimes'))

def handle_compat_syscall_entry_fchownat(self, event):
    self._print_compat_syscall(
        event, ('dfd', 'filename', 'user', 'group', 'flag'))

def handle_compat_syscall_entry_mknodat(self, event):
    self._print_compat_syscall(event, ('dfd', 'filename', 'mode', 'dev'))

def handle_compat_syscall_entry_mkdirat(self, event):
    self._print_compat_syscall(event, ('dfd', 'pathname', 'mode'))

def handle_compat_syscall_entry_openat(self, event):
    self._print_compat_syscall(event, ('dfd', 'filename', 'flags', 'mode'))

def handle_compat_syscall_entry_inotify_rm_watch(self, event):
    self._print_compat_syscall(event, ('fd', 'wd'))
event.timestamp cpu_id = event["cpu_id"] fd = event["fd"] pathname = event["pathname"] mask = event["mask"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { fd = %s, pathname = %s, mask = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, fd, pathname, mask,)) def handle_compat_syscall_entry_inotify_init(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id,)) def handle_compat_syscall_entry_ioprio_get(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] which = event["which"] who = event["who"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { which = %s, who = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, which, who,)) def handle_compat_syscall_entry_ioprio_set(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] which = event["which"] who = event["who"] ioprio = event["ioprio"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { which = %s, who = %s, ioprio = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, which, who, ioprio,)) def handle_compat_syscall_entry_keyctl(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] option = event["option"] arg2 = event["arg2"] arg3 = event["arg3"] arg4 = event["arg4"] arg5 = event["arg5"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { option = %s, arg2 = %s, arg3 = %s, arg4 = %s, arg5 = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, option, arg2, arg3, arg4, arg5,)) def handle_compat_syscall_entry_request_key(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] _type = event["_type"] _description = event["_description"] _callout_info = event["_callout_info"] destringid = event["destringid"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { _type = %s, _description = %s, _callout_info = %s, destringid = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, _type, 
_description, _callout_info, destringid,)) def handle_compat_syscall_entry_add_key(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] _type = event["_type"] _description = event["_description"] _payload = event["_payload"] plen = event["plen"] ringid = event["ringid"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { _type = %s, _description = %s, _payload = %s, plen = %s, ringid = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, _type, _description, _payload, plen, ringid,)) def handle_compat_syscall_entry_waitid(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] which = event["which"] upid = event["upid"] options = event["options"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { which = %s, upid = %s, options = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, which, upid, options,)) def handle_compat_syscall_entry_kexec_load(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] entry = event["entry"] nr_segments = event["nr_segments"] segments = event["segments"] flags = event["flags"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { entry = %s, nr_segments = %s, segments = %s, flags = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, entry, nr_segments, segments, flags,)) def handle_compat_syscall_entry_mq_getsetattr(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] mqdes = event["mqdes"] u_mqstat = event["u_mqstat"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { mqdes = %s, u_mqstat = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, mqdes, u_mqstat,)) def handle_compat_syscall_entry_mq_notify(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] mqdes = event["mqdes"] u_notification = event["u_notification"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { mqdes = %s, u_notification = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, mqdes, u_notification,)) def 
handle_compat_syscall_entry_mq_timedreceive(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] mqdes = event["mqdes"] msg_len = event["msg_len"] u_abs_timeout = event["u_abs_timeout"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { mqdes = %s, msg_len = %s, u_abs_timeout = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, mqdes, msg_len, u_abs_timeout,)) def handle_compat_syscall_entry_mq_timedsend(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] mqdes = event["mqdes"] u_msg_ptr = event["u_msg_ptr"] msg_len = event["msg_len"] msg_prio = event["msg_prio"] u_abs_timeout = event["u_abs_timeout"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { mqdes = %s, u_msg_ptr = %s, msg_len = %s, msg_prio = %s, u_abs_timeout = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, mqdes, u_msg_ptr, msg_len, msg_prio, u_abs_timeout,)) def handle_compat_syscall_entry_mq_unlink(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] u_name = event["u_name"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { u_name = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, u_name,)) def handle_compat_syscall_entry_mq_open(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] u_name = event["u_name"] oflag = event["oflag"] mode = event["mode"] u_attr = event["u_attr"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { u_name = %s, oflag = %s, mode = %s, u_attr = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, u_name, oflag, mode, u_attr,)) def handle_compat_syscall_entry_utimes(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] filename = event["filename"] utimes = event["utimes"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { filename = %s, utimes = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, filename, utimes,)) def handle_compat_syscall_entry_tgkill(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] tgid = 
event["tgid"] pid = event["pid"] sig = event["sig"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { tgid = %s, pid = %s, sig = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, tgid, pid, sig,)) def handle_compat_syscall_entry_fstatfs64(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] fd = event["fd"] sz = event["sz"] buf = event["buf"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { fd = %s, sz = %s, buf = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, fd, sz, buf,)) def handle_compat_syscall_entry_statfs64(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] pathname = event["pathname"] sz = event["sz"] buf = event["buf"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { pathname = %s, sz = %s, buf = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, pathname, sz, buf,)) def handle_compat_syscall_entry_clock_nanosleep(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] which_clock = event["which_clock"] flags = event["flags"] rqtp = event["rqtp"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { which_clock = %s, flags = %s, rqtp = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, which_clock, flags, rqtp,)) def handle_compat_syscall_entry_clock_getres(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] which_clock = event["which_clock"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { which_clock = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, which_clock,)) def handle_compat_syscall_entry_clock_gettime(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] which_clock = event["which_clock"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { which_clock = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, which_clock,)) def handle_compat_syscall_entry_clock_settime(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] which_clock = event["which_clock"] tp = event["tp"] 
self.print_filter(event, "[%s] %s: { cpu_id = %s }, { which_clock = %s, tp = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, which_clock, tp,)) def handle_compat_syscall_entry_timer_delete(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] timer_id = event["timer_id"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { timer_id = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, timer_id,)) def handle_compat_syscall_entry_timer_getoverrun(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] timer_id = event["timer_id"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { timer_id = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, timer_id,)) def handle_compat_syscall_entry_timer_gettime(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] timer_id = event["timer_id"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { timer_id = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, timer_id,)) def handle_compat_syscall_entry_timer_settime(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] timer_id = event["timer_id"] flags = event["flags"] new_setting = event["new_setting"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { timer_id = %s, flags = %s, new_setting = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, timer_id, flags, new_setting,)) def handle_compat_syscall_entry_timer_create(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] which_clock = event["which_clock"] timer_event_spec = event["timer_event_spec"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { which_clock = %s, timer_event_spec = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, which_clock, timer_event_spec,)) def handle_compat_syscall_entry_set_tid_address(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] tidptr = event["tidptr"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { tidptr = %s }" % 
(self.ns_to_hour_nsec(timestamp), event.name, cpu_id, tidptr,)) def handle_compat_syscall_entry_remap_file_pages(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] start = event["start"] size = event["size"] prot = event["prot"] pgoff = event["pgoff"] flags = event["flags"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { start = %s, size = %s, prot = %s, pgoff = %s, flags = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, start, size, prot, pgoff, flags,)) def handle_compat_syscall_entry_epoll_wait(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] epfd = event["epfd"] maxevents = event["maxevents"] timeout = event["timeout"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { epfd = %s, maxevents = %s, timeout = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, epfd, maxevents, timeout,)) def handle_compat_syscall_entry_epoll_ctl(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] epfd = event["epfd"] op = event["op"] fd = event["fd"] _event = event["event"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { epfd = %s, op = %s, fd = %s, event = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, epfd, op, fd, _event,)) def handle_compat_syscall_entry_epoll_create(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] size = event["size"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { size = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, size,)) def handle_compat_syscall_entry_exit_group(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] error_code = event["error_code"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { error_code = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, error_code,)) def handle_compat_syscall_entry_io_cancel(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ctx_id = event["ctx_id"] iocb = event["iocb"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ctx_id = 
%s, iocb = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ctx_id, iocb,)) def handle_compat_syscall_entry_io_submit(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ctx_id = event["ctx_id"] nr = event["nr"] iocbpp = event["iocbpp"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ctx_id = %s, nr = %s, iocbpp = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ctx_id, nr, iocbpp,)) def handle_compat_syscall_entry_io_getevents(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ctx_id = event["ctx_id"] min_nr = event["min_nr"] nr = event["nr"] timeout = event["timeout"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ctx_id = %s, min_nr = %s, nr = %s, timeout = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ctx_id, min_nr, nr, timeout,)) def handle_compat_syscall_entry_io_destroy(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ctx = event["ctx"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ctx = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ctx,)) def handle_compat_syscall_entry_io_setup(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] nr_events = event["nr_events"] ctxp = event["ctxp"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { nr_events = %s, ctxp = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, nr_events, ctxp,)) def handle_compat_syscall_entry_sched_getaffinity(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] pid = event["pid"] len = event["len"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { pid = %s, len = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, pid, len,)) def handle_compat_syscall_entry_sched_setaffinity(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] pid = event["pid"] len = event["len"] user_mask_ptr = event["user_mask_ptr"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { pid = %s, len = %s, user_mask_ptr = %s }" % 
(self.ns_to_hour_nsec(timestamp), event.name, cpu_id, pid, len, user_mask_ptr,)) def handle_compat_syscall_entry_futex(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] uaddr = event["uaddr"] op = event["op"] val = event["val"] utime = event["utime"] uaddr2 = event["uaddr2"] val3 = event["val3"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { uaddr = %s, op = %s, val = %s, utime = %s, uaddr2 = %s, val3 = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, uaddr, op, val, utime, uaddr2, val3,)) def handle_compat_syscall_entry_sendfile64(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] out_fd = event["out_fd"] in_fd = event["in_fd"] offset = event["offset"] count = event["count"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { out_fd = %s, in_fd = %s, offset = %s, count = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, out_fd, in_fd, offset, count,)) def handle_compat_syscall_entry_tkill(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] pid = event["pid"] sig = event["sig"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { pid = %s, sig = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, pid, sig,)) def handle_compat_syscall_entry_fremovexattr(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] fd = event["fd"] name = event["name"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { fd = %s, name = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, fd, name,)) def handle_compat_syscall_entry_lremovexattr(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] pathname = event["pathname"] name = event["name"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { pathname = %s, name = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, pathname, name,)) def handle_compat_syscall_entry_removexattr(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] pathname = event["pathname"] name = event["name"] 
self.print_filter(event, "[%s] %s: { cpu_id = %s }, { pathname = %s, name = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, pathname, name,)) def handle_compat_syscall_entry_flistxattr(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] fd = event["fd"] size = event["size"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { fd = %s, size = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, fd, size,)) def handle_compat_syscall_entry_llistxattr(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] pathname = event["pathname"] size = event["size"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { pathname = %s, size = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, pathname, size,)) def handle_compat_syscall_entry_listxattr(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] pathname = event["pathname"] size = event["size"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { pathname = %s, size = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, pathname, size,)) def handle_compat_syscall_entry_fgetxattr(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] fd = event["fd"] name = event["name"] size = event["size"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { fd = %s, name = %s, size = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, fd, name, size,)) def handle_compat_syscall_entry_lgetxattr(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] pathname = event["pathname"] name = event["name"] size = event["size"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { pathname = %s, name = %s, size = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, pathname, name, size,)) def handle_compat_syscall_entry_getxattr(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] pathname = event["pathname"] name = event["name"] size = event["size"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { 
pathname = %s, name = %s, size = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, pathname, name, size,)) def handle_compat_syscall_entry_fsetxattr(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] fd = event["fd"] name = event["name"] value = event["value"] size = event["size"] flags = event["flags"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { fd = %s, name = %s, value = %s, size = %s, flags = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, fd, name, value, size, flags,)) def handle_compat_syscall_entry_lsetxattr(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] pathname = event["pathname"] name = event["name"] value = event["value"] size = event["size"] flags = event["flags"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { pathname = %s, name = %s, value = %s, size = %s, flags = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, pathname, name, value, size, flags,)) def handle_compat_syscall_entry_setxattr(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] pathname = event["pathname"] name = event["name"] value = event["value"] size = event["size"] flags = event["flags"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { pathname = %s, name = %s, value = %s, size = %s, flags = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, pathname, name, value, size, flags,)) def handle_compat_syscall_entry_gettid(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id,)) def handle_compat_syscall_entry_fcntl64(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] fd = event["fd"] cmd = event["cmd"] arg = event["arg"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { fd = %s, cmd = %s, arg = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, fd, cmd, arg,)) def handle_compat_syscall_entry_getdents64(self, event): 
timestamp = event.timestamp cpu_id = event["cpu_id"] fd = event["fd"] count = event["count"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { fd = %s, count = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, fd, count,)) def handle_compat_syscall_entry_madvise(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] start = event["start"] len_in = event["len_in"] behavior = event["behavior"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { start = %s, len_in = %s, behavior = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, start, len_in, behavior,)) def handle_compat_syscall_entry_mincore(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] start = event["start"] len = event["len"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { start = %s, len = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, start, len,)) def handle_compat_syscall_entry_pivot_root(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] new_root = event["new_root"] put_old = event["put_old"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { new_root = %s, put_old = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, new_root, put_old,)) def handle_compat_syscall_entry_setfsgid(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] gid = event["gid"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { gid = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, gid,)) def handle_compat_syscall_entry_setfsuid(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] uid = event["uid"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { uid = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, uid,)) def handle_compat_syscall_entry_setgid(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] gid = event["gid"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { gid = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, gid,)) def 
handle_compat_syscall_entry_setuid(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] uid = event["uid"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { uid = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, uid,)) def handle_compat_syscall_entry_chown(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] filename = event["filename"] user = event["user"] group = event["group"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { filename = %s, user = %s, group = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, filename, user, group,)) def handle_compat_syscall_entry_getresgid(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id,)) def handle_compat_syscall_entry_setresgid(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] rgid = event["rgid"] egid = event["egid"] sgid = event["sgid"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { rgid = %s, egid = %s, sgid = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, rgid, egid, sgid,)) def handle_compat_syscall_entry_getresuid(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id,)) def handle_compat_syscall_entry_setresuid(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ruid = event["ruid"] euid = event["euid"] suid = event["suid"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ruid = %s, euid = %s, suid = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ruid, euid, suid,)) def handle_compat_syscall_entry_fchown(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] fd = event["fd"] user = event["user"] group = event["group"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { fd = %s, user = %s, group = %s }" % 
(self.ns_to_hour_nsec(timestamp), event.name, cpu_id, fd, user, group,)) def handle_compat_syscall_entry_setgroups(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] gidsetsize = event["gidsetsize"] grouplist = event["grouplist"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { gidsetsize = %s, grouplist = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, gidsetsize, grouplist,)) def handle_compat_syscall_entry_getgroups(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] gidsetsize = event["gidsetsize"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { gidsetsize = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, gidsetsize,)) def handle_compat_syscall_entry_setregid(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] rgid = event["rgid"] egid = event["egid"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { rgid = %s, egid = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, rgid, egid,)) def handle_compat_syscall_entry_setreuid(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ruid = event["ruid"] euid = event["euid"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ruid = %s, euid = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ruid, euid,)) def handle_compat_syscall_entry_getegid(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id,)) def handle_compat_syscall_entry_geteuid(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id,)) def handle_compat_syscall_entry_getgid(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id,)) def 
handle_compat_syscall_entry_getuid(self, event):
        timestamp = event.timestamp
        cpu_id = event["cpu_id"]
        self.print_filter(event, "[%s] %s: { cpu_id = %s }, }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id,))

    # NOTE(review): auto-generated compat (32-bit ABI) syscall-entry handlers.
    # Every handler follows the same mechanical pattern: read the event's
    # timestamp and cpu_id, extract each payload field by name, and emit one
    # babeltrace-style text line through self.print_filter(). Local names such
    # as len/type/set/id intentionally mirror the raw tracepoint field names
    # (shadowing builtins only inside the handler) — do not hand-edit these
    # methods; regenerate them instead so they stay in sync with the
    # tracepoint definitions.

    def handle_compat_syscall_entry_lchown(self, event):
        timestamp = event.timestamp
        cpu_id = event["cpu_id"]
        filename = event["filename"]
        user = event["user"]
        group = event["group"]
        self.print_filter(event, "[%s] %s: { cpu_id = %s }, { filename = %s, user = %s, group = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, filename, user, group,))

    def handle_compat_syscall_entry_fstat64(self, event):
        timestamp = event.timestamp
        cpu_id = event["cpu_id"]
        fd = event["fd"]
        statbuf = event["statbuf"]
        self.print_filter(event, "[%s] %s: { cpu_id = %s }, { fd = %s, statbuf = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, fd, statbuf,))

    def handle_compat_syscall_entry_lstat64(self, event):
        timestamp = event.timestamp
        cpu_id = event["cpu_id"]
        filename = event["filename"]
        statbuf = event["statbuf"]
        self.print_filter(event, "[%s] %s: { cpu_id = %s }, { filename = %s, statbuf = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, filename, statbuf,))

    def handle_compat_syscall_entry_stat64(self, event):
        timestamp = event.timestamp
        cpu_id = event["cpu_id"]
        filename = event["filename"]
        statbuf = event["statbuf"]
        self.print_filter(event, "[%s] %s: { cpu_id = %s }, { filename = %s, statbuf = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, filename, statbuf,))

    def handle_compat_syscall_entry_mmap_pgoff(self, event):
        timestamp = event.timestamp
        cpu_id = event["cpu_id"]
        addr = event["addr"]
        len = event["len"]
        prot = event["prot"]
        flags = event["flags"]
        fd = event["fd"]
        pgoff = event["pgoff"]
        self.print_filter(event, "[%s] %s: { cpu_id = %s }, { addr = %s, len = %s, prot = %s, flags = %s, fd = %s, pgoff = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, addr, len, prot, flags, fd, pgoff,))

    def handle_compat_syscall_entry_getrlimit(self, event):
        timestamp = event.timestamp
        cpu_id = event["cpu_id"]
        resource = event["resource"]
        self.print_filter(event, "[%s] %s: { cpu_id = %s }, { resource = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, resource,))

    def handle_compat_syscall_entry_sendfile(self, event):
        timestamp = event.timestamp
        cpu_id = event["cpu_id"]
        out_fd = event["out_fd"]
        in_fd = event["in_fd"]
        offset = event["offset"]
        count = event["count"]
        self.print_filter(event, "[%s] %s: { cpu_id = %s }, { out_fd = %s, in_fd = %s, offset = %s, count = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, out_fd, in_fd, offset, count,))

    def handle_compat_syscall_entry_getcwd(self, event):
        timestamp = event.timestamp
        cpu_id = event["cpu_id"]
        size = event["size"]
        self.print_filter(event, "[%s] %s: { cpu_id = %s }, { size = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, size,))

    def handle_compat_syscall_entry_chown16(self, event):
        timestamp = event.timestamp
        cpu_id = event["cpu_id"]
        filename = event["filename"]
        user = event["user"]
        group = event["group"]
        self.print_filter(event, "[%s] %s: { cpu_id = %s }, { filename = %s, user = %s, group = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, filename, user, group,))

    def handle_compat_syscall_entry_rt_sigsuspend(self, event):
        timestamp = event.timestamp
        cpu_id = event["cpu_id"]
        unewset = event["unewset"]
        sigsetsize = event["sigsetsize"]
        self.print_filter(event, "[%s] %s: { cpu_id = %s }, { unewset = %s, sigsetsize = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, unewset, sigsetsize,))

    def handle_compat_syscall_entry_rt_sigqueueinfo(self, event):
        timestamp = event.timestamp
        cpu_id = event["cpu_id"]
        pid = event["pid"]
        sig = event["sig"]
        uinfo = event["uinfo"]
        self.print_filter(event, "[%s] %s: { cpu_id = %s }, { pid = %s, sig = %s, uinfo = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, pid, sig, uinfo,))

    def handle_compat_syscall_entry_rt_sigtimedwait(self, event):
        timestamp = event.timestamp
        cpu_id = event["cpu_id"]
        uts = event["uts"]
        sigsetsize = event["sigsetsize"]
        self.print_filter(event, "[%s] %s: { cpu_id = %s }, { uts = %s, sigsetsize = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, uts, sigsetsize,))

    def handle_compat_syscall_entry_rt_sigpending(self, event):
        timestamp = event.timestamp
        cpu_id = event["cpu_id"]
        sigsetsize = event["sigsetsize"]
        self.print_filter(event, "[%s] %s: { cpu_id = %s }, { sigsetsize = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, sigsetsize,))

    def handle_compat_syscall_entry_rt_sigprocmask(self, event):
        timestamp = event.timestamp
        cpu_id = event["cpu_id"]
        how = event["how"]
        nset = event["nset"]
        sigsetsize = event["sigsetsize"]
        self.print_filter(event, "[%s] %s: { cpu_id = %s }, { how = %s, nset = %s, sigsetsize = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, how, nset, sigsetsize,))

    def handle_compat_syscall_entry_rt_sigaction(self, event):
        timestamp = event.timestamp
        cpu_id = event["cpu_id"]
        sig = event["sig"]
        act = event["act"]
        sigsetsize = event["sigsetsize"]
        self.print_filter(event, "[%s] %s: { cpu_id = %s }, { sig = %s, act = %s, sigsetsize = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, sig, act, sigsetsize,))

    def handle_compat_syscall_entry_prctl(self, event):
        timestamp = event.timestamp
        cpu_id = event["cpu_id"]
        option = event["option"]
        arg2 = event["arg2"]
        arg3 = event["arg3"]
        arg4 = event["arg4"]
        arg5 = event["arg5"]
        self.print_filter(event, "[%s] %s: { cpu_id = %s }, { option = %s, arg2 = %s, arg3 = %s, arg4 = %s, arg5 = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, option, arg2, arg3, arg4, arg5,))

    def handle_compat_syscall_entry_getresgid16(self, event):
        timestamp = event.timestamp
        cpu_id = event["cpu_id"]
        rgid = event["rgid"]
        egid = event["egid"]
        sgid = event["sgid"]
        self.print_filter(event, "[%s] %s: { cpu_id = %s }, { rgid = %s, egid = %s, sgid = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, rgid, egid, sgid,))

    def handle_compat_syscall_entry_setresgid16(self, event):
        timestamp = event.timestamp
        cpu_id = event["cpu_id"]
        rgid = event["rgid"]
        egid = event["egid"]
        sgid = event["sgid"]
        self.print_filter(event, "[%s] %s: { cpu_id = %s }, { rgid = %s, egid = %s, sgid = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, rgid, egid, sgid,))

    def handle_compat_syscall_entry_poll(self, event):
        timestamp = event.timestamp
        cpu_id = event["cpu_id"]
        ufds = event["ufds"]
        nfds = event["nfds"]
        timeout_msecs = event["timeout_msecs"]
        self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ufds = %s, nfds = %s, timeout_msecs = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ufds, nfds, timeout_msecs,))

    def handle_compat_syscall_entry_getresuid16(self, event):
        timestamp = event.timestamp
        cpu_id = event["cpu_id"]
        ruid = event["ruid"]
        euid = event["euid"]
        suid = event["suid"]
        self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ruid = %s, euid = %s, suid = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ruid, euid, suid,))

    def handle_compat_syscall_entry_setresuid16(self, event):
        timestamp = event.timestamp
        cpu_id = event["cpu_id"]
        ruid = event["ruid"]
        euid = event["euid"]
        suid = event["suid"]
        self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ruid = %s, euid = %s, suid = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ruid, euid, suid,))

    def handle_compat_syscall_entry_mremap(self, event):
        timestamp = event.timestamp
        cpu_id = event["cpu_id"]
        addr = event["addr"]
        old_len = event["old_len"]
        new_len = event["new_len"]
        flags = event["flags"]
        new_addr = event["new_addr"]
        self.print_filter(event, "[%s] %s: { cpu_id = %s }, { addr = %s, old_len = %s, new_len = %s, flags = %s, new_addr = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, addr, old_len, new_len, flags, new_addr,))

    def handle_compat_syscall_entry_nanosleep(self, event):
        timestamp = event.timestamp
        cpu_id = event["cpu_id"]
        rqtp = event["rqtp"]
        self.print_filter(event, "[%s] %s: { cpu_id = %s }, { rqtp = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, rqtp,))

    def handle_compat_syscall_entry_sched_rr_get_interval(self, event):
        timestamp = event.timestamp
        cpu_id = event["cpu_id"]
        pid = event["pid"]
        self.print_filter(event, "[%s] %s: { cpu_id = %s }, { pid = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, pid,))

    def handle_compat_syscall_entry_sched_get_priority_min(self, event):
        timestamp = event.timestamp
        cpu_id = event["cpu_id"]
        policy = event["policy"]
        self.print_filter(event, "[%s] %s: { cpu_id = %s }, { policy = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, policy,))

    def handle_compat_syscall_entry_sched_get_priority_max(self, event):
        timestamp = event.timestamp
        cpu_id = event["cpu_id"]
        policy = event["policy"]
        self.print_filter(event, "[%s] %s: { cpu_id = %s }, { policy = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, policy,))

    def handle_compat_syscall_entry_sched_yield(self, event):
        timestamp = event.timestamp
        cpu_id = event["cpu_id"]
        self.print_filter(event, "[%s] %s: { cpu_id = %s }, }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id,))

    def handle_compat_syscall_entry_sched_getscheduler(self, event):
        timestamp = event.timestamp
        cpu_id = event["cpu_id"]
        pid = event["pid"]
        self.print_filter(event, "[%s] %s: { cpu_id = %s }, { pid = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, pid,))

    def handle_compat_syscall_entry_sched_setscheduler(self, event):
        timestamp = event.timestamp
        cpu_id = event["cpu_id"]
        pid = event["pid"]
        policy = event["policy"]
        param = event["param"]
        self.print_filter(event, "[%s] %s: { cpu_id = %s }, { pid = %s, policy = %s, param = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, pid, policy, param,))

    def handle_compat_syscall_entry_sched_getparam(self, event):
        timestamp = event.timestamp
        cpu_id = event["cpu_id"]
        pid = event["pid"]
        self.print_filter(event, "[%s] %s: { cpu_id = %s }, { pid = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, pid,))

    def handle_compat_syscall_entry_sched_setparam(self, event):
        timestamp = event.timestamp
        cpu_id = event["cpu_id"]
        pid = event["pid"]
        param = event["param"]
        self.print_filter(event, "[%s] %s: { cpu_id = %s }, { pid = %s, param = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, pid, param,))

    def handle_compat_syscall_entry_munlockall(self, event):
        timestamp = event.timestamp
        cpu_id = event["cpu_id"]
        self.print_filter(event, "[%s] %s: { cpu_id = %s }, }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id,))

    def handle_compat_syscall_entry_mlockall(self, event):
        timestamp = event.timestamp
        cpu_id = event["cpu_id"]
        flags = event["flags"]
        self.print_filter(event, "[%s] %s: { cpu_id = %s }, { flags = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, flags,))

    def handle_compat_syscall_entry_munlock(self, event):
        timestamp = event.timestamp
        cpu_id = event["cpu_id"]
        start = event["start"]
        len = event["len"]
        self.print_filter(event, "[%s] %s: { cpu_id = %s }, { start = %s, len = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, start, len,))

    def handle_compat_syscall_entry_mlock(self, event):
        timestamp = event.timestamp
        cpu_id = event["cpu_id"]
        start = event["start"]
        len = event["len"]
        self.print_filter(event, "[%s] %s: { cpu_id = %s }, { start = %s, len = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, start, len,))

    def handle_compat_syscall_entry_sysctl(self, event):
        timestamp = event.timestamp
        cpu_id = event["cpu_id"]
        args = event["args"]
        self.print_filter(event, "[%s] %s: { cpu_id = %s }, { args = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, args,))

    def handle_compat_syscall_entry_fdatasync(self, event):
        timestamp = event.timestamp
        cpu_id = event["cpu_id"]
        fd = event["fd"]
        self.print_filter(event, "[%s] %s: { cpu_id = %s }, { fd = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, fd,))

    def handle_compat_syscall_entry_getsid(self, event):
        timestamp = event.timestamp
        cpu_id = event["cpu_id"]
        pid = event["pid"]
        self.print_filter(event, "[%s] %s: { cpu_id = %s }, { pid = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, pid,))

    def handle_compat_syscall_entry_writev(self, event):
        timestamp = event.timestamp
        cpu_id = event["cpu_id"]
        fd = event["fd"]
        vec = event["vec"]
        vlen = event["vlen"]
        self.print_filter(event, "[%s] %s: { cpu_id = %s }, { fd = %s, vec = %s, vlen = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, fd, vec, vlen,))

    def handle_compat_syscall_entry_readv(self, event):
        timestamp = event.timestamp
        cpu_id = event["cpu_id"]
        fd = event["fd"]
        vec = event["vec"]
        vlen = event["vlen"]
        self.print_filter(event, "[%s] %s: { cpu_id = %s }, { fd = %s, vec = %s, vlen = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, fd, vec, vlen,))

    def handle_compat_syscall_entry_msync(self, event):
        timestamp = event.timestamp
        cpu_id = event["cpu_id"]
        start = event["start"]
        len = event["len"]
        flags = event["flags"]
        self.print_filter(event, "[%s] %s: { cpu_id = %s }, { start = %s, len = %s, flags = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, start, len, flags,))

    def handle_compat_syscall_entry_flock(self, event):
        timestamp = event.timestamp
        cpu_id = event["cpu_id"]
        fd = event["fd"]
        cmd = event["cmd"]
        self.print_filter(event, "[%s] %s: { cpu_id = %s }, { fd = %s, cmd = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, fd, cmd,))

    def handle_compat_syscall_entry_select(self, event):
        timestamp = event.timestamp
        cpu_id = event["cpu_id"]
        n = event["n"]
        inp = event["inp"]
        outp = event["outp"]
        exp = event["exp"]
        tvp = event["tvp"]
        self.print_filter(event, "[%s] %s: { cpu_id = %s }, { n = %s, inp = %s, outp = %s, exp = %s, tvp = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, n, inp, outp, exp, tvp,))

    def handle_compat_syscall_entry_getdents(self, event):
        timestamp = event.timestamp
        cpu_id = event["cpu_id"]
        fd = event["fd"]
        count = event["count"]
        self.print_filter(event, "[%s] %s: { cpu_id = %s }, { fd = %s, count = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, fd, count,))

    def handle_compat_syscall_entry_llseek(self, event):
        timestamp = event.timestamp
        cpu_id = event["cpu_id"]
        fd = event["fd"]
        offset_high = event["offset_high"]
        offset_low = event["offset_low"]
        result = event["result"]
        origin = event["origin"]
        self.print_filter(event, "[%s] %s: { cpu_id = %s }, { fd = %s, offset_high = %s, offset_low = %s, result = %s, origin = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, fd, offset_high, offset_low, result, origin,))

    def handle_compat_syscall_entry_setfsgid16(self, event):
        timestamp = event.timestamp
        cpu_id = event["cpu_id"]
        gid = event["gid"]
        self.print_filter(event, "[%s] %s: { cpu_id = %s }, { gid = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, gid,))

    def handle_compat_syscall_entry_setfsuid16(self, event):
        timestamp = event.timestamp
        cpu_id = event["cpu_id"]
        uid = event["uid"]
        self.print_filter(event, "[%s] %s: { cpu_id = %s }, { uid = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, uid,))

    def handle_compat_syscall_entry_personality(self, event):
        timestamp = event.timestamp
        cpu_id = event["cpu_id"]
        personality = event["personality"]
        self.print_filter(event, "[%s] %s: { cpu_id = %s }, { personality = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, personality,))

    def handle_compat_syscall_entry_sysfs(self, event):
        timestamp = event.timestamp
        cpu_id = event["cpu_id"]
        option = event["option"]
        arg1 = event["arg1"]
        arg2 = event["arg2"]
        self.print_filter(event, "[%s] %s: { cpu_id = %s }, { option = %s, arg1 = %s, arg2 = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, option, arg1, arg2,))

    def handle_compat_syscall_entry_bdflush(self, event):
        timestamp = event.timestamp
        cpu_id = event["cpu_id"]
        func = event["func"]
        data = event["data"]
        self.print_filter(event, "[%s] %s: { cpu_id = %s }, { func = %s, data = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, func, data,))

    def handle_compat_syscall_entry_fchdir(self, event):
        timestamp = event.timestamp
        cpu_id = event["cpu_id"]
        fd = event["fd"]
        self.print_filter(event, "[%s] %s: { cpu_id = %s }, { fd = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, fd,))

    def handle_compat_syscall_entry_getpgid(self, event):
        timestamp = event.timestamp
        cpu_id = event["cpu_id"]
        pid = event["pid"]
        self.print_filter(event, "[%s] %s: { cpu_id = %s }, { pid = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, pid,))

    def handle_compat_syscall_entry_quotactl(self, event):
        timestamp = event.timestamp
        cpu_id = event["cpu_id"]
        cmd = event["cmd"]
        special = event["special"]
        id = event["id"]
        addr = event["addr"]
        self.print_filter(event, "[%s] %s: { cpu_id = %s }, { cmd = %s, special = %s, id = %s, addr = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, cmd, special, id, addr,))

    def handle_compat_syscall_entry_delete_module(self, event):
        timestamp = event.timestamp
        cpu_id = event["cpu_id"]
        name_user = event["name_user"]
        flags = event["flags"]
        self.print_filter(event, "[%s] %s: { cpu_id = %s }, { name_user = %s, flags = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, name_user, flags,))

    def handle_compat_syscall_entry_init_module(self, event):
        timestamp = event.timestamp
        cpu_id = event["cpu_id"]
        umod = event["umod"]
        len = event["len"]
        uargs = event["uargs"]
        self.print_filter(event, "[%s] %s: { cpu_id = %s }, { umod = %s, len = %s, uargs = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, umod, len, uargs,))

    def handle_compat_syscall_entry_sigprocmask(self, event):
        timestamp = event.timestamp
        cpu_id = event["cpu_id"]
        how = event["how"]
        nset = event["nset"]
        oset = event["oset"]
        self.print_filter(event, "[%s] %s: { cpu_id = %s }, { how = %s, nset = %s, oset = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, how, nset, oset,))

    def handle_compat_syscall_entry_mprotect(self, event):
        timestamp = event.timestamp
        cpu_id = event["cpu_id"]
        start = event["start"]
        len = event["len"]
        prot = event["prot"]
        self.print_filter(event, "[%s] %s: { cpu_id = %s }, { start = %s, len = %s, prot = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, start, len, prot,))

    def handle_compat_syscall_entry_adjtimex(self, event):
        timestamp = event.timestamp
        cpu_id = event["cpu_id"]
        txc_p = event["txc_p"]
        self.print_filter(event, "[%s] %s: { cpu_id = %s }, { txc_p = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, txc_p,))

    def handle_compat_syscall_entry_newuname(self, event):
        timestamp = event.timestamp
        cpu_id = event["cpu_id"]
        self.print_filter(event, "[%s] %s: { cpu_id = %s }, }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id,))

    def handle_compat_syscall_entry_setdomainname(self, event):
        timestamp = event.timestamp
        cpu_id = event["cpu_id"]
        name = event["name"]
        len = event["len"]
        self.print_filter(event, "[%s] %s: { cpu_id = %s }, { name = %s, len = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, name, len,))

    def handle_compat_syscall_entry_clone(self, event):
        timestamp = event.timestamp
        cpu_id = event["cpu_id"]
        clone_flags = event["clone_flags"]
        newsp = event["newsp"]
        parent_tid = event["parent_tid"]
        child_tid = event["child_tid"]
        self.print_filter(event, "[%s] %s: { cpu_id = %s }, { clone_flags = %s, newsp = %s, parent_tid = %s, child_tid = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, clone_flags, newsp, parent_tid, child_tid,))

    def handle_compat_syscall_entry_fsync(self, event):
        timestamp = event.timestamp
        cpu_id = event["cpu_id"]
        fd = event["fd"]
        self.print_filter(event, "[%s] %s: { cpu_id = %s }, { fd = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, fd,))

    def handle_compat_syscall_entry_ipc(self, event):
        timestamp = event.timestamp
        cpu_id = event["cpu_id"]
        call = event["call"]
        first = event["first"]
        second = event["second"]
        third = event["third"]
        ptr = event["ptr"]
        fifth = event["fifth"]
        self.print_filter(event, "[%s] %s: { cpu_id = %s }, { call = %s, first = %s, second = %s, third = %s, ptr = %s, fifth = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, call, first, second, third, ptr, fifth,))

    def handle_compat_syscall_entry_sysinfo(self, event):
        timestamp = event.timestamp
        cpu_id = event["cpu_id"]
        self.print_filter(event, "[%s] %s: { cpu_id = %s }, }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id,))

    def handle_compat_syscall_entry_swapoff(self, event):
        timestamp = event.timestamp
        cpu_id = event["cpu_id"]
        specialfile = event["specialfile"]
        self.print_filter(event, "[%s] %s: { cpu_id = %s }, { specialfile = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, specialfile,))

    def handle_compat_syscall_entry_wait4(self, event):
        timestamp = event.timestamp
        cpu_id = event["cpu_id"]
        upid = event["upid"]
        options = event["options"]
        self.print_filter(event, "[%s] %s: { cpu_id = %s }, { upid = %s, options = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, upid, options,))

    def handle_compat_syscall_entry_vhangup(self, event):
        timestamp = event.timestamp
        cpu_id = event["cpu_id"]
        self.print_filter(event, "[%s] %s: { cpu_id = %s }, }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id,))

    def handle_compat_syscall_entry_uname(self, event):
        timestamp = event.timestamp
        cpu_id = event["cpu_id"]
        name = event["name"]
        self.print_filter(event, "[%s] %s: { cpu_id = %s }, { name = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, name,))

    def handle_compat_syscall_entry_newfstat(self, event):
        timestamp = event.timestamp
        cpu_id = event["cpu_id"]
        fd = event["fd"]
        self.print_filter(event, "[%s] %s: { cpu_id = %s }, { fd = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, fd,))

    def handle_compat_syscall_entry_newlstat(self, event):
        timestamp = event.timestamp
        cpu_id = event["cpu_id"]
        filename = event["filename"]
        self.print_filter(event, "[%s] %s: { cpu_id = %s }, { filename = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, filename,))

    def handle_compat_syscall_entry_newstat(self, event):
        timestamp = event.timestamp
        cpu_id = event["cpu_id"]
        filename = event["filename"]
        self.print_filter(event, "[%s] %s: { cpu_id = %s }, { filename = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, filename,))

    def handle_compat_syscall_entry_getitimer(self, event):
        timestamp = event.timestamp
        cpu_id = event["cpu_id"]
        which = event["which"]
        self.print_filter(event, "[%s] %s: { cpu_id = %s }, { which = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, which,))

    def handle_compat_syscall_entry_setitimer(self, event):
        timestamp = event.timestamp
        cpu_id = event["cpu_id"]
        which = event["which"]
        value = event["value"]
        self.print_filter(event, "[%s] %s: { cpu_id = %s }, { which = %s, value = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, which, value,))

    def handle_compat_syscall_entry_syslog(self, event):
        timestamp = event.timestamp
        cpu_id = event["cpu_id"]
        type = event["type"]
        len = event["len"]
        self.print_filter(event, "[%s] %s: { cpu_id = %s }, { type = %s, len = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, type, len,))

    def handle_compat_syscall_entry_socketcall(self, event):
        timestamp = event.timestamp
        cpu_id = event["cpu_id"]
        call = event["call"]
        args = event["args"]
        self.print_filter(event, "[%s] %s: { cpu_id = %s }, { call = %s, args = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, call, args,))

    def handle_compat_syscall_entry_fstatfs(self, event):
        timestamp = event.timestamp
        cpu_id = event["cpu_id"]
        fd = event["fd"]
        self.print_filter(event, "[%s] %s: { cpu_id = %s }, { fd = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, fd,))

    def handle_compat_syscall_entry_statfs(self, event):
        timestamp = event.timestamp
        cpu_id = event["cpu_id"]
        pathname = event["pathname"]
        self.print_filter(event, "[%s] %s: { cpu_id = %s }, { pathname = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, pathname,))

    def handle_compat_syscall_entry_setpriority(self, event):
        timestamp = event.timestamp
        cpu_id = event["cpu_id"]
        which = event["which"]
        who = event["who"]
        niceval = event["niceval"]
        self.print_filter(event, "[%s] %s: { cpu_id = %s }, { which = %s, who = %s, niceval = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, which, who, niceval,))

    def handle_compat_syscall_entry_getpriority(self, event):
        timestamp = event.timestamp
        cpu_id = event["cpu_id"]
        which = event["which"]
        who = event["who"]
        self.print_filter(event, "[%s] %s: { cpu_id = %s }, { which = %s, who = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, which, who,))

    def handle_compat_syscall_entry_fchown16(self, event):
        timestamp = event.timestamp
        cpu_id = event["cpu_id"]
        fd = event["fd"]
        user = event["user"]
        group = event["group"]
        self.print_filter(event, "[%s] %s: { cpu_id = %s }, { fd = %s, user = %s, group = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, fd, user, group,))

    def handle_compat_syscall_entry_fchmod(self, event):
        timestamp = event.timestamp
        cpu_id = event["cpu_id"]
        fd = event["fd"]
        mode = event["mode"]
        self.print_filter(event, "[%s] %s: { cpu_id = %s }, { fd = %s, mode = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, fd, mode,))

    def handle_compat_syscall_entry_ftruncate(self, event):
        timestamp = event.timestamp
        cpu_id = event["cpu_id"]
        fd = event["fd"]
        length = event["length"]
        self.print_filter(event, "[%s] %s: { cpu_id = %s }, { fd = %s, length = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, fd, length,))

    def handle_compat_syscall_entry_truncate(self, event):
        timestamp = event.timestamp
        cpu_id = event["cpu_id"]
        path = event["path"]
        length = event["length"]
        self.print_filter(event, "[%s] %s: { cpu_id = %s }, { path = %s, length = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, path, length,))

    def handle_compat_syscall_entry_munmap(self, event):
        timestamp = event.timestamp
        cpu_id = event["cpu_id"]
        addr = event["addr"]
        len = event["len"]
        self.print_filter(event, "[%s] %s: { cpu_id = %s }, { addr = %s, len = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, addr, len,))

    def handle_compat_syscall_entry_old_mmap(self, event):
        timestamp = event.timestamp
        cpu_id = event["cpu_id"]
        arg = event["arg"]
        self.print_filter(event, "[%s] %s: { cpu_id = %s }, { arg = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, arg,))

    def handle_compat_syscall_entry_old_readdir(self, event):
        timestamp = event.timestamp
        cpu_id = event["cpu_id"]
        fd = event["fd"]
        dirent = event["dirent"]
        count = event["count"]
        self.print_filter(event, "[%s] %s: { cpu_id = %s }, { fd = %s, dirent = %s, count = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, fd, dirent, count,))

    def handle_compat_syscall_entry_reboot(self, event):
        timestamp = event.timestamp
        cpu_id = event["cpu_id"]
        magic1 = event["magic1"]
        magic2 = event["magic2"]
        cmd = event["cmd"]
        arg = event["arg"]
        self.print_filter(event, "[%s] %s: { cpu_id = %s }, { magic1 = %s, magic2 = %s, cmd = %s, arg = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, magic1, magic2, cmd, arg,))

    def handle_compat_syscall_entry_swapon(self, event):
        timestamp = event.timestamp
        cpu_id = event["cpu_id"]
        specialfile = event["specialfile"]
        swap_flags = event["swap_flags"]
        self.print_filter(event, "[%s] %s: { cpu_id = %s }, { specialfile = %s, swap_flags = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, specialfile, swap_flags,))

    def handle_compat_syscall_entry_uselib(self, event):
        timestamp = event.timestamp
        cpu_id = event["cpu_id"]
        library = event["library"]
        self.print_filter(event, "[%s] %s: { cpu_id = %s }, { library = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, library,))

    def handle_compat_syscall_entry_readlink(self, event):
        timestamp = event.timestamp
        cpu_id = event["cpu_id"]
        path = event["path"]
        bufsiz = event["bufsiz"]
        self.print_filter(event, "[%s] %s: { cpu_id = %s }, { path = %s, bufsiz = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, path, bufsiz,))

    def handle_compat_syscall_entry_lstat(self, event):
        timestamp = event.timestamp
        cpu_id = event["cpu_id"]
        filename = event["filename"]
        statbuf = event["statbuf"]
        self.print_filter(event, "[%s] %s: { cpu_id = %s }, { filename = %s, statbuf = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, filename, statbuf,))

    def handle_compat_syscall_entry_symlink(self, event):
        timestamp = event.timestamp
        cpu_id = event["cpu_id"]
        oldname = event["oldname"]
        newname = event["newname"]
        self.print_filter(event, "[%s] %s: { cpu_id = %s }, { oldname = %s, newname = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, oldname, newname,))

    def handle_compat_syscall_entry_old_select(self, event):
        timestamp = event.timestamp
        cpu_id = event["cpu_id"]
        arg = event["arg"]
        self.print_filter(event, "[%s] %s: { cpu_id = %s }, { arg = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, arg,))

    def handle_compat_syscall_entry_setgroups16(self, event):
        timestamp = event.timestamp
        cpu_id = event["cpu_id"]
        gidsetsize = event["gidsetsize"]
        grouplist = event["grouplist"]
        self.print_filter(event, "[%s] %s: { cpu_id = %s }, { gidsetsize = %s, grouplist = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, gidsetsize, grouplist,))

    def handle_compat_syscall_entry_getgroups16(self, event):
        timestamp = event.timestamp
        cpu_id = event["cpu_id"]
        gidsetsize = event["gidsetsize"]
        grouplist = event["grouplist"]
        self.print_filter(event, "[%s] %s: { cpu_id = %s }, { gidsetsize = %s, grouplist = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, gidsetsize, grouplist,))

    def handle_compat_syscall_entry_settimeofday(self, event):
        timestamp = event.timestamp
        cpu_id = event["cpu_id"]
        tv = event["tv"]
        tz = event["tz"]
        self.print_filter(event, "[%s] %s: { cpu_id = %s }, { tv = %s, tz = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, tv, tz,))

    def handle_compat_syscall_entry_gettimeofday(self, event):
        timestamp = event.timestamp
        cpu_id = event["cpu_id"]
        self.print_filter(event, "[%s] %s: { cpu_id = %s }, }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id,))

    def handle_compat_syscall_entry_getrusage(self, event):
        timestamp = event.timestamp
        cpu_id = event["cpu_id"]
        who = event["who"]
        self.print_filter(event, "[%s] %s: { cpu_id = %s }, { who = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, who,))

    def handle_compat_syscall_entry_old_getrlimit(self, event):
        timestamp = event.timestamp
        cpu_id = event["cpu_id"]
        resource = event["resource"]
        rlim = event["rlim"]
        self.print_filter(event, "[%s] %s: { cpu_id = %s }, { resource = %s, rlim = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, resource, rlim,))

    def handle_compat_syscall_entry_setrlimit(self, event):
        timestamp = event.timestamp
        cpu_id = event["cpu_id"]
        resource = event["resource"]
        rlim = event["rlim"]
        self.print_filter(event, "[%s] %s: { cpu_id = %s }, { resource = %s, rlim = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, resource, rlim,))

    def handle_compat_syscall_entry_sethostname(self, event):
        timestamp = event.timestamp
        cpu_id = event["cpu_id"]
        name = event["name"]
        len = event["len"]
        self.print_filter(event, "[%s] %s: { cpu_id = %s }, { name = %s, len = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, name, len,))

    def handle_compat_syscall_entry_sigpending(self, event):
        timestamp = event.timestamp
        cpu_id = event["cpu_id"]
        set = event["set"]
        self.print_filter(event, "[%s] %s: { cpu_id = %s }, { set = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, set,))

    def handle_compat_syscall_entry_setregid16(self, event):
        timestamp = event.timestamp
        cpu_id = event["cpu_id"]
        rgid = event["rgid"]
        egid = event["egid"]
        self.print_filter(event, "[%s] %s: { cpu_id = %s }, { rgid = %s, egid = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, rgid, egid,))

    def handle_compat_syscall_entry_setreuid16(self, event):
        timestamp = event.timestamp
        cpu_id = event["cpu_id"]
        ruid = event["ruid"]
        euid = event["euid"]
        self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ruid = %s, euid = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ruid, euid,))

    def handle_compat_syscall_entry_ssetmask(self, event):
        timestamp = event.timestamp
        cpu_id = event["cpu_id"]
        newmask = event["newmask"]
        self.print_filter(event, "[%s] %s: { cpu_id = %s }, { newmask = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, newmask,))

    def handle_compat_syscall_entry_sgetmask(self, event):
        timestamp = event.timestamp
        cpu_id = event["cpu_id"]
        self.print_filter(event, "[%s] %s: { cpu_id = %s }, }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id,))

    def handle_compat_syscall_entry_setsid(self, event):
        timestamp = event.timestamp
        cpu_id = event["cpu_id"]
        self.print_filter(event, "[%s] %s: { cpu_id = %s }, }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id,))

    def handle_compat_syscall_entry_getpgrp(self, event):
        timestamp = event.timestamp
        cpu_id = event["cpu_id"]
        self.print_filter(event, "[%s] %s: { cpu_id = %s }, }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id,))

    def handle_compat_syscall_entry_getppid(self, event):
        timestamp = event.timestamp
        cpu_id = event["cpu_id"]
        self.print_filter(event, "[%s] %s: { cpu_id = %s }, }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id,))

    def handle_compat_syscall_entry_dup2(self, event):
        timestamp = event.timestamp
        cpu_id = event["cpu_id"]
        oldfd = event["oldfd"]
        newfd = event["newfd"]
        self.print_filter(event, "[%s] %s: { cpu_id = %s }, { oldfd = %s, newfd = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, oldfd, newfd,))

    def handle_compat_syscall_entry_ustat(self, event):
        timestamp = event.timestamp
        cpu_id = event["cpu_id"]
        dev = event["dev"]
        self.print_filter(event, "[%s] %s: { cpu_id = %s }, { dev = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, dev,))

    def handle_compat_syscall_entry_chroot(self, event):
        timestamp = event.timestamp
        cpu_id = event["cpu_id"]
        filename = event["filename"]
        self.print_filter(event, "[%s] %s: { cpu_id = %s }, { filename = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, filename,))

    def handle_compat_syscall_entry_umask(self, event):
        timestamp = event.timestamp
        cpu_id = event["cpu_id"]
        mask = event["mask"]
        self.print_filter(event, "[%s] %s: { cpu_id = %s }, { mask = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, mask,))

    def handle_compat_syscall_entry_olduname(self, event):
        timestamp = event.timestamp
        cpu_id = event["cpu_id"]
        name = event["name"]
        self.print_filter(event, "[%s] %s: { cpu_id = %s }, { name = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, name,))

    def handle_compat_syscall_entry_setpgid(self, event):
        timestamp = event.timestamp
        cpu_id = event["cpu_id"]
        pid = event["pid"]
        pgid = event["pgid"]
        self.print_filter(event, "[%s] %s: { cpu_id = %s }, { pid = %s, pgid = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, pid, pgid,))

    def handle_compat_syscall_entry_fcntl(self, event):
        timestamp = event.timestamp
        cpu_id = event["cpu_id"]
        fd = event["fd"]
        cmd = event["cmd"]
        arg = event["arg"]
        self.print_filter(event, "[%s] %s: { cpu_id = %s }, { fd = %s, cmd = %s, arg = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, fd, cmd, arg,))

    def handle_compat_syscall_entry_ioctl(self, event):
        timestamp = event.timestamp
        cpu_id = event["cpu_id"]
        fd = event["fd"]
        cmd = event["cmd"]
        arg = event["arg"]
        self.print_filter(event, "[%s] %s: { cpu_id = %s }, { fd = %s, cmd = %s, arg = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, fd, cmd, arg,))

    def handle_compat_syscall_entry_umount(self, event):
        timestamp = event.timestamp
        cpu_id = event["cpu_id"]
        name = event["name"]
        flags = event["flags"]
        self.print_filter(event, "[%s] %s: { cpu_id = %s }, { name = %s, flags = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, name, flags,))

    def handle_compat_syscall_entry_acct(self, event):
        timestamp = event.timestamp
        cpu_id = event["cpu_id"]
        name = event["name"]
        self.print_filter(event, "[%s] %s: { cpu_id = %s }, { name = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, name,))

    def handle_compat_syscall_entry_getegid16(self, event):
        timestamp = event.timestamp
        cpu_id = event["cpu_id"]
        self.print_filter(event, "[%s] %s: { cpu_id = %s }, }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id,))

    def handle_compat_syscall_entry_geteuid16(self, event):
        timestamp = event.timestamp
        cpu_id = event["cpu_id"]
        self.print_filter(event, "[%s] %s: { cpu_id = %s }, }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id,))

    def handle_compat_syscall_entry_signal(self, event):
        timestamp = event.timestamp
        cpu_id = event["cpu_id"]
        sig = event["sig"]
        handler = event["handler"]
        self.print_filter(event, "[%s] %s: { cpu_id = %s }, { sig = %s, handler = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, sig, handler,))

    def handle_compat_syscall_entry_getgid16(self, event):
        timestamp = event.timestamp
        cpu_id = event["cpu_id"]
        self.print_filter(event, "[%s] %s: { cpu_id = %s }, }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id,))

    def handle_compat_syscall_entry_setgid16(self, event):
        timestamp = event.timestamp
        cpu_id = event["cpu_id"]
        gid = event["gid"]
        self.print_filter(event, "[%s] %s: { cpu_id = %s }, { gid = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, gid,))

    def handle_compat_syscall_entry_brk(self, event):
        timestamp = event.timestamp
        cpu_id = event["cpu_id"]
        brk = event["brk"]
        self.print_filter(event, "[%s] %s: { cpu_id = %s }, { brk = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, brk,))

    def handle_compat_syscall_entry_times(self, event):
        timestamp = event.timestamp
        cpu_id = event["cpu_id"]
        self.print_filter(event, "[%s] %s: { cpu_id = %s }, }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id,))

    def handle_compat_syscall_entry_pipe(self, event):
        timestamp = event.timestamp
        cpu_id = event["cpu_id"]
        self.print_filter(event, "[%s] %s: { cpu_id = %s }, }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id,))

    def handle_compat_syscall_entry_dup(self, event):
        timestamp = event.timestamp
        cpu_id = event["cpu_id"]
        fildes = event["fildes"]
        self.print_filter(event, "[%s] %s: { cpu_id = %s }, { fildes = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, fildes,))

    def handle_compat_syscall_entry_rmdir(self, event):
        timestamp = event.timestamp
        cpu_id = event["cpu_id"]
        pathname = event["pathname"]
        self.print_filter(event, "[%s] %s: { cpu_id = %s }, { pathname = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, pathname,))

    def handle_compat_syscall_entry_mkdir(self, event):
        timestamp = event.timestamp
        cpu_id = event["cpu_id"]
        pathname = event["pathname"]
        mode = event["mode"]
        self.print_filter(event, "[%s] %s: { cpu_id = %s }, { pathname = %s, mode = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, pathname, mode,))

    def handle_compat_syscall_entry_rename(self, event):
        timestamp = event.timestamp
        cpu_id = event["cpu_id"]
        oldname = event["oldname"]
        newname = event["newname"]
        self.print_filter(event, "[%s] %s: { cpu_id = %s }, { oldname = %s, newname = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, oldname, newname,))

    def handle_compat_syscall_entry_kill(self, event):
        timestamp = event.timestamp
        cpu_id = event["cpu_id"]
        pid = event["pid"]
        sig = event["sig"]
        self.print_filter(event, "[%s] %s: { cpu_id = %s }, { pid = %s, sig = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, pid, sig,))

    def handle_compat_syscall_entry_sync(self, event):
        timestamp = event.timestamp
        cpu_id = event["cpu_id"]
        self.print_filter(event, "[%s] %s: { cpu_id = %s }, }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id,))

    def handle_compat_syscall_entry_nice(self, event):
        timestamp = event.timestamp
        cpu_id = event["cpu_id"]
        increment = event["increment"]
        self.print_filter(event, "[%s] %s: { cpu_id = %s }, { increment = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, increment,))

    def handle_compat_syscall_entry_access(self, event):
        timestamp = event.timestamp
        cpu_id = event["cpu_id"]
        filename = event["filename"]
        mode = event["mode"]
        self.print_filter(event, "[%s] %s: { cpu_id = %s }, { filename = %s, mode = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, filename, mode,))

    def handle_compat_syscall_entry_utime(self, event):
        timestamp = event.timestamp
        cpu_id = event["cpu_id"]
        filename = event["filename"]
        times = event["times"]
        self.print_filter(event, "[%s] %s: { cpu_id = %s }, { filename = %s, times = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, filename, times,))

    def handle_compat_syscall_entry_pause(self, event):
        timestamp = event.timestamp
        cpu_id = event["cpu_id"]
        self.print_filter(event, "[%s] %s: { cpu_id = %s }, }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id,))

    def handle_compat_syscall_entry_fstat(self, event):
        timestamp = event.timestamp
        cpu_id = event["cpu_id"]
        fd = event["fd"]
        statbuf = event["statbuf"]
        self.print_filter(event, "[%s] %s: { cpu_id = %s }, { fd = %s, statbuf = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, fd, statbuf,))

    def handle_compat_syscall_entry_alarm(self, event):
        timestamp = event.timestamp
        cpu_id = event["cpu_id"]
        seconds = event["seconds"]
        self.print_filter(event, "[%s] %s: { cpu_id = %s }, { seconds = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, seconds,))

    def handle_compat_syscall_entry_ptrace(self, event):
        timestamp = event.timestamp
        cpu_id = event["cpu_id"]
        request = event["request"]
        pid = event["pid"]
        addr = event["addr"]
        data = event["data"]
        self.print_filter(event, "[%s] %s: { cpu_id = %s }, { request = %s, pid = %s, addr = %s, data = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, request, pid, addr, data,))

    def handle_compat_syscall_entry_stime(self, event):
        timestamp = event.timestamp
        cpu_id = event["cpu_id"]
        tptr = event["tptr"]
        self.print_filter(event, "[%s] %s: { cpu_id = %s }, { tptr = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, tptr,))

    def handle_compat_syscall_entry_getuid16(self, event):
        timestamp = event.timestamp
        cpu_id = event["cpu_id"]
        self.print_filter(event, "[%s] %s: { cpu_id = %s }, }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id,))

    def handle_compat_syscall_entry_setuid16(self, event):
        timestamp = event.timestamp
        cpu_id = event["cpu_id"]
        uid = event["uid"]
        self.print_filter(event, "[%s] %s: { cpu_id = %s }, { uid = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, uid,))

    def handle_compat_syscall_entry_oldumount(self, event):
        timestamp = event.timestamp
        cpu_id = event["cpu_id"]
        name = event["name"]
        self.print_filter(event, "[%s] %s: { cpu_id = %s }, { name = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, name,))

    def handle_compat_syscall_entry_mount(self, event):
        timestamp = event.timestamp
        cpu_id = event["cpu_id"]
        dev_name = event["dev_name"]
        dir_name = event["dir_name"]
        type = event["type"]
        flags = event["flags"]
        data = event["data"]
        self.print_filter(event, "[%s] %s: { cpu_id = %s }, { dev_name = %s, dir_name = %s, type = %s, flags = %s, data = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, dev_name, dir_name, type, flags, data,))

    def handle_compat_syscall_entry_getpid(self, event):
        timestamp = event.timestamp
        cpu_id = event["cpu_id"]
        self.print_filter(event, "[%s] %s: { cpu_id = %s }, }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id,))

    def handle_compat_syscall_entry_lseek(self, event):
        timestamp = event.timestamp
        cpu_id = event["cpu_id"]
        fd = event["fd"]
        offset = event["offset"]
        origin = event["origin"]
        self.print_filter(event, "[%s] %s: { cpu_id = %s }, { fd = %s, offset = %s, origin = 
%s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, fd, offset, origin,)) def handle_compat_syscall_entry_stat(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] filename = event["filename"] statbuf = event["statbuf"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { filename = %s, statbuf = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, filename, statbuf,)) def handle_compat_syscall_entry_lchown16(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] filename = event["filename"] user = event["user"] group = event["group"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { filename = %s, user = %s, group = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, filename, user, group,)) def handle_compat_syscall_entry_chmod(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] filename = event["filename"] mode = event["mode"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { filename = %s, mode = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, filename, mode,)) def handle_compat_syscall_entry_mknod(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] filename = event["filename"] mode = event["mode"] dev = event["dev"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { filename = %s, mode = %s, dev = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, filename, mode, dev,)) def handle_compat_syscall_entry_time(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id,)) def handle_compat_syscall_entry_chdir(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] filename = event["filename"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { filename = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, filename,)) def handle_compat_syscall_entry_execve(self, event): timestamp = event.timestamp cpu_id = 
event["cpu_id"] filename = event["filename"] argv = event["argv"] envp = event["envp"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { filename = %s, argv = %s, envp = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, filename, argv, envp,)) def handle_compat_syscall_entry_unlink(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] pathname = event["pathname"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { pathname = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, pathname,)) def handle_compat_syscall_entry_link(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] oldname = event["oldname"] newname = event["newname"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { oldname = %s, newname = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, oldname, newname,)) def handle_compat_syscall_entry_creat(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] pathname = event["pathname"] mode = event["mode"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { pathname = %s, mode = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, pathname, mode,)) def handle_compat_syscall_entry_waitpid(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] pid = event["pid"] stat_addr = event["stat_addr"] options = event["options"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { pid = %s, stat_addr = %s, options = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, pid, stat_addr, options,)) def handle_compat_syscall_entry_close(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] fd = event["fd"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { fd = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, fd,)) def handle_compat_syscall_entry_open(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] filename = event["filename"] flags = event["flags"] mode = event["mode"] self.print_filter(event, "[%s] %s: { cpu_id = 
%s }, { filename = %s, flags = %s, mode = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, filename, flags, mode,)) def handle_compat_syscall_entry_write(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] fd = event["fd"] buf = event["buf"] count = event["count"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { fd = %s, buf = %s, count = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, fd, buf, count,)) def handle_compat_syscall_entry_read(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] fd = event["fd"] count = event["count"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { fd = %s, count = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, fd, count,)) def handle_compat_syscall_entry_exit(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] error_code = event["error_code"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { error_code = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, error_code,)) def handle_compat_syscall_entry_restart_syscall(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id,)) def handle_syscall_exit_finit_module(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_syscall_exit_process_vm_writev(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_syscall_exit_process_vm_readv(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] lvec = event["lvec"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s, lvec = %s }" % 
(self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret, lvec,)) def handle_syscall_exit_getcpu(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] cpup = event["cpup"] nodep = event["nodep"] tcache = event["tcache"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s, cpup = %s, nodep = %s, tcache = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret, cpup, nodep, tcache,)) def handle_syscall_exit_setns(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_syscall_exit_sendmmsg(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_syscall_exit_syncfs(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_syscall_exit_clock_adjtime(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] utx = event["utx"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s, utx = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret, utx,)) def handle_syscall_exit_open_by_handle_at(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_syscall_exit_name_to_handle_at(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] handle = event["handle"] mnt_id = event["mnt_id"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s, handle = %s, mnt_id = %s }" % 
(self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret, handle, mnt_id,)) def handle_syscall_exit_prlimit64(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] old_rlim = event["old_rlim"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s, old_rlim = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret, old_rlim,)) def handle_syscall_exit_fanotify_mark(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_syscall_exit_fanotify_init(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_syscall_exit_recvmmsg(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] mmsg = event["mmsg"] timeout = event["timeout"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s, mmsg = %s, timeout = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret, mmsg, timeout,)) def handle_syscall_exit_perf_event_open(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_syscall_exit_rt_tgsigqueueinfo(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_syscall_exit_pwritev(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def 
handle_syscall_exit_preadv(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] vec = event["vec"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s, vec = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret, vec,)) def handle_syscall_exit_inotify_init1(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_syscall_exit_pipe2(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] fildes = event["fildes"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s, fildes = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret, fildes,)) def handle_syscall_exit_dup3(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_syscall_exit_epoll_create1(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_syscall_exit_eventfd2(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_syscall_exit_signalfd4(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_syscall_exit_accept4(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] upeer_sockaddr = event["upeer_sockaddr"] upeer_addrlen = 
event["upeer_addrlen"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s, upeer_sockaddr = %s, upeer_addrlen = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret, upeer_sockaddr, upeer_addrlen,)) def handle_syscall_exit_timerfd_gettime(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] otmr = event["otmr"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s, otmr = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret, otmr,)) def handle_syscall_exit_timerfd_settime(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] otmr = event["otmr"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s, otmr = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret, otmr,)) def handle_syscall_exit_fallocate(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_syscall_exit_eventfd(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_syscall_exit_timerfd_create(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_syscall_exit_signalfd(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_syscall_exit_epoll_pwait(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] events = event["events"] self.print_filter(event, "[%s] %s: { cpu_id 
= %s }, { ret = %s, events = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret, events,)) def handle_syscall_exit_utimensat(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_syscall_exit_move_pages(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] status = event["status"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s, status = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret, status,)) def handle_syscall_exit_vmsplice(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_syscall_exit_sync_file_range(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_syscall_exit_tee(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_syscall_exit_splice(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_syscall_exit_get_robust_list(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] head_ptr = event["head_ptr"] len_ptr = event["len_ptr"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s, head_ptr = %s, len_ptr = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret, head_ptr, 
len_ptr,)) def handle_syscall_exit_set_robust_list(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_syscall_exit_unshare(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_syscall_exit_ppoll(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] ufds = event["ufds"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s, ufds = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret, ufds,)) def handle_syscall_exit_pselect6(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] inp = event["inp"] outp = event["outp"] exp = event["exp"] tsp = event["tsp"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s, inp = %s, outp = %s, exp = %s, tsp = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret, inp, outp, exp, tsp,)) def handle_syscall_exit_faccessat(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_syscall_exit_fchmodat(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_syscall_exit_readlinkat(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] buf = event["buf"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s, buf = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret, buf,)) def 
handle_syscall_exit_symlinkat(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_syscall_exit_linkat(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_syscall_exit_renameat(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_syscall_exit_unlinkat(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_syscall_exit_newfstatat(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] statbuf = event["statbuf"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s, statbuf = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret, statbuf,)) def handle_syscall_exit_futimesat(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_syscall_exit_fchownat(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_syscall_exit_mknodat(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % 
(self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_syscall_exit_mkdirat(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_syscall_exit_openat(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_syscall_exit_migrate_pages(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_syscall_exit_inotify_rm_watch(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_syscall_exit_inotify_add_watch(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_syscall_exit_inotify_init(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_syscall_exit_ioprio_get(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_syscall_exit_ioprio_set(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { 
ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_syscall_exit_keyctl(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] arg2 = event["arg2"] arg3 = event["arg3"] arg4 = event["arg4"] arg5 = event["arg5"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s, arg2 = %s, arg3 = %s, arg4 = %s, arg5 = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret, arg2, arg3, arg4, arg5,)) def handle_syscall_exit_request_key(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_syscall_exit_add_key(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_syscall_exit_waitid(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] infop = event["infop"] ru = event["ru"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s, infop = %s, ru = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret, infop, ru,)) def handle_syscall_exit_kexec_load(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_syscall_exit_mq_getsetattr(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] u_omqstat = event["u_omqstat"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s, u_omqstat = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret, u_omqstat,)) def handle_syscall_exit_mq_notify(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, 
"[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_syscall_exit_mq_timedreceive(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] u_msg_ptr = event["u_msg_ptr"] u_msg_prio = event["u_msg_prio"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s, u_msg_ptr = %s, u_msg_prio = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret, u_msg_ptr, u_msg_prio,)) def handle_syscall_exit_mq_timedsend(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_syscall_exit_mq_unlink(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_syscall_exit_mq_open(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_syscall_exit_get_mempolicy(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] policy = event["policy"] nmask = event["nmask"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s, policy = %s, nmask = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret, policy, nmask,)) def handle_syscall_exit_set_mempolicy(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_syscall_exit_mbind(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s 
}" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_syscall_exit_utimes(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_syscall_exit_tgkill(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_syscall_exit_epoll_ctl(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_syscall_exit_epoll_wait(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] events = event["events"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s, events = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret, events,)) def handle_syscall_exit_exit_group(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_syscall_exit_clock_nanosleep(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] rmtp = event["rmtp"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s, rmtp = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret, rmtp,)) def handle_syscall_exit_clock_getres(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] tp = event["tp"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s, tp = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret, tp,)) def handle_syscall_exit_clock_gettime(self, event): timestamp = 
# NOTE(review): this chunk of the generated file opened mid-way through the
# previous exit handler (its "def" line precedes this chunk; its tail read
# cpu_id/ret/tp and printed "{ ret = %s, tp = %s }") and ended mid-way through
# handle_syscall_exit_clone.  Those two truncated definitions are not
# reproduced here; everything in between is.
#
# Every syscall-exit handler in this file follows one pattern: read
# event.timestamp, event["cpu_id"] and event["ret"], optionally read a few
# syscall-specific output fields, then emit a single line of the form
#   [<time>] <event name>: { cpu_id = <n> }, { ret = <n>[, field = <v>]... }
# through self.print_filter().  The table + factory below replace ~149
# hand-expanded copies of that boilerplate with a single implementation;
# every generated method keeps its original name, signature and output.

# Syscall exits whose payload is only { ret = %s } (no extra output fields).
_PLAIN_EXIT_SYSCALLS = (
    'clock_settime', 'timer_delete', 'timer_getoverrun', 'fadvise64',
    'restart_syscall', 'set_tid_address', 'remap_file_pages', 'epoll_create',
    'io_submit', 'io_destroy', 'io_setup', 'sched_setaffinity', 'tkill',
    'fremovexattr', 'lremovexattr', 'removexattr', 'fsetxattr', 'lsetxattr',
    'setxattr', 'readahead', 'gettid', 'delete_module', 'init_module',
    'setdomainname', 'sethostname', 'reboot', 'swapoff', 'swapon', 'umount',
    'mount', 'settimeofday', 'acct', 'sync', 'chroot', 'setrlimit',
    'pivot_root', 'vhangup', 'munlockall', 'mlockall', 'munlock', 'mlock',
    'sched_get_priority_min', 'sched_get_priority_max', 'sched_getscheduler',
    'sched_setscheduler', 'sched_setparam', 'setpriority', 'getpriority',
    'sysfs', 'personality', 'mknod', 'utime', 'rt_sigsuspend',
    'rt_sigqueueinfo', 'getsid', 'setfsgid', 'setfsuid', 'getpgid',
    'setresgid', 'setresuid', 'setgroups', 'setregid', 'setreuid', 'setsid',
    'getpgrp', 'getppid', 'setpgid', 'getegid', 'geteuid', 'setgid', 'setuid',
    'getgid', 'getuid', 'umask', 'lchown', 'fchown', 'chown', 'fchmod',
    'chmod', 'symlink', 'unlink', 'link', 'creat', 'rmdir', 'mkdir', 'rename',
    'fchdir', 'chdir', 'ftruncate', 'truncate', 'fdatasync', 'fsync', 'flock',
    'msgsnd', 'msgget', 'shmdt', 'semop', 'semget', 'kill', 'exit', 'execve',
)

# Syscall exits that also print extra output fields, in this exact order.
_FIELD_EXIT_SYSCALLS = {
    'timer_gettime': ('setting',),
    'timer_settime': ('old_setting',),
    'timer_create': ('created_timer_id',),
    'semtimedop': ('timeout',),
    'getdents64': ('dirent',),
    'lookup_dcookie': ('buf',),
    'io_cancel': ('result',),
    'io_getevents': ('events', 'timeout'),
    'sched_getaffinity': ('user_mask_ptr',),
    'futex': ('uaddr', 'uaddr2'),
    'time': ('tloc',),
    'flistxattr': ('list',),
    'llistxattr': ('list',),
    'listxattr': ('list',),
    'fgetxattr': ('value',),
    'lgetxattr': ('value',),
    'getxattr': ('value',),
    'quotactl': ('addr',),
    'adjtimex': ('txc_p',),
    'prctl': ('arg2',),
    'sysctl': ('args',),
    'sched_rr_get_interval': ('interval',),
    'sched_getparam': ('param',),
    'fstatfs': ('buf',),
    'statfs': ('buf',),
    'ustat': ('ubuf',),
    'sigaltstack': ('uoss',),
    'rt_sigtimedwait': ('uthese', 'uinfo'),
    'rt_sigpending': ('uset',),
    'getresgid': ('rgidp', 'egidp', 'sgidp'),
    'getresuid': ('ruidp', 'euidp', 'suidp'),
    'getgroups': ('grouplist',),
    'syslog': ('buf',),
    'ptrace': ('addr', 'data'),
    'times': ('tbuf',),
    'sysinfo': ('info',),
    'getrusage': ('ru',),
    'getrlimit': ('rlim',),
    'gettimeofday': ('tv', 'tz'),
    'readlink': ('buf',),
    'getcwd': ('buf',),
    'getdents': ('dirent',),
    'fcntl': ('arg',),
    'msgctl': ('buf',),
    'msgrcv': ('msgp',),
    'semctl': ('arg',),
    'newuname': ('name',),
    'wait4': ('stat_addr', 'ru'),
}


def _make_syscall_exit_handler(syscall_name, extra_fields=()):
    """Build the handler method for one syscall-exit event.

    syscall_name -- bare syscall name (without the handle_syscall_exit_
                    prefix), used only to give the handler a proper __name__.
    extra_fields -- ordered event field names printed after "ret".

    The printf-style format string is assembled once here, instead of being
    re-built for every event, and matches the original generated code
    byte-for-byte.
    """
    fmt = ('[%s] %s: { cpu_id = %s }, { ret = %s' +
           ''.join(', ' + field + ' = %s' for field in extra_fields) +
           ' }')

    def handler(self, event):
        values = [self.ns_to_hour_nsec(event.timestamp), event.name,
                  event['cpu_id'], event['ret']]
        values.extend(event[field] for field in extra_fields)
        self.print_filter(event, fmt % tuple(values))

    handler.__name__ = 'handle_syscall_exit_' + syscall_name
    return handler


# Install the generated handlers under their original names.  Writing through
# locals() is well-defined here: at module and class scope locals() is the
# actual namespace mapping (codified by PEP 667).
for _syscall_name in _PLAIN_EXIT_SYSCALLS:
    locals()['handle_syscall_exit_' + _syscall_name] = \
        _make_syscall_exit_handler(_syscall_name)
for _syscall_name, _extra_fields in _FIELD_EXIT_SYSCALLS.items():
    locals()['handle_syscall_exit_' + _syscall_name] = \
        _make_syscall_exit_handler(_syscall_name, _extra_fields)
del _syscall_name, _extra_fields

# NOTE(review): handle_syscall_exit_clone's body continues past this chunk;
# only "def handle_syscall_exit_clone(self, event): timestamp = ... cpu_id ="
# was visible here, so it is intentionally not regenerated above.
event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_syscall_exit_getsockopt(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] optval = event["optval"] optlen = event["optlen"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s, optval = %s, optlen = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret, optval, optlen,)) def handle_syscall_exit_setsockopt(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_syscall_exit_socketpair(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] usockvec = event["usockvec"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s, usockvec = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret, usockvec,)) def handle_syscall_exit_getpeername(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] usockaddr = event["usockaddr"] usockaddr_len = event["usockaddr_len"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s, usockaddr = %s, usockaddr_len = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret, usockaddr, usockaddr_len,)) def handle_syscall_exit_getsockname(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] usockaddr = event["usockaddr"] usockaddr_len = event["usockaddr_len"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s, usockaddr = %s, usockaddr_len = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret, usockaddr, usockaddr_len,)) def handle_syscall_exit_listen(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { 
ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_syscall_exit_bind(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_syscall_exit_shutdown(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_syscall_exit_recvmsg(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] msg = event["msg"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s, msg = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret, msg,)) def handle_syscall_exit_sendmsg(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_syscall_exit_recvfrom(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] ubuf = event["ubuf"] addr = event["addr"] addr_len = event["addr_len"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s, ubuf = %s, addr = %s, addr_len = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret, ubuf, addr, addr_len,)) def handle_syscall_exit_sendto(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_syscall_exit_accept(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] upeer_addrlen = event["upeer_addrlen"] family = event["family"] sport = event["sport"] _v4addr_length = event["_v4addr_length"] v4addr = event["v4addr"] 
_v6addr_length = event["_v6addr_length"] v6addr = event["v6addr"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s, upeer_addrlen = %s, family = %s, sport = %s, _v4addr_length = %s, v4addr = %s, _v6addr_length = %s, v6addr = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret, upeer_addrlen, family, sport, _v4addr_length, v4addr, _v6addr_length, v6addr,)) def handle_syscall_exit_connect(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_syscall_exit_socket(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_syscall_exit_sendfile64(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] offset = event["offset"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s, offset = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret, offset,)) def handle_syscall_exit_getpid(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_syscall_exit_setitimer(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] ovalue = event["ovalue"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s, ovalue = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret, ovalue,)) def handle_syscall_exit_alarm(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def 
handle_syscall_exit_getitimer(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] value = event["value"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s, value = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret, value,)) def handle_syscall_exit_nanosleep(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] rmtp = event["rmtp"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s, rmtp = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret, rmtp,)) def handle_syscall_exit_pause(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_syscall_exit_dup2(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_syscall_exit_dup(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_syscall_exit_shmctl(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] buf = event["buf"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s, buf = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret, buf,)) def handle_syscall_exit_shmat(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_syscall_exit_shmget(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, 
{ ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_syscall_exit_madvise(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_syscall_exit_mincore(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] vec = event["vec"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s, vec = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret, vec,)) def handle_syscall_exit_msync(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_syscall_exit_mremap(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_syscall_exit_sched_yield(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_syscall_exit_select(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] inp = event["inp"] outp = event["outp"] exp = event["exp"] tvp = event["tvp"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s, inp = %s, outp = %s, exp = %s, tvp = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret, inp, outp, exp, tvp,)) def handle_syscall_exit_pipe(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] fildes = event["fildes"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s, fildes = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, 
cpu_id, ret, fildes,)) def handle_syscall_exit_access(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_syscall_exit_writev(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] vec = event["vec"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s, vec = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret, vec,)) def handle_syscall_exit_readv(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] vec = event["vec"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s, vec = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret, vec,)) def handle_syscall_exit_pwrite64(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_syscall_exit_pread64(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] buf = event["buf"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s, buf = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret, buf,)) def handle_syscall_exit_ioctl(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] arg = event["arg"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s, arg = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret, arg,)) def handle_syscall_exit_rt_sigprocmask(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] oset = event["oset"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s, oset = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret, oset,)) def handle_syscall_exit_rt_sigaction(self, event): timestamp = 
event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] oact = event["oact"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s, oact = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret, oact,)) def handle_syscall_exit_brk(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_syscall_exit_munmap(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_syscall_exit_mprotect(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_syscall_exit_mmap(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_syscall_exit_lseek(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_syscall_exit_poll(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] ufds = event["ufds"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s, ufds = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret, ufds,)) def handle_syscall_exit_newlstat(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] statbuf = event["statbuf"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s, statbuf = %s }" % (self.ns_to_hour_nsec(timestamp), 
event.name, cpu_id, ret, statbuf,)) def handle_syscall_exit_newfstat(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] statbuf = event["statbuf"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s, statbuf = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret, statbuf,)) def handle_syscall_exit_newstat(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] statbuf = event["statbuf"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s, statbuf = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret, statbuf,)) def handle_syscall_exit_close(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_syscall_exit_open(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_syscall_exit_write(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret,)) def handle_syscall_exit_read(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ret = event["ret"] buf = event["buf"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ret = %s, buf = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ret, buf,)) def handle_syscall_entry_finit_module(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] fd = event["fd"] uargs = event["uargs"] flags = event["flags"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { fd = %s, uargs = %s, flags = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, fd, uargs, flags,)) def 
handle_syscall_entry_process_vm_writev(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] pid = event["pid"] lvec = event["lvec"] liovcnt = event["liovcnt"] rvec = event["rvec"] riovcnt = event["riovcnt"] flags = event["flags"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { pid = %s, lvec = %s, liovcnt = %s, rvec = %s, riovcnt = %s, flags = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, pid, lvec, liovcnt, rvec, riovcnt, flags,)) def handle_syscall_entry_process_vm_readv(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] pid = event["pid"] liovcnt = event["liovcnt"] rvec = event["rvec"] riovcnt = event["riovcnt"] flags = event["flags"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { pid = %s, liovcnt = %s, rvec = %s, riovcnt = %s, flags = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, pid, liovcnt, rvec, riovcnt, flags,)) def handle_syscall_entry_getcpu(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] tcache = event["tcache"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { tcache = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, tcache,)) def handle_syscall_entry_setns(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] fd = event["fd"] nstype = event["nstype"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { fd = %s, nstype = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, fd, nstype,)) def handle_syscall_entry_sendmmsg(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] fd = event["fd"] mmsg = event["mmsg"] vlen = event["vlen"] flags = event["flags"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { fd = %s, mmsg = %s, vlen = %s, flags = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, fd, mmsg, vlen, flags,)) def handle_syscall_entry_syncfs(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] fd = event["fd"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { fd = %s }" 
% (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, fd,)) def handle_syscall_entry_clock_adjtime(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] which_clock = event["which_clock"] utx = event["utx"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { which_clock = %s, utx = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, which_clock, utx,)) def handle_syscall_entry_open_by_handle_at(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] mountdirfd = event["mountdirfd"] handle = event["handle"] flags = event["flags"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { mountdirfd = %s, handle = %s, flags = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, mountdirfd, handle, flags,)) def handle_syscall_entry_name_to_handle_at(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] dfd = event["dfd"] name = event["name"] handle = event["handle"] flag = event["flag"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { dfd = %s, name = %s, handle = %s, flag = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, dfd, name, handle, flag,)) def handle_syscall_entry_prlimit64(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] pid = event["pid"] resource = event["resource"] new_rlim = event["new_rlim"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { pid = %s, resource = %s, new_rlim = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, pid, resource, new_rlim,)) def handle_syscall_entry_fanotify_mark(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] fanotify_fd = event["fanotify_fd"] flags = event["flags"] mask = event["mask"] dfd = event["dfd"] pathname = event["pathname"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { fanotify_fd = %s, flags = %s, mask = %s, dfd = %s, pathname = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, fanotify_fd, flags, mask, dfd, pathname,)) def handle_syscall_entry_fanotify_init(self, 
event): timestamp = event.timestamp cpu_id = event["cpu_id"] flags = event["flags"] event_f_flags = event["event_f_flags"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { flags = %s, event_f_flags = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, flags, event_f_flags,)) def handle_syscall_entry_recvmmsg(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] fd = event["fd"] vlen = event["vlen"] flags = event["flags"] timeout = event["timeout"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { fd = %s, vlen = %s, flags = %s, timeout = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, fd, vlen, flags, timeout,)) def handle_syscall_entry_perf_event_open(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] attr_uptr = event["attr_uptr"] pid = event["pid"] cpu = event["cpu"] group_fd = event["group_fd"] flags = event["flags"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { attr_uptr = %s, pid = %s, cpu = %s, group_fd = %s, flags = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, attr_uptr, pid, cpu, group_fd, flags,)) def handle_syscall_entry_rt_tgsigqueueinfo(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] tgid = event["tgid"] pid = event["pid"] sig = event["sig"] uinfo = event["uinfo"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { tgid = %s, pid = %s, sig = %s, uinfo = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, tgid, pid, sig, uinfo,)) def handle_syscall_entry_pwritev(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] fd = event["fd"] vec = event["vec"] vlen = event["vlen"] pos_l = event["pos_l"] pos_h = event["pos_h"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { fd = %s, vec = %s, vlen = %s, pos_l = %s, pos_h = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, fd, vec, vlen, pos_l, pos_h,)) def handle_syscall_entry_preadv(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] fd = event["fd"] 
vlen = event["vlen"] pos_l = event["pos_l"] pos_h = event["pos_h"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { fd = %s, vlen = %s, pos_l = %s, pos_h = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, fd, vlen, pos_l, pos_h,)) def handle_syscall_entry_inotify_init1(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] flags = event["flags"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { flags = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, flags,)) def handle_syscall_entry_pipe2(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] flags = event["flags"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { flags = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, flags,)) def handle_syscall_entry_dup3(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] oldfd = event["oldfd"] newfd = event["newfd"] flags = event["flags"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { oldfd = %s, newfd = %s, flags = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, oldfd, newfd, flags,)) def handle_syscall_entry_epoll_create1(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] flags = event["flags"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { flags = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, flags,)) def handle_syscall_entry_eventfd2(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] count = event["count"] flags = event["flags"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { count = %s, flags = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, count, flags,)) def handle_syscall_entry_signalfd4(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ufd = event["ufd"] user_mask = event["user_mask"] sizemask = event["sizemask"] flags = event["flags"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ufd = %s, user_mask = %s, sizemask = %s, flags = %s }" % 
(self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ufd, user_mask, sizemask, flags,)) def handle_syscall_entry_accept4(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] fd = event["fd"] upeer_addrlen = event["upeer_addrlen"] flags = event["flags"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { fd = %s, upeer_addrlen = %s, flags = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, fd, upeer_addrlen, flags,)) def handle_syscall_entry_timerfd_gettime(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ufd = event["ufd"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ufd = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ufd,)) def handle_syscall_entry_timerfd_settime(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ufd = event["ufd"] flags = event["flags"] utmr = event["utmr"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ufd = %s, flags = %s, utmr = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ufd, flags, utmr,)) def handle_syscall_entry_fallocate(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] fd = event["fd"] mode = event["mode"] offset = event["offset"] len = event["len"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { fd = %s, mode = %s, offset = %s, len = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, fd, mode, offset, len,)) def handle_syscall_entry_eventfd(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] count = event["count"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { count = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, count,)) def handle_syscall_entry_timerfd_create(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] clockid = event["clockid"] flags = event["flags"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { clockid = %s, flags = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, clockid, flags,)) def 
handle_syscall_entry_signalfd(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ufd = event["ufd"] user_mask = event["user_mask"] sizemask = event["sizemask"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ufd = %s, user_mask = %s, sizemask = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ufd, user_mask, sizemask,)) def handle_syscall_entry_epoll_pwait(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] epfd = event["epfd"] maxevents = event["maxevents"] timeout = event["timeout"] sigmask = event["sigmask"] sigsetsize = event["sigsetsize"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { epfd = %s, maxevents = %s, timeout = %s, sigmask = %s, sigsetsize = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, epfd, maxevents, timeout, sigmask, sigsetsize,)) def handle_syscall_entry_utimensat(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] dfd = event["dfd"] filename = event["filename"] utimes = event["utimes"] flags = event["flags"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { dfd = %s, filename = %s, utimes = %s, flags = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, dfd, filename, utimes, flags,)) def handle_syscall_entry_move_pages(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] pid = event["pid"] nr_pages = event["nr_pages"] pages = event["pages"] nodes = event["nodes"] flags = event["flags"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { pid = %s, nr_pages = %s, pages = %s, nodes = %s, flags = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, pid, nr_pages, pages, nodes, flags,)) def handle_syscall_entry_vmsplice(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] fd = event["fd"] iov = event["iov"] nr_segs = event["nr_segs"] flags = event["flags"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { fd = %s, iov = %s, nr_segs = %s, flags = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, 
fd, iov, nr_segs, flags,)) def handle_syscall_entry_sync_file_range(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] fd = event["fd"] offset = event["offset"] nbytes = event["nbytes"] flags = event["flags"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { fd = %s, offset = %s, nbytes = %s, flags = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, fd, offset, nbytes, flags,)) def handle_syscall_entry_tee(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] fdin = event["fdin"] fdout = event["fdout"] len = event["len"] flags = event["flags"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { fdin = %s, fdout = %s, len = %s, flags = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, fdin, fdout, len, flags,)) def handle_syscall_entry_splice(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] fd_in = event["fd_in"] off_in = event["off_in"] fd_out = event["fd_out"] off_out = event["off_out"] len = event["len"] flags = event["flags"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { fd_in = %s, off_in = %s, fd_out = %s, off_out = %s, len = %s, flags = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, fd_in, off_in, fd_out, off_out, len, flags,)) def handle_syscall_entry_get_robust_list(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] pid = event["pid"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { pid = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, pid,)) def handle_syscall_entry_set_robust_list(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] head = event["head"] len = event["len"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { head = %s, len = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, head, len,)) def handle_syscall_entry_unshare(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] unshare_flags = event["unshare_flags"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { 
unshare_flags = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, unshare_flags,)) def handle_syscall_entry_ppoll(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ufds = event["ufds"] nfds = event["nfds"] tsp = event["tsp"] sigmask = event["sigmask"] sigsetsize = event["sigsetsize"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ufds = %s, nfds = %s, tsp = %s, sigmask = %s, sigsetsize = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ufds, nfds, tsp, sigmask, sigsetsize,)) def handle_syscall_entry_pselect6(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] n = event["n"] inp = event["inp"] outp = event["outp"] exp = event["exp"] tsp = event["tsp"] sig = event["sig"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { n = %s, inp = %s, outp = %s, exp = %s, tsp = %s, sig = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, n, inp, outp, exp, tsp, sig,)) def handle_syscall_entry_faccessat(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] dfd = event["dfd"] filename = event["filename"] mode = event["mode"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { dfd = %s, filename = %s, mode = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, dfd, filename, mode,)) def handle_syscall_entry_fchmodat(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] dfd = event["dfd"] filename = event["filename"] mode = event["mode"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { dfd = %s, filename = %s, mode = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, dfd, filename, mode,)) def handle_syscall_entry_readlinkat(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] dfd = event["dfd"] pathname = event["pathname"] bufsiz = event["bufsiz"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { dfd = %s, pathname = %s, bufsiz = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, dfd, pathname, bufsiz,)) def 
handle_syscall_entry_symlinkat(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] oldname = event["oldname"] newdfd = event["newdfd"] newname = event["newname"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { oldname = %s, newdfd = %s, newname = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, oldname, newdfd, newname,)) def handle_syscall_entry_linkat(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] olddfd = event["olddfd"] oldname = event["oldname"] newdfd = event["newdfd"] newname = event["newname"] flags = event["flags"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { olddfd = %s, oldname = %s, newdfd = %s, newname = %s, flags = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, olddfd, oldname, newdfd, newname, flags,)) def handle_syscall_entry_renameat(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] olddfd = event["olddfd"] oldname = event["oldname"] newdfd = event["newdfd"] newname = event["newname"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { olddfd = %s, oldname = %s, newdfd = %s, newname = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, olddfd, oldname, newdfd, newname,)) def handle_syscall_entry_unlinkat(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] dfd = event["dfd"] pathname = event["pathname"] flag = event["flag"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { dfd = %s, pathname = %s, flag = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, dfd, pathname, flag,)) def handle_syscall_entry_newfstatat(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] dfd = event["dfd"] filename = event["filename"] flag = event["flag"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { dfd = %s, filename = %s, flag = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, dfd, filename, flag,)) def handle_syscall_entry_futimesat(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] 
dfd = event["dfd"] filename = event["filename"] utimes = event["utimes"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { dfd = %s, filename = %s, utimes = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, dfd, filename, utimes,)) def handle_syscall_entry_fchownat(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] dfd = event["dfd"] filename = event["filename"] user = event["user"] group = event["group"] flag = event["flag"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { dfd = %s, filename = %s, user = %s, group = %s, flag = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, dfd, filename, user, group, flag,)) def handle_syscall_entry_mknodat(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] dfd = event["dfd"] filename = event["filename"] mode = event["mode"] dev = event["dev"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { dfd = %s, filename = %s, mode = %s, dev = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, dfd, filename, mode, dev,)) def handle_syscall_entry_mkdirat(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] dfd = event["dfd"] pathname = event["pathname"] mode = event["mode"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { dfd = %s, pathname = %s, mode = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, dfd, pathname, mode,)) def handle_syscall_entry_openat(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] dfd = event["dfd"] filename = event["filename"] flags = event["flags"] mode = event["mode"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { dfd = %s, filename = %s, flags = %s, mode = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, dfd, filename, flags, mode,)) def handle_syscall_entry_migrate_pages(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] pid = event["pid"] maxnode = event["maxnode"] old_nodes = event["old_nodes"] new_nodes = event["new_nodes"] 
self.print_filter(event, "[%s] %s: { cpu_id = %s }, { pid = %s, maxnode = %s, old_nodes = %s, new_nodes = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, pid, maxnode, old_nodes, new_nodes,)) def handle_syscall_entry_inotify_rm_watch(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] fd = event["fd"] wd = event["wd"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { fd = %s, wd = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, fd, wd,)) def handle_syscall_entry_inotify_add_watch(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] fd = event["fd"] pathname = event["pathname"] mask = event["mask"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { fd = %s, pathname = %s, mask = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, fd, pathname, mask,)) def handle_syscall_entry_inotify_init(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id,)) def handle_syscall_entry_ioprio_get(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] which = event["which"] who = event["who"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { which = %s, who = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, which, who,)) def handle_syscall_entry_ioprio_set(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] which = event["which"] who = event["who"] ioprio = event["ioprio"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { which = %s, who = %s, ioprio = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, which, who, ioprio,)) def handle_syscall_entry_keyctl(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] option = event["option"] arg2 = event["arg2"] arg3 = event["arg3"] arg4 = event["arg4"] arg5 = event["arg5"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { option = %s, arg2 = %s, arg3 = %s, arg4 = %s, arg5 = 
%s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, option, arg2, arg3, arg4, arg5,)) def handle_syscall_entry_request_key(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] _type = event["_type"] _description = event["_description"] _callout_info = event["_callout_info"] destringid = event["destringid"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { _type = %s, _description = %s, _callout_info = %s, destringid = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, _type, _description, _callout_info, destringid,)) def handle_syscall_entry_add_key(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] _type = event["_type"] _description = event["_description"] _payload = event["_payload"] plen = event["plen"] ringid = event["ringid"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { _type = %s, _description = %s, _payload = %s, plen = %s, ringid = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, _type, _description, _payload, plen, ringid,)) def handle_syscall_entry_waitid(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] which = event["which"] upid = event["upid"] options = event["options"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { which = %s, upid = %s, options = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, which, upid, options,)) def handle_syscall_entry_kexec_load(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] entry = event["entry"] nr_segments = event["nr_segments"] segments = event["segments"] flags = event["flags"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { entry = %s, nr_segments = %s, segments = %s, flags = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, entry, nr_segments, segments, flags,)) def handle_syscall_entry_mq_getsetattr(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] mqdes = event["mqdes"] u_mqstat = event["u_mqstat"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, 
{ mqdes = %s, u_mqstat = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, mqdes, u_mqstat,)) def handle_syscall_entry_mq_notify(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] mqdes = event["mqdes"] u_notification = event["u_notification"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { mqdes = %s, u_notification = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, mqdes, u_notification,)) def handle_syscall_entry_mq_timedreceive(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] mqdes = event["mqdes"] msg_len = event["msg_len"] u_abs_timeout = event["u_abs_timeout"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { mqdes = %s, msg_len = %s, u_abs_timeout = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, mqdes, msg_len, u_abs_timeout,)) def handle_syscall_entry_mq_timedsend(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] mqdes = event["mqdes"] u_msg_ptr = event["u_msg_ptr"] msg_len = event["msg_len"] msg_prio = event["msg_prio"] u_abs_timeout = event["u_abs_timeout"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { mqdes = %s, u_msg_ptr = %s, msg_len = %s, msg_prio = %s, u_abs_timeout = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, mqdes, u_msg_ptr, msg_len, msg_prio, u_abs_timeout,)) def handle_syscall_entry_mq_unlink(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] u_name = event["u_name"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { u_name = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, u_name,)) def handle_syscall_entry_mq_open(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] u_name = event["u_name"] oflag = event["oflag"] mode = event["mode"] u_attr = event["u_attr"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { u_name = %s, oflag = %s, mode = %s, u_attr = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, u_name, oflag, mode, u_attr,)) def 
handle_syscall_entry_get_mempolicy(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] maxnode = event["maxnode"] addr = event["addr"] flags = event["flags"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { maxnode = %s, addr = %s, flags = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, maxnode, addr, flags,)) def handle_syscall_entry_set_mempolicy(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] mode = event["mode"] nmask = event["nmask"] maxnode = event["maxnode"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { mode = %s, nmask = %s, maxnode = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, mode, nmask, maxnode,)) def handle_syscall_entry_mbind(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] start = event["start"] len = event["len"] mode = event["mode"] nmask = event["nmask"] maxnode = event["maxnode"] flags = event["flags"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { start = %s, len = %s, mode = %s, nmask = %s, maxnode = %s, flags = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, start, len, mode, nmask, maxnode, flags,)) def handle_syscall_entry_utimes(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] filename = event["filename"] utimes = event["utimes"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { filename = %s, utimes = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, filename, utimes,)) def handle_syscall_entry_tgkill(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] tgid = event["tgid"] pid = event["pid"] sig = event["sig"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { tgid = %s, pid = %s, sig = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, tgid, pid, sig,)) def handle_syscall_entry_epoll_ctl(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] epfd = event["epfd"] op = event["op"] fd = event["fd"] _event = event["event"] self.print_filter(event, "[%s] 
%s: { cpu_id = %s }, { epfd = %s, op = %s, fd = %s, event = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, epfd, op, fd, _event,)) def handle_syscall_entry_epoll_wait(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] epfd = event["epfd"] maxevents = event["maxevents"] timeout = event["timeout"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { epfd = %s, maxevents = %s, timeout = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, epfd, maxevents, timeout,)) def handle_syscall_entry_exit_group(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] error_code = event["error_code"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { error_code = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, error_code,)) def handle_syscall_entry_clock_nanosleep(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] which_clock = event["which_clock"] flags = event["flags"] rqtp = event["rqtp"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { which_clock = %s, flags = %s, rqtp = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, which_clock, flags, rqtp,)) def handle_syscall_entry_clock_getres(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] which_clock = event["which_clock"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { which_clock = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, which_clock,)) def handle_syscall_entry_clock_gettime(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] which_clock = event["which_clock"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { which_clock = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, which_clock,)) def handle_syscall_entry_clock_settime(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] which_clock = event["which_clock"] tp = event["tp"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { which_clock = %s, tp = %s }" % 
(self.ns_to_hour_nsec(timestamp), event.name, cpu_id, which_clock, tp,)) def handle_syscall_entry_timer_delete(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] timer_id = event["timer_id"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { timer_id = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, timer_id,)) def handle_syscall_entry_timer_getoverrun(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] timer_id = event["timer_id"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { timer_id = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, timer_id,)) def handle_syscall_entry_timer_gettime(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] timer_id = event["timer_id"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { timer_id = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, timer_id,)) def handle_syscall_entry_timer_settime(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] timer_id = event["timer_id"] flags = event["flags"] new_setting = event["new_setting"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { timer_id = %s, flags = %s, new_setting = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, timer_id, flags, new_setting,)) def handle_syscall_entry_timer_create(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] which_clock = event["which_clock"] timer_event_spec = event["timer_event_spec"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { which_clock = %s, timer_event_spec = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, which_clock, timer_event_spec,)) def handle_syscall_entry_fadvise64(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] fd = event["fd"] offset = event["offset"] len = event["len"] advice = event["advice"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { fd = %s, offset = %s, len = %s, advice = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, fd, 
offset, len, advice,)) def handle_syscall_entry_semtimedop(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] semid = event["semid"] tsops = event["tsops"] nsops = event["nsops"] timeout = event["timeout"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { semid = %s, tsops = %s, nsops = %s, timeout = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, semid, tsops, nsops, timeout,)) def handle_syscall_entry_restart_syscall(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id,)) def handle_syscall_entry_set_tid_address(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] tidptr = event["tidptr"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { tidptr = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, tidptr,)) def handle_syscall_entry_getdents64(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] fd = event["fd"] count = event["count"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { fd = %s, count = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, fd, count,)) def handle_syscall_entry_remap_file_pages(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] start = event["start"] size = event["size"] prot = event["prot"] pgoff = event["pgoff"] flags = event["flags"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { start = %s, size = %s, prot = %s, pgoff = %s, flags = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, start, size, prot, pgoff, flags,)) def handle_syscall_entry_epoll_create(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] size = event["size"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { size = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, size,)) def handle_syscall_entry_lookup_dcookie(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] cookie64 = 
event["cookie64"] len = event["len"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { cookie64 = %s, len = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, cookie64, len,)) def handle_syscall_entry_io_cancel(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ctx_id = event["ctx_id"] iocb = event["iocb"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ctx_id = %s, iocb = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ctx_id, iocb,)) def handle_syscall_entry_io_submit(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ctx_id = event["ctx_id"] nr = event["nr"] iocbpp = event["iocbpp"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ctx_id = %s, nr = %s, iocbpp = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ctx_id, nr, iocbpp,)) def handle_syscall_entry_io_getevents(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ctx_id = event["ctx_id"] min_nr = event["min_nr"] nr = event["nr"] timeout = event["timeout"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ctx_id = %s, min_nr = %s, nr = %s, timeout = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ctx_id, min_nr, nr, timeout,)) def handle_syscall_entry_io_destroy(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ctx = event["ctx"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ctx = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ctx,)) def handle_syscall_entry_io_setup(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] nr_events = event["nr_events"] ctxp = event["ctxp"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { nr_events = %s, ctxp = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, nr_events, ctxp,)) def handle_syscall_entry_sched_getaffinity(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] pid = event["pid"] len = event["len"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { pid = %s, len = 
%s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, pid, len,)) def handle_syscall_entry_sched_setaffinity(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] pid = event["pid"] len = event["len"] user_mask_ptr = event["user_mask_ptr"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { pid = %s, len = %s, user_mask_ptr = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, pid, len, user_mask_ptr,)) def handle_syscall_entry_futex(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] uaddr = event["uaddr"] op = event["op"] val = event["val"] utime = event["utime"] uaddr2 = event["uaddr2"] val3 = event["val3"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { uaddr = %s, op = %s, val = %s, utime = %s, uaddr2 = %s, val3 = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, uaddr, op, val, utime, uaddr2, val3,)) def handle_syscall_entry_time(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id,)) def handle_syscall_entry_tkill(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] pid = event["pid"] sig = event["sig"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { pid = %s, sig = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, pid, sig,)) def handle_syscall_entry_fremovexattr(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] fd = event["fd"] name = event["name"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { fd = %s, name = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, fd, name,)) def handle_syscall_entry_lremovexattr(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] pathname = event["pathname"] name = event["name"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { pathname = %s, name = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, pathname, name,)) def 
handle_syscall_entry_removexattr(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] pathname = event["pathname"] name = event["name"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { pathname = %s, name = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, pathname, name,)) def handle_syscall_entry_flistxattr(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] fd = event["fd"] size = event["size"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { fd = %s, size = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, fd, size,)) def handle_syscall_entry_llistxattr(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] pathname = event["pathname"] size = event["size"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { pathname = %s, size = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, pathname, size,)) def handle_syscall_entry_listxattr(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] pathname = event["pathname"] size = event["size"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { pathname = %s, size = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, pathname, size,)) def handle_syscall_entry_fgetxattr(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] fd = event["fd"] name = event["name"] size = event["size"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { fd = %s, name = %s, size = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, fd, name, size,)) def handle_syscall_entry_lgetxattr(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] pathname = event["pathname"] name = event["name"] size = event["size"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { pathname = %s, name = %s, size = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, pathname, name, size,)) def handle_syscall_entry_getxattr(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] pathname = 
event["pathname"] name = event["name"] size = event["size"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { pathname = %s, name = %s, size = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, pathname, name, size,)) def handle_syscall_entry_fsetxattr(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] fd = event["fd"] name = event["name"] value = event["value"] size = event["size"] flags = event["flags"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { fd = %s, name = %s, value = %s, size = %s, flags = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, fd, name, value, size, flags,)) def handle_syscall_entry_lsetxattr(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] pathname = event["pathname"] name = event["name"] value = event["value"] size = event["size"] flags = event["flags"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { pathname = %s, name = %s, value = %s, size = %s, flags = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, pathname, name, value, size, flags,)) def handle_syscall_entry_setxattr(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] pathname = event["pathname"] name = event["name"] value = event["value"] size = event["size"] flags = event["flags"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { pathname = %s, name = %s, value = %s, size = %s, flags = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, pathname, name, value, size, flags,)) def handle_syscall_entry_readahead(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] fd = event["fd"] offset = event["offset"] count = event["count"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { fd = %s, offset = %s, count = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, fd, offset, count,)) def handle_syscall_entry_gettid(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, }" % 
(self.ns_to_hour_nsec(timestamp), event.name, cpu_id,)) def handle_syscall_entry_quotactl(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] cmd = event["cmd"] special = event["special"] id = event["id"] addr = event["addr"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { cmd = %s, special = %s, id = %s, addr = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, cmd, special, id, addr,)) def handle_syscall_entry_delete_module(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] name_user = event["name_user"] flags = event["flags"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { name_user = %s, flags = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, name_user, flags,)) def handle_syscall_entry_init_module(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] umod = event["umod"] len = event["len"] uargs = event["uargs"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { umod = %s, len = %s, uargs = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, umod, len, uargs,)) def handle_syscall_entry_setdomainname(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] name = event["name"] len = event["len"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { name = %s, len = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, name, len,)) def handle_syscall_entry_sethostname(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] name = event["name"] len = event["len"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { name = %s, len = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, name, len,)) def handle_syscall_entry_reboot(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] magic1 = event["magic1"] magic2 = event["magic2"] cmd = event["cmd"] arg = event["arg"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { magic1 = %s, magic2 = %s, cmd = %s, arg = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, 
cpu_id, magic1, magic2, cmd, arg,)) def handle_syscall_entry_swapoff(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] specialfile = event["specialfile"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { specialfile = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, specialfile,)) def handle_syscall_entry_swapon(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] specialfile = event["specialfile"] swap_flags = event["swap_flags"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { specialfile = %s, swap_flags = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, specialfile, swap_flags,)) def handle_syscall_entry_umount(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] name = event["name"] flags = event["flags"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { name = %s, flags = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, name, flags,)) def handle_syscall_entry_mount(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] dev_name = event["dev_name"] dir_name = event["dir_name"] type = event["type"] flags = event["flags"] data = event["data"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { dev_name = %s, dir_name = %s, type = %s, flags = %s, data = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, dev_name, dir_name, type, flags, data,)) def handle_syscall_entry_settimeofday(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] tv = event["tv"] tz = event["tz"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { tv = %s, tz = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, tv, tz,)) def handle_syscall_entry_acct(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] name = event["name"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { name = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, name,)) def handle_syscall_entry_sync(self, event): timestamp = event.timestamp cpu_id = 
event["cpu_id"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id,)) def handle_syscall_entry_chroot(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] filename = event["filename"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { filename = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, filename,)) def handle_syscall_entry_setrlimit(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] resource = event["resource"] rlim = event["rlim"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { resource = %s, rlim = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, resource, rlim,)) def handle_syscall_entry_adjtimex(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] txc_p = event["txc_p"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { txc_p = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, txc_p,)) def handle_syscall_entry_prctl(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] option = event["option"] arg2 = event["arg2"] arg3 = event["arg3"] arg4 = event["arg4"] arg5 = event["arg5"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { option = %s, arg2 = %s, arg3 = %s, arg4 = %s, arg5 = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, option, arg2, arg3, arg4, arg5,)) def handle_syscall_entry_sysctl(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] args = event["args"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { args = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, args,)) def handle_syscall_entry_pivot_root(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] new_root = event["new_root"] put_old = event["put_old"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { new_root = %s, put_old = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, new_root, put_old,)) def handle_syscall_entry_vhangup(self, event): timestamp 
= event.timestamp cpu_id = event["cpu_id"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id,)) def handle_syscall_entry_munlockall(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id,)) def handle_syscall_entry_mlockall(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] flags = event["flags"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { flags = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, flags,)) def handle_syscall_entry_munlock(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] start = event["start"] len = event["len"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { start = %s, len = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, start, len,)) def handle_syscall_entry_mlock(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] start = event["start"] len = event["len"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { start = %s, len = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, start, len,)) def handle_syscall_entry_sched_rr_get_interval(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] pid = event["pid"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { pid = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, pid,)) def handle_syscall_entry_sched_get_priority_min(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] policy = event["policy"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { policy = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, policy,)) def handle_syscall_entry_sched_get_priority_max(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] policy = event["policy"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { policy = %s }" % (self.ns_to_hour_nsec(timestamp), 
event.name, cpu_id, policy,)) def handle_syscall_entry_sched_getscheduler(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] pid = event["pid"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { pid = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, pid,)) def handle_syscall_entry_sched_setscheduler(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] pid = event["pid"] policy = event["policy"] param = event["param"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { pid = %s, policy = %s, param = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, pid, policy, param,)) def handle_syscall_entry_sched_getparam(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] pid = event["pid"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { pid = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, pid,)) def handle_syscall_entry_sched_setparam(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] pid = event["pid"] param = event["param"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { pid = %s, param = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, pid, param,)) def handle_syscall_entry_setpriority(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] which = event["which"] who = event["who"] niceval = event["niceval"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { which = %s, who = %s, niceval = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, which, who, niceval,)) def handle_syscall_entry_getpriority(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] which = event["which"] who = event["who"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { which = %s, who = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, which, who,)) def handle_syscall_entry_sysfs(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] option = event["option"] arg1 = event["arg1"] arg2 = event["arg2"] 
self.print_filter(event, "[%s] %s: { cpu_id = %s }, { option = %s, arg1 = %s, arg2 = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, option, arg1, arg2,)) def handle_syscall_entry_fstatfs(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] fd = event["fd"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { fd = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, fd,)) def handle_syscall_entry_statfs(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] pathname = event["pathname"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { pathname = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, pathname,)) def handle_syscall_entry_ustat(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] dev = event["dev"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { dev = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, dev,)) def handle_syscall_entry_personality(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] personality = event["personality"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { personality = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, personality,)) def handle_syscall_entry_mknod(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] filename = event["filename"] mode = event["mode"] dev = event["dev"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { filename = %s, mode = %s, dev = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, filename, mode, dev,)) def handle_syscall_entry_utime(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] filename = event["filename"] times = event["times"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { filename = %s, times = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, filename, times,)) def handle_syscall_entry_sigaltstack(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] uss = event["uss"] uoss = event["uoss"] 
self.print_filter(event, "[%s] %s: { cpu_id = %s }, { uss = %s, uoss = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, uss, uoss,)) def handle_syscall_entry_rt_sigsuspend(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] unewset = event["unewset"] sigsetsize = event["sigsetsize"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { unewset = %s, sigsetsize = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, unewset, sigsetsize,)) def handle_syscall_entry_rt_sigqueueinfo(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] pid = event["pid"] sig = event["sig"] uinfo = event["uinfo"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { pid = %s, sig = %s, uinfo = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, pid, sig, uinfo,)) def handle_syscall_entry_rt_sigtimedwait(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] uts = event["uts"] sigsetsize = event["sigsetsize"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { uts = %s, sigsetsize = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, uts, sigsetsize,)) def handle_syscall_entry_rt_sigpending(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] sigsetsize = event["sigsetsize"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { sigsetsize = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, sigsetsize,)) def handle_syscall_entry_getsid(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] pid = event["pid"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { pid = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, pid,)) def handle_syscall_entry_setfsgid(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] gid = event["gid"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { gid = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, gid,)) def handle_syscall_entry_setfsuid(self, event): timestamp = event.timestamp cpu_id = 
event["cpu_id"] uid = event["uid"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { uid = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, uid,)) def handle_syscall_entry_getpgid(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] pid = event["pid"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { pid = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, pid,)) def handle_syscall_entry_getresgid(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id,)) def handle_syscall_entry_setresgid(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] rgid = event["rgid"] egid = event["egid"] sgid = event["sgid"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { rgid = %s, egid = %s, sgid = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, rgid, egid, sgid,)) def handle_syscall_entry_getresuid(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id,)) def handle_syscall_entry_setresuid(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ruid = event["ruid"] euid = event["euid"] suid = event["suid"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ruid = %s, euid = %s, suid = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ruid, euid, suid,)) def handle_syscall_entry_setgroups(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] gidsetsize = event["gidsetsize"] grouplist = event["grouplist"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { gidsetsize = %s, grouplist = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, gidsetsize, grouplist,)) def handle_syscall_entry_getgroups(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] gidsetsize = event["gidsetsize"] self.print_filter(event, 
"[%s] %s: { cpu_id = %s }, { gidsetsize = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, gidsetsize,)) def handle_syscall_entry_setregid(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] rgid = event["rgid"] egid = event["egid"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { rgid = %s, egid = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, rgid, egid,)) def handle_syscall_entry_setreuid(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ruid = event["ruid"] euid = event["euid"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ruid = %s, euid = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ruid, euid,)) def handle_syscall_entry_setsid(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id,)) def handle_syscall_entry_getpgrp(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id,)) def handle_syscall_entry_getppid(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id,)) def handle_syscall_entry_setpgid(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] pid = event["pid"] pgid = event["pgid"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { pid = %s, pgid = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, pid, pgid,)) def handle_syscall_entry_getegid(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id,)) def handle_syscall_entry_geteuid(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, }" % 
(self.ns_to_hour_nsec(timestamp), event.name, cpu_id,)) def handle_syscall_entry_setgid(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] gid = event["gid"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { gid = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, gid,)) def handle_syscall_entry_setuid(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] uid = event["uid"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { uid = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, uid,)) def handle_syscall_entry_getgid(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id,)) def handle_syscall_entry_syslog(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] type = event["type"] len = event["len"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { type = %s, len = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, type, len,)) def handle_syscall_entry_getuid(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id,)) def handle_syscall_entry_ptrace(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] request = event["request"] pid = event["pid"] addr = event["addr"] data = event["data"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { request = %s, pid = %s, addr = %s, data = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, request, pid, addr, data,)) def handle_syscall_entry_times(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id,)) def handle_syscall_entry_sysinfo(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, }" % 
(self.ns_to_hour_nsec(timestamp), event.name, cpu_id,)) def handle_syscall_entry_getrusage(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] who = event["who"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { who = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, who,)) def handle_syscall_entry_getrlimit(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] resource = event["resource"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { resource = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, resource,)) def handle_syscall_entry_gettimeofday(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id,)) def handle_syscall_entry_umask(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] mask = event["mask"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { mask = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, mask,)) def handle_syscall_entry_lchown(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] filename = event["filename"] user = event["user"] group = event["group"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { filename = %s, user = %s, group = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, filename, user, group,)) def handle_syscall_entry_fchown(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] fd = event["fd"] user = event["user"] group = event["group"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { fd = %s, user = %s, group = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, fd, user, group,)) def handle_syscall_entry_chown(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] filename = event["filename"] user = event["user"] group = event["group"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { filename = %s, user = %s, group = %s }" % 
(self.ns_to_hour_nsec(timestamp), event.name, cpu_id, filename, user, group,)) def handle_syscall_entry_fchmod(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] fd = event["fd"] mode = event["mode"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { fd = %s, mode = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, fd, mode,)) def handle_syscall_entry_chmod(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] filename = event["filename"] mode = event["mode"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { filename = %s, mode = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, filename, mode,)) def handle_syscall_entry_readlink(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] path = event["path"] bufsiz = event["bufsiz"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { path = %s, bufsiz = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, path, bufsiz,)) def handle_syscall_entry_symlink(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] oldname = event["oldname"] newname = event["newname"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { oldname = %s, newname = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, oldname, newname,)) def handle_syscall_entry_unlink(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] pathname = event["pathname"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { pathname = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, pathname,)) def handle_syscall_entry_link(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] oldname = event["oldname"] newname = event["newname"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { oldname = %s, newname = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, oldname, newname,)) def handle_syscall_entry_creat(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] pathname = event["pathname"] mode = 
event["mode"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { pathname = %s, mode = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, pathname, mode,)) def handle_syscall_entry_rmdir(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] pathname = event["pathname"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { pathname = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, pathname,)) def handle_syscall_entry_mkdir(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] pathname = event["pathname"] mode = event["mode"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { pathname = %s, mode = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, pathname, mode,)) def handle_syscall_entry_rename(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] oldname = event["oldname"] newname = event["newname"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { oldname = %s, newname = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, oldname, newname,)) def handle_syscall_entry_fchdir(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] fd = event["fd"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { fd = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, fd,)) def handle_syscall_entry_chdir(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] filename = event["filename"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { filename = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, filename,)) def handle_syscall_entry_getcwd(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] size = event["size"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { size = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, size,)) def handle_syscall_entry_getdents(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] fd = event["fd"] count = event["count"] self.print_filter(event, "[%s] %s: { cpu_id = 
%s }, { fd = %s, count = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, fd, count,)) def handle_syscall_entry_ftruncate(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] fd = event["fd"] length = event["length"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { fd = %s, length = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, fd, length,)) def handle_syscall_entry_truncate(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] path = event["path"] length = event["length"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { path = %s, length = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, path, length,)) def handle_syscall_entry_fdatasync(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] fd = event["fd"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { fd = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, fd,)) def handle_syscall_entry_fsync(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] fd = event["fd"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { fd = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, fd,)) def handle_syscall_entry_flock(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] fd = event["fd"] cmd = event["cmd"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { fd = %s, cmd = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, fd, cmd,)) def handle_syscall_entry_fcntl(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] fd = event["fd"] cmd = event["cmd"] arg = event["arg"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { fd = %s, cmd = %s, arg = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, fd, cmd, arg,)) def handle_syscall_entry_msgctl(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] msqid = event["msqid"] cmd = event["cmd"] buf = event["buf"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { msqid = %s, cmd = 
%s, buf = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, msqid, cmd, buf,)) def handle_syscall_entry_msgrcv(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] msqid = event["msqid"] msgsz = event["msgsz"] msgtyp = event["msgtyp"] msgflg = event["msgflg"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { msqid = %s, msgsz = %s, msgtyp = %s, msgflg = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, msqid, msgsz, msgtyp, msgflg,)) def handle_syscall_entry_msgsnd(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] msqid = event["msqid"] msgp = event["msgp"] msgsz = event["msgsz"] msgflg = event["msgflg"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { msqid = %s, msgp = %s, msgsz = %s, msgflg = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, msqid, msgp, msgsz, msgflg,)) def handle_syscall_entry_msgget(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] key = event["key"] msgflg = event["msgflg"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { key = %s, msgflg = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, key, msgflg,)) def handle_syscall_entry_shmdt(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] shmaddr = event["shmaddr"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { shmaddr = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, shmaddr,)) def handle_syscall_entry_semctl(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] semid = event["semid"] semnum = event["semnum"] cmd = event["cmd"] arg = event["arg"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { semid = %s, semnum = %s, cmd = %s, arg = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, semid, semnum, cmd, arg,)) def handle_syscall_entry_semop(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] semid = event["semid"] tsops = event["tsops"] nsops = event["nsops"] self.print_filter(event, "[%s] %s: { cpu_id = 
%s }, { semid = %s, tsops = %s, nsops = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, semid, tsops, nsops,)) def handle_syscall_entry_semget(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] key = event["key"] nsems = event["nsems"] semflg = event["semflg"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { key = %s, nsems = %s, semflg = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, key, nsems, semflg,)) def handle_syscall_entry_newuname(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id,)) def handle_syscall_entry_kill(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] pid = event["pid"] sig = event["sig"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { pid = %s, sig = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, pid, sig,)) def handle_syscall_entry_wait4(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] upid = event["upid"] options = event["options"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { upid = %s, options = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, upid, options,)) def handle_syscall_entry_exit(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] error_code = event["error_code"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { error_code = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, error_code,)) def handle_syscall_entry_execve(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] filename = event["filename"] argv = event["argv"] envp = event["envp"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { filename = %s, argv = %s, envp = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, filename, argv, envp,)) def handle_syscall_entry_clone(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] clone_flags = 
event["clone_flags"] newsp = event["newsp"] parent_tid = event["parent_tid"] child_tid = event["child_tid"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { clone_flags = %s, newsp = %s, parent_tid = %s, child_tid = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, clone_flags, newsp, parent_tid, child_tid,)) def handle_syscall_entry_getsockopt(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] fd = event["fd"] level = event["level"] optname = event["optname"] optlen = event["optlen"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { fd = %s, level = %s, optname = %s, optlen = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, fd, level, optname, optlen,)) def handle_syscall_entry_setsockopt(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] fd = event["fd"] level = event["level"] optname = event["optname"] optval = event["optval"] optlen = event["optlen"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { fd = %s, level = %s, optname = %s, optval = %s, optlen = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, fd, level, optname, optval, optlen,)) def handle_syscall_entry_socketpair(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] family = event["family"] type = event["type"] protocol = event["protocol"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { family = %s, type = %s, protocol = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, family, type, protocol,)) def handle_syscall_entry_getpeername(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] fd = event["fd"] usockaddr_len = event["usockaddr_len"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { fd = %s, usockaddr_len = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, fd, usockaddr_len,)) def handle_syscall_entry_getsockname(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] fd = event["fd"] usockaddr_len = event["usockaddr_len"] 
self.print_filter(event, "[%s] %s: { cpu_id = %s }, { fd = %s, usockaddr_len = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, fd, usockaddr_len,)) def handle_syscall_entry_listen(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] fd = event["fd"] backlog = event["backlog"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { fd = %s, backlog = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, fd, backlog,)) def handle_syscall_entry_bind(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] fd = event["fd"] umyaddr = event["umyaddr"] addrlen = event["addrlen"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { fd = %s, umyaddr = %s, addrlen = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, fd, umyaddr, addrlen,)) def handle_syscall_entry_shutdown(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] fd = event["fd"] how = event["how"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { fd = %s, how = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, fd, how,)) def handle_syscall_entry_recvmsg(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] fd = event["fd"] msg = event["msg"] flags = event["flags"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { fd = %s, msg = %s, flags = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, fd, msg, flags,)) def handle_syscall_entry_sendmsg(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] fd = event["fd"] msg = event["msg"] flags = event["flags"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { fd = %s, msg = %s, flags = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, fd, msg, flags,)) def handle_syscall_entry_recvfrom(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] fd = event["fd"] size = event["size"] flags = event["flags"] addr_len = event["addr_len"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { fd = %s, size = %s, flags = %s, 
addr_len = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, fd, size, flags, addr_len,)) def handle_syscall_entry_sendto(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] fd = event["fd"] buff = event["buff"] len = event["len"] flags = event["flags"] addr = event["addr"] addr_len = event["addr_len"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { fd = %s, buff = %s, len = %s, flags = %s, addr = %s, addr_len = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, fd, buff, len, flags, addr, addr_len,)) def handle_syscall_entry_accept(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] fd = event["fd"] upeer_sockaddr = event["upeer_sockaddr"] upeer_addrlen = event["upeer_addrlen"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { fd = %s, upeer_sockaddr = %s, upeer_addrlen = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, fd, upeer_sockaddr, upeer_addrlen,)) def handle_syscall_entry_connect(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] fd = event["fd"] uservaddr = event["uservaddr"] addrlen = event["addrlen"] family = event["family"] dport = event["dport"] _v4addr_length = event["_v4addr_length"] v4addr = event["v4addr"] _v6addr_length = event["_v6addr_length"] v6addr = event["v6addr"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { fd = %s, uservaddr = %s, addrlen = %s, family = %s, dport = %s, _v4addr_length = %s, v4addr = %s, _v6addr_length = %s, v6addr = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, fd, uservaddr, addrlen, family, dport, _v4addr_length, v4addr, _v6addr_length, v6addr,)) def handle_syscall_entry_socket(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] family = event["family"] type = event["type"] protocol = event["protocol"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { family = %s, type = %s, protocol = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, family, type, protocol,)) def 
handle_syscall_entry_sendfile64(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] out_fd = event["out_fd"] in_fd = event["in_fd"] offset = event["offset"] count = event["count"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { out_fd = %s, in_fd = %s, offset = %s, count = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, out_fd, in_fd, offset, count,)) def handle_syscall_entry_getpid(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id,)) def handle_syscall_entry_setitimer(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] which = event["which"] value = event["value"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { which = %s, value = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, which, value,)) def handle_syscall_entry_alarm(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] seconds = event["seconds"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { seconds = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, seconds,)) def handle_syscall_entry_getitimer(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] which = event["which"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { which = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, which,)) def handle_syscall_entry_nanosleep(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] rqtp = event["rqtp"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { rqtp = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, rqtp,)) def handle_syscall_entry_pause(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id,)) def handle_syscall_entry_dup2(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] oldfd = 
event["oldfd"] newfd = event["newfd"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { oldfd = %s, newfd = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, oldfd, newfd,)) def handle_syscall_entry_dup(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] fildes = event["fildes"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { fildes = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, fildes,)) def handle_syscall_entry_shmctl(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] shmid = event["shmid"] cmd = event["cmd"] buf = event["buf"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { shmid = %s, cmd = %s, buf = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, shmid, cmd, buf,)) def handle_syscall_entry_shmat(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] shmid = event["shmid"] shmaddr = event["shmaddr"] shmflg = event["shmflg"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { shmid = %s, shmaddr = %s, shmflg = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, shmid, shmaddr, shmflg,)) def handle_syscall_entry_shmget(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] key = event["key"] size = event["size"] shmflg = event["shmflg"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { key = %s, size = %s, shmflg = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, key, size, shmflg,)) def handle_syscall_entry_madvise(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] start = event["start"] len_in = event["len_in"] behavior = event["behavior"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { start = %s, len_in = %s, behavior = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, start, len_in, behavior,)) def handle_syscall_entry_mincore(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] start = event["start"] len = event["len"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, 
{ start = %s, len = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, start, len,)) def handle_syscall_entry_msync(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] start = event["start"] len = event["len"] flags = event["flags"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { start = %s, len = %s, flags = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, start, len, flags,)) def handle_syscall_entry_mremap(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] addr = event["addr"] old_len = event["old_len"] new_len = event["new_len"] flags = event["flags"] new_addr = event["new_addr"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { addr = %s, old_len = %s, new_len = %s, flags = %s, new_addr = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, addr, old_len, new_len, flags, new_addr,)) def handle_syscall_entry_sched_yield(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id,)) def handle_syscall_entry_select(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] n = event["n"] inp = event["inp"] outp = event["outp"] exp = event["exp"] tvp = event["tvp"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { n = %s, inp = %s, outp = %s, exp = %s, tvp = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, n, inp, outp, exp, tvp,)) def handle_syscall_entry_pipe(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id,)) def handle_syscall_entry_access(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] filename = event["filename"] mode = event["mode"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { filename = %s, mode = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, filename, mode,)) def 
handle_syscall_entry_writev(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] fd = event["fd"] vec = event["vec"] vlen = event["vlen"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { fd = %s, vec = %s, vlen = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, fd, vec, vlen,)) def handle_syscall_entry_readv(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] fd = event["fd"] vec = event["vec"] vlen = event["vlen"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { fd = %s, vec = %s, vlen = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, fd, vec, vlen,)) def handle_syscall_entry_pwrite64(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] fd = event["fd"] buf = event["buf"] count = event["count"] pos = event["pos"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { fd = %s, buf = %s, count = %s, pos = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, fd, buf, count, pos,)) def handle_syscall_entry_pread64(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] fd = event["fd"] count = event["count"] pos = event["pos"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { fd = %s, count = %s, pos = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, fd, count, pos,)) def handle_syscall_entry_ioctl(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] fd = event["fd"] cmd = event["cmd"] arg = event["arg"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { fd = %s, cmd = %s, arg = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, fd, cmd, arg,)) def handle_syscall_entry_rt_sigprocmask(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] how = event["how"] nset = event["nset"] sigsetsize = event["sigsetsize"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { how = %s, nset = %s, sigsetsize = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, how, nset, sigsetsize,)) def 
handle_syscall_entry_rt_sigaction(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] sig = event["sig"] act = event["act"] sigsetsize = event["sigsetsize"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { sig = %s, act = %s, sigsetsize = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, sig, act, sigsetsize,)) def handle_syscall_entry_brk(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] brk = event["brk"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { brk = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, brk,)) def handle_syscall_entry_munmap(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] addr = event["addr"] len = event["len"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { addr = %s, len = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, addr, len,)) def handle_syscall_entry_mprotect(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] start = event["start"] len = event["len"] prot = event["prot"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { start = %s, len = %s, prot = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, start, len, prot,)) def handle_syscall_entry_mmap(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] addr = event["addr"] len = event["len"] prot = event["prot"] flags = event["flags"] fd = event["fd"] offset = event["offset"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { addr = %s, len = %s, prot = %s, flags = %s, fd = %s, offset = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, addr, len, prot, flags, fd, offset,)) def handle_syscall_entry_lseek(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] fd = event["fd"] offset = event["offset"] whence = event["whence"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { fd = %s, offset = %s, whence = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, fd, offset, whence,)) def 
handle_syscall_entry_poll(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ufds = event["ufds"] nfds = event["nfds"] timeout_msecs = event["timeout_msecs"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ufds = %s, nfds = %s, timeout_msecs = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ufds, nfds, timeout_msecs,)) def handle_syscall_entry_newlstat(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] filename = event["filename"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { filename = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, filename,)) def handle_syscall_entry_newfstat(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] fd = event["fd"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { fd = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, fd,)) def handle_syscall_entry_newstat(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] filename = event["filename"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { filename = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, filename,)) def handle_syscall_entry_close(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] fd = event["fd"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { fd = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, fd,)) def handle_syscall_entry_open(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] filename = event["filename"] flags = event["flags"] mode = event["mode"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { filename = %s, flags = %s, mode = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, filename, flags, mode,)) def handle_syscall_entry_write(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] fd = event["fd"] buf = event["buf"] count = event["count"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { fd = %s, buf = %s, count = %s }" % 
(self.ns_to_hour_nsec(timestamp), event.name, cpu_id, fd, buf, count,)) def handle_syscall_entry_read(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] fd = event["fd"] count = event["count"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { fd = %s, count = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, fd, count,)) def handle_syscall_exit_unknown(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] id = event["id"] ret = event["ret"] args = event["args"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { id = %s, ret = %s, args = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, id, ret, args,)) def handle_compat_syscall_exit_unknown(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] id = event["id"] ret = event["ret"] args = event["args"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { id = %s, ret = %s, args = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, id, ret, args,)) def handle_compat_syscall_entry_unknown(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] id = event["id"] args = event["args"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { id = %s, args = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, id, args,)) def handle_syscall_entry_unknown(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] id = event["id"] args = event["args"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { id = %s, args = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, id, args,)) def handle_lttng_logger(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] _msg_length = event["_msg_length"] msg = event["msg"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { _msg_length = %s, msg = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, _msg_length, msg,)) def handle_snd_soc_cache_sync(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] name = event["name"] status = 
event["status"] type = event["type"] id = event["id"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { name = %s, status = %s, type = %s, id = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, name, status, type, id,)) def handle_snd_soc_jack_notify(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] name = event["name"] val = event["val"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { name = %s, val = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, name, val,)) def handle_snd_soc_jack_report(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] name = event["name"] mask = event["mask"] val = event["val"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { name = %s, mask = %s, val = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, name, mask, val,)) def handle_snd_soc_jack_irq(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] name = event["name"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { name = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, name,)) def handle_snd_soc_dapm_connected(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] paths = event["paths"] stream = event["stream"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { paths = %s, stream = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, paths, stream,)) def handle_snd_soc_dapm_input_path(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] wname = event["wname"] pname = event["pname"] psname = event["psname"] path_source = event["path_source"] path_connect = event["path_connect"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { wname = %s, pname = %s, psname = %s, path_source = %s, path_connect = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, wname, pname, psname, path_source, path_connect,)) def handle_snd_soc_dapm_output_path(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] wname = event["wname"] 
pname = event["pname"] psname = event["psname"] path_sink = event["path_sink"] path_connect = event["path_connect"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { wname = %s, pname = %s, psname = %s, path_sink = %s, path_connect = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, wname, pname, psname, path_sink, path_connect,)) def handle_snd_soc_dapm_walk_done(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] name = event["name"] power_checks = event["power_checks"] path_checks = event["path_checks"] neighbour_checks = event["neighbour_checks"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { name = %s, power_checks = %s, path_checks = %s, neighbour_checks = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, name, power_checks, path_checks, neighbour_checks,)) def handle_snd_soc_dapm_widget_event_done(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] name = event["name"] val = event["val"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { name = %s, val = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, name, val,)) def handle_snd_soc_dapm_widget_event_start(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] name = event["name"] val = event["val"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { name = %s, val = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, name, val,)) def handle_snd_soc_dapm_widget_power(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] name = event["name"] val = event["val"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { name = %s, val = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, name, val,)) def handle_snd_soc_dapm_done(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] name = event["name"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { name = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, name,)) def handle_snd_soc_dapm_start(self, event): 
timestamp = event.timestamp cpu_id = event["cpu_id"] name = event["name"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { name = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, name,)) def handle_snd_soc_bias_level_done(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] name = event["name"] val = event["val"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { name = %s, val = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, name, val,)) def handle_snd_soc_bias_level_start(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] name = event["name"] val = event["val"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { name = %s, val = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, name, val,)) def handle_snd_soc_preg_read(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] name = event["name"] id = event["id"] reg = event["reg"] val = event["val"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { name = %s, id = %s, reg = %s, val = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, name, id, reg, val,)) def handle_snd_soc_preg_write(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] name = event["name"] id = event["id"] reg = event["reg"] val = event["val"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { name = %s, id = %s, reg = %s, val = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, name, id, reg, val,)) def handle_snd_soc_reg_read(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] name = event["name"] id = event["id"] reg = event["reg"] val = event["val"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { name = %s, id = %s, reg = %s, val = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, name, id, reg, val,)) def handle_snd_soc_reg_write(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] name = event["name"] id = event["id"] reg = event["reg"] val = event["val"] 
self.print_filter(event, "[%s] %s: { cpu_id = %s }, { name = %s, id = %s, reg = %s, val = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, name, id, reg, val,)) def handle_block_rq_remap(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] dev = event["dev"] sector = event["sector"] nr_sector = event["nr_sector"] old_dev = event["old_dev"] old_sector = event["old_sector"] rwbs = event["rwbs"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { dev = %s, sector = %s, nr_sector = %s, old_dev = %s, old_sector = %s, rwbs = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, dev, sector, nr_sector, old_dev, old_sector, rwbs,)) def handle_block_bio_remap(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] dev = event["dev"] sector = event["sector"] nr_sector = event["nr_sector"] old_dev = event["old_dev"] old_sector = event["old_sector"] rwbs = event["rwbs"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { dev = %s, sector = %s, nr_sector = %s, old_dev = %s, old_sector = %s, rwbs = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, dev, sector, nr_sector, old_dev, old_sector, rwbs,)) def handle_block_split(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] dev = event["dev"] sector = event["sector"] new_sector = event["new_sector"] rwbs = event["rwbs"] tid = event["tid"] comm = event["comm"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { dev = %s, sector = %s, new_sector = %s, rwbs = %s, tid = %s, comm = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, dev, sector, new_sector, rwbs, tid, comm,)) def handle_block_unplug(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] nr_rq = event["nr_rq"] tid = event["tid"] comm = event["comm"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { nr_rq = %s, tid = %s, comm = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, nr_rq, tid, comm,)) def handle_block_plug(self, event): timestamp = 
event.timestamp cpu_id = event["cpu_id"] tid = event["tid"] comm = event["comm"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { tid = %s, comm = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, tid, comm,)) def handle_block_sleeprq(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] dev = event["dev"] sector = event["sector"] nr_sector = event["nr_sector"] rwbs = event["rwbs"] tid = event["tid"] comm = event["comm"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { dev = %s, sector = %s, nr_sector = %s, rwbs = %s, tid = %s, comm = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, dev, sector, nr_sector, rwbs, tid, comm,)) def handle_block_getrq(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] dev = event["dev"] sector = event["sector"] nr_sector = event["nr_sector"] rwbs = event["rwbs"] tid = event["tid"] comm = event["comm"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { dev = %s, sector = %s, nr_sector = %s, rwbs = %s, tid = %s, comm = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, dev, sector, nr_sector, rwbs, tid, comm,)) def handle_block_bio_queue(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] dev = event["dev"] sector = event["sector"] nr_sector = event["nr_sector"] rwbs = event["rwbs"] tid = event["tid"] comm = event["comm"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { dev = %s, sector = %s, nr_sector = %s, rwbs = %s, tid = %s, comm = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, dev, sector, nr_sector, rwbs, tid, comm,)) def handle_block_bio_frontmerge(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] dev = event["dev"] sector = event["sector"] nr_sector = event["nr_sector"] rwbs = event["rwbs"] tid = event["tid"] comm = event["comm"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { dev = %s, sector = %s, nr_sector = %s, rwbs = %s, tid = %s, comm = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, 
cpu_id, dev, sector, nr_sector, rwbs, tid, comm,)) def handle_block_bio_backmerge(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] dev = event["dev"] sector = event["sector"] nr_sector = event["nr_sector"] rwbs = event["rwbs"] tid = event["tid"] comm = event["comm"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { dev = %s, sector = %s, nr_sector = %s, rwbs = %s, tid = %s, comm = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, dev, sector, nr_sector, rwbs, tid, comm,)) def handle_block_bio_complete(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] dev = event["dev"] sector = event["sector"] nr_sector = event["nr_sector"] error = event["error"] rwbs = event["rwbs"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { dev = %s, sector = %s, nr_sector = %s, error = %s, rwbs = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, dev, sector, nr_sector, error, rwbs,)) def handle_block_bio_bounce(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] dev = event["dev"] sector = event["sector"] nr_sector = event["nr_sector"] rwbs = event["rwbs"] tid = event["tid"] comm = event["comm"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { dev = %s, sector = %s, nr_sector = %s, rwbs = %s, tid = %s, comm = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, dev, sector, nr_sector, rwbs, tid, comm,)) def handle_block_rq_issue(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] dev = event["dev"] sector = event["sector"] nr_sector = event["nr_sector"] bytes = event["bytes"] rwbs = event["rwbs"] tid = event["tid"] comm = event["comm"] _cmd_length = event["_cmd_length"] cmd = event["cmd"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { dev = %s, sector = %s, nr_sector = %s, bytes = %s, rwbs = %s, tid = %s, comm = %s, _cmd_length = %s, cmd = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, dev, sector, nr_sector, bytes, rwbs, tid, comm, _cmd_length, cmd,)) def 
handle_block_rq_insert(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] dev = event["dev"] sector = event["sector"] nr_sector = event["nr_sector"] bytes = event["bytes"] rwbs = event["rwbs"] tid = event["tid"] comm = event["comm"] _cmd_length = event["_cmd_length"] cmd = event["cmd"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { dev = %s, sector = %s, nr_sector = %s, bytes = %s, rwbs = %s, tid = %s, comm = %s, _cmd_length = %s, cmd = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, dev, sector, nr_sector, bytes, rwbs, tid, comm, _cmd_length, cmd,)) def handle_block_rq_complete(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] dev = event["dev"] sector = event["sector"] nr_sector = event["nr_sector"] errors = event["errors"] rwbs = event["rwbs"] _cmd_length = event["_cmd_length"] cmd = event["cmd"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { dev = %s, sector = %s, nr_sector = %s, errors = %s, rwbs = %s, _cmd_length = %s, cmd = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, dev, sector, nr_sector, errors, rwbs, _cmd_length, cmd,)) def handle_block_rq_requeue(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] dev = event["dev"] sector = event["sector"] nr_sector = event["nr_sector"] errors = event["errors"] rwbs = event["rwbs"] _cmd_length = event["_cmd_length"] cmd = event["cmd"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { dev = %s, sector = %s, nr_sector = %s, errors = %s, rwbs = %s, _cmd_length = %s, cmd = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, dev, sector, nr_sector, errors, rwbs, _cmd_length, cmd,)) def handle_block_rq_abort(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] dev = event["dev"] sector = event["sector"] nr_sector = event["nr_sector"] errors = event["errors"] rwbs = event["rwbs"] _cmd_length = event["_cmd_length"] cmd = event["cmd"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { dev = %s, sector = 
%s, nr_sector = %s, errors = %s, rwbs = %s, _cmd_length = %s, cmd = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, dev, sector, nr_sector, errors, rwbs, _cmd_length, cmd,)) def handle_block_dirty_buffer(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] dev = event["dev"] sector = event["sector"] size = event["size"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { dev = %s, sector = %s, size = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, dev, sector, size,)) def handle_block_touch_buffer(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] dev = event["dev"] sector = event["sector"] size = event["size"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { dev = %s, sector = %s, size = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, dev, sector, size,)) def handle_mm_compaction_migratepages(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] nr_migrated = event["nr_migrated"] nr_failed = event["nr_failed"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { nr_migrated = %s, nr_failed = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, nr_migrated, nr_failed,)) def handle_mm_compaction_isolate_freepages(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] nr_scanned = event["nr_scanned"] nr_taken = event["nr_taken"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { nr_scanned = %s, nr_taken = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, nr_scanned, nr_taken,)) def handle_mm_compaction_isolate_migratepages(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] nr_scanned = event["nr_scanned"] nr_taken = event["nr_taken"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { nr_scanned = %s, nr_taken = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, nr_scanned, nr_taken,)) def handle_gpio_value(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] gpio = event["gpio"] get = 
event["get"] value = event["value"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { gpio = %s, get = %s, value = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, gpio, get, value,)) def handle_gpio_direction(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] gpio = event["gpio"] _in = event["in"] err = event["err"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { gpio = %s, in = %s, err = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, gpio, _in, err,)) def handle_softirq_raise(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] vec = event["vec"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { vec = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, vec,)) def handle_softirq_exit(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] vec = event["vec"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { vec = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, vec,)) def handle_softirq_entry(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] vec = event["vec"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { vec = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, vec,)) def handle_irq_handler_exit(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] irq = event["irq"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { irq = %s, ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, irq, ret,)) def handle_irq_handler_entry(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] irq = event["irq"] name = event["name"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { irq = %s, name = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, irq, name,)) def handle_jbd2_write_superblock(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] dev = event["dev"] write_op = event["write_op"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { dev = 
%s, write_op = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, dev, write_op,)) def handle_jbd2_update_log_tail(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] dev = event["dev"] tail_sequence = event["tail_sequence"] first_tid = event["first_tid"] block_nr = event["block_nr"] freed = event["freed"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { dev = %s, tail_sequence = %s, first_tid = %s, block_nr = %s, freed = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, dev, tail_sequence, first_tid, block_nr, freed,)) def handle_jbd2_checkpoint_stats(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] dev = event["dev"] tid = event["tid"] chp_time = event["chp_time"] forced_to_close = event["forced_to_close"] written = event["written"] dropped = event["dropped"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { dev = %s, tid = %s, chp_time = %s, forced_to_close = %s, written = %s, dropped = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, dev, tid, chp_time, forced_to_close, written, dropped,)) def handle_jbd2_run_stats(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] dev = event["dev"] tid = event["tid"] wait = event["wait"] running = event["running"] locked = event["locked"] flushing = event["flushing"] logging = event["logging"] handle_count = event["handle_count"] blocks = event["blocks"] blocks_logged = event["blocks_logged"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { dev = %s, tid = %s, wait = %s, running = %s, locked = %s, flushing = %s, logging = %s, handle_count = %s, blocks = %s, blocks_logged = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, dev, tid, wait, running, locked, flushing, logging, handle_count, blocks, blocks_logged,)) def handle_jbd2_submit_inode_data(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] dev = event["dev"] ino = event["ino"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { dev = %s, ino = 
%s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, dev, ino,)) def handle_jbd2_end_commit(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] dev = event["dev"] sync_commit = event["sync_commit"] transaction = event["transaction"] head = event["head"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { dev = %s, sync_commit = %s, transaction = %s, head = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, dev, sync_commit, transaction, head,)) def handle_jbd2_drop_transaction(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] dev = event["dev"] sync_commit = event["sync_commit"] transaction = event["transaction"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { dev = %s, sync_commit = %s, transaction = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, dev, sync_commit, transaction,)) def handle_jbd2_commit_logging(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] dev = event["dev"] sync_commit = event["sync_commit"] transaction = event["transaction"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { dev = %s, sync_commit = %s, transaction = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, dev, sync_commit, transaction,)) def handle_jbd2_commit_flushing(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] dev = event["dev"] sync_commit = event["sync_commit"] transaction = event["transaction"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { dev = %s, sync_commit = %s, transaction = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, dev, sync_commit, transaction,)) def handle_jbd2_commit_locking(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] dev = event["dev"] sync_commit = event["sync_commit"] transaction = event["transaction"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { dev = %s, sync_commit = %s, transaction = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, dev, sync_commit, 
transaction,)) def handle_jbd2_start_commit(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] dev = event["dev"] sync_commit = event["sync_commit"] transaction = event["transaction"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { dev = %s, sync_commit = %s, transaction = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, dev, sync_commit, transaction,)) def handle_jbd2_checkpoint(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] dev = event["dev"] result = event["result"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { dev = %s, result = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, dev, result,)) def handle_mm_page_alloc_extfrag(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] page = event["page"] alloc_order = event["alloc_order"] fallback_order = event["fallback_order"] alloc_migratetype = event["alloc_migratetype"] fallback_migratetype = event["fallback_migratetype"] change_ownership = event["change_ownership"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { page = %s, alloc_order = %s, fallback_order = %s, alloc_migratetype = %s, fallback_migratetype = %s, change_ownership = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, page, alloc_order, fallback_order, alloc_migratetype, fallback_migratetype, change_ownership,)) def handle_mm_page_pcpu_drain(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] page = event["page"] order = event["order"] migratetype = event["migratetype"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { page = %s, order = %s, migratetype = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, page, order, migratetype,)) def handle_mm_page_alloc_zone_locked(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] page = event["page"] order = event["order"] migratetype = event["migratetype"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { page = %s, order = %s, migratetype = %s }" % 
(self.ns_to_hour_nsec(timestamp), event.name, cpu_id, page, order, migratetype,)) def handle_mm_page_alloc(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] page = event["page"] order = event["order"] gfp_flags = event["gfp_flags"] migratetype = event["migratetype"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { page = %s, order = %s, gfp_flags = %s, migratetype = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, page, order, gfp_flags, migratetype,)) def handle_mm_page_free_batched(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] page = event["page"] cold = event["cold"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { page = %s, cold = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, page, cold,)) def handle_mm_page_free(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] page = event["page"] order = event["order"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { page = %s, order = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, page, order,)) def handle_kmem_cache_free(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] call_site = event["call_site"] ptr = event["ptr"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { call_site = %s, ptr = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, call_site, ptr,)) def handle_kmem_kfree(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] call_site = event["call_site"] ptr = event["ptr"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { call_site = %s, ptr = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, call_site, ptr,)) def handle_kmem_cache_alloc_node(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] call_site = event["call_site"] ptr = event["ptr"] bytes_req = event["bytes_req"] bytes_alloc = event["bytes_alloc"] gfp_flags = event["gfp_flags"] node = event["node"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { call_site = %s, 
ptr = %s, bytes_req = %s, bytes_alloc = %s, gfp_flags = %s, node = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node,)) def handle_kmem_kmalloc_node(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] call_site = event["call_site"] ptr = event["ptr"] bytes_req = event["bytes_req"] bytes_alloc = event["bytes_alloc"] gfp_flags = event["gfp_flags"] node = event["node"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { call_site = %s, ptr = %s, bytes_req = %s, bytes_alloc = %s, gfp_flags = %s, node = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node,)) def handle_kmem_cache_alloc(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] call_site = event["call_site"] ptr = event["ptr"] bytes_req = event["bytes_req"] bytes_alloc = event["bytes_alloc"] gfp_flags = event["gfp_flags"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { call_site = %s, ptr = %s, bytes_req = %s, bytes_alloc = %s, gfp_flags = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, call_site, ptr, bytes_req, bytes_alloc, gfp_flags,)) def handle_kmem_kmalloc(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] call_site = event["call_site"] ptr = event["ptr"] bytes_req = event["bytes_req"] bytes_alloc = event["bytes_alloc"] gfp_flags = event["gfp_flags"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { call_site = %s, ptr = %s, bytes_req = %s, bytes_alloc = %s, gfp_flags = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, call_site, ptr, bytes_req, bytes_alloc, gfp_flags,)) def handle_kvm_async_pf_completed(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] address = event["address"] gva = event["gva"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { address = %s, gva = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, address, gva,)) def 
# --- kvm / module / net / power tracepoint pretty-printers ------------------
# Same generated shape as the surrounding handlers: unpack event fields into
# locals, then self.print_filter(event, "...%s..." % (...)) one line per event.
# NOTE(review): some handlers here (power_domain_target, power_clock_*,
# power_cpu_frequency, power_cpu_idle) read event["cpu_id"] twice into the
# same local, and a few shadow the builtins `type` and `len` with field
# values — harmless within these tiny bodies, but worth knowing when reading.
# NOTE(review): span starts mid-`def` (the `def ` keyword sits at the end of
# the previous span) — code left byte-identical.
handle_kvm_async_pf_ready(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] token = event["token"] gva = event["gva"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { token = %s, gva = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, token, gva,)) def handle_kvm_async_pf_not_present(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] token = event["token"] gva = event["gva"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { token = %s, gva = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, token, gva,)) def handle_kvm_async_pf_doublefault(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] gva = event["gva"] gfn = event["gfn"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { gva = %s, gfn = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, gva, gfn,)) def handle_kvm_try_async_get_page(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] gva = event["gva"] gfn = event["gfn"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { gva = %s, gfn = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, gva, gfn,)) def handle_kvm_age_page(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] hva = event["hva"] gfn = event["gfn"] referenced = event["referenced"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { hva = %s, gfn = %s, referenced = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, hva, gfn, referenced,)) def handle_kvm_fpu(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] load = event["load"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { load = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, load,)) def handle_kvm_mmio(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] type = event["type"] len = event["len"] gpa = event["gpa"] val = event["val"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { type = %s, len = %s, gpa = %s, val = %s }" % 
(self.ns_to_hour_nsec(timestamp), event.name, cpu_id, type, len, gpa, val,)) def handle_kvm_ack_irq(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] irqchip = event["irqchip"] pin = event["pin"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { irqchip = %s, pin = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, irqchip, pin,)) def handle_kvm_msi_set_irq(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] address = event["address"] data = event["data"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { address = %s, data = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, address, data,)) def handle_kvm_ioapic_set_irq(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] e = event["e"] pin = event["pin"] coalesced = event["coalesced"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { e = %s, pin = %s, coalesced = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, e, pin, coalesced,)) def handle_kvm_set_irq(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] gsi = event["gsi"] level = event["level"] irq_source_id = event["irq_source_id"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { gsi = %s, level = %s, irq_source_id = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, gsi, level, irq_source_id,)) def handle_kvm_userspace_exit(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] reason = event["reason"] errno = event["errno"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { reason = %s, errno = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, reason, errno,)) def handle_module_request(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ip = event["ip"] wait = event["wait"] name = event["name"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ip = %s, wait = %s, name = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ip, wait, name,)) def handle_module_put(self, event): 
timestamp = event.timestamp cpu_id = event["cpu_id"] ip = event["ip"] refcnt = event["refcnt"] name = event["name"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ip = %s, refcnt = %s, name = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ip, refcnt, name,)) def handle_module_get(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] ip = event["ip"] refcnt = event["refcnt"] name = event["name"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { ip = %s, refcnt = %s, name = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, ip, refcnt, name,)) def handle_module_free(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] name = event["name"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { name = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, name,)) def handle_module_load(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] taints = event["taints"] name = event["name"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { taints = %s, name = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, taints, name,)) def handle_napi_poll(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] napi = event["napi"] dev_name = event["dev_name"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { napi = %s, dev_name = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, napi, dev_name,)) def handle_netif_rx(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] skbaddr = event["skbaddr"] len = event["len"] name = event["name"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { skbaddr = %s, len = %s, name = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, skbaddr, len, name,)) def handle_netif_receive_skb(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] skbaddr = event["skbaddr"] len = event["len"] name = event["name"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { skbaddr = %s, len = %s, name = %s }" 
% (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, skbaddr, len, name,)) def handle_net_dev_queue(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] skbaddr = event["skbaddr"] len = event["len"] name = event["name"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { skbaddr = %s, len = %s, name = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, skbaddr, len, name,)) def handle_net_dev_xmit(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] skbaddr = event["skbaddr"] len = event["len"] rc = event["rc"] name = event["name"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { skbaddr = %s, len = %s, rc = %s, name = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, skbaddr, len, rc, name,)) def handle_power_domain_target(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] name = event["name"] state = event["state"] cpu_id = event["cpu_id"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { name = %s, state = %s, cpu_id = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, name, state, cpu_id,)) def handle_power_clock_set_rate(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] name = event["name"] state = event["state"] cpu_id = event["cpu_id"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { name = %s, state = %s, cpu_id = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, name, state, cpu_id,)) def handle_power_clock_disable(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] name = event["name"] state = event["state"] cpu_id = event["cpu_id"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { name = %s, state = %s, cpu_id = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, name, state, cpu_id,)) def handle_power_clock_enable(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] name = event["name"] state = event["state"] cpu_id = event["cpu_id"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { 
name = %s, state = %s, cpu_id = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, name, state, cpu_id,)) def handle_power_wakeup_source_deactivate(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] name = event["name"] state = event["state"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { name = %s, state = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, name, state,)) def handle_power_wakeup_source_activate(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] name = event["name"] state = event["state"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { name = %s, state = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, name, state,)) def handle_power_machine_suspend(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] state = event["state"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { state = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, state,)) def handle_power_cpu_frequency(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] state = event["state"] cpu_id = event["cpu_id"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { state = %s, cpu_id = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, state, cpu_id,)) def handle_power_cpu_idle(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] state = event["state"] cpu_id = event["cpu_id"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { state = %s, cpu_id = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, state, cpu_id,)) def handle_console(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] _msg_length = event["_msg_length"] msg = event["msg"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { _msg_length = %s, msg = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, _msg_length, msg,)) def handle_random_extract_entropy_user(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] pool_name = 
event["pool_name"] nbytes = event["nbytes"] entropy_count = event["entropy_count"] IP = event["IP"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { pool_name = %s, nbytes = %s, entropy_count = %s, IP = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, pool_name, nbytes, entropy_count, IP,)) def handle_random_extract_entropy(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] pool_name = event["pool_name"] nbytes = event["nbytes"] entropy_count = event["entropy_count"] IP = event["IP"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { pool_name = %s, nbytes = %s, entropy_count = %s, IP = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, pool_name, nbytes, entropy_count, IP,)) def handle_random_get_random_bytes(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] nbytes = event["nbytes"] IP = event["IP"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { nbytes = %s, IP = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, nbytes, IP,)) def handle_random_credit_entropy_bits(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] pool_name = event["pool_name"] bits = event["bits"] entropy_count = event["entropy_count"] entropy_total = event["entropy_total"] IP = event["IP"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { pool_name = %s, bits = %s, entropy_count = %s, entropy_total = %s, IP = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, pool_name, bits, entropy_count, entropy_total, IP,)) def handle_random_mix_pool_bytes_nolock(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] pool_name = event["pool_name"] bytes = event["bytes"] IP = event["IP"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { pool_name = %s, bytes = %s, IP = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, pool_name, bytes, IP,)) def handle_random_mix_pool_bytes(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] pool_name = event["pool_name"] 
bytes = event["bytes"] IP = event["IP"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { pool_name = %s, bytes = %s, IP = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, pool_name, bytes, IP,)) def handle_rcu_utilization(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] s = event["s"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { s = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, s,)) def handle_regmap_cache_bypass(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] name = event["name"] flag = event["flag"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { name = %s, flag = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, name, flag,)) def handle_regmap_cache_only(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] name = event["name"] flag = event["flag"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { name = %s, flag = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, name, flag,)) def handle_regcache_sync(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] name = event["name"] status = event["status"] type = event["type"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { name = %s, status = %s, type = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, name, status, type,)) def handle_regmap_hw_write_done(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] name = event["name"] reg = event["reg"] count = event["count"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { name = %s, reg = %s, count = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, name, reg, count,)) def handle_regmap_hw_write_start(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] name = event["name"] reg = event["reg"] count = event["count"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { name = %s, reg = %s, count = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, name, reg, 
count,)) def handle_regmap_hw_read_done(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] name = event["name"] reg = event["reg"] count = event["count"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { name = %s, reg = %s, count = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, name, reg, count,)) def handle_regmap_hw_read_start(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] name = event["name"] reg = event["reg"] count = event["count"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { name = %s, reg = %s, count = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, name, reg, count,)) def handle_regmap_reg_read_cache(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] name = event["name"] reg = event["reg"] val = event["val"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { name = %s, reg = %s, val = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, name, reg, val,)) def handle_regmap_reg_read(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] name = event["name"] reg = event["reg"] val = event["val"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { name = %s, reg = %s, val = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, name, reg, val,)) def handle_regmap_reg_write(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] name = event["name"] reg = event["reg"] val = event["val"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { name = %s, reg = %s, val = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, name, reg, val,)) def handle_regulator_set_voltage_complete(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] name = event["name"] val = event["val"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { name = %s, val = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, name, val,)) def handle_regulator_set_voltage(self, event): timestamp = event.timestamp cpu_id = 
event["cpu_id"] name = event["name"] min = event["min"] max = event["max"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { name = %s, min = %s, max = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, name, min, max,)) def handle_regulator_disable_complete(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] name = event["name"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { name = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, name,)) def handle_regulator_disable(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] name = event["name"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { name = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, name,)) def handle_regulator_enable_complete(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] name = event["name"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { name = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, name,)) def handle_regulator_enable_delay(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] name = event["name"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { name = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, name,)) def handle_regulator_enable(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] name = event["name"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { name = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, name,)) def handle_rpm_return_int(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] name = event["name"] ip = event["ip"] ret = event["ret"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { name = %s, ip = %s, ret = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, name, ip, ret,)) def handle_rpm_idle(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] name = event["name"] flags = event["flags"] usage_count = event["usage_count"] disable_depth = 
event["disable_depth"] runtime_auto = event["runtime_auto"] request_pending = event["request_pending"] irq_safe = event["irq_safe"] child_count = event["child_count"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { name = %s, flags = %s, usage_count = %s, disable_depth = %s, runtime_auto = %s, request_pending = %s, irq_safe = %s, child_count = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, name, flags, usage_count, disable_depth, runtime_auto, request_pending, irq_safe, child_count,)) def handle_rpm_resume(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] name = event["name"] flags = event["flags"] usage_count = event["usage_count"] disable_depth = event["disable_depth"] runtime_auto = event["runtime_auto"] request_pending = event["request_pending"] irq_safe = event["irq_safe"] child_count = event["child_count"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { name = %s, flags = %s, usage_count = %s, disable_depth = %s, runtime_auto = %s, request_pending = %s, irq_safe = %s, child_count = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, name, flags, usage_count, disable_depth, runtime_auto, request_pending, irq_safe, child_count,)) def handle_rpm_suspend(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] name = event["name"] flags = event["flags"] usage_count = event["usage_count"] disable_depth = event["disable_depth"] runtime_auto = event["runtime_auto"] request_pending = event["request_pending"] irq_safe = event["irq_safe"] child_count = event["child_count"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { name = %s, flags = %s, usage_count = %s, disable_depth = %s, runtime_auto = %s, request_pending = %s, irq_safe = %s, child_count = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, name, flags, usage_count, disable_depth, runtime_auto, request_pending, irq_safe, child_count,)) def handle_sched_pi_setprio(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] 
comm = event["comm"] tid = event["tid"] oldprio = event["oldprio"] newprio = event["newprio"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { comm = %s, tid = %s, oldprio = %s, newprio = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, comm, tid, oldprio, newprio,)) def handle_sched_stat_runtime(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] comm = event["comm"] tid = event["tid"] runtime = event["runtime"] vruntime = event["vruntime"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { comm = %s, tid = %s, runtime = %s, vruntime = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, comm, tid, runtime, vruntime,)) def handle_sched_stat_blocked(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] comm = event["comm"] tid = event["tid"] delay = event["delay"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { comm = %s, tid = %s, delay = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, comm, tid, delay,)) def handle_sched_stat_iowait(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] comm = event["comm"] tid = event["tid"] delay = event["delay"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { comm = %s, tid = %s, delay = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, comm, tid, delay,)) def handle_sched_stat_sleep(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] comm = event["comm"] tid = event["tid"] delay = event["delay"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { comm = %s, tid = %s, delay = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, comm, tid, delay,)) def handle_sched_stat_wait(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] comm = event["comm"] tid = event["tid"] delay = event["delay"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { comm = %s, tid = %s, delay = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, comm, tid, delay,)) def 
handle_sched_process_exec(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] filename = event["filename"] tid = event["tid"] old_tid = event["old_tid"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { filename = %s, tid = %s, old_tid = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, filename, tid, old_tid,)) def handle_sched_process_fork(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] parent_comm = event["parent_comm"] parent_tid = event["parent_tid"] parent_pid = event["parent_pid"] child_comm = event["child_comm"] child_tid = event["child_tid"] child_pid = event["child_pid"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { parent_comm = %s, parent_tid = %s, parent_pid = %s, child_comm = %s, child_tid = %s, child_pid = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, parent_comm, parent_tid, parent_pid, child_comm, child_tid, child_pid,)) def handle_sched_process_wait(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] comm = event["comm"] tid = event["tid"] prio = event["prio"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { comm = %s, tid = %s, prio = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, comm, tid, prio,)) def handle_sched_wait_task(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] comm = event["comm"] tid = event["tid"] prio = event["prio"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { comm = %s, tid = %s, prio = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, comm, tid, prio,)) def handle_sched_process_exit(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] comm = event["comm"] tid = event["tid"] prio = event["prio"] self.print_filter(event, "[%s] %s: { cpu_id = %s }, { comm = %s, tid = %s, prio = %s }" % (self.ns_to_hour_nsec(timestamp), event.name, cpu_id, comm, tid, prio,)) def handle_sched_process_free(self, event): timestamp = event.timestamp cpu_id = event["cpu_id"] comm = 
def print_event_payload(self, event, fields):
    """Format one event as a babeltrace-style text line and emit it.

    This is the single formatter shared by every generated handle_* method
    below; the generated code used to inline the same boilerplate in each
    handler (and shadowed builtins like ``id``/``len``/``type``/``file``
    with its locals).  Output is byte-identical to the generated form.

    event: a babeltrace event — presumably exposes ``event.timestamp``,
        ``event.name`` and mapping access to payload fields (TODO confirm
        against the babeltrace bindings in use).
    fields: ordered payload field names to print after ``cpu_id``.  An
        empty sequence reproduces the ``{ cpu_id = %s }, }`` form that the
        statedump start/end events use.
    """
    payload = ", ".join("%s = %s" % (name, event[name]) for name in fields)
    if payload:
        line = "[%s] %s: { cpu_id = %s }, { %s }" % (
            self.ns_to_hour_nsec(event.timestamp), event.name,
            event["cpu_id"], payload)
    else:
        line = "[%s] %s: { cpu_id = %s }, }" % (
            self.ns_to_hour_nsec(event.timestamp), event.name,
            event["cpu_id"])
    self.print_filter(event, line)

# Generated per-event handlers.  Each one declares the ordered payload
# field list for its tracepoint; formatting is delegated to
# print_event_payload() above.

def handle_sched_migrate_task(self, event):
    self.print_event_payload(event, ("comm", "tid", "prio", "orig_cpu",
                                     "dest_cpu"))

def handle_sched_switch(self, event):
    self.print_event_payload(event, ("prev_comm", "prev_tid", "prev_prio",
                                     "prev_state", "next_comm", "next_tid",
                                     "next_prio"))

def handle_sched_wakeup_new(self, event):
    self.print_event_payload(event, ("comm", "tid", "prio", "success",
                                     "target_cpu"))

def handle_sched_wakeup(self, event):
    self.print_event_payload(event, ("comm", "tid", "prio", "success",
                                     "target_cpu"))

def handle_sched_kthread_stop_ret(self, event):
    self.print_event_payload(event, ("ret",))

def handle_sched_kthread_stop(self, event):
    self.print_event_payload(event, ("comm", "tid"))

def handle_scsi_eh_wakeup(self, event):
    self.print_event_payload(event, ("host_no",))

def handle_scsi_dispatch_cmd_timeout(self, event):
    self.print_event_payload(event, ("host_no", "channel", "id", "lun",
                                     "result", "opcode", "cmd_len",
                                     "data_sglen", "prot_sglen", "prot_op",
                                     "_cmnd_length", "cmnd"))

def handle_scsi_dispatch_cmd_done(self, event):
    self.print_event_payload(event, ("host_no", "channel", "id", "lun",
                                     "result", "opcode", "cmd_len",
                                     "data_sglen", "prot_sglen", "prot_op",
                                     "_cmnd_length", "cmnd"))

def handle_scsi_dispatch_cmd_error(self, event):
    self.print_event_payload(event, ("host_no", "channel", "id", "lun",
                                     "rtn", "opcode", "cmd_len",
                                     "data_sglen", "prot_sglen", "prot_op",
                                     "_cmnd_length", "cmnd"))

def handle_scsi_dispatch_cmd_start(self, event):
    self.print_event_payload(event, ("host_no", "channel", "id", "lun",
                                     "opcode", "cmd_len", "data_sglen",
                                     "prot_sglen", "prot_op",
                                     "_cmnd_length", "cmnd"))

def handle_signal_deliver(self, event):
    self.print_event_payload(event, ("sig", "errno", "code", "sa_handler",
                                     "sa_flags"))

def handle_signal_generate(self, event):
    self.print_event_payload(event, ("sig", "errno", "code", "comm", "pid",
                                     "group", "result"))

def handle_skb_copy_datagram_iovec(self, event):
    self.print_event_payload(event, ("skbaddr", "len"))

def handle_skb_consume(self, event):
    self.print_event_payload(event, ("skbaddr",))

def handle_skb_kfree(self, event):
    self.print_event_payload(event, ("skbaddr", "location", "protocol"))

def handle_sock_exceed_buf_limit(self, event):
    self.print_event_payload(event, ("name", "sysctl_mem", "allocated",
                                     "sysctl_rmem", "rmem_alloc"))

def handle_sock_rcvqueue_full(self, event):
    self.print_event_payload(event, ("rmem_alloc", "truesize", "sk_rcvbuf"))

def handle_lttng_statedump_interrupt(self, event):
    self.print_event_payload(event, ("irq", "name", "action"))

def handle_lttng_statedump_block_device(self, event):
    self.print_event_payload(event, ("dev", "diskname"))

def handle_lttng_statedump_network_interface(self, event):
    self.print_event_payload(event, ("name", "address_ipv4"))

def handle_lttng_statedump_vm_map(self, event):
    self.print_event_payload(event, ("pid", "start", "end", "flags",
                                     "inode", "pgoff"))

def handle_lttng_statedump_file_descriptor(self, event):
    self.print_event_payload(event, ("pid", "fd", "flags", "fmode",
                                     "filename"))

def handle_lttng_statedump_process_state(self, event):
    self.print_event_payload(event, ("tid", "vtid", "pid", "vpid", "ppid",
                                     "vppid", "name", "type", "mode",
                                     "submode", "status", "ns_level"))

def handle_lttng_statedump_end(self, event):
    # No payload fields: prints the "{ cpu_id = %s }, }" form.
    self.print_event_payload(event, ())

def handle_lttng_statedump_start(self, event):
    # No payload fields: prints the "{ cpu_id = %s }, }" form.
    self.print_event_payload(event, ())

def handle_rpc_task_wakeup(self, event):
    self.print_event_payload(event, ("clnt", "task", "timeout", "runstate",
                                     "status", "flags", "q_name"))

def handle_rpc_task_sleep(self, event):
    self.print_event_payload(event, ("clnt", "task", "timeout", "runstate",
                                     "status", "flags", "q_name"))

def handle_rpc_task_complete(self, event):
    self.print_event_payload(event, ("clnt", "task", "action", "runstate",
                                     "status", "flags"))

def handle_rpc_task_run_action(self, event):
    self.print_event_payload(event, ("clnt", "task", "action", "runstate",
                                     "status", "flags"))

def handle_rpc_task_begin(self, event):
    self.print_event_payload(event, ("clnt", "task", "action", "runstate",
                                     "status", "flags"))

def handle_rpc_connect_status(self, event):
    self.print_event_payload(event, ("task", "clnt", "status"))

def handle_rpc_bind_status(self, event):
    self.print_event_payload(event, ("task", "clnt", "status"))

def handle_rpc_call_status(self, event):
    self.print_event_payload(event, ("task", "clnt", "status"))

def handle_itimer_expire(self, event):
    self.print_event_payload(event, ("which", "pid", "now"))

def handle_itimer_state(self, event):
    self.print_event_payload(event, ("which", "expires", "value_sec",
                                     "value_usec", "interval_sec",
                                     "interval_usec"))

def handle_hrtimer_cancel(self, event):
    self.print_event_payload(event, ("hrtimer",))

def handle_hrtimer_expire_exit(self, event):
    self.print_event_payload(event, ("hrtimer",))

def handle_hrtimer_expire_entry(self, event):
    self.print_event_payload(event, ("hrtimer", "now", "function"))

def handle_hrtimer_start(self, event):
    self.print_event_payload(event, ("hrtimer", "function", "expires",
                                     "softexpires"))

def handle_hrtimer_init(self, event):
    self.print_event_payload(event, ("hrtimer", "clockid", "mode"))

def handle_timer_cancel(self, event):
    self.print_event_payload(event, ("timer",))

def handle_timer_expire_exit(self, event):
    self.print_event_payload(event, ("timer",))

def handle_timer_expire_entry(self, event):
    self.print_event_payload(event, ("timer", "now", "function"))

def handle_timer_start(self, event):
    self.print_event_payload(event, ("timer", "function", "expires", "now"))

def handle_timer_init(self, event):
    self.print_event_payload(event, ("timer",))

def handle_udp_fail_queue_rcv_skb(self, event):
    self.print_event_payload(event, ("rc", "lport"))

def handle_mm_vmscan_lru_shrink_inactive(self, event):
    self.print_event_payload(event, ("nid", "zid", "nr_scanned",
                                     "nr_reclaimed", "priority",
                                     "reclaim_flags"))

def handle_mm_vmscan_writepage(self, event):
    self.print_event_payload(event, ("page", "reclaim_flags"))

def handle_mm_vmscan_memcg_isolate(self, event):
    self.print_event_payload(event, ("order", "nr_requested", "nr_scanned",
                                     "nr_taken", "isolate_mode", "file"))

def handle_mm_vmscan_lru_isolate(self, event):
    self.print_event_payload(event, ("order", "nr_requested", "nr_scanned",
                                     "nr_taken", "isolate_mode", "file"))

def handle_mm_shrink_slab_end(self, event):
    self.print_event_payload(event, ("shr", "shrink", "unused_scan",
                                     "new_scan", "retval", "total_scan"))

def handle_mm_shrink_slab_start(self, event):
    self.print_event_payload(event, ("shr", "shrink",
                                     "nr_objects_to_shrink", "gfp_flags",
                                     "pgs_scanned", "lru_pgs",
                                     "cache_items", "delta", "total_scan"))

def handle_mm_vmscan_memcg_softlimit_reclaim_end(self, event):
    self.print_event_payload(event, ("nr_reclaimed",))

def handle_mm_vmscan_memcg_reclaim_end(self, event):
    self.print_event_payload(event, ("nr_reclaimed",))

def handle_mm_vmscan_direct_reclaim_end(self, event):
    self.print_event_payload(event, ("nr_reclaimed",))

def handle_mm_vmscan_memcg_softlimit_reclaim_begin(self, event):
    self.print_event_payload(event, ("order", "may_writepage", "gfp_flags"))

def handle_mm_vmscan_memcg_reclaim_begin(self, event):
    self.print_event_payload(event, ("order", "may_writepage", "gfp_flags"))

def handle_mm_vmscan_direct_reclaim_begin(self, event):
    self.print_event_payload(event, ("order", "may_writepage", "gfp_flags"))

def handle_mm_vmscan_wakeup_kswapd(self, event):
    self.print_event_payload(event, ("nid", "zid", "order"))

def handle_mm_vmscan_kswapd_wake(self, event):
    self.print_event_payload(event, ("nid", "order"))

def handle_mm_vmscan_kswapd_sleep(self, event):
    self.print_event_payload(event, ("nid",))

def handle_workqueue_execute_end(self, event):
    self.print_event_payload(event, ("work",))

def handle_workqueue_execute_start(self, event):
    self.print_event_payload(event, ("work", "function"))

def handle_workqueue_activate_work(self, event):
    self.print_event_payload(event, ("work",))

def handle_workqueue_queue_work(self, event):
    self.print_event_payload(event, ("work", "function", "req_cpu"))

def handle_writeback_single_inode(self, event):
    self.print_event_payload(event, ("name", "ino", "state",
                                     "dirtied_when", "writeback_index",
                                     "nr_to_write", "wrote"))

def handle_writeback_wait_iff_congested(self, event):
    self.print_event_payload(event, ("usec_timeout", "usec_delayed"))

def handle_writeback_congestion_wait(self, event):
    self.print_event_payload(event, ("usec_timeout", "usec_delayed"))

def handle_writeback_sb_inodes_requeue(self, event):
    self.print_event_payload(event, ("name", "ino", "state",
                                     "dirtied_when"))

def handle_writeback_balance_dirty_pages(self, event):
    self.print_event_payload(event, ("bdi", "limit", "setpoint", "dirty",
                                     "bdi_setpoint", "bdi_dirty",
                                     "dirty_ratelimit", "task_ratelimit",
                                     "dirtied", "dirtied_pause", "paused",
                                     "pause", "period", "think"))

def handle_writeback_bdi_dirty_ratelimit(self, event):
    self.print_event_payload(event, ("bdi", "write_bw", "avg_write_bw",
                                     "dirty_rate", "dirty_ratelimit",
                                     "task_ratelimit",
                                     "balanced_dirty_ratelimit"))

def handle_writeback_global_dirty_state(self, event):
    self.print_event_payload(event, ("nr_dirty", "nr_writeback",
                                     "nr_unstable", "background_thresh",
                                     "dirty_thresh", "dirty_limit",
                                     "nr_dirtied", "nr_written"))

def handle_writeback_queue_io(self, event):
    self.print_event_payload(event, ("name", "moved"))

def handle_writeback_wbc_writepage(self, event):
    self.print_event_payload(event, ("name", "nr_to_write",
                                     "pages_skipped", "sync_mode",
                                     "for_kupdate", "for_background",
                                     "for_reclaim", "range_cyclic",
                                     "range_start", "range_end"))

def handle_writeback_thread_stop(self, event):
    self.print_event_payload(event, ("name",))

def handle_writeback_thread_start(self, event):
    self.print_event_payload(event, ("name",))

def handle_writeback_bdi_unregister(self, event):
    self.print_event_payload(event, ("name",))

def handle_writeback_bdi_register(self, event):
    self.print_event_payload(event, ("name",))

def handle_writeback_wake_forker_thread(self, event):
    self.print_event_payload(event, ("name",))

def handle_writeback_wake_thread(self, event):
    self.print_event_payload(event, ("name",))

def handle_writeback_wake_background(self, event):
    self.print_event_payload(event, ("name",))

def handle_writeback_nowork(self, event):
    self.print_event_payload(event, ("name",))

def handle_writeback_pages_written(self, event):
    self.print_event_payload(event, ("pages",))

def handle_writeback_wait(self, event):
    self.print_event_payload(event, ("name",))

def handle_writeback_written(self, event):
    self.print_event_payload(event, ("name",))

def handle_writeback_start(self, event):
    self.print_event_payload(event, ("name",))

def handle_writeback_exec(self, event):
    self.print_event_payload(event, ("name",))

def handle_writeback_queue(self, event):
    self.print_event_payload(event, ("name",))

def handle_writeback_nothread(self, event):
    self.print_event_payload(event, ("name",))

def handle_writeback_write_inode(self, event):
    self.print_event_payload(event, ("name", "ino", "sync_mode"))

def handle_writeback_write_inode_start(self, event):
    self.print_event_payload(event, ("name", "ino", "sync_mode"))

def handle_writeback_dirty_inode(self, event):
    self.print_event_payload(event, ("name", "ino", "flags"))

def handle_writeback_dirty_inode_start(self, event):
    self.print_event_payload(event, ("name", "ino", "flags"))

def handle_writeback_dirty_page(self, event):
    self.print_event_payload(event, ("name", "ino", "index"))

def handle_net_latency(self, event):
    self.print_event_payload(event, ("name", "delay", "flag", "out_id"))

def handle_block_latency(self, event):
    self.print_event_payload(event, ("major", "minor", "sector", "delay"))

def handle_offcpu_latency(self, event):
    self.print_event_payload(event, ("comm", "pid", "delay", "flag"))

def handle_wakeup_latency(self, event):
    self.print_event_payload(event, ("comm", "pid", "delay", "flag"))

# end of generated code


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description='Track a process throughout a LTTng trace')
    parser.add_argument('path', metavar="", help='Trace path')
    # default=None instead of the previous default=0: only the options'
    # truthiness is ever tested below, so behavior is unchanged.
    parser.add_argument('--procname', '-n', type=str, default=None,
                        help='Filter the results only for this list of '
                             'process names')
    parser.add_argument('--tid', '-t', type=str, default=None,
                        help='Filter the results only for this list '
                             'of TIDs')
    parser.add_argument('--follow-child', '-f', action="store_true",
                        help='Follow childs on fork')
    args = parser.parse_args()

    # Turn the comma-separated command-line lists into Python lists, or
    # None when the corresponding filter is disabled.
    arg_proc_list = args.procname.split(",") if args.procname else None
    arg_tid_list = ([int(tid) for tid in args.tid.split(",")]
                    if args.tid else None)

    traces = TraceCollection()
    handle = traces.add_traces_recursive(args.path, "ctf")
    if handle is None:
        # No CTF trace found under the given path.
        sys.exit(1)

    t = TraceParser(traces, arg_proc_list, arg_tid_list, args.follow_child)
    t.parse()

    for h in handle.values():
        traces.remove_trace(h)
from lttnganalyses.cli import sched if __name__ == '__main__': sched.runfreq() lttnganalyses-0.4.3/lttng-iolatencyfreq0000775000175000017500000000234612553274232022011 0ustar mjeansonmjeanson00000000000000#!/usr/bin/env python3 # # The MIT License (MIT) # # Copyright (C) 2015 - Julien Desfossez # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. from lttnganalyses.cli import io if __name__ == '__main__': io.runfreq() lttnganalyses-0.4.3/versioneer.py0000664000175000017500000017201212553274232020621 0ustar mjeansonmjeanson00000000000000 # Version: 0.15 """ The Versioneer ============== * like a rocketeer, but for versions! 
* https://github.com/warner/python-versioneer * Brian Warner * License: Public Domain * Compatible With: python2.6, 2.7, 3.2, 3.3, 3.4, and pypy * [![Latest Version] (https://pypip.in/version/versioneer/badge.svg?style=flat) ](https://pypi.python.org/pypi/versioneer/) * [![Build Status] (https://travis-ci.org/warner/python-versioneer.png?branch=master) ](https://travis-ci.org/warner/python-versioneer) This is a tool for managing a recorded version number in distutils-based python projects. The goal is to remove the tedious and error-prone "update the embedded version string" step from your release process. Making a new release should be as easy as recording a new tag in your version-control system, and maybe making new tarballs. ## Quick Install * `pip install versioneer` to somewhere to your $PATH * add a `[versioneer]` section to your setup.cfg (see below) * run `versioneer install` in your source tree, commit the results ## Version Identifiers Source trees come from a variety of places: * a version-control system checkout (mostly used by developers) * a nightly tarball, produced by build automation * a snapshot tarball, produced by a web-based VCS browser, like github's "tarball from tag" feature * a release tarball, produced by "setup.py sdist", distributed through PyPI Within each source tree, the version identifier (either a string or a number, this tool is format-agnostic) can come from a variety of places: * ask the VCS tool itself, e.g. "git describe" (for checkouts), which knows about recent "tags" and an absolute revision-id * the name of the directory into which the tarball was unpacked * an expanded VCS keyword ($Id$, etc) * a `_version.py` created by some earlier build step For released software, the version identifier is closely related to a VCS tag. Some projects use tag names that include more than just the version string (e.g. 
"myproject-1.2" instead of just "1.2"), in which case the tool needs to strip the tag prefix to extract the version identifier. For unreleased software (between tags), the version identifier should provide enough information to help developers recreate the same tree, while also giving them an idea of roughly how old the tree is (after version 1.2, before version 1.3). Many VCS systems can report a description that captures this, for example `git describe --tags --dirty --always` reports things like "0.7-1-g574ab98-dirty" to indicate that the checkout is one revision past the 0.7 tag, has a unique revision id of "574ab98", and is "dirty" (it has uncommitted changes. The version identifier is used for multiple purposes: * to allow the module to self-identify its version: `myproject.__version__` * to choose a name and prefix for a 'setup.py sdist' tarball ## Theory of Operation Versioneer works by adding a special `_version.py` file into your source tree, where your `__init__.py` can import it. This `_version.py` knows how to dynamically ask the VCS tool for version information at import time. `_version.py` also contains `$Revision$` markers, and the installation process marks `_version.py` to have this marker rewritten with a tag name during the `git archive` command. As a result, generated tarballs will contain enough information to get the proper version. To allow `setup.py` to compute a version too, a `versioneer.py` is added to the top level of your source tree, next to `setup.py` and the `setup.cfg` that configures it. This overrides several distutils/setuptools commands to compute the version when invoked, and changes `setup.py build` and `setup.py sdist` to replace `_version.py` with a small static file that contains just the generated version data. ## Installation First, decide on values for the following configuration variables: * `VCS`: the version control system you use. Currently accepts "git". * `style`: the style of version string to be produced. 
See "Styles" below for details. Defaults to "pep440", which looks like `TAG[+DISTANCE.gSHORTHASH[.dirty]]`. * `versionfile_source`: A project-relative pathname into which the generated version strings should be written. This is usually a `_version.py` next to your project's main `__init__.py` file, so it can be imported at runtime. If your project uses `src/myproject/__init__.py`, this should be `src/myproject/_version.py`. This file should be checked in to your VCS as usual: the copy created below by `setup.py setup_versioneer` will include code that parses expanded VCS keywords in generated tarballs. The 'build' and 'sdist' commands will replace it with a copy that has just the calculated version string. This must be set even if your project does not have any modules (and will therefore never import `_version.py`), since "setup.py sdist" -based trees still need somewhere to record the pre-calculated version strings. Anywhere in the source tree should do. If there is a `__init__.py` next to your `_version.py`, the `setup.py setup_versioneer` command (described below) will append some `__version__`-setting assignments, if they aren't already present. * `versionfile_build`: Like `versionfile_source`, but relative to the build directory instead of the source directory. These will differ when your setup.py uses 'package_dir='. If you have `package_dir={'myproject': 'src/myproject'}`, then you will probably have `versionfile_build='myproject/_version.py'` and `versionfile_source='src/myproject/_version.py'`. If this is set to None, then `setup.py build` will not attempt to rewrite any `_version.py` in the built tree. If your project does not have any libraries (e.g. if it only builds a script), then you should use `versionfile_build = None` and override `distutils.command.build_scripts` to explicitly insert a copy of `versioneer.get_version()` into your generated script. * `tag_prefix`: a string, like 'PROJECTNAME-', which appears at the start of all VCS tags. 
If your tags look like 'myproject-1.2.0', then you should use tag_prefix='myproject-'. If you use unprefixed tags like '1.2.0', this should be an empty string. * `parentdir_prefix`: an optional string, frequently the same as tag_prefix, which appears at the start of all unpacked tarball filenames. If your tarball unpacks into 'myproject-1.2.0', this should be 'myproject-'. To disable this feature, just omit the field from your `setup.cfg`. This tool provides one script, named `versioneer`. That script has one mode, "install", which writes a copy of `versioneer.py` into the current directory and runs `versioneer.py setup` to finish the installation. To versioneer-enable your project: * 1: Modify your `setup.cfg`, adding a section named `[versioneer]` and populating it with the configuration values you decided earlier (note that the option names are not case-sensitive): ```` [versioneer] VCS = git style = pep440 versionfile_source = src/myproject/_version.py versionfile_build = myproject/_version.py tag_prefix = "" parentdir_prefix = myproject- ```` * 2: Run `versioneer install`. This will do the following: * copy `versioneer.py` into the top of your source tree * create `_version.py` in the right place (`versionfile_source`) * modify your `__init__.py` (if one exists next to `_version.py`) to define `__version__` (by calling a function from `_version.py`) * modify your `MANIFEST.in` to include both `versioneer.py` and the generated `_version.py` in sdist tarballs `versioneer install` will complain about any problems it finds with your `setup.py` or `setup.cfg`. Run it multiple times until you have fixed all the problems. * 3: add `import versioneer` to your setup.py, and add the following arguments to the setup() call: version=versioneer.get_version(), cmdclass=versioneer.get_cmdclass(), * 4: commit these changes to your VCS. To make sure you won't forget, `versioneer install` will mark everything it touched for addition using `git add`. 
Don't forget to add `setup.py` and `setup.cfg` too. ## Post-Installation Usage Once established, all uses of your tree from a VCS checkout should get the current version string. All generated tarballs should include an embedded version string (so users who unpack them will not need a VCS tool installed). If you distribute your project through PyPI, then the release process should boil down to two steps: * 1: git tag 1.0 * 2: python setup.py register sdist upload If you distribute it through github (i.e. users use github to generate tarballs with `git archive`), the process is: * 1: git tag 1.0 * 2: git push; git push --tags Versioneer will report "0+untagged.NUMCOMMITS.gHASH" until your tree has at least one tag in its history. ## Version-String Flavors Code which uses Versioneer can learn about its version string at runtime by importing `_version` from your main `__init__.py` file and running the `get_versions()` function. From the "outside" (e.g. in `setup.py`), you can import the top-level `versioneer.py` and run `get_versions()`. Both functions return a dictionary with different flavors of version information: * `['version']`: A condensed version string, rendered using the selected style. This is the most commonly used value for the project's version string. The default "pep440" style yields strings like `0.11`, `0.11+2.g1076c97`, or `0.11+2.g1076c97.dirty`. See the "Styles" section below for alternative styles. * `['full-revisionid']`: detailed revision identifier. For Git, this is the full SHA1 commit id, e.g. "1076c978a8d3cfc70f408fe5974aa6c092c949ac". * `['dirty']`: a boolean, True if the tree has uncommitted changes. Note that this is only accurate if run in a VCS checkout, otherwise it is likely to be False or None * `['error']`: if the version string could not be computed, this will be set to a string describing the problem, otherwise it will be None. It may be useful to throw an exception in setup.py if this is set, to avoid e.g. 
creating tarballs with a version string of "unknown". Some variants are more useful than others. Including `full-revisionid` in a bug report should allow developers to reconstruct the exact code being tested (or indicate the presence of local changes that should be shared with the developers). `version` is suitable for display in an "about" box or a CLI `--version` output: it can be easily compared against release notes and lists of bugs fixed in various releases. The installer adds the following text to your `__init__.py` to place a basic version in `YOURPROJECT.__version__`: from ._version import get_versions __version__ = get_versions()['version'] del get_versions ## Styles The setup.cfg `style=` configuration controls how the VCS information is rendered into a version string. The default style, "pep440", produces a PEP440-compliant string, equal to the un-prefixed tag name for actual releases, and containing an additional "local version" section with more detail for in-between builds. For Git, this is TAG[+DISTANCE.gHEX[.dirty]] , using information from `git describe --tags --dirty --always`. For example "0.11+2.g1076c97.dirty" indicates that the tree is like the "1076c97" commit but has uncommitted changes (".dirty"), and that this commit is two revisions ("+2") beyond the "0.11" tag. For released software (exactly equal to a known tag), the identifier will only contain the stripped tag, e.g. "0.11". Other styles are available. See details.md in the Versioneer source tree for descriptions. ## Debugging Versioneer tries to avoid fatal errors: if something goes wrong, it will tend to return a version of "0+unknown". To investigate the problem, run `setup.py version`, which will run the version-lookup code in a verbose mode, and will display the full contents of `get_versions()` (including the `error` string, which may help identify what went wrong). 
## Updating Versioneer To upgrade your project to a new release of Versioneer, do the following: * install the new Versioneer (`pip install -U versioneer` or equivalent) * edit `setup.cfg`, if necessary, to include any new configuration settings indicated by the release notes * re-run `versioneer install` in your source tree, to replace `SRC/_version.py` * commit any changed files ### Upgrading to 0.15 Starting with this version, Versioneer is configured with a `[versioneer]` section in your `setup.cfg` file. Earlier versions required the `setup.py` to set attributes on the `versioneer` module immediately after import. The new version will refuse to run (raising an exception during import) until you have provided the necessary `setup.cfg` section. In addition, the Versioneer package provides an executable named `versioneer`, and the installation process is driven by running `versioneer install`. In 0.14 and earlier, the executable was named `versioneer-installer` and was run without an argument. ### Upgrading to 0.14 0.14 changes the format of the version string. 0.13 and earlier used hyphen-separated strings like "0.11-2-g1076c97-dirty". 0.14 and beyond use a plus-separated "local version" section strings, with dot-separated components, like "0.11+2.g1076c97". PEP440-strict tools did not like the old format, but should be ok with the new one. ### Upgrading from 0.11 to 0.12 Nothing special. ### Upgrading from 0.10 to 0.11 You must add a `versioneer.VCS = "git"` to your `setup.py` before re-running `setup.py setup_versioneer`. This will enable the use of additional version-control systems (SVN, etc) in the future. ## Future Directions This tool is designed to make it easily extended to other version-control systems: all VCS-specific components are in separate directories like src/git/ . The top-level `versioneer.py` script is assembled from these components by running make-versioneer.py . 
In the future, make-versioneer.py will take a VCS name as an argument, and will construct a version of `versioneer.py` that is specific to the given VCS. It might also take the configuration arguments that are currently provided manually during installation by editing setup.py . Alternatively, it might go the other direction and include code from all supported VCS systems, reducing the number of intermediate scripts. ## License To make Versioneer easier to embed, all its code is hereby released into the public domain. The `_version.py` that it creates is also in the public domain. """ from __future__ import print_function try: import configparser except ImportError: import ConfigParser as configparser import errno import json import os import re import subprocess import sys class VersioneerConfig: pass def get_root(): # we require that all commands are run from the project root, i.e. the # directory that contains setup.py, setup.cfg, and versioneer.py . root = os.path.realpath(os.path.abspath(os.getcwd())) setup_py = os.path.join(root, "setup.py") versioneer_py = os.path.join(root, "versioneer.py") if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)): # allow 'python path/to/setup.py COMMAND' root = os.path.dirname(os.path.realpath(os.path.abspath(sys.argv[0]))) setup_py = os.path.join(root, "setup.py") versioneer_py = os.path.join(root, "versioneer.py") if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)): err = ("Versioneer was unable to run the project root directory. 
" "Versioneer requires setup.py to be executed from " "its immediate directory (like 'python setup.py COMMAND'), " "or in a way that lets it use sys.argv[0] to find the root " "(like 'python path/to/setup.py COMMAND').") raise VersioneerBadRootError(err) try: # Certain runtime workflows (setup.py install/develop in a setuptools # tree) execute all dependencies in a single python process, so # "versioneer" may be imported multiple times, and python's shared # module-import table will cache the first one. So we can't use # os.path.dirname(__file__), as that will find whichever # versioneer.py was first imported, even in later projects. me = os.path.realpath(os.path.abspath(__file__)) if os.path.splitext(me)[0] != os.path.splitext(versioneer_py)[0]: print("Warning: build in %s is using versioneer.py from %s" % (os.path.dirname(me), versioneer_py)) except NameError: pass return root def get_config_from_root(root): # This might raise EnvironmentError (if setup.cfg is missing), or # configparser.NoSectionError (if it lacks a [versioneer] section), or # configparser.NoOptionError (if it lacks "VCS="). See the docstring at # the top of versioneer.py for instructions on writing your setup.cfg . 
setup_cfg = os.path.join(root, "setup.cfg") parser = configparser.SafeConfigParser() with open(setup_cfg, "r") as f: parser.readfp(f) VCS = parser.get("versioneer", "VCS") # mandatory def get(parser, name): if parser.has_option("versioneer", name): return parser.get("versioneer", name) return None cfg = VersioneerConfig() cfg.VCS = VCS cfg.style = get(parser, "style") or "" cfg.versionfile_source = get(parser, "versionfile_source") cfg.versionfile_build = get(parser, "versionfile_build") cfg.tag_prefix = get(parser, "tag_prefix") cfg.parentdir_prefix = get(parser, "parentdir_prefix") cfg.verbose = get(parser, "verbose") return cfg class NotThisMethod(Exception): pass # these dictionaries contain VCS-specific tools LONG_VERSION_PY = {} HANDLERS = {} def register_vcs_handler(vcs, method): # decorator def decorate(f): if vcs not in HANDLERS: HANDLERS[vcs] = {} HANDLERS[vcs][method] = f return f return decorate def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False): assert isinstance(commands, list) p = None for c in commands: try: dispcmd = str([c] + args) # remember shell=False, so use git.cmd on windows, not just git p = subprocess.Popen([c] + args, cwd=cwd, stdout=subprocess.PIPE, stderr=(subprocess.PIPE if hide_stderr else None)) break except EnvironmentError: e = sys.exc_info()[1] if e.errno == errno.ENOENT: continue if verbose: print("unable to run %s" % dispcmd) print(e) return None else: if verbose: print("unable to find command, tried %s" % (commands,)) return None stdout = p.communicate()[0].strip() if sys.version_info[0] >= 3: stdout = stdout.decode() if p.returncode != 0: if verbose: print("unable to run %s (error)" % dispcmd) return None return stdout LONG_VERSION_PY['git'] = ''' # This file helps to compute a version number in source trees obtained from # git-archive tarball (such as those provided by githubs download-from-tag # feature). 
Distribution tarballs (built by setup.py sdist) and build # directories (produced by setup.py build) will contain a much shorter file # that just contains the computed version number. # This file is released into the public domain. Generated by # versioneer-0.15 (https://github.com/warner/python-versioneer) import errno import os import re import subprocess import sys def get_keywords(): # these strings will be replaced by git during git-archive. # setup.py/versioneer.py will grep for the variable names, so they must # each be defined on a line of their own. _version.py will just call # get_keywords(). git_refnames = "%(DOLLAR)sFormat:%%d%(DOLLAR)s" git_full = "%(DOLLAR)sFormat:%%H%(DOLLAR)s" keywords = {"refnames": git_refnames, "full": git_full} return keywords class VersioneerConfig: pass def get_config(): # these strings are filled in when 'setup.py versioneer' creates # _version.py cfg = VersioneerConfig() cfg.VCS = "git" cfg.style = "%(STYLE)s" cfg.tag_prefix = "%(TAG_PREFIX)s" cfg.parentdir_prefix = "%(PARENTDIR_PREFIX)s" cfg.versionfile_source = "%(VERSIONFILE_SOURCE)s" cfg.verbose = False return cfg class NotThisMethod(Exception): pass LONG_VERSION_PY = {} HANDLERS = {} def register_vcs_handler(vcs, method): # decorator def decorate(f): if vcs not in HANDLERS: HANDLERS[vcs] = {} HANDLERS[vcs][method] = f return f return decorate def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False): assert isinstance(commands, list) p = None for c in commands: try: dispcmd = str([c] + args) # remember shell=False, so use git.cmd on windows, not just git p = subprocess.Popen([c] + args, cwd=cwd, stdout=subprocess.PIPE, stderr=(subprocess.PIPE if hide_stderr else None)) break except EnvironmentError: e = sys.exc_info()[1] if e.errno == errno.ENOENT: continue if verbose: print("unable to run %%s" %% dispcmd) print(e) return None else: if verbose: print("unable to find command, tried %%s" %% (commands,)) return None stdout = p.communicate()[0].strip() if 
sys.version_info[0] >= 3: stdout = stdout.decode() if p.returncode != 0: if verbose: print("unable to run %%s (error)" %% dispcmd) return None return stdout def versions_from_parentdir(parentdir_prefix, root, verbose): # Source tarballs conventionally unpack into a directory that includes # both the project name and a version string. dirname = os.path.basename(root) if not dirname.startswith(parentdir_prefix): if verbose: print("guessing rootdir is '%%s', but '%%s' doesn't start with " "prefix '%%s'" %% (root, dirname, parentdir_prefix)) raise NotThisMethod("rootdir doesn't start with parentdir_prefix") return {"version": dirname[len(parentdir_prefix):], "full-revisionid": None, "dirty": False, "error": None} @register_vcs_handler("git", "get_keywords") def git_get_keywords(versionfile_abs): # the code embedded in _version.py can just fetch the value of these # keywords. When used from setup.py, we don't want to import _version.py, # so we do it with a regexp instead. This function is not used from # _version.py. keywords = {} try: f = open(versionfile_abs, "r") for line in f.readlines(): if line.strip().startswith("git_refnames ="): mo = re.search(r'=\s*"(.*)"', line) if mo: keywords["refnames"] = mo.group(1) if line.strip().startswith("git_full ="): mo = re.search(r'=\s*"(.*)"', line) if mo: keywords["full"] = mo.group(1) f.close() except EnvironmentError: pass return keywords @register_vcs_handler("git", "keywords") def git_versions_from_keywords(keywords, tag_prefix, verbose): if not keywords: raise NotThisMethod("no keywords at all, weird") refnames = keywords["refnames"].strip() if refnames.startswith("$Format"): if verbose: print("keywords are unexpanded, not using") raise NotThisMethod("unexpanded keywords, not a git-archive tarball") refs = set([r.strip() for r in refnames.strip("()").split(",")]) # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of # just "foo-1.0". If we see a "tag: " prefix, prefer those. 
TAG = "tag: " tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)]) if not tags: # Either we're using git < 1.8.3, or there really are no tags. We use # a heuristic: assume all version tags have a digit. The old git %%d # expansion behaves like git log --decorate=short and strips out the # refs/heads/ and refs/tags/ prefixes that would let us distinguish # between branches and tags. By ignoring refnames without digits, we # filter out many common branch names like "release" and # "stabilization", as well as "HEAD" and "master". tags = set([r for r in refs if re.search(r'\d', r)]) if verbose: print("discarding '%%s', no digits" %% ",".join(refs-tags)) if verbose: print("likely tags: %%s" %% ",".join(sorted(tags))) for ref in sorted(tags): # sorting will prefer e.g. "2.0" over "2.0rc1" if ref.startswith(tag_prefix): r = ref[len(tag_prefix):] if verbose: print("picking %%s" %% r) return {"version": r, "full-revisionid": keywords["full"].strip(), "dirty": False, "error": None } # no suitable tags, so version is "0+unknown", but full hex is still there if verbose: print("no suitable tags, using unknown + full revision id") return {"version": "0+unknown", "full-revisionid": keywords["full"].strip(), "dirty": False, "error": "no suitable tags"} @register_vcs_handler("git", "pieces_from_vcs") def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): # this runs 'git' from the root of the source tree. This only gets called # if the git-archive 'subst' keywords were *not* expanded, and # _version.py hasn't already been rewritten with a short version string, # meaning we're inside a checked out source tree. 
if not os.path.exists(os.path.join(root, ".git")): if verbose: print("no .git in %%s" %% root) raise NotThisMethod("no .git directory") GITS = ["git"] if sys.platform == "win32": GITS = ["git.cmd", "git.exe"] # if there is a tag, this yields TAG-NUM-gHEX[-dirty] # if there are no tags, this yields HEX[-dirty] (no NUM) describe_out = run_command(GITS, ["describe", "--tags", "--dirty", "--always", "--long"], cwd=root) # --long was added in git-1.5.5 if describe_out is None: raise NotThisMethod("'git describe' failed") describe_out = describe_out.strip() full_out = run_command(GITS, ["rev-parse", "HEAD"], cwd=root) if full_out is None: raise NotThisMethod("'git rev-parse' failed") full_out = full_out.strip() pieces = {} pieces["long"] = full_out pieces["short"] = full_out[:7] # maybe improved later pieces["error"] = None # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty] # TAG might have hyphens. git_describe = describe_out # look for -dirty suffix dirty = git_describe.endswith("-dirty") pieces["dirty"] = dirty if dirty: git_describe = git_describe[:git_describe.rindex("-dirty")] # now we have TAG-NUM-gHEX or HEX if "-" in git_describe: # TAG-NUM-gHEX mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe) if not mo: # unparseable. Maybe git-describe is misbehaving? 
pieces["error"] = ("unable to parse git-describe output: '%%s'" %% describe_out) return pieces # tag full_tag = mo.group(1) if not full_tag.startswith(tag_prefix): if verbose: fmt = "tag '%%s' doesn't start with prefix '%%s'" print(fmt %% (full_tag, tag_prefix)) pieces["error"] = ("tag '%%s' doesn't start with prefix '%%s'" %% (full_tag, tag_prefix)) return pieces pieces["closest-tag"] = full_tag[len(tag_prefix):] # distance: number of commits since tag pieces["distance"] = int(mo.group(2)) # commit: short hex revision ID pieces["short"] = mo.group(3) else: # HEX: no tags pieces["closest-tag"] = None count_out = run_command(GITS, ["rev-list", "HEAD", "--count"], cwd=root) pieces["distance"] = int(count_out) # total number of commits return pieces def plus_or_dot(pieces): if "+" in pieces.get("closest-tag", ""): return "." return "+" def render_pep440(pieces): # now build up version string, with post-release "local version # identifier". Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you # get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty # exceptions: # 1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty] if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: rendered += plus_or_dot(pieces) rendered += "%%d.g%%s" %% (pieces["distance"], pieces["short"]) if pieces["dirty"]: rendered += ".dirty" else: # exception #1 rendered = "0+untagged.%%d.g%%s" %% (pieces["distance"], pieces["short"]) if pieces["dirty"]: rendered += ".dirty" return rendered def render_pep440_pre(pieces): # TAG[.post.devDISTANCE] . No -dirty # exceptions: # 1: no tags. 0.post.devDISTANCE if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"]: rendered += ".post.dev%%d" %% pieces["distance"] else: # exception #1 rendered = "0.post.dev%%d" %% pieces["distance"] return rendered def render_pep440_post(pieces): # TAG[.postDISTANCE[.dev0]+gHEX] . The ".dev0" means dirty. 
Note that # .dev0 sorts backwards (a dirty tree will appear "older" than the # corresponding clean one), but you shouldn't be releasing software with # -dirty anyways. # exceptions: # 1: no tags. 0.postDISTANCE[.dev0] if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: rendered += ".post%%d" %% pieces["distance"] if pieces["dirty"]: rendered += ".dev0" rendered += plus_or_dot(pieces) rendered += "g%%s" %% pieces["short"] else: # exception #1 rendered = "0.post%%d" %% pieces["distance"] if pieces["dirty"]: rendered += ".dev0" rendered += "+g%%s" %% pieces["short"] return rendered def render_pep440_old(pieces): # TAG[.postDISTANCE[.dev0]] . The ".dev0" means dirty. # exceptions: # 1: no tags. 0.postDISTANCE[.dev0] if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: rendered += ".post%%d" %% pieces["distance"] if pieces["dirty"]: rendered += ".dev0" else: # exception #1 rendered = "0.post%%d" %% pieces["distance"] if pieces["dirty"]: rendered += ".dev0" return rendered def render_git_describe(pieces): # TAG[-DISTANCE-gHEX][-dirty], like 'git describe --tags --dirty # --always' # exceptions: # 1: no tags. HEX[-dirty] (note: no 'g' prefix) if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"]: rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"]) else: # exception #1 rendered = pieces["short"] if pieces["dirty"]: rendered += "-dirty" return rendered def render_git_describe_long(pieces): # TAG-DISTANCE-gHEX[-dirty], like 'git describe --tags --dirty # --always -long'. The distance/hash is unconditional. # exceptions: # 1: no tags. 
HEX[-dirty] (note: no 'g' prefix) if pieces["closest-tag"]: rendered = pieces["closest-tag"] rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"]) else: # exception #1 rendered = pieces["short"] if pieces["dirty"]: rendered += "-dirty" return rendered def render(pieces, style): if pieces["error"]: return {"version": "unknown", "full-revisionid": pieces.get("long"), "dirty": None, "error": pieces["error"]} if not style or style == "default": style = "pep440" # the default if style == "pep440": rendered = render_pep440(pieces) elif style == "pep440-pre": rendered = render_pep440_pre(pieces) elif style == "pep440-post": rendered = render_pep440_post(pieces) elif style == "pep440-old": rendered = render_pep440_old(pieces) elif style == "git-describe": rendered = render_git_describe(pieces) elif style == "git-describe-long": rendered = render_git_describe_long(pieces) else: raise ValueError("unknown style '%%s'" %% style) return {"version": rendered, "full-revisionid": pieces["long"], "dirty": pieces["dirty"], "error": None} def get_versions(): # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have # __file__, we can work backwards from there to the root. Some # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which # case we can only use expanded keywords. cfg = get_config() verbose = cfg.verbose try: return git_versions_from_keywords(get_keywords(), cfg.tag_prefix, verbose) except NotThisMethod: pass try: root = os.path.realpath(__file__) # versionfile_source is the relative path from the top of the source # tree (where the .git directory might live) to this file. Invert # this to find the root from __file__. 
for i in cfg.versionfile_source.split('/'): root = os.path.dirname(root) except NameError: return {"version": "0+unknown", "full-revisionid": None, "dirty": None, "error": "unable to find root of source tree"} try: pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose) return render(pieces, cfg.style) except NotThisMethod: pass try: if cfg.parentdir_prefix: return versions_from_parentdir(cfg.parentdir_prefix, root, verbose) except NotThisMethod: pass return {"version": "0+unknown", "full-revisionid": None, "dirty": None, "error": "unable to compute version"} ''' @register_vcs_handler("git", "get_keywords") def git_get_keywords(versionfile_abs): # the code embedded in _version.py can just fetch the value of these # keywords. When used from setup.py, we don't want to import _version.py, # so we do it with a regexp instead. This function is not used from # _version.py. keywords = {} try: f = open(versionfile_abs, "r") for line in f.readlines(): if line.strip().startswith("git_refnames ="): mo = re.search(r'=\s*"(.*)"', line) if mo: keywords["refnames"] = mo.group(1) if line.strip().startswith("git_full ="): mo = re.search(r'=\s*"(.*)"', line) if mo: keywords["full"] = mo.group(1) f.close() except EnvironmentError: pass return keywords @register_vcs_handler("git", "keywords") def git_versions_from_keywords(keywords, tag_prefix, verbose): if not keywords: raise NotThisMethod("no keywords at all, weird") refnames = keywords["refnames"].strip() if refnames.startswith("$Format"): if verbose: print("keywords are unexpanded, not using") raise NotThisMethod("unexpanded keywords, not a git-archive tarball") refs = set([r.strip() for r in refnames.strip("()").split(",")]) # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of # just "foo-1.0". If we see a "tag: " prefix, prefer those. TAG = "tag: " tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)]) if not tags: # Either we're using git < 1.8.3, or there really are no tags. 
We use # a heuristic: assume all version tags have a digit. The old git %d # expansion behaves like git log --decorate=short and strips out the # refs/heads/ and refs/tags/ prefixes that would let us distinguish # between branches and tags. By ignoring refnames without digits, we # filter out many common branch names like "release" and # "stabilization", as well as "HEAD" and "master". tags = set([r for r in refs if re.search(r'\d', r)]) if verbose: print("discarding '%s', no digits" % ",".join(refs-tags)) if verbose: print("likely tags: %s" % ",".join(sorted(tags))) for ref in sorted(tags): # sorting will prefer e.g. "2.0" over "2.0rc1" if ref.startswith(tag_prefix): r = ref[len(tag_prefix):] if verbose: print("picking %s" % r) return {"version": r, "full-revisionid": keywords["full"].strip(), "dirty": False, "error": None } # no suitable tags, so version is "0+unknown", but full hex is still there if verbose: print("no suitable tags, using unknown + full revision id") return {"version": "0+unknown", "full-revisionid": keywords["full"].strip(), "dirty": False, "error": "no suitable tags"} @register_vcs_handler("git", "pieces_from_vcs") def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): # this runs 'git' from the root of the source tree. This only gets called # if the git-archive 'subst' keywords were *not* expanded, and # _version.py hasn't already been rewritten with a short version string, # meaning we're inside a checked out source tree. 
if not os.path.exists(os.path.join(root, ".git")): if verbose: print("no .git in %s" % root) raise NotThisMethod("no .git directory") GITS = ["git"] if sys.platform == "win32": GITS = ["git.cmd", "git.exe"] # if there is a tag, this yields TAG-NUM-gHEX[-dirty] # if there are no tags, this yields HEX[-dirty] (no NUM) describe_out = run_command(GITS, ["describe", "--tags", "--dirty", "--always", "--long"], cwd=root) # --long was added in git-1.5.5 if describe_out is None: raise NotThisMethod("'git describe' failed") describe_out = describe_out.strip() full_out = run_command(GITS, ["rev-parse", "HEAD"], cwd=root) if full_out is None: raise NotThisMethod("'git rev-parse' failed") full_out = full_out.strip() pieces = {} pieces["long"] = full_out pieces["short"] = full_out[:7] # maybe improved later pieces["error"] = None # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty] # TAG might have hyphens. git_describe = describe_out # look for -dirty suffix dirty = git_describe.endswith("-dirty") pieces["dirty"] = dirty if dirty: git_describe = git_describe[:git_describe.rindex("-dirty")] # now we have TAG-NUM-gHEX or HEX if "-" in git_describe: # TAG-NUM-gHEX mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe) if not mo: # unparseable. Maybe git-describe is misbehaving? 
pieces["error"] = ("unable to parse git-describe output: '%s'" % describe_out) return pieces # tag full_tag = mo.group(1) if not full_tag.startswith(tag_prefix): if verbose: fmt = "tag '%s' doesn't start with prefix '%s'" print(fmt % (full_tag, tag_prefix)) pieces["error"] = ("tag '%s' doesn't start with prefix '%s'" % (full_tag, tag_prefix)) return pieces pieces["closest-tag"] = full_tag[len(tag_prefix):] # distance: number of commits since tag pieces["distance"] = int(mo.group(2)) # commit: short hex revision ID pieces["short"] = mo.group(3) else: # HEX: no tags pieces["closest-tag"] = None count_out = run_command(GITS, ["rev-list", "HEAD", "--count"], cwd=root) pieces["distance"] = int(count_out) # total number of commits return pieces def do_vcs_install(manifest_in, versionfile_source, ipy): GITS = ["git"] if sys.platform == "win32": GITS = ["git.cmd", "git.exe"] files = [manifest_in, versionfile_source] if ipy: files.append(ipy) try: me = __file__ if me.endswith(".pyc") or me.endswith(".pyo"): me = os.path.splitext(me)[0] + ".py" versioneer_file = os.path.relpath(me) except NameError: versioneer_file = "versioneer.py" files.append(versioneer_file) present = False try: f = open(".gitattributes", "r") for line in f.readlines(): if line.strip().startswith(versionfile_source): if "export-subst" in line.strip().split()[1:]: present = True f.close() except EnvironmentError: pass if not present: f = open(".gitattributes", "a+") f.write("%s export-subst\n" % versionfile_source) f.close() files.append(".gitattributes") run_command(GITS, ["add", "--"] + files) def versions_from_parentdir(parentdir_prefix, root, verbose): # Source tarballs conventionally unpack into a directory that includes # both the project name and a version string. 
dirname = os.path.basename(root) if not dirname.startswith(parentdir_prefix): if verbose: print("guessing rootdir is '%s', but '%s' doesn't start with " "prefix '%s'" % (root, dirname, parentdir_prefix)) raise NotThisMethod("rootdir doesn't start with parentdir_prefix") return {"version": dirname[len(parentdir_prefix):], "full-revisionid": None, "dirty": False, "error": None} SHORT_VERSION_PY = """ # This file was generated by 'versioneer.py' (0.15) from # revision-control system data, or from the parent directory name of an # unpacked source archive. Distribution tarballs contain a pre-generated copy # of this file. import json import sys version_json = ''' %s ''' # END VERSION_JSON def get_versions(): return json.loads(version_json) """ def versions_from_file(filename): try: with open(filename) as f: contents = f.read() except EnvironmentError: raise NotThisMethod("unable to read _version.py") mo = re.search(r"version_json = '''\n(.*)''' # END VERSION_JSON", contents, re.M | re.S) if not mo: raise NotThisMethod("no version_json in _version.py") return json.loads(mo.group(1)) def write_to_version_file(filename, versions): os.unlink(filename) contents = json.dumps(versions, sort_keys=True, indent=1, separators=(",", ": ")) with open(filename, "w") as f: f.write(SHORT_VERSION_PY % contents) print("set %s to '%s'" % (filename, versions["version"])) def plus_or_dot(pieces): if "+" in pieces.get("closest-tag", ""): return "." return "+" def render_pep440(pieces): # now build up version string, with post-release "local version # identifier". Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you # get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty # exceptions: # 1: no tags. git_describe was just HEX. 
0+untagged.DISTANCE.gHEX[.dirty] if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: rendered += plus_or_dot(pieces) rendered += "%d.g%s" % (pieces["distance"], pieces["short"]) if pieces["dirty"]: rendered += ".dirty" else: # exception #1 rendered = "0+untagged.%d.g%s" % (pieces["distance"], pieces["short"]) if pieces["dirty"]: rendered += ".dirty" return rendered def render_pep440_pre(pieces): # TAG[.post.devDISTANCE] . No -dirty # exceptions: # 1: no tags. 0.post.devDISTANCE if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"]: rendered += ".post.dev%d" % pieces["distance"] else: # exception #1 rendered = "0.post.dev%d" % pieces["distance"] return rendered def render_pep440_post(pieces): # TAG[.postDISTANCE[.dev0]+gHEX] . The ".dev0" means dirty. Note that # .dev0 sorts backwards (a dirty tree will appear "older" than the # corresponding clean one), but you shouldn't be releasing software with # -dirty anyways. # exceptions: # 1: no tags. 0.postDISTANCE[.dev0] if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: rendered += ".post%d" % pieces["distance"] if pieces["dirty"]: rendered += ".dev0" rendered += plus_or_dot(pieces) rendered += "g%s" % pieces["short"] else: # exception #1 rendered = "0.post%d" % pieces["distance"] if pieces["dirty"]: rendered += ".dev0" rendered += "+g%s" % pieces["short"] return rendered def render_pep440_old(pieces): # TAG[.postDISTANCE[.dev0]] . The ".dev0" means dirty. # exceptions: # 1: no tags. 
0.postDISTANCE[.dev0] if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: rendered += ".post%d" % pieces["distance"] if pieces["dirty"]: rendered += ".dev0" else: # exception #1 rendered = "0.post%d" % pieces["distance"] if pieces["dirty"]: rendered += ".dev0" return rendered def render_git_describe(pieces): # TAG[-DISTANCE-gHEX][-dirty], like 'git describe --tags --dirty # --always' # exceptions: # 1: no tags. HEX[-dirty] (note: no 'g' prefix) if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"]: rendered += "-%d-g%s" % (pieces["distance"], pieces["short"]) else: # exception #1 rendered = pieces["short"] if pieces["dirty"]: rendered += "-dirty" return rendered def render_git_describe_long(pieces): # TAG-DISTANCE-gHEX[-dirty], like 'git describe --tags --dirty # --always -long'. The distance/hash is unconditional. # exceptions: # 1: no tags. HEX[-dirty] (note: no 'g' prefix) if pieces["closest-tag"]: rendered = pieces["closest-tag"] rendered += "-%d-g%s" % (pieces["distance"], pieces["short"]) else: # exception #1 rendered = pieces["short"] if pieces["dirty"]: rendered += "-dirty" return rendered def render(pieces, style): if pieces["error"]: return {"version": "unknown", "full-revisionid": pieces.get("long"), "dirty": None, "error": pieces["error"]} if not style or style == "default": style = "pep440" # the default if style == "pep440": rendered = render_pep440(pieces) elif style == "pep440-pre": rendered = render_pep440_pre(pieces) elif style == "pep440-post": rendered = render_pep440_post(pieces) elif style == "pep440-old": rendered = render_pep440_old(pieces) elif style == "git-describe": rendered = render_git_describe(pieces) elif style == "git-describe-long": rendered = render_git_describe_long(pieces) else: raise ValueError("unknown style '%s'" % style) return {"version": rendered, "full-revisionid": pieces["long"], "dirty": pieces["dirty"], "error": None} class 
VersioneerBadRootError(Exception): pass def get_versions(verbose=False): # returns dict with two keys: 'version' and 'full' if "versioneer" in sys.modules: # see the discussion in cmdclass.py:get_cmdclass() del sys.modules["versioneer"] root = get_root() cfg = get_config_from_root(root) assert cfg.VCS is not None, "please set [versioneer]VCS= in setup.cfg" handlers = HANDLERS.get(cfg.VCS) assert handlers, "unrecognized VCS '%s'" % cfg.VCS verbose = verbose or cfg.verbose assert cfg.versionfile_source is not None, \ "please set versioneer.versionfile_source" assert cfg.tag_prefix is not None, "please set versioneer.tag_prefix" versionfile_abs = os.path.join(root, cfg.versionfile_source) # extract version from first of: _version.py, VCS command (e.g. 'git # describe'), parentdir. This is meant to work for developers using a # source checkout, for users of a tarball created by 'setup.py sdist', # and for users of a tarball/zipball created by 'git archive' or github's # download-from-tag feature or the equivalent in other VCSes. 
get_keywords_f = handlers.get("get_keywords") from_keywords_f = handlers.get("keywords") if get_keywords_f and from_keywords_f: try: keywords = get_keywords_f(versionfile_abs) ver = from_keywords_f(keywords, cfg.tag_prefix, verbose) if verbose: print("got version from expanded keyword %s" % ver) return ver except NotThisMethod: pass try: ver = versions_from_file(versionfile_abs) if verbose: print("got version from file %s %s" % (versionfile_abs, ver)) return ver except NotThisMethod: pass from_vcs_f = handlers.get("pieces_from_vcs") if from_vcs_f: try: pieces = from_vcs_f(cfg.tag_prefix, root, verbose) ver = render(pieces, cfg.style) if verbose: print("got version from VCS %s" % ver) return ver except NotThisMethod: pass try: if cfg.parentdir_prefix: ver = versions_from_parentdir(cfg.parentdir_prefix, root, verbose) if verbose: print("got version from parentdir %s" % ver) return ver except NotThisMethod: pass if verbose: print("unable to compute version") return {"version": "0+unknown", "full-revisionid": None, "dirty": None, "error": "unable to compute version"} def get_version(): return get_versions()["version"] def get_cmdclass(): if "versioneer" in sys.modules: del sys.modules["versioneer"] # this fixes the "python setup.py develop" case (also 'install' and # 'easy_install .'), in which subdependencies of the main project are # built (using setup.py bdist_egg) in the same python process. Assume # a main project A and a dependency B, which use different versions # of Versioneer. A's setup.py imports A's Versioneer, leaving it in # sys.modules by the time B's setup.py is executed, causing B to run # with the wrong versioneer. Setuptools wraps the sub-dep builds in a # sandbox that restores sys.modules to it's pre-build state, so the # parent is protected against the child's "import versioneer". By # removing ourselves from sys.modules here, before the child build # happens, we protect the child from the parent's versioneer too. 
# Also see https://github.com/warner/python-versioneer/issues/52 cmds = {} # we add "version" to both distutils and setuptools from distutils.core import Command class cmd_version(Command): description = "report generated version string" user_options = [] boolean_options = [] def initialize_options(self): pass def finalize_options(self): pass def run(self): vers = get_versions(verbose=True) print("Version: %s" % vers["version"]) print(" full-revisionid: %s" % vers.get("full-revisionid")) print(" dirty: %s" % vers.get("dirty")) if vers["error"]: print(" error: %s" % vers["error"]) cmds["version"] = cmd_version # we override "build_py" in both distutils and setuptools # # most invocation pathways end up running build_py: # distutils/build -> build_py # distutils/install -> distutils/build ->.. # setuptools/bdist_wheel -> distutils/install ->.. # setuptools/bdist_egg -> distutils/install_lib -> build_py # setuptools/install -> bdist_egg ->.. # setuptools/develop -> ? from distutils.command.build_py import build_py as _build_py class cmd_build_py(_build_py): def run(self): root = get_root() cfg = get_config_from_root(root) versions = get_versions() _build_py.run(self) # now locate _version.py in the new build/ directory and replace # it with an updated value if cfg.versionfile_build: target_versionfile = os.path.join(self.build_lib, cfg.versionfile_build) print("UPDATING %s" % target_versionfile) write_to_version_file(target_versionfile, versions) cmds["build_py"] = cmd_build_py if "cx_Freeze" in sys.modules: # cx_freeze enabled? 
from cx_Freeze.dist import build_exe as _build_exe class cmd_build_exe(_build_exe): def run(self): root = get_root() cfg = get_config_from_root(root) versions = get_versions() target_versionfile = cfg.versionfile_source print("UPDATING %s" % target_versionfile) write_to_version_file(target_versionfile, versions) _build_exe.run(self) os.unlink(target_versionfile) with open(cfg.versionfile_source, "w") as f: LONG = LONG_VERSION_PY[cfg.VCS] f.write(LONG % {"DOLLAR": "$", "STYLE": cfg.style, "TAG_PREFIX": cfg.tag_prefix, "PARENTDIR_PREFIX": cfg.parentdir_prefix, "VERSIONFILE_SOURCE": cfg.versionfile_source, }) cmds["build_exe"] = cmd_build_exe del cmds["build_py"] # we override different "sdist" commands for both environments if "setuptools" in sys.modules: from setuptools.command.sdist import sdist as _sdist else: from distutils.command.sdist import sdist as _sdist class cmd_sdist(_sdist): def run(self): versions = get_versions() self._versioneer_generated_versions = versions # unless we update this, the command will keep using the old # version self.distribution.metadata.version = versions["version"] return _sdist.run(self) def make_release_tree(self, base_dir, files): root = get_root() cfg = get_config_from_root(root) _sdist.make_release_tree(self, base_dir, files) # now locate _version.py in the new base_dir directory # (remembering that it may be a hardlink) and replace it with an # updated value target_versionfile = os.path.join(base_dir, cfg.versionfile_source) print("UPDATING %s" % target_versionfile) write_to_version_file(target_versionfile, self._versioneer_generated_versions) cmds["sdist"] = cmd_sdist return cmds CONFIG_ERROR = """ setup.cfg is missing the necessary Versioneer configuration. 
You need a section like: [versioneer] VCS = git style = pep440 versionfile_source = src/myproject/_version.py versionfile_build = myproject/_version.py tag_prefix = "" parentdir_prefix = myproject- You will also need to edit your setup.py to use the results: import versioneer setup(version=versioneer.get_version(), cmdclass=versioneer.get_cmdclass(), ...) Please read the docstring in ./versioneer.py for configuration instructions, edit setup.cfg, and re-run the installer or 'python versioneer.py setup'. """ SAMPLE_CONFIG = """ # See the docstring in versioneer.py for instructions. Note that you must # re-run 'versioneer.py setup' after changing this section, and commit the # resulting files. [versioneer] #VCS = git #style = pep440 #versionfile_source = #versionfile_build = #tag_prefix = #parentdir_prefix = """ INIT_PY_SNIPPET = """ from ._version import get_versions __version__ = get_versions()['version'] del get_versions """ def do_setup(): root = get_root() try: cfg = get_config_from_root(root) except (EnvironmentError, configparser.NoSectionError, configparser.NoOptionError) as e: if isinstance(e, (EnvironmentError, configparser.NoSectionError)): print("Adding sample versioneer config to setup.cfg", file=sys.stderr) with open(os.path.join(root, "setup.cfg"), "a") as f: f.write(SAMPLE_CONFIG) print(CONFIG_ERROR, file=sys.stderr) return 1 print(" creating %s" % cfg.versionfile_source) with open(cfg.versionfile_source, "w") as f: LONG = LONG_VERSION_PY[cfg.VCS] f.write(LONG % {"DOLLAR": "$", "STYLE": cfg.style, "TAG_PREFIX": cfg.tag_prefix, "PARENTDIR_PREFIX": cfg.parentdir_prefix, "VERSIONFILE_SOURCE": cfg.versionfile_source, }) ipy = os.path.join(os.path.dirname(cfg.versionfile_source), "__init__.py") if os.path.exists(ipy): try: with open(ipy, "r") as f: old = f.read() except EnvironmentError: old = "" if INIT_PY_SNIPPET not in old: print(" appending to %s" % ipy) with open(ipy, "a") as f: f.write(INIT_PY_SNIPPET) else: print(" %s unmodified" % ipy) else: 
print(" %s doesn't exist, ok" % ipy) ipy = None # Make sure both the top-level "versioneer.py" and versionfile_source # (PKG/_version.py, used by runtime code) are in MANIFEST.in, so # they'll be copied into source distributions. Pip won't be able to # install the package without this. manifest_in = os.path.join(root, "MANIFEST.in") simple_includes = set() try: with open(manifest_in, "r") as f: for line in f: if line.startswith("include "): for include in line.split()[1:]: simple_includes.add(include) except EnvironmentError: pass # That doesn't cover everything MANIFEST.in can do # (http://docs.python.org/2/distutils/sourcedist.html#commands), so # it might give some false negatives. Appending redundant 'include' # lines is safe, though. if "versioneer.py" not in simple_includes: print(" appending 'versioneer.py' to MANIFEST.in") with open(manifest_in, "a") as f: f.write("include versioneer.py\n") else: print(" 'versioneer.py' already in MANIFEST.in") if cfg.versionfile_source not in simple_includes: print(" appending versionfile_source ('%s') to MANIFEST.in" % cfg.versionfile_source) with open(manifest_in, "a") as f: f.write("include %s\n" % cfg.versionfile_source) else: print(" versionfile_source already in MANIFEST.in") # Make VCS-specific changes. For git, this means creating/changing # .gitattributes to mark _version.py for export-time keyword # substitution. do_vcs_install(manifest_in, cfg.versionfile_source, ipy) return 0 def scan_setup_py(): found = set() setters = False errors = 0 with open("setup.py", "r") as f: for line in f.readlines(): if "import versioneer" in line: found.add("import") if "versioneer.get_cmdclass()" in line: found.add("cmdclass") if "versioneer.get_version()" in line: found.add("get_version") if "versioneer.VCS" in line: setters = True if "versioneer.versionfile_source" in line: setters = True if len(found) != 3: print("") print("Your setup.py appears to be missing some important items") print("(but I might be wrong). 
Please make sure it has something") print("roughly like the following:") print("") print(" import versioneer") print(" setup( version=versioneer.get_version(),") print(" cmdclass=versioneer.get_cmdclass(), ...)") print("") errors += 1 if setters: print("You should remove lines like 'versioneer.VCS = ' and") print("'versioneer.versionfile_source = ' . This configuration") print("now lives in setup.cfg, and should be removed from setup.py") print("") errors += 1 return errors if __name__ == "__main__": cmd = sys.argv[1] if cmd == "setup": errors = do_setup() errors += scan_setup_py() if errors: sys.exit(1) lttnganalyses-0.4.3/lttng-schedtop0000775000175000017500000000235112665072151020751 0ustar mjeansonmjeanson00000000000000#!/usr/bin/env python3 # # The MIT License (MIT) # # Copyright (C) 2015 - Julien Desfossez # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. 
from lttnganalyses.cli import sched if __name__ == '__main__': sched.runtop() lttnganalyses-0.4.3/LICENSE0000664000175000017500000000031712665072151017071 0ustar mjeansonmjeanson00000000000000LTTng-Analyses - Licensing These analyses are released under the MIT license. This license is used to allow the use of these analyses in both free and proprietary software. See mit-license.txt for details. lttnganalyses-0.4.3/lttnganalyses.egg-info/0000775000175000017500000000000012667421106022445 5ustar mjeansonmjeanson00000000000000lttnganalyses-0.4.3/lttnganalyses.egg-info/PKG-INFO0000664000175000017500000012742212667421106023552 0ustar mjeansonmjeanson00000000000000Metadata-Version: 1.1 Name: lttnganalyses Version: 0.4.3 Summary: LTTng analyses Home-page: https://github.com/lttng/lttng-analyses Author: Julien Desfossez Author-email: jdesfossez@efficios.com License: MIT Description: ************** LTTng-analyses ************** This repository contains various scripts to extract monitoring data and metrics from LTTng kernel traces. As opposed to other diagnostic or monitoring solutions, this approach is designed to allow users to record their system's activity with a low overhead, wait for a problem to occur and then diagnose its cause offline. This solution allows the user to target hard to find problems and dig until the root cause is found. This README describes the implemented analyses as well as how to use them. |pypi| .. contents:: :local: :depth: 2 :backlinks: none ============ Requirements ============ * LTTng >= 2.5 * Babeltrace >= 1.2 (with python bindings built) * Python >= 3.4 ============ Installation ============ --------------- Release version --------------- On **Ubuntu** (12.04 and up) using the LTTng ppa: .. 
code-block:: bash apt-get install -y software-properties-common (or python-software-properties on 12.04) apt-add-repository -y ppa:lttng/ppa apt-get update apt-get -y install lttng-tools babeltrace lttng-modules-dkms python3-babeltrace python3-progressbar python3-lttnganalyses On **Debian Sid**: .. code-block:: bash apt-get -y install lttng-tools babeltrace lttng-modules-dkms python3-babeltrace python3-progressbar python3-lttnganalyses On other distributions: Please refer to the `LTTng documentation `_ to install LTTng and the `Babeltrace README `_ to install ``babeltrace`` with the python bindings. Optionally install the ``progressbar`` python module, and then: .. code-block:: bash pip3 install lttnganalyses ------------------- Development version ------------------- The **latest development version** can be installed directly from GitHub: .. code-block:: bash pip3 install --upgrade git+git://github.com/lttng/lttng-analyses.git ============== Trace creation ============== Here are the basic commands to create a trace, for more information on the LTTng setup, please refer to the `LTTng documentation Getting started guide `_. --------- Automatic --------- From the cloned git tree: .. code-block:: bash ./lttng-analyses-record ------ Manual ------ .. code-block:: bash lttng create lttng enable-channel -k bla --subbuf-size=4M lttng enable-event -k sched_switch,block_rq_complete,block_rq_issue,block_bio_remap,block_bio_backmerge,netif_receive_skb,net_dev_xmit,sched_process_fork,sched_process_exec,lttng_statedump_process_state,lttng_statedump_file_descriptor,lttng_statedump_block_device,writeback_pages_written,mm_vmscan_wakeup_kswapd,mm_page_free,mm_page_alloc,block_dirty_buffer,irq_handler_entry,irq_handler_exit,softirq_entry,softirq_exit,softirq_raise -c bla lttng enable-event -k --syscall -a -c bla lttng start ..do stuff... lttng stop lttng destroy ------ Remote ------ You can also create a trace on a server and send it to a remote host. 
The remote host only needs to run ``lttng-relayd -d`` and be reachable over the network. The only difference with the above commands is the tracing session's creation: .. code-block:: bash lttng create -U net:// ==================== Implemented analyses ==================== * CPU usage for the whole system * CPU usage per-process * Process CPU migration count * Memory usage per-process (as seen by the kernel) * Memory usage system-wide (as seen by the kernel) * I/O usage (syscalls, disk, network) * I/O operations log (with latency and usage) * I/O latency statistics (open, read, write, sync operations) * I/O latency frequency distribution * Interrupt handler duration statistics (count, min, max, average stdev) * Interrupt handler duration top * Interrupt handler duration log * Interrupt handler duration frequency distribution * SoftIRQ handler latency statistics * Syscalls usage statistics All of the analyses share the same code architecture making it possible to filter by timerange, process name, PID, min and max values using the same command-line options. Also note that reported timestamps can optionally be expressed in the GMT timezone to allow easy sharing between teams. The project's architecture makes it easy to add new analyses or to reuse the analysis backend in external tools which may then present the results in their own format (as opposed to text). ======== Examples ======== After having collected your trace, any script contained in this repository can be used to run an analysis. Read on for some examples! --- I/O --- ^^^^^^^^^^^^^^^^^ I/O latency stats ^^^^^^^^^^^^^^^^^ .. 
code-block:: bash $ ./lttng-iolatencystats mytrace/ Timerange: [2015-01-06 10:58:26.140545481, 2015-01-06 10:58:27.229358936] Syscalls latency statistics (usec): Type Count Min Average Max Stdev ----------------------------------------------------------------------------------------- Open 45 5.562 13.835 77.683 15.263 Read 109 0.316 5.774 62.569 9.277 Write 101 0.256 7.060 48.531 8.555 Sync 207 19.384 40.664 160.188 21.201 Disk latency statistics (usec): Name Count Min Average Max Stdev ----------------------------------------------------------------------------------------- dm-0 108 0.001 0.004 0.007 1.306 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ I/O latency frequency distribution ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ .. code-block:: bash $ ./lttng-iolatencyfreq mytrace/ Timerange: [2015-01-06 10:58:26.140545481, 2015-01-06 10:58:27.229358936] Open latency distribution (usec) ############################################################################### 5.562 ███████████████████████████████████████████████████████████████████ 25 9.168 ██████████ 4 12.774 █████████████████████ 8 16.380 ████████ 3 19.986 █████ 2 23.592 0 27.198 0 30.804 0 34.410 ██ 1 38.016 0 41.623 0 45.229 0 48.835 0 52.441 0 56.047 0 59.653 0 63.259 0 66.865 0 70.471 0 74.077 █████ 2 ^^^^^^^^^^^^^^^ I/O latency top ^^^^^^^^^^^^^^^ .. code-block:: bash $ ./lttng-iolatencytop analysis-20150115-120942/ --limit 3 --minsize 2 Checking the trace for lost events... 
Timerange: [2015-01-15 12:18:37.216484041, 2015-01-15 12:18:53.821580313] Top open syscall latencies (usec) Begin End Name Duration (usec) Size Proc PID Filename [12:18:50.432950815,12:18:50.870648568] open 437697.753 N/A apache2 31517 /var/lib/php5/sess_0ifir2hangm8ggaljdphl9o5b5 (fd=13) [12:18:52.946080165,12:18:52.946132278] open 52.113 N/A apache2 31588 /var/lib/php5/sess_mr9045p1k55vin1h0vg7rhgd63 (fd=13) [12:18:46.800846035,12:18:46.800874916] open 28.881 N/A apache2 31591 /var/lib/php5/sess_r7c12pccfvjtas15g3j69u14h0 (fd=13) [12:18:51.389797604,12:18:51.389824426] open 26.822 N/A apache2 31520 /var/lib/php5/sess_4sdb1rtjkhb78sabnoj8gpbl00 (fd=13) Top read syscall latencies (usec) Begin End Name Duration (usec) Size Proc PID Filename [12:18:37.256073107,12:18:37.256555967] read 482.860 7.00 B bash 10237 unknown (origin not found) (fd=3) [12:18:52.000209798,12:18:52.000252304] read 42.506 1.00 KB irqbalance 1337 /proc/interrupts (fd=3) [12:18:37.256559439,12:18:37.256601615] read 42.176 5.00 B bash 10237 unknown (origin not found) (fd=3) [12:18:42.000281918,12:18:42.000320016] read 38.098 1.00 KB irqbalance 1337 /proc/interrupts (fd=3) Top write syscall latencies (usec) Begin End Name Duration (usec) Size Proc PID Filename [12:18:49.913241516,12:18:49.915908862] write 2667.346 95.00 B apache2 31584 /var/log/apache2/access.log (fd=8) [12:18:37.472823631,12:18:37.472859836] writev 36.205 21.97 KB apache2 31544 unknown (origin not found) (fd=12) [12:18:37.991578372,12:18:37.991612724] writev 34.352 21.97 KB apache2 31589 unknown (origin not found) (fd=12) [12:18:39.547778549,12:18:39.547812515] writev 33.966 21.97 KB apache2 31584 unknown (origin not found) (fd=12) Top sync syscall latencies (usec) Begin End Name Duration (usec) Size Proc PID Filename [12:18:50.162776739,12:18:51.157522361] sync 994745.622 N/A sync 22791 None (fd=None) [12:18:37.227867532,12:18:37.232289687] sync_file_range 4422.155 N/A lttng-consumerd 19964 
/home/julien/lttng-traces/analysis-20150115-120942/kernel/metadata (fd=32) [12:18:37.238076585,12:18:37.239012027] sync_file_range 935.442 N/A lttng-consumerd 19964 /home/julien/lttng-traces/analysis-20150115-120942/kernel/metadata (fd=32) [12:18:37.220974711,12:18:37.221647124] sync_file_range 672.413 N/A lttng-consumerd 19964 /home/julien/lttng-traces/analysis-20150115-120942/kernel/metadata (fd=32) ^^^^^^^^^^^^^^^^^^ I/O operations log ^^^^^^^^^^^^^^^^^^ .. code-block:: bash $ ./lttng-iolog mytrace/ [10:58:26.221618530,10:58:26.221620659] write 2.129 8.00 B /usr/bin/x-term 11793 anon_inode:[eventfd] (fd=5) [10:58:26.221623609,10:58:26.221628055] read 4.446 50.00 B /usr/bin/x-term 11793 /dev/ptmx (fd=24) [10:58:26.221638929,10:58:26.221640008] write 1.079 8.00 B /usr/bin/x-term 11793 anon_inode:[eventfd] (fd=5) [10:58:26.221676232,10:58:26.221677385] read 1.153 8.00 B /usr/bin/x-term 11793 anon_inode:[eventfd] (fd=5) [10:58:26.223401804,10:58:26.223411683] open 9.879 N/A sleep 12420 /etc/ld.so.cache (fd=3) [10:58:26.223448060,10:58:26.223455577] open 7.517 N/A sleep 12420 /lib/x86_64-linux-gnu/libc.so.6 (fd=3) [10:58:26.223456522,10:58:26.223458898] read 2.376 832.00 B sleep 12420 /lib/x86_64-linux-gnu/libc.so.6 (fd=3) [10:58:26.223918068,10:58:26.223929316] open 11.248 N/A sleep 12420 (fd=3) [10:58:26.231881565,10:58:26.231895970] writev 14.405 16.00 B /usr/bin/x-term 11793 socket:[45650] (fd=4) [10:58:26.231979636,10:58:26.231988446] recvmsg 8.810 16.00 B Xorg 1827 socket:[47480] (fd=38) ^^^^^^^^^^^^^ I/O usage top ^^^^^^^^^^^^^ .. 
code-block:: bash $ ./lttng-iousagetop traces/pgread-writes Timerange: [2014-10-07 16:36:00.733214969, 2014-10-07 16:36:18.804584183] Per-process I/O Read ############################################################################### ██████████████████████████████████████████████████ 16.00 MB lttng-consumerd (2619) 0 B file 4.00 B net 16.00 MB unknown █████ 1.72 MB lttng-consumerd (2619) 0 B file 0 B net 1.72 MB unknown █ 398.13 KB postgres (4219) 121.05 KB file 277.07 KB net 8.00 B unknown 256.09 KB postgres (1348) 0 B file 255.97 KB net 117.00 B unknown 204.81 KB postgres (4218) 204.81 KB file 0 B net 0 B unknown 123.77 KB postgres (4220) 117.50 KB file 6.26 KB net 8.00 B unknown Per-process I/O Write ############################################################################### ██████████████████████████████████████████████████ 16.00 MB lttng-consumerd (2619) 0 B file 8.00 MB net 8.00 MB unknown ██████ 2.20 MB postgres (4219) 2.00 MB file 202.23 KB net 0 B unknown █████ 1.73 MB lttng-consumerd (2619) 0 B file 887.73 KB net 882.58 KB unknown ██ 726.33 KB postgres (1165) 8.00 KB file 6.33 KB net 712.00 KB unknown 158.69 KB postgres (1168) 158.69 KB file 0 B net 0 B unknown 80.66 KB postgres (1348) 0 B file 80.66 KB net 0 B unknown Files Read ############################################################################### ██████████████████████████████████████████████████ 8.00 MB anon_inode:[lttng_stream] (lttng-consumerd) 'fd 32 in lttng-consumerd (2619)' █████ 834.41 KB base/16384/pg_internal.init 'fd 7 in postgres (4219)', 'fd 7 in postgres (4220)', 'fd 7 in postgres (4221)', 'fd 7 in postgres (4222)', 'fd 7 in postgres (4223)', 'fd 7 in postgres (4224)', 'fd 7 in postgres (4225)', 'fd 7 in postgres (4226)' █ 256.09 KB socket:[8893] (postgres) 'fd 9 in postgres (1348)' █ 174.69 KB pg_stat_tmp/pgstat.stat 'fd 9 in postgres (4218)', 'fd 9 in postgres (1167)' 109.48 KB global/pg_internal.init 'fd 7 in postgres (4218)', 'fd 7 in postgres (4219)', 'fd 7 in postgres 
(4220)', 'fd 7 in postgres (4221)', 'fd 7 in postgres (4222)', 'fd 7 in postgres (4223)', 'fd 7 in postgres (4224)', 'fd 7 in postgres (4225)', 'fd 7 in postgres (4226)' 104.30 KB base/11951/pg_internal.init 'fd 7 in postgres (4218)' 12.85 KB socket (lttng-sessiond) 'fd 30 in lttng-sessiond (384)' 4.50 KB global/pg_filenode.map 'fd 7 in postgres (4218)', 'fd 7 in postgres (4219)', 'fd 7 in postgres (4220)', 'fd 7 in postgres (4221)', 'fd 7 in postgres (4222)', 'fd 7 in postgres (4223)', 'fd 7 in postgres (4224)', 'fd 7 in postgres (4225)', 'fd 7 in postgres (4226)' 4.16 KB socket (postgres) 'fd 9 in postgres (4226)' 4.00 KB /proc/interrupts 'fd 3 in irqbalance (1104)' Files Write ############################################################################### ██████████████████████████████████████████████████ 8.00 MB socket:[56371] (lttng-consumerd) 'fd 30 in lttng-consumerd (2619)' █████████████████████████████████████████████████ 8.00 MB pipe:[53306] (lttng-consumerd) 'fd 12 in lttng-consumerd (2619)' ██████████ 1.76 MB pg_xlog/00000001000000000000000B 'fd 31 in postgres (4219)' █████ 887.82 KB socket:[56369] (lttng-consumerd) 'fd 26 in lttng-consumerd (2619)' █████ 882.58 KB pipe:[53309] (lttng-consumerd) 'fd 18 in lttng-consumerd (2619)' 160.00 KB /var/lib/postgresql/9.1/main/base/16384/16602 'fd 14 in postgres (1165)' 158.69 KB pg_stat_tmp/pgstat.tmp 'fd 3 in postgres (1168)' 144.00 KB /var/lib/postgresql/9.1/main/base/16384/16613 'fd 12 in postgres (1165)' 88.00 KB /var/lib/postgresql/9.1/main/base/16384/16609 'fd 11 in postgres (1165)' 78.28 KB socket:[8893] (postgres) 'fd 9 in postgres (1348)' Block I/O Read ############################################################################### Block I/O Write ############################################################################### ██████████████████████████████████████████████████ 1.76 MB postgres (pid=4219) ████ 160.00 KB postgres (pid=1168) ██ 100.00 KB kworker/u8:0 (pid=1540) ██ 96.00 KB jbd2/vda1-8 
(pid=257) █ 40.00 KB postgres (pid=1166) 8.00 KB kworker/u9:0 (pid=4197) 4.00 KB kworker/u9:2 (pid=1381) Disk nr_sector ############################################################################### ███████████████████████████████████████████████████████████████████ 4416.00 sectors vda1 Disk nr_requests ############################################################################### ████████████████████████████████████████████████████████████████████ 177.00 requests vda1 Disk request time/sector ############################################################################### ██████████████████████████████████████████████████████████████████ 0.01 ms vda1 Network recv_bytes ############################################################################### ███████████████████████████████████████████████████████ 739.50 KB eth0 █████ 80.27 KB lo Network sent_bytes ############################################################################### ████████████████████████████████████████████████████████ 9.36 MB eth0 -------- Syscalls -------- ^^^^^^^^^^ Statistics ^^^^^^^^^^ .. 
code-block:: bash $ ./lttng-syscallstats mytrace/ Timerange: [2015-01-15 12:18:37.216484041, 2015-01-15 12:18:53.821580313] Per-TID syscalls statistics (usec) find (22785) Count Min Average Max Stdev Return values - getdents 14240 0.380 364.301 43372.450 1629.390 {'success': 14240} - close 14236 0.233 0.506 4.932 0.217 {'success': 14236} - fchdir 14231 0.252 0.407 5.769 0.117 {'success': 14231} - open 7123 0.779 2.321 12.697 0.936 {'success': 7119, 'ENOENT': 4} - newfstatat 7118 1.457 143.562 28103.532 1410.281 {'success': 7118} - openat 7118 1.525 2.411 9.107 0.771 {'success': 7118} - newfstat 7117 0.272 0.654 8.707 0.248 {'success': 7117} - write 573 0.298 0.715 8.584 0.391 {'success': 573} - brk 27 0.615 5.768 30.792 7.830 {'success': 27} - rt_sigaction 22 0.227 0.283 0.589 0.098 {'success': 22} - mmap 12 1.116 2.116 3.597 0.762 {'success': 12} - mprotect 6 1.185 2.235 3.923 1.148 {'success': 6} - read 5 0.925 2.101 6.300 2.351 {'success': 5} - ioctl 4 0.342 1.151 2.280 0.873 {'success': 2, 'ENOTTY': 2} - access 4 1.166 2.530 4.202 1.527 {'ENOENT': 4} - rt_sigprocmask 3 0.325 0.570 0.979 0.357 {'success': 3} - dup2 2 0.250 0.562 0.874 ? {'success': 2} - munmap 2 3.006 5.399 7.792 ? {'success': 2} - execve 1 7277.974 7277.974 7277.974 ? {'success': 1} - setpgid 1 0.945 0.945 0.945 ? {'success': 1} - fcntl 1 ? 0.000 0.000 ? {} - newuname 1 1.240 1.240 1.240 ? {'success': 1} Total: 71847 ----------------------------------------------------------------------------------------------------------------- apache2 (31517) Count Min Average Max Stdev Return values - fcntl 192 ? 0.000 0.000 ? 
{} - newfstat 156 0.237 0.484 1.102 0.222 {'success': 156} - read 144 0.307 1.602 16.307 1.698 {'success': 117, 'EAGAIN': 27} - access 96 0.705 1.580 3.364 0.670 {'success': 12, 'ENOENT': 84} - newlstat 84 0.459 0.738 1.456 0.186 {'success': 63, 'ENOENT': 21} - newstat 74 0.735 2.266 11.212 1.772 {'success': 50, 'ENOENT': 24} - lseek 72 0.317 0.522 0.915 0.112 {'success': 72} - close 39 0.471 0.615 0.867 0.069 {'success': 39} - open 36 2.219 12162.689 437697.753 72948.868 {'success': 36} - getcwd 28 0.287 0.701 1.331 0.277 {'success': 28} - poll 27 1.080 1139.669 2851.163 856.723 {'success': 27} - times 24 0.765 0.956 1.327 0.107 {'success': 24} - setitimer 24 0.499 5.848 16.668 4.041 {'success': 24} - write 24 5.467 6.784 16.827 2.459 {'success': 24} - writev 24 10.241 17.645 29.817 5.116 {'success': 24} - mmap 15 3.060 3.482 4.406 0.317 {'success': 15} - munmap 15 2.944 3.502 4.154 0.427 {'success': 15} - brk 12 0.738 4.579 13.795 4.437 {'success': 12} - chdir 12 0.989 1.600 2.353 0.385 {'success': 12} - flock 6 0.906 1.282 2.043 0.423 {'success': 6} - rt_sigaction 6 0.530 0.725 1.123 0.217 {'success': 6} - pwrite64 6 1.262 1.430 1.692 0.143 {'success': 6} - rt_sigprocmask 6 0.539 0.650 0.976 0.162 {'success': 6} - shutdown 3 7.323 8.487 10.281 1.576 {'success': 3} - getsockname 3 1.015 1.228 1.585 0.311 {'success': 3} - accept4 3 5174453.611 3450157.282 5176018.235 ? {'success': 2} Total: 1131 --- IRQ --- ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Handler duration and raise latency statistics ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ .. 
code-block:: bash $ ./lttng-irqstats mytrace/ Timerange: [2014-03-11 16:05:41.314824752, 2014-03-11 16:05:45.041994298] Hard IRQ Duration (us) count min avg max stdev ----------------------------------------------------------------------------------| 1: 30 10.901 45.500 64.510 18.447 | 42: 259 3.203 7.863 21.426 3.183 | 43: 2 3.859 3.976 4.093 0.165 | 44: 92 0.300 3.995 6.542 2.181 | Soft IRQ Duration (us) Raise latency (us) count min avg max stdev | count min avg max stdev ----------------------------------------------------------------------------------|------------------------------------------------------------ 1: 495 0.202 21.058 51.060 11.047 | 53 2.141 11.217 20.005 7.233 3: 14 0.133 9.177 32.774 10.483 | 14 0.763 3.703 10.902 3.448 4: 257 5.981 29.064 125.862 15.891 | 257 0.891 3.104 15.054 2.046 6: 26 0.309 1.198 1.748 0.329 | 26 9.636 39.222 51.430 11.246 7: 299 1.185 14.768 90.465 15.992 | 298 1.286 31.387 61.700 11.866 9: 338 0.592 3.387 13.745 1.356 | 147 2.480 29.299 64.453 14.286 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Handler duration frequency distribution ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ .. 
code-block:: bash $ ./lttng-irqfreq --timerange [16:05:42,16:05:45] --irq 44 --stats mytrace/ Timerange: [2014-03-11 16:05:42.042034570, 2014-03-11 16:05:44.998914297] Hard IRQ Duration (us) count min avg max stdev ----------------------------------------------------------------------------------| 44: 72 0.300 4.018 6.542 2.164 | Frequency distribution iwlwifi (44) ############################################################################### 0.300 █████ 1.00 0.612 ██████████████████████████████████████████████████████████████ 12.00 0.924 ████████████████████ 4.00 1.236 ██████████ 2.00 1.548 0.00 1.861 █████ 1.00 2.173 0.00 2.485 █████ 1.00 2.797 ██████████████████████████ 5.00 3.109 █████ 1.00 3.421 ███████████████ 3.00 3.733 0.00 4.045 █████ 1.00 4.357 █████ 1.00 4.669 ██████████ 2.00 4.981 ██████████ 2.00 5.294 █████████████████████████████████████████ 8.00 5.606 ████████████████████████████████████████████████████████████████████ 13.00 5.918 ██████████████████████████████████████████████████████████████ 12.00 6.230 ███████████████ 3.00 ------ Others ------ There are a lot of other scripts, we encourage you to try them and read the ``--help`` to see all the available options. ================ Work in progress ================ Track the page cache and extract the latencies associated with pages flush to disk. In order to do that, we rely on the assumption that the pages are flushed in a FIFO order. 
It might not be 100% accurate, but it already gives great results : An example here when saving a file in vim:: [19:57:51.173332284 - 19:57:51.177794657] vim (31517) syscall_entry_fsync(fd = 4 ) = 0, 4.462 ms 1 dirty page(s) were flushed (assuming FIFO): vim (31517): 1 pages - blabla : 1 pages 13 active dirty filesystem page(s) (known): redis-server (2092): 2 pages - /var/log/redis/redis-server.log : 2 pages vim (31517): 2 pages - .blabla.swp : 2 pages lttng-consumerd (6750): 9 pages - unknown (origin not found) : 9 pages An other example when running the 'sync' command:: [19:57:53.046840755 - 19:57:53.072809609] sync (31554) syscall_entry_sync(fd = ) = 0, 25.969 ms 23 dirty page(s) were flushed (assuming FIFO): redis-server (2092): 2 pages - /var/log/redis/redis-server.log : 2 pages vim (31517): 9 pages - /home/julien/.viminfo.tmp : 6 pages - .blabla.swp : 3 pages lttng-consumerd (6750): 12 pages - unknown (origin not found) : 12 pages PostgreSQL with 'sys_fdatasync':: [13:49:39.908599447 - 13:49:39.915930730] postgres (1137) sys_fdatasync(fd = 7 ) = 0, 7.331 ms 2 pages allocated during the period 88 dirty page(s) were flushed (assuming FIFO): postgres (1137): 88 pages - /var/lib/postgresql/9.1/main/pg_xlog/000000010000000000000008 : 88 pages 68 last dirtied filesystem page(s): postgres (2419): 68 pages - base/11951/18410 : 46 pages - base/11951/18407 : 10 pages - base/11951/18407_fsm : 6 pages - base/11951/18410_fsm : 6 pages Detecting a fight for the I/O between a huge write and postgresql:: [13:49:47.242730583 - 13:49:47.442835037] python (2353) sys_write(fd = 3 , count = 102395904) = 102395904, 200.104 ms 34760 pages allocated during the period woke up kswapd during the period 10046 pages written on disk freed 33753 pages from the cache during the period 1397 last dirtied filesystem page(s): python (2353): 1325 pages - /root/bla : 1325 pages postgres (2419): 72 pages - base/11951/18419 : 72 pages =========== Limitations =========== The main limitation of this 
project is the fact that it can be quite slow to process a large trace. This project is a work in progress and we focus on the problem-solving aspect. Therefore, features have been prioritized over performance for now. One other aspect is the fact that the state is not persistent; the trace has to be re-processed if another analysis script is to be used on the same trace. Some scripts belonging to the same category allow the combination of multiple analyses into a single pass (see ``--freq``, ``--log``, ``--usage``, ``--latencystats``, etc). We are planning to add a way to save the state and/or create an interactive environment to allow the user to run multiple analyses on the same trace without having to process the trace every time. ========== Conclusion ========== We hope you have fun trying this project and please remember it is a work in progress; feedback, bug reports and improvement ideas are always welcome! .. _pip: http://www.pip-installer.org/en/latest/index.html .. |pypi| image:: https://img.shields.io/pypi/v/lttnganalyses.svg?style=flat-square&label=latest%20version :target: https://pypi.python.org/pypi/lttnganalyses :alt: Latest version released on PyPi Keywords: lttng tracing Platform: UNKNOWN Classifier: Development Status :: 4 - Beta Classifier: Intended Audience :: Developers Classifier: Intended Audience :: System Administrators Classifier: Topic :: System :: Monitoring Classifier: License :: OSI Approved :: MIT License Classifier: Programming Language :: Python :: 3.4 lttnganalyses-0.4.3/lttnganalyses.egg-info/dependency_links.txt0000664000175000017500000000000112667421106026513 0ustar mjeansonmjeanson00000000000000 lttnganalyses-0.4.3/lttnganalyses.egg-info/top_level.txt0000664000175000017500000000001612667421106025174 0ustar mjeansonmjeanson00000000000000lttnganalyses lttnganalyses-0.4.3/lttnganalyses.egg-info/entry_points.txt0000664000175000017500000000304312667421106025743 0ustar mjeansonmjeanson00000000000000[console_scripts] lttng-cputop = 
lttnganalyses.cli.cputop:run lttng-cputop-mi = lttnganalyses.cli.cputop:run_mi lttng-iolatencyfreq = lttnganalyses.cli.io:runfreq lttng-iolatencyfreq-mi = lttnganalyses.cli.io:runfreq_mi lttng-iolatencystats = lttnganalyses.cli.io:runstats lttng-iolatencystats-mi = lttnganalyses.cli.io:runstats_mi lttng-iolatencytop = lttnganalyses.cli.io:runlatencytop lttng-iolatencytop-mi = lttnganalyses.cli.io:runlatencytop_mi lttng-iolog = lttnganalyses.cli.io:runlog lttng-iolog-mi = lttnganalyses.cli.io:runlog_mi lttng-iousagetop = lttnganalyses.cli.io:runusage lttng-iousagetop-mi = lttnganalyses.cli.io:runusage_mi lttng-irqfreq = lttnganalyses.cli.irq:runfreq lttng-irqfreq-mi = lttnganalyses.cli.irq:runfreq_mi lttng-irqlog = lttnganalyses.cli.irq:runlog lttng-irqlog-mi = lttnganalyses.cli.irq:runlog_mi lttng-irqstats = lttnganalyses.cli.irq:runstats lttng-irqstats-mi = lttnganalyses.cli.irq:runstats_mi lttng-memtop = lttnganalyses.cli.memtop:run lttng-memtop-mi = lttnganalyses.cli.memtop:run_mi lttng-schedfreq = lttnganalyses.cli.sched:runfreq lttng-schedfreq-mi = lttnganalyses.cli.sched:runfreq_mi lttng-schedlog = lttnganalyses.cli.sched:runlog lttng-schedlog-mi = lttnganalyses.cli.sched:runlog_mi lttng-schedstats = lttnganalyses.cli.sched:runstats lttng-schedstats-mi = lttnganalyses.cli.sched:runstats_mi lttng-schedtop = lttnganalyses.cli.sched:runtop lttng-schedtop-mi = lttnganalyses.cli.sched:runtop_mi lttng-syscallstats = lttnganalyses.cli.syscallstats:run lttng-syscallstats-mi = lttnganalyses.cli.syscallstats:run_mi lttnganalyses-0.4.3/lttnganalyses.egg-info/SOURCES.txt0000664000175000017500000000406712667421106024340 0ustar mjeansonmjeanson00000000000000ChangeLog LICENSE MANIFEST.in README.rst lttng-analyses-record lttng-cputop lttng-iolatencyfreq lttng-iolatencystats lttng-iolatencytop lttng-iolog lttng-iousagetop lttng-irqfreq lttng-irqlog lttng-irqstats lttng-memtop lttng-schedfreq lttng-schedlog lttng-schedstats lttng-schedtop lttng-syscallstats lttng-track-process 
mit-license.txt setup.cfg setup.py versioneer.py lttnganalyses/__init__.py lttnganalyses/_version.py lttnganalyses.egg-info/PKG-INFO lttnganalyses.egg-info/SOURCES.txt lttnganalyses.egg-info/dependency_links.txt lttnganalyses.egg-info/entry_points.txt lttnganalyses.egg-info/requires.txt lttnganalyses.egg-info/top_level.txt lttnganalyses/cli/__init__.py lttnganalyses/cli/command.py lttnganalyses/cli/cputop.py lttnganalyses/cli/io.py lttnganalyses/cli/irq.py lttnganalyses/cli/memtop.py lttnganalyses/cli/mi.py lttnganalyses/cli/progressbar.py lttnganalyses/cli/sched.py lttnganalyses/cli/syscallstats.py lttnganalyses/cli/termgraph.py lttnganalyses/common/__init__.py lttnganalyses/common/format_utils.py lttnganalyses/common/version_utils.py lttnganalyses/core/__init__.py lttnganalyses/core/analysis.py lttnganalyses/core/cputop.py lttnganalyses/core/io.py lttnganalyses/core/irq.py lttnganalyses/core/memtop.py lttnganalyses/core/sched.py lttnganalyses/core/stats.py lttnganalyses/core/syscalls.py lttnganalyses/linuxautomaton/__init__.py lttnganalyses/linuxautomaton/automaton.py lttnganalyses/linuxautomaton/block.py lttnganalyses/linuxautomaton/common.py lttnganalyses/linuxautomaton/io.py lttnganalyses/linuxautomaton/irq.py lttnganalyses/linuxautomaton/mem.py lttnganalyses/linuxautomaton/net.py lttnganalyses/linuxautomaton/sched.py lttnganalyses/linuxautomaton/sp.py lttnganalyses/linuxautomaton/statedump.py lttnganalyses/linuxautomaton/sv.py lttnganalyses/linuxautomaton/syscalls.py tests/__init__.py tests/analysis_test.py tests/gen_ctfwriter.py tests/test_cputop.py tests/test_io.py tests/test_irq.py tests/trace_writer.py tests/expected/cputop.txt tests/expected/iolatencytop.txt tests/expected/iousagetop.txt tests/expected/irqlog.txt tests/expected/irqstats.txtlttnganalyses-0.4.3/lttnganalyses.egg-info/requires.txt0000664000175000017500000000003312667421106025041 0ustar mjeansonmjeanson00000000000000 [progressbar] progressbar 
lttnganalyses-0.4.3/lttng-schedstats0000775000175000017500000000235312665072151021307 0ustar mjeansonmjeanson00000000000000#!/usr/bin/env python3 # # The MIT License (MIT) # # Copyright (C) 2015 - Julien Desfossez # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. 
from lttnganalyses.cli import sched if __name__ == '__main__': sched.runstats() lttnganalyses-0.4.3/lttng-iolatencystats0000775000175000017500000000234712553274232022213 0ustar mjeansonmjeanson00000000000000#!/usr/bin/env python3 # # The MIT License (MIT) # # Copyright (C) 2015 - Julien Desfossez # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. 
from lttnganalyses.cli import io if __name__ == '__main__': io.runstats() lttnganalyses-0.4.3/lttng-cputop0000775000175000017500000000235112553274232020452 0ustar mjeansonmjeanson00000000000000#!/usr/bin/env python3 # # The MIT License (MIT) # # Copyright (C) 2015 - Julien Desfossez # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. from lttnganalyses.cli import cputop if __name__ == '__main__': cputop.run() lttnganalyses-0.4.3/README.rst0000664000175000017500000011510512665072151017555 0ustar mjeansonmjeanson00000000000000************** LTTng-analyses ************** This repository contains various scripts to extract monitoring data and metrics from LTTng kernel traces. As opposed to other diagnostic or monitoring solutions, this approach is designed to allow users to record their system's activity with a low overhead, wait for a problem to occur and then diagnose its cause offline. 
This solution allows the user to target hard to find problems and dig until the root cause is found. This README describes the implemented analyses as well as how to use them. |pypi| .. contents:: :local: :depth: 2 :backlinks: none ============ Requirements ============ * LTTng >= 2.5 * Babeltrace >= 1.2 (with python bindings built) * Python >= 3.4 ============ Installation ============ --------------- Release version --------------- On **Ubuntu** (12.04 and up) using the LTTng ppa: .. code-block:: bash apt-get install -y software-properties-common (or python-software-properties on 12.04) apt-add-repository -y ppa:lttng/ppa apt-get update apt-get -y install lttng-tools babeltrace lttng-modules-dkms python3-babeltrace python3-progressbar python3-lttnganalyses On **Debian Sid**: .. code-block:: bash apt-get -y install lttng-tools babeltrace lttng-modules-dkms python3-babeltrace python3-progressbar python3-lttnganalyses On other distributions: Please refer to the `LTTng documentation `_ to install LTTng and the `Babeltrace README `_ to install ``babeltrace`` with the python bindings. Optionally install the ``progressbar`` python module, and then: .. code-block:: bash pip3 install lttnganalyses ------------------- Development version ------------------- The **latest development version** can be installed directly from GitHub: .. code-block:: bash pip3 install --upgrade git+git://github.com/lttng/lttng-analyses.git ============== Trace creation ============== Here are the basic commands to create a trace, for more information on the LTTng setup, please refer to the `LTTng documentation Getting started guide `_. --------- Automatic --------- From the cloned git tree: .. code-block:: bash ./lttng-analyses-record ------ Manual ------ .. 
code-block:: bash lttng create lttng enable-channel -k bla --subbuf-size=4M lttng enable-event -k sched_switch,block_rq_complete,block_rq_issue,block_bio_remap,block_bio_backmerge,netif_receive_skb,net_dev_xmit,sched_process_fork,sched_process_exec,lttng_statedump_process_state,lttng_statedump_file_descriptor,lttng_statedump_block_device,writeback_pages_written,mm_vmscan_wakeup_kswapd,mm_page_free,mm_page_alloc,block_dirty_buffer,irq_handler_entry,irq_handler_exit,softirq_entry,softirq_exit,softirq_raise -c bla lttng enable-event -k --syscall -a -c bla lttng start ..do stuff... lttng stop lttng destroy ------ Remote ------ You can also create a trace on a server and send it to a remote host. The remote host only needs to run ``lttng-relayd -d`` and be reachable over the network. The only difference with the above commands is the tracing session's creation: .. code-block:: bash lttng create -U net:// ==================== Implemented analyses ==================== * CPU usage for the whole system * CPU usage per-process * Process CPU migration count * Memory usage per-process (as seen by the kernel) * Memory usage system-wide (as seen by the kernel) * I/O usage (syscalls, disk, network) * I/O operations log (with latency and usage) * I/O latency statistics (open, read, write, sync operations) * I/O latency frequency distribution * Interrupt handler duration statistics (count, min, max, average stdev) * Interrupt handler duration top * Interrupt handler duration log * Interrupt handler duration frequency distribution * SoftIRQ handler latency statistics * Syscalls usage statistics All of the analyses share the same code architecture making it possible to filter by timerange, process name, PID, min and max values using the same command-line options. Also note that reported timestamps can optionally be expressed in the GMT timezone to allow easy sharing between teams. 
The project's architecture makes it easy to add new analyses or to reuse the analysis backend in external tools which may then present the results in their own format (as opposed to text). ======== Examples ======== After having collected your trace, any script contained in this repository can be used to run an analysis. Read on for some examples! --- I/O --- ^^^^^^^^^^^^^^^^^ I/O latency stats ^^^^^^^^^^^^^^^^^ .. code-block:: bash $ ./lttng-iolatencystats mytrace/ Timerange: [2015-01-06 10:58:26.140545481, 2015-01-06 10:58:27.229358936] Syscalls latency statistics (usec): Type Count Min Average Max Stdev ----------------------------------------------------------------------------------------- Open 45 5.562 13.835 77.683 15.263 Read 109 0.316 5.774 62.569 9.277 Write 101 0.256 7.060 48.531 8.555 Sync 207 19.384 40.664 160.188 21.201 Disk latency statistics (usec): Name Count Min Average Max Stdev ----------------------------------------------------------------------------------------- dm-0 108 0.001 0.004 0.007 1.306 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ I/O latency frequency distribution ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ .. code-block:: bash $ ./lttng-iolatencyfreq mytrace/ Timerange: [2015-01-06 10:58:26.140545481, 2015-01-06 10:58:27.229358936] Open latency distribution (usec) ############################################################################### 5.562 ███████████████████████████████████████████████████████████████████ 25 9.168 ██████████ 4 12.774 █████████████████████ 8 16.380 ████████ 3 19.986 █████ 2 23.592 0 27.198 0 30.804 0 34.410 ██ 1 38.016 0 41.623 0 45.229 0 48.835 0 52.441 0 56.047 0 59.653 0 63.259 0 66.865 0 70.471 0 74.077 █████ 2 ^^^^^^^^^^^^^^^ I/O latency top ^^^^^^^^^^^^^^^ .. code-block:: bash $ ./lttng-iolatencytop analysis-20150115-120942/ --limit 3 --minsize 2 Checking the trace for lost events... 
Timerange: [2015-01-15 12:18:37.216484041, 2015-01-15 12:18:53.821580313] Top open syscall latencies (usec) Begin End Name Duration (usec) Size Proc PID Filename [12:18:50.432950815,12:18:50.870648568] open 437697.753 N/A apache2 31517 /var/lib/php5/sess_0ifir2hangm8ggaljdphl9o5b5 (fd=13) [12:18:52.946080165,12:18:52.946132278] open 52.113 N/A apache2 31588 /var/lib/php5/sess_mr9045p1k55vin1h0vg7rhgd63 (fd=13) [12:18:46.800846035,12:18:46.800874916] open 28.881 N/A apache2 31591 /var/lib/php5/sess_r7c12pccfvjtas15g3j69u14h0 (fd=13) [12:18:51.389797604,12:18:51.389824426] open 26.822 N/A apache2 31520 /var/lib/php5/sess_4sdb1rtjkhb78sabnoj8gpbl00 (fd=13) Top read syscall latencies (usec) Begin End Name Duration (usec) Size Proc PID Filename [12:18:37.256073107,12:18:37.256555967] read 482.860 7.00 B bash 10237 unknown (origin not found) (fd=3) [12:18:52.000209798,12:18:52.000252304] read 42.506 1.00 KB irqbalance 1337 /proc/interrupts (fd=3) [12:18:37.256559439,12:18:37.256601615] read 42.176 5.00 B bash 10237 unknown (origin not found) (fd=3) [12:18:42.000281918,12:18:42.000320016] read 38.098 1.00 KB irqbalance 1337 /proc/interrupts (fd=3) Top write syscall latencies (usec) Begin End Name Duration (usec) Size Proc PID Filename [12:18:49.913241516,12:18:49.915908862] write 2667.346 95.00 B apache2 31584 /var/log/apache2/access.log (fd=8) [12:18:37.472823631,12:18:37.472859836] writev 36.205 21.97 KB apache2 31544 unknown (origin not found) (fd=12) [12:18:37.991578372,12:18:37.991612724] writev 34.352 21.97 KB apache2 31589 unknown (origin not found) (fd=12) [12:18:39.547778549,12:18:39.547812515] writev 33.966 21.97 KB apache2 31584 unknown (origin not found) (fd=12) Top sync syscall latencies (usec) Begin End Name Duration (usec) Size Proc PID Filename [12:18:50.162776739,12:18:51.157522361] sync 994745.622 N/A sync 22791 None (fd=None) [12:18:37.227867532,12:18:37.232289687] sync_file_range 4422.155 N/A lttng-consumerd 19964 
/home/julien/lttng-traces/analysis-20150115-120942/kernel/metadata (fd=32) [12:18:37.238076585,12:18:37.239012027] sync_file_range 935.442 N/A lttng-consumerd 19964 /home/julien/lttng-traces/analysis-20150115-120942/kernel/metadata (fd=32) [12:18:37.220974711,12:18:37.221647124] sync_file_range 672.413 N/A lttng-consumerd 19964 /home/julien/lttng-traces/analysis-20150115-120942/kernel/metadata (fd=32) ^^^^^^^^^^^^^^^^^^ I/O operations log ^^^^^^^^^^^^^^^^^^ .. code-block:: bash $ ./lttng-iolog mytrace/ [10:58:26.221618530,10:58:26.221620659] write 2.129 8.00 B /usr/bin/x-term 11793 anon_inode:[eventfd] (fd=5) [10:58:26.221623609,10:58:26.221628055] read 4.446 50.00 B /usr/bin/x-term 11793 /dev/ptmx (fd=24) [10:58:26.221638929,10:58:26.221640008] write 1.079 8.00 B /usr/bin/x-term 11793 anon_inode:[eventfd] (fd=5) [10:58:26.221676232,10:58:26.221677385] read 1.153 8.00 B /usr/bin/x-term 11793 anon_inode:[eventfd] (fd=5) [10:58:26.223401804,10:58:26.223411683] open 9.879 N/A sleep 12420 /etc/ld.so.cache (fd=3) [10:58:26.223448060,10:58:26.223455577] open 7.517 N/A sleep 12420 /lib/x86_64-linux-gnu/libc.so.6 (fd=3) [10:58:26.223456522,10:58:26.223458898] read 2.376 832.00 B sleep 12420 /lib/x86_64-linux-gnu/libc.so.6 (fd=3) [10:58:26.223918068,10:58:26.223929316] open 11.248 N/A sleep 12420 (fd=3) [10:58:26.231881565,10:58:26.231895970] writev 14.405 16.00 B /usr/bin/x-term 11793 socket:[45650] (fd=4) [10:58:26.231979636,10:58:26.231988446] recvmsg 8.810 16.00 B Xorg 1827 socket:[47480] (fd=38) ^^^^^^^^^^^^^ I/O usage top ^^^^^^^^^^^^^ .. 
code-block:: bash $ ./lttng-iousagetop traces/pgread-writes Timerange: [2014-10-07 16:36:00.733214969, 2014-10-07 16:36:18.804584183] Per-process I/O Read ############################################################################### ██████████████████████████████████████████████████ 16.00 MB lttng-consumerd (2619) 0 B file 4.00 B net 16.00 MB unknown █████ 1.72 MB lttng-consumerd (2619) 0 B file 0 B net 1.72 MB unknown █ 398.13 KB postgres (4219) 121.05 KB file 277.07 KB net 8.00 B unknown 256.09 KB postgres (1348) 0 B file 255.97 KB net 117.00 B unknown 204.81 KB postgres (4218) 204.81 KB file 0 B net 0 B unknown 123.77 KB postgres (4220) 117.50 KB file 6.26 KB net 8.00 B unknown Per-process I/O Write ############################################################################### ██████████████████████████████████████████████████ 16.00 MB lttng-consumerd (2619) 0 B file 8.00 MB net 8.00 MB unknown ██████ 2.20 MB postgres (4219) 2.00 MB file 202.23 KB net 0 B unknown █████ 1.73 MB lttng-consumerd (2619) 0 B file 887.73 KB net 882.58 KB unknown ██ 726.33 KB postgres (1165) 8.00 KB file 6.33 KB net 712.00 KB unknown 158.69 KB postgres (1168) 158.69 KB file 0 B net 0 B unknown 80.66 KB postgres (1348) 0 B file 80.66 KB net 0 B unknown Files Read ############################################################################### ██████████████████████████████████████████████████ 8.00 MB anon_inode:[lttng_stream] (lttng-consumerd) 'fd 32 in lttng-consumerd (2619)' █████ 834.41 KB base/16384/pg_internal.init 'fd 7 in postgres (4219)', 'fd 7 in postgres (4220)', 'fd 7 in postgres (4221)', 'fd 7 in postgres (4222)', 'fd 7 in postgres (4223)', 'fd 7 in postgres (4224)', 'fd 7 in postgres (4225)', 'fd 7 in postgres (4226)' █ 256.09 KB socket:[8893] (postgres) 'fd 9 in postgres (1348)' █ 174.69 KB pg_stat_tmp/pgstat.stat 'fd 9 in postgres (4218)', 'fd 9 in postgres (1167)' 109.48 KB global/pg_internal.init 'fd 7 in postgres (4218)', 'fd 7 in postgres (4219)', 'fd 7 in postgres 
(4220)', 'fd 7 in postgres (4221)', 'fd 7 in postgres (4222)', 'fd 7 in postgres (4223)', 'fd 7 in postgres (4224)', 'fd 7 in postgres (4225)', 'fd 7 in postgres (4226)' 104.30 KB base/11951/pg_internal.init 'fd 7 in postgres (4218)' 12.85 KB socket (lttng-sessiond) 'fd 30 in lttng-sessiond (384)' 4.50 KB global/pg_filenode.map 'fd 7 in postgres (4218)', 'fd 7 in postgres (4219)', 'fd 7 in postgres (4220)', 'fd 7 in postgres (4221)', 'fd 7 in postgres (4222)', 'fd 7 in postgres (4223)', 'fd 7 in postgres (4224)', 'fd 7 in postgres (4225)', 'fd 7 in postgres (4226)' 4.16 KB socket (postgres) 'fd 9 in postgres (4226)' 4.00 KB /proc/interrupts 'fd 3 in irqbalance (1104)' Files Write ############################################################################### ██████████████████████████████████████████████████ 8.00 MB socket:[56371] (lttng-consumerd) 'fd 30 in lttng-consumerd (2619)' █████████████████████████████████████████████████ 8.00 MB pipe:[53306] (lttng-consumerd) 'fd 12 in lttng-consumerd (2619)' ██████████ 1.76 MB pg_xlog/00000001000000000000000B 'fd 31 in postgres (4219)' █████ 887.82 KB socket:[56369] (lttng-consumerd) 'fd 26 in lttng-consumerd (2619)' █████ 882.58 KB pipe:[53309] (lttng-consumerd) 'fd 18 in lttng-consumerd (2619)' 160.00 KB /var/lib/postgresql/9.1/main/base/16384/16602 'fd 14 in postgres (1165)' 158.69 KB pg_stat_tmp/pgstat.tmp 'fd 3 in postgres (1168)' 144.00 KB /var/lib/postgresql/9.1/main/base/16384/16613 'fd 12 in postgres (1165)' 88.00 KB /var/lib/postgresql/9.1/main/base/16384/16609 'fd 11 in postgres (1165)' 78.28 KB socket:[8893] (postgres) 'fd 9 in postgres (1348)' Block I/O Read ############################################################################### Block I/O Write ############################################################################### ██████████████████████████████████████████████████ 1.76 MB postgres (pid=4219) ████ 160.00 KB postgres (pid=1168) ██ 100.00 KB kworker/u8:0 (pid=1540) ██ 96.00 KB jbd2/vda1-8 
(pid=257) █ 40.00 KB postgres (pid=1166) 8.00 KB kworker/u9:0 (pid=4197) 4.00 KB kworker/u9:2 (pid=1381) Disk nr_sector ############################################################################### ███████████████████████████████████████████████████████████████████ 4416.00 sectors vda1 Disk nr_requests ############################################################################### ████████████████████████████████████████████████████████████████████ 177.00 requests vda1 Disk request time/sector ############################################################################### ██████████████████████████████████████████████████████████████████ 0.01 ms vda1 Network recv_bytes ############################################################################### ███████████████████████████████████████████████████████ 739.50 KB eth0 █████ 80.27 KB lo Network sent_bytes ############################################################################### ████████████████████████████████████████████████████████ 9.36 MB eth0 -------- Syscalls -------- ^^^^^^^^^^ Statistics ^^^^^^^^^^ .. 
code-block:: bash $ ./lttng-syscallstats mytrace/ Timerange: [2015-01-15 12:18:37.216484041, 2015-01-15 12:18:53.821580313] Per-TID syscalls statistics (usec) find (22785) Count Min Average Max Stdev Return values - getdents 14240 0.380 364.301 43372.450 1629.390 {'success': 14240} - close 14236 0.233 0.506 4.932 0.217 {'success': 14236} - fchdir 14231 0.252 0.407 5.769 0.117 {'success': 14231} - open 7123 0.779 2.321 12.697 0.936 {'success': 7119, 'ENOENT': 4} - newfstatat 7118 1.457 143.562 28103.532 1410.281 {'success': 7118} - openat 7118 1.525 2.411 9.107 0.771 {'success': 7118} - newfstat 7117 0.272 0.654 8.707 0.248 {'success': 7117} - write 573 0.298 0.715 8.584 0.391 {'success': 573} - brk 27 0.615 5.768 30.792 7.830 {'success': 27} - rt_sigaction 22 0.227 0.283 0.589 0.098 {'success': 22} - mmap 12 1.116 2.116 3.597 0.762 {'success': 12} - mprotect 6 1.185 2.235 3.923 1.148 {'success': 6} - read 5 0.925 2.101 6.300 2.351 {'success': 5} - ioctl 4 0.342 1.151 2.280 0.873 {'success': 2, 'ENOTTY': 2} - access 4 1.166 2.530 4.202 1.527 {'ENOENT': 4} - rt_sigprocmask 3 0.325 0.570 0.979 0.357 {'success': 3} - dup2 2 0.250 0.562 0.874 ? {'success': 2} - munmap 2 3.006 5.399 7.792 ? {'success': 2} - execve 1 7277.974 7277.974 7277.974 ? {'success': 1} - setpgid 1 0.945 0.945 0.945 ? {'success': 1} - fcntl 1 ? 0.000 0.000 ? {} - newuname 1 1.240 1.240 1.240 ? {'success': 1} Total: 71847 ----------------------------------------------------------------------------------------------------------------- apache2 (31517) Count Min Average Max Stdev Return values - fcntl 192 ? 0.000 0.000 ? 
{} - newfstat 156 0.237 0.484 1.102 0.222 {'success': 156} - read 144 0.307 1.602 16.307 1.698 {'success': 117, 'EAGAIN': 27} - access 96 0.705 1.580 3.364 0.670 {'success': 12, 'ENOENT': 84} - newlstat 84 0.459 0.738 1.456 0.186 {'success': 63, 'ENOENT': 21} - newstat 74 0.735 2.266 11.212 1.772 {'success': 50, 'ENOENT': 24} - lseek 72 0.317 0.522 0.915 0.112 {'success': 72} - close 39 0.471 0.615 0.867 0.069 {'success': 39} - open 36 2.219 12162.689 437697.753 72948.868 {'success': 36} - getcwd 28 0.287 0.701 1.331 0.277 {'success': 28} - poll 27 1.080 1139.669 2851.163 856.723 {'success': 27} - times 24 0.765 0.956 1.327 0.107 {'success': 24} - setitimer 24 0.499 5.848 16.668 4.041 {'success': 24} - write 24 5.467 6.784 16.827 2.459 {'success': 24} - writev 24 10.241 17.645 29.817 5.116 {'success': 24} - mmap 15 3.060 3.482 4.406 0.317 {'success': 15} - munmap 15 2.944 3.502 4.154 0.427 {'success': 15} - brk 12 0.738 4.579 13.795 4.437 {'success': 12} - chdir 12 0.989 1.600 2.353 0.385 {'success': 12} - flock 6 0.906 1.282 2.043 0.423 {'success': 6} - rt_sigaction 6 0.530 0.725 1.123 0.217 {'success': 6} - pwrite64 6 1.262 1.430 1.692 0.143 {'success': 6} - rt_sigprocmask 6 0.539 0.650 0.976 0.162 {'success': 6} - shutdown 3 7.323 8.487 10.281 1.576 {'success': 3} - getsockname 3 1.015 1.228 1.585 0.311 {'success': 3} - accept4 3 5174453.611 3450157.282 5176018.235 ? {'success': 2} Total: 1131 --- IRQ --- ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Handler duration and raise latency statistics ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ .. 
code-block:: bash $ ./lttng-irqstats mytrace/ Timerange: [2014-03-11 16:05:41.314824752, 2014-03-11 16:05:45.041994298] Hard IRQ Duration (us) count min avg max stdev ----------------------------------------------------------------------------------| 1: 30 10.901 45.500 64.510 18.447 | 42: 259 3.203 7.863 21.426 3.183 | 43: 2 3.859 3.976 4.093 0.165 | 44: 92 0.300 3.995 6.542 2.181 | Soft IRQ Duration (us) Raise latency (us) count min avg max stdev | count min avg max stdev ----------------------------------------------------------------------------------|------------------------------------------------------------ 1: 495 0.202 21.058 51.060 11.047 | 53 2.141 11.217 20.005 7.233 3: 14 0.133 9.177 32.774 10.483 | 14 0.763 3.703 10.902 3.448 4: 257 5.981 29.064 125.862 15.891 | 257 0.891 3.104 15.054 2.046 6: 26 0.309 1.198 1.748 0.329 | 26 9.636 39.222 51.430 11.246 7: 299 1.185 14.768 90.465 15.992 | 298 1.286 31.387 61.700 11.866 9: 338 0.592 3.387 13.745 1.356 | 147 2.480 29.299 64.453 14.286 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Handler duration frequency distribution ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ .. 
code-block:: bash $ ./lttng-irqfreq --timerange [16:05:42,16:05:45] --irq 44 --stats mytrace/ Timerange: [2014-03-11 16:05:42.042034570, 2014-03-11 16:05:44.998914297] Hard IRQ Duration (us) count min avg max stdev ----------------------------------------------------------------------------------| 44: 72 0.300 4.018 6.542 2.164 | Frequency distribution iwlwifi (44) ############################################################################### 0.300 █████ 1.00 0.612 ██████████████████████████████████████████████████████████████ 12.00 0.924 ████████████████████ 4.00 1.236 ██████████ 2.00 1.548 0.00 1.861 █████ 1.00 2.173 0.00 2.485 █████ 1.00 2.797 ██████████████████████████ 5.00 3.109 █████ 1.00 3.421 ███████████████ 3.00 3.733 0.00 4.045 █████ 1.00 4.357 █████ 1.00 4.669 ██████████ 2.00 4.981 ██████████ 2.00 5.294 █████████████████████████████████████████ 8.00 5.606 ████████████████████████████████████████████████████████████████████ 13.00 5.918 ██████████████████████████████████████████████████████████████ 12.00 6.230 ███████████████ 3.00 ------ Others ------ There are a lot of other scripts, we encourage you to try them and read the ``--help`` to see all the available options. ================ Work in progress ================ Track the page cache and extract the latencies associated with pages flush to disk. In order to do that, we rely on the assumption that the pages are flushed in a FIFO order. 
It might not be 100% accurate, but it already gives great results : An example here when saving a file in vim:: [19:57:51.173332284 - 19:57:51.177794657] vim (31517) syscall_entry_fsync(fd = 4 ) = 0, 4.462 ms 1 dirty page(s) were flushed (assuming FIFO): vim (31517): 1 pages - blabla : 1 pages 13 active dirty filesystem page(s) (known): redis-server (2092): 2 pages - /var/log/redis/redis-server.log : 2 pages vim (31517): 2 pages - .blabla.swp : 2 pages lttng-consumerd (6750): 9 pages - unknown (origin not found) : 9 pages An other example when running the 'sync' command:: [19:57:53.046840755 - 19:57:53.072809609] sync (31554) syscall_entry_sync(fd = ) = 0, 25.969 ms 23 dirty page(s) were flushed (assuming FIFO): redis-server (2092): 2 pages - /var/log/redis/redis-server.log : 2 pages vim (31517): 9 pages - /home/julien/.viminfo.tmp : 6 pages - .blabla.swp : 3 pages lttng-consumerd (6750): 12 pages - unknown (origin not found) : 12 pages PostgreSQL with 'sys_fdatasync':: [13:49:39.908599447 - 13:49:39.915930730] postgres (1137) sys_fdatasync(fd = 7 ) = 0, 7.331 ms 2 pages allocated during the period 88 dirty page(s) were flushed (assuming FIFO): postgres (1137): 88 pages - /var/lib/postgresql/9.1/main/pg_xlog/000000010000000000000008 : 88 pages 68 last dirtied filesystem page(s): postgres (2419): 68 pages - base/11951/18410 : 46 pages - base/11951/18407 : 10 pages - base/11951/18407_fsm : 6 pages - base/11951/18410_fsm : 6 pages Detecting a fight for the I/O between a huge write and postgresql:: [13:49:47.242730583 - 13:49:47.442835037] python (2353) sys_write(fd = 3 , count = 102395904) = 102395904, 200.104 ms 34760 pages allocated during the period woke up kswapd during the period 10046 pages written on disk freed 33753 pages from the cache during the period 1397 last dirtied filesystem page(s): python (2353): 1325 pages - /root/bla : 1325 pages postgres (2419): 72 pages - base/11951/18419 : 72 pages =========== Limitations =========== The main limitation of this 
project is the fact that it can be quite slow to process a large trace. This project is a work in progress and we focus on the problem-solving aspect. Therefore, features have been prioritized over performance for now. One other aspect is the fact that the state is not persistent; the trace has to be re-processed if another analysis script is to be used on the same trace. Some scripts belonging to the same category allow the combination of multiple analyses into a single pass (see ``--freq``, ``--log``, ``--usage``, ``--latencystats``, etc). We are planning to add a way to save the state and/or create an interactive environment to allow the user to run multiple analyses on the same trace without having to process the trace every time. ========== Conclusion ========== We hope you have fun trying this project and please remember it is a work in progress; feedback, bug reports and improvement ideas are always welcome! .. _pip: http://www.pip-installer.org/en/latest/index.html .. |pypi| image:: https://img.shields.io/pypi/v/lttnganalyses.svg?style=flat-square&label=latest%20version :target: https://pypi.python.org/pypi/lttnganalyses :alt: Latest version released on PyPi lttnganalyses-0.4.3/lttng-irqlog0000775000175000017500000000234712553274232020442 0ustar mjeansonmjeanson00000000000000#!/usr/bin/env python3 # # The MIT License (MIT) # # Copyright (C) 2015 - Julien Desfossez # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. 
# # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. from lttnganalyses.cli import irq if __name__ == '__main__': irq.runlog()